From be587bfc1d49790877014b79bd6719a6a398dd71 Mon Sep 17 00:00:00 2001
From: Simon Willnauer
Date: Tue, 15 May 2018 16:28:45 +0200
Subject: [PATCH 01/44] Side-step pending deletes check (#30607)

When we split/shrink an index we open several IndexWriter instances,
causing file deletes to be pending on Windows. This subsequently fails
when we open an IW to bootstrap the index history, due to the pending
deletes. This change sidesteps the check since we know our history only
goes forward in terms of files and segments.

Closes #30416
---
 .../test/indices.split/20_source_mapping.yml       |  7 ++-----
 .../java/org/elasticsearch/index/store/Store.java  | 15 +++++++++++++--
 .../admin/indices/create/ShrinkIndexIT.java        |  2 --
 .../action/admin/indices/create/SplitIndexIT.java  |  2 --
 4 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml
index 4bac4bf5b0807..88d3f3c610202 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml
@@ -1,11 +1,8 @@
 ---
 "Split index ignores target template mapping":
   - skip:
-      # when re-enabling uncomment the below skips
-      version: "all"
-      reason: "AwaitsFix'ing, see https://github.com/elastic/elasticsearch/issues/30503"
-      # version: " - 6.3.99"
-      # reason: expects warnings that pre-6.4.0 will not send
+      version: " - 6.3.99"
+      reason: expects warnings that pre-6.4.0 will not send
       features: "warnings"

 # create index
diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java
index f9ba5f900c03d..ae3762cee7725 100644
--- a/server/src/main/java/org/elasticsearch/index/store/Store.java
+++ b/server/src/main/java/org/elasticsearch/index/store/Store.java
@@ -731,13 +731,13 @@ static final class StoreDirectory extends FilterDirectory {

         private final Logger deletesLogger;

-        StoreDirectory(Directory delegateDirectory, Logger deletesLogger) throws IOException {
+        StoreDirectory(Directory delegateDirectory, Logger deletesLogger) {
             super(delegateDirectory);
             this.deletesLogger = deletesLogger;
         }

         @Override
-        public void close() throws IOException {
+        public void close() {
             assert false : "Nobody should close this directory except of the Store itself";
         }

@@ -759,6 +759,17 @@ private void innerClose() throws IOException {
         public String toString() {
             return "store(" + in.toString() + ")";
         }
+
+        @Override
+        public boolean checkPendingDeletions() throws IOException {
+            if (super.checkPendingDeletions()) {
+                deletesLogger.warn("directory still has pending deletes");
+            }
+            // we skip this check since our IW usage always goes forward,
+            // but we might still run into situations where we have pending deletes, e.g.
+            // in the shrink / split case, and that will cause issues on Windows since we
+            // open multiple IW instances one after another during the split/shrink recovery
+            return false;
+        }
     }

     /**
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
index 8443ac2bf2e3d..e48f151081f62 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java
@@ -23,7 +23,6 @@
 import org.apache.lucene.search.SortField;
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.search.SortedSetSortField;
-import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
@@ -77,7 +76,6 @@
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;

-@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
 public class ShrinkIndexIT extends ESIntegTestCase {

     @Override
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
index 894a7738c510d..68e7f1145446c 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java
@@ -24,7 +24,6 @@
 import org.apache.lucene.search.SortedSetSelector;
 import org.apache.lucene.search.SortedSetSortField;
 import org.apache.lucene.search.join.ScoreMode;
-import org.apache.lucene.util.LuceneTestCase;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
@@ -80,7 +79,6 @@
 import static org.hamcrest.Matchers.greaterThanOrEqualTo;

-@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30416")
 public class SplitIndexIT extends ESIntegTestCase {

     @Override

From 7f1a15b3aab94f6a52cb9e6ed0fb38d64da4cadd Mon Sep 17 00:00:00 2001
From: lcawl
Date: Tue, 15 May 2018 09:02:37 -0700
Subject: [PATCH 02/44] [DOCS] Restores 6.4.0 release notes and highlights

---
 docs/reference/index-shared4.asciidoc       |    4 +
 docs/reference/release-notes.asciidoc       |   31 +-
 docs/reference/release-notes/6.4.0.asciidoc | 5550 +++++++++++++++++++
 .../release-notes/highlights.asciidoc       |    3 +
 4 files changed, 5569 insertions(+), 19 deletions(-)
 create mode 100644 docs/reference/release-notes/6.4.0.asciidoc

diff --git a/docs/reference/index-shared4.asciidoc b/docs/reference/index-shared4.asciidoc
index f4e87b4e9e8fc..7b7eb0c10e9e9 100644
--- a/docs/reference/index-shared4.asciidoc
+++ b/docs/reference/index-shared4.asciidoc
@@ -4,3 +4,7 @@ include::how-to.asciidoc[]
 include::testing.asciidoc[]

 include::glossary.asciidoc[]
+
+include::release-notes/highlights.asciidoc[]
+
+include::release-notes.asciidoc[]
diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc
index af65ba177cb10..4c5d79599afca 100644
--- a/docs/reference/release-notes.asciidoc
+++ b/docs/reference/release-notes.asciidoc
@@ -5,6 +5,9 @@
 --
 This section summarizes the changes in each release.
+* <>
+* <>
+* <>
 * <>
 * <>
 * <>
 * <>
 * <>
 * <>
 * <>
 * <>
 * <>
 * <>
 * <>
 * <>
+* <>
+* <>
+* <>
+* <>
+* <>
+* <>
+* <>
 --
-include::release-notes/6.2.4.asciidoc[]
-include::release-notes/6.2.3.asciidoc[]
-include::release-notes/6.2.2.asciidoc[]
-include::release-notes/6.2.1.asciidoc[]
-include::release-notes/6.2.0.asciidoc[]
-include::release-notes/6.1.4.asciidoc[]
-include::release-notes/6.1.3.asciidoc[]
-include::release-notes/6.1.2.asciidoc[]
-include::release-notes/6.1.1.asciidoc[]
-include::release-notes/6.1.0.asciidoc[]
-include::release-notes/6.0.1.asciidoc[]
-include::release-notes/6.0.0.asciidoc[]
-include::release-notes/6.0.0-rc2.asciidoc[]
-include::release-notes/6.0.0-rc1.asciidoc[]
-include::release-notes/6.0.0-beta2.asciidoc[]
-include::release-notes/6.0.0-beta1.asciidoc[]
-include::release-notes/6.0.0-alpha2.asciidoc[]
-include::release-notes/6.0.0-alpha1.asciidoc[]
-include::release-notes/6.0.0-alpha1-5x.asciidoc[]
+
+include::release-notes/6.4.0.asciidoc[]
diff --git a/docs/reference/release-notes/6.4.0.asciidoc b/docs/reference/release-notes/6.4.0.asciidoc
new file mode 100644
index 0000000000000..c7970b9885ed5
--- /dev/null
+++ b/docs/reference/release-notes/6.4.0.asciidoc
@@ -0,0 +1,5550 @@
////
// To add a release, copy and paste the following text, uncomment the relevant
// sections, and add a link to the new section in the list of releases at the
// top of the page. Note that release subheads must be floated and sections
// cannot be empty.
// TEMPLATE

// [[release-notes-n.n.n]]
// == {es} n.n.n

//[float]
//[[breaking-n.n.n]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

//[float]
//=== New Features

//[float]
//=== Enhancements

//[float]
//=== Bug Fixes

//[float]
//=== Regressions

//[float]
//=== Known Issues
////

[[release-notes-6.4.0]]
== {es} version 6.4.0

coming[6.4.0]

//[float]
//[[breaking-6.4.0]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

[float]
=== New Features

The new <> field lets you know which fields were ignored at index time
because of the <> option. ({pull}29658[#29658])

A new analysis plugin called `analysis_nori` exposes the Lucene Korean
analysis module.
({pull}30397[#30397])

[float]
=== Enhancements

{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow copying source settings on index resize operations] ({pull}30255[#30255])

Geo::
* Add validation that geohashes are not empty and don't contain unsupported characters ({pull}30376[#30376])

Rollup::
* Validate timezone in range queries to ensure they match the selected job when
searching ({pull}30338[#30338])

[float]
=== Bug Fixes

Use the date format in the `date_range` mapping before falling back to the
default ({pull}29310[#29310])

Fix NPE in `more_like_this` when the field has zero tokens ({pull}30365[#30365])

Do not ignore request analysis/similarity settings on index resize operations
when the source index already contains such settings ({pull}30216[#30216])

Fix NPE when the CumulativeSum aggregation encounters a null value or an empty
bucket ({pull}29641[#29641])

//[float]
//=== Regressions

//[float]
//=== Known Issues

[[release-notes-6.3.1]]
== {es} version 6.3.1

coming[6.3.1]

//[float]
//[[breaking-6.3.1]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

//[float]
//=== New Features

//[float]
//=== Enhancements

[float]
=== Bug Fixes

Reduce the number of object allocations made by {security} when resolving the
indices and aliases for a request ({pull}30180[#30180])

Respect the accept header on requests with no handler ({pull}30383[#30383])

//[float]
//=== Regressions

//[float]
//=== Known Issues

[[release-notes-6.3.0]]
== {es} version 6.3.0

coming[6.3.0]

[float]
[[breaking-6.3.0]]
=== Breaking Changes

[float]
=== Deprecations

Monitoring::
* By default when you install {xpack}, monitoring is enabled but data collection
is disabled. To enable data collection, use the new
`xpack.monitoring.collection.enabled` setting. You can update this setting by
using the <>. For more
information, see <>.

Security::
* The legacy `XPackExtension` extension mechanism has been removed and replaced
with an SPI-based extension mechanism that is installed and built as an
{es} plugin.

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

//[float]
//=== New Features

//[float]
//=== Enhancements

//[float]
//=== Bug Fixes

//[float]
//=== Regressions

//[float]
//=== Known Issues

[[release-notes-6.2.4]]
== {es} version 6.2.4

//[float]
//[[breaking-6.2.4]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

//[float]
//=== New Features

//[float]
//=== Enhancements

[float]
=== Bug Fixes

Engine::
* Harden periodically check to avoid endless flush loop {pull}29125[#29125] (issues: {issue}28350[#28350], {issue}29097[#29097])

Ingest::
* Don't allow referencing the pattern bank name in the pattern bank {pull}29295[#29295] (issue: {issue}29257[#29257])

[float]
=== Regressions

Fail snapshot operations early when creating or deleting a snapshot on a
repository that an older version of Elasticsearch wrote to after a newer
version had already written to it.
({pull}30140[#30140])

Java High Level REST Client::
* Bulk processor#awaitClose to close scheduler {pull}29263[#29263]

Do not fail a snapshot when deleting a missing snapshotted file ({pull}30332[#30332])

Java Low Level REST Client::
* REST client: hosts marked dead for the first time should not be immediately retried {pull}29230[#29230]

Machine Learning::
* Prevents failed jobs from mistakenly re-opening after node loss recovery.
* Returns an error when an operation cannot be submitted because the process was
killed.
* Respects the datafeed frequency when it is less than or equal to the
`query_delay` setting.

Network::
* Cross-cluster search and default connections can get crossed {pull}29321[#29321]

Percolator::
* Fixed bug when non-percolator docs end up in the search hits {pull}29447[#29447] (issue: {issue}29429[#29429])
* Fixed a msm accounting error that can occur while analyzing a percolator query {pull}29415[#29415] (issue: {issue}29393[#29393])
* Fix more query extraction bugs. {pull}29388[#29388] (issues: {issue}28353[#28353], {issue}29376[#29376])
* Fix some query extraction bugs. {pull}29283[#29283]

Plugins::
* Plugins: Fix native controller confirmation for non-meta plugin {pull}29434[#29434]

Search::
* Propagate ignore_unmapped to inner_hits {pull}29261[#29261] (issue: {issue}29071[#29071])

Security/Authentication::
* Adds missing `idp.use_single_logout` and `populate_user_metadata` SAML realm
settings. See <>.

Settings::
* Archive unknown or invalid settings on updates {pull}28888[#28888] (issue: {issue}28609[#28609])

Watcher::
* Re-enables `smtp.*` account configuration properties in the notification
settings. See <>.
* Ensures starting and stopping {watcher} is properly acknowledged as a master
node action.
* Refrains from appending a question mark to an HTTP request if no parameters
are used.

//[float]
//=== Known Issues

[[release-notes-6.2.3]]
== {es} version 6.2.3

//[float]
//[[breaking-6.2.3]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

[float]
=== Deprecations

Deprecated multi-argument versions of the request methods in the RestClient.
Prefer the "Request" object flavored methods. ({pull}30315[#30315])

[float]
=== New Features

A new analysis plugin called `analysis_nori` exposes the Lucene Korean
analysis module. ({pull}30397[#30397])

[float]
=== Enhancements

Highlighting::
* Limit analyzed text for highlighting (improvements) {pull}28808[#28808] (issues: {issue}16764[#16764], {issue}27934[#27934])

{ref-64}/breaking_64_api_changes.html#copy-source-settings-on-resize[Allow
copying source settings on index resize operations] ({pull}30255[#30255], {pull}30404[#30404])

Added new "Request" object flavored request methods in the RestClient. Prefer
these instead of the multi-argument versions. ({pull}29623[#29623])

Recovery::
* Require translogUUID when reading global checkpoint {pull}28587[#28587] (issue: {issue}28435[#28435])

Added `setJsonEntity` to the `Request` object so it is marginally easier to
send JSON, as shown in the sketch after the bug fixes below. ({pull}30447[#30447])

The Watcher HTTP client used in watches now allows more parallel connections to
the same endpoint and evicts long-running connections.
({pull}30130[#30130])

The cluster state listener that decides whether watcher should be
stopped/started/paused now runs far less code in an executor and is more
synchronous and predictable. The trigger engine thread is now only started on
data nodes, and the Execute Watch API can be triggered regardless of whether
watcher is started or stopped. ({pull}30118[#30118])

Added the put index template API to the high-level REST client ({pull}30400[#30400])

Add ability to filter coordinating-only nodes when interacting with cluster
APIs. ({pull}30313[#30313])

[float]
=== Bug Fixes

Core::
* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797])

Engine::
* Avoid class cast exception from index writer {pull}28989[#28989]
* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967])
* Never block on key in `LiveVersionMap#pruneTombstones` {pull}28736[#28736] (issue: {issue}28714[#28714])

Ingest::
* Continue registering pipelines after one pipeline parse failure. {pull}28752[#28752] (issue: {issue}28269[#28269])

Java High Level REST Client::
* REST high-level client: encode path parts {pull}28663[#28663] (issue: {issue}28625[#28625])

Machine Learning::
* Fixed the <> such that it
returns only machine learning-specific node attributes.

Monitoring::
* Aligned reporting of index statistics that exist in the current cluster state.
This fix avoids subtle race conditions in stats reporting.

Packaging::
* Delay path expansion on Windows {pull}28753[#28753] (issues: {issue}27675[#27675], {issue}28748[#28748])

Percolator::
* Fix percolator query analysis for function_score query {pull}28854[#28854]
* Improved percolator's random candidate query duel test {pull}28840[#28840]

Security::
* Fixed handling of comments in XML documents [ESA-2018-07].
* Fixed auditing such that when you use a local audit index, it maintains the
mappings automatically. Maintenance is necessary, for example, when new fields
are introduced or document types change.
* Added and changed settings for the SAML NameID policy. For example, added the
`nameid.allow_create` setting and changed the default value for
the SPNameQualifier setting to blank. See {stack-ov}/saml-realm.html[SAML Authentication].
* Fixed handling of an Assertion Consumer Service (ACS) URL with existing query
parameters. See {stack-ov}/saml-realm.html[SAML Authentication].
* Fixed the PKI realm bootstrap check such that it works with secure settings.
For more information, see <>.

Snapshot/Restore::
* Fix NPE when using deprecated Azure settings {pull}28769[#28769] (issues: {issue}23518[#23518], {issue}28299[#28299])

Stats::
* Fix AdaptiveSelectionStats serialization bug {pull}28718[#28718] (issue: {issue}28713[#28713])

Watcher::
* Fixed the serialization of failed HipChat messages, such that it no longer
tries to write the status field twice.
* Fixed TransformInput toXContent serialization errors. For more information,
see
{stack-ov}/input-chain.html#_transforming_chained_input_data[Transforming Chained Input Data].
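
Taken together, the RestClient entries in this release read like the following
minimal sketch; the host, index name, and document body are placeholders and
not part of the change itself:

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class RequestFlavorExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            // Build the request once, then configure it, instead of passing
            // endpoint, parameters, and entity as separate arguments.
            Request request = new Request("PUT", "/my-index/_doc/1");
            request.addParameter("refresh", "true");
            // setJsonEntity wraps the string in a JSON content-type entity.
            request.setJsonEntity("{\"message\":\"hello\"}");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
----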
+ + +Allocation:: + +Auto-expand replicas when adding or removing nodes to prevent shard copies from +being dropped and resynced when a data node rejoins the cluster ({pull}30423[#30423]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + + +[[release-notes-6.2.2]] +== {es} version 6.2.2 + +//[float] +//[[breaking-6.2.2]] +//=== Breaking Changes + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +[float] +=== Enhancements + +Recovery:: +* Synced-flush should not seal index of out of sync replicas {pull}28464[#28464] (issue: {issue}10032[#10032]) + +[float] +=== Bug Fixes + +Core:: +* Handle throws on tasks submitted to thread pools {pull}28667[#28667] +* Fix size blocking queue to not lie about its weight {pull}28557[#28557] (issue: {issue}28547[#28547]) + +Ingest:: +* Guard accessDeclaredMembers for Tika on JDK 10 {pull}28603[#28603] (issue: {issue}28602[#28602]) +* Fix for bug that prevents pipelines to load that use stored scripts after a restart {pull}28588[#28588] + +Java High Level REST Client:: +* Fix parsing of script fields {pull}28395[#28395] (issue: {issue}28380[#28380]) +* Move to POST when calling API to retrieve which support request body {pull}28342[#28342] (issue: {issue}28326[#28326]) + +Machine Learning:: +* Fixed an exception that occurred when a categorization field contained an +empty string. + +Monitoring:: +* Properly registered `xpack.monitoring.exporters.*.headers.*` settings, which +were broken in 6.2.0 and 6.2.1. For more information, see +<>. + +Packaging:: +* Fix using relative custom config path {pull}28700[#28700] (issue: {issue}27610[#27610]) +* Disable console logging in the Windows service {pull}28618[#28618] (issue: {issue}20422[#20422]) + +Percolator:: +* Do not take duplicate query extractions into account for minimum_should_match attribute {pull}28353[#28353] (issue: {issue}28315[#28315]) + +Recovery:: +* Fsync directory after cleanup {pull}28604[#28604] (issue: {issue}28435[#28435]) + +Security:: +* Added CachingRealm to published artifacts so it can be used in custom realm +extensions. +* If the realm uses native role mappings and the security index health changes, +the realm caches are cleared. For example, they are cleared when the index +recovers from a red state, when the index is deleted, when the index becomes +outdated, and when the index becomes up-to-date. +* Fixed a bug that could prevent auditing to a remote index if the remote +cluster was re-started at the same time as the audited cluster. +* Removed AuthorityKeyIdentifier's Issuer and Serial number from certificates +generated by `certgen` and `certutil`. This improves compatibility with +certificate verification in {kib}. + +Watcher:: +* Proxies now use HTTP by default, which was the default prior to 6.0. This +fixes issues with HTTPS requests that tried to access proxies via HTTP. +* Fixed the HTML sanitizer settings +(`xpack.notification.email.html.sanitization.*`), which were broken in 6.2. For +more information, see <>. + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[[release-notes-6.2.1]] +== {es} version 6.2.1 + +//[float] +//[[breaking-6.2.1]] +//=== Breaking Changes + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +//[float] +//=== Enhancements +The cluster state listener to decide if watcher should be +stopped/started/paused now runs far less code in an executor but is more +synchronous and predictable. 
Also, the trigger engine thread is now only started on data nodes, and the
Execute Watch API can be triggered regardless of whether watcher is started or
stopped. ({pull}30118[#30118])

[float]
=== Bug Fixes

Plugin Lang Painless::
* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501])

Plugins::
* Fix the ability to remove old plugin {pull}28540[#28540] (issue: {issue}28538[#28538])

Security::
* Fixed missing dependencies for x-pack-transport.
* Fixed `saml-metadata` env file such that it sources the appropriate
environment file.

Machine Learning::
* Account for gaps in data counts after job is reopened ({pull}30294[#30294])

[[release-notes-6.2.0]]
== {es} version 6.2.0

[float]
[[breaking-6.2.0]]
=== Breaking Changes

Aggregations::
* Add a new cluster setting to limit the total number of buckets returned by a request {pull}27581[#27581] (issues: {issue}26012[#26012], {issue}27452[#27452])

Core::
* Forbid granting the all permission in production {pull}27548[#27548]

Highlighting::
* Limit the analyzed text for highlighting {pull}27934[#27934] (issue: {issue}27517[#27517])

Rollover::
* Fail rollover if duplicated alias found in templates {pull}28110[#28110] (issue: {issue}26976[#26976])

Search::
* Introduce limit to the number of terms in Terms Query {pull}27968[#27968] (issue: {issue}18829[#18829])

[float]
=== Breaking Java Changes

Java API::
* Remove `operationThreaded` from Java API {pull}27836[#27836]

Java High Level REST Client::
* REST high-level client: remove index suffix from indices client method names {pull}28263[#28263]

[float]
=== Deprecations

Analysis::
* Backport delimited payload filter renaming {pull}27535[#27535] (issue: {issue}26625[#26625])

Suggesters::
* Deprecating `jarowinkler` in favor of `jaro_winkler` {pull}27526[#27526]
* Deprecating `levenstein` in favor of `levenshtein` {pull}27409[#27409] (issue: {issue}27325[#27325])

[float]
=== New Features

Machine Learning::
* Added the ability to identify scheduled events and prevent anomaly detection
during these periods. For more information, see
{stack-ov}/ml-calendars.html[Calendars and Scheduled Events].

Plugin Ingest GeoIp::
* Enable ASN support for Ingest GeoIP plugin. {pull}27958[#27958] (issue: {issue}27849[#27849])

Plugin Lang Painless::
* Painless: Add spi jar that will be published for extending whitelists {pull}28302[#28302]
* Painless: Add a simple cache for whitelist methods and fields. {pull}28142[#28142]

Plugins::
* Add the ability to bundle multiple plugins into a meta plugin {pull}28022[#28022] (issue: {issue}27316[#27316])

Rank Evaluation::
* Backport of ranking evaluation API (#27478) {pull}27844[#27844] (issue: {issue}27478[#27478])

Recovery::
* Backport for using lastSyncedGlobalCheckpoint in deletion policy {pull}27866[#27866] (issue: {issue}27826[#27826])

Reindex API::
* Add scroll parameter to _reindex API (a sketch follows this list) {pull}28041[#28041] (issue: {issue}27555[#27555])

Security::
* {security} now supports user authentication using SAML Single Sign-On. For
more information, see {stack-ov}/saml-realm.html[SAML authentication].

Watcher::
* Added a transform input for chained input. For more information, see
{stack-ov}/input-chain.html#_transforming_chained_input_data[Transforming Chained Input Data].
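
A sketch of the `_reindex` scroll parameter mentioned above, under the
assumption that it is passed as a query-string value (per the PR title) and
using placeholder index names:

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class ReindexScrollExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            Request reindex = new Request("POST", "/_reindex");
            // Keep the snapshot of the source index alive longer than the
            // default while copying a very large index.
            reindex.addParameter("scroll", "30m");
            reindex.setJsonEntity(
                "{\"source\":{\"index\":\"old-index\"},"
                    + "\"dest\":{\"index\":\"new-index\"}}");
            System.out.println(client.performRequest(reindex).getStatusLine());
        }
    }
}
----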
+ +[float] +=== Enhancements + +Allocation:: +* Fix cluster.routing.allocation.enable and cluster.routing.rebalance.enable case {pull}28037[#28037] (issue: {issue}28007[#28007]) +* Add node id to shard failure message {pull}28024[#28024] (issue: {issue}28018[#28018]) + +Analysis:: +* Limit the analyzed text for highlighting (#27934) {pull}28176[#28176] (issue: {issue}27517[#27517]) +* Allow TrimFilter to be used in custom normalizers {pull}27758[#27758] (issue: {issue}27310[#27310]) + +Circuit Breakers:: +* Add accounting circuit breaker and track segment memory usage {pull}27116[#27116] (issue: {issue}27044[#27044]) + +Cluster:: +* Adds wait_for_no_initializing_shards to cluster health API {pull}27489[#27489] (issue: {issue}25623[#25623]) + +Core:: +* Introduce elasticsearch-core jar {pull}28191[#28191] (issue: {issue}27933[#27933]) +* Rename core module to server {pull}28190[#28190] (issue: {issue}27933[#27933]) +* Rename core module to server {pull}28180[#28180] (issue: {issue}27933[#27933]) +* Introduce elasticsearch-core jar {pull}28178[#28178] (issue: {issue}27933[#27933]) +* Add Writeable.Reader support to TransportResponseHandler {pull}28010[#28010] (issue: {issue}26315[#26315]) +* Simplify rejected execution exception {pull}27664[#27664] (issue: {issue}27663[#27663]) +* Add node name to thread pool executor name {pull}27663[#27663] (issues: {issue}26007[#26007], {issue}26835[#26835]) + +Discovery:: +* Add information when master node left to DiscoveryNodes' shortSummary() {pull}28197[#28197] (issue: {issue}28169[#28169]) + +Engine:: +* Move uid lock into LiveVersionMap {pull}27905[#27905] +* Optimize version map for append-only indexing {pull}27752[#27752] + +Geo:: +* [GEO] Add WKT Support to GeoBoundingBoxQueryBuilder {pull}27692[#27692] (issues: {issue}27690[#27690], {issue}9120[#9120]) +* [Geo] Add Well Known Text (WKT) Parsing Support to ShapeBuilders {pull}27417[#27417] (issue: {issue}9120[#9120]) + +Highlighting:: +* Include all sentences smaller than fragment_size in the unified highlighter {pull}28132[#28132] (issue: {issue}28089[#28089]) + +Ingest:: +* Enable convert processor to support Long and Double {pull}27891[#27891] (issues: {issue}23085[#23085], {issue}23423[#23423]) + +Internal:: +* Make KeyedLock reentrant {pull}27920[#27920] +* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865]) +* Tighten the CountedBitSet class {pull}27632[#27632] +* Avoid doing redundant work when checking for self references. {pull}26927[#26927] (issue: {issue}26907[#26907]) + +Java API:: +* Add missing delegate methods to NodeIndicesStats {pull}28092[#28092] +* Java api clean-up : consistency for `shards_acknowledged` getters {pull}27819[#27819] (issue: {issue}27784[#27784]) + +Java High Level REST Client:: +* add toString implementation for UpdateRequest. {pull}27997[#27997] (issue: {issue}27986[#27986]) +* Add Close Index API to the high level REST client {pull}27734[#27734] (issue: {issue}27205[#27205]) +* Add Open Index API to the high level REST client {pull}27574[#27574] (issue: {issue}27205[#27205]) +* Added Create Index support to high-level REST client {pull}27351[#27351] (issue: {issue}27205[#27205]) +* Add multi get api to the high level rest client {pull}27337[#27337] (issue: {issue}27205[#27205]) +* Add msearch api to high level client {pull}27274[#27274] + +Machine Learning:: +* Increased tokenization flexibility for categorization. 
Now all {es} analyzer +functionality is available, which opens up the possibility of sensibly +categorizing non-English log messages. For more information, see {stack-ov}/ml-configuring-categories.html#ml-configuring-analyzer[Customizing the Categorization Analyzer]. +* Improved the sensitivity of the analysis to high variance data with lots of +values near zero. +* Improved the decay rate of the model memory by using a weighted moving average. +* Machine learning indices created after upgrading to 6.2 have the +`auto_expand_replicas: 0-1` setting rather than a fixed setting of 1 replica. +As a result, {ml} indices created after upgrading to 6.2 can have a green +status on single node clusters. There is no impact in multi-node clusters. +* Changed the credentials that are used by {dfeeds}. When {security} is enabled, +a {dfeed} stores the roles of the user who created or updated the {dfeed} +**at that time**. This means that if those roles are updated, the {dfeed} +subsequently runs with the new permissions that are associated with the roles. +However, if the user's roles are adjusted after creating or updating the {dfeed} +then the {dfeed} continues to run with the permissions that are associated with +the original roles. For more information, see +{stack-ov}/ml-dfeeds.html[Datafeeds]. +* Added a new `scheduled` forecast status, which indicates that the forecast +has not started yet. + +Mapping:: +* Allow `_doc` as a type. {pull}27816[#27816] (issues: {issue}27750[#27750], {issue}27751[#27751]) + +Monitoring:: +* {monitoring} indices (`.monitoring`) created after upgrading to 6.2 have the +`auto_expand_replicas: 0-1` setting rather than a fixed setting of 1 replica. +As a result, monitoring indices created after upgrading to 6.2 can have a green +status on single node clusters. There is no impact in multi-node clusters. +* Added a cluster alert that triggers whenever a node is added, removed, or +restarted. + +Network:: +* Add NioGroup for use in different transports {pull}27737[#27737] (issue: {issue}27260[#27260]) +* Add read timeouts to http module {pull}27713[#27713] +* Implement byte array reusage in `NioTransport` {pull}27696[#27696] (issue: {issue}27563[#27563]) +* Introduce resizable inbound byte buffer {pull}27551[#27551] (issue: {issue}27563[#27563]) +* Decouple nio constructs from the tcp transport {pull}27484[#27484] (issue: {issue}27260[#27260]) + +Packaging:: +* Extend JVM options to support multiple versions {pull}27675[#27675] (issue: {issue}27646[#27646]) +* Add explicit coreutils dependency {pull}27660[#27660] (issue: {issue}27609[#27609]) +* Detect mktemp from coreutils {pull}27659[#27659] (issues: {issue}27609[#27609], {issue}27643[#27643]) +* Enable GC logs by default {pull}27610[#27610] +* Use private directory for temporary files {pull}27609[#27609] (issues: {issue}14372[#14372], {issue}27144[#27144]) + +Percolator:: +* also extract match_all queries when indexing percolator queries {pull}27585[#27585] + +Plugin Lang Painless:: +* Painless: Add whitelist extensions {pull}28161[#28161] +* Painless: Modify Loader to Load Classes Directly from Definition {pull}28088[#28088] +* Clean Up Painless Cast Object {pull}27794[#27794] +* Painless: Only allow Painless type names to be the same as the equivalent Java class. 
{pull}27264[#27264] + +Plugins:: +* Add client actions to action plugin {pull}28280[#28280] (issue: {issue}27759[#27759]) +* Plugins: Add validation to plugin descriptor parsing {pull}27951[#27951] +* Plugins: Add plugin extension capabilities {pull}27881[#27881] +* Add support for filtering mappings fields {pull}27603[#27603] + +Rank Evaluation:: +* Simplify RankEvalResponse output {pull}28266[#28266] + +Recovery:: +* Truncate tlog cli should assign global checkpoint {pull}28192[#28192] (issue: {issue}28181[#28181]) +* Replica starts peer recovery with safe commit {pull}28181[#28181] (issue: {issue}10708[#10708]) +* Primary send safe commit in file-based recovery {pull}28038[#28038] (issue: {issue}10708[#10708]) +* Fail resync-failed shards in subsequent writes {pull}28005[#28005] +* Introduce promoting index shard state {pull}28004[#28004] (issue: {issue}24841[#24841]) +* Non-peer recovery should set the global checkpoint {pull}27965[#27965] +* Persist global checkpoint when finalizing a peer recovery {pull}27947[#27947] (issue: {issue}27861[#27861]) +* Rollback a primary before recovering from translog {pull}27804[#27804] (issue: {issue}10708[#10708]) + +Search:: +* Use typeName() to check field type in GeoShapeQueryBuilder {pull}27730[#27730] +* Optimize search_after when sorting in index sort order {pull}26401[#26401] + +Security:: +* Added the ability to refresh tokens that were created by the token API. The +API provides information about a refresh token, which you can use within 24 +hours of its creation to extend the life of a token. For more information, see +<>. +* Added principal and role information to `access_granted`, `access_denied`, +`run_as_granted`, and `run_as_denied` audit events. For more information about +these events, see {stack-ov}/auditing.html[Auditing Security Events]. +* Added audit event ignore policies, which are a way to tune the verbosity of an +audit trail. These policies define rules for ignoring audit events that match +specific attribute values. For more information, see +{stack-ov}/auditing.html#audit-log-ignore-policy[Logfile Audit Events Ignore Policies]. +* Added a certificates API, which enables you to retrieve information about the +X.509 certificates that are used to encrypt communications in your {es} cluster. +For more information, see <>. 
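
For the token refresh enhancement above, a hedged sketch of the exchange; the
`_xpack/security/oauth2/token` endpoint and field names follow the 6.x token
API as we understand it, and the refresh token value is a placeholder:

[source,java]
----
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.RestClient;

public class RefreshTokenExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(
                new HttpHost("localhost", 9200, "http")).build()) {
            Request refresh = new Request("POST", "/_xpack/security/oauth2/token");
            // Trade a refresh token for a fresh access token; per the note
            // above, this works within 24 hours of the token's creation.
            refresh.setJsonEntity(
                "{\"grant_type\":\"refresh_token\","
                    + "\"refresh_token\":\"dGhpcyBpcyBub3QgYSByZWFsIHRva2Vu\"}");
            System.out.println(client.performRequest(refresh).getStatusLine());
        }
    }
}
----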
+ +Sequence IDs:: +* Do not keep 5.x commits when having 6.x commits {pull}28188[#28188] (issues: {issue}27606[#27606], {issue}28038[#28038]) +* Use lastSyncedGlobalCheckpoint in deletion policy {pull}27826[#27826] (issue: {issue}27606[#27606]) +* Use CountedBitSet in LocalCheckpointTracker {pull}27793[#27793] +* Only fsync global checkpoint if needed {pull}27652[#27652] +* Keep commits and translog up to the global checkpoint {pull}27606[#27606] +* Adjust CombinedDeletionPolicy for multiple commits {pull}27456[#27456] (issues: {issue}10708[#10708], {issue}27367[#27367]) +* Keeps index commits up to the current global checkpoint {pull}27367[#27367] (issue: {issue}10708[#10708]) +* Dedup translog operations by reading in reverse {pull}27268[#27268] (issue: {issue}10708[#10708]) + +Settings:: +* Add validation of keystore setting names {pull}27626[#27626] + +Snapshot/Restore:: +* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723] +* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] +* Include include_global_state in Snapshot status API (#22423) {pull}26853[#26853] (issue: {issue}22423[#22423]) + +Task Manager:: +* Add ability to associate an ID with tasks {pull}27764[#27764] (issue: {issue}23250[#23250]) + +Translog:: +* Simplify MultiSnapshot#SeqNoset {pull}27547[#27547] (issue: {issue}27268[#27268]) +* Enclose CombinedDeletionPolicy in SnapshotDeletionPolicy {pull}27528[#27528] (issues: {issue}27367[#27367], {issue}27456[#27456]) + +Watcher:: +* Added the ability to set the `index` and `doc_type` dynamically in an index +action. For more information, see {stack-ov}/actions-index.html[Index Action]. +* Added a `refresh` index action attribute, which enables you to set the +refresh policy of the write request. For more information, see +{stack-ov}/actions-index.html[Index Action]. +* Added support for actions in slack attachments, which enables you to add +buttons that can be clicked in slack messages. For more information, see +{stack-ov}/actions-slack.html[Slack Action]. +* {watcher} indices (`.watch*` and `triggered_watches`) created after upgrading +to 6.2 have the `auto_expand_replicas: 0-1` setting rather than a fixed setting +of 1 replica. As a result, {watcher} indices created after upgrading to 6.2 can +have a green status on single node clusters. There is no impact in multi-node +clusters. + +[float] +=== Bug Fixes + +Aggregations:: +* Adds metadata to rewritten aggregations {pull}28185[#28185] (issue: {issue}28170[#28170]) +* Fix NPE on composite aggregation with sub-aggregations that need scores {pull}28129[#28129] +* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012]) +* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912]) +* Fix global aggregation that requires breadth first and scores {pull}27942[#27942] (issues: {issue}22321[#22321], {issue}27928[#27928]) +* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936] +* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841]) +* Using DocValueFormat::parseBytesRef for parsing missing value parameter {pull}27855[#27855] (issue: {issue}27788[#27788]) +* Fix illegal cast of the "low cardinality" optimization of the `terms` aggregation. {pull}27543[#27543] +* Always include the _index and _id for nested search hits. 
{pull}27201[#27201] (issue: {issue}27053[#27053]) + +Allocation:: +* Do not open indices with broken settings {pull}26995[#26995] + +Core:: +* Fix lock accounting in releasable lock {pull}28202[#28202] +* Fixes ByteSizeValue to serialise correctly {pull}27702[#27702] (issue: {issue}27568[#27568]) +* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) +* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] +* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521]) + +Engine:: +* Replica recovery could go into an endless flushing loop {pull}28350[#28350] +* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852]) +* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852]) +* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) + +Geo:: +* Correct two equality checks on incomparable types {pull}27688[#27688] +* Handle case where the hole vertex is south of the containing polygon(s) {pull}27685[#27685] (issue: {issue}25933[#25933]) + +Highlighting:: +* Fix highlighting on a keyword field that defines a normalizer {pull}27604[#27604] + +Inner Hits:: +* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822]) + +Internal:: +* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213]) +* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169]) +* Retain originalIndex info when rewriting FieldCapabilities requests {pull}27761[#27761] + +Java REST Client:: +* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827]) + +Machine Learning:: +* Improved error reporting for crashes and resource problems on Linux. +* Improved the detection of seasonal trends in bucket spans longer than 1 hour. +* Updated the forecast API to wait for validation and return an error if the +validation fails. +* Set the actual bucket value to 0 in model plots for empty buckets for count +and sum functions. The count and sum functions treat empty buckets as 0 rather +than unknown for anomaly detection, so it was inconsistent not to do the same +for model plots. This inconsistency resulted in problems plotting these buckets +in {kib}. + +Mapping:: +* Ignore null value for range field (#27845) {pull}28116[#28116] (issue: {issue}27845[#27845]) +* Pass `java.locale.providers=COMPAT` to Java 9 onwards {pull}28080[#28080] (issue: {issue}10984[#10984]) +* Allow update of `eager_global_ordinals` on `_parent`. 
{pull}28014[#28014] (issue: {issue}24407[#24407]) +* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323]) + +Network:: +* Only bind loopback addresses when binding to local {pull}28029[#28029] (issue: {issue}1877[#1877]) +* Remove potential nio selector leak {pull}27825[#27825] +* Fix issue where the incorrect buffers are written {pull}27695[#27695] (issue: {issue}27551[#27551]) +* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927]) +* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540]) + +Packaging:: +* Allow custom service names when installing on windows {pull}25255[#25255] (issue: {issue}25231[#25231]) + +Percolator:: +* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1]) + +Plugin Analysis ICU:: +* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202] + +Plugin Analysis Phonetic:: +* Fix daitch_mokotoff phonetic filter to use the dedicated Lucene filter {pull}28225[#28225] (issue: {issue}28211[#28211]) + +Plugin Lang Painless:: +* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760]) +* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210]) + +Plugin Repository HDFS:: +* Fix SecurityException when HDFS Repository used against HA Namenodes {pull}27196[#27196] + +Plugins:: +* Make sure that we don't detect files as maven coordinate when installing a plugin {pull}28163[#28163] +* Fix upgrading indices which use a custom similarity plugin. {pull}26985[#26985] (issue: {issue}25350[#25350]) + +Recovery:: +* Open engine should keep only starting commit {pull}28228[#28228] (issues: {issue}27804[#27804], {issue}28181[#28181]) +* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061]) +* Set global checkpoint before open engine from store {pull}27972[#27972] (issues: {issue}27965[#27965], {issue}27970[#27970]) +* Check and repair index under the store metadata lock {pull}27768[#27768] (issues: {issue}24481[#24481], {issue}24787[#24787], {issue}27731[#27731]) +* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536]) + +Rollover:: +* Make index rollover action atomic {pull}28039[#28039] (issue: {issue}26976[#26976]) + +Scripting:: +* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335] + +Scroll:: +* Reject scroll query if size is 0 (#22552) {pull}27842[#27842] (issue: {issue}22552[#22552]) +* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498] + +Search:: +* Fix simple_query_string on invalid input {pull}28219[#28219] (issue: {issue}28204[#28204]) +* Use the underlying connection version for CCS connections {pull}28093[#28093] +* Fix synonym phrase query expansion for cross_fields parsing {pull}28045[#28045] +* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979]) +* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708]) + +Security:: +* Updated the `setup-passwords` command to generate passwords with characters +`A-Z`, `a-z`, and `0-9`, so that they are safe to use in shell scripts. For more +information about this command, see <>. +* Improved the error messages that occur if the `x-pack` directory is missing +when you run <>. 
+* Fixed the ordering of realms in a realm chain, which determines the order in +which the realms are consulted. For more information, see +{stack-ov}/realms.html[Realms]. + +Sequence IDs:: +* Recovery from snapshot may leave seq# gaps {pull}27850[#27850] +* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591]) +* Obey translog durability in global checkpoint sync {pull}27641[#27641] + +Settings:: +* Settings: Introduce settings updater for a list of settings {pull}28338[#28338] (issue: {issue}28047[#28047]) +* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316]) +* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926]) +* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537]) + +Snapshot/Restore:: +* Consistent updates of IndexShardSnapshotStatus {pull}28130[#28130] (issue: {issue}26480[#26480]) +* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974]) +* Do not start snapshots that are deleted during initialization {pull}27931[#27931] +* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] +* Consistent update of stage and failure message in IndexShardSnapshotStatus {pull}27557[#27557] (issue: {issue}26480[#26480]) +* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865]) +* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) + +Stats:: +* Fixes DocStats to properly deal with shards that report -1 index size {pull}27863[#27863] +* Include internal refreshes in refresh stats {pull}27615[#27615] + +Term Vectors:: +* Fix term vectors generator with keyword and normalizer {pull}27608[#27608] (issue: {issue}27320[#27320]) + +Watcher:: +* Replaced group settings with affix key settings where filters are needed. +For more information, see https://github.com/elastic/elasticsearch/pull/28338. 
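
The affix-key pattern mentioned in the Watcher fix above looks roughly like
this; the concrete prefix and suffix here are illustrative, not the exact
declarations from {pull}28338[#28338]:

[source,java]
----
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.AffixSetting;
import org.elasticsearch.common.settings.Setting.Property;

public class AffixSettingExample {
    // Matches keys of the form
    // xpack.notification.email.account.<name>.smtp.host, so every concrete
    // account name fills in the variable middle part of the key. Unlike a
    // plain group setting, each resolved key gets its own validated Setting,
    // which is what makes per-key filtering possible.
    static final AffixSetting<String> SMTP_HOST = Setting.affixKeySetting(
        "xpack.notification.email.account.",
        "smtp.host",
        key -> Setting.simpleString(key, Property.Dynamic, Property.NodeScope));
}
----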
+ +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Dependencies: Update joda time to 2.9.9 {pull}28261[#28261] +* upgrade to lucene 7.2.1 {pull}28218[#28218] (issue: {issue}28044[#28044]) +* Upgrade jna from 4.4.0-1 to 4.5.1 {pull}28183[#28183] (issue: {issue}28172[#28172]) + +Ingest:: +* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824] + +[[release-notes-6.1.4]] +== {es} version 6.1.4 + +//[float] +//[[breaking-6.1.4]] +//=== Breaking Changes + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +[float] +=== Enhancements + +Core:: +* Fix classes that can exit {pull}27518[#27518] + +[float] +=== Bug Fixes + +Aggregations:: +* StringTerms.Bucket.getKeyAsNumber detection type {pull}28118[#28118] (issue: {issue}28012[#28012]) + +Core:: +* Remove special handling for _all in nodes info {pull}28971[#28971] (issue: {issue}28797[#28797]) + +Engine:: +* Avoid class cast exception from index writer {pull}28989[#28989] +* Maybe die before failing engine {pull}28973[#28973] (issues: {issue}27265[#27265], {issue}28967[#28967]) + +Scripting:: +* Painless: Fix For Loop NullPointerException {pull}28506[#28506] (issue: {issue}28501[#28501]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[[release-notes-6.1.3]] +== {es} version 6.1.3 + +//[float] +//[[breaking-6.1.3]] +//=== Breaking Changes + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +//[float] +//=== Enhancements + +[float] +=== Bug Fixes + +Engine:: +* Replica recovery could go into an endless flushing loop {pull}28350[#28350] + +Internal:: +* Never return null from Strings.tokenizeToStringArray {pull}28224[#28224] (issue: {issue}28213[#28213]) +* Fallback to TransportMasterNodeAction for cluster health retries {pull}28195[#28195] (issue: {issue}28169[#28169]) + +Mapping:: +* Allow update of `eager_global_ordinals` on `_parent`. {pull}28014[#28014] (issue: {issue}24407[#24407]) + +Scripting:: +* Ensure we protect Collections obtained from scripts from self-referencing {pull}28335[#28335] + +Security:: +* Improved cache expiry handling in the token service. Previously, if the token +service was idle for more than 60 minutes, the key expired and the service +failed to generate user tokens. + +Settings:: +* Fix setting notification for complex setting (affixMap settings) that could cause transient settings to be ignored {pull}28317[#28317] (issue: {issue}28316[#28316]) +* Fix environment variable substitutions in list setting {pull}28106[#28106] (issue: {issue}27926[#27926]) + +Snapshot/Restore:: +* Avoid concurrent snapshot finalizations when deleting an INIT snapshot {pull}28078[#28078] (issues: {issue}27214[#27214], {issue}27931[#27931], {issue}27974[#27974]) +* Do not start snapshots that are deleted during initialization {pull}27931[#27931] + +Watcher:: +* Fixed a null pointer exception in the TemplateRegistry when there is no master +node available. +* Ensured collections obtained from scripts are protected from self-referencing. +See https://github.com/elastic/elasticsearch/pull/28335. 
//[float]
//=== Regressions

//[float]
//=== Known Issues

[[release-notes-6.1.2]]
== {es} version 6.1.2

//[float]
//[[breaking-6.1.2]]
//=== Breaking Changes

//[float]
//=== Breaking Java Changes

//[float]
//=== Deprecations

//[float]
//=== New Features

[float]
=== Enhancements

Internal::
* Make AbstractQueryBuilder.declareStandardFields to be protected (#27865) {pull}27894[#27894] (issue: {issue}27865[#27865])

[float]
=== Bug Fixes

Aggregations::
* Fix incorrect results for aggregations nested under a nested aggregation {pull}27946[#27946] (issue: {issue}27912[#27912])
* Fix composite aggregation when after term is missing in the shard {pull}27936[#27936]
* Fix preserving FiltersAggregationBuilder#keyed field on rewrite {pull}27900[#27900] (issue: {issue}27841[#27841])

Engine::
* Use `_refresh` to shrink the version map on inactivity {pull}27918[#27918] (issue: {issue}27852[#27852])
* Allow resize version map under lock even if there are pending operations {pull}27870[#27870] (issue: {issue}27852[#27852])

Machine Learning::
* Fixed the removal of tokens during categorization, where the tokens were
incorrectly deemed to be hexadecimal numbers. For more information, see
{stack-ov}/ml-configuring-categories.html[Categorizing log messages].
* Reduced the sensitivity of the analysis to small perturbations in the input
data.
* Disabled the ability to create forecasts for jobs that were created before
6.1.0.

Monitoring::
* Added a `cluster_alerts.management.blacklist` setting for HTTP Exporters,
which you can use to block the creation of specific cluster alerts. For more
information, see <>.

Network::
* Only bind loopback addresses when binding to local {pull}28029[#28029]

Recovery::
* Allow shrinking of indices from a previous major {pull}28076[#28076] (issue: {issue}28061[#28061])

Search::
* Use the underlying connection version for CCS connections {pull}28093[#28093]
* Carry forward weights, etc on rescore rewrite {pull}27981[#27981] (issue: {issue}27979[#27979])

Security::
* Fixed an issue in the Active Directory realm when following referrals that
resulted in an increase in the number of connections made to Active Directory.
* Fixed exception that occurred when using auditing and transport clients. In
particular, the problem occurred when the number of processors on the transport
client did not match the number of processors on the server.
* Ensured that TLS is not required to install a license if you are using
single-node discovery. For more information, see <> and
{stack-ov}/ssl-tls.html[Setting up TLS on a Cluster].
* Fixed the <>. In particular, the
`has_all_requested` field in the API results was not taking cluster privileges
into consideration.

Snapshot/Restore::
* Fail restore when the shard allocations max retries count is reached {pull}27493[#27493] (issue: {issue}26865[#26865])

Translog::
* Only sync translog when global checkpoint increased {pull}27973[#27973] (issues: {issue}27837[#27837], {issue}27970[#27970])

Watcher::
* Fixed encoding of UTF-8 data in the HTTP client.
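
The class of bug fixed here usually comes from building an HTTP entity without
an explicit charset. A sketch of the distinction (illustrative only, not the
actual watcher fix):

[source,java]
----
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;

public class Utf8EntityExample {
    public static void main(String[] args) {
        String json = "{\"message\":\"héllo wörld\"}";
        // An explicit ContentType carries its charset (UTF-8 for
        // APPLICATION_JSON). The single-argument StringEntity constructor
        // falls back to ISO-8859-1 and mangles non-ASCII characters.
        StringEntity entity = new StringEntity(json, ContentType.APPLICATION_JSON);
        System.out.println(entity.getContentType().getValue());
    }
}
----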
+ +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[[release-notes-6.1.1]] +== {es} version 6.1.1 + +//[float] +//[[breaking-6.1.1]] +//=== Breaking Changes + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +[float] +=== Enhancements + +Snapshot/Restore:: +* Use AmazonS3.doesObjectExist() method in S3BlobContainer {pull}27723[#27723] + +Watcher:: +* Ensured the watcher thread pool size is reasonably bound. In particular, the +watcher thread pool size is now five times the number of processors until 50 +threads are reached. If more than 50 cores exist and 50 threads exist, the +watch thread pool size grows to match the number of processors. + +[float] +=== Bug Fixes + +Inner Hits:: +* Add version support for inner hits in field collapsing (#27822) {pull}27833[#27833] (issue: {issue}27822[#27822]) + +Java REST Client:: +* Do not use system properties when building the HttpAsyncClient {pull}27829[#27829] (issue: {issue}27827[#27827]) + +Monitoring:: +* Data collectors now all share the same cluster state that existed at the +beginning of data collection. This removes the extremely rare race condition +where the cluster state can change between some data collectors, which could +cause temporary issues in the Monitoring UI. + +Search:: +* Fix routing with leading or trailing whitespace {pull}27712[#27712] (issue: {issue}27708[#27708]) + +Sequence IDs:: +* Recovery from snapshot may leave seq# gaps {pull}27850[#27850] +* No longer unidle shard during recovery {pull}27757[#27757] (issue: {issue}26591[#26591]) + +Watcher:: +* Fixed the pagerduty action to send context data. For more information, see +{stack-ov}/actions-pagerduty.html[PagerDuty Action]. + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Ingest:: +* update ingest-attachment to use Tika 1.17 and newer deps {pull}27824[#27824] + +[[release-notes-6.1.0]] +== {es} version 6.1.0 + +[float] +[[breaking-6.1.0]] +=== Breaking Changes + +Network:: +* Allow only a fixed-size receive predictor {pull}26165[#26165] (issue: {issue}23185[#23185]) + +REST:: +* Standardize underscore requirements in parameters {pull}27414[#27414] (issues: {issue}26886[#26886], {issue}27040[#27040]) + +Scroll:: +* Fail queries with scroll that explicitely set request_cache {pull}27342[#27342] + +Search:: +* Add a limit to from + size in top_hits and inner hits. {pull}26492[#26492] (issue: {issue}11511[#11511]) + +Security:: +* The `certgen` command now returns validation errors when it encounters problems +reading from an input file (with the `-in` command option). Previously these +errors might have been ignored or caused the command to abort with unclear +messages. For more information, see <>. 
+ +[float] +=== Breaking Java Changes + +Aggregations:: +* Moves deferring code into its own subclass {pull}26421[#26421] + +Core:: +* Unify Settings xcontent reading and writing {pull}26739[#26739] + +Settings:: +* Return List instead of an array from settings {pull}26903[#26903] +* Remove `Settings,put(Map)` {pull}26785[#26785] + +[float] +=== Deprecations + +Aggregations:: +* Deprecate global_ordinals_hash and global_ordinals_low_cardinality {pull}26173[#26173] (issue: {issue}26014[#26014]) + +Allocation:: +* Add deprecation warning for negative index.unassigned.node_left.delayed_timeout {pull}26832[#26832] (issue: {issue}26828[#26828]) + +Analysis:: +* Add limits for ngram and shingle settings {pull}27411[#27411] (issues: {issue}25887[#25887], {issue}27211[#27211]) + +Geo:: +* [GEO] 6x Deprecate ShapeBuilders and decouple geojson parse logic {pull}27345[#27345] + +Mapping:: +* Deprecate the `index_options` parameter for numeric fields {pull}26672[#26672] (issue: {issue}21475[#21475]) + +Plugin Repository Azure:: +* Azure repository: Move to named configurations as we do for S3 repository and secure settings {pull}23405[#23405] (issues: {issue}22762[#22762], {issue}22763[#22763]) + +Search:: +* doc: deprecate _primary and _replica shard option {pull}26792[#26792] (issue: {issue}26335[#26335]) + +[float] +=== New Features + +Aggregations:: +* Aggregations: bucket_sort pipeline aggregation {pull}27152[#27152] (issue: {issue}14928[#14928]) +* Add composite aggregator {pull}26800[#26800] + +Analysis:: +* Added Bengali Analyzer to Elasticsearch with respect to the lucene update {pull}26527[#26527] + +Ingest:: +* add URL-Decode Processor to Ingest {pull}26045[#26045] (issue: {issue}25837[#25837]) + +Java High Level REST Client:: +* Added Delete Index support to high-level REST client {pull}27019[#27019] (issue: {issue}25847[#25847]) + +Machine Learning:: +* Added the ability to create job forecasts. This feature enables you to use +historical behavior to predict the future behavior of your time series. You can +create forecasts in {kib} or by using the <> API. ++ +-- +NOTE: You cannot create forecasts for jobs that were created in previous +versions; this functionality is available only for jobs created in 6.1 or later. + +-- +* Added overall buckets, which summarize bucket results for multiple jobs. +For more information, see the <> API. +* Added job groups, which you can use to manage or retrieve information from +multiple jobs at once. Also updated many {ml} APIs to support groups and +wildcard expressions in the job identifier. + +Nested Docs:: +* Multi-level Nested Sort with Filters {pull}26395[#26395] + +Query DSL:: +* Add terms_set query {pull}27145[#27145] (issue: {issue}26915[#26915]) +* Introduce sorted_after query for sorted index {pull}26377[#26377] +* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}26097[#26097] + +Search:: +* Expose `fuzzy_transpositions` parameter in fuzzy queries {pull}26870[#26870] (issue: {issue}18348[#18348]) +* Add upper limit for scroll expiry {pull}26448[#26448] (issues: {issue}11511[#11511], {issue}23268[#23268]) +* Implement adaptive replica selection {pull}26128[#26128] (issue: {issue}24915[#24915]) +* configure distance limit {pull}25731[#25731] (issue: {issue}25528[#25528]) + +Similarities:: +* Add a scripted similarity. 
{pull}25831[#25831] + +Suggesters:: +* Expose duplicate removal in the completion suggester {pull}26496[#26496] (issue: {issue}23364[#23364]) +* Support must and should for context query in context suggester {pull}26407[#26407] (issues: {issue}24421[#24421], {issue}24565[#24565]) + +[float] +=== Enhancements + +Aggregations:: +* Allow aggregation sorting via nested aggregation {pull}26683[#26683] (issue: {issue}16838[#16838]) + +Allocation:: +* Tie-break shard path decision based on total number of shards on path {pull}27039[#27039] (issue: {issue}26654[#26654]) +* Balance shards for an index more evenly across multiple data paths {pull}26654[#26654] (issue: {issue}16763[#16763]) +* Expand "NO" decision message in NodeVersionAllocationDecider {pull}26542[#26542] (issue: {issue}10403[#10403]) +* _reroute's retry_failed flag should reset failure counter {pull}25888[#25888] (issue: {issue}25291[#25291]) + +Analysis:: +* Add configurable `max_token_length` parameter to whitespace tokenizer {pull}26749[#26749] (issue: {issue}26643[#26643]) + +CRUD:: +* Add wait_for_active_shards parameter to index open command {pull}26682[#26682] (issue: {issue}20937[#20937]) + +Core:: +* Fix classes that can exit {pull}27518[#27518] +* Replace empty index block checks with global block checks in template delete/put actions {pull}27050[#27050] (issue: {issue}10530[#10530]) +* Allow Uid#decodeId to decode from a byte array slice {pull}26987[#26987] (issue: {issue}26931[#26931]) +* Use separate searchers for "search visibility" vs "move indexing buffer to disk {pull}26972[#26972] (issues: {issue}15768[#15768], {issue}26802[#26802], {issue}26912[#26912], {issue}3593[#3593]) +* Add ability to split shards {pull}26931[#26931] +* Make circuit breaker mutations debuggable {pull}26067[#26067] (issue: {issue}25891[#25891]) + +Dates:: +* DateProcessor Locale {pull}26186[#26186] (issue: {issue}25513[#25513]) + +Discovery:: +* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328]) + +Engine:: +* Ensure external refreshes will also refresh internal searcher to minimize segment creation {pull}27253[#27253] (issue: {issue}26972[#26972]) +* Move IndexShard#getWritingBytes() under InternalEngine {pull}27209[#27209] (issue: {issue}26972[#26972]) +* Refactor internal engine {pull}27082[#27082] + +Geo:: +* Add ignore_malformed to geo_shape fields {pull}24654[#24654] (issue: {issue}23747[#23747]) + +Ingest:: +* add json-processor support for non-map json types {pull}27335[#27335] (issue: {issue}25972[#25972]) +* Introduce templating support to timezone/locale in DateProcessor {pull}27089[#27089] (issue: {issue}24024[#24024]) +* Add support for parsing inline script (#23824) {pull}26846[#26846] (issue: {issue}23824[#23824]) +* Consolidate locale parsing. 
{pull}26400[#26400]
+* Accept ingest simulate params as ints or strings {pull}23885[#23885] (issue: {issue}23823[#23823])
+
+Internal::
+* Avoid uid creation in ParsedDocument {pull}27241[#27241]
+* Upgrade to Lucene 7.1.0 snapshot version {pull}26864[#26864] (issue: {issue}26527[#26527])
+* Remove `_index` fielddata hack if cluster alias is present {pull}26082[#26082] (issue: {issue}25885[#25885])
+
+Java High Level REST Client::
+* Adjust RestHighLevelClient method modifiers {pull}27238[#27238]
+* Decouple BulkProcessor from ThreadPool {pull}26727[#26727] (issue: {issue}26028[#26028])
+
+Logging::
+* Add more information on _failed_to_convert_ exception (#21946) {pull}27034[#27034] (issue: {issue}21946[#21946])
+* Improve shard-failed log messages. {pull}26866[#26866]
+
+Machine Learning::
+* Improved the way {ml} jobs are allocated to nodes, such that it is primarily
+determined by the estimated memory requirement of the job. If there is insufficient
+information about the job's memory requirements, the allocation decision is based
+on job counts per node.
+* Increased the default value of the `xpack.ml.max_open_jobs` setting from `10`
+to `20`. The allocation of jobs to nodes now considers memory usage as well as
+job counts, so it's reasonable to permit more small jobs on a single node. For
+more information, see <>.
+* Decreased the default `model_memory_limit` property value to 1 GB for new jobs.
+If you want to create a job that analyzes high cardinality fields, you can
+increase this property value. For more information, see <>.
+* Improved analytics related to decay rates when predictions are very accurate.
+* Improved analytics related to detecting non-negative quantities and using this
+information to constrain analysis, predictions, and confidence intervals.
+* Improved periodic trough or spike detection.
+* Improved the speed of the aggregation of {ml} results.
+* Improved probability calculation performance.
+* Expedited bucket processing time in very large populations by determining when
+there are nearly duplicate values in a bucket and de-duplicating the samples that
+are added to the model.
+* Improved handling of periodically missing values.
+* Improved analytics related to diurnal periodicity.
+* Reduced memory usage during population analysis by releasing redundant memory
+after the bucket results are written.
+* Improved modeling of long periodic components, particularly when there is a
+long bucket span.
+
+Mapping::
+* Allow ip_range to accept CIDR notation {pull}27192[#27192] (issue: {issue}26260[#26260]) (see the sketch below)
+* Deduplicate `_field_names`. {pull}26550[#26550]
+* Throw a better error message for empty field names {pull}26543[#26543] (issue: {issue}23348[#23348])
+* Stricter validation for min/max values for whole numbers {pull}26137[#26137]
+* Make FieldMapper.copyTo() always non-null. {pull}25994[#25994]
+
+Monitoring::
+* Added the new `interval_ms` field to monitoring documents. This field
+indicates the current collection interval for {es} or external monitored systems.
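+
+As a rough illustration of the `ip_range` CIDR enhancement above, the sketch
+below derives the inclusive lower and upper addresses that a CIDR mask
+denotes. It is a simplified stand-in for the actual field implementation,
+using only `java.net.InetAddress`.
+
+[source,java]
+----
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.Arrays;
+
+// "a.b.c.d/prefix" denotes every address whose first `prefix` bits match,
+// i.e. an inclusive [lower, upper] interval of addresses.
+public final class CidrBounds {
+
+    static InetAddress[] bounds(String cidr) throws UnknownHostException {
+        String[] parts = cidr.split("/");
+        byte[] lower = InetAddress.getByName(parts[0]).getAddress();
+        byte[] upper = Arrays.copyOf(lower, lower.length);
+        int prefix = Integer.parseInt(parts[1]);
+        for (int bit = prefix; bit < lower.length * 8; bit++) {
+            lower[bit / 8] &= ~(1 << (7 - bit % 8));       // clear host bits -> lower bound
+            upper[bit / 8] |= (byte) (1 << (7 - bit % 8)); // set host bits -> upper bound
+        }
+        return new InetAddress[] { InetAddress.getByAddress(lower), InetAddress.getByAddress(upper) };
+    }
+
+    public static void main(String[] args) throws UnknownHostException {
+        InetAddress[] b = bounds("192.168.0.0/16");
+        System.out.println(b[0].getHostAddress() + " - " + b[1].getHostAddress()); // 192.168.0.0 - 192.168.255.255
+    }
+}
+----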
+ +Nested Docs:: +* Use the primary_term field to identify parent documents {pull}27469[#27469] (issue: {issue}24362[#24362]) +* Prohibit using `nested_filter`, `nested_path` and new `nested` Option at the same time in FieldSortBuilder {pull}26490[#26490] (issue: {issue}17286[#17286]) + +Network:: +* Remove manual tracking of registered channels {pull}27445[#27445] (issue: {issue}27260[#27260]) +* Remove tcp profile from low level nio channel {pull}27441[#27441] (issue: {issue}27260[#27260]) +* Decouple `ChannelFactory` from Tcp classes {pull}27286[#27286] (issue: {issue}27260[#27260]) + +Percolator:: +* Use Lucene's CoveringQuery to select percolate candidate matches {pull}27271[#27271] (issues: {issue}26081[#26081], {issue}26307[#26307]) +* Add support to percolate query to percolate multiple documents simultaneously {pull}26418[#26418] +* Hint what clauses are important in a conjunction query based on fields {pull}26081[#26081] +* Add support for selecting percolator query candidate matches containing range queries {pull}25647[#25647] (issue: {issue}21040[#21040]) + +Plugin Discovery EC2:: +* update AWS SDK for ECS Task IAM support in discovery-ec2 {pull}26479[#26479] (issue: {issue}23039[#23039]) + +Plugin Lang Painless:: +* Painless: Only allow Painless type names to be the same as the equivalent Java class. {pull}27264[#27264] +* Allow for the Painless Definition to have multiple instances for white-listing {pull}27096[#27096] +* Separate Painless Whitelist Loading from the Painless Definition {pull}26540[#26540] +* Remove Sort enum from Painless Definition {pull}26179[#26179] + +Plugin Repository Azure:: +* Add azure storage endpoint suffix #26432 {pull}26568[#26568] (issue: {issue}26432[#26432]) +* Support for accessing Azure repositories through a proxy {pull}23518[#23518] (issues: {issue}23506[#23506], {issue}23517[#23517]) + +Plugin Repository S3:: +* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278]) +* Update to AWS SDK 1.11.223 {pull}27278[#27278] + +Plugins:: +* Plugins: Add versionless alias to all security policy codebase properties {pull}26756[#26756] (issue: {issue}26521[#26521]) +* Allow plugins to plug rescore implementations {pull}26368[#26368] (issue: {issue}26208[#26208]) + +Query DSL:: +* Add support for wildcard on `_index` {pull}27334[#27334] (issue: {issue}25722[#25722]) + +Reindex API:: +* Update by Query is modified to accept short `script` parameter. 
{pull}26841[#26841] (issue: {issue}24898[#24898])
+* reindex: automatically choose the number of slices {pull}26030[#26030] (issues: {issue}24547[#24547], {issue}25582[#25582])
+
+Rollover::
+* Add size-based condition to the index rollover API {pull}27160[#27160] (issue: {issue}27004[#27004])
+* Add size-based condition to the index rollover API {pull}27115[#27115] (issue: {issue}27004[#27004])
+
+Scripting::
+* Script: Convert script query to a dedicated script context {pull}26003[#26003]
+
+Search::
+* Make fields optional in multi_match query and rely on index.query.default_field by default {pull}27380[#27380]
+* fix unnecessary logger creation {pull}27349[#27349]
+* `ObjectParser` : replace `IllegalStateException` with `ParsingException` {pull}27302[#27302] (issue: {issue}27147[#27147])
+* Uses norms for exists query if enabled {pull}27237[#27237]
+* Cross Cluster Search: make remote clusters optional {pull}27182[#27182] (issues: {issue}26118[#26118], {issue}27161[#27161])
+* Enhances exists queries to reduce need for `_field_names` {pull}26930[#26930] (issue: {issue}26770[#26770])
+* Change ParentFieldSubFetchPhase to create doc values iterator once per segment {pull}26815[#26815]
+* Change VersionFetchSubPhase to create doc values iterator once per segment {pull}26809[#26809]
+* Change ScriptFieldsFetchSubPhase to create search scripts once per segment {pull}26808[#26808] (issue: {issue}26775[#26775])
+* Make sure SortBuilders rewrite inner nested sorts {pull}26532[#26532]
+* Extend testing of build method in ScriptSortBuilder {pull}26520[#26520] (issues: {issue}17286[#17286], {issue}26490[#26490])
+* Accept an array of field names and boosts in the index.query.default_field setting {pull}26320[#26320] (issue: {issue}25946[#25946])
+* Reject IPv6-mapped IPv4 addresses when using the CIDR notation. {pull}26254[#26254] (issue: {issue}26078[#26078])
+* Rewrite range queries with open bounds to exists query {pull}26160[#26160] (issue: {issue}22640[#22640])
+
+Security::
+* Added the `manage_index_templates` cluster privilege to the built-in role
+`kibana_system`. For more information, see
+{stack-ov}/security-privileges.html#privileges-list-cluster[Cluster Privileges]
+and {stack-ov}/built-in-roles.html[Built-in Roles].
+* Newly created or updated watches execute with the privileges of the user that
+last modified the watch.
+* Added log messages when a PEM key is found when a PEM certificate was
+expected (or vice versa) in the `xpack.ssl.key` or `xpack.ssl.certificate` settings.
+* Added the new `certutil` command to simplify the creation of certificates for
+use with the Elastic stack. For more information, see <>.
+* Added automatic detection of support for AES 256 bit TLS ciphers and enabled
+their use when the JVM supports them.
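+
+The last Security item above can be pictured with a small probe of the JVM's
+JCE policy. This is a minimal sketch, not the actual {es} implementation; it
+uses the standard `javax.crypto.Cipher.getMaxAllowedKeyLength` call to decide
+whether AES-256 TLS ciphers are usable on the current runtime.
+
+[source,java]
+----
+import javax.crypto.Cipher;
+
+// Probe the maximum AES key length the JVM's crypto policy allows and gate
+// AES-256 cipher suites on the result.
+public final class Aes256Probe {
+    public static void main(String[] args) throws Exception {
+        int maxAesBits = Cipher.getMaxAllowedKeyLength("AES");
+        if (maxAesBits >= 256) {
+            System.out.println("AES-256 TLS ciphers can be enabled");
+        } else {
+            System.out.println("JVM limited to AES-" + maxAesBits + "; keeping 128-bit suites");
+        }
+    }
+}
+----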
+ +Sequence IDs:: +* Only fsync global checkpoint if needed {pull}27652[#27652] +* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418]) +* Lazy initialize checkpoint tracker bit sets {pull}27179[#27179] (issue: {issue}10708[#10708]) +* Returns the current primary_term for Get/MultiGet requests {pull}27177[#27177] (issue: {issue}26493[#26493]) + +Settings:: +* Allow affix settings to specify dependencies {pull}27161[#27161] +* Represent lists as actual lists inside Settings {pull}26878[#26878] (issue: {issue}26723[#26723]) +* Remove Settings#getAsMap() {pull}26845[#26845] +* Replace group map settings with affix setting {pull}26819[#26819] +* Throw exception if setting isn't recognized {pull}26569[#26569] (issue: {issue}25607[#25607]) +* Settings: Move keystore creation to plugin installation {pull}26329[#26329] (issue: {issue}26309[#26309]) + +Snapshot/Restore:: +* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480] +* Snapshot: Migrate TransportRequestHandler to TransportMasterNodeAction {pull}27165[#27165] (issue: {issue}27151[#27151]) +* Fix toString of class SnapshotStatus (#26851) {pull}26852[#26852] (issue: {issue}26851[#26851]) + +Stats:: +* Adds average document size to DocsStats {pull}27117[#27117] (issue: {issue}27004[#27004]) +* Stats to record how often the ClusterState diff mechanism is used successfully {pull}27107[#27107] (issue: {issue}26973[#26973]) +* Expose adaptive replica selection stats in /_nodes/stats API {pull}27090[#27090] +* Add cgroup memory usage/limit to OS stats on Linux {pull}26166[#26166] +* Add segment attributes to the `_segments` API. {pull}26157[#26157] (issue: {issue}26130[#26130]) + +Suggesters:: +* Improve error message for parse failures of completion fields {pull}27297[#27297] +* Support 'AND' operation for context query in context suggester {pull}24565[#24565] (issue: {issue}24421[#24421]) + +Watcher:: +* Improved error messages when there are no accounts configured for {watcher}. +* Added thread pool rejection information to execution state, which makes it +easier to debug execution failures. +* Added execution state information to watch status details. It is stored in the +`status.execution_state` field. +* Enabled the account monitoring `url` field in the `xpack.notification.jira` +setting to support customized paths. For more information about configuring Jira +accounts for use with watches, see +{stack-ov}/actions-jira.html[Jira Action]. +* Improved handling of exceptions in {watcher} to make it easier to debug +problems. + +[float] +=== Bug Fixes + +Aggregations:: +* Disable the "low cardinality" optimization of terms aggregations. 
{pull}27545[#27545] (issue: {issue}27543[#27543]) +* scripted_metric _agg parameter disappears if params are provided {pull}27159[#27159] (issues: {issue}19768[#19768], {issue}19863[#19863]) + +Cluster:: +* Properly format IndexGraveyard deletion date as date {pull}27362[#27362] +* Remove optimisations to reuse objects when applying a new `ClusterState` {pull}27317[#27317] + +Core:: +* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572]) +* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565] +* Ensure logging is configured for CLI commands {pull}27523[#27523] (issue: {issue}27521[#27521]) +* Protect shard splitting from illegal target shards {pull}27468[#27468] (issue: {issue}26931[#26931]) +* Avoid NPE when getting build information {pull}27442[#27442] +* Fix `ShardSplittingQuery` to respect nested documents. {pull}27398[#27398] (issue: {issue}27378[#27378]) +* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316]) + +Engine:: +* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516]) +* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498]) + +Geo:: +* Correct two equality checks on incomparable types {pull}27688[#27688] +* [GEO] fix pointsOnly bug for MULTIPOINT {pull}27415[#27415] + +Index Templates:: +* Prevent constructing an index template without index patterns {pull}27662[#27662] + +Ingest:: +* Add pipeline support for REST API bulk upsert {pull}27075[#27075] (issue: {issue}25601[#25601]) +* Fixing Grok pattern for Apache 2.4 {pull}26635[#26635] + +Inner Hits:: +* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531] + +Internal:: +* When checking if key exists in ThreadContextStruct:putHeaders() method,should put requestHeaders in map first {pull}26068[#26068] +* Adding a refresh listener to a recovering shard should be a noop {pull}26055[#26055] + +Java High Level REST Client:: +* Register ip_range aggregation with the high level client {pull}26383[#26383] +* add top hits as a parsed aggregation to the rest high level client {pull}26370[#26370] + +Machine Learning:: +* Improved handling of scenarios where there are insufficient values to +interpolate trend components. +* Improved calculation of confidence intervals. +* Fixed degrees of freedom calculation that could lead to excessive error logging. +* Improved trend modeling with long bucket spans. +* Fixed timing of when model size statistics are written. Previously, if there +were multiple partitions, there could be multiple model size stats docs written +within the same bucket. +* Updated the calculation of the model memory to include the memory used by +partition, over, by, or influencer fields. +* Fixed calculation of the `frequency` property value for {dfeeds} that use +aggregations. The value must be a multiple of the histogram interval. For more +information, see +{stack-ov}/ml-configuring-aggregation.html[Aggregating Data for Faster Performance]. +* Removed unnecessary messages from logs when a job is forcefully closed. + +Mapping:: +* Fix dynamic mapping update generation. 
{pull}27467[#27467]
+* Fix merging of _meta field {pull}27352[#27352] (issue: {issue}27323[#27323])
+* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189])
+
+Nested Docs::
+* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455]
+* Prevent duplicate fields when mixing parent and root nested includes {pull}27072[#27072] (issue: {issue}26990[#26990])
+
+Network::
+* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927])
+* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540])
+* Do not set SO_LINGER on server channels {pull}26997[#26997]
+* Do not set SO_LINGER to 0 when not shutting down {pull}26871[#26871] (issue: {issue}26764[#26764])
+* Close TcpTransport on RST in some Spots to Prevent Leaking TIME_WAIT Sockets {pull}26764[#26764] (issue: {issue}26701[#26701])
+
+Packaging::
+* Removes minimum master nodes default number {pull}26803[#26803]
+* setgid on /etc/elasticsearch on package install {pull}26412[#26412] (issue: {issue}26410[#26410])
+
+Percolator::
+* Avoid TooManyClauses exception if number of terms / ranges is exactly equal to 1024 {pull}27519[#27519] (issue: {issue}1[#1])
+
+Plugin Analysis ICU::
+* Catch InvalidPathException in IcuCollationTokenFilterFactory {pull}27202[#27202]
+
+Plugin Lang Painless::
+* Painless: Fix variable scoping issue in lambdas {pull}27571[#27571] (issue: {issue}26760[#26760])
+* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210])
+
+Plugin Repository GCS::
+* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092])
+
+Recovery::
+* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536])
+
+Reindex API::
+* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976])
+
+Scroll::
+* Fix scroll query with a sort that is a prefix of the index sort {pull}27498[#27498]
+
+Search::
+* Fix profiling naming issues {pull}27133[#27133]
+* Fix max score tracking with field collapsing {pull}27122[#27122] (issue: {issue}23840[#23840])
+* Apply missing request options to the expand phase {pull}27118[#27118] (issues: {issue}26649[#26649], {issue}27079[#27079])
+* Calculate and cache result when advanceExact is called {pull}26920[#26920] (issue: {issue}26817[#26817])
+* Filter unsupported relation for RangeQueryBuilder {pull}26620[#26620] (issue: {issue}26575[#26575])
+* Handle leniency for phrase query on a field indexed without positions {pull}26388[#26388]
+
+Security::
+* Fixed REST requests that required a body but did not validate it, resulting in
+null pointer exceptions.
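+
+A minimal sketch of the kind of up-front check this Security fix describes,
+with hypothetical names: endpoints that require a body should fail fast with
+a clear client error instead of dereferencing absent content later.
+
+[source,java]
+----
+// Hypothetical names; shows the fail-fast shape of the validation.
+public final class RequiredBodyCheck {
+
+    static void ensureBodyPresent(String endpoint, byte[] body) {
+        if (body == null || body.length == 0) {
+            // Clear client error instead of a NullPointerException further down.
+            throw new IllegalArgumentException("request [" + endpoint + "] requires a body");
+        }
+    }
+
+    public static void main(String[] args) {
+        ensureBodyPresent("/_search/scroll", "{\"scroll_id\":\"...\"}".getBytes());
+        ensureBodyPresent("/_search/scroll", new byte[0]); // throws
+    }
+}
+----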
+ +Sequence IDs:: +* Obey translog durability in global checkpoint sync {pull}27641[#27641] +* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841]) + +Settings:: +* Allow index settings to be reset by wildcards {pull}27671[#27671] (issue: {issue}27537[#27537]) + +Snapshot/Restore:: +* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] +* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) +* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180]) +* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359]) +* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495]) + +Stats:: +* Include internal refreshes in refresh stats {pull}27615[#27615] +* Make Segment statistics aware of segments hold by internal readers {pull}27558[#27558] +* Ensure `doc_stats` are changing even if refresh is disabled {pull}27505[#27505] + +Watcher:: +* Fixed handling of watcher templates. Missing watcher templates can be added by +any node if that node has a higher version than the master node. + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to Jackson 2.8.10 {pull}27230[#27230] +* Upgrade to Lucene 7.1 {pull}27225[#27225] + +Plugin Discovery EC2:: +* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359]) + +Plugin Discovery GCE:: +* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636]) + +Plugin Lang Painless:: +* Upgrade Painless from ANTLR 4.5.1-1 to ANTLR 4.5.3. 
{pull}27153[#27153]
+
+[[release-notes-6.0.1]]
+== {es} version 6.0.1
+
+[float]
+[[breaking-6.0.1]]
+=== Breaking Changes
+
+Scroll::
+* Fail queries with scroll that explicitly set request_cache {pull}27342[#27342]
+
+//[float]
+//=== Breaking Java Changes
+
+//[float]
+//=== Deprecations
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Core::
+* Fix classes that can exit {pull}27518[#27518]
+
+Discovery::
+* Stop responding to ping requests before master abdication {pull}27329[#27329] (issue: {issue}27328[#27328])
+
+Plugin Repository S3::
+* Remove S3 output stream {pull}27280[#27280] (issue: {issue}27278[#27278])
+* Update to AWS SDK 1.11.223 {pull}27278[#27278]
+
+Search::
+* fix unnecessary logger creation {pull}27349[#27349]
+
+Sequence IDs::
+* Log primary-replica resync failures {pull}27421[#27421] (issues: {issue}24841[#24841], {issue}27418[#27418])
+
+Snapshot/Restore::
+* Remove XContentType auto detection in BlobStoreRepository {pull}27480[#27480]
+
+[float]
+=== Bug Fixes
+
+Cluster::
+* Properly format IndexGraveyard deletion date as date {pull}27362[#27362]
+
+Core::
+* Do not set data paths on no local storage required {pull}27587[#27587] (issue: {issue}27572[#27572])
+* Ensure threadcontext is preserved when refresh listeners are invoked {pull}27565[#27565]
+* Avoid NPE when getting build information {pull}27442[#27442]
+* When building Settings do not set SecureSettings if empty {pull}26988[#26988] (issue: {issue}316[#316])
+
+Engine::
+* Reset LiveVersionMap on sync commit {pull}27534[#27534] (issue: {issue}27516[#27516])
+* Carry over version map size to prevent excessive resizing {pull}27516[#27516] (issue: {issue}20498[#20498])
+
+Inner Hits::
+* Return an empty _source for nested inner hit when filtering on a field that doesn't exist {pull}27531[#27531]
+
+Machine Learning::
+* Fixed analytics problem where sparse data resulted in "update failed" errors
+in the logs.
+
+Mapping::
+* Fix dynamic mapping update generation. {pull}27467[#27467]
+* Fixed rounding of bounds in scaled float comparison {pull}27207[#27207] (issue: {issue}27189[#27189])
+
+Nested Docs::
+* Ensure nested documents have consistent version and seq_ids {pull}27455[#27455]
+
+Network::
+* Throw UOE from compressible bytes stream reset {pull}27564[#27564] (issue: {issue}24927[#24927])
+* Bubble exceptions when closing compressible streams {pull}27542[#27542] (issue: {issue}27540[#27540])
+
+Plugin Lang Painless::
+* Painless: Fix errors allowing void to be assigned to def. {pull}27460[#27460] (issue: {issue}27210[#27210])
+
+Plugin Repository GCS::
+* Create new handlers for every new request in GoogleCloudStorageService {pull}27339[#27339] (issue: {issue}27092[#27092])
+
+Recovery::
+* Flush old indices on primary promotion and relocation {pull}27580[#27580] (issue: {issue}27536[#27536])
+
+Reindex API::
+* Reindex: Fix headers in reindex action {pull}26937[#26937] (issue: {issue}22976[#22976])
+
+Search::
+* Fix profiling naming issues {pull}27133[#27133]
+
+Security::
+* Fixed error that occurred when attempting to audit `system_access_granted`
+events.
+* Fixed the `setup-passwords` command such that it fails appropriately when
+invalid URLs are specified in the `--url` option and when {security} is not
+enabled.
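+
+The `setup-passwords` fix above is essentially an eager URL sanity check. The
+sketch below shows the shape of that behavior with hypothetical names,
+rejecting a malformed `--url` value before any connection is attempted.
+
+[source,java]
+----
+import java.net.MalformedURLException;
+import java.net.URL;
+
+// Hypothetical sketch: validate the --url option eagerly and exit with a
+// clear message on malformed input.
+public final class UrlOptionCheck {
+    public static void main(String[] args) {
+        String url = args.length > 0 ? args[0] : "http//localhost:9200"; // note the missing ':' after the scheme
+        try {
+            new URL(url);
+            System.out.println("--url looks well-formed: " + url);
+        } catch (MalformedURLException e) {
+            System.err.println("invalid value for --url: " + url + " (" + e.getMessage() + ")");
+            System.exit(1);
+        }
+    }
+}
+----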
+ +Sequence IDs:: +* Fix resync request serialization {pull}27418[#27418] (issue: {issue}24841[#24841]) + +Snapshot/Restore:: +* Do not swallow exception in ChecksumBlobStoreFormat.writeAtomic() {pull}27597[#27597] +* Delete shard store files before restoring a snapshot {pull}27476[#27476] (issues: {issue}20220[#20220], {issue}26865[#26865]) +* Fix snapshot getting stuck in INIT state {pull}27214[#27214] (issue: {issue}27180[#27180]) +* Fix default value of ignore_unavailable for snapshot REST API (#25359) {pull}27056[#27056] (issue: {issue}25359[#25359]) +* Do not create directory on readonly repository (#21495) {pull}26909[#26909] (issue: {issue}21495[#21495]) + +Watcher:: +* Fixed handling of Hipchat rooms. For example, room names with spaces did not +work as expected. For more information, see +{stack-ov}/actions-hipchat.html[HipChat Action]. + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Plugin Discovery EC2:: +* Upgrade AWS SDK Jackson Databind to 2.6.7.1 {pull}27361[#27361] (issues: {issue}27278[#27278], {issue}27359[#27359]) + +Plugin Discovery GCE:: +* Update Google SDK to version 1.23.0 {pull}27381[#27381] (issue: {issue}26636[#26636]) + +[[release-notes-6.0.0]] +== {es} version 6.0.0 + +[float] +[[breaking-6.0.0]] +=== Breaking Changes + +Aggregations:: +* Change parsing of numeric `to` and `from` parameters in `date_range` aggregation {pull}25376[#25376] (issue: {issue}17920[#17920]) + +Aliases:: +* Wrong behavior deleting alias {pull}23997[#23997] (issues: {issue}10106[#10106], {issue}23960[#23960]) + +Allocation:: +* Remove `cluster.routing.allocation.snapshot.relocation_enabled` setting {pull}20994[#20994] + +Analysis:: +* Do not allow custom analyzers to have the same names as built-in analyzers {pull}22349[#22349] (issue: {issue}22263[#22263]) +* Removing query-string parameters in `_analyze` API {pull}20704[#20704] (issue: {issue}20246[#20246]) + +CAT API:: +* Write -1 on unbounded queue in cat thread pool {pull}21342[#21342] (issue: {issue}21187[#21187]) + +CRUD:: +* Disallow `VersionType.FORCE` for GetRequest {pull}21079[#21079] (issue: {issue}20995[#20995]) +* Disallow `VersionType.FORCE` versioning for 6.x indices {pull}20995[#20995] (issue: {issue}20377[#20377]) +* If the index does not exist, delete document will not auto create it {pull}24518[#24518] (issue: {issue}15425[#15425]) + +Cluster:: +* Disallow : in cluster and index/alias names {pull}26247[#26247] (issue: {issue}23892[#23892]) +* No longer allow cluster name in data path {pull}20433[#20433] (issue: {issue}20391[#20391]) + +Core:: +* Simplify file store {pull}24402[#24402] (issue: {issue}24390[#24390]) +* Make boolean conversion strict {pull}22200[#22200] +* Remove the `default` store type. {pull}21616[#21616] +* Remove store throttling. 
{pull}21573[#21573] + +Geo:: +* Remove deprecated geo search features {pull}22876[#22876] +* Reduce GeoDistance Insanity {pull}19846[#19846] + +Highlighting:: +* Remove the postings highlighter and make unified the default highlighter choice {pull}25028[#25028] + +Index APIs:: +* Remove (deprecated) support for '+' in index expressions {pull}25274[#25274] (issue: {issue}24515[#24515]) +* Delete index API to work only against concrete indices {pull}25268[#25268] (issues: {issue}2318[#2318], {issue}23997[#23997]) +* Open/Close index api to allow_no_indices by default {pull}24401[#24401] (issues: {issue}24031[#24031], {issue}24341[#24341]) +* Remove support for controversial `ignore_unavailable` and `allow_no_indices` from indices exists api {pull}20712[#20712] + +Index Templates:: +* Allows multiple patterns to be specified for index templates {pull}21009[#21009] (issue: {issue}20690[#20690]) + +Indexed Scripts/Templates:: +* Scripting: Remove search template actions {pull}25717[#25717] + +Ingest:: +* update ingest-user-agent regexes.yml {pull}25608[#25608] +* remove ingest.new_date_format {pull}25583[#25583] + +Inner Hits:: +* Return the _source of inner hit nested as is without wrapping it into its full path context {pull}26982[#26982] (issues: {issue}26102[#26102], {issue}26944[#26944]) + +Java API:: +* Enforce Content-Type requirement on the rest layer and remove deprecated methods {pull}23146[#23146] (issue: {issue}19388[#19388]) + +Java REST Client:: +* Remove deprecated created and found from index, delete and bulk {pull}25516[#25516] (issues: {issue}19566[#19566], {issue}19630[#19630], {issue}19633[#19633]) + +Mapping:: +* Reject out of range numbers for float, double and half_float {pull}25826[#25826] (issue: {issue}25534[#25534]) +* Enforce at most one type. 
{pull}24428[#24428] (issue: {issue}24317[#24317]) +* Disallow `include_in_all` for 6.0+ indices {pull}22970[#22970] (issue: {issue}22923[#22923]) +* Disable _all by default, disallow configuring _all on 6.0+ indices {pull}22144[#22144] (issues: {issue}19784[#19784], {issue}20925[#20925], {issue}21341[#21341]) +* Throw an exception on unrecognized "match_mapping_type" {pull}22090[#22090] (issue: {issue}17285[#17285]) + +Network:: +* Remove unused Netty-related settings {pull}26161[#26161] +* Remove blocking TCP clients and servers {pull}22639[#22639] +* Remove `modules/transport_netty_3` in favor of `netty_4` {pull}21590[#21590] +* Remove LocalTransport in favor of MockTcpTransport {pull}20695[#20695] + +Packaging:: +* Configure heap dump path out of the box {pull}26755[#26755] (issue: {issue}26665[#26665]) +* Remove support for ES_INCLUDE {pull}25804[#25804] +* Setup: Change default heap to 1G {pull}25695[#25695] +* Use config directory to find jvm.options {pull}25679[#25679] (issue: {issue}23004[#23004]) +* Remove implicit 32-bit support {pull}25435[#25435] +* Remove default path settings {pull}25408[#25408] (issue: {issue}25357[#25357]) +* Remove path.conf setting {pull}25392[#25392] (issue: {issue}25357[#25357]) +* Honor masking of systemd-sysctl.service {pull}24234[#24234] (issues: {issue}21899[#21899], {issue}806[#806]) +* Rename CONF_DIR to ES_PATH_CONF {pull}26197[#26197] (issue: {issue}26154[#26154]) +* Remove customization of ES_USER and ES_GROUP {pull}23989[#23989] (issue: {issue}23848[#23848]) + +Percolator:: +* Remove deprecated percolate and mpercolate apis {pull}22331[#22331] + +Plugin Analysis ICU:: +* Upgrade icu4j for the ICU analysis plugin to 59.1 {pull}25243[#25243] (issue: {issue}21425[#21425]) +* Upgrade icu4j to latest version {pull}24821[#24821] + +Plugin Delete By Query:: +* Require explicit query in _delete_by_query API {pull}23632[#23632] (issue: {issue}23629[#23629]) + +Plugin Discovery Azure Classic:: +* Remove `discovery.type` BWC layer from the EC2/Azure/GCE plugins {pull}25080[#25080] (issue: {issue}24543[#24543]) + +Plugin Discovery EC2:: +* Ec2 Discovery: Cleanup deprecated settings {pull}24150[#24150] +* Discovery EC2: Remove region setting {pull}23991[#23991] (issue: {issue}22758[#22758]) +* AWS Plugins: Remove signer type setting {pull}23984[#23984] (issue: {issue}22599[#22599]) + +Plugin Lang JS:: +* Remove lang-python and lang-javascript {pull}20734[#20734] (issue: {issue}20698[#20698]) + +Plugin Mapper Attachment:: +* Remove mapper attachments plugin {pull}20416[#20416] (issue: {issue}18837[#18837]) + +Plugin Repository Azure:: +* Remove global `repositories.azure` settings {pull}23262[#23262] (issues: {issue}22800[#22800], {issue}22856[#22856]) +* Remove auto creation of container for azure repository {pull}22858[#22858] (issue: {issue}22857[#22857]) + +Plugin Repository GCS:: +* GCS Repository: Remove specifying credential file on disk {pull}24727[#24727] + +Plugin Repository S3:: +* S3 Repository: Cleanup deprecated settings {pull}24097[#24097] +* S3 Repository: Remove region setting {pull}22853[#22853] (issue: {issue}22758[#22758]) +* S3 Repository: Remove bucket auto create {pull}22846[#22846] (issue: {issue}22761[#22761]) +* S3 Repository: Remove env var and sysprop credentials support {pull}22842[#22842] +* Remove deprecated S3 settings {pull}24445[#24445] + +Plugins:: +* Make plugin loading stricter {pull}25405[#25405] + +Query DSL:: +* Remove deprecated `type` and `slop` field in `match` query {pull}26720[#26720] +* Remove several parse 
field deprecations in query builders {pull}26711[#26711] +* Remove deprecated parameters from `ids_query` {pull}26508[#26508] +* Refactor QueryStringQuery for 6.0 {pull}25646[#25646] (issue: {issue}25574[#25574]) +* Change `split_on_whitespace` default to false {pull}25570[#25570] (issue: {issue}25470[#25470]) +* Remove deprecated template query {pull}24577[#24577] (issue: {issue}19390[#19390]) +* Throw exception in scroll requests using `from` {pull}26235[#26235] (issue: {issue}9373[#9373]) +* Remove deprecated `minimum_number_should_match` in BoolQueryBuilder {pull}22416[#22416] +* Remove support for empty queries {pull}22092[#22092] (issue: {issue}17624[#17624]) +* Remove deprecated query names: in, geo_bbox, mlt, fuzzy_match and match_fuzzy {pull}21852[#21852] +* The `terms` query should always map to a Lucene `TermsQuery`. {pull}21786[#21786] +* Be strict when parsing values searching for booleans {pull}21555[#21555] (issue: {issue}21545[#21545]) +* Remove collect payloads parameter {pull}20385[#20385] + +REST:: +* IndexClosedException to return 400 rather than 403 {pull}25752[#25752] +* Remove comma-separated feature parsing for GetIndicesAction {pull}24723[#24723] (issue: {issue}24437[#24437]) +* Improve REST error handling when endpoint does not support HTTP verb, add OPTIONS support {pull}24437[#24437] (issues: {issue}0[#0], {issue}15335[#15335], {issue}17916[#17916]) +* Remove ldjson support and document ndjson for bulk/msearch {pull}23049[#23049] (issue: {issue}23025[#23025]) +* Enable strict duplicate checks for all XContent types {pull}22225[#22225] (issues: {issue}19614[#19614], {issue}22073[#22073]) +* Enable strict duplicate checks for JSON content {pull}22073[#22073] (issue: {issue}19614[#19614]) +* Remove lenient stats parsing {pull}21417[#21417] (issues: {issue}20722[#20722], {issue}21410[#21410]) +* Remove allow unquoted JSON {pull}20388[#20388] (issues: {issue}17674[#17674], {issue}17801[#17801]) +* Remove FORCE version_type {pull}20377[#20377] (issue: {issue}19769[#19769]) + +Scripting:: +* remove lang url parameter from stored script requests {pull}25779[#25779] (issue: {issue}22887[#22887]) +* Disallow lang to be used with Stored Scripts {pull}25610[#25610] +* Remove Deprecated Script Settings {pull}24756[#24756] (issue: {issue}24532[#24532]) +* Scripting: Remove native scripts {pull}24726[#24726] (issue: {issue}19966[#19966]) +* Scripting: Remove file scripts {pull}24627[#24627] (issue: {issue}21798[#21798]) +* Make dates be ReadableDateTimes in scripts {pull}22948[#22948] (issue: {issue}22875[#22875]) +* Remove groovy scripting language {pull}21607[#21607] +* Remove script access to term statistics {pull}19462[#19462] (issue: {issue}19359[#19359]) + +Search:: +* Make `index` in TermsLookup mandatory {pull}25753[#25753] (issue: {issue}25750[#25750]) +* Removes FieldStats API {pull}25628[#25628] (issue: {issue}25577[#25577]) +* Remove deprecated fielddata_fields from search request {pull}25566[#25566] (issue: {issue}25537[#25537]) +* Removes deprecated fielddata_fields {pull}25537[#25537] (issue: {issue}19027[#19027]) +* ProfileResult and CollectorResult should print machine readable timing information {pull}22561[#22561] +* Remove indices query {pull}21837[#21837] (issue: {issue}17710[#17710]) +* Remove ignored type parameter in search_shards api {pull}21688[#21688] + +Security:: +* Added new security limitations: +** When a user's role enables document level security for an index and +suggesters are specified, the specified suggesters are ignored. 
For more +information about suggesters, see {ref}/search-suggesters.html[Suggesters]. +** When document level security is enabled, search requests cannot be profiled. +For more information about profiling, see the +{ref}/search-profile.html[Profile API]. + +Sequence IDs:: +* Change certain replica failures not to fail the replica shard {pull}22874[#22874] (issue: {issue}10708[#10708]) + +Settings:: +* Settings: Remove shared setting property {pull}24728[#24728] +* Settings: Remove support for yaml and json config files {pull}24664[#24664] (issue: {issue}19391[#19391]) + +Shadow Replicas:: +* Remove shadow replicas {pull}23906[#23906] (issue: {issue}22024[#22024]) + +Similarities:: +* Similarity should accept dynamic settings when possible {pull}20339[#20339] (issue: {issue}6727[#6727]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Remove the unused SignificantTerms.compareTerm() method {pull}24714[#24714] +* Make SignificantTerms.Bucket an interface rather than an abstract class {pull}24670[#24670] (issue: {issue}24492[#24492]) +* Fix NPE when `values` is omitted on percentile_ranks agg {pull}26046[#26046] +* Make Terms.Bucket an interface rather than an abstract class {pull}24492[#24492] +* Compound order for histogram aggregations {pull}22343[#22343] (issues: {issue}14771[#14771], {issue}20003[#20003], {issue}23613[#23613]) + +Internal:: +* Collapses package structure for some bucket aggs {pull}25579[#25579] (issue: {issue}22868[#22868]) + +Java API:: +* Remove deprecated IdsQueryBuilder ctor {pull}25529[#25529] +* Removing unneeded getTookInMillis method {pull}23923[#23923] +* Java api: ActionRequestBuilder#execute to return a PlainActionFuture {pull}24415[#24415] (issues: {issue}24412[#24412], {issue}9201[#9201]) + +Java High Level REST Client:: +* Unify the result interfaces from get and search in Java client {pull}25361[#25361] (issue: {issue}16440[#16440]) +* Allow RestHighLevelClient to use plugins {pull}25024[#25024] + +Java REST Client:: +* Rename client artifacts {pull}25693[#25693] (issue: {issue}20248[#20248]) + +Network:: +* Simplify TransportAddress {pull}20798[#20798] + +Plugin Delete By Query:: +* Move DeleteByQuery and Reindex requests into core {pull}24578[#24578] + +Plugins:: +* Drop name from TokenizerFactory {pull}24869[#24869] + +Query DSL:: +* Remove QueryParseContext {pull}25486[#25486] +* Remove QueryParseContext from parsing QueryBuilders {pull}25448[#25448] + +REST:: +* Return index name and empty map for `/{index}/_alias` with no aliases {pull}25114[#25114] (issues: {issue}24723[#24723], {issue}25090[#25090]) + +[float] +=== Deprecations + +Index APIs:: +* Deprecated use of + in index expressions {pull}24585[#24585] (issue: {issue}24515[#24515]) + +Index Templates:: +* Restore deprecation warning for invalid match_mapping_type values {pull}22304[#22304] + +Indexed Scripts/Templates:: +* Scripting: Deprecate stored search template apis {pull}25437[#25437] (issue: {issue}24596[#24596]) + +Internal:: +* Deprecate XContentType auto detection methods in XContentFactory {pull}22181[#22181] (issue: {issue}19388[#19388]) + +Percolator:: +* Deprecate percolate query's document_type parameter. 
{pull}25199[#25199] + +Plugins:: +* Plugins: Add backcompat for sha1 checksums {pull}26748[#26748] (issue: {issue}26746[#26746]) + +Scripting:: +* Scripting: Change keys for inline/stored scripts to source/id {pull}25127[#25127] +* Scripting: Deprecate native scripts {pull}24692[#24692] (issue: {issue}19966[#19966]) +* Scripting: Deprecate index lookup {pull}24691[#24691] (issue: {issue}19359[#19359]) +* Deprecate Fine Grain Settings for Scripts {pull}24573[#24573] (issue: {issue}24532[#24532]) +* Scripting: Deprecate file script settings {pull}24555[#24555] (issue: {issue}21798[#21798]) +* Scripting: Deprecate file scripts {pull}24552[#24552] (issue: {issue}21798[#21798]) + +Settings:: +* Settings: Update settings deprecation from yml to yaml {pull}24663[#24663] (issue: {issue}19391[#19391]) +* Deprecate settings in .yml and .json {pull}24059[#24059] (issue: {issue}19391[#19391]) + +Tribe Node:: +* Deprecate tribe service {pull}24598[#24598] (issue: {issue}24581[#24581]) + +[float] +=== New Features + +Aggregations:: +* SignificantText aggregation - like significant_terms, but for text {pull}24432[#24432] (issue: {issue}23674[#23674]) + +Analysis:: +* Expose simplepattern and simplepatternsplit tokenizers {pull}25159[#25159] (issue: {issue}23363[#23363]) +* Parse synonyms with the same analysis chain {pull}8049[#8049] (issue: {issue}7199[#7199]) + +Core:: +* Enable index-time sorting {pull}24055[#24055] (issue: {issue}6720[#6720]) + +Internal:: +* Automatically adjust search threadpool queue_size {pull}23884[#23884] (issue: {issue}3890[#3890]) + +Mapping:: +* Add new ip_range field type {pull}24433[#24433] + +Parent/Child:: +* Move parent_id query to the parent-join module {pull}25072[#25072] (issue: {issue}20257[#20257]) +* Introduce ParentJoinFieldMapper, a field mapper that creates parent/child relation within documents of the same index {pull}24978[#24978] (issue: {issue}20257[#20257]) + +Plugin Analysis ICU:: +* Add ICUCollationFieldMapper {pull}24126[#24126] + +Search:: +* Automatically early terminate search query based on index sorting {pull}24864[#24864] (issue: {issue}6720[#6720]) + +Sequence IDs:: +* Add a scheduled translog retention check {pull}25622[#25622] (issues: {issue}10708[#10708], {issue}25294[#25294]) +* Initialize sequence numbers on a shrunken index {pull}25321[#25321] (issue: {issue}10708[#10708]) +* Initialize primary term for shrunk indices {pull}25307[#25307] (issue: {issue}10708[#10708]) +* Introduce translog size and age based retention policies {pull}25147[#25147] (issue: {issue}10708[#10708]) + +Stats:: +* Adds nodes usage API to monitor usages of actions {pull}24169[#24169] + +Task Manager:: +* Task Management [ISSUE] {pull}15117[#15117] + +Upgrade API:: +* TemplateUpgraders should be called during rolling restart {pull}25263[#25263] (issues: {issue}24379[#24379], {issue}24680[#24680]) + +[float] +=== Enhancements + +Aggregations:: +* Add strict parsing of aggregation ranges {pull}25769[#25769] +* Adds rewrite phase to aggregations {pull}25495[#25495] (issue: {issue}17676[#17676]) +* Tweak AggregatorBase.addRequestCircuitBreakerBytes {pull}25162[#25162] (issue: {issue}24511[#24511]) +* Add superset size to Significant Term REST response {pull}24865[#24865] +* Add document count to Matrix Stats aggregation response {pull}24776[#24776] +* Adds an implementation of LogLogBeta for the cardinality aggregation {pull}22323[#22323] (issue: {issue}22230[#22230]) +* Support distance units in GeoHashGrid aggregation precision {pull}26291[#26291] (issue: 
{issue}5042[#5042]) +* Reject multiple methods in `percentiles` aggregation {pull}26163[#26163] (issue: {issue}26095[#26095]) +* Use `global_ordinals_hash` execution mode when sorting by sub aggregations. {pull}26014[#26014] (issue: {issue}24359[#24359]) +* Add a specialized deferring collector for terms aggregator {pull}25190[#25190] +* Agg builder accessibility fixes {pull}24323[#24323] +* Remove support for the include/pattern syntax. {pull}23141[#23141] (issue: {issue}22933[#22933]) +* Promote longs to doubles when a terms agg mixes decimal and non-decimal numbers {pull}22449[#22449] (issue: {issue}22232[#22232]) + +Allocation:: +* Adjust status on bad allocation explain requests {pull}25503[#25503] (issue: {issue}25458[#25458]) +* Promote replica on the highest version node {pull}25277[#25277] (issue: {issue}10708[#10708]) + +Analysis:: +* [Analysis] Support normalizer in request param {pull}24767[#24767] (issue: {issue}23347[#23347]) +* Enforce validation for PathHierarchy tokenizer {pull}23510[#23510] +* [analysis-icu] Allow setting unicodeSetFilter {pull}20814[#20814] (issue: {issue}20820[#20820]) +* Match- and MultiMatchQueryBuilder should only allow setting analyzer on string values {pull}23684[#23684] (issue: {issue}21665[#21665]) + +Bulk:: +* Simplify bulk request execution {pull}20109[#20109] + +CAT API:: +* expand `/_cat/nodes` to return information about hard drive {pull}21775[#21775] (issue: {issue}21679[#21679]) + +CRUD:: +* Added validation for upsert request {pull}24282[#24282] (issue: {issue}16671[#16671]) + +Circuit Breakers:: +* ScriptService: Replace max compilation per minute setting with max compilation rate {pull}26399[#26399] + +Cluster:: +* Validate a joining node's version with version of existing cluster nodes {pull}25808[#25808] +* Switch indices read-only if a node runs out of disk space {pull}25541[#25541] (issue: {issue}24299[#24299]) +* Add a cluster block that allows to delete indices that are read-only {pull}24678[#24678] +* Separate publishing from applying cluster states {pull}24236[#24236] +* Adds cluster state size to /_cluster/state response {pull}23440[#23440] (issue: {issue}3415[#3415]) + +Core:: +* Allow `InputStreamStreamInput` array size validation where applicable {pull}26692[#26692] +* Refactor bootstrap check results and error messages {pull}26637[#26637] +* Add BootstrapContext to expose settings and recovered state to bootstrap checks {pull}26628[#26628] +* Unit testable index creation task on MetaDataCreateIndexService {pull}25961[#25961] +* Ignore .DS_Store files on macOS {pull}27108[#27108] (issue: {issue}23982[#23982]) +* Add max file size bootstrap check {pull}25974[#25974] +* Add compatibility versions to main action response {pull}25799[#25799] +* Index ids in binary form. {pull}25352[#25352] (issues: {issue}18154[#18154], {issue}24615[#24615]) +* Explicitly reject duplicate data paths {pull}25178[#25178] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25097[#25097] +* Upgrade to lucene-7.0.0-snapshot-a0aef2f {pull}24775[#24775] +* Speed up PK lookups at index time. 
{pull}19856[#19856] +* Use Java 9 FilePermission model {pull}26302[#26302] (issue: {issue}21534[#21534]) +* Add friendlier message on bad keystore permissions {pull}26284[#26284] +* Epoch millis and second formats accept float implicitly {pull}26119[#26119] (issue: {issue}14641[#14641]) +* Remove connect SocketPermissions from core {pull}22797[#22797] +* Add repository-url module and move URLRepository {pull}22752[#22752] (issue: {issue}22116[#22116]) +* Remove accept SocketPermissions from core {pull}22622[#22622] (issue: {issue}22116[#22116]) +* Move IfConfig.logIfNecessary call into bootstrap {pull}22455[#22455] (issue: {issue}22116[#22116]) +* Remove artificial default processors limit {pull}20874[#20874] (issue: {issue}20828[#20828]) +* Simplify write failure handling {pull}19105[#19105] (issue: {issue}20109[#20109]) +* Improve bootstrap checks error messages {pull}24548[#24548] + +Discovery:: +* Allow plugins to validate cluster-state on join {pull}26595[#26595] + +Engine:: +* Add refresh stats tracking for realtime get {pull}25052[#25052] (issue: {issue}24806[#24806]) +* Introducing a translog deletion policy {pull}24950[#24950] +* Fill missing sequence IDs up to max sequence ID when recovering from store {pull}24238[#24238] (issue: {issue}10708[#10708]) +* Use sequence numbers to identify out of order delivery in replicas & recovery {pull}24060[#24060] (issue: {issue}10708[#10708]) +* Add replica ops with version conflict to translog {pull}22626[#22626] +* Clarify global checkpoint recovery {pull}21934[#21934] (issue: {issue}21254[#21254]) +* Move the IndexDeletionPolicy to be engine internal {pull}24930[#24930] (issue: {issue}10708[#10708]) + +Exceptions:: +* IllegalStateException: Only duplicated jar instead of classpath {pull}24953[#24953] + +Highlighting:: +* Picks offset source for the unified highlighter directly from the es mapping {pull}25747[#25747] (issue: {issue}25699[#25699]) + +Index APIs:: +* Let primary own its replication group {pull}25692[#25692] (issue: {issue}25485[#25485]) +* Create index request should return the index name {pull}25139[#25139] (issue: {issue}23044[#23044]) + +Index Templates:: +* Fix error message for a put index template request without index_patterns {pull}27102[#27102] (issue: {issue}27100[#27100]) + +Ingest:: +* Add Ingest-Processor specific Rest Endpoints & Add Grok endpoint {pull}25059[#25059] (issue: {issue}24725[#24725]) +* Port support for commercial GeoIP2 databases from Logstash. {pull}24889[#24889] +* add `exclude_keys` option to KeyValueProcessor {pull}24876[#24876] (issue: {issue}23856[#23856]) +* Allow removing multiple fields in ingest processor {pull}24750[#24750] (issue: {issue}24622[#24622]) +* Add target_field parameter to ingest processors {pull}24133[#24133] (issues: {issue}23228[#23228], {issue}23682[#23682]) + +Inner Hits:: +* Reuse inner hit query weight {pull}24571[#24571] (issue: {issue}23917[#23917]) + +Internal:: +* TemplateUpgradeService should only run on the master {pull}27294[#27294] +* Cleanup IndexFieldData visibility {pull}25900[#25900] +* Bump the min compat version to 5.6.0 {pull}25805[#25805] +* "shard started" should show index and shard ID {pull}25157[#25157] +* Break out clear scroll logic from TransportClearScrollAction {pull}25125[#25125] (issue: {issue}25094[#25094]) +* Add helper methods to TransportActionProxy to identify proxy actions and requests {pull}25124[#25124] +* Add remote cluster infrastructure to fetch discovery nodes. 
{pull}25123[#25123] (issue: {issue}25094[#25094]) +* Add the ability to set eager_global_ordinals in the new parent-join field {pull}25019[#25019] +* Disallow multiple parent-join fields per mapping {pull}25002[#25002] +* Remove the need for _UNRELEASED suffix in versions {pull}24798[#24798] (issue: {issue}24768[#24768]) +* Optimize the order of bytes in uuids for better compression. {pull}24615[#24615] (issue: {issue}18209[#18209]) +* Prevent cluster internal `ClusterState.Custom` impls to leak to a client {pull}26232[#26232] +* Use holder pattern for lazy deprecation loggers {pull}26218[#26218] (issue: {issue}26210[#26210]) +* Allow `ClusterState.Custom` to be created on initial cluster states {pull}26144[#26144] +* Try to convince the JVM not to lose stacktraces {pull}24426[#24426] (issue: {issue}24376[#24376]) +* Make document write requests immutable {pull}23038[#23038] +* Add assertions enabled helper {pull}24834[#24834] + +Java API:: +* Always Accumulate Transport Exceptions {pull}25017[#25017] (issue: {issue}23099[#23099]) + +Java High Level REST Client:: +* [DOCS] restructure java clients docs pages {pull}25517[#25517] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25098[#25098] (issues: {issue}25024[#25024], {issue}25097[#25097]) +* Add support for clear scroll to high level REST client {pull}25038[#25038] +* Add search scroll method to high level REST client {pull}24938[#24938] (issue: {issue}23331[#23331]) +* Add search method to high level REST client {pull}24796[#24796] (issues: {issue}24794[#24794], {issue}24795[#24795]) +* Make RestHighLevelClient Closeable and simplify its creation {pull}26180[#26180] (issue: {issue}26086[#26086]) +* Add info method to High Level Rest client {pull}23350[#23350] +* Add support for named xcontent parsers to high level REST client {pull}23328[#23328] +* Add BulkRequest support to High Level Rest client {pull}23312[#23312] +* Add UpdateRequest support to High Level Rest client {pull}23266[#23266] +* Add delete API to the High Level Rest Client {pull}23187[#23187] +* Add Index API to High Level Rest Client {pull}23040[#23040] +* Add get/exists method to RestHighLevelClient {pull}22706[#22706] +* Add fromxcontent methods to delete response {pull}22680[#22680] (issue: {issue}22229[#22229]) +* Add REST high level client gradle submodule and first simple method {pull}22371[#22371] +* Add doc_count to ParsedMatrixStats {pull}24952[#24952] (issue: {issue}24776[#24776]) +* Add fromXContent method to ClearScrollResponse {pull}24909[#24909] +* ClearScrollRequest to implement ToXContentObject {pull}24907[#24907] +* SearchScrollRequest to implement ToXContentObject {pull}24906[#24906] (issue: {issue}3889[#3889]) +* Add aggs parsers for high level REST Client {pull}24824[#24824] (issues: {issue}23965[#23965], {issue}23973[#23973], {issue}23974[#23974], {issue}24085[#24085], {issue}24160[#24160], {issue}24162[#24162], {issue}24182[#24182], {issue}24183[#24183], {issue}24208[#24208], {issue}24213[#24213], {issue}24239[#24239], {issue}24284[#24284], {issue}24312[#24312], {issue}24330[#24330], {issue}24365[#24365], {issue}24371[#24371], {issue}24442[#24442], {issue}24521[#24521], {issue}24524[#24524], {issue}24564[#24564], {issue}24583[#24583], {issue}24589[#24589], {issue}24648[#24648], {issue}24667[#24667], {issue}24675[#24675], {issue}24682[#24682], {issue}24700[#24700], {issue}24706[#24706], {issue}24717[#24717], {issue}24720[#24720], {issue}24738[#24738], {issue}24746[#24746], {issue}24789[#24789], {issue}24791[#24791], 
{issue}24794[#24794], {issue}24796[#24796], {issue}24822[#24822]) + +Java REST Client:: +* Shade external dependencies in the rest client jar {pull}25780[#25780] (issue: {issue}25208[#25208]) +* RestClient uses system properties and system default SSLContext {pull}25757[#25757] (issue: {issue}23231[#23231]) +* Wrap rest httpclient with doPrivileged blocks {pull}22603[#22603] (issue: {issue}22116[#22116]) + +Logging:: +* Prevent excessive disk consumption by log files {pull}25660[#25660] +* Use LRU set to reduce repeat deprecation messages {pull}25474[#25474] (issue: {issue}25457[#25457]) + +Mapping:: +* More efficient encoding of range fields. {pull}26470[#26470] (issue: {issue}26443[#26443]) +* Don't detect source's XContentType in DocumentParser.parseDocument() {pull}26880[#26880] +* Better validation of `copy_to`. {pull}25983[#25983] +* Optimize `terms` queries on `ip` addresses to use a `PointInSetQuery` whenever possible. {pull}25669[#25669] (issue: {issue}25667[#25667]) +* Loosen the restrictions on disabling _all in 6.x {pull}26259[#26259] +* Date detection should not rely on a hardcoded set of characters. {pull}22171[#22171] (issue: {issue}1694[#1694]) +* Identify documents by their `_id`. {pull}24460[#24460] + +Network:: +* Add additional low-level logging handler {pull}26887[#26887] +* Unwrap causes when maybe dying {pull}26884[#26884] +* Move TransportStats accounting into TcpTransport {pull}25251[#25251] +* Simplify connection closing and cleanups in TcpTransport {pull}25250[#25250] +* Disable the Netty recycler in the client {pull}24793[#24793] (issues: {issue}22452[#22452], {issue}24721[#24721]) +* Remove Netty logging hack {pull}24653[#24653] (issues: {issue}24469[#24469], {issue}5624[#5624], {issue}6568[#6568], {issue}6696[#6696]) +* Isolate SocketPermissions to Netty {pull}23057[#23057] +* Wrap netty accept/connect ops with doPrivileged {pull}22572[#22572] (issue: {issue}22116[#22116]) +* Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions {pull}22287[#22287] (issue: {issue}22116[#22116]) + +Packaging:: +* Remove memlock suggestion from systemd service {pull}25979[#25979] +* Set address space limit in systemd service file {pull}25975[#25975] +* Version option should display if snapshot {pull}25970[#25970] +* Ignore JVM options before checking Java version {pull}25969[#25969] +* Also skip JAVA_TOOL_OPTIONS on Windows {pull}25968[#25968] +* Introduce elasticsearch-env for Windows {pull}25958[#25958] +* Introduce elasticsearch-env {pull}25815[#25815] (issue: {issue}20286[#20286]) +* Stop exporting HOSTNAME from scripts {pull}25807[#25807] +* Set number of processes in systemd unit file {pull}24970[#24970] (issue: {issue}20874[#20874]) + +Parent/Child:: +* Remove ParentJoinFieldSubFetchPhase {pull}25550[#25550] (issue: {issue}25363[#25363]) +* Support parent id being specified as number in the _source {pull}25547[#25547] + +Percolator:: +* Store the QueryBuilder's Writable representation instead of its XContent representation {pull}25456[#25456] +* Add support for selecting percolator query candidate matches containing wildcard / prefix queries {pull}25351[#25351] + +Plugin Discovery EC2:: +* Read ec2 discovery address from aws instance tags {pull}22743[#22743] (issue: {issue}22566[#22566]) + +Plugin Lang Painless:: +* Allow Custom Whitelists in Painless {pull}25557[#25557] +* Update Painless to Allow Augmentation from Any Class {pull}25360[#25360] +* Add Needs Methods to Painless Script Context Factories {pull}25267[#25267] +* Support Script 
Context Stateful Factory in Painless {pull}25233[#25233] +* Generate Painless Factory for Creating Script Instances {pull}25120[#25120] +* Update Painless to Use New Script Contexts {pull}25015[#25015] +* Optimize instance creation in LambdaBootstrap {pull}24618[#24618] +* Make Painless Compiler Use an Instance Per Context {pull}24972[#24972] +* Make PainlessScript An Interface {pull}24966[#24966] + +Plugin Repository GCS:: +* GCS Repository: Add secure storage of credentials {pull}24697[#24697] + +Plugin Repository HDFS:: +* Add permission checks before reading from HDFS stream {pull}26716[#26716] (issue: {issue}26714[#26714]) +* Add doPrivilege blocks for socket connect ops in repository-hdfs {pull}22793[#22793] (issue: {issue}22116[#22116]) +* Add Kerberos support for Repo HDFS plugin [ISSUE] {pull}21990[#21990] + +Plugin Repository S3:: +* S3 Repository: Add back repository level credentials {pull}24609[#24609] + +Plugins:: +* Adjust SHA-512 supported format on plugin install {pull}27093[#27093] +* Move tribe to a module {pull}25778[#25778] +* Plugins can register pre-configured char filters {pull}25000[#25000] (issue: {issue}23658[#23658]) +* Add purge option to remove plugin CLI {pull}24981[#24981] +* Allow plugins to register pre-configured tokenizers {pull}24751[#24751] (issues: {issue}24223[#24223], {issue}24572[#24572]) +* Move ReindexAction class to core {pull}24684[#24684] (issue: {issue}24578[#24578]) +* Make PreConfiguredTokenFilter harder to misuse {pull}24572[#24572] (issue: {issue}23658[#23658]) +* Plugins: Remove leniency for missing plugins dir {pull}24173[#24173] +* Add doPrivilege blocks for socket connect operations in plugins {pull}22534[#22534] (issue: {issue}22116[#22116]) + +Query DSL:: +* Make slop optional when parsing `span_near` query {pull}25677[#25677] (issue: {issue}25642[#25642]) +* Require a field when a `seed` is provided to the `random_score` function. 
{pull}25594[#25594] (issue: {issue}25240[#25240]) +* Add support for auto_generate_synonyms_phrase_query in match_query, multi_match_query, query_string and simple_query_string {pull}23147[#23147] + +REST:: +* Cat shards bytes {pull}26952[#26952] +* Refactor PathTrie and RestController to use a single trie for all methods {pull}25459[#25459] (issue: {issue}24437[#24437]) +* Make ObjectParser support string to boolean conversion {pull}24668[#24668] (issue: {issue}21802[#21802]) + +Recovery:: +* Introduce a History UUID as a requirement for ops based recovery {pull}26577[#26577] (issue: {issue}10708[#10708]) +* Goodbye, Translog Views {pull}25962[#25962] +* Disallow multiple concurrent recovery attempts for same target shard {pull}25428[#25428] +* Live primary-replica resync (no rollback) {pull}24841[#24841] (issue: {issue}10708[#10708]) +* Peer Recovery: remove maxUnsafeAutoIdTimestamp hand off {pull}24243[#24243] (issue: {issue}24149[#24149]) +* Introduce sequence-number-based recovery {pull}22484[#22484] (issue: {issue}10708[#10708]) + +Scripting:: +* Scripting: Rename SearchScript.needsScores to needs_score {pull}25235[#25235] +* Scripting: Add optional context parameter to put stored script requests {pull}25014[#25014] +* Add New Security Script Settings {pull}24637[#24637] (issue: {issue}24532[#24532]) +* Add StatefulFactoryType as optional intermediate factory in script contexts {pull}24974[#24974] (issue: {issue}20426[#20426]) +* Make contexts available to ScriptEngine construction {pull}24896[#24896] +* Make ScriptEngine.compile generic on the script context {pull}24873[#24873] +* Add instance and compiled classes to script contexts {pull}24868[#24868] + +Search:: +* Add soft limit on allowed number of script fields in request {pull}26598[#26598] (issue: {issue}26390[#26390]) +* Add a soft limit for the number of requested doc-value fields {pull}26574[#26574] (issue: {issue}26390[#26390]) +* Rewrite search requests on the coordinating nodes {pull}25814[#25814] (issue: {issue}25791[#25791]) +* Ensure query resources are fetched asynchronously during rewrite {pull}25791[#25791] +* Introduce a new Rewriteable interface to streamline rewriting {pull}25788[#25788] +* Reduce the scope of `QueryRewriteContext` {pull}25787[#25787] +* Reduce the overhead of timeouts and low-level search cancellation. {pull}25776[#25776] +* Reduce profiling overhead. {pull}25772[#25772] (issue: {issue}24799[#24799]) +* Prevent `can_match` requests from sending to incompatible nodes {pull}25705[#25705] (issue: {issue}25704[#25704]) +* Add a shard filter search phase to pre-filter shards based on query rewriting {pull}25658[#25658] +* Ensure we rewrite common queries to `match_none` if possible {pull}25650[#25650] +* Limit the number of concurrent shard requests per search request {pull}25632[#25632] +* Add cluster name validation to RemoteClusterConnection {pull}25568[#25568] +* Speed up sorted scroll when the index sort matches the search sort {pull}25138[#25138] (issue: {issue}6720[#6720]) +* Leverage scorerSupplier when applicable. 
{pull}25109[#25109] +* Add Cross Cluster Search support for scroll searches {pull}25094[#25094] +* Track EWMA[1] of task execution time in search threadpool executor {pull}24989[#24989] (issue: {issue}24915[#24915]) +* Query range fields by doc values when they are expected to be more efficient than points {pull}24823[#24823] (issue: {issue}24314[#24314]) +* Search: Fairer balancing when routing searches by session ID {pull}24671[#24671] (issue: {issue}24642[#24642]) +* Add parsing from xContent to Suggest {pull}22903[#22903] +* Add parsing from xContent to ShardSearchFailure {pull}22699[#22699] +* Eliminate array access in tight loops when profiling is enabled. {pull}24959[#24959] +* Support Multiple Inner Hits on a Field Collapse Request {pull}24517[#24517] +* Expand cross cluster search indices for search requests to the concrete index or to it's aliases {pull}24502[#24502] + +Search Templates:: +* Add max concurrent searches to multi template search {pull}24255[#24255] (issues: {issue}20912[#20912], {issue}21907[#21907]) + +Sequence IDs:: +* Roll translog generation on primary promotion {pull}27313[#27313] +* Restoring from snapshot should force generation of a new history uuid {pull}26694[#26694] (issues: {issue}10708[#10708], {issue}26544[#26544], {issue}26557[#26557], {issue}26577[#26577]) +* Add global checkpoint tracking on the primary {pull}26666[#26666] (issue: {issue}26591[#26591]) +* Introduce global checkpoint background sync {pull}26591[#26591] (issues: {issue}26573[#26573], {issue}26630[#26630], {issue}26666[#26666]) +* Move `UNASSIGNED_SEQ_NO` and `NO_OPS_PERFORMED` to SequenceNumbers` {pull}26494[#26494] (issue: {issue}10708[#10708]) +* Move primary term from ReplicationRequest to ConcreteShardRequest {pull}25822[#25822] +* Add reason to global checkpoint updates on replica {pull}25612[#25612] (issue: {issue}10708[#10708]) +* Introduce primary/replica mode for GlobalCheckPointTracker {pull}25468[#25468] +* Throw back replica local checkpoint on new primary {pull}25452[#25452] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Update global checkpoint when increasing primary term on replica {pull}25422[#25422] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Enable a long translog retention policy by default {pull}25294[#25294] (issues: {issue}10708[#10708], {issue}25147[#25147]) +* Introduce primary context {pull}25122[#25122] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Block older operations on primary term transition {pull}24779[#24779] (issue: {issue}10708[#10708]) +* Block global checkpoint advances when recovering {pull}24404[#24404] (issue: {issue}10708[#10708]) +* Add primary term to doc write response {pull}24171[#24171] (issue: {issue}10708[#10708]) +* Preserve multiple translog generations {pull}24015[#24015] (issue: {issue}10708[#10708]) +* Introduce translog generation rolling {pull}23606[#23606] (issue: {issue}10708[#10708]) +* Replicate write failures {pull}23314[#23314] +* Introduce sequence-number-aware translog {pull}22822[#22822] (issue: {issue}10708[#10708]) +* Introduce translog no-op {pull}22291[#22291] (issue: {issue}10708[#10708]) +* Tighten sequence numbers recovery {pull}22212[#22212] (issue: {issue}10708[#10708]) +* Add BWC layer to seq no infra and enable BWC tests {pull}22185[#22185] (issue: {issue}21670[#21670]) +* Add internal _primary_term doc values field, fix _seq_no indexing {pull}21637[#21637] (issues: {issue}10708[#10708], {issue}21480[#21480]) +* Add global checkpoint to translog checkpoints {pull}21254[#21254] 
+* Sequence numbers commit data for Lucene uses Iterable interface {pull}20793[#20793] (issue: {issue}10708[#10708]) +* Simplify GlobalCheckpointService and properly hook it for cluster state updates {pull}20720[#20720] +* Fill gaps on primary promotion {pull}24945[#24945] (issue: {issue}10708[#10708]) +* Introduce clean transition on primary promotion {pull}24925[#24925] (issue: {issue}10708[#10708]) +* Guarantee that translog generations are seqNo conflict free {pull}24825[#24825] (issues: {issue}10708[#10708], {issue}24779[#24779]) +* Inline global checkpoints {pull}24513[#24513] (issue: {issue}10708[#10708]) + +Settings:: +* Add disk threshold settings validation {pull}25600[#25600] (issue: {issue}25560[#25560]) +* Enable cross-setting validation {pull}25560[#25560] (issue: {issue}25541[#25541]) +* Validate `transport.profiles.*` settings {pull}25508[#25508] +* Cleanup network / transport related settings {pull}25489[#25489] +* Emit settings deprecation logging at most once {pull}25457[#25457] +* IndexMetaData: Introduce internal format index setting {pull}25292[#25292] +* Persist created keystore on startup unless keystore is present {pull}26253[#26253] (issue: {issue}26126[#26126]) +* Settings: Add keystore.seed auto generated secure setting {pull}26149[#26149] +* Settings: Add keystore creation to add commands {pull}26126[#26126] + +Snapshot/Restore:: +* Fixed references to Multi Index Syntax {pull}27283[#27283] +* Improves snapshot logging and snapshot deletion error handling {pull}25264[#25264] +* Enhances get snapshots API to allow retrieving repository index only {pull}24477[#24477] (issue: {issue}24288[#24288]) + +Stats:: +* Update `IndexShard#refreshMetric` via a `ReferenceManager.RefreshListener` {pull}25083[#25083] (issues: {issue}24806[#24806], {issue}25052[#25052]) +* Expose disk usage estimates in nodes stats {pull}22081[#22081] (issue: {issue}8686[#8686]) + +Store:: +* Remote support for lucene versions without checksums {pull}24021[#24021] + +Suggesters:: +* Remove deprecated _suggest endpoint {pull}22203[#22203] (issue: {issue}20305[#20305]) + +Task Manager:: +* Add descriptions to bulk tasks {pull}22059[#22059] (issue: {issue}21768[#21768]) + +Translog:: +* Translog file recovery should not rely on lucene commits {pull}25005[#25005] (issue: {issue}24950[#24950]) + +[float] +=== Bug Fixes + +Aggregations:: +* Do not delegate a null scorer to LeafBucketCollectors {pull}26747[#26747] (issue: {issue}26611[#26611]) +* Create weights lazily in filter and filters aggregation {pull}26983[#26983] +* Fix IndexOutOfBoundsException in histograms for NaN doubles (#26787) {pull}26856[#26856] (issue: {issue}26787[#26787]) +* Scripted_metric _agg parameter disappears if params are provided {pull}19863[#19863] (issue: {issue}19768[#19768]) +* Fixes array out of bounds for value count agg {pull}26038[#26038] (issue: {issue}17379[#17379]) +* Aggregations bug: Significant_text fails on arrays of text. 
{pull}25030[#25030] (issue: {issue}25029[#25029]) +* Check bucket metric ages point to a multi bucket agg {pull}26215[#26215] (issue: {issue}25775[#25775]) +* Terms aggregation should remap global ordinal buckets when a sub-aggregator is used to sort the terms {pull}24941[#24941] (issue: {issue}24788[#24788]) +* Correctly set doc_count when MovAvg "predicts" values on existing buckets {pull}24892[#24892] (issue: {issue}24327[#24327]) +* DateHistogram: Fix `extended_bounds` with `offset` {pull}23789[#23789] (issue: {issue}23776[#23776]) +* Fix ArrayIndexOutOfBoundsException when no ranges are specified in the query {pull}23241[#23241] (issue: {issue}22881[#22881]) + +Aliases:: +* mget with an alias shouldn't ignore alias routing {pull}25697[#25697] (issue: {issue}25696[#25696]) +* GET aliases should 404 if aliases are missing {pull}25043[#25043] (issue: {issue}24644[#24644]) + +Allocation:: +* Fix DiskThresholdMonitor flood warning {pull}26204[#26204] (issue: {issue}26201[#26201]) +* Allow wildcards for shard IP filtering {pull}26187[#26187] (issues: {issue}22591[#22591], {issue}26184[#26184]) + +Analysis:: +* Pre-configured shingle filter should disable graph analysis {pull}25853[#25853] (issue: {issue}25555[#25555]) +* PatternAnalyzer should lowercase wildcard queries when `lowercase` is true. {pull}24967[#24967] + +CAT API:: +* Fix NPE for /_cat/indices when no primary shard {pull}26953[#26953] (issue: {issue}26942[#26942]) + +CRUD:: +* Serialize and expose timeout of acknowledged requests in REST layer {pull}26189[#26189] (issue: {issue}26213[#26213]) +* Fix silent loss of last command to _bulk and _msearch due to missing newline {pull}25740[#25740] (issue: {issue}7601[#7601]) + +Cache:: +* Reduce the default number of cached queries. {pull}26949[#26949] (issue: {issue}26938[#26938]) +* fix bug of weight computation {pull}24856[#24856] + +Circuit Breakers:: +* Checks the circuit breaker before allocating bytes for a new big array {pull}25010[#25010] (issue: {issue}24790[#24790]) + +Cluster:: +* Register setting `cluster.indices.tombstones.size` {pull}26193[#26193] (issue: {issue}26191[#26191]) + +Core:: +* Correctly encode warning headers {pull}27269[#27269] (issue: {issue}27244[#27244]) +* Fix cache compute if absent for expired entries {pull}26516[#26516] +* Timed runnable should delegate to abstract runnable {pull}27095[#27095] (issue: {issue}27069[#27069]) +* Stop invoking non-existent syscall {pull}27016[#27016] (issue: {issue}20179[#20179]) +* MetaData Builder doesn't properly prevent an alias with the same name as an index {pull}26804[#26804] +* Release operation permit on thread-pool rejection {pull}25930[#25930] (issue: {issue}25863[#25863]) +* Node should start up despite of a lingering `.es_temp_file` {pull}21210[#21210] (issue: {issue}21007[#21007]) +* Fix cache expire after access {pull}24546[#24546] + +Dates:: +* Fix typo in date format {pull}26503[#26503] (issue: {issue}26500[#26500]) + +Discovery:: +* MasterNodeChangePredicate should use the node instance to detect master change {pull}25877[#25877] (issue: {issue}25471[#25471]) + +Engine:: +* Die with dignity while merging {pull}27265[#27265] (issue: {issue}19272[#19272]) +* Engine - do not index operations with seq# lower than the local checkpoint into lucene {pull}25827[#25827] (issues: {issue}1[#1], {issue}2[#2], {issue}25592[#25592]) + +Geo:: +* Fix typo in GeoUtils#isValidLongitude {pull}25121[#25121] + +Highlighting:: +* Fix percolator highlight sub fetch phase to not highlight query twice {pull}26622[#26622] +* 
FastVectorHighlighter should not cache the field query globally {pull}25197[#25197] (issue: {issue}25171[#25171]) +* Higlighters: Fix MultiPhrasePrefixQuery rewriting {pull}25103[#25103] (issue: {issue}25088[#25088]) +* Fix nested query highlighting {pull}26305[#26305] (issue: {issue}26230[#26230]) + +Index APIs:: +* Shrink API should ignore templates {pull}25380[#25380] (issue: {issue}25035[#25035]) +* Rollover max docs should only count primaries {pull}24977[#24977] (issue: {issue}24217[#24217]) +* Validates updated settings on closed indices {pull}24487[#24487] (issue: {issue}23787[#23787]) + +Ingest:: +* date processor should not fail if timestamp is specified as json number {pull}26986[#26986] (issue: {issue}26967[#26967]) +* date_index_name processor should not fail if timestamp is specified as json number {pull}26910[#26910] (issue: {issue}26890[#26890]) +* Sort Processor does not have proper behavior with targetField {pull}25237[#25237] (issue: {issue}24133[#24133]) +* fix grok's pattern parsing to validate pattern names in expression {pull}25063[#25063] (issue: {issue}22831[#22831]) +* Remove support for Visio and potm files {pull}22079[#22079] (issue: {issue}22077[#22077]) +* Fix floating-point error when DateProcessor parses UNIX {pull}24947[#24947] +* add option for _ingest.timestamp to use new ZonedDateTime (5.x backport) {pull}24030[#24030] (issues: {issue}23168[#23168], {issue}23174[#23174]) + +Inner Hits:: +* Do not allow inner hits that fetch _source and have a non nested object field as parent {pull}25749[#25749] (issue: {issue}25315[#25315]) +* When fetching nested inner hits only access stored fields when needed {pull}25864[#25864] (issue: {issue}6[#6]) +* If size / offset are out of bounds just do a plain count {pull}20556[#20556] (issue: {issue}20501[#20501]) +* Fix Source filtering in new field collapsing feature {pull}24068[#24068] (issue: {issue}24063[#24063]) + +Internal:: +* Bump version to 6.0.1 [OPEN] {pull}27386[#27386] +* `IndexShard.routingEntry` should only be updated once all internal state is ready {pull}26776[#26776] +* Catch exceptions and inform handler in RemoteClusterConnection#collectNodes {pull}26725[#26725] (issue: {issue}26700[#26700]) +* Internal: Add versionless alias for rest client codebase in policy files {pull}26521[#26521] +* Upgrade Lucene to version 7.0.1 {pull}26926[#26926] +* Fix BytesReferenceStreamInput#skip with offset {pull}25634[#25634] +* Fix race condition in RemoteClusterConnection node supplier {pull}25432[#25432] +* Initialise empty lists in BaseTaskResponse constructor {pull}25290[#25290] +* Extract a common base class for scroll executions {pull}24979[#24979] (issue: {issue}16555[#16555]) +* Obey lock order if working with store to get metadata snapshots {pull}24787[#24787] (issue: {issue}24481[#24481]) +* Fix Version based BWC and set correct minCompatVersion {pull}24732[#24732] +* Fix `_field_caps` serialization in order to support cross cluster search {pull}24722[#24722] +* Avoid race when shutting down controller processes {pull}24579[#24579] +* Fix handling of document failure exception in InternalEngine {pull}22718[#22718] +* Ensure remote cluster is connected before fetching `_field_caps` {pull}24845[#24845] (issue: {issue}24763[#24763]) + +Java API:: +* BulkProcessor flush runnable preserves the thread context from creation time {pull}26718[#26718] (issue: {issue}26596[#26596]) + +Java High Level REST Client:: +* Make RestHighLevelClient's Request class public {pull}26627[#26627] (issue: {issue}26455[#26455]) +* 
Forbid direct usage of ContentType.create() methods {pull}26457[#26457] (issues: {issue}22769[#22769], {issue}26438[#26438]) +* Make ShardSearchTarget optional when parsing ShardSearchFailure {pull}27078[#27078] (issue: {issue}27055[#27055]) + +Java REST Client:: +* Better message text for ResponseException {pull}26564[#26564] +* rest-client-sniffer: configurable threadfactory {pull}26897[#26897] + +Logging:: +* Allow not configure logging without config {pull}26209[#26209] (issues: {issue}20575[#20575], {issue}24076[#24076]) + +Machine Learning:: +* Fixed a race condition when simultaneous close requests are made for the same +job. + +Mapping:: +* Allow copying from a field to another field that belongs to the same nested object. {pull}26774[#26774] (issue: {issue}26763[#26763]) +* Fixed bug that mapper_parsing_exception is thrown for numeric field with ignore_malformed=true when inserting "NaN" {pull}25967[#25967] (issue: {issue}25289[#25289]) +* Coerce decimal strings for whole number types by truncating the decimal part {pull}25835[#25835] (issue: {issue}25819[#25819]) +* Fix parsing of ip range queries. {pull}25768[#25768] (issue: {issue}25636[#25636]) +* Disable date field mapping changing {pull}25285[#25285] (issue: {issue}25271[#25271]) +* Correctly enable _all for older 5.x indices {pull}25087[#25087] (issue: {issue}25068[#25068]) +* token_count datatype should handle null value {pull}25046[#25046] (issue: {issue}24928[#24928]) +* keep _parent field while updating child type mapping {pull}24407[#24407] (issue: {issue}23381[#23381]) +* ICUCollationKeywordFieldMapper use SortedSetDocValuesField {pull}26267[#26267] +* Fix serialization of the `_all` field. {pull}26143[#26143] (issue: {issue}26136[#26136]) + +More Like This:: +* Pass over _routing value with more_like_this items to be retrieved {pull}24679[#24679] (issue: {issue}23699[#23699]) + +NOT CLASSIFIED:: +* DocumentMissingException during Logstash scripted upsert [ISSUE] {pull}27148[#27148] +* An assertion trips when master opens an index from before 5.x [ISSUE] {pull}24809[#24809] + +Nested Docs:: +* In case of a single type the _id field should be added to the nested document instead of _uid field {pull}25149[#25149] +* Inner hits source filtering not working [ISSUE] {pull}23090[#23090] + +Network:: +* Fixed ByteBuf leaking in org.elasticsearch.http.netty4.Netty4HttpRequestHandler {pull}27222[#27222] (issues: {issue}3[#3], {issue}4[#4], {issue}5[#5], {issue}6[#6]) +* Check for closed connection while opening {pull}26932[#26932] +* Ensure pending transport handlers are invoked for all channel failures {pull}25150[#25150] +* Notify onConnectionClosed rather than onNodeDisconnect to prune transport handlers {pull}24639[#24639] (issues: {issue}24557[#24557], {issue}24575[#24575], {issue}24632[#24632]) +* Release pipelined http responses on close {pull}26226[#26226] +* Fix error message if an incompatible node connects {pull}24884[#24884] + +Packaging:: +* Fix handling of Windows paths containing parentheses {pull}26916[#26916] (issue: {issue}26454[#26454]) +* Exit Windows scripts promptly on failure {pull}25959[#25959] +* Pass config path as a system property {pull}25943[#25943] +* ES_HOME needs to be made absolute before attempt at traversal {pull}25865[#25865] +* Fix elasticsearch-keystore handling of path.conf {pull}25811[#25811] +* Stop disabling explicit GC {pull}25759[#25759] +* Avoid failing install if system-sysctl is masked {pull}25657[#25657] (issue: {issue}24234[#24234]) +* Get short path name for native controllers 
{pull}25344[#25344] +* When stopping via systemd only kill the JVM, not its control group {pull}25195[#25195] +* remove remaining references to scripts directory {pull}24771[#24771] +* Handle parentheses in batch file path {pull}24731[#24731] (issue: {issue}24712[#24712]) +* Detect modified keystore on package removal {pull}26300[#26300] +* Create keystore on RPM and Debian package install {pull}26282[#26282] +* Add safer empty variable checking for Windows {pull}26268[#26268] (issue: {issue}26261[#26261]) +* Export HOSTNAME environment variable {pull}26262[#26262] (issues: {issue}25807[#25807], {issue}26255[#26255]) +* Fix daemonization command status test {pull}26196[#26196] (issue: {issue}26080[#26080]) +* Set RuntimeDirectory in systemd service {pull}23526[#23526] + +Parent/Child:: +* The default _parent field should not try to load global ordinals {pull}25851[#25851] (issue: {issue}25849[#25849]) + +Percolator:: +* Also support query extraction for queries wrapped inside a ESToParentBlockJoinQuery {pull}26754[#26754] +* Fix range queries with date range based on current time in percolator queries. {pull}24666[#24666] (issue: {issue}23921[#23921]) + +Plugin Analysis Kuromoji:: +* Fix kuromoji default stoptags {pull}26600[#26600] (issue: {issue}26519[#26519]) + +Plugin Analysis Phonetic:: +* Fix beidermorse phonetic token filter for unspecified `languageset` {pull}27112[#27112] (issue: {issue}26771[#26771]) + +Plugin Discovery File:: +* Fix discovery-file plugin to use custom config path {pull}26662[#26662] (issue: {issue}26660[#26660]) + +Plugin Ingest Attachment:: +* Add missing mime4j library {pull}22764[#22764] (issue: {issue}22077[#22077]) + +Plugin Lang Painless:: +* Painless: allow doubles to be casted to longs. {pull}25936[#25936] + +Plugin Repository Azure:: +* Azure snapshots can not be restored anymore {pull}26778[#26778] (issues: {issue}22858[#22858], {issue}26751[#26751], {issue}26777[#26777]) +* Snapshot : azure module - accelerate the listing of files (used in delete snapshot) {pull}25710[#25710] (issue: {issue}25424[#25424]) +* Use Azure upload method instead of our own implementation {pull}26751[#26751] +* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) + +Plugin Repository GCS:: +* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) + +Plugin Repository HDFS:: +* Add Log4j to SLF4J binding for repository-hdfs {pull}26514[#26514] (issue: {issue}26512[#26512]) +* Upgrading HDFS Repository Plugin to use HDFS 2.8.1 Client {pull}25497[#25497] (issue: {issue}25450[#25450]) + +Plugin Repository S3:: +* Avoid SecurityException in repository-S3 on DefaultS3OutputStream.flush() {pull}25254[#25254] (issue: {issue}25192[#25192]) +* Wrap getCredentials() in a doPrivileged() block {pull}23297[#23297] (issues: {issue}22534[#22534], {issue}23271[#23271]) + +Plugins:: +* X-Pack plugin download fails on Windows desktop [ISSUE] {pull}24570[#24570] +* Fix plugin installation permissions {pull}24527[#24527] (issue: {issue}24480[#24480]) + +Query DSL:: +* Fixed incomplete JSON body on count request making org.elasticsearch.rest.action.RestActions#parseTopLevelQueryBuilder go into endless loop {pull}26680[#26680] (issue: {issue}26083[#26083]) +* SpanNearQueryBuilder should return the inner clause when a single clause is provided {pull}25856[#25856] (issue: {issue}25630[#25630]) +* Refactor field expansion for match, multi_match and query_string query {pull}25726[#25726] (issues: 
{issue}25551[#25551], {issue}25556[#25556]) +* WrapperQueryBuilder should also rewrite the parsed query {pull}25480[#25480] + +REST:: +* Rest test fixes {pull}27354[#27354] +* Fix inconsistencies in the rest api specs for cat.snapshots {pull}26996[#26996] (issues: {issue}25737[#25737], {issue}26923[#26923]) +* Fix inconsistencies in the rest api specs for *_script {pull}26971[#26971] (issue: {issue}26923[#26923]) +* exists template needs a template name {pull}25988[#25988] +* Fix handling of invalid error trace parameter {pull}25785[#25785] (issue: {issue}25774[#25774]) +* Fix handling of exceptions thrown on HEAD requests {pull}25172[#25172] (issue: {issue}21125[#21125]) +* Fixed NPEs caused by requests without content. {pull}23497[#23497] (issue: {issue}24701[#24701]) +* Fix get mappings HEAD requests {pull}23192[#23192] (issue: {issue}21125[#21125]) + +Recovery:: +* Close translog view after primary-replica resync {pull}25862[#25862] (issue: {issue}24841[#24841]) + +Reindex API:: +* Fix update_by_query's default size parameter {pull}26784[#26784] (issue: {issue}26761[#26761]) +* Reindex: don't duplicate _source parameter {pull}24629[#24629] (issue: {issue}24628[#24628]) +* Add qa module that tests reindex-from-remote against pre-5.0 versions of Elasticsearch {pull}24561[#24561] (issues: {issue}23828[#23828], {issue}24520[#24520]) + +Scroll:: +* Fix single shard scroll within a cluster with nodes in version `>= 5.3` and `<= 5.3` {pull}24512[#24512] + +Search:: +* Fail query when a sort is provided in conjunction with rescorers {pull}26510[#26510] +* Let search phases override max concurrent requests {pull}26484[#26484] (issue: {issue}26198[#26198]) +* Avoid stack overflow on search phases {pull}27069[#27069] (issue: {issue}27042[#27042]) +* Fix search_after with geo distance sorting {pull}26891[#26891] +* Fix serialization errors when cross cluster search goes to a single shard {pull}26881[#26881] (issue: {issue}26833[#26833]) +* Early termination with index sorting should not set terminated_early in the response {pull}26597[#26597] (issue: {issue}26408[#26408]) +* Format doc values fields. {pull}22146[#22146] +* Fix term(s) query for range field {pull}25918[#25918] +* Caching a MinDocQuery can lead to wrong results. {pull}25909[#25909] +* Fix random score generation when no seed is provided. {pull}25908[#25908] +* Merge FunctionScoreQuery and FiltersFunctionScoreQuery {pull}25889[#25889] (issues: {issue}15709[#15709], {issue}23628[#23628]) +* Respect cluster alias in `_index` aggs and queries {pull}25885[#25885] (issue: {issue}25606[#25606]) +* First increment shard stats before notifying and potentially sending response {pull}25818[#25818] +* Remove assertion about deviation when casting to a float. {pull}25806[#25806] (issue: {issue}25330[#25330]) +* Prevent skipping shards if a suggest builder is present {pull}25739[#25739] (issue: {issue}25658[#25658]) +* Ensure remote cluster alias is preserved in inner hits aggs {pull}25627[#25627] (issue: {issue}25606[#25606]) +* Do not search locally if remote index pattern resolves to no indices {pull}25436[#25436] (issue: {issue}25426[#25426]) +* Adds check for negative search request size {pull}25397[#25397] (issue: {issue}22530[#22530]) +* Make sure range queries are correctly profiled. 
{pull}25108[#25108] +* Fix RangeFieldMapper rangeQuery to properly handle relations {pull}24808[#24808] (issue: {issue}24744[#24744]) +* Fix ExpandSearchPhase when response contains no hits {pull}24688[#24688] (issue: {issue}24672[#24672]) +* Refactor simple_query_string to handle text part like multi_match and query_string {pull}26145[#26145] (issue: {issue}25726[#25726]) +* Fix `_exists_` in query_string on empty indices. {pull}25993[#25993] (issue: {issue}25956[#25956]) +* Fix script field sort returning Double.MAX_VALUE for all documents {pull}24942[#24942] (issue: {issue}24940[#24940]) +* Compute the took time of the query after the expand phase of field collapsing {pull}24902[#24902] (issue: {issue}24900[#24900]) + +Security:: +* Prevented 6.0 nodes from joining clusters with un-upgraded version 5 +`.security` indices. For upgrade instructions, see +{stack-ref}/upgrading-elastic-stack.html[Upgrading the Elastic Stack]. +* Enabled read-only access to the index audit log by the `_xpack` internal user. +For more information, see +{stack-ov}/internal-users.html[Internal users]. +* Updated the concrete security index such that it is now always named +`.security-6`. In 6.0 beta and RC releases, it was sometimes named `.security-v6`. +* Fixed handling of exceptions when retrieving roles from a native roles store. +For more information about configuring a native realm, see +{stack-ov}/native-realm.html[Native User Authentication]. + +Sequence IDs:: +* Fire global checkpoint sync under system context {pull}26984[#26984] +* Fix pre-6.0 response to unknown replication actions {pull}25744[#25744] (issue: {issue}10708[#10708]) +* Track local checkpoint on primary immediately {pull}25434[#25434] (issues: {issue}10708[#10708], {issue}25355[#25355], {issue}25415[#25415]) +* Initialize max unsafe auto ID timestamp on shrink {pull}25356[#25356] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Use correct primary term for replicating NOOPs {pull}25128[#25128] +* Handle already closed while filling gaps {pull}25021[#25021] (issue: {issue}24925[#24925]) +* TranslogWriter.assertNoSeqNumberConflict failure [ISSUE] {pull}26710[#26710] +* Avoid losing ops in file-based recovery {pull}22945[#22945] (issue: {issue}22484[#22484]) +* Handle primary failure handling replica response {pull}24926[#24926] (issue: {issue}24935[#24935]) + +Settings:: +* Emit settings deprecation logging on empty update {pull}27017[#27017] (issue: {issue}26419[#26419]) +* Fix filtering for ListSetting {pull}26914[#26914] +* Fix settings serialization to not serialize secure settings or not take the total size into account {pull}25323[#25323] +* Keystore CLI should use the AddFileKeyStoreCommand for files {pull}25298[#25298] +* Allow resetting settings that use an IP validator {pull}24713[#24713] (issue: {issue}24709[#24709]) +* Updating an unrecognized setting should error out with that reason [ISSUE] {pull}25607[#25607] +* Settings: Fix setting groups to include secure settings {pull}25076[#25076] (issue: {issue}25069[#25069]) + +Similarities:: +* Add boolean similarity to built in similarity types {pull}26613[#26613] + +Snapshot/Restore:: +* Snapshot/Restore: better handle incorrect chunk_size settings in FS repo {pull}26844[#26844] (issue: {issue}26843[#26843]) +* Snapshot/Restore: Ensure that shard failure reasons are correctly stored in CS {pull}25941[#25941] (issue: {issue}25878[#25878]) +* Output all empty snapshot info fields if in verbose mode {pull}25455[#25455] (issue: {issue}24477[#24477]) +* Remove redundant and 
broken MD5 checksum from repository-s3 {pull}25270[#25270] (issue: {issue}25269[#25269]) +* Consolidates the logic for cleaning up snapshots on master election {pull}24894[#24894] (issue: {issue}24605[#24605]) +* Removes completed snapshot from cluster state on master change {pull}24605[#24605] (issue: {issue}24452[#24452]) +* Keep snapshot restore state and routing table in sync {pull}20836[#20836] (issue: {issue}19774[#19774]) +* Master failover during snapshotting could leave the snapshot incomplete [OPEN] [ISSUE] {pull}25281[#25281] +* Fix inefficient (worst case exponential) loading of snapshot repository {pull}24510[#24510] (issue: {issue}24509[#24509]) + +Stats:: +* Fix RestGetAction name typo {pull}27266[#27266] +* Keep cumulative elapsed scroll time in microseconds {pull}27068[#27068] (issue: {issue}27046[#27046]) +* _nodes/stats should not fail due to concurrent AlreadyClosedException {pull}25016[#25016] (issue: {issue}23099[#23099]) +* Avoid double decrement on current query counter {pull}24922[#24922] (issues: {issue}22996[#22996], {issue}24872[#24872]) +* Adjust available and free bytes to be non-negative on huge FSes {pull}24911[#24911] (issues: {issue}23093[#23093], {issue}24453[#24453]) + +Suggesters:: +* Fix division by zero in phrase suggester that causes assertion to fail {pull}27149[#27149] +* Context suggester should filter doc values field {pull}25858[#25858] (issue: {issue}25404[#25404]) +* Fix context suggester to read values from keyword type field {pull}24200[#24200] (issue: {issue}24129[#24129]) + +Templates:: +* Tests: Fix FullClusterRestartIT.testSnapshotRestore test failing in 6.x {pull}27218[#27218] (issue: {issue}27213[#27213]) + +Translog:: +* Fix Translog.Delete serialization for sequence numbers {pull}22543[#22543] + +Upgrade API:: +* Upgrade API: fix excessive logging and unnecessary template updates {pull}26698[#26698] (issue: {issue}26673[#26673]) + +[float] +=== Regressions + +Bulk:: +* Only re-parse operation if a mapping update was needed {pull}23832[#23832] (issue: {issue}23665[#23665]) + +Highlighting:: +* Fix Fast Vector Highlighter NPE on match phrase prefix {pull}25116[#25116] (issue: {issue}25088[#25088]) + +Search:: +* Always use DisjunctionMaxQuery to build cross fields disjunction {pull}25115[#25115] (issue: {issue}23966[#23966]) + +Sequence IDs:: +* Indexing performance degradation in 6.0.0-beta1 [ISSUE] {pull}26339[#26339] + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to Lucene 7.0.0 {pull}26744[#26744] +* Upgrade to lucene-7.0.0-snapshot-d94a5f0. {pull}26441[#26441] +* Upgrade to lucene-7.0.0-snapshot-a128fcb. 
{pull}26090[#26090] +* Upgrade to a Lucene 7 snapshot {pull}24089[#24089] (issues: {issue}23966[#23966], {issue}24086[#24086], {issue}24087[#24087], {issue}24088[#24088]) + +Logging:: +* Upgrade to Log4j 2.9.1 {pull}26750[#26750] (issues: {issue}109[#109], {issue}26464[#26464], {issue}26467[#26467]) +* Upgrade to Log4j 2.9.0 {pull}26450[#26450] (issue: {issue}23798[#23798]) + +Network:: +* Upgrade to Netty 4.1.13.Final {pull}25581[#25581] (issues: {issue}24729[#24729], {issue}6866[#6866]) +* Upgrade to Netty 4.1.11.Final {pull}24652[#24652] + +Plugin Ingest Attachment:: +* Update to Tika 1.14 {pull}21591[#21591] (issue: {issue}20390[#20390]) + +Upgrade API:: +* Improve stability and logging of TemplateUpgradeServiceIT tests {pull}25386[#25386] (issue: {issue}25382[#25382]) + +[[release-notes-6.0.0-rc2]] +== {es} version 6.0.0-rc2 + +[float] +[[breaking-6.0.0-rc2]] +=== Breaking Changes + +Inner Hits:: +* Return the _source of inner hit nested as is without wrapping it into its full path context {pull}26982[#26982] (issues: {issue}26102[#26102], {issue}26944[#26944]) + +//[float] +//=== Breaking Java Changes + +//[float] +//=== Deprecations + +//[float] +//=== New Features + +[float] +=== Enhancements + +Core:: +* Ignore .DS_Store files on macOS {pull}27108[#27108] (issue: {issue}23982[#23982]) + +Index Templates:: +* Fix error message for a put index template request without index_patterns {pull}27102[#27102] (issue: {issue}27100[#27100]) + +Machine Learning:: +* Added the `xpack.ml.max_model_memory_limit` setting, which can be dynamically +updated. For more information, see <>. +* Added checks and error messages for the `ml.enabled` and `ml.max_open_jobs` +node attributes. These are reserved for internal use and their values should be +set by using `xpack.ml.enabled` and `xpack.ml.max_open_jobs` +<>. + +Mapping:: +* Don't detect source's XContentType in DocumentParser.parseDocument() {pull}26880[#26880] + +Network:: +* Add additional low-level logging handler {pull}26887[#26887] +* Unwrap causes when maybe dying {pull}26884[#26884] + +Plugins:: +* Adjust SHA-512 supported format on plugin install {pull}27093[#27093] + +REST:: +* Cat shards bytes {pull}26952[#26952] + +Security:: +* Improved the error messages that are returned by the `setup-passwords` command. + +Watcher:: +* Added verification that the required templates exist before {watcher} starts. +For more information, see +{stack-ov}/how-watcher-works.html#scripts-templates[Scripts and Templates]. +* Added the `xpack.watcher.history.cleaner_service.enabled` setting. You can use +this setting to enable or disable the cleaner service, which removes previous +versions of {watcher} indices (for example, .watcher-history*) when it +determines that they are old. For more information, see <>. + +[float] +=== Bug Fixes + +Aggregations:: +* Create weights lazily in filter and filters aggregation {pull}26983[#26983] +* Fix IndexOutOfBoundsException in histograms for NaN doubles (#26787) {pull}26856[#26856] (issue: {issue}26787[#26787]) +* Scripted_metric _agg parameter disappears if params are provided {pull}19863[#19863] (issue: {issue}19768[#19768]) + +CAT API:: +* Fix NPE for /_cat/indices when no primary shard {pull}26953[#26953] (issue: {issue}26942[#26942]) + +Cache:: +* Reduce the default number of cached queries. 
{pull}26949[#26949] (issue: {issue}26938[#26938])
+
+Core::
+* Timed runnable should delegate to abstract runnable {pull}27095[#27095] (issue: {issue}27069[#27069])
+* Stop invoking non-existent syscall {pull}27016[#27016] (issue: {issue}20179[#20179])
+* MetaData Builder doesn't properly prevent an alias with the same name as an index {pull}26804[#26804]
+
+Ingest::
+* date processor should not fail if timestamp is specified as json number {pull}26986[#26986] (issue: {issue}26967[#26967])
+* date_index_name processor should not fail if timestamp is specified as json number {pull}26910[#26910] (issue: {issue}26890[#26890])
+
+Internal::
+* Upgrade Lucene to version 7.0.1 {pull}26926[#26926]
+
+Java High Level REST Client::
+* Make ShardSearchTarget optional when parsing ShardSearchFailure {pull}27078[#27078] (issue: {issue}27055[#27055])
+
+Java REST Client::
+* rest-client-sniffer: configurable threadfactory {pull}26897[#26897]
+
+Machine Learning::
+* Fixed a scenario where models were incorrectly combined. This problem occurred
+when anomaly detectors were considered to be the same despite having different
+partition field values.
+* Cleaned up the job closure process for situations where the job was still in
+the process of opening.
+
+Mapping::
+* wrong link target for datatype murmur3 {pull}27143[#27143]
+
+Network::
+* Check for closed connection while opening {pull}26932[#26932]
+
+Packaging::
+* Fix handling of Windows paths containing parentheses {pull}26916[#26916] (issue: {issue}26454[#26454])
+
+Percolator::
+* Also support query extraction for queries wrapped inside a ESToParentBlockJoinQuery {pull}26754[#26754]
+
+Plugin Analysis Phonetic::
+* Fix beidermorse phonetic token filter for unspecified `languageset` {pull}27112[#27112] (issue: {issue}26771[#26771])
+
+Plugin Repository Azure::
+* Use Azure upload method instead of our own implementation {pull}26751[#26751]
+
+REST::
+* Fix inconsistencies in the rest api specs for cat.snapshots {pull}26996[#26996] (issues: {issue}25737[#25737], {issue}26923[#26923])
+* Fix inconsistencies in the rest api specs for *_script {pull}26971[#26971] (issue: {issue}26923[#26923])
+* exists template needs a template name {pull}25988[#25988]
+
+Reindex API::
+* Fix update_by_query's default size parameter {pull}26784[#26784] (issue: {issue}26761[#26761])
+
+Search::
+* Avoid stack overflow on search phases {pull}27069[#27069] (issue: {issue}27042[#27042])
+* Fix search_after with geo distance sorting {pull}26891[#26891]
+* Fix serialization errors when cross cluster search goes to a single shard {pull}26881[#26881] (issue: {issue}26833[#26833])
+* Early termination with index sorting should not set terminated_early in the response {pull}26597[#26597] (issue: {issue}26408[#26408])
+* Format doc values fields. {pull}22146[#22146]
+
+Security::
+* Enabled PKI realms to obtain the password for the truststore from either the
+`truststore.secure_password` or the `truststore.password` setting. For more
+information, see <>.
+* Fixed document level security such that if your role has authority to access a
+root document, you also have access to its nested documents.
+* Fixed an issue that caused LDAP authentication requests to be slow and
+to require multiple binds when authenticating in user search mode.
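+
+As a minimal sketch of what the PKI realm fix above enables in
+`elasticsearch.yml` (the realm name `pki1` and the truststore path are
+hypothetical; `truststore.secure_password` would live in the {es} keystore
+rather than in this file):
+
+[source,yaml]
+----
+xpack.security.authc.realms.pki1:
+  type: pki
+  truststore.path: "certs/truststore.jks"
+  # per the fix above, the legacy non-secure setting is also still accepted:
+  # truststore.password: "changeme"
+----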
+
+Sequence IDs::
+* Fire global checkpoint sync under system context {pull}26984[#26984]
+
+Settings::
+* Emit settings deprecation logging on empty update {pull}27017[#27017] (issue: {issue}26419[#26419])
+* Fix filtering for ListSetting {pull}26914[#26914]
+
+Stats::
+* Keep cumulative elapsed scroll time in microseconds {pull}27068[#27068] (issue: {issue}27046[#27046])
+
+Suggesters::
+* Fix division by zero in phrase suggester that causes assertion to fail {pull}27149[#27149]
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[[release-notes-6.0.0-rc1]]
+== {es} version 6.0.0-rc1
+
+[float]
+[[breaking-6.0.0-rc1]]
+=== Breaking Changes
+
+Packaging::
+* Configure heap dump path out of the box {pull}26755[#26755] (issue: {issue}26665[#26665])
+
+Query DSL::
+* Remove deprecated `type` and `slop` field in `match` query {pull}26720[#26720]
+* Remove several parse field deprecations in query builders {pull}26711[#26711]
+* Remove deprecated parameters from `ids_query` {pull}26508[#26508]
+
+//[float]
+//=== Breaking Java Changes
+
+[float]
+=== Deprecations
+
+Plugins::
+* Plugins: Add backcompat for sha1 checksums {pull}26748[#26748] (issue: {issue}26746[#26746])
+
+Security::
+* The `xpack.security.authc.token.passphrase` setting is deprecated. If this setting is
+not used, the cluster automatically generates a key, which is the recommended
+method. See <>.
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Core::
+* Allow `InputStreamStreamInput` array size validation where applicable {pull}26692[#26692]
+* Refactor bootstrap check results and error messages {pull}26637[#26637]
+* Add BootstrapContext to expose settings and recovered state to bootstrap checks {pull}26628[#26628]
+* Unit testable index creation task on MetaDataCreateIndexService {pull}25961[#25961]
+
+Discovery::
+* Allow plugins to validate cluster-state on join {pull}26595[#26595]
+
+Mapping::
+* More efficient encoding of range fields. {pull}26470[#26470] (issue: {issue}26443[#26443])
+
+Plugin Repository HDFS::
+* Add permission checks before reading from HDFS stream {pull}26716[#26716] (issue: {issue}26714[#26714])
+
+Recovery::
+* Introduce a History UUID as a requirement for ops based recovery {pull}26577[#26577] (issue: {issue}10708[#10708])
+
+Security::
+* Added requirement for TLS/SSL when a cluster with security is running in
+production. If you try to upgrade to a production license when security is
+enabled, the upgrade is not successful until you configure TLS. For more
+information, see
+{stack-ov}/ssl-tls.html[Setting Up SSL/TLS on a Cluster].
+* Added bootstrap check that enforces the use of TLS when security is enabled
+and you are using a production license.
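+
+For example, a minimal sketch of transport TLS settings that satisfy the new
+production requirement (certificate paths are hypothetical; see the linked
+guide for the complete procedure):
+
+[source,yaml]
+----
+xpack.security.transport.ssl.enabled: true
+xpack.ssl.key: certs/node.key
+xpack.ssl.certificate: certs/node.crt
+xpack.ssl.certificate_authorities: [ "certs/ca.crt" ]
+----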
+
+Scripting::
+* ScriptService: Replace max compilation per minute setting with max compilation rate {pull}26399[#26399]
+
+Search::
+* Add soft limit on allowed number of script fields in request {pull}26598[#26598] (issue: {issue}26390[#26390])
+* Add a soft limit for the number of requested doc-value fields {pull}26574[#26574] (issue: {issue}26390[#26390])
+
+Sequence IDs::
+* Restoring from snapshot should force generation of a new history uuid {pull}26694[#26694] (issues: {issue}10708[#10708], {issue}26544[#26544], {issue}26557[#26557], {issue}26577[#26577])
+* Add global checkpoint tracking on the primary {pull}26666[#26666] (issue: {issue}26591[#26591])
+* Introduce global checkpoint background sync {pull}26591[#26591] (issues: {issue}26573[#26573], {issue}26630[#26630], {issue}26666[#26666])
+* Move `UNASSIGNED_SEQ_NO` and `NO_OPS_PERFORMED` to `SequenceNumbers` {pull}26494[#26494] (issue: {issue}10708[#10708])
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* Do not delegate a null scorer to LeafBucketCollectors {pull}26747[#26747] (issue: {issue}26611[#26611])
+
+Core::
+* Fix cache compute if absent for expired entries {pull}26516[#26516]
+
+Dates::
+* Fix typo in date format {pull}26503[#26503] (issue: {issue}26500[#26500])
+
+Highlighting::
+* Fix percolator highlight sub fetch phase to not highlight query twice {pull}26622[#26622]
+
+Inner Hits::
+* Do not allow inner hits that fetch _source and have a non nested object field as parent {pull}25749[#25749] (issue: {issue}25315[#25315])
+
+Internal::
+* `IndexShard.routingEntry` should only be updated once all internal state is ready {pull}26776[#26776]
+* Catch exceptions and inform handler in RemoteClusterConnection#collectNodes {pull}26725[#26725] (issue: {issue}26700[#26700])
+* Internal: Add versionless alias for rest client codebase in policy files {pull}26521[#26521]
+
+Java API::
+* BulkProcessor flush runnable preserves the thread context from creation time {pull}26718[#26718] (issue: {issue}26596[#26596])
+
+Java High Level REST Client::
+* Make RestHighLevelClient's Request class public {pull}26627[#26627] (issue: {issue}26455[#26455])
+* Forbid direct usage of ContentType.create() methods {pull}26457[#26457] (issues: {issue}22769[#22769], {issue}26438[#26438])
+
+Java REST Client::
+* Better message text for ResponseException {pull}26564[#26564]
+
+Machine Learning::
+* Fixed problem with dropped or duplicated data when datafeeds used aggregations.
+* Fixed problems when model plot is enabled and there are sparse metrics.
+* Improved modeling of long-term trends.
+* Fixed a bug in calculation of mean values for seasonal components.
+* Added more accurate adherence to model memory limit.
+
+Mapping::
+* Allow copying from a field to another field that belongs to the same nested object. {pull}26774[#26774] (issue: {issue}26763[#26763])
+
+Monitoring::
+* Fixed the email message when cluster license expiration issues are resolved.
+
+Plugin Analysis Kuromoji::
+* Fix kuromoji default stoptags {pull}26600[#26600] (issue: {issue}26519[#26519])
+
+Plugin Discovery File::
+* Fix discovery-file plugin to use custom config path {pull}26662[#26662] (issue: {issue}26660[#26660])
+
+Plugin Repository Azure::
+* Azure snapshots can not be restored anymore {pull}26778[#26778] (issues: {issue}22858[#22858], {issue}26751[#26751], {issue}26777[#26777])
+* Snapshot : azure module - accelerate the listing of files (used in delete snapshot) {pull}25710[#25710] (issue: {issue}25424[#25424])
+
+Plugin Repository HDFS::
+* Add Log4j to SLF4J binding for repository-hdfs {pull}26514[#26514] (issue: {issue}26512[#26512])
+
+Query DSL::
+* Fixed incomplete JSON body on count request making org.elasticsearch.rest.action.RestActions#parseTopLevelQueryBuilder go into endless loop {pull}26680[#26680] (issue: {issue}26083[#26083])
+
+Search::
+* Fail query when a sort is provided in conjunction with rescorers {pull}26510[#26510]
+* Let search phases override max concurrent requests {pull}26484[#26484] (issue: {issue}26198[#26198])
+
+Security::
+* Added the ability to infer the keystore type from its pathname when the type is not specified.
+* Added usability improvements for the password bootstrap tool. For more
+information, see <>.
+
+Similarities::
+* Add boolean similarity to built in similarity types {pull}26613[#26613]
+
+Upgrade API::
+* Upgrade API: fix excessive logging and unnecessary template updates {pull}26698[#26698] (issue: {issue}26673[#26673])
+
+Watcher::
+* Fixed {watcher} such that it loads only active watches.
+
+//[float]
+//=== Regressions
+
+//[float]
+//=== Known Issues
+
+[float]
+=== Upgrades
+
+Core::
+* Upgrade to Lucene 7.0.0 {pull}26744[#26744]
+* Upgrade to lucene-7.0.0-snapshot-d94a5f0. {pull}26441[#26441]
+
+Logging::
+* Upgrade to Log4j 2.9.1 {pull}26750[#26750] (issues: {issue}109[#109], {issue}26464[#26464], {issue}26467[#26467])
+* Upgrade to Log4j 2.9.0 {pull}26450[#26450] (issue: {issue}23798[#23798])
+
+[[release-notes-6.0.0-beta2]]
+== {es} version 6.0.0-beta2
+
+[float]
+[[breaking-6.0.0-beta2]]
+=== Breaking Changes
+
+Analysis::
+* Do not allow custom analyzers to have the same names as built-in analyzers {pull}22349[#22349] (issue: {issue}22263[#22263])
+
+Cluster::
+* Disallow : in cluster and index/alias names {pull}26247[#26247] (issue: {issue}23892[#23892])
+
+Inner Hits::
+* Unfiltered nested source should keep its full path {pull}26102[#26102] (issues: {issue}18567[#18567], {issue}23090[#23090])
+
+Mapping::
+* Reject out of range numbers for float, double and half_float {pull}25826[#25826] (issue: {issue}25534[#25534])
+
+Network::
+* Remove unused Netty-related settings {pull}26161[#26161]
+
+Packaging::
+* Rename CONF_DIR to ES_PATH_CONF {pull}26197[#26197] (issue: {issue}26154[#26154])
+
+Query DSL::
+* Throw exception in scroll requests using `from` {pull}26235[#26235] (issue: {issue}9373[#9373])
+
+[float]
+=== Breaking Java Changes
+
+Aggregations::
+* Fix NPE when `values` is omitted on percentile_ranks agg {pull}26046[#26046]
+
+[float]
+=== Deprecations
+
+Machine Learning::
+* The `max_running_jobs` node property is deprecated and is replaced by
+`xpack.ml.max_open_jobs`. See <>.
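+
+A sketch of the corresponding `elasticsearch.yml` migration for the
+`max_running_jobs` deprecation above (the value `20` is illustrative only):
+
+[source,yaml]
+----
+# deprecated node property, per the note above:
+# max_running_jobs: 20
+# replacement setting:
+xpack.ml.max_open_jobs: 20
+----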
+
+//[float]
+//=== New Features
+
+[float]
+=== Enhancements
+
+Aggregations::
+* Support distance units in GeoHashGrid aggregation precision {pull}26291[#26291] (issue: {issue}5042[#5042])
+* Reject multiple methods in `percentiles` aggregation {pull}26163[#26163] (issue: {issue}26095[#26095])
+* Use `global_ordinals_hash` execution mode when sorting by sub aggregations. {pull}26014[#26014] (issue: {issue}24359[#24359])
+* Add a specialized deferring collector for terms aggregator {pull}25190[#25190]
+
+Core::
+* Use Java 9 FilePermission model {pull}26302[#26302] (issue: {issue}21534[#21534])
+* Add friendlier message on bad keystore permissions {pull}26284[#26284]
+* Epoch millis and second formats accept float implicitly {pull}26119[#26119] (issue: {issue}14641[#14641])
+
+Internal::
+* Prevent cluster internal `ClusterState.Custom` impls to leak to a client {pull}26232[#26232]
+* Use holder pattern for lazy deprecation loggers {pull}26218[#26218] (issue: {issue}26210[#26210])
+* Allow `ClusterState.Custom` to be created on initial cluster states {pull}26144[#26144]
+
+Java High Level REST Client::
+* Make RestHighLevelClient Closeable and simplify its creation {pull}26180[#26180] (issue: {issue}26086[#26086])
+
+Machine Learning::
+* Added `xpack.ml.max_open_jobs` as a node attribute. See <>.
+
+Mapping::
+* Loosen the restrictions on disabling _all in 6.x {pull}26259[#26259]
+
+Percolator::
+* Store the QueryBuilder's Writable representation instead of its XContent representation {pull}25456[#25456]
+* Add support for selecting percolator query candidate matches containing wildcard / prefix queries {pull}25351[#25351]
+
+Security::
+* Added the `keystore.seed` setting to create a randomly generated bootstrap
+password if an actual password is not present.
+* The `bootstrap.password` secure setting is now managed locally on each node
+and no longer updates the security index.
+* The `xpack.security.authc.token.passphrase` setting is no longer mandatory
+when using the token service. The cluster automatically generates a secure key
+on startup. See {ref}/security-settings.html[Security Settings in {es}].
+* Added reserved `kibana_dashboard_only_user` role. For more information, see
+{kibana-ref}/xpack-dashboard-only-mode.html[Kibana Dashboard Only Mode].
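+
+Taken together, the `bootstrap.password` and token-service changes above mean
+a minimal secure configuration no longer needs an explicit
+`xpack.security.authc.token.passphrase`; a sketch:
+
+[source,yaml]
+----
+xpack.security.enabled: true
+# the token service now generates its own secure key at startup
+xpack.security.authc.token.enabled: true
+----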
+ +Settings:: +* Persist created keystore on startup unless keystore is present {pull}26253[#26253] (issue: {issue}26126[#26126]) +* Settings: Add keystore.seed auto generated secure setting {pull}26149[#26149] +* Settings: Add keystore creation to add commands {pull}26126[#26126] + +[float] +=== Bug Fixes + +Aggregations:: +* Check bucket metric ages point to a multi bucket agg {pull}26215[#26215] (issue: {issue}25775[#25775]) + +Allocation:: +* Fix DiskThresholdMonitor flood warning {pull}26204[#26204] (issue: {issue}26201[#26201]) +* Allow wildcards for shard IP filtering {pull}26187[#26187] (issues: {issue}22591[#22591], {issue}26184[#26184]) + +CRUD:: +* Serialize and expose timeout of acknowledged requests in REST layer {pull}26189[#26189] (issue: {issue}26213[#26213]) +* Fix silent loss of last command to _bulk and _msearch due to missing newline {pull}25740[#25740] (issue: {issue}7601[#7601]) + +Cluster:: +* Register setting `cluster.indices.tombstones.size` {pull}26193[#26193] (issue: {issue}26191[#26191]) + +Highlighting:: +* Fix nested query highlighting {pull}26305[#26305] (issue: {issue}26230[#26230]) + +Logging:: +* Allow not configure logging without config {pull}26209[#26209] (issues: {issue}20575[#20575], {issue}24076[#24076]) + +Machine Learning:: +* Fixed calculation of bucket count and empty bucket count statistics. + +Mapping:: +* ICUCollationKeywordFieldMapper use SortedSetDocValuesField {pull}26267[#26267] +* Fix serialization of the `_all` field. {pull}26143[#26143] (issue: {issue}26136[#26136]) + +Network:: +* Release pipelined http responses on close {pull}26226[#26226] + +Packaging:: +* Detect modified keystore on package removal {pull}26300[#26300] +* Create keystore on RPM and Debian package install {pull}26282[#26282] +* Add safer empty variable checking for Windows {pull}26268[#26268] (issue: {issue}26261[#26261]) +* Export HOSTNAME environment variable {pull}26262[#26262] (issues: {issue}25807[#25807], {issue}26255[#26255]) +* Fix daemonization command status test {pull}26196[#26196] (issue: {issue}26080[#26080]) +* Set RuntimeDirectory in systemd service {pull}23526[#23526] + +Search:: +* Refactor simple_query_string to handle text part like multi_match and query_string {pull}26145[#26145] (issue: {issue}25726[#25726]) +* Fix `_exists_` in query_string on empty indices. {pull}25993[#25993] (issue: {issue}25956[#25956]) + +Security:: +* The `xpack.security.authc.token.enabled` setting now defaults to true when +HTTPS is enabled. See <>. +* Improved the safety of file updates in the `x-pack/users` tool. +* Bootstrap checks no longer fail when checking secure settings. +* The `setup-password` tool no longer fails when using a default +`elasticsearch.yml` configuration file. +* Fixed validation of the input parameters in the +<>. + +Watcher:: +* Ensured that a watch can be activated and deactivated during execution. +* Ensured watch execution always uses the latest watch including its latest status. + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to lucene-7.0.0-snapshot-a128fcb. 
{pull}26090[#26090] + +[[release-notes-6.0.0-beta1]] +== {es} version 6.0.0-beta1 + +[float] +[[breaking-6.0.0-beta1]] +=== Breaking Changes + +Aggregations:: +* Change parsing of numeric `to` and `from` parameters in `date_range` aggregation {pull}25376[#25376] (issue: {issue}17920[#17920]) + +Aliases:: +* Wrong behavior deleting alias {pull}23997[#23997] (issues: {issue}10106[#10106], {issue}23960[#23960]) + +Highlighting:: +* Remove the postings highlighter and make unified the default highlighter choice {pull}25028[#25028] + +Index APIs:: +* Remove (deprecated) support for '+' in index expressions {pull}25274[#25274] (issue: {issue}24515[#24515]) +* Delete index API to work only against concrete indices {pull}25268[#25268] (issues: {issue}2318[#2318], {issue}23997[#23997]) + +Indexed Scripts/Templates:: +* Scripting: Remove search template actions {pull}25717[#25717] + +Ingest:: +* update ingest-user-agent regexes.yml {pull}25608[#25608] +* remove ingest.new_date_format {pull}25583[#25583] + +Java REST Client:: +* Remove deprecated created and found from index, delete and bulk {pull}25516[#25516] (issues: {issue}19566[#19566], {issue}19630[#19630], {issue}19633[#19633]) + +Packaging:: +* Remove support for ES_INCLUDE {pull}25804[#25804] +* Setup: Change default heap to 1G {pull}25695[#25695] +* Use config directory to find jvm.options {pull}25679[#25679] (issue: {issue}23004[#23004]) +* Remove implicit 32-bit support {pull}25435[#25435] +* Remove default path settings {pull}25408[#25408] (issue: {issue}25357[#25357]) +* Remove path.conf setting {pull}25392[#25392] (issue: {issue}25357[#25357]) +* Honor masking of systemd-sysctl.service {pull}24234[#24234] (issues: {issue}21899[#21899], {issue}806[#806]) + +Plugin Analysis ICU:: +* Upgrade icu4j for the ICU analysis plugin to 59.1 {pull}25243[#25243] (issue: {issue}21425[#21425]) + +Plugin Discovery Azure Classic:: +* Remove `discovery.type` BWC layer from the EC2/Azure/GCE plugins {pull}25080[#25080] (issue: {issue}24543[#24543]) + +Plugin Repository GCS:: +* GCS Repository: Remove specifying credential file on disk {pull}24727[#24727] + +Plugins:: +* Make plugin loading stricter {pull}25405[#25405] + +Query DSL:: +* Refactor QueryStringQuery for 6.0 {pull}25646[#25646] (issue: {issue}25574[#25574]) +* Change `split_on_whitespace` default to false {pull}25570[#25570] (issue: {issue}25470[#25470]) +* Remove deprecated template query {pull}24577[#24577] (issue: {issue}19390[#19390]) + +REST:: +* IndexClosedException to return 400 rather than 403 {pull}25752[#25752] +* Remove comma-separated feature parsing for GetIndicesAction {pull}24723[#24723] (issue: {issue}24437[#24437]) +* Improve REST error handling when endpoint does not support HTTP verb, add OPTIONS support {pull}24437[#24437] (issues: {issue}0[#0], {issue}15335[#15335], {issue}17916[#17916]) + +Scripting:: +* remove lang url parameter from stored script requests {pull}25779[#25779] (issue: {issue}22887[#22887]) +* Disallow lang to be used with Stored Scripts {pull}25610[#25610] +* Remove Deprecated Script Settings {pull}24756[#24756] (issue: {issue}24532[#24532]) +* Scripting: Remove native scripts {pull}24726[#24726] (issue: {issue}19966[#19966]) +* Scripting: Remove file scripts {pull}24627[#24627] (issue: {issue}21798[#21798]) + +Search:: +* Make `index` in TermsLookup mandatory {pull}25753[#25753] (issue: {issue}25750[#25750]) +* Removes FieldStats API {pull}25628[#25628] (issue: {issue}25577[#25577]) +* Remove deprecated fielddata_fields from search request 
{pull}25566[#25566] (issue: {issue}25537[#25537]) +* Removes deprecated fielddata_fields {pull}25537[#25537] (issue: {issue}19027[#19027]) + +Security:: +* A new bootstrap check enforces that TLS/SSL is required for inter-node +communication when running in +{ref}/bootstrap-checks.html#_development_vs_production_mode[production mode]. See +{stack-ov}/encrypting-communications.html[Encrypting Communications]. +* A new bootstrap check enforces that HTTPS is used by the built-in token +service when running in +{ref}/bootstrap-checks.html#_development_vs_production_mode[production mode]. +To disable the token service, set `xpack.security.authc.token.enabled` +to `false` in your `elasticsearch.yml`. See +<>. + +Settings:: +* Settings: Remove shared setting property {pull}24728[#24728] +* Settings: Remove support for yaml and json config files {pull}24664[#24664] (issue: {issue}19391[#19391]) + +Similarities:: +* Similarity should accept dynamic settings when possible {pull}20339[#20339] (issue: {issue}6727[#6727]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Remove the unused SignificantTerms.compareTerm() method {pull}24714[#24714] +* Make SignificantTerms.Bucket an interface rather than an abstract class {pull}24670[#24670] (issue: {issue}24492[#24492]) + +Internal:: +* Collapses package structure for some bucket aggs {pull}25579[#25579] (issue: {issue}22868[#22868]) + +Java API:: +* Remove deprecated IdsQueryBuilder ctor {pull}25529[#25529] +* Removing unneeded getTookInMillis method {pull}23923[#23923] + +Java High Level REST Client:: +* Unify the result interfaces from get and search in Java client {pull}25361[#25361] (issue: {issue}16440[#16440]) +* Allow RestHighLevelClient to use plugins {pull}25024[#25024] + +Java REST Client:: +* Rename client artifacts {pull}25693[#25693] (issue: {issue}20248[#20248]) + +Plugin Delete By Query:: +* Move DeleteByQuery and Reindex requests into core {pull}24578[#24578] + +Query DSL:: +* Remove QueryParseContext {pull}25486[#25486] +* Remove QueryParseContext from parsing QueryBuilders {pull}25448[#25448] + +REST:: +* Return index name and empty map for /`{index}`/_alias with no aliases {pull}25114[#25114] (issues: {issue}24723[#24723], {issue}25090[#25090]) + +[float] +=== Deprecations + +Index APIs:: +* Deprecated use of + in index expressions {pull}24585[#24585] (issue: {issue}24515[#24515]) + +Indexed Scripts/Templates:: +* Scripting: Deprecate stored search template apis {pull}25437[#25437] (issue: {issue}24596[#24596]) + +Percolator:: +* Deprecate percolate query's document_type parameter. 
{pull}25199[#25199] + +Scripting:: +* Scripting: Change keys for inline/stored scripts to source/id {pull}25127[#25127] +* Scripting: Deprecate native scripts {pull}24692[#24692] (issue: {issue}19966[#19966]) +* Scripting: Deprecate index lookup {pull}24691[#24691] (issue: {issue}19359[#19359]) +* Deprecate Fine Grain Settings for Scripts {pull}24573[#24573] (issue: {issue}24532[#24532]) +* Scripting: Deprecate file script settings {pull}24555[#24555] (issue: {issue}21798[#21798]) +* Scripting: Deprecate file scripts {pull}24552[#24552] (issue: {issue}21798[#21798]) + +Settings:: +* Settings: Update settings deprecation from yml to yaml {pull}24663[#24663] (issue: {issue}19391[#19391]) + +Tribe Node:: +* Deprecate tribe service {pull}24598[#24598] (issue: {issue}24581[#24581]) + +[float] +=== New Features + +Analysis:: +* Expose simplepattern and simplepatternsplit tokenizers {pull}25159[#25159] (issue: {issue}23363[#23363]) +* Parse synonyms with the same analysis chain {pull}8049[#8049] (issue: {issue}7199[#7199]) + +Parent/Child:: +* Move parent_id query to the parent-join module {pull}25072[#25072] (issue: {issue}20257[#20257]) +* Introduce ParentJoinFieldMapper, a field mapper that creates parent/child relation within documents of the same index {pull}24978[#24978] (issue: {issue}20257[#20257]) + +Search:: +* Automatically early terminate search query based on index sorting {pull}24864[#24864] (issue: {issue}6720[#6720]) + +Sequence IDs:: +* Add a scheduled translog retention check {pull}25622[#25622] (issues: {issue}10708[#10708], {issue}25294[#25294]) +* Initialize sequence numbers on a shrunken index {pull}25321[#25321] (issue: {issue}10708[#10708]) +* Initialize primary term for shrunk indices {pull}25307[#25307] (issue: {issue}10708[#10708]) +* Introduce translog size and age based retention policies {pull}25147[#25147] (issue: {issue}10708[#10708]) + +Stats:: +* Adds nodes usage API to monitor usages of actions {pull}24169[#24169] + +Task Manager:: +* Task Management {pull}15117[#15117] + +Upgrade API:: +* TemplateUpgraders should be called during rolling restart {pull}25263[#25263] (issues: {issue}24379[#24379], {issue}24680[#24680]) + +[float] +=== Enhancements + +Aggregations:: +* Add strict parsing of aggregation ranges {pull}25769[#25769] +* Adds rewrite phase to aggregations {pull}25495[#25495] (issue: {issue}17676[#17676]) +* Tweak AggregatorBase.addRequestCircuitBreakerBytes {pull}25162[#25162] (issue: {issue}24511[#24511]) +* Add superset size to Significant Term REST response {pull}24865[#24865] +* Add document count to Matrix Stats aggregation response {pull}24776[#24776] +* Adds an implementation of LogLogBeta for the cardinality aggregation {pull}22323[#22323] (issue: {issue}22230[#22230]) + +Allocation:: +* Adjust status on bad allocation explain requests {pull}25503[#25503] (issue: {issue}25458[#25458]) +* Promote replica on the highest version node {pull}25277[#25277] (issue: {issue}10708[#10708]) + +Analysis:: +* [Analysis] Support normalizer in request param {pull}24767[#24767] (issue: {issue}23347[#23347]) +* Enforce validation for PathHierarchy tokenizer {pull}23510[#23510] +* [analysis-icu] Allow setting unicodeSetFilter {pull}20814[#20814] (issue: {issue}20820[#20820]) + +CAT API:: +* expand `/_cat/nodes` to return information about hard drive {pull}21775[#21775] (issue: {issue}21679[#21679]) + +Cluster:: +* Validate a joining node's version with version of existing cluster nodes {pull}25808[#25808] +* Switch indices read-only if a node runs out of disk 
space {pull}25541[#25541] (issue: {issue}24299[#24299]) +* Add a cluster block that allows to delete indices that are read-only {pull}24678[#24678] + +Core:: +* Add max file size bootstrap check {pull}25974[#25974] +* Add compatibility versions to main action response {pull}25799[#25799] +* Index ids in binary form. {pull}25352[#25352] (issues: {issue}18154[#18154], {issue}24615[#24615]) +* Explicitly reject duplicate data paths {pull}25178[#25178] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25097[#25097] +* Upgrade to lucene-7.0.0-snapshot-a0aef2f {pull}24775[#24775] +* Speed up PK lookups at index time. {pull}19856[#19856] + +Engine:: +* Add refresh stats tracking for realtime get {pull}25052[#25052] (issue: {issue}24806[#24806]) +* Introducing a translog deletion policy {pull}24950[#24950] + +Exceptions:: +* IllegalStateException: Only duplicated jar instead of classpath {pull}24953[#24953] + +Highlighting:: +* Picks offset source for the unified highlighter directly from the es mapping {pull}25747[#25747] (issue: {issue}25699[#25699]) + +Index APIs:: +* Let primary own its replication group {pull}25692[#25692] (issue: {issue}25485[#25485]) +* Create index request should return the index name {pull}25139[#25139] (issue: {issue}23044[#23044]) + +Ingest:: +* Add Ingest-Processor specific Rest Endpoints & Add Grok endpoint {pull}25059[#25059] (issue: {issue}24725[#24725]) +* Port support for commercial GeoIP2 databases from Logstash. {pull}24889[#24889] +* add `exclude_keys` option to KeyValueProcessor {pull}24876[#24876] (issue: {issue}23856[#23856]) +* Allow removing multiple fields in ingest processor {pull}24750[#24750] (issue: {issue}24622[#24622]) +* Add target_field parameter to ingest processors {pull}24133[#24133] (issues: {issue}23228[#23228], {issue}23682[#23682]) + +Inner Hits:: +* Reuse inner hit query weight {pull}24571[#24571] (issue: {issue}23917[#23917]) + +Internal:: +* Cleanup IndexFieldData visibility {pull}25900[#25900] +* Bump the min compat version to 5.6.0 {pull}25805[#25805] +* "shard started" should show index and shard ID {pull}25157[#25157] +* Break out clear scroll logic from TransportClearScrollAction {pull}25125[#25125] (issue: {issue}25094[#25094]) +* Add helper methods to TransportActionProxy to identify proxy actions and requests {pull}25124[#25124] +* Add remote cluster infrastructure to fetch discovery nodes. {pull}25123[#25123] (issue: {issue}25094[#25094]) +* Add the ability to set eager_global_ordinals in the new parent-join field {pull}25019[#25019] +* Disallow multiple parent-join fields per mapping {pull}25002[#25002] +* Remove the need for _UNRELEASED suffix in versions {pull}24798[#24798] (issue: {issue}24768[#24768]) +* Optimize the order of bytes in uuids for better compression. 
{pull}24615[#24615] (issue: {issue}18209[#18209]) + +Java API:: +* Always Accumulate Transport Exceptions {pull}25017[#25017] (issue: {issue}23099[#23099]) + +Java High Level REST Client:: +* [DOCS] restructure java clients docs pages {pull}25517[#25517] +* Use SPI in High Level Rest Client to load XContent parsers {pull}25098[#25098] (issues: {issue}25024[#25024], {issue}25097[#25097]) +* Add support for clear scroll to high level REST client {pull}25038[#25038] +* Add search scroll method to high level REST client {pull}24938[#24938] (issue: {issue}23331[#23331]) +* Add search method to high level REST client {pull}24796[#24796] (issues: {issue}24794[#24794], {issue}24795[#24795]) + +Java REST Client:: +* Shade external dependencies in the rest client jar {pull}25780[#25780] (issue: {issue}25208[#25208]) +* RestClient uses system properties and system default SSLContext {pull}25757[#25757] (issue: {issue}23231[#23231]) + +Logging:: +* Prevent excessive disk consumption by log files {pull}25660[#25660] +* Use LRU set to reduce repeat deprecation messages {pull}25474[#25474] (issue: {issue}25457[#25457]) + +Mapping:: +* Better validation of `copy_to`. {pull}25983[#25983] +* Optimize `terms` queries on `ip` addresses to use a `PointInSetQuery` whenever possible. {pull}25669[#25669] (issue: {issue}25667[#25667]) + +Network:: +* Move TransportStats accounting into TcpTransport {pull}25251[#25251] +* Simplify connection closing and cleanups in TcpTransport {pull}25250[#25250] +* Disable the Netty recycler in the client {pull}24793[#24793] (issues: {issue}22452[#22452], {issue}24721[#24721]) +* Remove Netty logging hack {pull}24653[#24653] (issues: {issue}24469[#24469], {issue}5624[#5624], {issue}6568[#6568], {issue}6696[#6696]) + +Packaging:: +* Remove memlock suggestion from systemd service {pull}25979[#25979] +* Set address space limit in systemd service file {pull}25975[#25975] +* Version option should display if snapshot {pull}25970[#25970] +* Ignore JVM options before checking Java version {pull}25969[#25969] +* Also skip JAVA_TOOL_OPTIONS on Windows {pull}25968[#25968] +* Introduce elasticsearch-env for Windows {pull}25958[#25958] +* Introduce elasticsearch-env {pull}25815[#25815] (issue: {issue}20286[#20286]) +* Stop exporting HOSTNAME from scripts {pull}25807[#25807] + +Parent/Child:: +* Remove ParentJoinFieldSubFetchPhase {pull}25550[#25550] (issue: {issue}25363[#25363]) +* Support parent id being specified as number in the _source {pull}25547[#25547] + +Plugin Lang Painless:: +* Allow Custom Whitelists in Painless {pull}25557[#25557] +* Update Painless to Allow Augmentation from Any Class {pull}25360[#25360] +* Add Needs Methods to Painless Script Context Factories {pull}25267[#25267] +* Support Script Context Stateful Factory in Painless {pull}25233[#25233] +* Generate Painless Factory for Creating Script Instances {pull}25120[#25120] +* Update Painless to Use New Script Contexts {pull}25015[#25015] +* Optimize instance creation in LambdaBootstrap {pull}24618[#24618] + +Plugin Repository GCS:: +* GCS Repository: Add secure storage of credentials {pull}24697[#24697] + +Plugin Repository S3:: +* S3 Repository: Add back repository level credentials {pull}24609[#24609] + +Plugins:: +* Move tribe to a module {pull}25778[#25778] +* Plugins can register pre-configured char filters {pull}25000[#25000] (issue: {issue}23658[#23658]) +* Add purge option to remove plugin CLI {pull}24981[#24981] +* Allow plugins to register pre-configured tokenizers {pull}24751[#24751] (issues: 
{issue}24223[#24223], {issue}24572[#24572]) +* Move ReindexAction class to core {pull}24684[#24684] (issue: {issue}24578[#24578]) +* Make PreConfiguredTokenFilter harder to misuse {pull}24572[#24572] (issue: {issue}23658[#23658]) + +Query DSL:: +* Make slop optional when parsing `span_near` query {pull}25677[#25677] (issue: {issue}25642[#25642]) +* Require a field when a `seed` is provided to the `random_score` function. {pull}25594[#25594] (issue: {issue}25240[#25240]) + +REST:: +* Refactor PathTrie and RestController to use a single trie for all methods {pull}25459[#25459] (issue: {issue}24437[#24437]) +* Make ObjectParser support string to boolean conversion {pull}24668[#24668] (issue: {issue}21802[#21802]) + +Recovery:: +* Goodbye, Translog Views {pull}25962[#25962] +* Disallow multiple concurrent recovery attempts for same target shard {pull}25428[#25428] +* Live primary-replica resync (no rollback) {pull}24841[#24841] (issue: {issue}10708[#10708]) + +Scripting:: +* Scripting: Rename SearchScript.needsScores to needs_score {pull}25235[#25235] +* Scripting: Add optional context parameter to put stored script requests {pull}25014[#25014] +* Add New Security Script Settings {pull}24637[#24637] (issue: {issue}24532[#24532]) + +Search:: +* Rewrite search requests on the coordinating nodes {pull}25814[#25814] (issue: {issue}25791[#25791]) +* Ensure query resources are fetched asynchronously during rewrite {pull}25791[#25791] +* Introduce a new Rewriteable interface to streamline rewriting {pull}25788[#25788] +* Reduce the scope of `QueryRewriteContext` {pull}25787[#25787] +* Reduce the overhead of timeouts and low-level search cancellation. {pull}25776[#25776] +* Reduce profiling overhead. {pull}25772[#25772] (issue: {issue}24799[#24799]) +* Prevent `can_match` requests from sending to incompatible nodes {pull}25705[#25705] (issue: {issue}25704[#25704]) +* Add a shard filter search phase to pre-filter shards based on query rewriting {pull}25658[#25658] +* Ensure we rewrite common queries to `match_none` if possible {pull}25650[#25650] +* Limit the number of concurrent shard requests per search request {pull}25632[#25632] +* Add cluster name validation to RemoteClusterConnection {pull}25568[#25568] +* Speed up sorted scroll when the index sort matches the search sort {pull}25138[#25138] (issue: {issue}6720[#6720]) +* Leverage scorerSupplier when applicable. 
{pull}25109[#25109] +* Add Cross Cluster Search support for scroll searches {pull}25094[#25094] +* Track EWMA[1] of task execution time in search threadpool executor {pull}24989[#24989] (issue: {issue}24915[#24915]) +* Query range fields by doc values when they are expected to be more efficient than points {pull}24823[#24823] (issue: {issue}24314[#24314]) +* Search: Fairer balancing when routing searches by session ID {pull}24671[#24671] (issue: {issue}24642[#24642]) + +Sequence IDs:: +* Move primary term from ReplicationRequest to ConcreteShardRequest {pull}25822[#25822] +* Add reason to global checkpoint updates on replica {pull}25612[#25612] (issue: {issue}10708[#10708]) +* Introduce primary/replica mode for GlobalCheckPointTracker {pull}25468[#25468] +* Throw back replica local checkpoint on new primary {pull}25452[#25452] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Update global checkpoint when increasing primary term on replica {pull}25422[#25422] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Enable a long translog retention policy by default {pull}25294[#25294] (issues: {issue}10708[#10708], {issue}25147[#25147]) +* Introduce primary context {pull}25122[#25122] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Block older operations on primary term transition {pull}24779[#24779] (issue: {issue}10708[#10708]) + +Settings:: +* Add disk threshold settings validation {pull}25600[#25600] (issue: {issue}25560[#25560]) +* Enable cross-setting validation {pull}25560[#25560] (issue: {issue}25541[#25541]) +* Validate `transport.profiles.*` settings {pull}25508[#25508] +* Cleanup network / transport related settings {pull}25489[#25489] +* Emit settings deprecation logging at most once {pull}25457[#25457] +* IndexMetaData: Introduce internal format index setting {pull}25292[#25292] + +Snapshot/Restore:: +* Improves snapshot logging and snapshot deletion error handling {pull}25264[#25264] + +Stats:: +* Update `IndexShard#refreshMetric` via a `ReferenceManager.RefreshListener` {pull}25083[#25083] (issues: {issue}24806[#24806], {issue}25052[#25052]) + +Translog:: +* Translog file recovery should not rely on lucene commits {pull}25005[#25005] (issue: {issue}24950[#24950]) + +[float] +=== Bug Fixes + +Aggregations:: +* Fixes array out of bounds for value count agg {pull}26038[#26038] (issue: {issue}17379[#17379]) +* Aggregations bug: Significant_text fails on arrays of text. 
{pull}25030[#25030] (issue: {issue}25029[#25029]) + +Aliases:: +* mget with an alias shouldn't ignore alias routing {pull}25697[#25697] (issue: {issue}25696[#25696]) +* GET aliases should 404 if aliases are missing {pull}25043[#25043] (issue: {issue}24644[#24644]) + +Analysis:: +* Pre-configured shingle filter should disable graph analysis {pull}25853[#25853] (issue: {issue}25555[#25555]) + +Circuit Breakers:: +* Checks the circuit breaker before allocating bytes for a new big array {pull}25010[#25010] (issue: {issue}24790[#24790]) + +Core:: +* Release operation permit on thread-pool rejection {pull}25930[#25930] (issue: {issue}25863[#25863]) +* Node should start up despite of a lingering `.es_temp_file` {pull}21210[#21210] (issue: {issue}21007[#21007]) + +Discovery:: +* MasterNodeChangePredicate should use the node instance to detect master change {pull}25877[#25877] (issue: {issue}25471[#25471]) + +Engine:: +* Engine - do not index operations with seq# lower than the local checkpoint into lucene {pull}25827[#25827] (issues: {issue}1[#1], {issue}2[#2], {issue}25592[#25592]) + +Geo:: +* Fix typo in GeoUtils#isValidLongitude {pull}25121[#25121] + +Highlighting:: +* FastVectorHighlighter should not cache the field query globally {pull}25197[#25197] (issue: {issue}25171[#25171]) +* Higlighters: Fix MultiPhrasePrefixQuery rewriting {pull}25103[#25103] (issue: {issue}25088[#25088]) + +Index APIs:: +* Shrink API should ignore templates {pull}25380[#25380] (issue: {issue}25035[#25035]) +* Rollover max docs should only count primaries {pull}24977[#24977] (issue: {issue}24217[#24217]) + +Ingest:: +* Sort Processor does not have proper behavior with targetField {pull}25237[#25237] (issue: {issue}24133[#24133]) +* fix grok's pattern parsing to validate pattern names in expression {pull}25063[#25063] (issue: {issue}22831[#22831]) + +Inner Hits:: +* When fetching nested inner hits only access stored fields when needed {pull}25864[#25864] (issue: {issue}6[#6]) + +Internal:: +* Fix BytesReferenceStreamInput#skip with offset {pull}25634[#25634] +* Fix race condition in RemoteClusterConnection node supplier {pull}25432[#25432] +* Initialise empty lists in BaseTaskResponse constructor {pull}25290[#25290] +* Extract a common base class for scroll executions {pull}24979[#24979] (issue: {issue}16555[#16555]) +* Obey lock order if working with store to get metadata snapshots {pull}24787[#24787] (issue: {issue}24481[#24481]) +* Fix Version based BWC and set correct minCompatVersion {pull}24732[#24732] +* Fix `_field_caps` serialization in order to support cross cluster search {pull}24722[#24722] +* Avoid race when shutting down controller processes {pull}24579[#24579] + +Mapping:: +* Fix parsing of ip range queries. 
{pull}25768[#25768] (issue: {issue}25636[#25636]) +* Disable date field mapping changing {pull}25285[#25285] (issue: {issue}25271[#25271]) +* Correctly enable _all for older 5.x indices {pull}25087[#25087] (issue: {issue}25068[#25068]) +* token_count datatype should handle null value {pull}25046[#25046] (issue: {issue}24928[#24928]) +* keep _parent field while updating child type mapping {pull}24407[#24407] (issue: {issue}23381[#23381]) + +More Like This:: +* Pass over _routing value with more_like_this items to be retrieved {pull}24679[#24679] (issue: {issue}23699[#23699]) + +Nested Docs:: +* In case of a single type the _id field should be added to the nested document instead of _uid field {pull}25149[#25149] + +Network:: +* Ensure pending transport handlers are invoked for all channel failures {pull}25150[#25150] +* Notify onConnectionClosed rather than onNodeDisconnect to prune transport handlers {pull}24639[#24639] (issues: {issue}24557[#24557], {issue}24575[#24575], {issue}24632[#24632]) + +Packaging:: +* Exit Windows scripts promptly on failure {pull}25959[#25959] +* Pass config path as a system property {pull}25943[#25943] +* ES_HOME needs to be made absolute before attempt at traversal {pull}25865[#25865] +* Fix elasticsearch-keystore handling of path.conf {pull}25811[#25811] +* Stop disabling explicit GC {pull}25759[#25759] +* Avoid failing install if system-sysctl is masked {pull}25657[#25657] (issue: {issue}24234[#24234]) +* Get short path name for native controllers {pull}25344[#25344] +* When stopping via systemd only kill the JVM, not its control group {pull}25195[#25195] +* remove remaining references to scripts directory {pull}24771[#24771] +* Handle parentheses in batch file path {pull}24731[#24731] (issue: {issue}24712[#24712]) + +Parent/Child:: +* The default _parent field should not try to load global ordinals {pull}25851[#25851] (issue: {issue}25849[#25849]) + +Percolator:: +* Fix range queries with date range based on current time in percolator queries. {pull}24666[#24666] (issue: {issue}23921[#23921]) + +Plugin Lang Painless:: +* Painless: allow doubles to be casted to longs. {pull}25936[#25936] + +Plugin Repository Azure:: +* Make calls to CloudBlobContainer#exists privileged {pull}25937[#25937] (issue: {issue}25931[#25931]) + +Plugin Repository GCS:: +* Ensure that gcs client creation is privileged {pull}25938[#25938] (issue: {issue}25932[#25932]) + +Plugin Repository HDFS:: +* Upgrading HDFS Repository Plugin to use HDFS 2.8.1 Client {pull}25497[#25497] (issue: {issue}25450[#25450]) + +Plugin Repository S3:: +* Avoid SecurityException in repository-S3 on DefaultS3OutputStream.flush() {pull}25254[#25254] (issue: {issue}25192[#25192]) + +Plugins:: +* X-Pack plugin download fails on Windows desktop {pull}24570[#24570] + +Query DSL:: +* SpanNearQueryBuilder should return the inner clause when a single clause is provided {pull}25856[#25856] (issue: {issue}25630[#25630]) +* Refactor field expansion for match, multi_match and query_string query {pull}25726[#25726] (issues: {issue}25551[#25551], {issue}25556[#25556]) +* WrapperQueryBuilder should also rewrite the parsed query {pull}25480[#25480] + +REST:: +* Fix handling of invalid error trace parameter {pull}25785[#25785] (issue: {issue}25774[#25774]) +* Fix handling of exceptions thrown on HEAD requests {pull}25172[#25172] (issue: {issue}21125[#21125]) +* Fixed NPEs caused by requests without content. 
{pull}23497[#23497] (issue: {issue}24701[#24701]) +* Fix get mappings HEAD requests {pull}23192[#23192] (issue: {issue}21125[#21125]) + +Recovery:: +* Close translog view after primary-replica resync {pull}25862[#25862] (issue: {issue}24841[#24841]) + +Reindex API:: +* Reindex: don't duplicate _source parameter {pull}24629[#24629] (issue: {issue}24628[#24628]) +* Add qa module that tests reindex-from-remote against pre-5.0 versions of Elasticsearch {pull}24561[#24561] (issues: {issue}23828[#23828], {issue}24520[#24520]) + +Search:: +* Caching a MinDocQuery can lead to wrong results. {pull}25909[#25909] +* Fix random score generation when no seed is provided. {pull}25908[#25908] +* Merge FunctionScoreQuery and FiltersFunctionScoreQuery {pull}25889[#25889] (issues: {issue}15709[#15709], {issue}23628[#23628]) +* Respect cluster alias in `_index` aggs and queries {pull}25885[#25885] (issue: {issue}25606[#25606]) +* First increment shard stats before notifying and potentially sending response {pull}25818[#25818] +* Remove assertion about deviation when casting to a float. {pull}25806[#25806] (issue: {issue}25330[#25330]) +* Prevent skipping shards if a suggest builder is present {pull}25739[#25739] (issue: {issue}25658[#25658]) +* Ensure remote cluster alias is preserved in inner hits aggs {pull}25627[#25627] (issue: {issue}25606[#25606]) +* Do not search locally if remote index pattern resolves to no indices {pull}25436[#25436] (issue: {issue}25426[#25426]) +* Adds check for negative search request size {pull}25397[#25397] (issue: {issue}22530[#22530]) +* Make sure range queries are correctly profiled. {pull}25108[#25108] +* Fix RangeFieldMapper rangeQuery to properly handle relations {pull}24808[#24808] (issue: {issue}24744[#24744]) +* Fix ExpandSearchPhase when response contains no hits {pull}24688[#24688] (issue: {issue}24672[#24672]) + +Sequence IDs:: +* Fix pre-6.0 response to unknown replication actions {pull}25744[#25744] (issue: {issue}10708[#10708]) +* Track local checkpoint on primary immediately {pull}25434[#25434] (issues: {issue}10708[#10708], {issue}25355[#25355], {issue}25415[#25415]) +* Initialize max unsafe auto ID timestamp on shrink {pull}25356[#25356] (issues: {issue}10708[#10708], {issue}25355[#25355]) +* Use correct primary term for replicating NOOPs {pull}25128[#25128] +* Handle already closed while filling gaps {pull}25021[#25021] (issue: {issue}24925[#24925]) + +Settings:: +* Fix settings serialization to not serialize secure settings or not take the total size into account {pull}25323[#25323] +* Keystore CLI should use the AddFileKeyStoreCommand for files {pull}25298[#25298] +* Allow resetting settings that use an IP validator {pull}24713[#24713] (issue: {issue}24709[#24709]) + +Snapshot/Restore:: +* Snapshot/Restore: Ensure that shard failure reasons are correctly stored in CS {pull}25941[#25941] (issue: {issue}25878[#25878]) +* Output all empty snapshot info fields if in verbose mode {pull}25455[#25455] (issue: {issue}24477[#24477]) +* Remove redundant and broken MD5 checksum from repository-s3 {pull}25270[#25270] (issue: {issue}25269[#25269]) +* Consolidates the logic for cleaning up snapshots on master election {pull}24894[#24894] (issue: {issue}24605[#24605]) +* Removes completed snapshot from cluster state on master change {pull}24605[#24605] (issue: {issue}24452[#24452]) + +Stats:: +* _nodes/stats should not fail due to concurrent AlreadyClosedException {pull}25016[#25016] (issue: {issue}23099[#23099]) + +Suggesters:: +* Context suggester should filter doc 
values field {pull}25858[#25858] (issue: {issue}25404[#25404])
+
+[float]
+=== Regressions
+
+Highlighting::
+* Fix Fast Vector Highlighter NPE on match phrase prefix {pull}25116[#25116] (issue: {issue}25088[#25088])
+
+Search::
+* Always use DisjunctionMaxQuery to build cross fields disjunction {pull}25115[#25115] (issue: {issue}23966[#23966])
+
+//[float]
+//=== Known Issues
+
+[float]
+=== Upgrades
+
+Network::
+* Upgrade to Netty 4.1.13.Final {pull}25581[#25581] (issues: {issue}24729[#24729], {issue}6866[#6866])
+* Upgrade to Netty 4.1.11.Final {pull}24652[#24652]
+
+Upgrade API::
+* Improve stability and logging of TemplateUpgradeServiceIT tests {pull}25386[#25386] (issue: {issue}25382[#25382])
+
+[[release-notes-6.0.0-alpha2]]
+== {es} version 6.0.0-alpha2
+
+[float]
+[[breaking-6.0.0-alpha2]]
+=== Breaking Changes
+
+CRUD::
+* Deleting a document from a non-existing index no longer auto-creates that index: if the index does not exist, a delete document request will not create it {pull}24518[#24518] (issue: {issue}15425[#15425])
+
+Plugin Analysis ICU::
+* Upgrade icu4j to latest version {pull}24821[#24821]
+
+Plugin Repository S3::
+* Remove deprecated S3 settings {pull}24445[#24445]
+
+Scripting::
+* Remove script access to term statistics {pull}19462[#19462] (issue: {issue}19359[#19359])
+
+Watcher::
+* The watch `_status` field has been renamed to `status`, as underscores in
+field names will not be allowed.
+
+[float]
+=== Breaking Java Changes
+
+Aggregations::
+* Make Terms.Bucket an interface rather than an abstract class {pull}24492[#24492]
+* Compound order for histogram aggregations {pull}22343[#22343] (issues: {issue}14771[#14771], {issue}20003[#20003], {issue}23613[#23613])
+
+Plugins::
+* Drop name from TokenizerFactory {pull}24869[#24869]
+
+[float]
+=== Deprecations
+
+Settings::
+* Deprecate settings in .yml and .json {pull}24059[#24059] (issue: {issue}19391[#19391])
+
+[float]
+=== New Features
+
+Aggregations::
+* SignificantText aggregation - like significant_terms, but for text {pull}24432[#24432] (issue: {issue}23674[#23674])
+
+Internal::
+* Automatically adjust search threadpool queue_size {pull}23884[#23884] (issue: {issue}3890[#3890])
+
+Mapping::
+* Add new ip_range field type {pull}24433[#24433]
+
+Plugin Analysis ICU::
+* Add ICUCollationFieldMapper {pull}24126[#24126]
+
+[float]
+=== Enhancements
+
+Core::
+* Improve bootstrap checks error messages {pull}24548[#24548]
+
+Engine::
+* Move the IndexDeletionPolicy to be engine internal {pull}24930[#24930] (issue: {issue}10708[#10708])
+
+Internal::
+* Add assertions enabled helper {pull}24834[#24834]
+
+Java High Level REST Client::
+* Add doc_count to ParsedMatrixStats {pull}24952[#24952] (issue: {issue}24776[#24776])
+* Add fromXContent method to ClearScrollResponse {pull}24909[#24909]
+* ClearScrollRequest to implement ToXContentObject {pull}24907[#24907]
+* SearchScrollRequest to implement ToXContentObject {pull}24906[#24906] (issue: {issue}3889[#3889])
+* Add aggs parsers for high level REST Client {pull}24824[#24824] (issues: {issue}23965[#23965], {issue}23973[#23973], {issue}23974[#23974], {issue}24085[#24085], {issue}24160[#24160], {issue}24162[#24162], {issue}24182[#24182], {issue}24183[#24183], {issue}24208[#24208], {issue}24213[#24213], {issue}24239[#24239], {issue}24284[#24284], {issue}24312[#24312], {issue}24330[#24330], {issue}24365[#24365], {issue}24371[#24371], {issue}24442[#24442], {issue}24521[#24521], {issue}24524[#24524], {issue}24564[#24564], {issue}24583[#24583], {issue}24589[#24589], {issue}24648[#24648], {issue}24667[#24667], {issue}24675[#24675], {issue}24682[#24682], {issue}24700[#24700], {issue}24706[#24706], {issue}24717[#24717], {issue}24720[#24720], {issue}24738[#24738], {issue}24746[#24746], {issue}24789[#24789], {issue}24791[#24791], {issue}24794[#24794], {issue}24796[#24796], {issue}24822[#24822])
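+
+As an illustration of the new `ip_range` field type listed under New Features
+above, a minimal sketch follows (the index, type, and field names here are
+hypothetical, not taken from the changelog):
+
+[source,js]
+----
+PUT range_index
+{
+  "mappings": {
+    "doc": {
+      "properties": {
+        "client_range": { "type": "ip_range" }
+      }
+    }
+  }
+}
+
+PUT range_index/doc/1
+{
+  "client_range": "192.168.0.0/16"
+}
+----
+
+Values may also be supplied as an object with `gte`/`lte` bounds instead of
+CIDR notation.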
+
+Mapping::
+* Identify documents by their `_id`. {pull}24460[#24460]
+
+Packaging::
+* Set number of processes in systemd unit file {pull}24970[#24970] (issue: {issue}20874[#20874])
+
+Plugin Lang Painless::
+* Make Painless Compiler Use an Instance Per Context {pull}24972[#24972]
+* Make PainlessScript An Interface {pull}24966[#24966]
+
+Recovery::
+* Introduce primary context {pull}25031[#25031] (issue: {issue}10708[#10708])
+
+Scripting::
+* Add StatefulFactoryType as optional intermediate factory in script contexts {pull}24974[#24974] (issue: {issue}20426[#20426])
+* Make contexts available to ScriptEngine construction {pull}24896[#24896]
+* Make ScriptEngine.compile generic on the script context {pull}24873[#24873]
+* Add instance and compiled classes to script contexts {pull}24868[#24868]
+
+Search::
+* Eliminate array access in tight loops when profiling is enabled. {pull}24959[#24959]
+* Support Multiple Inner Hits on a Field Collapse Request {pull}24517[#24517]
+* Expand cross cluster search indices for search requests to the concrete index or to its aliases {pull}24502[#24502]
+
+Search Templates::
+* Add max concurrent searches to multi template search {pull}24255[#24255] (issues: {issue}20912[#20912], {issue}21907[#21907])
+
+Security::
+* Adapted indices resolution to use new `ignoreAliases` index option.
+* Added the `logstash_admin` role, which provides access
+to `.logstash-*` indices for managing configurations.
+
+Sequence IDs::
+* Fill gaps on primary promotion {pull}24945[#24945] (issue: {issue}10708[#10708])
+* Introduce clean transition on primary promotion {pull}24925[#24925] (issue: {issue}10708[#10708])
+* Guarantee that translog generations are seqNo conflict free {pull}24825[#24825] (issues: {issue}10708[#10708], {issue}24779[#24779])
+* Inline global checkpoints {pull}24513[#24513] (issue: {issue}10708[#10708])
+
+Snapshot/Restore::
+* Enhances get snapshots API to allow retrieving repository index only {pull}24477[#24477] (issue: {issue}24288[#24288])
+
+Watcher::
+* Watcher indices no longer use multiple types.
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* Terms aggregation should remap global ordinal buckets when a sub-aggregator is used to sort the terms {pull}24941[#24941] (issue: {issue}24788[#24788])
+* Correctly set doc_count when MovAvg "predicts" values on existing buckets {pull}24892[#24892] (issue: {issue}24327[#24327])
+* DateHistogram: Fix `extended_bounds` with `offset` {pull}23789[#23789] (issue: {issue}23776[#23776])
+* Fix ArrayIndexOutOfBoundsException when no ranges are specified in the query {pull}23241[#23241] (issue: {issue}22881[#22881])
+
+Analysis::
+* PatternAnalyzer should lowercase wildcard queries when `lowercase` is true. {pull}24967[#24967]
+
+Cache::
+* fix bug of weight computation {pull}24856[#24856]
+
+Core::
+* Fix cache expire after access {pull}24546[#24546]
+
+Graph::
+* Reinstated `_xpack/graph/_explore` as the correct graph endpoint.
+`_xpack/_graph/_explore` is deprecated and will be removed in v7.0.
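+
+A request against the reinstated endpoint might look as follows (a minimal
+sketch; the index and field names are hypothetical, and a real explore request
+would typically also specify `connections` and `controls`):
+
+[source,js]
+----
+POST my-index/_xpack/graph/_explore
+{
+  "query": { "match": { "message": "error" } },
+  "vertices": [ { "field": "user" } ]
+}
+----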
+ +Index APIs:: +* Validates updated settings on closed indices {pull}24487[#24487] (issue: {issue}23787[#23787]) + +Ingest:: +* Fix floating-point error when DateProcessor parses UNIX {pull}24947[#24947] +* add option for _ingest.timestamp to use new ZonedDateTime (5.x backport) {pull}24030[#24030] (issues: {issue}23168[#23168], {issue}23174[#23174]) + +Inner Hits:: +* Fix Source filtering in new field collapsing feature {pull}24068[#24068] (issue: {issue}24063[#24063]) + +Internal:: +* Ensure remote cluster is connected before fetching `_field_caps` {pull}24845[#24845] (issue: {issue}24763[#24763]) + +Machine Learning:: +* If the initial cluster state update to install the Machine Learning +Metadata fails, the update is now retried. + +Network:: +* Fix error message if an incompatible node connects {pull}24884[#24884] + +Plugins:: +* Fix plugin installation permissions {pull}24527[#24527] (issue: {issue}24480[#24480]) + +Scroll:: +* Fix single shard scroll within a cluster with nodes in version `>= 5.3` and `<= 5.3` {pull}24512[#24512] + +Search:: +* Fix script field sort returning Double.MAX_VALUE for all documents {pull}24942[#24942] (issue: {issue}24940[#24940]) +* Compute the took time of the query after the expand phase of field collapsing {pull}24902[#24902] (issue: {issue}24900[#24900]) + +Sequence IDs:: +* Handle primary failure handling replica response {pull}24926[#24926] (issue: {issue}24935[#24935]) + +Snapshot/Restore:: +* Fix inefficient (worst case exponential) loading of snapshot repository {pull}24510[#24510] (issue: {issue}24509[#24509]) + +Stats:: +* Avoid double decrement on current query counter {pull}24922[#24922] (issues: {issue}22996[#22996], {issue}24872[#24872]) +* Adjust available and free bytes to be non-negative on huge FSes {pull}24911[#24911] (issues: {issue}23093[#23093], {issue}24453[#24453]) + +Suggesters:: +* Fix context suggester to read values from keyword type field {pull}24200[#24200] (issue: {issue}24129[#24129]) + +//[float] +//=== Regressions + +//[float] +//=== Known Issues + +[[release-notes-6.0.0-alpha1]] +== {es} version 6.0.0-alpha1 + +[float] +[[breaking-6.0.0-alpha1]] +=== Breaking Changes + + +Allocation:: +* Remove `cluster.routing.allocation.snapshot.relocation_enabled` setting {pull}20994[#20994] + +Analysis:: +* Removing query-string parameters in `_analyze` API {pull}20704[#20704] (issue: {issue}20246[#20246]) + +CAT API:: +* Write -1 on unbounded queue in cat thread pool {pull}21342[#21342] (issue: {issue}21187[#21187]) + +CRUD:: +* Disallow `VersionType.FORCE` for GetRequest {pull}21079[#21079] (issue: {issue}20995[#20995]) +* Disallow `VersionType.FORCE` versioning for 6.x indices {pull}20995[#20995] (issue: {issue}20377[#20377]) + +Cluster:: +* No longer allow cluster name in data path {pull}20433[#20433] (issue: {issue}20391[#20391]) + +Core:: +* Simplify file store {pull}24402[#24402] (issue: {issue}24390[#24390]) +* Make boolean conversion strict {pull}22200[#22200] +* Remove the `default` store type. {pull}21616[#21616] +* Remove store throttling. 
{pull}21573[#21573] + +Geo:: +* Remove deprecated geo search features {pull}22876[#22876] +* Reduce GeoDistance Insanity {pull}19846[#19846] + +Index APIs:: +* Open/Close index api to allow_no_indices by default {pull}24401[#24401] (issues: {issue}24031[#24031], {issue}24341[#24341]) +* Remove support for controversial `ignore_unavailable` and `allow_no_indices` from indices exists api {pull}20712[#20712] + +Index Templates:: +* Allows multiple patterns to be specified for index templates {pull}21009[#21009] (issue: {issue}20690[#20690]) + +Java API:: +* Enforce Content-Type requirement on the rest layer and remove deprecated methods {pull}23146[#23146] (issue: {issue}19388[#19388]) + +Mapping:: +* Enforce at most one type. {pull}24428[#24428] (issue: {issue}24317[#24317]) +* Disallow `include_in_all` for 6.0+ indices {pull}22970[#22970] (issue: {issue}22923[#22923]) +* Disable _all by default, disallow configuring _all on 6.0+ indices {pull}22144[#22144] (issues: {issue}19784[#19784], {issue}20925[#20925], {issue}21341[#21341]) +* Throw an exception on unrecognized "match_mapping_type" {pull}22090[#22090] (issue: {issue}17285[#17285]) + +Network:: +* Remove blocking TCP clients and servers {pull}22639[#22639] +* Remove `modules/transport_netty_3` in favor of `netty_4` {pull}21590[#21590] +* Remove LocalTransport in favor of MockTcpTransport {pull}20695[#20695] + +Packaging:: +* Remove customization of ES_USER and ES_GROUP {pull}23989[#23989] (issue: {issue}23848[#23848]) + +Percolator:: +* Remove deprecated percolate and mpercolate apis {pull}22331[#22331] + +Plugin Delete By Query:: +* Require explicit query in _delete_by_query API {pull}23632[#23632] (issue: {issue}23629[#23629]) + +Plugin Discovery EC2:: +* Ec2 Discovery: Cleanup deprecated settings {pull}24150[#24150] +* Discovery EC2: Remove region setting {pull}23991[#23991] (issue: {issue}22758[#22758]) +* AWS Plugins: Remove signer type setting {pull}23984[#23984] (issue: {issue}22599[#22599]) + +Plugin Lang JS:: +* Remove lang-python and lang-javascript {pull}20734[#20734] (issue: {issue}20698[#20698]) + +Plugin Mapper Attachment:: +* Remove mapper attachments plugin {pull}20416[#20416] (issue: {issue}18837[#18837]) + +Plugin Repository Azure:: +* Remove global `repositories.azure` settings {pull}23262[#23262] (issues: {issue}22800[#22800], {issue}22856[#22856]) +* Remove auto creation of container for azure repository {pull}22858[#22858] (issue: {issue}22857[#22857]) + +Plugin Repository S3:: +* S3 Repository: Cleanup deprecated settings {pull}24097[#24097] +* S3 Repository: Remove region setting {pull}22853[#22853] (issue: {issue}22758[#22758]) +* S3 Repository: Remove bucket auto create {pull}22846[#22846] (issue: {issue}22761[#22761]) +* S3 Repository: Remove env var and sysprop credentials support {pull}22842[#22842] + +Query DSL:: +* Remove deprecated `minimum_number_should_match` in BoolQueryBuilder {pull}22416[#22416] +* Remove support for empty queries {pull}22092[#22092] (issue: {issue}17624[#17624]) +* Remove deprecated query names: in, geo_bbox, mlt, fuzzy_match and match_fuzzy {pull}21852[#21852] +* The `terms` query should always map to a Lucene `TermsQuery`. 
{pull}21786[#21786] +* Be strict when parsing values searching for booleans {pull}21555[#21555] (issue: {issue}21545[#21545]) +* Remove collect payloads parameter {pull}20385[#20385] + +REST:: +* Remove ldjson support and document ndjson for bulk/msearch {pull}23049[#23049] (issue: {issue}23025[#23025]) +* Enable strict duplicate checks for all XContent types {pull}22225[#22225] (issues: {issue}19614[#19614], {issue}22073[#22073]) +* Enable strict duplicate checks for JSON content {pull}22073[#22073] (issue: {issue}19614[#19614]) +* Remove lenient stats parsing {pull}21417[#21417] (issues: {issue}20722[#20722], {issue}21410[#21410]) +* Remove allow unquoted JSON {pull}20388[#20388] (issues: {issue}17674[#17674], {issue}17801[#17801]) +* Remove FORCE version_type {pull}20377[#20377] (issue: {issue}19769[#19769]) + +Scripting:: +* Make dates be ReadableDateTimes in scripts {pull}22948[#22948] (issue: {issue}22875[#22875]) +* Remove groovy scripting language {pull}21607[#21607] + +Search:: +* ProfileResult and CollectorResult should print machine readable timing information {pull}22561[#22561] +* Remove indices query {pull}21837[#21837] (issue: {issue}17710[#17710]) +* Remove ignored type parameter in search_shards api {pull}21688[#21688] + +Security:: +* A new bootstrap check enforces that default passwords are disabled for the +built-in users when running in +{ref}/bootstrap-checks.html#_development_vs_production_mode[production mode]. +You must set `xpack.security.authc.accept_default_password` to `false` in your +`elasticsearch.yml`. For more information, see <> and +{stack-ov}/setting-up-authentication.html[User authentication]. +* A new configuration setting is available to disable support for the default +password (_"changeme"_). For more information, see +{stack-ov}/built-in-users.html#disabling-default-password[Disable default password functionality]. + +Sequence IDs:: +* Change certain replica failures not to fail the replica shard {pull}22874[#22874] (issue: {issue}10708[#10708]) + +Shadow Replicas:: +* Remove shadow replicas {pull}23906[#23906] (issue: {issue}22024[#22024]) + +Watcher:: +* The built-in HTTP client used in webhooks, the http input and the http email attachment has been replaced. +This results in the need to always escape all parts of an URL. +* The new built-in HTTP client also enforces a maximum request size, which defaults to 10mb. + +[float] +=== Breaking Java Changes + +Java API:: +* Java api: ActionRequestBuilder#execute to return a PlainActionFuture {pull}24415[#24415] (issues: {issue}24412[#24412], {issue}9201[#9201]) + +Network:: +* Simplify TransportAddress {pull}20798[#20798] + +[float] +=== Deprecations + +Index Templates:: +* Restore deprecation warning for invalid match_mapping_type values {pull}22304[#22304] + +Internal:: +* Deprecate XContentType auto detection methods in XContentFactory {pull}22181[#22181] (issue: {issue}19388[#19388]) + +[float] +=== New Features + +Core:: +* Enable index-time sorting {pull}24055[#24055] (issue: {issue}6720[#6720]) + +[float] +=== Enhancements + +Aggregations:: +* Agg builder accessibility fixes {pull}24323[#24323] +* Remove support for the include/pattern syntax. 
{pull}23141[#23141] (issue: {issue}22933[#22933]) +* Promote longs to doubles when a terms agg mixes decimal and non-decimal numbers {pull}22449[#22449] (issue: {issue}22232[#22232]) + +Analysis:: +* Match- and MultiMatchQueryBuilder should only allow setting analyzer on string values {pull}23684[#23684] (issue: {issue}21665[#21665]) + +Bulk:: +* Simplify bulk request execution {pull}20109[#20109] + +CRUD:: +* Added validation for upsert request {pull}24282[#24282] (issue: {issue}16671[#16671]) + +Cluster:: +* Separate publishing from applying cluster states {pull}24236[#24236] +* Adds cluster state size to /_cluster/state response {pull}23440[#23440] (issue: {issue}3415[#3415]) + +Core:: +* Remove connect SocketPermissions from core {pull}22797[#22797] +* Add repository-url module and move URLRepository {pull}22752[#22752] (issue: {issue}22116[#22116]) +* Remove accept SocketPermissions from core {pull}22622[#22622] (issue: {issue}22116[#22116]) +* Move IfConfig.logIfNecessary call into bootstrap {pull}22455[#22455] (issue: {issue}22116[#22116]) +* Remove artificial default processors limit {pull}20874[#20874] (issue: {issue}20828[#20828]) +* Simplify write failure handling {pull}19105[#19105] (issue: {issue}20109[#20109]) + +Engine:: +* Fill missing sequence IDs up to max sequence ID when recovering from store {pull}24238[#24238] (issue: {issue}10708[#10708]) +* Use sequence numbers to identify out of order delivery in replicas & recovery {pull}24060[#24060] (issue: {issue}10708[#10708]) +* Add replica ops with version conflict to translog {pull}22626[#22626] +* Clarify global checkpoint recovery {pull}21934[#21934] (issue: {issue}21254[#21254]) + +Internal:: +* Try to convince the JVM not to lose stacktraces {pull}24426[#24426] (issue: {issue}24376[#24376]) +* Make document write requests immutable {pull}23038[#23038] + +Java High Level REST Client:: +* Add info method to High Level Rest client {pull}23350[#23350] +* Add support for named xcontent parsers to high level REST client {pull}23328[#23328] +* Add BulkRequest support to High Level Rest client {pull}23312[#23312] +* Add UpdateRequest support to High Level Rest client {pull}23266[#23266] +* Add delete API to the High Level Rest Client {pull}23187[#23187] +* Add Index API to High Level Rest Client {pull}23040[#23040] +* Add get/exists method to RestHighLevelClient {pull}22706[#22706] +* Add fromxcontent methods to delete response {pull}22680[#22680] (issue: {issue}22229[#22229]) +* Add REST high level client gradle submodule and first simple method {pull}22371[#22371] + +Java REST Client:: +* Wrap rest httpclient with doPrivileged blocks {pull}22603[#22603] (issue: {issue}22116[#22116]) + +Mapping:: +* Date detection should not rely on a hardcoded set of characters. 
{pull}22171[#22171] (issue: {issue}1694[#1694]) + +Network:: +* Isolate SocketPermissions to Netty {pull}23057[#23057] +* Wrap netty accept/connect ops with doPrivileged {pull}22572[#22572] (issue: {issue}22116[#22116]) +* Replace Socket, ServerSocket, and HttpServer usages in tests with mocksocket versions {pull}22287[#22287] (issue: {issue}22116[#22116]) + +Plugin Discovery EC2:: +* Read ec2 discovery address from aws instance tags {pull}22743[#22743] (issue: {issue}22566[#22566]) + +Plugin Repository HDFS:: +* Add doPrivilege blocks for socket connect ops in repository-hdfs {pull}22793[#22793] (issue: {issue}22116[#22116]) + +Plugins:: +* Add doPrivilege blocks for socket connect operations in plugins {pull}22534[#22534] (issue: {issue}22116[#22116]) + +Recovery:: +* Peer Recovery: remove maxUnsafeAutoIdTimestamp hand off {pull}24243[#24243] (issue: {issue}24149[#24149]) +* Introduce sequence-number-based recovery {pull}22484[#22484] (issue: {issue}10708[#10708]) + +Search:: +* Add parsing from xContent to Suggest {pull}22903[#22903] +* Add parsing from xContent to ShardSearchFailure {pull}22699[#22699] + +Sequence IDs:: +* Block global checkpoint advances when recovering {pull}24404[#24404] (issue: {issue}10708[#10708]) +* Add primary term to doc write response {pull}24171[#24171] (issue: {issue}10708[#10708]) +* Preserve multiple translog generations {pull}24015[#24015] (issue: {issue}10708[#10708]) +* Introduce translog generation rolling {pull}23606[#23606] (issue: {issue}10708[#10708]) +* Replicate write failures {pull}23314[#23314] +* Introduce sequence-number-aware translog {pull}22822[#22822] (issue: {issue}10708[#10708]) +* Introduce translog no-op {pull}22291[#22291] (issue: {issue}10708[#10708]) +* Tighten sequence numbers recovery {pull}22212[#22212] (issue: {issue}10708[#10708]) +* Add BWC layer to seq no infra and enable BWC tests {pull}22185[#22185] (issue: {issue}21670[#21670]) +* Add internal _primary_term doc values field, fix _seq_no indexing {pull}21637[#21637] (issues: {issue}10708[#10708], {issue}21480[#21480]) +* Add global checkpoint to translog checkpoints {pull}21254[#21254] +* Sequence numbers commit data for Lucene uses Iterable interface {pull}20793[#20793] (issue: {issue}10708[#10708]) +* Simplify GlobalCheckpointService and properly hook it for cluster state updates {pull}20720[#20720] + +Stats:: +* Expose disk usage estimates in nodes stats {pull}22081[#22081] (issue: {issue}8686[#8686]) + +Store:: +* Remote support for lucene versions without checksums {pull}24021[#24021] + +Suggesters:: +* Remove deprecated _suggest endpoint {pull}22203[#22203] (issue: {issue}20305[#20305]) + +Task Manager:: +* Add descriptions to bulk tasks {pull}22059[#22059] (issue: {issue}21768[#21768]) + +[float] +=== Bug Fixes + +Ingest:: +* Remove support for Visio and potm files {pull}22079[#22079] (issue: {issue}22077[#22077]) + +Inner Hits:: +* If size / offset are out of bounds just do a plain count {pull}20556[#20556] (issue: {issue}20501[#20501]) + +Internal:: +* Fix handling of document failure exception in InternalEngine {pull}22718[#22718] + +Plugin Ingest Attachment:: +* Add missing mime4j library {pull}22764[#22764] (issue: {issue}22077[#22077]) + +Plugin Repository S3:: +* Wrap getCredentials() in a doPrivileged() block {pull}23297[#23297] (issues: {issue}22534[#22534], {issue}23271[#23271]) + +Sequence IDs:: +* Avoid losing ops in file-based recovery {pull}22945[#22945] (issue: {issue}22484[#22484]) + +Snapshot/Restore:: +* Keep snapshot restore state and routing 
table in sync {pull}20836[#20836] (issue: {issue}19774[#19774]) + +Translog:: +* Fix Translog.Delete serialization for sequence numbers {pull}22543[#22543] + +Watcher:: +* The HTTP client respects timeouts now and does not get stuck leading to stuck watches. + +[float] +=== Regressions + +Bulk:: +* Only re-parse operation if a mapping update was needed {pull}23832[#23832] (issue: {issue}23665[#23665]) + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Core:: +* Upgrade to a Lucene 7 snapshot {pull}24089[#24089] (issues: {issue}23966[#23966], {issue}24086[#24086], {issue}24087[#24087], {issue}24088[#24088]) + +Plugin Ingest Attachment:: +* Update to Tika 1.14 {pull}21591[#21591] (issue: {issue}20390[#20390]) + +[[release-notes-6.0.0-alpha1-5x]] +== {es} version 6.0.0-alpha1 (Changes previously released in 5.x) + +The changes listed below were first released in the 5.x series. Changes +released for the first time in Elasticsearch 6.0.0-alpha1 are listed in +<>. + +[float] +[[breaking-6.0.0-alpha1-5x]] +=== Breaking Changes + +Aliases:: +* Validate alias names the same as index names {pull}20771[#20771] (issue: {issue}20748[#20748]) + +CRUD:: +* Fixed naming inconsistency for fields/stored_fields in the APIs {pull}20166[#20166] (issues: {issue}18943[#18943], {issue}20155[#20155]) + +Core:: +* Add system call filter bootstrap check {pull}21940[#21940] +* Remove ignore system bootstrap checks {pull}20511[#20511] + +Internal:: +* `_flush` should block by default {pull}20597[#20597] (issue: {issue}20569[#20569]) + +Packaging:: +* Rename service.bat to elasticsearch-service.bat {pull}20496[#20496] (issue: {issue}17528[#17528]) + +Plugin Lang Painless:: +* Remove all date 'now' methods from Painless {pull}20766[#20766] (issue: {issue}20762[#20762]) + +Query DSL:: +* Fix name of `enabled_position_increments` {pull}22895[#22895] + +REST:: +* Change separator for shards preference {pull}20786[#20786] (issues: {issue}20722[#20722], {issue}20769[#20769]) + +Search:: +* Remove DFS_QUERY_AND_FETCH as a search type {pull}22787[#22787] + +Settings:: +* Remove support for default settings {pull}24093[#24093] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074]) + +[float] +=== Breaking Java Changes + +Aggregations:: +* Move getProperty method out of MultiBucketsAggregation.Bucket interface {pull}23988[#23988] +* Remove getProperty method from Aggregations interface and impl {pull}23972[#23972] +* Move getProperty method out of Aggregation interface {pull}23949[#23949] + +Allocation:: +* Cluster Explain API uses the allocation process to explain shard allocation decisions {pull}22182[#22182] (issues: {issue}20347[#20347], {issue}20634[#20634], {issue}21103[#21103], {issue}21662[#21662], {issue}21691[#21691]) + +Cluster:: +* Remove PROTO-based custom cluster state components {pull}22336[#22336] (issue: {issue}21868[#21868]) + +Core:: +* Remove ability to plug-in TransportService {pull}20505[#20505] + +Discovery:: +* Remove pluggability of ElectMasterService {pull}21031[#21031] + +Exceptions:: +* Remove `IndexTemplateAlreadyExistsException` and `IndexShardAlreadyExistsException` {pull}21539[#21539] (issue: {issue}21494[#21494]) +* Replace IndexAlreadyExistsException with ResourceAlreadyExistsException {pull}21494[#21494] + +Ingest:: +* Change type of ingest doc meta-data field 'TIMESTAMP' to `Date` {pull}22234[#22234] (issue: {issue}22074[#22074]) + +Internal:: +* Replace SearchExtRegistry with namedObject {pull}22492[#22492] +* Replace Suggesters with namedObject 
{pull}22491[#22491] +* Consolidate the last easy parser construction {pull}22095[#22095] +* Introduce XContentParser#namedObject {pull}22003[#22003] +* Pass executor name to request interceptor to support async intercept calls {pull}21089[#21089] +* Remove TransportService#registerRequestHandler leniency {pull}20469[#20469] (issue: {issue}20468[#20468]) + +Java API:: +* Fold InternalSearchHits and friends into their interfaces {pull}23042[#23042] + +Network:: +* Remove HttpServer and HttpServerAdapter in favor of a simple dispatch method {pull}22636[#22636] (issue: {issue}18482[#18482]) +* Unguice Transport and friends {pull}20526[#20526] + +Plugins:: +* Deguice rest handlers {pull}22575[#22575] +* Plugins: Replace Rest filters with RestHandler wrapper {pull}21905[#21905] +* Plugins: Remove support for onModule {pull}21416[#21416] +* Cleanup sub fetch phase extension point {pull}20382[#20382] + +Query DSL:: +* Resolve index names in indices_boost {pull}21393[#21393] (issue: {issue}4756[#4756]) + +Scripting:: +* Refactor ScriptType to be a Top-Level Class {pull}21136[#21136] + +Search:: +* Remove QUERY_AND_FETCH search type {pull}22996[#22996] +* Cluster search shards improvements: expose ShardId, adjust visibility of some members {pull}21752[#21752] + +[float] +=== Deprecations + +Java API:: +* Add BulkProcessor methods with XContentType parameter {pull}23078[#23078] (issue: {issue}22691[#22691]) +* Deprecate and remove "minimumNumberShouldMatch" in BoolQueryBuilder {pull}22403[#22403] + +Plugin Repository S3:: +* S3 Repository: Deprecate remaining `repositories.s3.*` settings {pull}24144[#24144] (issue: {issue}24143[#24143]) +* Deprecate specifying credentials through env vars, sys props, and remove profile files {pull}22567[#22567] (issues: {issue}21041[#21041], {issue}22479[#22479]) + +Query DSL:: +* Add deprecation logging message for 'fuzzy' query {pull}20993[#20993] (issue: {issue}15760[#15760]) + +REST:: +* Optionally require a valid content type for all rest requests with content {pull}22691[#22691] (issue: {issue}19388[#19388]) + +Scripting:: +* Change Namespace for Stored Script to Only Use Id {pull}22206[#22206] + +Shadow Replicas:: +* Add a deprecation notice to shadow replicas {pull}22647[#22647] (issue: {issue}22024[#22024]) + +Stats:: +* Deprecate _field_stats endpoint {pull}23914[#23914] + +[float] +=== New Features + +Aggregations:: +* Initial version of an adjacency matrix using the Filters aggregation {pull}22239[#22239] (issue: {issue}22169[#22169]) + +Analysis:: +* Adds pattern keyword marker filter support {pull}23600[#23600] (issue: {issue}4877[#4877]) +* Expose WordDelimiterGraphTokenFilter {pull}23327[#23327] (issue: {issue}23104[#23104]) +* Synonym Graph Support (LUCENE-6664) {pull}21517[#21517] +* Expose Lucenes Ukrainian analyzer {pull}21176[#21176] (issue: {issue}19433[#19433]) + +CAT API:: +* Provides a cat api endpoint for templates. {pull}20545[#20545] (issue: {issue}20467[#20467]) + +CRUD:: +* Allow an index to be partitioned with custom routing {pull}22274[#22274] (issue: {issue}21585[#21585]) + +Highlighting:: +* Integrate UnifiedHighlighter {pull}21621[#21621] (issue: {issue}21376[#21376]) + +Index APIs:: +* Add FieldCapabilities (_field_caps) API {pull}23007[#23007] (issue: {issue}22438[#22438]) + +Ingest:: +* introduce KV Processor in Ingest Node {pull}22272[#22272] (issue: {issue}22222[#22222]) + +Mapping:: +* Add the ability to set a normalizer on keyword fields. 
{pull}21919[#21919] (issue: {issue}18064[#18064]) +* Add RangeFieldMapper for numeric and date range types {pull}21002[#21002] (issue: {issue}20999[#20999]) + +Plugin Discovery File:: +* File-based discovery plugin {pull}20394[#20394] (issue: {issue}20323[#20323]) + +Query DSL:: +* Add "all fields" execution mode to simple_query_string query {pull}21341[#21341] (issues: {issue}19784[#19784], {issue}20925[#20925]) +* Add support for `quote_field_suffix` to `simple_query_string`. {pull}21060[#21060] (issue: {issue}18641[#18641]) +* Add "all field" execution mode to query_string query {pull}20925[#20925] (issue: {issue}19784[#19784]) + +Reindex API:: +* Add automatic parallelization support to reindex and friends {pull}20767[#20767] (issue: {issue}20624[#20624]) + +Search:: +* Introduce incremental reduction of TopDocs {pull}23946[#23946] +* Add federated cross cluster search capabilities {pull}22502[#22502] (issue: {issue}21473[#21473]) +* Add field collapsing for search request {pull}22337[#22337] (issue: {issue}21833[#21833]) + +Settings:: +* Add infrastructure for elasticsearch keystore {pull}22335[#22335] + +Similarities:: +* Adds boolean similarity to Elasticsearch {pull}23637[#23637] (issue: {issue}6731[#6731]) + +[float] +=== Enhancements + +Aggregations:: +* Add `count` to rest output of `geo_centroid` {pull}24387[#24387] (issue: {issue}24366[#24366]) +* Allow scripted metric agg to access `_score` {pull}24295[#24295] +* Add BucketMetricValue interface {pull}24188[#24188] +* Move aggs CommonFields and TYPED_KEYS_DELIMITER from InternalAggregation to Aggregation {pull}23987[#23987] +* Use ParseField for aggs CommonFields rather than String {pull}23717[#23717] +* Share XContent rendering code in terms aggs {pull}23680[#23680] +* Add unit tests for ParentToChildAggregator {pull}23305[#23305] (issue: {issue}22278[#22278]) +* First step towards incremental reduction of query responses {pull}23253[#23253] +* `value_type` is useful regardless of scripting. 
+
+Aliases::
+* Handle multiple aliases in _cat/aliases api {pull}23698[#23698] (issue: {issue}23661[#23661])
+
+Allocation::
+* Trigger replica recovery restarts by master when primary relocation completes {pull}23926[#23926] (issue: {issue}23904[#23904])
+* Makes the same_shard host dynamically updatable {pull}23397[#23397] (issue: {issue}22992[#22992])
+* Include stale replica shard info when explaining an unassigned primary {pull}22826[#22826]
+* Adds setting level to allocation decider explanations {pull}22268[#22268] (issue: {issue}21771[#21771])
+* Improves allocation decider decision explanation messages {pull}21771[#21771]
+* Prepares allocator decision objects for use with the allocation explain API {pull}21691[#21691]
+* Balance step in BalancedShardsAllocator for a single shard {pull}21103[#21103]
+* Process more expensive allocation deciders last {pull}20724[#20724] (issue: {issue}12815[#12815])
+* Separates decision making from decision application in BalancedShardsAllocator {pull}20634[#20634]
+
+Analysis::
+* Support Keyword type in Analyze API {pull}23161[#23161]
+* Expose FlattenGraphTokenFilter {pull}22643[#22643]
+* Analyze API Position Length Support {pull}22574[#22574]
+* Remove AnalysisService and reduce it to a simple name to analyzer mapping {pull}20627[#20627] (issues: {issue}19827[#19827], {issue}19828[#19828])
+
+CAT API::
+* Adding built-in sorting capability to _cat apis (example below). {pull}20658[#20658] (issue: {issue}16975[#16975])
+* Add health status parameter to cat indices API {pull}20393[#20393]
+
+CRUD::
+* Use correct block levels for TRA subclasses {pull}22224[#22224]
+* Make index and delete operation execute as a single bulk item {pull}21964[#21964]
+
+Cache::
+* Do not cache term queries. {pull}21566[#21566] (issues: {issue}16031[#16031], {issue}20116[#20116])
+* Parse alias filters on the coordinating node {pull}20916[#20916]
+
+Circuit Breakers::
+* Closing a ReleasableBytesStreamOutput closes the underlying BigArray {pull}23941[#23941]
+* Add used memory amount to CircuitBreakingException message (#22521) {pull}22693[#22693] (issue: {issue}22521[#22521])
+* Cluster Settings Updates should not trigger circuit breakers. {pull}20827[#20827]
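+
+The _cat sorting entry above is driven by the `s` query parameter; a small
+sketch (the chosen columns are illustrative):
+
+[source,js]
+--------------------------------------------------
+GET /_cat/indices?v&s=store.size:desc,index
+--------------------------------------------------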
+
+Cluster::
+* Extract a common base class to allow services to listen to remote cluster config updates {pull}24367[#24367]
+* Prevent nodes from joining if newer indices exist in the cluster {pull}23843[#23843]
+* Connect to new nodes concurrently {pull}22984[#22984] (issue: {issue}22828[#22828])
+* Keep NodeConnectionsService in sync with current nodes in the cluster state {pull}22509[#22509]
+* Add a generic way of checking version before serializing custom cluster object {pull}22376[#22376] (issue: {issue}22313[#22313])
+* Add validation for supported index version on node join, restore, upgrade & open index {pull}21830[#21830] (issue: {issue}21670[#21670])
+* Let ClusterStateObserver only hold onto state that's needed for change detection {pull}21631[#21631] (issue: {issue}21568[#21568])
+* Cache successful shard deletion checks {pull}21438[#21438]
+* Remove mutable status field from cluster state {pull}21379[#21379]
+* Skip shard management code when updating cluster state on client/tribe nodes {pull}20731[#20731]
+* Add clusterUUID to RestMainAction output {pull}20503[#20503]
+
+Core::
+* Regex upgrades {pull}24316[#24316] (issue: {issue}24226[#24226])
+* Detect remnants of path.data/default.path.data bug {pull}24099[#24099] (issues: {issue}23981[#23981], {issue}24052[#24052], {issue}24074[#24074], {issue}24093[#24093])
+* Await termination after shutting down executors {pull}23889[#23889]
+* Add early-access check {pull}23743[#23743] (issue: {issue}23668[#23668])
+* Adapter action future should restore interrupts {pull}23618[#23618] (issue: {issue}23617[#23617])
+* Disable bootstrap checks for single-node discovery {pull}23598[#23598] (issues: {issue}23585[#23585], {issue}23595[#23595])
+* Enable explicitly enforcing bootstrap checks {pull}23585[#23585] (issue: {issue}21864[#21864])
+* Add equals/hashcode method to ReplicationResponse {pull}23215[#23215]
+* Simplify ElasticsearchException rendering as a XContent {pull}22611[#22611]
+* Remove setLocalNode from ClusterService and TransportService {pull}22608[#22608]
+* Rename bootstrap.seccomp to bootstrap.system_call_filter {pull}22226[#22226] (issue: {issue}21940[#21940])
+* Cleanup random stats serialization code {pull}22223[#22223]
+* Avoid corruption when deserializing booleans {pull}22152[#22152]
+* Reduce memory pressure when sending large terms queries. {pull}21776[#21776]
+* Install a security manager on startup {pull}21716[#21716]
+* Log node ID on startup {pull}21673[#21673]
+* Ensure source filtering automatons are only compiled once {pull}20857[#20857] (issue: {issue}20839[#20839])
+* Improve scheduling fairness when batching cluster state changes with equal priority {pull}20775[#20775] (issue: {issue}20768[#20768])
+* Add production warning for pre-release builds {pull}20674[#20674]
+* Add serial collector bootstrap check {pull}20558[#20558]
+* Do not log full bootstrap checks exception {pull}19989[#19989]
+
+Dates::
+* Improve error handling for epoch format parser with time zone (#22621) {pull}23689[#23689]
+
+Discovery::
+* Introduce single-node discovery {pull}23595[#23595]
+* UnicastZenPing shouldn't ping the address of the local node {pull}23567[#23567]
+* MasterFaultDetection can start after the initial cluster state has been processed {pull}23037[#23037] (issue: {issue}22828[#22828])
+* Simplify Unicast Zen Ping {pull}22277[#22277] (issues: {issue}19370[#19370], {issue}21739[#21739], {issue}22120[#22120], {issue}22194[#22194])
+* Prefer joining node with conflicting transport address when becoming master {pull}22134[#22134] (issues: {issue}22049[#22049], {issue}22120[#22120])
+
+Engine::
+* Engine: store maxUnsafeAutoIdTimestamp in commit {pull}24149[#24149]
+* Replace EngineClosedException with AlreadyClosedException {pull}22631[#22631]
+
+Exceptions::
+* Add BWC layer for Exceptions {pull}21694[#21694] (issue: {issue}21656[#21656])
+
+Geo::
+* Optimize geo-distance sorting. {pull}20596[#20596] (issue: {issue}20450[#20450])
+
+Highlighting::
+* Add support for fragment_length in the unified highlighter {pull}23431[#23431]
+* Add BreakIteratorBoundaryScanner support {pull}23248[#23248]
+
+Index APIs::
+* Open and close index to honour allow_no_indices option {pull}24222[#24222] (issue: {issue}24031[#24031])
+* Wildcard cluster names for cross cluster search {pull}23985[#23985] (issue: {issue}23893[#23893])
+* Indexing: Add shard id to indexing operation listener {pull}22606[#22606]
+* Better error when can't auto create index {pull}22488[#22488] (issues: {issue}21448[#21448], {issue}22435[#22435])
+* Add date-math support to `_rollover` {pull}20709[#20709]
+
+Ingest::
+* Lazy load the geoip databases {pull}23337[#23337]
+* add `ignore_missing` flag to ingest plugins {pull}22273[#22273]
+* Added ability to remove pipelines via wildcards (#22149) {pull}22191[#22191] (issue: {issue}22149[#22149])
+* Enables the ability to inject serialized json fields into root of document {pull}22179[#22179] (issue: {issue}21898[#21898])
+* compile ScriptProcessor inline scripts when creating ingest pipelines {pull}21858[#21858] (issue: {issue}21842[#21842])
+* add `ignore_missing` option to SplitProcessor {pull}20982[#20982] (issues: {issue}19995[#19995], {issue}20840[#20840])
+* add ignore_missing option to convert,trim,lowercase,uppercase,grok,rename {pull}20194[#20194] (issue: {issue}19995[#19995])
+* introduce the JSON Processor {pull}20128[#20128] (issue: {issue}20052[#20052])
+
+Internal::
+* Add cross cluster support to `_field_caps` {pull}24463[#24463] (issue: {issue}24334[#24334])
+* Log JVM arguments on startup {pull}24451[#24451]
+* Preserve cluster alias throughout search execution to lookup nodes by cluster and ID {pull}24438[#24438]
+* Move RemoteClusterService into TransportService {pull}24424[#24424]
+* Enum related performance additions. {pull}24274[#24274] (issue: {issue}24226[#24226])
+* Add a dedicated TransportRemoteInfoAction for consistency {pull}24040[#24040] (issue: {issue}23969[#23969])
+* Simplify sorted top docs merging in SearchPhaseController {pull}23881[#23881]
+* Synchronized CollapseTopFieldDocs with Lucene's relatives {pull}23854[#23854]
+* Cleanup SearchPhaseController interface {pull}23844[#23844]
+* Do not create String instances in 'Strings' methods accepting StringBuilder {pull}22907[#22907]
+* Improve connection closing in `RemoteClusterConnection` {pull}22804[#22804] (issue: {issue}22803[#22803])
+* Remove some more usages of ParseFieldMatcher {pull}22437[#22437] (issues: {issue}19552[#19552], {issue}22130[#22130])
+* Remove some more usages of ParseFieldMatcher {pull}22398[#22398] (issues: {issue}19552[#19552], {issue}22130[#22130])
+* Remove some more usages of ParseFieldMatcher {pull}22395[#22395] (issues: {issue}19552[#19552], {issue}22130[#22130])
+* Remove some ParseFieldMatcher usages {pull}22389[#22389] (issues: {issue}19552[#19552], {issue}22130[#22130])
+* Introduce ToXContentObject interface {pull}22387[#22387] (issue: {issue}16347[#16347])
+* Add infrastructure to manage network connections outside of Transport/TransportService {pull}22194[#22194]
+* Replace strict parsing mode with response headers assertions {pull}22130[#22130] (issues: {issue}11859[#11859], {issue}19552[#19552], {issue}20993[#20993])
+* Start using `ObjectParser` for aggs. {pull}22048[#22048] (issue: {issue}22009[#22009])
+* Don't output null source node in RecoveryFailedException {pull}21963[#21963]
+* ClusterService should expose "applied" cluster states (i.e., remove ClusterStateStatus) {pull}21817[#21817]
+* Rename ClusterState#lookupPrototypeSafe to `lookupPrototype` and remove "unsafe" unused variant {pull}21686[#21686]
+* ShardActiveResponseHandler shouldn't hold to an entire cluster state {pull}21470[#21470] (issue: {issue}21394[#21394])
+* Remove unused ClusterService dependency from SearchPhaseController {pull}21421[#21421]
+* Remove special case in case no action filters are registered {pull}21251[#21251]
+* Use TimeValue instead of long for CacheBuilder methods {pull}20887[#20887]
+* Remove SearchContext#current and all its threadlocals {pull}20778[#20778] (issue: {issue}19341[#19341])
+* Remove poor-mans compression in InternalSearchHit and friends {pull}20472[#20472]
+
+Java API::
+* Added types options to DeleteByQueryRequest {pull}23265[#23265] (issue: {issue}21984[#21984])
+* prevent NPE when trying to uncompress a null BytesReference {pull}22386[#22386]
+
+Java High Level REST Client::
+* Add utility method to parse named XContent objects with typed prefix {pull}24240[#24240] (issue: {issue}22965[#22965])
+* Convert suggestion response parsing to use NamedXContentRegistry {pull}23355[#23355]
+* UpdateRequest implements ToXContent {pull}23289[#23289]
+* Add javadoc for DocWriteResponse.Builders {pull}23267[#23267]
+* Expose WriteRequest.RefreshPolicy string representation {pull}23106[#23106]
+* Use `typed_keys` parameter to prefix suggester names by type in search responses {pull}23080[#23080] (issue: {issue}22965[#22965])
+* Add parsing from xContent to MainResponse {pull}22934[#22934]
+* Parse elasticsearch exception's root causes {pull}22924[#22924]
+* Add parsing method to BytesRestResponse's error {pull}22873[#22873]
+* Add parsing methods to BulkItemResponse {pull}22859[#22859]
+* Add parsing method for ElasticsearchException.generateFailureXContent() {pull}22815[#22815]
+* Add parsing method for ElasticsearchException.generateThrowableXContent() {pull}22783[#22783]
+* Add parsing methods for UpdateResponse {pull}22586[#22586]
+* Add parsing from xContent to InternalSearchHit and InternalSearchHits {pull}22429[#22429]
+* Add fromxcontent methods to index response {pull}22229[#22229]
+* Add fromXContent() methods for ReplicationResponse {pull}22196[#22196] (issue: {issue}22082[#22082])
+* Add parsing method for ElasticsearchException {pull}22143[#22143]
+* Add fromXContent method to GetResponse {pull}22082[#22082]
+
+Java REST Client::
+* move ignore parameter support from yaml test client to low level rest client {pull}22637[#22637]
+* Warn log deprecation warnings received from server {pull}21895[#21895]
+* Support Preemptive Authentication with RestClient {pull}21336[#21336]
+* Provide error message when rest request path is null {pull}21233[#21233] (issue: {issue}21232[#21232])
+
+Logging::
+* Log deleting indices at info level {pull}22627[#22627] (issue: {issue}22605[#22605])
+* Expose logs base path {pull}22625[#22625]
+* Log failure to connect to node at info instead of debug {pull}21809[#21809] (issue: {issue}6468[#6468])
+* Truncate log messages from the end {pull}21609[#21609] (issue: {issue}21602[#21602])
+* Ensure logging is initialized in CLI tools {pull}20575[#20575]
+* Give useful error message if log config is missing {pull}20493[#20493]
+* Complete Elasticsearch logger names {pull}20457[#20457] (issue: {issue}20326[#20326])
+* Logging shutdown hack {pull}20389[#20389] (issue: {issue}20304[#20304])
+* Disable console logging {pull}20387[#20387]
+* Warn on not enough masters during election {pull}20063[#20063] (issue: {issue}8362[#8362])
+
+Mapping::
+* Do not index `_type` when there is at most one type. {pull}24363[#24363]
+* Only allow one type on 6.0 indices {pull}24317[#24317] (issue: {issue}15613[#15613])
+* token_count type : add an option to count tokens (fix #23227) {pull}24175[#24175] (issue: {issue}23227[#23227])
+* Atomic mapping updates across types {pull}22220[#22220]
+* Only update DocumentMapper if field type changes {pull}22165[#22165]
+* Better error message when _parent isn't an object {pull}21987[#21987]
+* Create the QueryShardContext lazily in DocumentMapperParser. {pull}21287[#21287]
+
+Nested Docs::
+* Avoid adding unnecessary nested filters when ranges are used. {pull}23427[#23427]
+
+Network::
+* Set available processors for Netty {pull}24420[#24420] (issue: {issue}6224[#6224])
+* Adjust default Netty receive predictor size to 64k {pull}23542[#23542] (issue: {issue}23185[#23185])
+* Keep the pipeline handler queue small initially {pull}23335[#23335]
+* Set network receive predictor size to 32kb {pull}23284[#23284] (issue: {issue}23185[#23185])
+* TransportService.connectToNode should validate remote node ID {pull}22828[#22828] (issue: {issue}22194[#22194])
+* Disable the Netty recycler {pull}22452[#22452] (issues: {issue}22189[#22189], {issue}22360[#22360], {issue}22406[#22406], {issue}5904[#5904])
+* Tell Netty not to be unsafe in transport client {pull}22284[#22284]
+* Introduce a low level protocol handshake {pull}22094[#22094]
+* Detach handshake from connect to node {pull}22037[#22037]
+* Reduce number of connections per node depending on the nodes role {pull}21849[#21849]
+* Add a connect timeout to the ConnectionProfile to allow per node connect timeouts {pull}21847[#21847] (issue: {issue}19719[#19719])
+* Grant Netty permission to read system somaxconn {pull}21840[#21840]
+* Remove connectToNodeLight and replace it with a connection profile {pull}21799[#21799]
+* Lazy resolve unicast hosts {pull}21630[#21630] (issues: {issue}14441[#14441], {issue}16412[#16412])
+* Fix handler name on message not fully read {pull}21478[#21478]
+* Handle rejected pings on shutdown gracefully {pull}20842[#20842]
+* Network: Allow to listen on virtual interfaces. {pull}19568[#19568] (issues: {issue}17473[#17473], {issue}19537[#19537])
+
+Packaging::
+* Introduce Java version check {pull}23194[#23194] (issue: {issue}21102[#21102])
+* Improve the out-of-the-box experience {pull}21920[#21920] (issues: {issue}18317[#18317], {issue}21783[#21783])
+* Add empty plugins dir for archive distributions {pull}21204[#21204] (issue: {issue}20342[#20342])
+* Make explicit missing settings for Windows service {pull}21200[#21200] (issue: {issue}18317[#18317])
+* Change permissions on config files {pull}20966[#20966]
+* Add quiet option to disable console logging {pull}20422[#20422] (issues: {issue}15315[#15315], {issue}16159[#16159], {issue}17220[#17220])
+
+Percolator::
+* Allowing range queries with now ranges inside percolator queries {pull}23921[#23921] (issue: {issue}23859[#23859])
+* Add term extraction support for MultiPhraseQuery {pull}23176[#23176]
+
+Plugin Discovery EC2::
+* Settings: Migrate ec2 discovery sensitive settings to elasticsearch keystore {pull}23961[#23961] (issue: {issue}22475[#22475])
+* Add support for ca-central-1 region to EC2 and S3 plugins {pull}22458[#22458] (issue: {issue}22454[#22454])
+* Support for eu-west-2 (London) cloud-aws plugin {pull}22308[#22308] (issue: {issue}22306[#22306])
+* Add us-east-2 AWS region {pull}21961[#21961] (issue: {issue}21881[#21881])
+* Add setting to set read timeout for EC2 discovery and S3 repository plugins {pull}21956[#21956] (issue: {issue}19078[#19078])
+
+Plugin Ingest GeoIp::
+* Cache results of geoip lookups {pull}22231[#22231] (issue: {issue}22074[#22074])
+
+Plugin Lang Painless::
+* Allow painless to load stored fields {pull}24290[#24290]
+* Start on custom whitelists for Painless {pull}23563[#23563]
+* Fix Painless's implementation of interfaces returning primitives {pull}23298[#23298] (issue: {issue}22983[#22983])
+* Allow painless to implement more interfaces {pull}22983[#22983]
+* Generate reference links for painless API {pull}22775[#22775]
+* Painless: Add augmentation to String for base 64 (example below) {pull}22665[#22665] (issue: {issue}22648[#22648])
+* Improve painless's ScriptException generation {pull}21762[#21762] (issue: {issue}21733[#21733])
+* Add Debug.explain to painless {pull}21723[#21723] (issue: {issue}20263[#20263])
+* Implement the ?: operator in painless {pull}21506[#21506]
+* In painless suggest a long constant if int won't do {pull}21415[#21415] (issue: {issue}21313[#21313])
+* Support decimal constants with trailing [dD] in painless {pull}21412[#21412] (issue: {issue}21116[#21116])
+* Implement reading from null safe dereferences {pull}21239[#21239]
+* Painless negative offsets {pull}21080[#21080] (issue: {issue}20870[#20870])
+* Remove more equivalents of the now method from the Painless whitelist. {pull}21047[#21047]
+* Disable regexes by default in painless {pull}20427[#20427] (issue: {issue}20397[#20397])
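+
+For the base-64 augmentation above, a hedged sketch of using it from an update
+request; the `encodeBase64()` method name follows the pull request title, and
+the index and field names are made up:
+
+[source,js]
+--------------------------------------------------
+POST /hypothetical_index/doc/1/_update
+{
+  "script": {
+    "lang": "painless",
+    "source": "ctx._source.encoded = ctx._source.raw.encodeBase64()"
+  }
+}
+--------------------------------------------------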
+
+Plugin Repository Azure::
+* Add Backoff policy to azure repository {pull}23387[#23387] (issue: {issue}22728[#22728])
+
+Plugin Repository S3::
+* Removes the retry mechanism from the S3 blob store {pull}23952[#23952] (issue: {issue}22845[#22845])
+* S3 Repository: Eagerly load static settings {pull}23910[#23910]
+* S3 repository: Add named configurations {pull}22762[#22762] (issues: {issue}22479[#22479], {issue}22520[#22520])
+* Make the default S3 buffer size depend on the available memory. {pull}21299[#21299]
+
+Plugins::
+* Plugins: Add support for platform specific plugins {pull}24265[#24265]
+* Plugins: Remove leniency for missing plugins dir {pull}24173[#24173]
+* Modify permissions dialog for plugins {pull}23742[#23742]
+* Plugins: Add plugin cli specific exit codes {pull}23599[#23599] (issue: {issue}15295[#15295])
+* Plugins: Output better error message when existing plugin is incompatible {pull}23562[#23562] (issue: {issue}20691[#20691])
+* Add the ability to define search response listeners in search plugin {pull}22682[#22682]
+* Pass ThreadContext to transport interceptors to allow header modification {pull}22618[#22618] (issue: {issue}22585[#22585])
+* Provide helpful error message if a plugin exists {pull}22305[#22305] (issue: {issue}22084[#22084])
+* Add shutdown hook for closing CLI commands {pull}22126[#22126] (issue: {issue}22111[#22111])
+* Allow plugins to install bootstrap checks {pull}22110[#22110]
+* Clarify that plugins can be closed {pull}21669[#21669]
+* Plugins: Convert custom discovery to pull based plugin {pull}21398[#21398]
+* Removing plugin that isn't installed shouldn't trigger usage information {pull}21272[#21272] (issue: {issue}21250[#21250])
+* Remove pluggability of ZenPing {pull}21049[#21049]
+* Make UnicastHostsProvider extension pull based {pull}21036[#21036]
+* Revert "Display plugins versions" {pull}20807[#20807] (issues: {issue}18683[#18683], {issue}20668[#20668])
+* Provide error message when plugin id is missing {pull}20660[#20660]
+
+Query DSL::
+* Make it possible to validate a query on all shards instead of a single random shard (example below) {pull}23697[#23697] (issue: {issue}18254[#18254])
+* QueryString and SimpleQueryString Graph Support {pull}22541[#22541]
+* Additional Graph Support in Match Query {pull}22503[#22503] (issue: {issue}22490[#22490])
+* RangeQuery WITHIN case now normalises query {pull}22431[#22431] (issue: {issue}22412[#22412])
+* Un-deprecate fuzzy query {pull}22088[#22088] (issue: {issue}15760[#15760])
+* support numeric bounds with decimal parts for long/integer/short/byte datatypes {pull}21972[#21972] (issue: {issue}21600[#21600])
+* Using ObjectParser in MatchAllQueryBuilder and IdsQueryBuilder {pull}21273[#21273]
+* Expose splitOnWhitespace in `Query String Query` {pull}20965[#20965] (issue: {issue}20841[#20841])
+* Throw error if query element doesn't end with END_OBJECT {pull}20528[#20528] (issue: {issue}20515[#20515])
+* Remove `lowercase_expanded_terms` and `locale` from query-parser options. {pull}20208[#20208] (issue: {issue}9978[#9978])
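+
+The `all_shards` validation option above can be combined with `rewrite`; a
+minimal sketch (index and query are illustrative):
+
+[source,js]
+--------------------------------------------------
+GET /hypothetical_index/_validate/query?rewrite=true&all_shards=true
+{
+  "query": { "match": { "title": "quick brown fox" } }
+}
+--------------------------------------------------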
+
+REST::
+* Allow passing single scrollID in clear scroll API body {pull}24242[#24242] (issue: {issue}24233[#24233])
+* Validate top-level keys when parsing mget requests {pull}23746[#23746] (issue: {issue}23720[#23720])
+* Cluster stats should not render empty http/transport types {pull}23735[#23735]
+* Add parameter to prefix aggs name with type in search responses {pull}22965[#22965]
+* Add a REST spec for the create API {pull}20924[#20924]
+* Add response params to REST params did you mean {pull}20753[#20753] (issues: {issue}20722[#20722], {issue}20747[#20747])
+* Add did you mean to strict REST params {pull}20747[#20747] (issue: {issue}20722[#20722])
+
+Reindex API::
+* Increase visibility of doExecute so it can be used directly {pull}22614[#22614]
+* Improve error message when reindex-from-remote gets bad json {pull}22536[#22536] (issue: {issue}22330[#22330])
+* Reindex: Better error message for pipeline in wrong place {pull}21985[#21985]
+* Timeout improvements for rest client and reindex {pull}21741[#21741] (issue: {issue}21707[#21707])
+* Add "simple match" support for reindex-from-remote whitelist {pull}21004[#21004]
+* Make reindex-from-remote ignore unknown fields {pull}20591[#20591] (issue: {issue}20504[#20504])
+
+Scripting::
+* Expose multi-valued dates to scripts and document painless's date functions {pull}22875[#22875] (issue: {issue}22162[#22162])
+* Wrap VerifyError in ScriptException {pull}21769[#21769]
+* Log ScriptException's xcontent if file script compilation fails {pull}21767[#21767] (issue: {issue}21733[#21733])
+* Support binary field type in script values {pull}21484[#21484] (issue: {issue}14469[#14469])
+* Mustache: Add {{#url}}{{/url}} function to URL encode strings {pull}20838[#20838]
+* Expose `ctx._now` in update scripts {pull}20835[#20835] (issue: {issue}17895[#17895])
+
+Search::
+* Remove leniency when merging fetched hits in a search response phase {pull}24158[#24158]
+* Set shard count limit to unlimited {pull}24012[#24012]
+* Streamline shard index availability in all SearchPhaseResults {pull}23788[#23788]
+* Search took time should use a relative clock {pull}23662[#23662]
+* Prevent negative `from` parameter in SearchSourceBuilder {pull}23358[#23358] (issue: {issue}23324[#23324])
+* Remove unnecessary result sorting in SearchPhaseController {pull}23321[#23321]
+* Expose `batched_reduce_size` via `_search` (example below) {pull}23288[#23288] (issue: {issue}23253[#23253])
+* Adding fromXContent to Suggest and Suggestion class {pull}23226[#23226] (issue: {issue}23202[#23202])
+* Adding fromXContent to Suggestion.Entry and subclasses {pull}23202[#23202]
+* Add CollapseSearchPhase as a successor for the FetchSearchPhase {pull}23165[#23165]
+* Integrate IndexOrDocValuesQuery. {pull}23119[#23119]
+* Detach SearchPhases from AbstractSearchAsyncAction {pull}23118[#23118]
+* Fix GraphQuery expectation after Lucene upgrade to 6.5 {pull}23117[#23117] (issue: {issue}23102[#23102])
+* Nested queries should avoid adding unnecessary filters when possible. {pull}23079[#23079] (issue: {issue}20797[#20797])
+* Add xcontent parsing to completion suggestion option {pull}23071[#23071]
+* Add xcontent parsing to suggestion options {pull}23018[#23018]
+* Separate reduce (aggs, suggest and profile) from merging fetched hits {pull}23017[#23017]
+* Add a setting to disable remote cluster connections on a node {pull}23005[#23005]
+* First step towards separating individual search phases {pull}22802[#22802]
+* Add parsing from xContent to SearchProfileShardResults and nested classes {pull}22649[#22649]
+* Move SearchTransportService and SearchPhaseController creation outside of TransportSearchAction constructor {pull}21754[#21754]
+* Don't carry ShardRouting around when not needed in AbstractSearchAsyncAction {pull}21753[#21753]
+* ShardSearchRequest to take ShardId constructor argument rather than the whole ShardRouting {pull}21750[#21750]
+* Use index uuid as key in the alias filter map rather than the index name {pull}21749[#21749]
+* Add indices and filter information to search shards api output {pull}21738[#21738] (issue: {issue}20916[#20916])
+* remove pointless catch exception in TransportSearchAction {pull}21689[#21689]
+* Optimize query with types filter in the URL (t/t/_search) {pull}20979[#20979]
+* Makes search action cancelable by task management API {pull}20405[#20405]
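+
+The `batched_reduce_size` entry above is a request-level parameter; a sketch
+with an arbitrary batch size:
+
+[source,js]
+--------------------------------------------------
+GET /_search?batched_reduce_size=64
+{
+  "size": 0,
+  "aggs": {
+    "per_day": { "date_histogram": { "field": "timestamp", "interval": "day" } }
+  }
+}
+--------------------------------------------------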
+
+Search Templates::
+* Add profile and explain parameters to template API {pull}20451[#20451]
+
+Settings::
+* Add secure file setting to keystore {pull}24001[#24001]
+* Add a property to mark setting as final {pull}23872[#23872]
+* Remove obsolete index setting `index.version.minimum_compatible`. {pull}23593[#23593]
+* Provide a method to retrieve a closeable char[] from a SecureString {pull}23389[#23389]
+* Update indices settings api to support CBOR and SMILE format {pull}23309[#23309] (issues: {issue}23242[#23242], {issue}23245[#23245])
+* Improve setting deprecation message {pull}23156[#23156] (issue: {issue}22849[#22849])
+* Add secure settings validation on startup {pull}22894[#22894]
+* Allow comma delimited array settings to have a space after each entry {pull}22591[#22591] (issue: {issue}22297[#22297])
+* Allow affix settings to be dynamic / updatable {pull}22526[#22526]
+* Allow affix settings to delegate to actual settings {pull}22523[#22523]
+* Make s3 repository sensitive settings use secure settings {pull}22479[#22479]
+* Speed up filter and prefix settings operations {pull}22249[#22249]
+* Add precise logging on unknown or invalid settings {pull}20951[#20951] (issue: {issue}20946[#20946])
+
+Snapshot/Restore::
+* Ensure every repository has an incompatible-snapshots blob {pull}24403[#24403] (issue: {issue}22267[#22267])
+* Change snapshot status error to use generic SnapshotException {pull}24355[#24355] (issue: {issue}24225[#24225])
+* Duplicate snapshot name throws InvalidSnapshotNameException {pull}22921[#22921] (issue: {issue}18228[#18228])
+* Fixes retrieval of the latest snapshot index blob {pull}22700[#22700]
+* Use general cluster state batching mechanism for snapshot state updates {pull}22528[#22528] (issue: {issue}14899[#14899])
+* Synchronize snapshot deletions on the cluster state {pull}22313[#22313] (issue: {issue}19957[#19957])
+* Abort snapshots on a node that leaves the cluster {pull}21084[#21084] (issue: {issue}20876[#20876])
+
+Stats::
+* Show JVM arguments {pull}24450[#24450]
+* Add cross-cluster search remote cluster info API (example below) {pull}23969[#23969] (issue: {issue}23925[#23925])
+* Add geo_point to FieldStats {pull}21947[#21947] (issue: {issue}20707[#20707])
+* Include unindexed field in FieldStats response {pull}21821[#21821] (issue: {issue}21952[#21952])
+* Remove load average leniency {pull}21380[#21380]
+* Strengthen handling of unavailable cgroup stats {pull}21094[#21094] (issue: {issue}21029[#21029])
+* Add basic cgroup CPU metrics {pull}21029[#21029]
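+
+The remote cluster info entry above adds a dedicated endpoint; a minimal
+sketch of the call:
+
+[source,js]
+--------------------------------------------------
+GET /_remote/info
+--------------------------------------------------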
+
+Suggesters::
+* Provide informative error message in case of unknown suggestion context. {pull}24241[#24241]
+* Allow different data types for category in Context suggester {pull}23491[#23491] (issue: {issue}22358[#22358])
+
+Task Manager::
+* Limit IndexRequest toString() length {pull}22832[#22832]
+* Improve the error message if task and node isn't found {pull}22062[#22062] (issue: {issue}22027[#22027])
+* Add descriptions to create snapshot and restore snapshot tasks. {pull}21901[#21901] (issue: {issue}21768[#21768])
+* Add proper descriptions to reindex, update-by-query and delete-by-query tasks (example below). {pull}21841[#21841] (issue: {issue}21768[#21768])
+* Add search task descriptions {pull}21740[#21740]
+
+Tribe Node::
+* Add support for merging custom meta data in tribe node {pull}21552[#21552] (issues: {issue}20544[#20544], {issue}20791[#20791], {issue}9372[#9372])
+
+Upgrade API::
+* Allow plugins to upgrade templates and index metadata on startup {pull}24379[#24379]
+
+[float]
+=== Bug Fixes
+
+Aggregations::
+* InternalPercentilesBucket should not rely on ordered percents array {pull}24336[#24336] (issue: {issue}24331[#24331])
+* Align behavior HDR percentiles iterator with percentile() method {pull}24206[#24206]
+* The `filter` and `significant_terms` aggregations should parse the `filter` as a filter, not a query. {pull}23797[#23797]
+* Completion suggestion should also consider text if prefix/regex is missing {pull}23451[#23451] (issue: {issue}23340[#23340])
+* Fixes the per term error in the terms aggregation {pull}23399[#23399]
+* Fixes terms error count for multiple reduce phases {pull}23291[#23291] (issue: {issue}23286[#23286])
+* Fix scaled_float numeric type in aggregations {pull}22351[#22351] (issue: {issue}22350[#22350])
+* Allow terms aggregations on pure boolean scripts. {pull}22201[#22201] (issue: {issue}20941[#20941])
+* Fix numeric terms aggregations with includes/excludes and minDocCount=0 {pull}22141[#22141] (issue: {issue}22140[#22140])
+* Fix `missing` on aggs on `boolean` fields. {pull}22135[#22135] (issue: {issue}22009[#22009])
+* IP range masks exclude the maximum address of the range. {pull}22018[#22018] (issue: {issue}22005[#22005])
+* Fix `other_bucket` on the `filters` agg to be enabled if a key is set. {pull}21994[#21994] (issue: {issue}21951[#21951])
+* Rewrite Queries/Filter in FilterAggregationBuilder and ensure client usage marks query as non-cachable {pull}21303[#21303] (issue: {issue}21301[#21301])
+* Percentiles bucket fails for 100th percentile {pull}21218[#21218]
+* Thread safety for scripted significance heuristics {pull}21113[#21113] (issue: {issue}18120[#18120])
+* `ip_range` aggregation should accept null bounds. {pull}21043[#21043] (issue: {issue}21006[#21006])
+* Fixes bug preventing script sort working on top_hits aggregation {pull}21023[#21023] (issue: {issue}21022[#21022])
+* Fixed writeable name from range to geo_distance {pull}20860[#20860]
+* Fix date_range aggregation to not cache if now is used {pull}20740[#20740]
+* The `top_hits` aggregation should compile scripts only once. {pull}20738[#20738]
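+
+The task descriptions added under Task Manager above are visible through the
+task management API; a sketch that filters on reindex-style actions:
+
+[source,js]
+--------------------------------------------------
+GET /_tasks?detailed=true&actions=*reindex
+--------------------------------------------------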
+
+Allocation::
+* Discard stale node responses from async shard fetching {pull}24434[#24434] (issue: {issue}24007[#24007])
+* Cannot force allocate primary to a node where the shard already exists {pull}22031[#22031] (issue: {issue}22021[#22021])
+* Promote shadow replica to primary when initializing primary fails {pull}22021[#22021]
+* Trim in-sync allocations set only when it grows {pull}21976[#21976] (issue: {issue}21719[#21719])
+* Allow master to assign primary shard to node that has shard store locked during shard state fetching {pull}21656[#21656] (issue: {issue}19416[#19416])
+* Keep a shadow replicas' allocation id when it is promoted to primary {pull}20863[#20863] (issue: {issue}20650[#20650])
+* IndicesClusterStateService should clean local started when re-assigns an initializing shard with the same aid {pull}20687[#20687]
+* IndexRoutingTable.initializeEmpty shouldn't override supplied primary RecoverySource {pull}20638[#20638] (issue: {issue}20637[#20637])
+* Update incoming recoveries stats when shadow replica is reinitialized {pull}20612[#20612]
+* `index.routing.allocation.initial_recovery` limits replica allocation {pull}20589[#20589]
+
+Analysis::
+* AsciiFoldingFilter's multi-term component should never preserve the original token. {pull}21982[#21982]
+* Pre-built analysis factories do not implement MultiTermAware correctly. {pull}21981[#21981]
+* Can load non-PreBuiltTokenFilter in Analyze API {pull}20396[#20396]
+* Named analyzer should close the analyzer that it wraps {pull}20197[#20197]
+
+Bulk::
+* Reject empty IDs {pull}24118[#24118] (issue: {issue}24116[#24116])
+
+CAT API::
+* Consume `full_id` request parameter early {pull}21270[#21270] (issue: {issue}21266[#21266])
+
+CRUD::
+* Reject external versioning and explicit version numbers on create {pull}21998[#21998]
+* MultiGet should not fail entirely if alias resolves to many indices {pull}20858[#20858] (issue: {issue}20845[#20845])
+* Fixed date math expression support in multi get requests. {pull}20659[#20659] (issue: {issue}17957[#17957])
+
+Cache::
+* Invalidate cached query results if query timed out {pull}22807[#22807] (issue: {issue}22789[#22789])
+* Fix the request cache keys to not hold references to the SearchContext (example below). {pull}21284[#21284]
+* Prevent requests that use scripts or now() from being cached {pull}20750[#20750] (issue: {issue}20645[#20645])
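+
+The request cache fixes above concern the shard request cache; for reference,
+a sketch of opting a request into it explicitly (the index name is
+illustrative):
+
+[source,js]
+--------------------------------------------------
+GET /hypothetical_index/_search?request_cache=true
+{
+  "size": 0,
+  "aggs": {
+    "statuses": { "terms": { "field": "status" } }
+  }
+}
+--------------------------------------------------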
+
+Circuit Breakers::
+* ClusterState publishing shouldn't trigger circuit breakers {pull}20986[#20986] (issues: {issue}20827[#20827], {issue}20960[#20960])
+
+Cluster::
+* Don't set local node on cluster state used for node join validation {pull}23311[#23311] (issues: {issue}21830[#21830], {issue}3[#3], {issue}4[#4], {issue}6[#6], {issue}9[#9])
+* Allow a cluster state applier to create an observer and wait for a better state {pull}23132[#23132] (issue: {issue}21817[#21817])
+* Cluster allocation explain to never return empty response body {pull}23054[#23054]
+* IndicesService handles all exceptions during index deletion {pull}22433[#22433]
+* Remove cluster update task when task times out {pull}21578[#21578] (issue: {issue}21568[#21568])
+
+Core::
+* Check for default.path.data included in path.data {pull}24285[#24285] (issue: {issue}24283[#24283])
+* Improve performance of extracting warning value {pull}24114[#24114] (issue: {issue}24018[#24018])
+* Reject duplicate settings on the command line {pull}24053[#24053]
+* Restrict build info loading to ES jar, not any jar {pull}24049[#24049] (issue: {issue}21955[#21955])
+* Streamline foreign stored context restore and allow to preserve response headers {pull}22677[#22677] (issue: {issue}22647[#22647])
+* Support negative numbers in readVLong {pull}22314[#22314]
+* Add a StreamInput#readArraySize method that ensures sane array sizes {pull}21697[#21697]
+* Use a buffer to do character to byte conversion in StreamOutput#writeString {pull}21680[#21680] (issue: {issue}21660[#21660])
+* Fix ShardInfo#toString {pull}21319[#21319]
+* Protect BytesStreamOutput against overflows of the current number of written bytes. {pull}21174[#21174] (issue: {issue}21159[#21159])
+* Return target index name even if _rollover conditions are not met {pull}21138[#21138]
+* .es_temp_file remains after system crash, causing it not to start again {pull}21007[#21007] (issue: {issue}20992[#20992])
+* StoreStatsCache should also ignore AccessDeniedException when checking file size {pull}20790[#20790] (issue: {issue}17580[#17580])
+
+Dates::
+* Fix time zone rounding edge case for DST overlaps {pull}21550[#21550] (issue: {issue}20833[#20833])
+
+Discovery::
+* ZenDiscovery - only validate min_master_nodes values if local node is master {pull}23915[#23915] (issue: {issue}23695[#23695])
+* Close InputStream when receiving cluster state in PublishClusterStateAction {pull}22711[#22711]
+* Do not reply to pings from another cluster {pull}21894[#21894] (issue: {issue}21874[#21874])
+* Add current cluster state version to zen pings and use them in master election {pull}20384[#20384] (issue: {issue}20348[#20348])
+
+Engine::
+* Close and flush refresh listeners on shard close {pull}22342[#22342]
+* Die with dignity on the Lucene layer {pull}21721[#21721] (issue: {issue}19272[#19272])
+* Fix `InternalEngine#isThrottled` to not always return `false`. {pull}21592[#21592]
+* Retrying replication requests on replica doesn't call `onRetry` {pull}21189[#21189] (issue: {issue}20211[#20211])
+* Take refresh IOExceptions into account when catching ACE in InternalEngine {pull}20546[#20546] (issue: {issue}19975[#19975])
+
+Exceptions::
+* Stop returning "es." internal exception headers as http response headers {pull}22703[#22703] (issue: {issue}17593[#17593])
+* Fixing shard recovery error message to report the number of docs correctly for each node {pull}22515[#22515] (issue: {issue}21893[#21893])
+
+Highlighting::
+* Fix FiltersFunctionScoreQuery highlighting {pull}21827[#21827]
+* Fix highlighting on a stored keyword field {pull}21645[#21645] (issue: {issue}21636[#21636])
+* Fix highlighting of MultiTermQuery within a FunctionScoreQuery {pull}20400[#20400] (issue: {issue}20392[#20392])
+
+Index APIs::
+* Fixes restore of a shrunken index when initial recovery node is gone {pull}24322[#24322] (issue: {issue}24257[#24257])
+* Honor update request timeout {pull}23825[#23825]
+* Ensure shrunk indices carry over version information from its source {pull}22469[#22469] (issue: {issue}22373[#22373])
+* Validate the `_rollover` target index name early to also fail if dry_run=true {pull}21330[#21330] (issue: {issue}21149[#21149])
+* Only negate index expression on all indices with preceding wildcard {pull}20898[#20898] (issues: {issue}19800[#19800], {issue}20033[#20033])
+* Fix IndexNotFoundException in multi index search request. {pull}20188[#20188] (issue: {issue}3839[#3839])
+
+Index Templates::
+* Fix integer overflows when dealing with templates. {pull}21628[#21628] (issue: {issue}21622[#21622])
+
+Ingest::
+* Improve missing ingest processor error {pull}23379[#23379] (issue: {issue}23392[#23392])
+* update _ingest.timestamp to use new ZonedDateTime {pull}23174[#23174] (issue: {issue}23168[#23168])
+* fix date-processor to a new default year for every new pipeline execution {pull}22601[#22601] (issue: {issue}22547[#22547])
+* fix index out of bounds error in KV Processor {pull}22288[#22288] (issue: {issue}22272[#22272])
+* Fixes GrokProcessor's ignorance of named-captures with same name. {pull}22131[#22131] (issue: {issue}22117[#22117])
+* fix trace_match behavior for when there is only one grok pattern {pull}21413[#21413] (issue: {issue}21371[#21371])
+* Stored scripts and ingest node configurations should be included into a snapshot {pull}21227[#21227] (issue: {issue}21184[#21184])
+* make painless the default scripting language for ScriptProcessor {pull}20981[#20981] (issue: {issue}20943[#20943])
+* no null values in ingest configuration error messages {pull}20616[#20616]
+* JSON Processor was not properly added {pull}20613[#20613]
+
+Inner Hits::
+* Replace NestedChildrenQuery with ParentChildrenBlockJoinQuery {pull}24016[#24016] (issue: {issue}24009[#24009])
+* Changed DisMaxQueryBuilder to extract inner hits from leaf queries {pull}23512[#23512] (issue: {issue}23482[#23482])
+* Inner hits and ignore unmapped (example below) {pull}21693[#21693] (issue: {issue}21620[#21620])
+* Skip adding a parent field to nested documents. {pull}21522[#21522] (issue: {issue}21503[#21503])
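+
+For the inner hits fixes above, a sketch of a nested query that requests inner
+hits and tolerates indices where the path is unmapped (all names are
+illustrative):
+
+[source,js]
+--------------------------------------------------
+GET /hypothetical_index/_search
+{
+  "query": {
+    "nested": {
+      "path": "comments",
+      "ignore_unmapped": true,
+      "query": { "match": { "comments.text": "great" } },
+      "inner_hits": {}
+    }
+  }
+}
+--------------------------------------------------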
+
+Internal::
+* Fix NPE if field caps request has a field that exists not in all indices {pull}24504[#24504]
+* Add infrastructure to mark contexts as system contexts {pull}23830[#23830]
+* Always restore the ThreadContext for operations delayed due to a block {pull}23349[#23349]
+* Index creation and setting update may not return deprecation logging {pull}22702[#22702]
+* Rethrow ExecutionException from the loader to concurrent callers of Cache#computeIfAbsent {pull}21549[#21549]
+* Restore thread's original context before returning to the ThreadPool {pull}21411[#21411]
+* Fix NPE in SearchContext.toString() {pull}21069[#21069]
+* Prevent AbstractArrays from releasing bytes more than once {pull}20819[#20819]
+* Source filtering should treat dots in field names as sub objects. {pull}20736[#20736] (issue: {issue}20719[#20719])
+* IndicesAliasesRequest should not implement CompositeIndicesRequest {pull}20726[#20726]
+* Ensure elasticsearch doesn't start with unsupported indices {pull}20514[#20514] (issue: {issue}20512[#20512])
+
+Java API::
+* Don't output empty ext object in SearchSourceBuilder#toXContent {pull}22093[#22093] (issue: {issue}20969[#20969])
+* Transport client: Fix remove address to actually work {pull}21743[#21743]
+* Add a HostFailureListener to notify client code if a node got disconnected {pull}21709[#21709] (issue: {issue}21424[#21424])
+* Fix InternalSearchHit#hasSource to return the proper boolean value {pull}21441[#21441] (issue: {issue}21419[#21419])
+* Null checked for source when calling sourceRef {pull}21431[#21431] (issue: {issue}19279[#19279])
+* ClusterAdminClient.prepareDeletePipeline method should accept pipeline id to delete {pull}21228[#21228]
+* fix IndexResponse#toString to print out shards info {pull}20562[#20562]
+
+Java High Level REST Client::
+* Correctly parse BulkItemResponse.Failure's status {pull}23432[#23432]
+
+Java REST Client::
+* Make buffer limit configurable in HeapBufferedConsumerFactory {pull}23970[#23970] (issue: {issue}23958[#23958])
+* RestClient asynchronous execution should not throw exceptions {pull}23307[#23307]
+* Don't use null charset in RequestLogger {pull}22197[#22197] (issue: {issue}22190[#22190])
+* Rest client: don't reuse the same HttpAsyncResponseConsumer across multiple retries {pull}21378[#21378]
+
+Logging::
+* Do not prematurely shutdown Log4j {pull}21519[#21519] (issue: {issue}21514[#21514])
+* Assert status logger does not warn on Log4j usage {pull}21339[#21339]
+* Fix logger names for Netty {pull}21223[#21223] (issue: {issue}20457[#20457])
+* Fix logger when you can not create an azure storage client {pull}20670[#20670] (issues: {issue}20633[#20633], {issue}20669[#20669])
+* Avoid unnecessary creation of prefix loggers {pull}20571[#20571] (issue: {issue}20570[#20570])
+* Fix logging hierarchy configs {pull}20463[#20463]
+* Fix prefix logging {pull}20429[#20429]
+
+Mapping::
+* Preserve response headers when creating an index {pull}23950[#23950] (issue: {issue}23947[#23947])
+* Improves disabled fielddata error message {pull}23841[#23841] (issue: {issue}22768[#22768])
+* Fix MapperService StackOverflowError {pull}23605[#23605] (issue: {issue}23604[#23604])
+* Fix NPE with scaled floats stats when field is not indexed {pull}23528[#23528] (issue: {issue}23487[#23487])
+* Range types causing `GetFieldMappingsIndexRequest` to fail due to `NullPointerException` in `RangeFieldMapper.doXContentBody` when `include_defaults=true` is on the query string {pull}22925[#22925]
+* Disallow introducing illegal object mappings (double '..') {pull}22891[#22891] (issue: {issue}22794[#22794])
+* The `_all` default mapper is not completely configured. {pull}22236[#22236]
+* Fix MapperService.allEnabled(). {pull}22227[#22227]
+* Dynamic `date` fields should use the `format` that was used to detect it is a date. {pull}22174[#22174] (issue: {issue}9410[#9410])
+* Sub-fields should not accept `include_in_all` parameter {pull}21971[#21971] (issue: {issue}21710[#21710])
+* Mappings: Fix get mapping when no indexes exist to not fail in response generation {pull}21924[#21924] (issue: {issue}21916[#21916])
+* Fail to index fields with dots in field names when one of the intermediate objects is nested. {pull}21787[#21787] (issue: {issue}21726[#21726])
+* Uncommitted mapping updates should not affect existing indices {pull}21306[#21306] (issue: {issue}21189[#21189])
+
+Nested Docs::
+* Fix bug in query builder rewrite that ignores the ignore_unmapped option {pull}22456[#22456]
+
+Network::
+* Respect promises on pipelined responses {pull}23317[#23317] (issues: {issue}23310[#23310], {issue}23322[#23322])
+* Ensure that releasing listener is called {pull}23310[#23310]
+* Pass `forceExecution` flag to transport interceptor {pull}22739[#22739]
+* Ensure new connections won't be opened if transport is closed or closing {pull}22589[#22589] (issue: {issue}22554[#22554])
+* Prevent open channel leaks if handshake times out or is interrupted {pull}22554[#22554]
+* Execute low level handshake in #openConnection {pull}22440[#22440]
+* Handle connection close / reset events gracefully during handshake {pull}22178[#22178]
+* Do not lose host information when pinging {pull}21939[#21939] (issue: {issue}21828[#21828])
+* DiscoveryNode and TransportAddress should preserve host information {pull}21828[#21828]
+* Die with dignity on the network layer {pull}21720[#21720] (issue: {issue}19272[#19272])
+* Fix connection close header handling {pull}20956[#20956] (issue: {issue}20938[#20938])
+* Ensure port range is readable in the exception message {pull}20893[#20893]
+* Prevent double release in TcpTransport if send listener throws an exception {pull}20880[#20880]
+
+Packaging::
+* Fall back to non-atomic move when removing plugins {pull}23548[#23548] (issue: {issue}35[#35])
+* Another fix for handling of paths on Windows {pull}22132[#22132] (issue: {issue}21921[#21921])
+* Fix handling of spaces in Windows paths {pull}21921[#21921] (issues: {issue}20809[#20809], {issue}21525[#21525])
+* Add option to skip kernel parameters on install {pull}21899[#21899] (issue: {issue}21877[#21877])
+* Set vm.max_map_count on systemd package install {pull}21507[#21507]
+* Export ES_JVM_OPTIONS for SysV init {pull}21445[#21445] (issue: {issue}21255[#21255])
+* Debian: configure start-stop-daemon to not go into background {pull}21343[#21343] (issues: {issue}12716[#12716], {issue}21300[#21300])
+* Generate POM files with non-wildcard excludes {pull}21234[#21234] (issue: {issue}21170[#21170])
+* [Packaging] Do not remove scripts directory on upgrade {pull}20452[#20452]
+* [Package] Remove bin/lib/modules directories on RPM uninstall/upgrade {pull}20448[#20448]
+
+Parent/Child::
+* Add null check in case of orphan child document {pull}22772[#22772] (issue: {issue}22770[#22770])
+
+Percolator::
+* Fix memory leak when percolator uses bitset or field data cache {pull}24115[#24115] (issue: {issue}24108[#24108])
+* Fix NPE in percolator's 'now' range check for percolator queries with range queries (example below) {pull}22356[#22356] (issue: {issue}22355[#22355])
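+
+For context on the percolator fixes above, a sketch of a percolate search
+against a percolator field (the index, field, and document are illustrative):
+
+[source,js]
+--------------------------------------------------
+GET /queries_index/_search
+{
+  "query": {
+    "percolate": {
+      "field": "query",
+      "document": { "message": "A new bonsai tree in the office" }
+    }
+  }
+}
+--------------------------------------------------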
+
+Plugin Analysis Stempel::
+* Fix thread safety of Stempel's token filter factory {pull}22610[#22610] (issue: {issue}21911[#21911])
+
+Plugin Discovery EC2::
+* Fix ec2 discovery when used with IAM profiles. {pull}21048[#21048] (issue: {issue}21039[#21039])
+
+Plugin Ingest GeoIp::
+* [ingest-geoip] update geoip to not include null-valued results from {pull}20455[#20455]
+
+Plugin Lang Painless::
+* painless: Fix method references to ctor with the new LambdaBootstrap and cleanup code {pull}24406[#24406]
+* Fix Painless Lambdas for Java 9 {pull}24070[#24070] (issue: {issue}23473[#23473])
+* Fix painless's regex lexer and error messages {pull}23634[#23634]
+* Replace Painless's Cast with casting strategies {pull}23369[#23369]
+* Fix Bad Casts In Painless {pull}23282[#23282] (issue: {issue}23238[#23238])
+* Don't allow casting from void to def in painless {pull}22969[#22969] (issue: {issue}22908[#22908])
+* Fix def invoked qualified method refs {pull}22918[#22918]
+* Whitelist some ScriptDocValues in painless {pull}22600[#22600] (issue: {issue}22584[#22584])
+* Update Painless Loop Counter to be Higher {pull}22560[#22560] (issue: {issue}22508[#22508])
+* Fix some issues with painless's strings {pull}22393[#22393] (issue: {issue}22372[#22372])
+* Test fix for def equals in Painless {pull}21945[#21945] (issue: {issue}21801[#21801])
+* Fix a VerifyError bug in Painless {pull}21765[#21765]
+* Fix Lambdas in Painless to be Able to Use Top-Level Variables Such as params and doc {pull}21635[#21635] (issues: {issue}20869[#20869], {issue}21479[#21479])
+* Fix String Concatenation Bug In Painless {pull}20623[#20623]
+
+Plugin Repository Azure::
+* Azure blob store's readBlob() method first checks if the blob exists {pull}23483[#23483] (issue: {issue}23480[#23480])
+* Fixes default chunk size for Azure repositories {pull}22577[#22577] (issue: {issue}22513[#22513])
+* readonly on azure repository must be taken into account {pull}22055[#22055] (issues: {issue}22007[#22007], {issue}22053[#22053])
+
+Plugin Repository HDFS::
+* Fixing permission errors for `KERBEROS` security mode for HDFS Repository {pull}23439[#23439] (issue: {issue}22156[#22156])
+
+Plugin Repository S3::
+* Handle BlobPath's trailing separator case. Add test cases to BlobPathTests.java {pull}23091[#23091]
+* Fixes leading forward slash in S3 repository base_path {pull}20861[#20861]
+
+Plugins::
+* Fix delete of plugin directory on remove plugin {pull}24266[#24266] (issue: {issue}24252[#24252])
+* Use a marker file when removing a plugin {pull}24252[#24252] (issue: {issue}24231[#24231])
+* Remove hidden file leniency from plugin service {pull}23982[#23982] (issue: {issue}12465[#12465])
+* Add check for null pluginName in remove command {pull}22930[#22930] (issue: {issue}22922[#22922])
+* Use sysprop like with es.path.home to pass conf dir {pull}18870[#18870] (issue: {issue}18689[#18689])
+
+Query DSL::
+* FuzzyQueryBuilder should error when parsing array of values {pull}23762[#23762] (issue: {issue}23759[#23759])
+* Fix parsing for `max_determinized_states` {pull}22749[#22749] (issue: {issue}22722[#22722])
+* Fix script score function that combines _score and weight {pull}22713[#22713] (issue: {issue}21483[#21483])
+* Fixes date range query using epoch with timezone {pull}21542[#21542] (issue: {issue}21501[#21501])
+* Allow overriding all-field leniency when `lenient` option is specified {pull}21504[#21504] (issues: {issue}20925[#20925], {issue}21341[#21341])
+* Max score should be updated when a rescorer is used {pull}20977[#20977] (issue: {issue}20651[#20651])
+* Fixes MultiMatchQuery so that it doesn't provide a null context {pull}20882[#20882]
+* Fix silently accepting malformed queries {pull}20515[#20515] (issue: {issue}20500[#20500])
+* Fix match_phrase_prefix query with single term on _all field {pull}20471[#20471] (issue: {issue}20470[#20470])
+
+REST::
+* [API] change wait_for_completion default according to docs {pull}23672[#23672]
+* Deprecate request_cache for clear-cache {pull}23638[#23638] (issue: {issue}22748[#22748])
+* HTTP transport stashes the ThreadContext instead of the RestController {pull}23456[#23456]
+* Fix date format in warning headers {pull}23418[#23418] (issue: {issue}23275[#23275])
+* Align REST specs for HEAD requests {pull}23313[#23313] (issue: {issue}21125[#21125])
+* Correct warning header to be compliant {pull}23275[#23275] (issue: {issue}22986[#22986])
+* Fix get HEAD requests {pull}23186[#23186] (issue: {issue}21125[#21125])
+* Handle bad HTTP requests {pull}23153[#23153] (issue: {issue}23034[#23034])
+* Fix get source HEAD requests {pull}23151[#23151] (issue: {issue}21125[#21125])
+* Properly encode location header {pull}23133[#23133] (issues: {issue}21057[#21057], {issue}23115[#23115])
+* Fix template HEAD requests {pull}23130[#23130] (issue: {issue}21125[#21125])
+* Fix index HEAD requests {pull}23112[#23112] (issue: {issue}21125[#21125])
+* Fix alias HEAD requests {pull}23094[#23094] (issue: {issue}21125[#21125])
+* Strict level parsing for indices stats {pull}21577[#21577] (issue: {issue}21024[#21024])
+* The routing query string param is supported by mget but was missing from the rest spec {pull}21357[#21357]
+* fix thread_pool_patterns path variable definition {pull}21332[#21332]
+* Read indices options in indices upgrade API {pull}21281[#21281] (issue: {issue}21099[#21099])
+* ensure the XContentBuilder is always closed in RestBuilderListener {pull}21124[#21124]
+* Add correct Content-Length on HEAD requests {pull}21123[#21123] (issue: {issue}21077[#21077])
+* Make sure HEAD / has 0 Content-Length {pull}21077[#21077] (issue: {issue}21075[#21075])
+* Adds percent-encoding for Location headers {pull}21057[#21057] (issue: {issue}21016[#21016])
+* Whitelist node stats indices level parameter {pull}21024[#21024] (issue: {issue}20722[#20722])
+* Remove lenient URL parameter parsing {pull}20722[#20722] (issue: {issue}14719[#14719])
+* XContentBuilder: Avoid building self-referencing objects {pull}20550[#20550] (issues: {issue}19475[#19475], {issue}20540[#20540])
+
+Recovery::
+* Provide target allocation id as part of start recovery request {pull}24333[#24333] (issue: {issue}24167[#24167])
+* Fix primary relocation for shadow replicas {pull}22474[#22474] (issue: {issue}20300[#20300])
+* Don't close store under CancellableThreads {pull}22434[#22434] (issue: {issue}22325[#22325])
+* Use a fresh recovery id when retrying recoveries {pull}22325[#22325] (issue: {issue}22043[#22043])
+* Allow flush/force_merge/upgrade on shard marked as relocated {pull}22078[#22078] (issue: {issue}22043[#22043])
+* Fix concurrency issues between cancelling a relocation and marking shard as relocated {pull}20443[#20443]
+
+Reindex API::
+* Fix throttled reindex_from_remote {pull}23953[#23953] (issues: {issue}23828[#23828], {issue}23945[#23945])
+* Fix reindex with a remote source on a version before 2.0.0 {pull}23805[#23805]
+* Make reindex wait for cleanup before responding {pull}23677[#23677] (issue: {issue}23653[#23653])
+* Reindex: do not log when can't clear old scroll {pull}22942[#22942] (issue: {issue}22937[#22937])
+* Fix reindex-from-remote from <2.0 {pull}22931[#22931] (issue: {issue}22893[#22893])
+* Fix reindex from remote clearing scroll {pull}22525[#22525] (issue: {issue}22514[#22514])
+* Fix source filtering in reindex-from-remote {pull}22514[#22514] (issue: {issue}22507[#22507])
+* Remove content type detection from reindex-from-remote {pull}22504[#22504] (issue: {issue}22329[#22329])
+* Don't close rest client from its callback {pull}22061[#22061] (issue: {issue}22027[#22027])
+* Keep context during reindex's retries {pull}21941[#21941]
+* Ignore IllegalArgumentException with assertVersionSerializable {pull}21409[#21409] (issues: {issue}20767[#20767], {issue}21350[#21350])
+* Bump reindex-from-remote's buffer to 200mb {pull}21222[#21222] (issue: {issue}21185[#21185])
+* Fix reindex-from-remote for parent/child from <2.0 {pull}21070[#21070] (issue: {issue}21044[#21044])
+
+Scripting::
+* Convert script/template objects to json format internally {pull}23308[#23308] (issue: {issue}23245[#23245])
+* Script: Fix value of `ctx._now` to be current epoch time in milliseconds {pull}23175[#23175] (issue: {issue}23169[#23169])
+* Expose `ip` fields as strings in scripts. {pull}21997[#21997] (issue: {issue}21977[#21977])
+* Add support for booleans in scripts {pull}20950[#20950] (issue: {issue}20949[#20949])
+* Native scripts should be created once per index, not per segment. {pull}20609[#20609]
+
+Search::
+* Include all aliases including non-filtering in `_search_shards` response {pull}24489[#24489]
+* Cross Cluster Search: propagate original indices per cluster {pull}24328[#24328]
+* Query string default field {pull}24214[#24214]
+* Speed up parsing of large `terms` queries. {pull}24210[#24210]
+* IndicesQueryCache should delegate the scorerSupplier method. {pull}24209[#24209]
+* Disable graph analysis at query time for shingle and cjk filters producing tokens of different size {pull}23920[#23920] (issue: {issue}23918[#23918])
+* Fix cross-cluster remote node gateway attributes {pull}23863[#23863]
+* Use a fixed seed for computing term hashCode in TermsSliceQuery {pull}23795[#23795]
+* Honor max concurrent searches in multi-search {pull}23538[#23538] (issue: {issue}23527[#23527])
+* Avoid stack overflow in multi-search {pull}23527[#23527] (issue: {issue}23523[#23523])
+* Fix query_string_query to transform "foo:*" in an exists query on the field name {pull}23433[#23433] (issue: {issue}23356[#23356])
+* Factor out filling of TopDocs in SearchPhaseController {pull}23380[#23380] (issues: {issue}19356[#19356], {issue}23357[#23357])
+* Replace blocking calls in ExpandCollapseSearchResponseListener by asynchronous requests {pull}23053[#23053] (issue: {issue}23048[#23048])
+* Ensure fixed serialization order of InnerHitBuilder {pull}22820[#22820] (issue: {issue}22808[#22808])
+* Improve concurrency of ShardCoreKeyMap. {pull}22316[#22316]
+* Make `-0` compare less than `+0` consistently. {pull}22173[#22173] (issue: {issue}22167[#22167])
+* Fix boost_mode propagation when the function score query builder is rewritten {pull}22172[#22172] (issue: {issue}22138[#22138])
+* FiltersAggregationBuilder: rewriting filter queries, the same way as in FilterAggregationBuilder {pull}22076[#22076]
+* Fix cross_fields type on multi_match query with synonyms {pull}21638[#21638] (issue: {issue}21633[#21633])
+* Fix match_phrase_prefix on boosted fields {pull}21623[#21623] (issue: {issue}21613[#21613])
+* Respect default search timeout {pull}21599[#21599] (issues: {issue}12211[#12211], {issue}21595[#21595])
+* Remove LateParsingQuery to prevent timestamp access after context is frozen {pull}21328[#21328] (issue: {issue}21295[#21295])
+* Make range queries round up upper bounds again. {pull}20582[#20582] (issues: {issue}20579[#20579], {issue}8889[#8889])
{pull}20582[#20582] (issues: {issue}20579[#20579], {issue}8889[#8889]) +* Throw error when trying to fetch fields from source and source is disabled {pull}20424[#20424] (issues: {issue}20093[#20093], {issue}20408[#20408]) + +Search Templates:: +* No longer add illegal content type option to stored search templates {pull}24251[#24251] (issue: {issue}24227[#24227]) +* SearchTemplateRequest to implement CompositeIndicesRequest {pull}21865[#21865] (issue: {issue}21747[#21747]) + +Settings:: +* Do not set path.data in environment if not set {pull}24132[#24132] (issue: {issue}24099[#24099]) +* Correct handling of default and array settings {pull}24074[#24074] (issues: {issue}23981[#23981], {issue}24052[#24052]) +* Fix merge scheduler config settings {pull}23391[#23391] +* Settings: Fix keystore cli prompting for yes/no to handle console returning null {pull}23320[#23320] +* Expose `search.highlight.term_vector_multi_value` as a node level setting {pull}22999[#22999] +* NPE when no setting name passed to elasticsearch-keystore {pull}22609[#22609] +* Handle spaces in `action.auto_create_index` gracefully {pull}21790[#21790] (issue: {issue}21449[#21449]) +* Fix settings diff generation for affix and group settings {pull}21788[#21788] +* Don't reset non-dynamic settings unless explicitly requested {pull}21646[#21646] (issue: {issue}21593[#21593]) +* Fix Setting.timeValue() method {pull}20696[#20696] (issue: {issue}20662[#20662]) +* Add a hard limit for `index.number_of_shard` {pull}20682[#20682] +* Include complex settings in settings requests {pull}20622[#20622] + +Snapshot/Restore:: +* Fixes maintaining the shards a snapshot is waiting on {pull}24289[#24289] +* Fixes snapshot status on failed snapshots {pull}23833[#23833] (issue: {issue}23716[#23716]) +* Fixes snapshot deletion handling on in-progress snapshot failure {pull}23703[#23703] (issue: {issue}23663[#23663]) +* Prioritize listing index-N blobs over index.latest in reading snapshots {pull}23333[#23333] +* Gracefully handles pre 2.x compressed snapshots {pull}22267[#22267] +* URLRepository should throw NoSuchFileException to correctly adhere to readBlob contract {pull}22069[#22069] (issue: {issue}22004[#22004]) +* Fixes shard level snapshot metadata loading when index-N file is missing {pull}21813[#21813] +* Ensures cleanup of temporary index-* generational blobs during snapshotting {pull}21469[#21469] (issue: {issue}21462[#21462]) +* Fixes get snapshot duplicates when asking for _all {pull}21340[#21340] (issue: {issue}21335[#21335]) + +Stats:: +* Avoid overflow when computing total FS stats {pull}23641[#23641] +* Handle existence of cgroup version 2 hierarchy {pull}23493[#23493] (issue: {issue}23486[#23486]) +* Handle long overflow when adding paths' totals {pull}23293[#23293] (issue: {issue}23093[#23093]) +* Fix control group pattern {pull}23219[#23219] (issue: {issue}23218[#23218]) +* Fix total disk bytes returning negative value {pull}23093[#23093] +* Implement stats for geo_point and geo_shape field {pull}22391[#22391] (issue: {issue}22384[#22384]) +* Use reader for doc stats {pull}22317[#22317] (issue: {issue}22285[#22285]) +* Avoid NPE in NodeService#stats if HTTP is disabled {pull}22060[#22060] (issue: {issue}22058[#22058]) +* Add support for "include_segment_file_sizes" in indices stats REST handler {pull}21879[#21879] (issue: {issue}21878[#21878]) +* Remove output_uuid parameter from cluster stats {pull}21020[#21020] (issue: {issue}20722[#20722]) +* Fix FieldStats deserialization of `ip` field {pull}20522[#20522] (issue: 
{issue}20516[#20516]) + +Task Manager:: +* Task Management: Make TaskInfo parsing forwards compatible {pull}24073[#24073] (issue: {issue}23250[#23250]) +* Fix hanging cancelling task with no children {pull}22796[#22796] +* Fix broken TaskInfo.toString() {pull}22698[#22698] (issue: {issue}22387[#22387]) +* Task cancellation command should wait for all child nodes to receive cancellation request before returning {pull}21397[#21397] (issue: {issue}21126[#21126]) + +Term Vectors:: +* Fix _termvectors with preference to not hit NPE {pull}21959[#21959] +* Return correct term statistics when a field is not found in a shard {pull}21922[#21922] (issue: {issue}21906[#21906]) + +Tribe Node:: +* Add socket permissions for tribe nodes {pull}21546[#21546] (issues: {issue}16392[#16392], {issue}21122[#21122]) + +[float] +=== Regressions + +Bulk:: +* Fix _bulk response when it can't create an index {pull}24048[#24048] (issues: {issue}22488[#22488], {issue}24028[#24028]) + +Core:: +* Source filtering: only accept array items if the previous include pattern matches {pull}22593[#22593] (issue: {issue}22557[#22557]) + +Highlighting:: +* Handle SynonymQuery extraction for the FastVectorHighlighter {pull}20829[#20829] (issue: {issue}20781[#20781]) + +Logging:: +* Restores the original default format of search slow log {pull}21770[#21770] (issue: {issue}21711[#21711]) + +Network:: +* You had one job Netty logging guard {pull}24469[#24469] (issues: {issue}5624[#5624], {issue}6568[#6568]) + +Plugin Discovery EC2:: +* Fix ec2 discovery when used with IAM profiles. {pull}21042[#21042] (issue: {issue}21039[#21039]) + +Plugin Repository S3:: +* Fix s3 repository when used with IAM profiles {pull}21058[#21058] (issue: {issue}21048[#21048]) + +Plugins:: +* Plugins: Add back user agent when downloading plugins {pull}20872[#20872] + +Search:: +* Handle specialized term queries in MappedFieldType.extractTerm(Query) {pull}21889[#21889] (issue: {issue}21882[#21882]) + +//[float] +//=== Known Issues + +[float] +=== Upgrades + +Aggregations:: +* Upgrade HDRHistogram to 2.1.9 {pull}23254[#23254] (issue: {issue}23239[#23239]) + +Core:: +* Upgrade to Lucene 6.5.0 {pull}23750[#23750] +* Upgrade from JNA 4.2.2 to JNA 4.4.0 {pull}23636[#23636] +* Upgrade to lucene-6.5.0-snapshot-d00c5ca {pull}23385[#23385] +* Upgrade to lucene-6.5.0-snapshot-f919485. {pull}23087[#23087] +* Upgrade to Lucene 6.4.0 {pull}22724[#22724] +* Update Jackson to 2.8.6 {pull}22596[#22596] (issue: {issue}22266[#22266]) +* Upgrade to lucene-6.4.0-snapshot-084f7a0. {pull}22413[#22413] +* Upgrade to lucene-6.4.0-snapshot-ec38570 {pull}21853[#21853] +* Upgrade to lucene-6.3.0. {pull}21464[#21464] + +Dates:: +* Update Joda Time to version 2.9.5 {pull}21468[#21468] (issues: {issue}20911[#20911], {issue}332[#332], {issue}373[#373], {issue}378[#378], {issue}379[#379], {issue}386[#386], {issue}394[#394], {issue}396[#396], {issue}397[#397], {issue}404[#404], {issue}69[#69]) + +Internal:: +* Upgrade to Lucene 6.4.1. 
{pull}22978[#22978] + +Logging:: +* Upgrade to Log4j 2.8.2 {pull}23995[#23995] +* Upgrade Log4j 2 to version 2.7 {pull}20805[#20805] (issue: {issue}20304[#20304]) + +Network:: +* Upgrade Netty to 4.1.10.Final {pull}24414[#24414] +* Upgrade to Netty 4.1.9 {pull}23540[#23540] (issues: {issue}23172[#23172], {issue}6308[#6308], {issue}6374[#6374]) +* Upgrade to Netty 4.1.8 {pull}23055[#23055] +* Upgrade to Netty 4.1.7 {pull}22587[#22587] +* Upgrade to Netty 4.1.6 {pull}21051[#21051] +//[float] +//=== Regressions + +Plugin Repository Azure:: +* Update to Azure Storage 5.0.0 {pull}23517[#23517] (issue: {issue}23448[#23448]) \ No newline at end of file diff --git a/docs/reference/release-notes/highlights.asciidoc b/docs/reference/release-notes/highlights.asciidoc index bde0a6ee9dfca..a0372187e80d5 100644 --- a/docs/reference/release-notes/highlights.asciidoc +++ b/docs/reference/release-notes/highlights.asciidoc @@ -1,5 +1,8 @@ [[release-highlights]] = {es} Release Highlights +++++ +Release Highlights +++++ [partintro] -- From 511856dd2e513e6641a8983f6a3c367fc3d0b51d Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Tue, 15 May 2018 08:06:58 -0400 Subject: [PATCH 03/44] HLRestClient: Follow-up for put index template api (#30592) This commit addresses some comments given after the original PR was in. Follow-up #30400 --- .../template/put/PutIndexTemplateRequest.java | 16 +++-- .../put/PutIndexTemplateRequestTests.java | 62 +++++++++++-------- .../put/PutIndexTemplateResponseTests.java | 45 ++++++++++++++ 3 files changed, 92 insertions(+), 31 deletions(-) create mode 100644 server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponseTests.java diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index b018e24a565b8..5d4e558dbb25b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ToXContent; @@ -45,6 +46,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.common.xcontent.support.XContentMapValues; import java.io.IOException; @@ -543,9 +545,6 @@ public void writeTo(StreamOutput out) throws IOException { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - if (customs.isEmpty() == false) { - throw new IllegalArgumentException("Custom data type is no longer supported in index template [" + customs + "]"); - } builder.field("index_patterns", indexPatterns); builder.field("order", order); if (version != null) { @@ -558,8 +557,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.startObject("mappings"); for (Map.Entry entry : 
mappings.entrySet()) { - Map mapping = XContentHelper.convertToMap(new BytesArray(entry.getValue()), false).v2(); - builder.field(entry.getKey(), mapping); + builder.field(entry.getKey()); + XContentParser parser = JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, + DeprecationHandler.THROW_UNSUPPORTED_OPERATION, entry.getValue()); + builder.copyCurrentStructure(parser); } builder.endObject(); @@ -568,6 +569,11 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws alias.toXContent(builder, params); } builder.endObject(); + + for (Map.Entry entry : customs.entrySet()) { + builder.field(entry.getKey(), entry.getValue(), params); + } + return builder; } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java index 294213452596f..577a8b55e61a3 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequestTests.java @@ -23,18 +23,18 @@ import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.yaml.YamlXContent; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.AbstractXContentTestCase; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Arrays; import java.util.Base64; import java.util.Collections; @@ -45,7 +45,7 @@ import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.core.Is.is; -public class PutIndexTemplateRequestTests extends ESTestCase { +public class PutIndexTemplateRequestTests extends AbstractXContentTestCase { // bwc for #21009 public void testPutIndexTemplateRequest510() throws IOException { @@ -137,13 +137,14 @@ public void testValidateErrorMessage() throws Exception { assertThat(noError, is(nullValue())); } - private PutIndexTemplateRequest randomPutIndexTemplateRequest() throws IOException { + @Override + protected PutIndexTemplateRequest createTestInstance() { PutIndexTemplateRequest request = new PutIndexTemplateRequest(); request.name("test"); - if (randomBoolean()){ + if (randomBoolean()) { request.version(randomInt()); } - if (randomBoolean()){ + if (randomBoolean()) { request.order(randomInt()); } request.patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); @@ -159,30 +160,39 @@ private PutIndexTemplateRequest randomPutIndexTemplateRequest() throws IOExcepti request.alias(alias); } if (randomBoolean()) { - request.mapping("doc", XContentFactory.jsonBuilder().startObject() - .startObject("doc").startObject("properties") - .startObject("field-" + randomInt()).field("type", randomFrom("keyword", "text")).endObject() - .endObject().endObject().endObject()); + try { + 
request.mapping("doc", XContentFactory.jsonBuilder().startObject() + .startObject("doc").startObject("properties") + .startObject("field-" + randomInt()).field("type", randomFrom("keyword", "text")).endObject() + .endObject().endObject().endObject()); + } catch (IOException ex) { + throw new UncheckedIOException(ex); + } } - if (randomBoolean()){ + if (randomBoolean()) { request.settings(Settings.builder().put("setting1", randomLong()).put("setting2", randomTimeValue()).build()); } return request; } - public void testFromToXContentPutTemplateRequest() throws Exception { - for (int i = 0; i < 10; i++) { - PutIndexTemplateRequest expected = randomPutIndexTemplateRequest(); - XContentType xContentType = randomFrom(XContentType.values()); - BytesReference shuffled = toShuffledXContent(expected, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean()); - PutIndexTemplateRequest parsed = new PutIndexTemplateRequest().source(shuffled, xContentType); - assertNotSame(expected, parsed); - assertThat(parsed.version(), equalTo(expected.version())); - assertThat(parsed.order(), equalTo(expected.order())); - assertThat(parsed.patterns(), equalTo(expected.patterns())); - assertThat(parsed.aliases(), equalTo(expected.aliases())); - assertThat(parsed.mappings(), equalTo(expected.mappings())); - assertThat(parsed.settings(), equalTo(expected.settings())); - } + @Override + protected PutIndexTemplateRequest doParseInstance(XContentParser parser) throws IOException { + return new PutIndexTemplateRequest().source(parser.map()); + } + + @Override + protected void assertEqualInstances(PutIndexTemplateRequest expected, PutIndexTemplateRequest actual) { + assertNotSame(expected, actual); + assertThat(actual.version(), equalTo(expected.version())); + assertThat(actual.order(), equalTo(expected.order())); + assertThat(actual.patterns(), equalTo(expected.patterns())); + assertThat(actual.aliases(), equalTo(expected.aliases())); + assertThat(actual.mappings(), equalTo(expected.mappings())); + assertThat(actual.settings(), equalTo(expected.settings())); + } + + @Override + protected boolean supportsUnknownFields() { + return false; } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponseTests.java new file mode 100644 index 0000000000000..096d62bf2bb5b --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateResponseTests.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+
+package org.elasticsearch.action.admin.indices.template.put;
+
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+
+public class PutIndexTemplateResponseTests extends AbstractStreamableXContentTestCase {
+    @Override
+    protected PutIndexTemplateResponse doParseInstance(XContentParser parser) {
+        return PutIndexTemplateResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected PutIndexTemplateResponse createTestInstance() {
+        return new PutIndexTemplateResponse(randomBoolean());
+    }
+
+    @Override
+    protected PutIndexTemplateResponse createBlankInstance() {
+        return new PutIndexTemplateResponse();
+    }
+
+    @Override
+    protected PutIndexTemplateResponse mutateInstance(PutIndexTemplateResponse response) {
+        return new PutIndexTemplateResponse(response.isAcknowledged() == false);
+    }
+}

From 017779b4ac905f359fd3192beadae2124c79f5a8 Mon Sep 17 00:00:00 2001
From: Alexander Reelsen
Date: Tue, 15 May 2018 19:35:20 +0200
Subject: [PATCH 04/44] Watcher: Prevent triggering watch when using activate
 API (#30613)

A wrong check in the activate watch API could lead to watches being
triggered on the wrong nodes. The check was supposed to determine
whether watch execution was already distributed in 6.x and, only if it
was not, to trigger the watch locally. The if-condition however was
broken, so the watch was triggered locally only when distributed watch
execution was actually enabled.
---
 .../TransportActivateWatchAction.java         |   5 +-
 .../TransportActivateWatchActionTests.java    | 188 ++++++++++++++++++
 2 files changed, 192 insertions(+), 1 deletion(-)
 create mode 100644 x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchActionTests.java

diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchAction.java
index 776c76524796b..7178773f61624 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/activate/TransportActivateWatchAction.java
@@ -54,6 +54,7 @@ public class TransportActivateWatchAction extends WatcherTransportAction {
+            UpdateRequest request = (UpdateRequest) invocation.getArguments()[0];
+            ActionListener listener = (ActionListener) invocation.getArguments()[1];
+
+            ShardId shardId = new ShardId(new Index(Watch.INDEX, "uuid"), 0);
+            listener.onResponse(new UpdateResponse(shardId, request.type(), request.id(), request.version(),
+                DocWriteResponse.Result.UPDATED));
+
+            return null;
+        }).when(client).update(any(), any());
+
+        // mock a get response that calls the listener
+        doAnswer(invocation -> {
+            GetRequest request = (GetRequest) invocation.getArguments()[0];
+            ActionListener listener = (ActionListener) invocation.getArguments()[1];
+
+            GetResult getResult = new GetResult(request.index(), request.type(), request.id(), request.version(), true, null,
+                Collections.emptyMap());
+            listener.onResponse(new GetResponse(getResult));
+
+            return null;
+        }).when(client).get(any(), any());
+
+        action = new TransportActivateWatchAction(Settings.EMPTY, transportService, threadPool,
+            new ActionFilters(Collections.emptySet()), new IndexNameExpressionResolver(Settings.EMPTY), new ClockMock(),
+            new XPackLicenseState(Settings.EMPTY),
parser, clusterService, client, triggerService);
+    }
+
+    // when running in distributed mode, watches are only triggered by the indexing operation listener
+    public void testWatchesAreNotTriggeredWhenDistributed() throws Exception {
+        boolean watchActivated = randomBoolean();
+        ActivateWatchRequest request = new ActivateWatchRequest("watch_id", watchActivated);
+        ActionListener listener = PlainActionFuture.newFuture();
+
+        // add a few nodes, with current versions
+        ClusterState clusterState = ClusterState.builder(new ClusterName("my_cluster"))
+            .nodes(DiscoveryNodes.builder()
+                .masterNodeId("node_1")
+                .localNodeId(randomFrom("node_1", "node_2"))
+                .add(newNode("node_1", Version.CURRENT))
+                .add(newNode("node_2", Version.CURRENT)))
+            .build();
+        when(clusterService.state()).thenReturn(clusterState);
+        mockWatchStatus(watchActivated);
+
+        action.masterOperation(request, clusterState, listener);
+
+        verifyNoMoreInteractions(triggerService);
+    }
+
+    public void testWatchesAreNotTriggeredOnNonMasterWhenNotDistributed() throws Exception {
+        boolean watchActivated = randomBoolean();
+        ActivateWatchRequest request = new ActivateWatchRequest("watch_id", watchActivated);
+        ActionListener listener = PlainActionFuture.newFuture();
+
+        // add a few nodes, one of them on an older version, so watch execution is not distributed
+        ClusterState clusterState = ClusterState.builder(new ClusterName("my_cluster"))
+            .nodes(DiscoveryNodes.builder()
+                .masterNodeId("node_2")
+                .localNodeId("node_1")
+                .add(newNode("node_1", Version.CURRENT))
+                .add(newNode("node_2", Version.V_5_6_10)))
+            .build();
+        when(clusterService.state()).thenReturn(clusterState);
+        mockWatchStatus(watchActivated);
+
+        action.masterOperation(request, clusterState, listener);
+
+        verifyNoMoreInteractions(triggerService);
+    }
+
+    // we trigger on the master node only, not on any other node
+    public void testWatchesAreTriggeredOnMasterWhenNotDistributed() throws Exception {
+        boolean watchActivated = randomBoolean();
+        ActivateWatchRequest request = new ActivateWatchRequest("watch_id", watchActivated);
+        ActionListener listener = PlainActionFuture.newFuture();
+
+        // add a few nodes, one of them on an older version, so watch execution is not distributed
+        ClusterState clusterState = ClusterState.builder(new ClusterName("my_cluster"))
+            .nodes(DiscoveryNodes.builder()
+                .masterNodeId("node_1")
+                .localNodeId("node_1")
+                .add(newNode("node_1", Version.CURRENT))
+                .add(newNode("node_2", Version.V_5_6_10)))
+            .build();
+        when(clusterService.state()).thenReturn(clusterState);
+        mockWatchStatus(watchActivated);
+
+        action.masterOperation(request, clusterState, listener);
+
+        if (watchActivated) {
+            verify(triggerService).add(eq(watch));
+        } else {
+            verify(triggerService).remove(eq("watch_id"));
+        }
+    }
+
+    private void mockWatchStatus(boolean active) {
+        WatchStatus status = mock(WatchStatus.class);
+        WatchStatus.State state = new WatchStatus.State(active, DateTime.now(DateTimeZone.UTC));
+        when(status.state()).thenReturn(state);
+        when(watch.status()).thenReturn(status);
+    }
+
+    private static DiscoveryNode newNode(String nodeId, Version version) {
+        return new DiscoveryNode(nodeId, ESTestCase.buildNewFakeTransportAddress(), Collections.emptyMap(),
+            new HashSet<>(asList(DiscoveryNode.Role.values())), version);
+    }
+}

From 873d380f455811c6f3002ad3436f310e3e3dfb78 Mon Sep 17 00:00:00 2001
From: Albert Zaharovits
Date: Tue, 15 May 2018 18:22:58 +0300
Subject: [PATCH 05/44] Repository GCS plugin new client library (#30168)

This does away with the deprecated
`com.google.api-client:google-api-client:1.23` and replaces it with
`com.google.cloud:google-cloud-storage:1.28.0`. It also changes security permissions for the repository-gcs plugin. Closes: #29259 --- docs/plugins/repository-gcs.asciidoc | 12 +- plugins/repository-gcs/build.gradle | 213 +++++- .../licenses/api-common-1.5.0.jar.sha1 | 1 + .../licenses/api-common-LICENSE.txt | 27 + .../licenses/api-common-NOTICE.txt | 0 .../licenses/commons-codec-1.10.jar.sha1 | 1 - .../licenses/commons-logging-1.1.3.jar.sha1 | 1 - .../licenses/gax-1.25.0.jar.sha1 | 1 + .../repository-gcs/licenses/gax-LICENSE.txt | 27 + .../repository-gcs/licenses/gax-NOTICE.txt | 0 .../licenses/gax-httpjson-0.40.0.jar.sha1 | 1 + .../licenses/gax-httpjson-LICENSE.txt | 27 + .../licenses/gax-httpjson-NOTICE.txt | 0 .../licenses/google-api-client-LICENSE.txt | 201 ++++++ .../licenses/google-api-client-NOTICE.txt | 0 ...api-services-storage-v1-rev115-LICENSE.txt | 201 ++++++ ...-api-services-storage-v1-rev115-NOTICE.txt | 0 .../licenses/google-auth-LICENSE.txt | 28 + .../licenses/google-auth-NOTICE.txt | 0 ...le-auth-library-credentials-0.9.1.jar.sha1 | 1 + ...le-auth-library-oauth2-http-0.9.1.jar.sha1 | 1 + .../licenses/google-cloud-LICENSE.txt | 201 ++++++ .../licenses/google-cloud-NOTICE.txt | 0 .../google-cloud-core-1.28.0.jar.sha1 | 1 + .../google-cloud-core-http-1.28.0.jar.sha1 | 1 + .../google-cloud-storage-1.28.0.jar.sha1 | 1 + ...le-LICENSE.txt => google-http-LICENSE.txt} | 0 .../licenses/google-http-NOTICE.txt | 0 ...ogle-http-client-appengine-1.23.0.jar.sha1 | 1 + ...google-http-client-jackson-1.23.0.jar.sha1 | 1 + .../licenses/google-oauth-client-LICENSE.txt | 28 + .../licenses/google-oauth-client-NOTICE.txt | 0 .../licenses/grpc-context-1.9.0.jar.sha1 | 1 + ...c-LICENSE.txt => grpc-context-LICENSE.txt} | 0 .../licenses/grpc-context-NOTICE.txt | 0 .../licenses/guava-20.0.jar.sha1 | 1 + .../repository-gcs/licenses/guava-LICENSE.txt | 202 ++++++ .../repository-gcs/licenses/guava-NOTICE.txt | 0 .../licenses/httpclient-4.5.2.jar.sha1 | 1 - .../licenses/httpcore-4.4.5.jar.sha1 | 1 - .../licenses/jackson-core-asl-1.9.13.jar.sha1 | 1 + .../licenses/jackson-core-asl-LICENSE.txt | 202 ++++++ .../licenses/jackson-core-asl-NOTICE.txt | 0 .../licenses/old/commons-codec-LICENSE.txt | 202 ++++++ .../{ => old}/commons-codec-NOTICE.txt | 0 .../{ => old}/commons-logging-LICENSE.txt | 0 .../{ => old}/commons-logging-NOTICE.txt | 0 .../licenses/old/google-LICENSE.txt | 201 ++++++ .../licenses/{ => old}/google-NOTICE.txt | 0 .../licenses/{ => old}/httpclient-LICENSE.txt | 0 .../licenses/{ => old}/httpclient-NOTICE.txt | 0 .../licenses/{ => old}/httpcore-LICENSE.txt | 0 .../licenses/{ => old}/httpcore-NOTICE.txt | 0 .../licenses/opencensus-LICENSE.txt | 202 ++++++ .../licenses/opencensus-NOTICE.txt | 0 .../licenses/opencensus-api-0.11.1.jar.sha1 | 1 + ...encensus-contrib-http-util-0.11.1.jar.sha1 | 1 + .../proto-google-common-protos-1.8.0.jar.sha1 | 1 + .../proto-google-common-protos-LICENSE.txt | 202 ++++++ .../proto-google-common-protos-NOTICE.txt | 0 .../licenses/threetenbp-1.3.6.jar.sha1 | 1 + .../licenses/threetenbp-LICENSE.txt | 31 + .../licenses/threetenbp-NOTICE.txt | 0 .../qa/google-cloud-storage/build.gradle | 8 +- .../gcs/GoogleCloudStorageTestServer.java | 346 +++++++--- .../gcs/GoogleCloudStorageBlobStore.java | 394 +++++------ .../gcs/GoogleCloudStorageClientSettings.java | 99 ++- .../gcs/GoogleCloudStoragePlugin.java | 78 +-- .../gcs/GoogleCloudStorageRepository.java | 49 +- .../gcs/GoogleCloudStorageService.java | 191 +++--- .../plugin-metadata/plugin-security.policy | 7 +- 
.../cloud/storage/StorageRpcOptionUtils.java | 54 ++ .../cloud/storage/StorageTestUtils.java | 37 ++ ...eCloudStorageBlobStoreRepositoryTests.java | 8 +- ...GoogleCloudStorageClientSettingsTests.java | 132 ++-- ...loudStorageRepositoryDeprecationTests.java | 5 +- .../gcs/GoogleCloudStorageServiceTests.java | 121 ++-- .../repositories/gcs/MockStorage.java | 627 ++++++++++++------ 78 files changed, 3403 insertions(+), 981 deletions(-) create mode 100644 plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/api-common-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/api-common-NOTICE.txt delete mode 100644 plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/gax-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/gax-httpjson-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/gax-httpjson-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/google-api-client-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/google-api-client-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/google-auth-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/google-auth-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-cloud-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/google-cloud-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 rename plugins/repository-gcs/licenses/{google-LICENSE.txt => google-http-LICENSE.txt} (100%) create mode 100644 plugins/repository-gcs/licenses/google-http-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/google-oauth-client-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/google-oauth-client-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 rename plugins/repository-gcs/licenses/{commons-codec-LICENSE.txt => grpc-context-LICENSE.txt} (100%) create mode 100644 plugins/repository-gcs/licenses/grpc-context-NOTICE.txt create mode 100644 plugins/repository-gcs/licenses/guava-20.0.jar.sha1 create mode 100644 plugins/repository-gcs/licenses/guava-LICENSE.txt create mode 100644 plugins/repository-gcs/licenses/guava-NOTICE.txt delete mode 100644 plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 delete mode 100644 plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 create mode 100644 
plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt
 create mode 100644 plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt
 create mode 100644 plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt
 rename plugins/repository-gcs/licenses/{ => old}/commons-codec-NOTICE.txt (100%)
 rename plugins/repository-gcs/licenses/{ => old}/commons-logging-LICENSE.txt (100%)
 rename plugins/repository-gcs/licenses/{ => old}/commons-logging-NOTICE.txt (100%)
 create mode 100644 plugins/repository-gcs/licenses/old/google-LICENSE.txt
 rename plugins/repository-gcs/licenses/{ => old}/google-NOTICE.txt (100%)
 rename plugins/repository-gcs/licenses/{ => old}/httpclient-LICENSE.txt (100%)
 rename plugins/repository-gcs/licenses/{ => old}/httpclient-NOTICE.txt (100%)
 rename plugins/repository-gcs/licenses/{ => old}/httpcore-LICENSE.txt (100%)
 rename plugins/repository-gcs/licenses/{ => old}/httpcore-NOTICE.txt (100%)
 create mode 100644 plugins/repository-gcs/licenses/opencensus-LICENSE.txt
 create mode 100644 plugins/repository-gcs/licenses/opencensus-NOTICE.txt
 create mode 100644 plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt
 create mode 100644 plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt
 create mode 100644 plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1
 create mode 100644 plugins/repository-gcs/licenses/threetenbp-LICENSE.txt
 create mode 100644 plugins/repository-gcs/licenses/threetenbp-NOTICE.txt
 create mode 100644 plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java
 create mode 100644 plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageTestUtils.java

diff --git a/docs/plugins/repository-gcs.asciidoc b/docs/plugins/repository-gcs.asciidoc
index 8b9a742277563..8cbec8ce4f2c6 100644
--- a/docs/plugins/repository-gcs.asciidoc
+++ b/docs/plugins/repository-gcs.asciidoc
@@ -84,11 +84,7 @@ A service account file looks like this:
   "private_key_id": "...",
   "private_key": "-----BEGIN PRIVATE KEY-----\n...\n-----END PRIVATE KEY-----\n",
   "client_email": "service-account-for-your-repository@your-project-id.iam.gserviceaccount.com",
-  "client_id": "...",
-  "auth_uri": "https://accounts.google.com/o/oauth2/auth",
-  "token_uri": "https://accounts.google.com/o/oauth2/token",
-  "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
-  "client_x509_cert_url": "..."
+  "client_id": "..."
 }
 ----
 // NOTCONSOLE
@@ -178,6 +174,12 @@ are marked as `Secure`.
 a custom name can be useful to authenticate your cluster when requests
 statistics are logged in the Google Cloud Platform. Default to `repository-gcs`

+`project_id`::
+
+    The Google Cloud project id. This will be automatically inferred from the credentials file but
+    can be specified explicitly. For example, it can be used to switch between projects when the
+    same credentials are usable for both the production and the development projects.
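+
+    As a rough illustration (the project name is a placeholder, and the
+    `gcs.client.<name>.*` prefix for `project_id` is assumed to match the other
+    client settings), the project could be pinned per client in `elasticsearch.yml`:
+
+[source,yaml]
+----
+gcs.client.default.project_id: my-dev-project
+----
+// NOTCONSOLE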
+ [[repository-gcs-repository]] ==== Repository Settings diff --git a/plugins/repository-gcs/build.gradle b/plugins/repository-gcs/build.gradle index e164a8553f81f..07ef4b4be5e62 100644 --- a/plugins/repository-gcs/build.gradle +++ b/plugins/repository-gcs/build.gradle @@ -22,38 +22,207 @@ esplugin { classname 'org.elasticsearch.repositories.gcs.GoogleCloudStoragePlugin' } -versions << [ - 'google': '1.23.0', -] - dependencies { - compile "com.google.apis:google-api-services-storage:v1-rev115-${versions.google}" - compile "com.google.api-client:google-api-client:${versions.google}" - compile "com.google.oauth-client:google-oauth-client:${versions.google}" - compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" - compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" - compile "commons-logging:commons-logging:${versions.commonslogging}" - compile "commons-codec:commons-codec:${versions.commonscodec}" - compile "com.google.http-client:google-http-client:${versions.google}" - compile "com.google.http-client:google-http-client-jackson2:${versions.google}" + compile 'com.google.cloud:google-cloud-storage:1.28.0' + compile 'com.google.cloud:google-cloud-core:1.28.0' + compile 'com.google.cloud:google-cloud-core-http:1.28.0' + compile 'com.google.auth:google-auth-library-oauth2-http:0.9.1' + compile 'com.google.auth:google-auth-library-credentials:0.9.1' + compile 'com.google.oauth-client:google-oauth-client:1.23.0' + compile 'com.google.http-client:google-http-client:1.23.0' + compile 'com.google.http-client:google-http-client-jackson:1.23.0' + compile 'com.google.http-client:google-http-client-jackson2:1.23.0' + compile 'com.google.http-client:google-http-client-appengine:1.23.0' + compile 'com.google.api-client:google-api-client:1.23.0' + compile 'com.google.api:gax:1.25.0' + compile 'com.google.api:gax-httpjson:0.40.0' + compile 'com.google.api:api-common:1.5.0' + compile 'com.google.api.grpc:proto-google-common-protos:1.8.0' + compile 'com.google.guava:guava:20.0' + compile 'com.google.apis:google-api-services-storage:v1-rev115-1.23.0' + compile 'org.codehaus.jackson:jackson-core-asl:1.9.13' + compile 'io.grpc:grpc-context:1.9.0' + compile 'io.opencensus:opencensus-api:0.11.1' + compile 'io.opencensus:opencensus-contrib-http-util:0.11.1' + compile 'org.threeten:threetenbp:1.3.6' } dependencyLicenses { - mapping from: /google-.*/, to: 'google' + mapping from: /google-cloud-.*/, to: 'google-cloud' + mapping from: /google-auth-.*/, to: 'google-auth' + mapping from: /google-http-.*/, to: 'google-http' + mapping from: /opencensus.*/, to: 'opencensus' } thirdPartyAudit.excludes = [ + // uses internal java api: sun.misc.Unsafe + 'com.google.common.cache.Striped64', + 'com.google.common.cache.Striped64$1', + 'com.google.common.cache.Striped64$Cell', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$2', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray$3', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper', + 'com.google.common.util.concurrent.AbstractFuture$UnsafeAtomicHelper$1', + 'com.google.common.hash.LittleEndianByteArray$UnsafeByteArray', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator', + 'com.google.common.primitives.UnsignedBytes$LexicographicalComparatorHolder$UnsafeComparator$1', // classes are missing - 'com.google.common.base.Splitter', - 'com.google.common.collect.Lists', - 
'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener', - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', + 'com.google.appengine.api.datastore.Blob', + 'com.google.appengine.api.datastore.DatastoreService', + 'com.google.appengine.api.datastore.DatastoreServiceFactory', + 'com.google.appengine.api.datastore.Entity', + 'com.google.appengine.api.datastore.Key', + 'com.google.appengine.api.datastore.KeyFactory', + 'com.google.appengine.api.datastore.PreparedQuery', + 'com.google.appengine.api.datastore.Query', + 'com.google.appengine.api.memcache.Expiration', + 'com.google.appengine.api.memcache.MemcacheService', + 'com.google.appengine.api.memcache.MemcacheServiceFactory', + 'com.google.appengine.api.urlfetch.FetchOptions$Builder', + 'com.google.appengine.api.urlfetch.FetchOptions', + 'com.google.appengine.api.urlfetch.HTTPHeader', + 'com.google.appengine.api.urlfetch.HTTPMethod', + 'com.google.appengine.api.urlfetch.HTTPRequest', + 'com.google.appengine.api.urlfetch.HTTPResponse', + 'com.google.appengine.api.urlfetch.URLFetchService', + 'com.google.appengine.api.urlfetch.URLFetchServiceFactory', + 'com.google.gson.Gson', + 'com.google.gson.GsonBuilder', + 'com.google.gson.TypeAdapter', + 'com.google.gson.stream.JsonReader', + 'com.google.gson.stream.JsonWriter', + 'com.google.iam.v1.Binding$Builder', + 'com.google.iam.v1.Binding', + 'com.google.iam.v1.Policy$Builder', + 'com.google.iam.v1.Policy', + 'com.google.protobuf.AbstractMessageLite$Builder', + 'com.google.protobuf.AbstractParser', + 'com.google.protobuf.Any$Builder', + 'com.google.protobuf.Any', + 'com.google.protobuf.AnyOrBuilder', + 'com.google.protobuf.AnyProto', + 'com.google.protobuf.Api$Builder', + 'com.google.protobuf.Api', + 'com.google.protobuf.ApiOrBuilder', + 'com.google.protobuf.ApiProto', + 'com.google.protobuf.ByteString', + 'com.google.protobuf.CodedInputStream', + 'com.google.protobuf.CodedOutputStream', + 'com.google.protobuf.DescriptorProtos', + 'com.google.protobuf.Descriptors$Descriptor', + 'com.google.protobuf.Descriptors$EnumDescriptor', + 'com.google.protobuf.Descriptors$EnumValueDescriptor', + 'com.google.protobuf.Descriptors$FieldDescriptor', + 'com.google.protobuf.Descriptors$FileDescriptor$InternalDescriptorAssigner', + 'com.google.protobuf.Descriptors$FileDescriptor', + 'com.google.protobuf.Descriptors$OneofDescriptor', + 'com.google.protobuf.Duration$Builder', + 'com.google.protobuf.Duration', + 'com.google.protobuf.DurationOrBuilder', + 'com.google.protobuf.DurationProto', + 'com.google.protobuf.EmptyProto', + 'com.google.protobuf.Enum$Builder', + 'com.google.protobuf.Enum', + 'com.google.protobuf.EnumOrBuilder', + 'com.google.protobuf.ExtensionRegistry', + 'com.google.protobuf.ExtensionRegistryLite', + 'com.google.protobuf.FloatValue$Builder', + 'com.google.protobuf.FloatValue', + 'com.google.protobuf.FloatValueOrBuilder', + 'com.google.protobuf.GeneratedMessage$GeneratedExtension', + 'com.google.protobuf.GeneratedMessage', + 'com.google.protobuf.GeneratedMessageV3$Builder', + 'com.google.protobuf.GeneratedMessageV3$BuilderParent', + 'com.google.protobuf.GeneratedMessageV3$FieldAccessorTable', + 'com.google.protobuf.GeneratedMessageV3', + 'com.google.protobuf.Internal$EnumLite', + 'com.google.protobuf.Internal$EnumLiteMap', + 'com.google.protobuf.Internal', + 'com.google.protobuf.InvalidProtocolBufferException', + 'com.google.protobuf.LazyStringArrayList', + 'com.google.protobuf.LazyStringList', + 
'com.google.protobuf.MapEntry$Builder', + 'com.google.protobuf.MapEntry', + 'com.google.protobuf.MapField', + 'com.google.protobuf.Message', + 'com.google.protobuf.MessageOrBuilder', + 'com.google.protobuf.Parser', + 'com.google.protobuf.ProtocolMessageEnum', + 'com.google.protobuf.ProtocolStringList', + 'com.google.protobuf.RepeatedFieldBuilderV3', + 'com.google.protobuf.SingleFieldBuilderV3', + 'com.google.protobuf.Struct$Builder', + 'com.google.protobuf.Struct', + 'com.google.protobuf.StructOrBuilder', + 'com.google.protobuf.StructProto', + 'com.google.protobuf.Timestamp$Builder', + 'com.google.protobuf.Timestamp', + 'com.google.protobuf.TimestampProto', + 'com.google.protobuf.Type$Builder', + 'com.google.protobuf.Type', + 'com.google.protobuf.TypeOrBuilder', + 'com.google.protobuf.TypeProto', + 'com.google.protobuf.UInt32Value$Builder', + 'com.google.protobuf.UInt32Value', + 'com.google.protobuf.UInt32ValueOrBuilder', + 'com.google.protobuf.UnknownFieldSet$Builder', + 'com.google.protobuf.UnknownFieldSet', + 'com.google.protobuf.WireFormat$FieldType', + 'com.google.protobuf.WrappersProto', + 'com.google.protobuf.util.Timestamps', + 'org.apache.http.ConnectionReuseStrategy', + 'org.apache.http.Header', + 'org.apache.http.HttpEntity', + 'org.apache.http.HttpEntityEnclosingRequest', + 'org.apache.http.HttpHost', + 'org.apache.http.HttpRequest', + 'org.apache.http.HttpResponse', + 'org.apache.http.HttpVersion', + 'org.apache.http.RequestLine', + 'org.apache.http.StatusLine', + 'org.apache.http.client.AuthenticationHandler', + 'org.apache.http.client.HttpClient', + 'org.apache.http.client.HttpRequestRetryHandler', + 'org.apache.http.client.RedirectHandler', + 'org.apache.http.client.RequestDirector', + 'org.apache.http.client.UserTokenHandler', + 'org.apache.http.client.methods.HttpDelete', + 'org.apache.http.client.methods.HttpEntityEnclosingRequestBase', + 'org.apache.http.client.methods.HttpGet', + 'org.apache.http.client.methods.HttpHead', + 'org.apache.http.client.methods.HttpOptions', + 'org.apache.http.client.methods.HttpPost', + 'org.apache.http.client.methods.HttpPut', + 'org.apache.http.client.methods.HttpRequestBase', + 'org.apache.http.client.methods.HttpTrace', + 'org.apache.http.conn.ClientConnectionManager', + 'org.apache.http.conn.ConnectionKeepAliveStrategy', + 'org.apache.http.conn.params.ConnManagerParams', + 'org.apache.http.conn.params.ConnPerRouteBean', + 'org.apache.http.conn.params.ConnRouteParams', + 'org.apache.http.conn.routing.HttpRoutePlanner', + 'org.apache.http.conn.scheme.PlainSocketFactory', + 'org.apache.http.conn.scheme.Scheme', + 'org.apache.http.conn.scheme.SchemeRegistry', + 'org.apache.http.conn.ssl.SSLSocketFactory', + 'org.apache.http.conn.ssl.X509HostnameVerifier', + 'org.apache.http.entity.AbstractHttpEntity', + 'org.apache.http.impl.client.DefaultHttpClient', + 'org.apache.http.impl.client.DefaultHttpRequestRetryHandler', + 'org.apache.http.impl.conn.ProxySelectorRoutePlanner', + 'org.apache.http.impl.conn.tsccm.ThreadSafeClientConnManager', + 'org.apache.http.message.BasicHttpResponse', + 'org.apache.http.params.BasicHttpParams', + 'org.apache.http.params.HttpConnectionParams', + 'org.apache.http.params.HttpParams', + 'org.apache.http.params.HttpProtocolParams', + 'org.apache.http.protocol.HttpContext', + 'org.apache.http.protocol.HttpProcessor', + 'org.apache.http.protocol.HttpRequestExecutor' ] check { // also execute the QA tests when testing the plugin dependsOn 'qa:google-cloud-storage:check' -} \ No newline at end of file +} diff --git 
a/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 b/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 new file mode 100644 index 0000000000000..64435356e5eaf --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-1.5.0.jar.sha1 @@ -0,0 +1 @@ +7e537338d40a57ad469239acb6d828fa544fb52b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/api-common-LICENSE.txt b/plugins/repository-gcs/licenses/api-common-LICENSE.txt new file mode 100644 index 0000000000000..6d16b6578a2f0 --- /dev/null +++ b/plugins/repository-gcs/licenses/api-common-LICENSE.txt @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. +All rights reserved. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/plugins/repository-gcs/licenses/api-common-NOTICE.txt b/plugins/repository-gcs/licenses/api-common-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 b/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 deleted file mode 100644 index 3fe8682a1b0f9..0000000000000 --- a/plugins/repository-gcs/licenses/commons-codec-1.10.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 b/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 deleted file mode 100644 index 5b8f029e58293..0000000000000 --- a/plugins/repository-gcs/licenses/commons-logging-1.1.3.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 new file mode 100644 index 0000000000000..594177047c140 --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-1.25.0.jar.sha1 @@ -0,0 +1 @@ +36ab73c0b5d4a67447eb89a3174cc76ced150bd1 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-LICENSE.txt b/plugins/repository-gcs/licenses/gax-LICENSE.txt new file mode 100644 index 0000000000000..267561bb386de --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-LICENSE.txt @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/plugins/repository-gcs/licenses/gax-NOTICE.txt b/plugins/repository-gcs/licenses/gax-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 b/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 new file mode 100644 index 0000000000000..c251ea1dd956c --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-0.40.0.jar.sha1 @@ -0,0 +1 @@ +cb4bafbfd45b9d24efbb6138a31e37918fac015f \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/gax-httpjson-LICENSE.txt b/plugins/repository-gcs/licenses/gax-httpjson-LICENSE.txt new file mode 100644 index 0000000000000..267561bb386de --- /dev/null +++ b/plugins/repository-gcs/licenses/gax-httpjson-LICENSE.txt @@ -0,0 +1,27 @@ +Copyright 2016, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/repository-gcs/licenses/gax-httpjson-NOTICE.txt b/plugins/repository-gcs/licenses/gax-httpjson-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-api-client-LICENSE.txt b/plugins/repository-gcs/licenses/google-api-client-LICENSE.txt new file mode 100644 index 0000000000000..4eedc0116add7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-client-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/google-api-client-NOTICE.txt b/plugins/repository-gcs/licenses/google-api-client-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt new file mode 100644 index 0000000000000..4eedc0116add7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-NOTICE.txt b/plugins/repository-gcs/licenses/google-api-services-storage-v1-rev115-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-auth-LICENSE.txt b/plugins/repository-gcs/licenses/google-auth-LICENSE.txt new file mode 100644 index 0000000000000..12edf23c6711f --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/plugins/repository-gcs/licenses/google-auth-NOTICE.txt b/plugins/repository-gcs/licenses/google-auth-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 new file mode 100644 index 0000000000000..0922a53d2e356 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-credentials-0.9.1.jar.sha1 @@ -0,0 +1 @@ +25e0f45f3b3d1b4fccc8944845e51a7a4f359652 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 new file mode 100644 index 0000000000000..100a44c187218 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-auth-library-oauth2-http-0.9.1.jar.sha1 @@ -0,0 +1 @@ +c0fe3a39b0f28d59de1986b3c50f018cd7cb9ec2 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-LICENSE.txt b/plugins/repository-gcs/licenses/google-cloud-LICENSE.txt new file mode 100644 index 0000000000000..4eedc0116add7 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-LICENSE.txt @@ -0,0 +1,201 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/google-cloud-NOTICE.txt b/plugins/repository-gcs/licenses/google-cloud-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..071533f227839 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-1.28.0.jar.sha1 @@ -0,0 +1 @@ +c0e88c78ce17c92d76bf46345faf3fa68833b216 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..fed3fc257c32c --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-core-http-1.28.0.jar.sha1 @@ -0,0 +1 @@ +7b4559a9513abd98da50958c56a10f8ae00cb0f7 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 b/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 new file mode 100644 index 0000000000000..f49152ea05646 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-cloud-storage-1.28.0.jar.sha1 @@ -0,0 +1 @@ +226019ae816b42c59f1b06999aeeb73722b87200 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-LICENSE.txt b/plugins/repository-gcs/licenses/google-http-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/google-LICENSE.txt rename to plugins/repository-gcs/licenses/google-http-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/google-http-NOTICE.txt b/plugins/repository-gcs/licenses/google-http-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 new file mode 100644 index 0000000000000..823c3a85089a5 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-appengine-1.23.0.jar.sha1 @@ -0,0 +1 @@ +0eda0d0f758c1cc525866e52e1226c4eb579d130 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 b/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 new file mode 100644 index 0000000000000..85ba0ab798d05 --- /dev/null +++ b/plugins/repository-gcs/licenses/google-http-client-jackson-1.23.0.jar.sha1 @@ -0,0 +1 @@ +a72ea3a197937ef63a893e73df312dac0d813663 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/google-oauth-client-LICENSE.txt b/plugins/repository-gcs/licenses/google-oauth-client-LICENSE.txt new file mode 100644 index 0000000000000..12edf23c6711f --- /dev/null +++ b/plugins/repository-gcs/licenses/google-oauth-client-LICENSE.txt @@ -0,0 +1,28 @@ +Copyright 2014, Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/plugins/repository-gcs/licenses/google-oauth-client-NOTICE.txt b/plugins/repository-gcs/licenses/google-oauth-client-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 b/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 new file mode 100644 index 0000000000000..02bac0e492074 --- /dev/null +++ b/plugins/repository-gcs/licenses/grpc-context-1.9.0.jar.sha1 @@ -0,0 +1 @@ +28b0836f48c9705abf73829bbc536dba29a1329a \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/grpc-context-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-codec-LICENSE.txt rename to plugins/repository-gcs/licenses/grpc-context-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/grpc-context-NOTICE.txt b/plugins/repository-gcs/licenses/grpc-context-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 b/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 new file mode 100644 index 0000000000000..7b6ae09060b29 --- /dev/null +++ b/plugins/repository-gcs/licenses/guava-20.0.jar.sha1 @@ -0,0 +1 @@ +89507701249388e1ed5ddcf8c41f4ce1be7831ef \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/guava-LICENSE.txt b/plugins/repository-gcs/licenses/guava-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/guava-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/guava-NOTICE.txt b/plugins/repository-gcs/licenses/guava-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 b/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 deleted file mode 100644 index 6937112a09fb6..0000000000000 --- a/plugins/repository-gcs/licenses/httpclient-4.5.2.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 b/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 deleted file mode 100644 index 581726601745b..0000000000000 --- a/plugins/repository-gcs/licenses/httpcore-4.4.5.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e7501a1b34325abb00d17dde96150604a0658b54 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 new file mode 100644 index 0000000000000..c5016bf828d60 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-core-asl-1.9.13.jar.sha1 @@ -0,0 +1 @@ +3c304d70f42f832e0a86d45bd437f692129299a4 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt b/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/jackson-core-asl-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt b/plugins/repository-gcs/licenses/jackson-core-asl-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt b/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/old/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/commons-codec-NOTICE.txt b/plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-codec-NOTICE.txt rename to plugins/repository-gcs/licenses/old/commons-codec-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/commons-logging-LICENSE.txt b/plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-logging-LICENSE.txt rename to plugins/repository-gcs/licenses/old/commons-logging-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/commons-logging-NOTICE.txt b/plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/commons-logging-NOTICE.txt rename to plugins/repository-gcs/licenses/old/commons-logging-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/old/google-LICENSE.txt b/plugins/repository-gcs/licenses/old/google-LICENSE.txt new file mode 100644 index 0000000000000..980a15ac24eeb --- /dev/null +++ b/plugins/repository-gcs/licenses/old/google-LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/google-NOTICE.txt b/plugins/repository-gcs/licenses/old/google-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/google-NOTICE.txt rename to plugins/repository-gcs/licenses/old/google-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpclient-LICENSE.txt b/plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpclient-LICENSE.txt rename to plugins/repository-gcs/licenses/old/httpclient-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/httpclient-NOTICE.txt b/plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpclient-NOTICE.txt rename to plugins/repository-gcs/licenses/old/httpclient-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/httpcore-LICENSE.txt b/plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpcore-LICENSE.txt rename to plugins/repository-gcs/licenses/old/httpcore-LICENSE.txt diff --git a/plugins/repository-gcs/licenses/httpcore-NOTICE.txt b/plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt similarity index 100% rename from plugins/repository-gcs/licenses/httpcore-NOTICE.txt rename to plugins/repository-gcs/licenses/old/httpcore-NOTICE.txt diff --git a/plugins/repository-gcs/licenses/opencensus-LICENSE.txt b/plugins/repository-gcs/licenses/opencensus-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/plugins/repository-gcs/licenses/opencensus-NOTICE.txt b/plugins/repository-gcs/licenses/opencensus-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 new file mode 100644 index 0000000000000..61d8e3b148144 --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-api-0.11.1.jar.sha1 @@ -0,0 +1 @@ +54689fbf750a7f26e34fa1f1f96b883c53f51486 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 new file mode 100644 index 0000000000000..c0b04f0f8ccce --- /dev/null +++ b/plugins/repository-gcs/licenses/opencensus-contrib-http-util-0.11.1.jar.sha1 @@ -0,0 +1 @@ +82e572b41e81ecf58d0d1e9a3953a05aa8f9c84b \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 b/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 new file mode 100644 index 0000000000000..0a2dee4447e92 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-1.8.0.jar.sha1 @@ -0,0 +1 @@ +b3282312ba82536fc9a7778cabfde149a875e877 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt b/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/plugins/repository-gcs/licenses/proto-google-common-protos-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt b/plugins/repository-gcs/licenses/proto-google-common-protos-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 b/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 new file mode 100644 index 0000000000000..65c16fed4a07b --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-1.3.6.jar.sha1 @@ -0,0 +1 @@ +89dcc04a7e028c3c963413a71f950703cf51f057 \ No newline at end of file diff --git a/plugins/repository-gcs/licenses/threetenbp-LICENSE.txt b/plugins/repository-gcs/licenses/threetenbp-LICENSE.txt new file mode 100644 index 0000000000000..fcdfc8f0d0774 --- /dev/null +++ b/plugins/repository-gcs/licenses/threetenbp-LICENSE.txt @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2007-present, Stephen Colebourne & Michael Nascimento Santos + * + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * * Neither the name of JSR-310 nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/plugins/repository-gcs/licenses/threetenbp-NOTICE.txt b/plugins/repository-gcs/licenses/threetenbp-NOTICE.txt new file mode 100644 index 0000000000000..e69de29bb2d1d diff --git a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle index afd49b9f4dc73..34ec92a354277 100644 --- a/plugins/repository-gcs/qa/google-cloud-storage/build.gradle +++ b/plugins/repository-gcs/qa/google-cloud-storage/build.gradle @@ -69,7 +69,6 @@ task googleCloudStorageFixture(type: AntFixture) { /** A service account file that points to the Google Cloud Storage service emulated by the fixture **/ task createServiceAccountFile() { - dependsOn googleCloudStorageFixture doLast { KeyPairGenerator keyPairGenerator = KeyPairGenerator.getInstance("RSA") keyPairGenerator.initialize(1024) @@ -83,11 +82,7 @@ task createServiceAccountFile() { ' "private_key_id": "' + UUID.randomUUID().toString() + '",\n' + ' "private_key": "-----BEGIN PRIVATE KEY-----\\n' + encodedKey + '\\n-----END PRIVATE KEY-----\\n",\n' + ' "client_email": "integration_test@appspot.gserviceaccount.com",\n' + - ' "client_id": "123456789101112130594",\n' + - " \"auth_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/auth\",\n" + - " \"token_uri\": \"http://${googleCloudStorageFixture.addressAndPort}/o/oauth2/token\",\n" + - ' "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",\n' + - ' "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/integration_test%40appspot.gserviceaccount.com"\n' + + ' "client_id": "123456789101112130594"\n' + '}', 'UTF-8') } } @@ -109,6 +104,7 @@ integTestCluster { dependsOn createServiceAccountFile, googleCloudStorageFixture /* Use a closure on the string to delay evaluation until tests are executed */ setting 'gcs.client.integration_test.endpoint', "http://${ -> googleCloudStorageFixture.addressAndPort }" + setting 'gcs.client.integration_test.token_uri', "http://${ -> 
googleCloudStorageFixture.addressAndPort }/o/oauth2/token"
 } else {
   println "Using an external service to test the repository-gcs plugin"
 }
diff --git a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
index 2330e230f4505..a9832ae318de4 100644
--- a/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
+++ b/plugins/repository-gcs/qa/google-cloud-storage/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageTestServer.java
@@ -31,13 +31,18 @@
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import java.util.zip.GZIPInputStream;
 
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.singletonList;
@@ -52,7 +57,7 @@
  */
 public class GoogleCloudStorageTestServer {
 
-    private static byte[] EMPTY_BYTE = new byte[0];
+    private static final byte[] EMPTY_BYTE = new byte[0];
 
     /** List of the buckets stored on this test server **/
     private final Map<String, Bucket> buckets = ConcurrentCollections.newConcurrentMap();
@@ -63,13 +68,6 @@ public class GoogleCloudStorageTestServer {
     /** Server endpoint **/
     private final String endpoint;
 
-    /**
-     * Creates a {@link GoogleCloudStorageTestServer} with the default endpoint
-     */
-    GoogleCloudStorageTestServer() {
-        this("https://www.googleapis.com");
-    }
-
     /**
      * Creates a {@link GoogleCloudStorageTestServer} with a custom endpoint
     */
@@ -87,29 +85,6 @@ public String getEndpoint() {
         return endpoint;
     }
 
-    /**
-     * Returns a Google Cloud Storage response for the given request
-     *
-     * @param method  the HTTP method of the request
-     * @param url     the HTTP URL of the request
-     * @param headers the HTTP headers of the request
-     * @param body    the HTTP request body
-     * @return a {@link Response}
-     *
-     * @throws IOException if something goes wrong
-     */
-    public Response handle(final String method,
-                           final String url,
-                           final Map<String, List<String>> headers,
-                           byte[] body) throws IOException {
-
-        final int questionMark = url.indexOf('?');
-        if (questionMark == -1) {
-            return handle(method, url, null, headers, body);
-        }
-        return handle(method, url.substring(0, questionMark), url.substring(questionMark + 1), headers, body);
-    }
-
     /**
      * Returns a Google Cloud Storage response for the given request
      *
@@ -165,7 +140,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, f
         //
         // https://cloud.google.com/storage/docs/json_api/v1/buckets/get
         handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}", (params, headers, body) -> {
-            String name = params.get("bucket");
+            final String name = params.get("bucket");
             if (Strings.hasText(name) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "bucket name is missing");
             }
@@ -181,7 +156,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, f
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/get
         handlers.insert("GET " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
-            String objectName = 
params.get("object");
+            final String objectName = params.get("object");
             if (Strings.hasText(objectName) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
             }
@@ -191,7 +166,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, f
                 return newError(RestStatus.NOT_FOUND, "bucket not found");
             }
 
-            for (Map.Entry<String, byte[]> object : bucket.objects.entrySet()) {
+            for (final Map.Entry<String, byte[]> object : bucket.objects.entrySet()) {
                 if (object.getKey().equals(objectName)) {
                     return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectName, object.getValue()));
                 }
@@ -203,7 +178,7 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, f
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/delete
         handlers.insert("DELETE " + endpoint + "/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> {
-            String objectName = params.get("object");
+            final String objectName = params.get("object");
             if (Strings.hasText(objectName) == false) {
                 return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
             }
@@ -224,25 +199,149 @@ private static PathTrie<RequestHandler> defaultHandlers(final String endpoint, f
         //
         // https://cloud.google.com/storage/docs/json_api/v1/objects/insert
         handlers.insert("POST " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> {
-            if ("resumable".equals(params.get("uploadType")) == false) {
-                return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable");
-            }
-
-            final String objectName = params.get("name");
-            if (Strings.hasText(objectName) == false) {
-                return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
-            }
-
-            final Bucket bucket = buckets.get(params.get("bucket"));
-            if (bucket == null) {
-                return newError(RestStatus.NOT_FOUND, "bucket not found");
-            }
-
-            if (bucket.objects.put(objectName, EMPTY_BYTE) == null) {
-                String location = endpoint + "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id=" + objectName;
-                return new Response(RestStatus.CREATED, singletonMap("Location", location), XContentType.JSON.mediaType(), EMPTY_BYTE);
+            final String uploadType = params.get("uploadType");
+            if ("resumable".equals(uploadType)) {
+                final String objectName = params.get("name");
+                if (Strings.hasText(objectName) == false) {
+                    return newError(RestStatus.INTERNAL_SERVER_ERROR, "object name is missing");
+                }
+                final Bucket bucket = buckets.get(params.get("bucket"));
+                if (bucket == null) {
+                    return newError(RestStatus.NOT_FOUND, "bucket not found");
+                }
+                if (bucket.objects.putIfAbsent(objectName, EMPTY_BYTE) == null) {
+                    final String location = endpoint + "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id="
+                            + objectName;
+                    return new Response(RestStatus.CREATED, singletonMap("Location", location), XContentType.JSON.mediaType(), EMPTY_BYTE);
+                } else {
+                    return newError(RestStatus.CONFLICT, "object already exist");
+                }
+            } else if ("multipart".equals(uploadType)) {
+                /*
+                 * A multipart/related request body looks like this (note the binary dump inside a text blob! 
nice!): + * --__END_OF_PART__ + * Content-Length: 135 + * Content-Type: application/json; charset=UTF-8 + * content-transfer-encoding: binary + * + * {"bucket":"bucket_test","crc32c":"7XacHQ==","md5Hash":"fVztGkklMlUamsSmJK7W+w==", + * "name":"tests-KEwE3bU4TuyetBgQIghmUw/master.dat-temp"} + * --__END_OF_PART__ + * content-transfer-encoding: binary + * + * KEwE3bU4TuyetBgQIghmUw + * --__END_OF_PART__-- + */ + String boundary = "__END_OF_PART__"; + // Determine the multipart boundary + final List<String> contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type")); + if (contentTypes != null) { + final String contentType = contentTypes.get(0); + if ((contentType != null) && contentType.contains("multipart/related; boundary=")) { + boundary = contentType.replace("multipart/related; boundary=", ""); + } + } + InputStream inputStreamBody = new ByteArrayInputStream(body); + final List<String> contentEncodings = headers.getOrDefault("Content-Encoding", headers.get("Content-encoding")); + if (contentEncodings != null) { + if (contentEncodings.stream().anyMatch(x -> "gzip".equalsIgnoreCase(x))) { + inputStreamBody = new GZIPInputStream(inputStreamBody); + } + } + // Read line by line both parts of the multipart. Decoding headers as + // ISO_8859_1 is safe. + try (BufferedReader reader = new BufferedReader(new InputStreamReader(inputStreamBody, StandardCharsets.ISO_8859_1))) { + String line; + // read first part delimiter + line = reader.readLine(); + if ((line == null) || (line.equals("--" + boundary) == false)) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Does not start with the part delimiter."); + } + final Map<String, List<String>> firstPartHeaders = new HashMap<>(); + // Reads the first part's headers, if any + while ((line = reader.readLine()) != null) { + if (line.equals("\r\n") || (line.length() == 0)) { + // end of headers + break; + } else { + final String[] header = line.split(":", 2); + firstPartHeaders.put(header[0], singletonList(header[1])); + } + } + final List<String> firstPartContentTypes = firstPartHeaders.getOrDefault("Content-Type", + firstPartHeaders.get("Content-type")); + if ((firstPartContentTypes == null) + || (firstPartContentTypes.stream().noneMatch(x -> x.contains("application/json")))) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Metadata part expected to have the \"application/json\" content type."); + } + // read metadata part, a single line + line = reader.readLine(); + final byte[] metadata = line.getBytes(StandardCharsets.ISO_8859_1); + if ((firstPartContentTypes != null) && (firstPartContentTypes.stream().anyMatch((x -> x.contains("charset=utf-8"))))) { + // decode as utf-8 + line = new String(metadata, StandardCharsets.UTF_8); + } + final Matcher objectNameMatcher = Pattern.compile("\"name\":\"([^\"]*)\"").matcher(line); + objectNameMatcher.find(); + final String objectName = objectNameMatcher.group(1); + final Matcher bucketNameMatcher = Pattern.compile("\"bucket\":\"([^\"]*)\"").matcher(line); + bucketNameMatcher.find(); + final String bucketName = bucketNameMatcher.group(1); + // read second part delimiter + line = reader.readLine(); + if ((line == null) || (line.equals("--" + boundary) == false)) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Second part does not start with delimiter. 
" + + "Is the metadata multi-line?"); + } + final Map> secondPartHeaders = new HashMap<>(); + // Reads the second part's headers, if any + while ((line = reader.readLine()) != null) { + if (line.equals("\r\n") || (line.length() == 0)) { + // end of headers + break; + } else { + final String[] header = line.split(":", 2); + secondPartHeaders.put(header[0], singletonList(header[1])); + } + } + final List secondPartTransferEncoding = secondPartHeaders.getOrDefault("Content-Transfer-Encoding", + secondPartHeaders.get("content-transfer-encoding")); + if ((secondPartTransferEncoding == null) + || (secondPartTransferEncoding.stream().noneMatch(x -> x.contains("binary")))) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, + "Error parsing multipart request. Data part expected to have the \"binary\" content transfer encoding."); + } + final ByteArrayOutputStream baos = new ByteArrayOutputStream(); + int c; + while ((c = reader.read()) != -1) { + // one char to one byte, because of the ISO_8859_1 encoding + baos.write(c); + } + final byte[] temp = baos.toByteArray(); + final byte[] trailingEnding = ("\r\n--" + boundary + "--\r\n").getBytes(StandardCharsets.ISO_8859_1); + // check trailing + for (int i = trailingEnding.length - 1; i >= 0; i--) { + if (trailingEnding[i] != temp[(temp.length - trailingEnding.length) + i]) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "Error parsing multipart request."); + } + } + final Bucket bucket = buckets.get(bucketName); + if (bucket == null) { + return newError(RestStatus.NOT_FOUND, "bucket not found"); + } + final byte[] objectData = Arrays.copyOf(temp, temp.length - trailingEnding.length); + if ((objectName != null) && (bucketName != null) && (objectData != null)) { + bucket.objects.put(objectName, objectData); + return new Response(RestStatus.OK, emptyMap(), XContentType.JSON.mediaType(), metadata); + } else { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "error parsing multipart request"); + } + } } else { - return newError(RestStatus.CONFLICT, "object already exist"); + return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload type must be resumable or multipart"); } }); @@ -250,7 +349,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload handlers.insert("PUT " + endpoint + "/upload/storage/v1/b/{bucket}/o", (params, headers, body) -> { - String objectId = params.get("upload_id"); + final String objectId = params.get("upload_id"); if (Strings.hasText(objectId) == false) { return newError(RestStatus.INTERNAL_SERVER_ERROR, "upload id is missing"); } @@ -268,38 +367,46 @@ private static PathTrie defaultHandlers(final String endpoint, f return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(bucket.name, objectId, body)); }); - // Copy Object + // Rewrite or Copy Object // + // https://cloud.google.com/storage/docs/json_api/v1/objects/rewrite // https://cloud.google.com/storage/docs/json_api/v1/objects/copy - handlers.insert("POST " + endpoint + "/storage/v1/b/{srcBucket}/o/{src}/copyTo/b/{destBucket}/o/{dest}", (params, headers, body)-> { - String source = params.get("src"); - if (Strings.hasText(source) == false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); - } - - final Bucket srcBucket = buckets.get(params.get("srcBucket")); - if (srcBucket == null) { - return newError(RestStatus.NOT_FOUND, "source bucket not found"); - } - - String dest = params.get("dest"); - if (Strings.hasText(dest) == 
false) { - return newError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); - } - - final Bucket destBucket = buckets.get(params.get("destBucket")); - if (destBucket == null) { - return newError(RestStatus.NOT_FOUND, "destination bucket not found"); - } - - final byte[] sourceBytes = srcBucket.objects.get(source); - if (sourceBytes == null) { - return newError(RestStatus.NOT_FOUND, "source object not found"); - } - - destBucket.objects.put(dest, sourceBytes); - return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(destBucket.name, dest, sourceBytes)); - }); + handlers.insert("POST " + endpoint + "/storage/v1/b/{srcBucket}/o/{src}/{action}/b/{destBucket}/o/{dest}", + (params, headers, body) -> { + final String action = params.get("action"); + if ((action.equals("rewriteTo") == false) && (action.equals("copyTo") == false)) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "Action not implemented. None of \"rewriteTo\" or \"copyTo\"."); + } + final String source = params.get("src"); + if (Strings.hasText(source) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "source object name is missing"); + } + final Bucket srcBucket = buckets.get(params.get("srcBucket")); + if (srcBucket == null) { + return newError(RestStatus.NOT_FOUND, "source bucket not found"); + } + final String dest = params.get("dest"); + if (Strings.hasText(dest) == false) { + return newError(RestStatus.INTERNAL_SERVER_ERROR, "destination object name is missing"); + } + final Bucket destBucket = buckets.get(params.get("destBucket")); + if (destBucket == null) { + return newError(RestStatus.NOT_FOUND, "destination bucket not found"); + } + final byte[] sourceBytes = srcBucket.objects.get(source); + if (sourceBytes == null) { + return newError(RestStatus.NOT_FOUND, "source object not found"); + } + destBucket.objects.put(dest, sourceBytes); + if (action.equals("rewriteTo")) { + final XContentBuilder respBuilder = jsonBuilder(); + buildRewriteResponse(respBuilder, destBucket.name, dest, sourceBytes.length); + return newResponse(RestStatus.OK, emptyMap(), respBuilder); + } else { + assert action.equals("copyTo"); + return newResponse(RestStatus.OK, emptyMap(), buildObjectResource(destBucket.name, dest, sourceBytes)); + } + }); // List Objects // @@ -317,8 +424,8 @@ private static PathTrie defaultHandlers(final String endpoint, f builder.startArray("items"); final String prefixParam = params.get("prefix"); - for (Map.Entry object : bucket.objects.entrySet()) { - if (prefixParam != null && object.getKey().startsWith(prefixParam) == false) { + for (final Map.Entry object : bucket.objects.entrySet()) { + if ((prefixParam != null) && (object.getKey().startsWith(prefixParam) == false)) { continue; } buildObjectResource(builder, bucket.name, object.getKey(), object.getValue()); @@ -333,7 +440,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // // https://cloud.google.com/storage/docs/request-body handlers.insert("GET " + endpoint + "/download/storage/v1/b/{bucket}/o/{object}", (params, headers, body) -> { - String object = params.get("object"); + final String object = params.get("object"); if (Strings.hasText(object) == false) { return newError(RestStatus.INTERNAL_SERVER_ERROR, "object id is missing"); } @@ -353,7 +460,7 @@ private static PathTrie defaultHandlers(final String endpoint, f // Batch // // https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch - handlers.insert("POST " + endpoint + "/batch", (params, headers, body) -> { + handlers.insert("POST 
" + endpoint + "/batch/storage/v1", (params, headers, body) -> { final List batchedResponses = new ArrayList<>(); // A batch request body looks like this: @@ -385,7 +492,7 @@ private static PathTrie defaultHandlers(final String endpoint, f final List contentTypes = headers.getOrDefault("Content-Type", headers.get("Content-type")); if (contentTypes != null) { final String contentType = contentTypes.get(0); - if (contentType != null && contentType.contains("multipart/mixed; boundary=")) { + if ((contentType != null) && contentType.contains("multipart/mixed; boundary=")) { boundary = contentType.replace("multipart/mixed; boundary=", ""); } } @@ -398,25 +505,25 @@ private static PathTrie defaultHandlers(final String endpoint, f while ((line = reader.readLine()) != null) { // Start of a batched request if (line.equals("--" + boundary)) { - Map> batchedHeaders = new HashMap<>(); + final Map> batchedHeaders = new HashMap<>(); // Reads the headers, if any while ((line = reader.readLine()) != null) { - if (line.equals("\r\n") || line.length() == 0) { + if (line.equals("\r\n") || (line.length() == 0)) { // end of headers break; } else { - String[] header = line.split(":", 2); + final String[] header = line.split(":", 2); batchedHeaders.put(header[0], singletonList(header[1])); } } // Reads the method and URL line = reader.readLine(); - String batchedUrl = line.substring(0, line.lastIndexOf(' ')); + final String batchedUrl = line.substring(0, line.lastIndexOf(' ')); final Map batchedParams = new HashMap<>(); - int questionMark = batchedUrl.indexOf('?'); + final int questionMark = batchedUrl.indexOf('?'); if (questionMark != -1) { RestUtils.decodeQueryString(batchedUrl.substring(questionMark + 1), 0, batchedParams); } @@ -424,16 +531,16 @@ private static PathTrie defaultHandlers(final String endpoint, f // Reads the body line = reader.readLine(); byte[] batchedBody = new byte[0]; - if (line != null || line.startsWith("--" + boundary) == false) { + if ((line != null) || (line.startsWith("--" + boundary) == false)) { batchedBody = line.getBytes(StandardCharsets.UTF_8); } // Executes the batched request - RequestHandler handler = handlers.retrieve(batchedUrl, batchedParams); + final RequestHandler handler = handlers.retrieve(batchedUrl, batchedParams); if (handler != null) { try { batchedResponses.add(handler.execute(batchedParams, batchedHeaders, batchedBody)); - } catch (IOException e) { + } catch (final IOException e) { batchedResponses.add(newError(RestStatus.INTERNAL_SERVER_ERROR, e.getMessage())); } } @@ -442,11 +549,11 @@ private static PathTrie defaultHandlers(final String endpoint, f } // Now we can build the response - String sep = "--"; - String line = "\r\n"; + final String sep = "--"; + final String line = "\r\n"; - StringBuilder builder = new StringBuilder(); - for (Response response : batchedResponses) { + final StringBuilder builder = new StringBuilder(); + for (final Response response : batchedResponses) { builder.append(sep).append(boundary).append(line); builder.append("Content-Type: application/http").append(line); builder.append(line); @@ -465,7 +572,7 @@ private static PathTrie defaultHandlers(final String endpoint, f builder.append(line); builder.append(sep).append(boundary).append(sep); - byte[] content = builder.toString().getBytes(StandardCharsets.UTF_8); + final byte[] content = builder.toString().getBytes(StandardCharsets.UTF_8); return new Response(RestStatus.OK, emptyMap(), "multipart/mixed; boundary=" + boundary, content); }); @@ -525,7 +632,7 @@ private static Response 
newResponse(final RestStatus status, final Map { - try { - Bucket bucket = client.buckets().get(bucketName).execute(); - if (bucket != null) { - return Strings.hasText(bucket.getId()); - } - } catch (GoogleJsonResponseException e) { - GoogleJsonError error = e.getDetails(); - if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { - return false; - } - throw e; - } - return false; - }); - } catch (IOException e) { + final Bucket bucket = SocketAccess.doPrivilegedIOException(() -> storage.get(bucketName)); + return bucket != null; + } catch (final Exception e) { throw new BlobStoreException("Unable to check if bucket [" + bucketName + "] exists", e); } } /** - * List all blobs in the bucket + * List blobs in the bucket under the specified path. The path root is removed. * - * @param path base path of the blobs to list + * @param path + * base path of the blobs to list * @return a map of blob names and their metadata */ Map listBlobs(String path) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> listBlobsByPath(bucket, path, path)); + return listBlobsByPrefix(path, ""); } /** * List all blobs in the bucket which have a prefix * - * @param path base path of the blobs to list - * @param prefix prefix of the blobs to list - * @return a map of blob names and their metadata + * @param path + * base path of the blobs to list. This path is removed from the + * names of the blobs returned. + * @param prefix + * prefix of the blobs to list. + * @return a map of blob names and their metadata. */ Map listBlobsByPrefix(String path, String prefix) throws IOException { - return SocketAccess.doPrivilegedIOException(() -> listBlobsByPath(bucket, buildKey(path, prefix), path)); - } - - /** - * Lists all blobs in a given bucket - * - * @param bucketName name of the bucket - * @param path base path of the blobs to list - * @param pathToRemove if true, this path part is removed from blob name - * @return a map of blob names and their metadata - */ - private Map listBlobsByPath(String bucketName, String path, String pathToRemove) throws IOException { - return blobsStream(client, bucketName, path, MAX_BATCHING_REQUESTS) - .map(new BlobMetaDataConverter(pathToRemove)) - .collect(Collectors.toMap(PlainBlobMetaData::name, Function.identity())); + final String pathPrefix = buildKey(path, prefix); + final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + SocketAccess.doPrivilegedVoidIOException(() -> { + storage.get(bucket).list(BlobListOption.prefix(pathPrefix)).iterateAll().forEach(blob -> { + assert blob.getName().startsWith(path); + final String suffixName = blob.getName().substring(path.length()); + mapBuilder.put(suffixName, new PlainBlobMetaData(suffixName, blob.getSize())); + }); + }); + return mapBuilder.immutableMap(); } /** @@ -161,19 +143,9 @@ private Map listBlobsByPath(String bucketName, String path * @return true if the blob exists, false otherwise */ boolean blobExists(String blobName) throws IOException { - try { - StorageObject blob = SocketAccess.doPrivilegedIOException(() -> client.objects().get(bucket, blobName).execute()); - if (blob != null) { - return Strings.hasText(blob.getId()); - } - } catch (GoogleJsonResponseException e) { - GoogleJsonError error = e.getDetails(); - if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { - return false; - } - throw e; - } - return false; + final BlobId blobId = BlobId.of(bucket, blobName); + final Blob blob = 
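[editor's note] For readers new to the google-cloud-storage SDK this commit migrates to, a minimal standalone sketch of the calls the new blob store paths rely on; bucket and prefix names are placeholders:

    import com.google.cloud.storage.Blob;
    import com.google.cloud.storage.Bucket;
    import com.google.cloud.storage.Storage;
    import com.google.cloud.storage.Storage.BlobListOption;
    import com.google.cloud.storage.StorageOptions;

    final Storage storage = StorageOptions.getDefaultInstance().getService();
    final Bucket bucket = storage.get("bucket_test");   // returns null instead of throwing when the bucket is absent
    if (bucket != null) {
        // server-side prefix filtering; iterateAll() pages transparently
        for (Blob blob : bucket.list(BlobListOption.prefix("path/")).iterateAll()) {
            System.out.println(blob.getName() + " -> " + blob.getSize() + " bytes");
        }
    }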
SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + return blob != null; } /** @@ -183,18 +155,29 @@ boolean blobExists(String blobName) throws IOException { * @return an InputStream */ InputStream readBlob(String blobName) throws IOException { - try { - return SocketAccess.doPrivilegedIOException(() -> { - Storage.Objects.Get object = client.objects().get(bucket, blobName); - return object.executeMediaAsInputStream(); - }); - } catch (GoogleJsonResponseException e) { - GoogleJsonError error = e.getDetails(); - if ((e.getStatusCode() == HTTP_NOT_FOUND) || ((error != null) && (error.getCode() == HTTP_NOT_FOUND))) { - throw new NoSuchFileException(e.getMessage()); - } - throw e; + final BlobId blobId = BlobId.of(bucket, blobName); + final Blob blob = SocketAccess.doPrivilegedIOException(() -> storage.get(blobId)); + if (blob == null) { + throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } + final ReadChannel readChannel = SocketAccess.doPrivilegedIOException(blob::reader); + return Channels.newInputStream(new ReadableByteChannel() { + @SuppressForbidden(reason = "Channel is based on a socket, not a file") + @Override + public int read(ByteBuffer dst) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> readChannel.read(dst)); + } + + @Override + public boolean isOpen() { + return readChannel.isOpen(); + } + + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedVoidIOException(readChannel::close); + } + }); } /** @@ -204,14 +187,58 @@ InputStream readBlob(String blobName) throws IOException { * @param blobSize expected size of the blob to be written */ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException { - SocketAccess.doPrivilegedVoidIOException(() -> { - InputStreamContent stream = new InputStreamContent(null, inputStream); - stream.setLength(blobSize); + final BlobInfo blobInfo = BlobInfo.newBuilder(bucket, blobName).build(); + if (blobSize > LARGE_BLOB_THRESHOLD_BYTE_SIZE) { + writeBlobResumable(blobInfo, inputStream); + } else { + writeBlobMultipart(blobInfo, inputStream, blobSize); + } + } - Storage.Objects.Insert insert = client.objects().insert(bucket, null, stream); - insert.setName(blobName); - insert.execute(); - }); + /** + * Uploads a blob using the "resumable upload" method (multiple requests, which + * can be independently retried in case of failure, see + * https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload + * + * @param blobInfo the info for the blob to be uploaded + * @param inputStream the stream containing the blob data + */ + private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException { + final WriteChannel writeChannel = SocketAccess.doPrivilegedIOException(() -> storage.writer(blobInfo)); + Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() { + @Override + public boolean isOpen() { + return writeChannel.isOpen(); + } + + @Override + public void close() throws IOException { + SocketAccess.doPrivilegedVoidIOException(writeChannel::close); + } + + @SuppressForbidden(reason = "Channel is based on a socket, not a file") + @Override + public int write(ByteBuffer src) throws IOException { + return SocketAccess.doPrivilegedIOException(() -> writeChannel.write(src)); + } + })); + } + + /** + * Uploads a blob using the "multipart upload" method (a single + * 'multipart/related' request containing both data and metadata. 
The request is + * gzipped), see: + * https://cloud.google.com/storage/docs/json_api/v1/how-tos/multipart-upload + * + * @param blobInfo the info for the blob to be uploaded + * @param inputStream the stream containing the blob data + * @param blobSize the size of the blob + */ + private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize) throws IOException { + assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method"; + final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize)); + Streams.copy(inputStream, baos); + SocketAccess.doPrivilegedVoidIOException(() -> storage.create(blobInfo, baos.toByteArray())); } /** @@ -220,10 +247,11 @@ void writeBlob(String blobName, InputStream inputStream, long blobSize) throws I * @param blobName name of the blob */ void deleteBlob(String blobName) throws IOException { - if (!blobExists(blobName)) { + final BlobId blobId = BlobId.of(bucket, blobName); + final boolean deleted = SocketAccess.doPrivilegedIOException(() -> storage.delete(blobId)); + if (deleted == false) { throw new NoSuchFileException("Blob [" + blobName + "] does not exist"); } - SocketAccess.doPrivilegedIOException(() -> client.objects().delete(bucket, blobName).execute()); } /** @@ -232,7 +260,7 @@ void deleteBlob(String blobName) throws IOException { * @param prefix prefix of the buckets to delete */ void deleteBlobsByPrefix(String prefix) throws IOException { - deleteBlobs(listBlobsByPath(bucket, prefix, null).keySet()); + deleteBlobs(listBlobsByPrefix("", prefix).keySet()); } /** @@ -241,163 +269,55 @@ void deleteBlobsByPrefix(String prefix) throws IOException { * @param blobNames names of the bucket to delete */ void deleteBlobs(Collection<String> blobNames) throws IOException { - if (blobNames == null || blobNames.isEmpty()) { + if (blobNames.isEmpty()) { return; } - + // for a single op submit a simple delete instead of a batch of size 1 if (blobNames.size() == 1) { deleteBlob(blobNames.iterator().next()); return; } - final List<Storage.Objects.Delete> deletions = new ArrayList<>(Math.min(MAX_BATCHING_REQUESTS, blobNames.size())); - final Iterator<String> blobs = blobNames.iterator(); - - SocketAccess.doPrivilegedVoidIOException(() -> { - while (blobs.hasNext()) { - // Create a delete request for each blob to delete - deletions.add(client.objects().delete(bucket, blobs.next())); - - if (blobs.hasNext() == false || deletions.size() == MAX_BATCHING_REQUESTS) { - try { - // Deletions are executed using a batch request - BatchRequest batch = client.batch(); - - // Used to track successful deletions - CountDown countDown = new CountDown(deletions.size()); - - for (Storage.Objects.Delete delete : deletions) { - // Queue the delete request in batch - delete.queue(batch, new JsonBatchCallback<Void>() { - @Override - public void onFailure(GoogleJsonError e, HttpHeaders responseHeaders) throws IOException { - logger.error("failed to delete blob [{}] in bucket [{}]: {}", delete.getObject(), delete.getBucket(), e - .getMessage()); - } - - @Override - public void onSuccess(Void aVoid, HttpHeaders responseHeaders) throws IOException { - countDown.countDown(); - } - }); - } - - batch.execute(); - - if (countDown.isCountedDown() == false) { - throw new IOException("Failed to delete all [" + deletions.size() + "] blobs"); - } - } finally { - deletions.clear(); - } - } + final List<BlobId> blobIdsToDelete = blobNames.stream().map(blobName -> BlobId.of(bucket, blobName)).collect(Collectors.toList()); + final List<Boolean> deletedStatuses = 
SocketAccess.doPrivilegedIOException(() -> storage.delete(blobIdsToDelete)); + assert blobIdsToDelete.size() == deletedStatuses.size(); + boolean failed = false; + for (int i = 0; i < blobIdsToDelete.size(); i++) { + if (deletedStatuses.get(i) == false) { + logger.error("Failed to delete blob [{}] in bucket [{}]", blobIdsToDelete.get(i).getName(), bucket); + failed = true; } - }); + } + if (failed) { + throw new IOException("Failed to delete all [" + blobIdsToDelete.size() + "] blobs"); + } } /** * Moves a blob within the same bucket * * @param sourceBlob name of the blob to move - * @param targetBlob new name of the blob in the target bucket + * @param targetBlob new name of the blob in the same bucket */ - void moveBlob(String sourceBlob, String targetBlob) throws IOException { - SocketAccess.doPrivilegedIOException(() -> { + void moveBlob(String sourceBlobName, String targetBlobName) throws IOException { + final BlobId sourceBlobId = BlobId.of(bucket, sourceBlobName); + final BlobId targetBlobId = BlobId.of(bucket, targetBlobName); + final CopyRequest request = CopyRequest.newBuilder() + .setSource(sourceBlobId) + .setTarget(targetBlobId) + .build(); + SocketAccess.doPrivilegedVoidIOException(() -> { // There's no atomic "move" in GCS so we need to copy and delete - client.objects().copy(bucket, sourceBlob, bucket, targetBlob, null).execute(); - client.objects().delete(bucket, sourceBlob).execute(); - return null; + storage.copy(request).getResult(); + final boolean deleted = storage.delete(sourceBlobId); + if (deleted == false) { + throw new IOException("Failed to move source [" + sourceBlobName + "] to target [" + targetBlobName + "]"); + } }); } - private String buildKey(String keyPath, String s) { + private static String buildKey(String keyPath, String s) { assert s != null; return keyPath + s; } - /** - * Converts a {@link StorageObject} to a {@link PlainBlobMetaData} - */ - class BlobMetaDataConverter implements Function { - - private final String pathToRemove; - - BlobMetaDataConverter(String pathToRemove) { - this.pathToRemove = pathToRemove; - } - - @Override - public PlainBlobMetaData apply(StorageObject storageObject) { - String blobName = storageObject.getName(); - if (Strings.hasLength(pathToRemove)) { - blobName = blobName.substring(pathToRemove.length()); - } - return new PlainBlobMetaData(blobName, storageObject.getSize().longValue()); - } - } - - /** - * Spliterator can be used to list storage objects stored in a bucket. 
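[editor's note] A compact standalone view of the same copy-then-delete sequence, since GCS offers no atomic rename; it assumes an existing Storage instance, and CopyWriter.getResult() drives the rewrite calls to completion before the source is removed (names are placeholders):

    import com.google.cloud.storage.Blob;
    import com.google.cloud.storage.BlobId;
    import com.google.cloud.storage.Storage.CopyRequest;

    final BlobId source = BlobId.of("bucket_test", "source_blob");
    final BlobId target = BlobId.of("bucket_test", "target_blob");
    // getResult() blocks until the server-side rewrite has fully completed
    final Blob copied = storage.copy(CopyRequest.newBuilder().setSource(source).setTarget(target).build()).getResult();
    if (storage.delete(source) == false) {
        // the copy succeeded but the source survived: the "move" is not atomic
        throw new IllegalStateException("failed to delete " + source);
    }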
- */ - static class StorageObjectsSpliterator implements Spliterator { - - private final Storage.Objects.List list; - - StorageObjectsSpliterator(Storage client, String bucketName, String prefix, long pageSize) throws IOException { - list = SocketAccess.doPrivilegedIOException(() -> client.objects().list(bucketName)); - list.setMaxResults(pageSize); - if (prefix != null) { - list.setPrefix(prefix); - } - } - - @Override - public boolean tryAdvance(Consumer action) { - try { - // Retrieves the next page of items - Objects objects = SocketAccess.doPrivilegedIOException(list::execute); - - if ((objects == null) || (objects.getItems() == null) || (objects.getItems().isEmpty())) { - return false; - } - - // Consumes all the items - objects.getItems().forEach(action::accept); - - // Sets the page token of the next page, - // null indicates that all items have been consumed - String next = objects.getNextPageToken(); - if (next != null) { - list.setPageToken(next); - return true; - } - - return false; - } catch (Exception e) { - throw new BlobStoreException("Exception while listing objects", e); - } - } - - @Override - public Spliterator trySplit() { - return null; - } - - @Override - public long estimateSize() { - return Long.MAX_VALUE; - } - - @Override - public int characteristics() { - return 0; - } - } - - /** - * Returns a {@link Stream} of {@link StorageObject}s that are stored in a given bucket. - */ - static Stream blobsStream(Storage client, String bucketName, String prefix, long pageSize) throws IOException { - return StreamSupport.stream(new StorageObjectsSpliterator(client, bucketName, prefix, pageSize), false); - } - } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java index 68143b48ba374..99df38413326c 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettings.java @@ -18,8 +18,10 @@ */ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import com.google.api.services.storage.StorageScopes; +import com.google.auth.oauth2.ServiceAccountCredentials; + +import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.SecureSetting; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -28,9 +30,12 @@ import java.io.IOException; import java.io.InputStream; import java.io.UncheckedIOException; +import java.net.URI; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.function.Function; import static org.elasticsearch.common.settings.Setting.timeSetting; @@ -43,11 +48,19 @@ public class GoogleCloudStorageClientSettings { /** A json Service Account file loaded from secure settings. */ static final Setting.AffixSetting CREDENTIALS_FILE_SETTING = Setting.affixKeySetting(PREFIX, "credentials_file", - key -> SecureSetting.secureFile(key, null)); + key -> SecureSetting.secureFile(key, null)); /** An override for the Storage endpoint to connect to. 
*/ static final Setting.AffixSetting ENDPOINT_SETTING = Setting.affixKeySetting(PREFIX, "endpoint", - key -> new Setting<>(key, "", s -> s, Setting.Property.NodeScope)); + key -> Setting.simpleString(key, Setting.Property.NodeScope)); + + /** An override for the Google Project ID. */ + static final Setting.AffixSetting PROJECT_ID_SETTING = Setting.affixKeySetting(PREFIX, "project_id", + key -> Setting.simpleString(key, Setting.Property.NodeScope)); + + /** An override for the Token Server URI in the oauth flow. */ + static final Setting.AffixSetting TOKEN_URI_SETTING = Setting.affixKeySetting(PREFIX, "token_uri", + key -> new Setting<>(key, "", URI::create, Setting.Property.NodeScope)); /** * The timeout to establish a connection. A value of {@code -1} corresponds to an infinite timeout. A value of {@code 0} @@ -63,45 +76,59 @@ public class GoogleCloudStorageClientSettings { static final Setting.AffixSetting READ_TIMEOUT_SETTING = Setting.affixKeySetting(PREFIX, "read_timeout", key -> timeSetting(key, TimeValue.ZERO, TimeValue.MINUS_ONE, Setting.Property.NodeScope)); - /** Name used by the client when it uses the Google Cloud JSON API. **/ + /** Name used by the client when it uses the Google Cloud JSON API. */ static final Setting.AffixSetting APPLICATION_NAME_SETTING = Setting.affixKeySetting(PREFIX, "application_name", - key -> new Setting<>(key, "repository-gcs", s -> s, Setting.Property.NodeScope)); + key -> new Setting<>(key, "repository-gcs", Function.identity(), Setting.Property.NodeScope, Setting.Property.Deprecated)); - /** The credentials used by the client to connect to the Storage endpoint **/ - private final GoogleCredential credential; + /** The credentials used by the client to connect to the Storage endpoint. */ + private final ServiceAccountCredentials credential; - /** The Storage root URL the client should talk to, or empty string to use the default. **/ + /** The Storage endpoint URL the client should talk to. Null value sets the default. */ private final String endpoint; - /** The timeout to establish a connection **/ + /** The Google project ID overriding the default way to infer it. Null value sets the default. */ + private final String projectId; + + /** The timeout to establish a connection */ private final TimeValue connectTimeout; - /** The timeout to read data from an established connection **/ + /** The timeout to read data from an established connection */ private final TimeValue readTimeout; - /** The Storage client application name **/ + /** The Storage client application name */ private final String applicationName; - GoogleCloudStorageClientSettings(final GoogleCredential credential, + /** The token server URI. This leases access tokens in the oauth flow. */ + private final URI tokenUri; + + GoogleCloudStorageClientSettings(final ServiceAccountCredentials credential, final String endpoint, + final String projectId, final TimeValue connectTimeout, final TimeValue readTimeout, - final String applicationName) { + final String applicationName, + final URI tokenUri) { this.credential = credential; this.endpoint = endpoint; + this.projectId = projectId; this.connectTimeout = connectTimeout; this.readTimeout = readTimeout; this.applicationName = applicationName; + this.tokenUri = tokenUri; } - public GoogleCredential getCredential() { + public ServiceAccountCredentials getCredential() { return credential; } - public String getEndpoint() { + public String getHost() { return endpoint; } + public String getProjectId() { + return Strings.hasLength(projectId) ? 
projectId : (credential != null ? credential.getProjectId() : null); + } + public TimeValue getConnectTimeout() { return connectTimeout; } @@ -114,9 +141,13 @@ public String getApplicationName() { return applicationName; } + public URI getTokenUri() { + return tokenUri; + } + public static Map load(final Settings settings) { final Map clients = new HashMap<>(); - for (String clientName: settings.getGroups(PREFIX).keySet()) { + for (final String clientName: settings.getGroups(PREFIX).keySet()) { clients.put(clientName, getClientSettings(settings, clientName)); } if (clients.containsKey("default") == false) { @@ -131,22 +162,27 @@ static GoogleCloudStorageClientSettings getClientSettings(final Settings setting return new GoogleCloudStorageClientSettings( loadCredential(settings, clientName), getConfigValue(settings, clientName, ENDPOINT_SETTING), + getConfigValue(settings, clientName, PROJECT_ID_SETTING), getConfigValue(settings, clientName, CONNECT_TIMEOUT_SETTING), getConfigValue(settings, clientName, READ_TIMEOUT_SETTING), - getConfigValue(settings, clientName, APPLICATION_NAME_SETTING) + getConfigValue(settings, clientName, APPLICATION_NAME_SETTING), + getConfigValue(settings, clientName, TOKEN_URI_SETTING) ); } /** - * Loads the service account file corresponding to a given client name. If no file is defined for the client, - * a {@code null} credential is returned. + * Loads the service account file corresponding to a given client name. If no + * file is defined for the client, a {@code null} credential is returned. * - * @param settings the {@link Settings} - * @param clientName the client name + * @param settings + * the {@link Settings} + * @param clientName + * the client name * - * @return the {@link GoogleCredential} to use for the given client, {@code null} if no service account is defined. + * @return the {@link ServiceAccountCredentials} to use for the given client, + * {@code null} if no service account is defined. 
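[editor's note] A sketch of how the affix settings above surface to users, written the way the unit tests build them (client name "default" is assumed, serviceAccountJsonBytes is a placeholder for the key file's bytes, and the credentials file goes into the secure keystore rather than elasticsearch.yml):

    final MockSecureSettings secureSettings = new MockSecureSettings();
    secureSettings.setFile("gcs.client.default.credentials_file", serviceAccountJsonBytes);
    final Settings settings = Settings.builder()
            .setSecureSettings(secureSettings)
            .put("gcs.client.default.endpoint", "http://localhost:8180")
            .put("gcs.client.default.project_id", "my-project")
            .put("gcs.client.default.read_timeout", "20s")
            .build();
    final GoogleCloudStorageClientSettings clientSettings =
            GoogleCloudStorageClientSettings.getClientSettings(settings, "default");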
*/ - static GoogleCredential loadCredential(final Settings settings, final String clientName) { + static ServiceAccountCredentials loadCredential(final Settings settings, final String clientName) { try { if (CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).exists(settings) == false) { // explicitly returning null here so that the default credential @@ -154,19 +190,22 @@ static GoogleCredential loadCredential(final Settings settings, final String cli return null; } try (InputStream credStream = CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).get(settings)) { - GoogleCredential credential = GoogleCredential.fromStream(credStream); - if (credential.createScopedRequired()) { - credential = credential.createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL)); - } - return credential; + final Collection scopes = Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL); + return SocketAccess.doPrivilegedIOException(() -> { + final ServiceAccountCredentials credentials = ServiceAccountCredentials.fromStream(credStream); + if (credentials.createScopedRequired()) { + return (ServiceAccountCredentials) credentials.createScoped(scopes); + } + return credentials; + }); } - } catch (IOException e) { + } catch (final IOException e) { throw new UncheckedIOException(e); } } private static T getConfigValue(final Settings settings, final String clientName, final Setting.AffixSetting clientSetting) { - Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); + final Setting concreteSetting = clientSetting.getConcreteSettingForNamespace(clientName); return concreteSetting.get(settings); } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java index ef24cd959e55b..1d2d70584adf9 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStoragePlugin.java @@ -19,21 +19,6 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.auth.oauth2.TokenRequest; -import com.google.api.client.auth.oauth2.TokenResponse; -import com.google.api.client.googleapis.json.GoogleJsonError; -import com.google.api.client.http.GenericUrl; -import com.google.api.client.http.HttpHeaders; -import com.google.api.client.json.GenericJson; -import com.google.api.client.json.webtoken.JsonWebSignature; -import com.google.api.client.json.webtoken.JsonWebToken; -import com.google.api.client.util.ClassInfo; -import com.google.api.client.util.Data; -import com.google.api.services.storage.Storage; -import com.google.api.services.storage.model.Bucket; -import com.google.api.services.storage.model.Objects; -import com.google.api.services.storage.model.StorageObject; -import org.elasticsearch.SpecialPermission; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -42,8 +27,6 @@ import org.elasticsearch.plugins.RepositoryPlugin; import org.elasticsearch.repositories.Repository; -import java.security.AccessController; -import java.security.PrivilegedAction; import java.util.Arrays; import java.util.Collections; import java.util.List; @@ -51,63 +34,6 @@ public class GoogleCloudStoragePlugin extends Plugin implements 
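[editor's note] Outside the SecurityManager wrapper, loadCredential boils down to the following standalone sketch; the JSON keys listed in the comment are the standard service-account fields, which is why the client can fall back to the file's own project_id and token_uri when no override setting is given (the file name is a placeholder):

    import com.google.api.services.storage.StorageScopes;
    import com.google.auth.oauth2.ServiceAccountCredentials;
    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Paths;
    import java.util.Collections;

    // A service account file carries, among others:
    //   {"type": "service_account", "project_id": "...", "private_key": "...",
    //    "client_email": "...", "token_uri": "https://accounts.google.com/o/oauth2/token"}
    try (InputStream stream = Files.newInputStream(Paths.get("service_account.json"))) {
        ServiceAccountCredentials credentials = ServiceAccountCredentials.fromStream(stream);
        if (credentials.createScopedRequired()) {
            credentials = (ServiceAccountCredentials) credentials
                    .createScoped(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL));
        }
    }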
RepositoryPlugin { - static { - /* - * Google HTTP client changes access levels because its silly and we - * can't allow that on any old stack stack so we pull it here, up front, - * so we can cleanly check the permissions for it. Without this changing - * the permission can fail if any part of core is on the stack because - * our plugin permissions don't allow core to "reach through" plugins to - * change the permission. Because that'd be silly. - */ - SpecialPermission.check(); - AccessController.doPrivileged((PrivilegedAction) () -> { - // ClassInfo put in cache all the fields of a given class - // that are annoted with @Key; at the same time it changes - // the field access level using setAccessible(). Calling - // them here put the ClassInfo in cache (they are never evicted) - // before the SecurityManager is installed. - ClassInfo.of(HttpHeaders.class, true); - - ClassInfo.of(JsonWebSignature.Header.class, false); - ClassInfo.of(JsonWebToken.Payload.class, false); - - ClassInfo.of(TokenRequest.class, false); - ClassInfo.of(TokenResponse.class, false); - - ClassInfo.of(GenericJson.class, false); - ClassInfo.of(GenericUrl.class, false); - - Data.nullOf(GoogleJsonError.ErrorInfo.class); - ClassInfo.of(GoogleJsonError.class, false); - - Data.nullOf(Bucket.Cors.class); - ClassInfo.of(Bucket.class, false); - ClassInfo.of(Bucket.Cors.class, false); - ClassInfo.of(Bucket.Lifecycle.class, false); - ClassInfo.of(Bucket.Logging.class, false); - ClassInfo.of(Bucket.Owner.class, false); - ClassInfo.of(Bucket.Versioning.class, false); - ClassInfo.of(Bucket.Website.class, false); - - ClassInfo.of(StorageObject.class, false); - ClassInfo.of(StorageObject.Owner.class, false); - - ClassInfo.of(Objects.class, false); - - ClassInfo.of(Storage.Buckets.Get.class, false); - ClassInfo.of(Storage.Buckets.Insert.class, false); - - ClassInfo.of(Storage.Objects.Get.class, false); - ClassInfo.of(Storage.Objects.Insert.class, false); - ClassInfo.of(Storage.Objects.Delete.class, false); - ClassInfo.of(Storage.Objects.Copy.class, false); - ClassInfo.of(Storage.Objects.List.class, false); - - return null; - }); - } - private final Map clientsSettings; public GoogleCloudStoragePlugin(final Settings settings) { @@ -134,8 +60,10 @@ public List> getSettings() { return Arrays.asList( GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING, GoogleCloudStorageClientSettings.ENDPOINT_SETTING, + GoogleCloudStorageClientSettings.PROJECT_ID_SETTING, GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING, GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING, - GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING); + GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING, + GoogleCloudStorageClientSettings.TOKEN_URI_SETTING); } } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 1c1fabcdb9f26..422d7a308f260 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -19,8 +19,6 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.services.storage.Storage; -import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; 
@@ -30,7 +28,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.RepositoryException; @@ -45,10 +42,9 @@ import static org.elasticsearch.common.settings.Setting.timeSetting; import static org.elasticsearch.common.unit.TimeValue.timeValueMillis; -class GoogleCloudStorageRepository extends BlobStoreRepository { +import com.google.cloud.storage.Storage; - private final Logger logger = ESLoggerFactory.getLogger(GoogleCloudStorageRepository.class); - private final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); +class GoogleCloudStorageRepository extends BlobStoreRepository { // package private for testing static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); @@ -56,8 +52,6 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { static final String TYPE = "gcs"; - static final TimeValue NO_TIMEOUT = timeValueMillis(-1); - static final Setting BUCKET = simpleString("bucket", Property.NodeScope, Property.Dynamic); static final Setting BASE_PATH = @@ -68,18 +62,6 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic); static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); - @Deprecated - static final Setting APPLICATION_NAME = - new Setting<>("application_name", "", Function.identity(), Property.NodeScope, Property.Dynamic); - - @Deprecated - static final Setting HTTP_READ_TIMEOUT = - timeSetting("http.read_timeout", NO_TIMEOUT, Property.NodeScope, Property.Dynamic); - - @Deprecated - static final Setting HTTP_CONNECT_TIMEOUT = - timeSetting("http.connect_timeout", NO_TIMEOUT, Property.NodeScope, Property.Dynamic); - private final ByteSizeValue chunkSize; private final boolean compress; private final BlobPath basePath; @@ -108,32 +90,7 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress); - String application = APPLICATION_NAME.get(metadata.settings()); - if (Strings.hasText(application)) { - deprecationLogger.deprecated("Setting [application_name] in repository settings is deprecated, " + - "it must be specified in the client settings instead"); - } - TimeValue connectTimeout = null; - TimeValue readTimeout = null; - - TimeValue timeout = HTTP_CONNECT_TIMEOUT.get(metadata.settings()); - if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { - deprecationLogger.deprecated("Setting [http.connect_timeout] in repository settings is deprecated, " + - "it must be specified in the client settings instead"); - connectTimeout = timeout; - } - timeout = HTTP_READ_TIMEOUT.get(metadata.settings()); - if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { - deprecationLogger.deprecated("Setting [http.read_timeout] in repository settings is deprecated, " + - "it must be specified in the client settings instead"); - readTimeout = timeout; - } - - TimeValue finalConnectTimeout = connectTimeout; - TimeValue finalReadTimeout = readTimeout; - - Storage client = SocketAccess.doPrivilegedIOException(() -> - 
storageService.createClient(clientName, application, finalConnectTimeout, finalReadTimeout)); + Storage client = SocketAccess.doPrivilegedIOException(() -> storageService.createClient(clientName)); this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, client); } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index f4d80c9e90453..5a52fff463499 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -19,23 +19,26 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; -import com.google.api.client.googleapis.javanet.GoogleNetHttpTransport; -import com.google.api.client.http.HttpBackOffIOExceptionHandler; -import com.google.api.client.http.HttpBackOffUnsuccessfulResponseHandler; -import com.google.api.client.http.HttpRequest; -import com.google.api.client.http.HttpRequestInitializer; +import com.google.api.client.googleapis.GoogleUtils; import com.google.api.client.http.HttpTransport; -import com.google.api.client.http.HttpUnsuccessfulResponseHandler; -import com.google.api.client.json.jackson2.JacksonFactory; -import com.google.api.client.util.ExponentialBackOff; -import com.google.api.services.storage.Storage; +import com.google.api.client.http.javanet.DefaultConnectionFactory; +import com.google.api.client.http.javanet.NetHttpTransport; +import com.google.auth.oauth2.ServiceAccountCredentials; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageOptions; + import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import java.io.IOException; +import java.net.HttpURLConnection; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; import java.util.Map; public class GoogleCloudStorageService extends AbstractComponent { @@ -51,57 +54,108 @@ public GoogleCloudStorageService(final Environment environment, final Map httpTransport) + .build(); + final StorageOptions.Builder storageOptionsBuilder = StorageOptions.newBuilder() + .setTransportOptions(httpTransportOptions) + .setHeaderProvider(() -> { + final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); + if (Strings.hasLength(clientSettings.getApplicationName())) { + mapBuilder.put("user-agent", clientSettings.getApplicationName()); + } + return mapBuilder.immutableMap(); + }); + if (Strings.hasLength(clientSettings.getHost())) { + storageOptionsBuilder.setHost(clientSettings.getHost()); } - if (Strings.hasLength(clientSettings.getEndpoint())) { - storage.setRootUrl(clientSettings.getEndpoint()); + if (Strings.hasLength(clientSettings.getProjectId())) { + storageOptionsBuilder.setProjectId(clientSettings.getProjectId()); } - return storage.build(); + if (clientSettings.getCredential() == null) { + logger.warn("\"Application Default Credentials\" are not supported out of the box." 
+ + " Additional file system permissions have to be granted to the plugin."); + } else { + ServiceAccountCredentials serviceAccountCredentials = clientSettings.getCredential(); + // override token server URI + final URI tokenServerUri = clientSettings.getTokenUri(); + if (Strings.hasLength(tokenServerUri.toString())) { + // Rebuild the service account credentials in order to use a custom Token url. + // This is mostly used for testing purpose. + serviceAccountCredentials = serviceAccountCredentials.toBuilder().setTokenServerUri(tokenServerUri).build(); + } + storageOptionsBuilder.setCredentials(serviceAccountCredentials); + } + return storageOptionsBuilder.build().getService(); } - static HttpRequestInitializer createRequestInitializer(final GoogleCloudStorageClientSettings settings, - final TimeValue deprecatedConnectTimeout, - final TimeValue deprecatedReadTimeout) throws IOException { - GoogleCredential credential = settings.getCredential(); - if (credential == null) { - credential = GoogleCredential.getApplicationDefault(); + /** + * Pins the TLS trust certificates and, more importantly, overrides connection + * URLs in the case of a custom endpoint setting because some connections don't + * fully honor this setting (bugs in the SDK). The default connection factory + * opens a new connection for each request. This is required for the storage + * instance to be thread-safe. + **/ + private static HttpTransport createHttpTransport(final String endpoint) throws Exception { + final NetHttpTransport.Builder builder = new NetHttpTransport.Builder(); + // requires java.lang.RuntimePermission "setFactory" + builder.trustCertificates(GoogleUtils.getCertificateTrustStore()); + if (Strings.hasLength(endpoint)) { + final URL endpointUrl = URI.create(endpoint).toURL(); + builder.setConnectionFactory(new DefaultConnectionFactory() { + @Override + public HttpURLConnection openConnection(final URL originalUrl) throws IOException { + // test if the URL is built correctly, ie following the `host` setting + if (originalUrl.getHost().equals(endpointUrl.getHost()) && originalUrl.getPort() == endpointUrl.getPort() + && originalUrl.getProtocol().equals(endpointUrl.getProtocol())) { + return super.openConnection(originalUrl); + } + // override connection URLs because some don't follow the config. See + // https://github.com/GoogleCloudPlatform/google-cloud-java/issues/3254 and + // https://github.com/GoogleCloudPlatform/google-cloud-java/issues/3255 + URI originalUri; + try { + originalUri = originalUrl.toURI(); + } catch (final URISyntaxException e) { + throw new RuntimeException(e); + } + String overridePath = "/"; + if (originalUri.getRawPath() != null) { + overridePath = originalUri.getRawPath(); + } + if (originalUri.getRawQuery() != null) { + overridePath += "?" + originalUri.getRawQuery(); + } + return super.openConnection( + new URL(endpointUrl.getProtocol(), endpointUrl.getHost(), endpointUrl.getPort(), overridePath)); + } + }); } - - final Integer connectTimeout = (deprecatedConnectTimeout != null) ? - toTimeout(deprecatedConnectTimeout) : toTimeout(settings.getConnectTimeout()); - - final Integer readTimeout = (deprecatedReadTimeout != null) ? 
- toTimeout(deprecatedReadTimeout) : toTimeout(settings.getReadTimeout()); - - return new DefaultHttpRequestInitializer(credential, connectTimeout, readTimeout); + return builder.build(); } - /** Converts timeout values from the settings to a timeout value for the Google Cloud SDK **/ + /** + * Converts timeout values from the settings to a timeout value for the Google + * Cloud SDK + **/ static Integer toTimeout(final TimeValue timeout) { // Null or zero in settings means the default timeout if (timeout == null || TimeValue.ZERO.equals(timeout)) { - return null; + // negative value means using the default value + return -1; } // -1 means infinite timeout if (TimeValue.MINUS_ONE.equals(timeout)) { @@ -111,51 +165,4 @@ static Integer toTimeout(final TimeValue timeout) { return Math.toIntExact(timeout.getMillis()); } - /** - * HTTP request initializer that set timeouts and backoff handler while deferring authentication to GoogleCredential. - * See https://cloud.google.com/storage/transfer/create-client#retry - */ - static class DefaultHttpRequestInitializer implements HttpRequestInitializer { - - private final Integer connectTimeout; - private final Integer readTimeout; - private final GoogleCredential credential; - - DefaultHttpRequestInitializer(GoogleCredential credential, Integer connectTimeoutMillis, Integer readTimeoutMillis) { - this.credential = credential; - this.connectTimeout = connectTimeoutMillis; - this.readTimeout = readTimeoutMillis; - } - - @Override - public void initialize(HttpRequest request) { - if (connectTimeout != null) { - request.setConnectTimeout(connectTimeout); - } - if (readTimeout != null) { - request.setReadTimeout(readTimeout); - } - - request.setIOExceptionHandler(new HttpBackOffIOExceptionHandler(newBackOff())); - request.setInterceptor(credential); - - final HttpUnsuccessfulResponseHandler handler = new HttpBackOffUnsuccessfulResponseHandler(newBackOff()); - request.setUnsuccessfulResponseHandler((req, resp, supportsRetry) -> { - // Let the credential handle the response. 
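[editor's note] The resulting mapping of the toTimeout helper above, assuming the java.net convention the SDK transport options follow (0 means an infinite timeout, a negative value means "use the client default"):

    toTimeout(null);                            // -1 -> use the SDK default
    toTimeout(TimeValue.ZERO);                  // -1 -> use the SDK default
    toTimeout(TimeValue.MINUS_ONE);             //  0 -> infinite timeout
    toTimeout(TimeValue.timeValueSeconds(20));  //  20000 milliseconds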
If it failed, we rely on our backoff handler - return credential.handleResponse(req, resp, supportsRetry) || handler.handleResponse(req, resp, supportsRetry); - } - ); - } - - private ExponentialBackOff newBackOff() { - return new ExponentialBackOff.Builder() - .setInitialIntervalMillis(100) - .setMaxIntervalMillis(6000) - .setMaxElapsedTimeMillis(900000) - .setMultiplier(1.5) - .setRandomizationFactor(0.5) - .build(); - } - } - - } diff --git a/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy index ce9b0334638a0..fffe6cbbc0f24 100644 --- a/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy +++ b/plugins/repository-gcs/src/main/plugin-metadata/plugin-security.policy @@ -18,11 +18,12 @@ */ grant { + // required by: com.google.api.client.json.JsonParser#parseValue permission java.lang.RuntimePermission "accessDeclaredMembers"; - permission java.lang.RuntimePermission "setFactory"; + // required by: com.google.api.client.json.GenericJson# permission java.lang.reflect.ReflectPermission "suppressAccessChecks"; - permission java.net.URLPermission "http://www.googleapis.com/*", "*"; - permission java.net.URLPermission "https://www.googleapis.com/*", "*"; + // required to add google certs to the gcs client truststore + permission java.lang.RuntimePermission "setFactory"; // gcs client opens socket connections for to access repository permission java.net.SocketPermission "*", "connect"; diff --git a/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java new file mode 100644 index 0000000000000..f2b8a0571ad87 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageRpcOptionUtils.java @@ -0,0 +1,54 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.storage.spi.v1.StorageRpc; + +import static org.mockito.Mockito.mock; + +/** + * Utility class that exposes Google SDK package protected methods to + * create specific StorageRpc objects in unit tests. + */ +public class StorageRpcOptionUtils { + + private StorageRpcOptionUtils(){} + + public static String getPrefix(final Storage.BlobListOption... 
options) { + if (options != null) { + for (final Option option : options) { + final StorageRpc.Option rpcOption = option.getRpcOption(); + if (StorageRpc.Option.PREFIX.equals(rpcOption)) { + return (String) option.getValue(); + } + } + } + return null; + } + + public static CopyWriter createCopyWriter(final Blob result) { + return new CopyWriter(mock(StorageOptions.class), mock(StorageRpc.RewriteResponse.class)) { + @Override + public Blob getResult() { + return result; + } + }; + } +} diff --git a/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageTestUtils.java b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageTestUtils.java new file mode 100644 index 0000000000000..68175d7f1be53 --- /dev/null +++ b/plugins/repository-gcs/src/test/java/com/google/cloud/storage/StorageTestUtils.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package com.google.cloud.storage; + +/** + * Utility class that exposes Google SDK package protected methods to + * create buckets and blobs objects in unit tests. 
+ */ +public class StorageTestUtils { + + private StorageTestUtils(){} + + public static Bucket createBucket(final Storage storage, final String bucketName) { + return new Bucket(storage, (BucketInfo.BuilderImpl) BucketInfo.newBuilder(bucketName)); + } + + public static Blob createBlob(final Storage storage, final String bucketName, final String blobName, final long blobSize) { + return new Blob(storage, (BlobInfo.BuilderImpl) BlobInfo.newBuilder(bucketName, blobName).setSize(blobSize)); + } +} diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index f52dc492f6f44..d02100f63cc41 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -19,12 +19,11 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.services.storage.Storage; +import com.google.cloud.storage.Storage; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; @@ -86,10 +85,7 @@ public static class MockGoogleCloudStorageService extends GoogleCloudStorageServ } @Override - public Storage createClient(final String clientName, - final String application, - final TimeValue connectTimeout, - final TimeValue readTimeout) { + public Storage createClient(final String clientName) { return new MockStorage(BUCKET, blobs); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java index badd86cd8a2b3..14cb4fa242e7d 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageClientSettingsTests.java @@ -18,20 +18,25 @@ */ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import com.google.api.services.storage.StorageScopes; +import com.google.auth.oauth2.ServiceAccountCredentials; + import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; +import java.net.URI; import java.nio.charset.StandardCharsets; import java.security.KeyPair; import java.security.KeyPairGenerator; +import java.util.ArrayList; import java.util.Base64; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Locale; import java.util.Map; @@ -39,6 +44,7 @@ import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING; import static 
org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING;
 import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING;
+import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.PROJECT_ID_SETTING;
 import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING;
 import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.getClientSettings;
 import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.loadCredential;
@@ -46,59 +52,78 @@ public class GoogleCloudStorageClientSettingsTests extends ESTestCase {
 
     public void testLoadWithEmptySettings() {
-        Map<String, GoogleCloudStorageClientSettings> clientsSettings = GoogleCloudStorageClientSettings.load(Settings.EMPTY);
+        final Map<String, GoogleCloudStorageClientSettings> clientsSettings = GoogleCloudStorageClientSettings.load(Settings.EMPTY);
         assertEquals(1, clientsSettings.size());
         assertNotNull(clientsSettings.get("default"));
     }
 
     public void testLoad() throws Exception {
         final int nbClients = randomIntBetween(1, 5);
-        final Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClients = randomClients(nbClients);
+        final List<Setting<?>> deprecationWarnings = new ArrayList<>();
+        final Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClients = randomClients(nbClients, deprecationWarnings);
         final Map<String, GoogleCloudStorageClientSettings> expectedClientsSettings = randomClients.v1();
-        Map<String, GoogleCloudStorageClientSettings> actualClientsSettings = GoogleCloudStorageClientSettings.load(randomClients.v2());
+        final Map<String, GoogleCloudStorageClientSettings> actualClientsSettings = GoogleCloudStorageClientSettings
+                .load(randomClients.v2());
         assertEquals(expectedClientsSettings.size(), actualClientsSettings.size());
-        for (String clientName : expectedClientsSettings.keySet()) {
-            GoogleCloudStorageClientSettings actualClientSettings = actualClientsSettings.get(clientName);
+        for (final String clientName : expectedClientsSettings.keySet()) {
+            final GoogleCloudStorageClientSettings actualClientSettings = actualClientsSettings.get(clientName);
             assertNotNull(actualClientSettings);
-            GoogleCloudStorageClientSettings expectedClientSettings = expectedClientsSettings.get(clientName);
+            final GoogleCloudStorageClientSettings expectedClientSettings = expectedClientsSettings.get(clientName);
             assertNotNull(expectedClientSettings);
-            assertGoogleCredential(expectedClientSettings.getCredential(), actualClientSettings.getCredential());
-            assertEquals(expectedClientSettings.getEndpoint(), actualClientSettings.getEndpoint());
+            assertEquals(expectedClientSettings.getHost(), actualClientSettings.getHost());
+            assertEquals(expectedClientSettings.getProjectId(), actualClientSettings.getProjectId());
             assertEquals(expectedClientSettings.getConnectTimeout(), actualClientSettings.getConnectTimeout());
             assertEquals(expectedClientSettings.getReadTimeout(), actualClientSettings.getReadTimeout());
             assertEquals(expectedClientSettings.getApplicationName(), actualClientSettings.getApplicationName());
         }
+
+        if (deprecationWarnings.isEmpty() == false) {
+            assertSettingDeprecationsAndWarnings(deprecationWarnings.toArray(new Setting<?>[0]));
+        }
     }
 
     public void testLoadCredential() throws Exception {
-        Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClient = randomClients(1);
-        GoogleCloudStorageClientSettings expectedClientSettings = randomClient.v1().values().iterator().next();
-        String clientName = randomClient.v1().keySet().iterator().next();
-
+        final List<Setting<?>> deprecationWarnings = new ArrayList<>();
+        final Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClient = randomClients(1, deprecationWarnings);
+        final GoogleCloudStorageClientSettings expectedClientSettings = randomClient.v1().values().iterator().next();
+        final String clientName = randomClient.v1().keySet().iterator().next();
         assertGoogleCredential(expectedClientSettings.getCredential(), loadCredential(randomClient.v2(), clientName));
     }
 
+    public void testProjectIdDefaultsToCredentials() throws Exception {
+        final String clientName = randomAlphaOfLength(5);
+        final Tuple<ServiceAccountCredentials, byte[]> credentials = randomCredential(clientName);
+        final ServiceAccountCredentials credential = credentials.v1();
+        final GoogleCloudStorageClientSettings googleCloudStorageClientSettings = new GoogleCloudStorageClientSettings(credential,
+                ENDPOINT_SETTING.getDefault(Settings.EMPTY), PROJECT_ID_SETTING.getDefault(Settings.EMPTY),
+                CONNECT_TIMEOUT_SETTING.getDefault(Settings.EMPTY), READ_TIMEOUT_SETTING.getDefault(Settings.EMPTY),
+                APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY), new URI(""));
+        assertEquals(credential.getProjectId(), googleCloudStorageClientSettings.getProjectId());
+    }
+
     /** Generates a given number of GoogleCloudStorageClientSettings along with the Settings to build them from **/
-    private Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClients(final int nbClients) throws Exception {
+    private Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomClients(final int nbClients,
+                                                                                         final List<Setting<?>> deprecationWarnings)
+            throws Exception {
         final Map<String, GoogleCloudStorageClientSettings> expectedClients = new HashMap<>();
-        expectedClients.put("default", getClientSettings(Settings.EMPTY, "default"));
         final Settings.Builder settings = Settings.builder();
         final MockSecureSettings secureSettings = new MockSecureSettings();
         for (int i = 0; i < nbClients; i++) {
-            String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT);
-
-            GoogleCloudStorageClientSettings clientSettings = randomClient(clientName, settings, secureSettings);
+            final String clientName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT);
+            final GoogleCloudStorageClientSettings clientSettings = randomClient(clientName, settings, secureSettings, deprecationWarnings);
             expectedClients.put(clientName, clientSettings);
         }
         if (randomBoolean()) {
-            GoogleCloudStorageClientSettings clientSettings = randomClient("default", settings, secureSettings);
+            final GoogleCloudStorageClientSettings clientSettings = randomClient("default", settings, secureSettings, deprecationWarnings);
             expectedClients.put("default", clientSettings);
+        } else {
+            expectedClients.put("default", getClientSettings(Settings.EMPTY, "default"));
         }
 
         return Tuple.tuple(expectedClients, settings.setSecureSettings(secureSettings).build());
@@ -107,20 +132,30 @@ private Tuple<Map<String, GoogleCloudStorageClientSettings>, Settings> randomCli
 
     /** Generates a random GoogleCloudStorageClientSettings along with the Settings to build it **/
     private static GoogleCloudStorageClientSettings randomClient(final String clientName,
                                                                  final Settings.Builder settings,
-                                                                 final MockSecureSettings secureSettings) throws Exception {
+                                                                 final MockSecureSettings secureSettings,
+                                                                 final List<Setting<?>> deprecationWarnings) throws Exception {
 
-        Tuple<GoogleCredential, byte[]> credentials = randomCredential(clientName);
-        GoogleCredential credential = credentials.v1();
+        final Tuple<ServiceAccountCredentials, byte[]> credentials = randomCredential(clientName);
+        final ServiceAccountCredentials credential = credentials.v1();
         secureSettings.setFile(CREDENTIALS_FILE_SETTING.getConcreteSettingForNamespace(clientName).getKey(), credentials.v2());
 
         String endpoint;
         if (randomBoolean()) {
-            endpoint = randomAlphaOfLength(5);
+            endpoint = randomFrom("http://www.elastic.co", "http://metadata.google.com:88/oauth", "https://www.googleapis.com",
+                    "https://www.elastic.co:443", "http://localhost:8443", "https://www.googleapis.com/oauth/token");
            settings.put(ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), endpoint);
         } else {
             endpoint = ENDPOINT_SETTING.getDefault(Settings.EMPTY);
         }
 
+        String projectId;
+        if (randomBoolean()) {
+            projectId = randomAlphaOfLength(5);
+            settings.put(PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectId);
+        } else {
+            projectId = PROJECT_ID_SETTING.getDefault(Settings.EMPTY);
+        }
+
         TimeValue connectTimeout;
         if (randomBoolean()) {
             connectTimeout = randomTimeout();
@@ -141,40 +176,35 @@ private static GoogleCloudStorageClientSettings randomClient(final String client
         if (randomBoolean()) {
             applicationName = randomAlphaOfLength(5);
             settings.put(APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), applicationName);
+            deprecationWarnings.add(APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName));
         } else {
             applicationName = APPLICATION_NAME_SETTING.getDefault(Settings.EMPTY);
         }
 
-        return new GoogleCloudStorageClientSettings(credential, endpoint, connectTimeout, readTimeout, applicationName);
+        return new GoogleCloudStorageClientSettings(credential, endpoint, projectId, connectTimeout, readTimeout, applicationName,
+                new URI(""));
     }
 
     /** Generates a random GoogleCredential along with its corresponding Service Account file provided as a byte array **/
-    private static Tuple<GoogleCredential, byte[]> randomCredential(final String clientName) throws Exception {
-        KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
-
-        GoogleCredential.Builder credentialBuilder = new GoogleCredential.Builder();
-        credentialBuilder.setServiceAccountId(clientName);
-        credentialBuilder.setServiceAccountProjectId("project_id_" + clientName);
-        credentialBuilder.setServiceAccountScopes(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL));
-        credentialBuilder.setServiceAccountPrivateKey(keyPair.getPrivate());
-        credentialBuilder.setServiceAccountPrivateKeyId("private_key_id_" + clientName);
-
-        String encodedPrivateKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded());
-        String serviceAccount = "{\"type\":\"service_account\"," +
+    private static Tuple<ServiceAccountCredentials, byte[]> randomCredential(final String clientName) throws Exception {
+        final KeyPair keyPair = KeyPairGenerator.getInstance("RSA").generateKeyPair();
+        final ServiceAccountCredentials.Builder credentialBuilder = ServiceAccountCredentials.newBuilder();
+        credentialBuilder.setClientId("id_" + clientName);
+        credentialBuilder.setClientEmail(clientName);
+        credentialBuilder.setProjectId("project_id_" + clientName);
+        credentialBuilder.setPrivateKey(keyPair.getPrivate());
+        credentialBuilder.setPrivateKeyId("private_key_id_" + clientName);
+        credentialBuilder.setScopes(Collections.singleton(StorageScopes.DEVSTORAGE_FULL_CONTROL));
+        final String encodedPrivateKey = Base64.getEncoder().encodeToString(keyPair.getPrivate().getEncoded());
+        final String serviceAccount = "{\"type\":\"service_account\"," +
             "\"project_id\":\"project_id_" + clientName + "\"," +
             "\"private_key_id\":\"private_key_id_" + clientName + "\"," +
             "\"private_key\":\"-----BEGIN PRIVATE KEY-----\\n" + encodedPrivateKey + "\\n-----END PRIVATE KEY-----\\n\"," +
             "\"client_email\":\"" + clientName + "\"," +
-            "\"client_id\":\"id_" + clientName + "\"," +
-            "\"auth_uri\":\"https://accounts.google.com/o/oauth2/auth\"," +
-            "\"token_uri\":\"https://accounts.google.com/o/oauth2/token\"," +
-            "\"auth_provider_x509_cert_url\":\"https://www.googleapis.com/oauth2/v1/certs\"," +
-
"\"client_x509_cert_url\":\"https://www.googleapis.com/robot/v1/metadata/x509/" + - clientName + - "%40appspot.gserviceaccount.com\"}"; - + "\"client_id\":\"id_" + clientName + "\"" + + "}"; return Tuple.tuple(credentialBuilder.build(), serviceAccount.getBytes(StandardCharsets.UTF_8)); } @@ -182,14 +212,16 @@ private static TimeValue randomTimeout() { return randomFrom(TimeValue.MINUS_ONE, TimeValue.ZERO, TimeValue.parseTimeValue(randomPositiveTimeValue(), "test")); } - private static void assertGoogleCredential(final GoogleCredential expected, final GoogleCredential actual) { + private static void assertGoogleCredential(ServiceAccountCredentials expected, ServiceAccountCredentials actual) { if (expected != null) { assertEquals(expected.getServiceAccountUser(), actual.getServiceAccountUser()); - assertEquals(expected.getServiceAccountId(), actual.getServiceAccountId()); - assertEquals(expected.getServiceAccountProjectId(), actual.getServiceAccountProjectId()); - assertEquals(expected.getServiceAccountScopesAsString(), actual.getServiceAccountScopesAsString()); - assertEquals(expected.getServiceAccountPrivateKey(), actual.getServiceAccountPrivateKey()); - assertEquals(expected.getServiceAccountPrivateKeyId(), actual.getServiceAccountPrivateKeyId()); + assertEquals(expected.getClientId(), actual.getClientId()); + assertEquals(expected.getClientEmail(), actual.getClientEmail()); + assertEquals(expected.getAccount(), actual.getAccount()); + assertEquals(expected.getProjectId(), actual.getProjectId()); + assertEquals(expected.getScopes(), actual.getScopes()); + assertEquals(expected.getPrivateKey(), actual.getPrivateKey()); + assertEquals(expected.getPrivateKeyId(), actual.getPrivateKeyId()); } else { assertNull(actual); } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java index 26324d614d4aa..d33547c37dce3 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java @@ -19,10 +19,9 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.services.storage.Storage; +import com.google.cloud.storage.Storage; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; @@ -46,7 +45,7 @@ public void testDeprecatedSettings() throws Exception { new GoogleCloudStorageRepository(repositoryMetaData, environment, NamedXContentRegistry.EMPTY, new GoogleCloudStorageService(environment, GoogleCloudStorageClientSettings.load(Settings.EMPTY)) { @Override - public Storage createClient(String clientName, String application, TimeValue connect, TimeValue read) throws Exception { + public Storage createClient(String clientName) throws Exception { return new MockStorage("test", new ConcurrentHashMap<>()); } }); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 
59931fe623ee4..a33ae90c549bc 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -19,96 +19,65 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; -import com.google.api.client.http.GenericUrl; -import com.google.api.client.http.HttpIOExceptionHandler; -import com.google.api.client.http.HttpRequest; -import com.google.api.client.http.HttpRequestFactory; -import com.google.api.client.http.HttpRequestInitializer; -import com.google.api.client.http.HttpResponse; -import com.google.api.client.http.HttpUnsuccessfulResponseHandler; -import com.google.api.client.testing.http.MockHttpTransport; +import com.google.auth.Credentials; +import com.google.cloud.http.HttpTransportOptions; +import com.google.cloud.storage.Storage; + +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matchers; +import java.util.Collections; +import java.util.Locale; -import java.io.IOException; - -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; public class GoogleCloudStorageServiceTests extends ESTestCase { - /** - * Test that the {@link GoogleCloudStorageService.DefaultHttpRequestInitializer} attaches new instances - * of {@link HttpIOExceptionHandler} and {@link HttpUnsuccessfulResponseHandler} for every HTTP requests. - */ - public void testDefaultHttpRequestInitializer() throws IOException { + public void testClientInitializer() throws Exception { + final String clientName = randomAlphaOfLength(4).toLowerCase(Locale.ROOT); final Environment environment = mock(Environment.class); - when(environment.settings()).thenReturn(Settings.EMPTY); - - final GoogleCredential credential = mock(GoogleCredential.class); - when(credential.handleResponse(any(HttpRequest.class), any(HttpResponse.class), anyBoolean())).thenReturn(false); - - final String endpoint = randomBoolean() ? randomAlphaOfLength(10) : null; - - final TimeValue readTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); - final TimeValue connectTimeout = TimeValue.timeValueSeconds(randomIntBetween(1, 120)); - final String applicationName = randomBoolean() ? randomAlphaOfLength(10) : null; - - final boolean useDeprecatedSettings = true; - - final TimeValue deprecatedReadTimeout = useDeprecatedSettings ? TimeValue.timeValueSeconds(randomIntBetween(1, 120)) : null; - final TimeValue deprecatedConnectTimeout = useDeprecatedSettings ? 
TimeValue.timeValueSeconds(randomIntBetween(1, 120)) : null; - - final GoogleCloudStorageClientSettings clientSettings = - new GoogleCloudStorageClientSettings(credential, endpoint, connectTimeout, readTimeout, applicationName); - - final HttpRequestInitializer initializer = - GoogleCloudStorageService.createRequestInitializer(clientSettings, deprecatedConnectTimeout, deprecatedReadTimeout); - final HttpRequestFactory requestFactory = new MockHttpTransport().createRequestFactory(initializer); - - final HttpRequest request1 = requestFactory.buildGetRequest(new GenericUrl()); - if (useDeprecatedSettings) { - assertEquals((int) deprecatedConnectTimeout.millis(), request1.getConnectTimeout()); - assertEquals((int) deprecatedReadTimeout.millis(), request1.getReadTimeout()); - } else { - assertEquals((int) connectTimeout.millis(), request1.getConnectTimeout()); - assertEquals((int) readTimeout.millis(), request1.getReadTimeout()); - } - assertSame(credential, request1.getInterceptor()); - assertNotNull(request1.getIOExceptionHandler()); - assertNotNull(request1.getUnsuccessfulResponseHandler()); - - final HttpRequest request2 = requestFactory.buildGetRequest(new GenericUrl()); - if (useDeprecatedSettings) { - assertEquals((int) deprecatedConnectTimeout.millis(), request2.getConnectTimeout()); - assertEquals((int) deprecatedReadTimeout.millis(), request2.getReadTimeout()); - } else { - assertEquals((int) connectTimeout.millis(), request2.getConnectTimeout()); - assertEquals((int) readTimeout.millis(), request2.getReadTimeout()); - } - assertSame(request1.getInterceptor(), request2.getInterceptor()); - assertNotNull(request2.getIOExceptionHandler()); - assertNotSame(request1.getIOExceptionHandler(), request2.getIOExceptionHandler()); - assertNotNull(request2.getUnsuccessfulResponseHandler()); - assertNotSame(request1.getUnsuccessfulResponseHandler(), request2.getUnsuccessfulResponseHandler()); - - request1.getUnsuccessfulResponseHandler().handleResponse(null, null, false); - verify(credential, times(1)).handleResponse(any(HttpRequest.class), any(HttpResponse.class), anyBoolean()); - - request2.getUnsuccessfulResponseHandler().handleResponse(null, null, false); - verify(credential, times(2)).handleResponse(any(HttpRequest.class), any(HttpResponse.class), anyBoolean()); + final TimeValue connectTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final TimeValue readTimeValue = TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final String applicationName = randomAlphaOfLength(4); + final String hostName = randomFrom("http://", "https://") + randomAlphaOfLength(4) + ":" + randomIntBetween(1, 65535); + final String projectIdName = randomAlphaOfLength(4); + final Settings settings = Settings.builder() + .put(GoogleCloudStorageClientSettings.CONNECT_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + connectTimeValue.getStringRep()) + .put(GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + readTimeValue.getStringRep()) + .put(GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName).getKey(), + applicationName) + .put(GoogleCloudStorageClientSettings.ENDPOINT_SETTING.getConcreteSettingForNamespace(clientName).getKey(), hostName) + .put(GoogleCloudStorageClientSettings.PROJECT_ID_SETTING.getConcreteSettingForNamespace(clientName).getKey(), projectIdName) + .build(); + when(environment.settings()).thenReturn(settings); + final GoogleCloudStorageClientSettings 
clientSettings = GoogleCloudStorageClientSettings.getClientSettings(settings, clientName); + final GoogleCloudStorageService service = new GoogleCloudStorageService(environment, + Collections.singletonMap(clientName, clientSettings)); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.createClient("another_client")); + assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); + assertSettingDeprecationsAndWarnings( + new Setting[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) }); + final Storage storage = service.createClient(clientName); + assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); + assertThat(storage.getOptions().getHost(), Matchers.is(hostName)); + assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName)); + assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class)); + assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(), + Matchers.is((int) connectTimeValue.millis())); + assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(), + Matchers.is((int) readTimeValue.millis())); + assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class)); } public void testToTimeout() { - assertNull(GoogleCloudStorageService.toTimeout(null)); - assertNull(GoogleCloudStorageService.toTimeout(TimeValue.ZERO)); + assertEquals(-1, GoogleCloudStorageService.toTimeout(null).intValue()); + assertEquals(-1, GoogleCloudStorageService.toTimeout(TimeValue.ZERO).intValue()); assertEquals(0, GoogleCloudStorageService.toTimeout(TimeValue.MINUS_ONE).intValue()); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java index 325cea132beb6..2b52b7a32a9cc 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/MockStorage.java @@ -19,289 +19,478 @@ package org.elasticsearch.repositories.gcs; -import com.google.api.client.googleapis.json.GoogleJsonError; -import com.google.api.client.googleapis.json.GoogleJsonResponseException; -import com.google.api.client.http.AbstractInputStreamContent; -import com.google.api.client.http.HttpHeaders; -import com.google.api.client.http.HttpMethods; -import com.google.api.client.http.HttpRequest; -import com.google.api.client.http.HttpRequestInitializer; -import com.google.api.client.http.HttpResponseException; -import com.google.api.client.http.LowLevelHttpRequest; -import com.google.api.client.http.LowLevelHttpResponse; -import com.google.api.client.http.MultipartContent; -import com.google.api.client.json.JsonFactory; -import com.google.api.client.testing.http.MockHttpTransport; -import com.google.api.client.testing.http.MockLowLevelHttpRequest; -import com.google.api.client.testing.http.MockLowLevelHttpResponse; -import com.google.api.services.storage.Storage; -import com.google.api.services.storage.model.Bucket; -import com.google.api.services.storage.model.StorageObject; -import org.elasticsearch.common.io.Streams; -import org.elasticsearch.rest.RestStatus; +import com.google.api.gax.paging.Page; +import com.google.cloud.Policy; +import com.google.cloud.ReadChannel; +import 
com.google.cloud.RestorableState; +import com.google.cloud.WriteChannel; +import com.google.cloud.storage.Acl; +import com.google.cloud.storage.Blob; +import com.google.cloud.storage.BlobId; +import com.google.cloud.storage.BlobInfo; +import com.google.cloud.storage.Bucket; +import com.google.cloud.storage.BucketInfo; +import com.google.cloud.storage.CopyWriter; +import com.google.cloud.storage.ServiceAccount; +import com.google.cloud.storage.Storage; +import com.google.cloud.storage.StorageBatch; +import com.google.cloud.storage.StorageException; +import com.google.cloud.storage.StorageOptions; +import com.google.cloud.storage.StorageRpcOptionUtils; +import com.google.cloud.storage.StorageTestUtils; + +import org.elasticsearch.core.internal.io.IOUtils; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; -import java.math.BigInteger; +import java.net.URL; +import java.nio.ByteBuffer; +import java.nio.channels.Channels; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; import java.util.ArrayList; +import java.util.List; +import java.util.Objects; import java.util.concurrent.ConcurrentMap; - -import static org.mockito.Mockito.mock; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; /** * {@link MockStorage} mocks a {@link Storage} client by storing all the blobs * in a given concurrent map. */ -class MockStorage extends Storage { - - /* A custom HTTP header name used to propagate the name of the blobs to delete in batch requests */ - private static final String DELETION_HEADER = "x-blob-to-delete"; +class MockStorage implements Storage { private final String bucketName; private final ConcurrentMap blobs; MockStorage(final String bucket, final ConcurrentMap blobs) { - super(new MockedHttpTransport(blobs), mock(JsonFactory.class), mock(HttpRequestInitializer.class)); - this.bucketName = bucket; - this.blobs = blobs; + this.bucketName = Objects.requireNonNull(bucket); + this.blobs = Objects.requireNonNull(blobs); } @Override - public Buckets buckets() { - return new MockBuckets(); + public Bucket get(String bucket, BucketGetOption... options) { + if (bucketName.equals(bucket)) { + return StorageTestUtils.createBucket(this, bucketName); + } else { + return null; + } } @Override - public Objects objects() { - return new MockObjects(); + public Blob get(BlobId blob) { + if (bucketName.equals(blob.getBucket())) { + final byte[] bytes = blobs.get(blob.getName()); + if (bytes != null) { + return StorageTestUtils.createBlob(this, bucketName, blob.getName(), bytes.length); + } + } + return null; } - class MockBuckets extends Buckets { + @Override + public boolean delete(BlobId blob) { + if (bucketName.equals(blob.getBucket()) && blobs.containsKey(blob.getName())) { + return blobs.remove(blob.getName()) != null; + } + return false; + } - @Override - public Get get(String getBucket) { - return new Get(getBucket) { - @Override - public Bucket execute() { - if (bucketName.equals(getBucket())) { - Bucket bucket = new Bucket(); - bucket.setId(bucketName); - return bucket; - } else { - return null; - } - } - }; + @Override + public List delete(Iterable blobIds) { + final List ans = new ArrayList<>(); + for (final BlobId blobId : blobIds) { + ans.add(delete(blobId)); } + return ans; } - class MockObjects extends Objects { + @Override + public Blob create(BlobInfo blobInfo, byte[] content, BlobTargetOption... 
options) { + if (bucketName.equals(blobInfo.getBucket()) == false) { + throw new StorageException(404, "Bucket not found"); + } + blobs.put(blobInfo.getName(), content); + return get(BlobId.of(blobInfo.getBucket(), blobInfo.getName())); + } + + @Override + public CopyWriter copy(CopyRequest copyRequest) { + if (bucketName.equals(copyRequest.getSource().getBucket()) == false) { + throw new StorageException(404, "Source bucket not found"); + } + if (bucketName.equals(copyRequest.getTarget().getBucket()) == false) { + throw new StorageException(404, "Target bucket not found"); + } + + final byte[] bytes = blobs.get(copyRequest.getSource().getName()); + if (bytes == null) { + throw new StorageException(404, "Source blob does not exist"); + } + blobs.put(copyRequest.getTarget().getName(), bytes); + return StorageRpcOptionUtils + .createCopyWriter(get(BlobId.of(copyRequest.getTarget().getBucket(), copyRequest.getTarget().getName()))); + } + + @Override + public Page list(String bucket, BlobListOption... options) { + if (bucketName.equals(bucket) == false) { + throw new StorageException(404, "Bucket not found"); + } + final Storage storage = this; + final String prefix = StorageRpcOptionUtils.getPrefix(options); - @Override - public Get get(String getBucket, String getObject) { - return new Get(getBucket, getObject) { + return new Page() { + @Override + public boolean hasNextPage() { + return false; + } + + @Override + public String getNextPageToken() { + return null; + } + + @Override + public Page getNextPage() { + throw new UnsupportedOperationException(); + } + + @Override + public Iterable iterateAll() { + return blobs.entrySet().stream() + .filter(blob -> ((prefix == null) || blob.getKey().startsWith(prefix))) + .map(blob -> StorageTestUtils.createBlob(storage, bucketName, blob.getKey(), blob.getValue().length)) + .collect(Collectors.toList()); + } + + @Override + public Iterable getValues() { + throw new UnsupportedOperationException(); + } + }; + } + + @Override + public ReadChannel reader(BlobId blob, BlobSourceOption... 
options) { + if (bucketName.equals(blob.getBucket())) { + final byte[] bytes = blobs.get(blob.getName()); + final ReadableByteChannel readableByteChannel = Channels.newChannel(new ByteArrayInputStream(bytes)); + return new ReadChannel() { @Override - public StorageObject execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - if (blobs.containsKey(getObject()) == false) { - throw newObjectNotFoundException(getObject()); - } - - StorageObject storageObject = new StorageObject(); - storageObject.setId(getObject()); - return storageObject; + public void close() { + IOUtils.closeWhileHandlingException(readableByteChannel); } @Override - public InputStream executeMediaAsInputStream() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - if (blobs.containsKey(getObject()) == false) { - throw newObjectNotFoundException(getObject()); - } - return new ByteArrayInputStream(blobs.get(getObject())); + public void seek(long position) throws IOException { + throw new UnsupportedOperationException(); } - }; - } - @Override - public Insert insert(String insertBucket, StorageObject insertObject, AbstractInputStreamContent insertStream) { - return new Insert(insertBucket, insertObject) { @Override - public StorageObject execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - - ByteArrayOutputStream out = new ByteArrayOutputStream(); - Streams.copy(insertStream.getInputStream(), out); - blobs.put(getName(), out.toByteArray()); - return null; + public void setChunkSize(int chunkSize) { + throw new UnsupportedOperationException(); + } + + @Override + public RestorableState capture() { + throw new UnsupportedOperationException(); + } + + @Override + public int read(ByteBuffer dst) throws IOException { + return readableByteChannel.read(dst); } - }; - } - @Override - public List list(String listBucket) { - return new List(listBucket) { @Override - public com.google.api.services.storage.model.Objects execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } - - final com.google.api.services.storage.model.Objects objects = new com.google.api.services.storage.model.Objects(); - - final java.util.List storageObjects = new ArrayList<>(); - for (Entry blob : blobs.entrySet()) { - if (getPrefix() == null || blob.getKey().startsWith(getPrefix())) { - StorageObject storageObject = new StorageObject(); - storageObject.setId(blob.getKey()); - storageObject.setName(blob.getKey()); - storageObject.setSize(BigInteger.valueOf((long) blob.getValue().length)); - storageObjects.add(storageObject); - } - } - - objects.setItems(storageObjects); - return objects; + public boolean isOpen() { + return readableByteChannel.isOpen(); } }; } + return null; + } + + @Override + public WriteChannel writer(BlobInfo blobInfo, BlobWriteOption... 
options) { + if (bucketName.equals(blobInfo.getBucket())) { + final ByteArrayOutputStream output = new ByteArrayOutputStream(); + return new WriteChannel() { + + final WritableByteChannel writableByteChannel = Channels.newChannel(output); - @Override - public Delete delete(String deleteBucket, String deleteObject) { - return new Delete(deleteBucket, deleteObject) { @Override - public Void execute() throws IOException { - if (bucketName.equals(getBucket()) == false) { - throw newBucketNotFoundException(getBucket()); - } + public void setChunkSize(int chunkSize) { + throw new UnsupportedOperationException(); + } - if (blobs.containsKey(getObject()) == false) { - throw newObjectNotFoundException(getObject()); - } + @Override + public RestorableState capture() { + throw new UnsupportedOperationException(); + } - blobs.remove(getObject()); - return null; + @Override + public int write(ByteBuffer src) throws IOException { + return writableByteChannel.write(src); } @Override - public HttpRequest buildHttpRequest() throws IOException { - HttpRequest httpRequest = super.buildHttpRequest(); - httpRequest.getHeaders().put(DELETION_HEADER, getObject()); - return httpRequest; + public boolean isOpen() { + return writableByteChannel.isOpen(); } - }; - } - @Override - public Copy copy(String srcBucket, String srcObject, String destBucket, String destObject, StorageObject content) { - return new Copy(srcBucket, srcObject, destBucket, destObject, content) { @Override - public StorageObject execute() throws IOException { - if (bucketName.equals(getSourceBucket()) == false) { - throw newBucketNotFoundException(getSourceBucket()); - } - if (bucketName.equals(getDestinationBucket()) == false) { - throw newBucketNotFoundException(getDestinationBucket()); - } - - final byte[] bytes = blobs.get(getSourceObject()); - if (bytes == null) { - throw newObjectNotFoundException(getSourceObject()); - } - blobs.put(getDestinationObject(), bytes); - - StorageObject storageObject = new StorageObject(); - storageObject.setId(getDestinationObject()); - return storageObject; + public void close() throws IOException { + IOUtils.closeWhileHandlingException(writableByteChannel); + blobs.put(blobInfo.getName(), output.toByteArray()); } }; } + return null; } - private static GoogleJsonResponseException newBucketNotFoundException(final String bucket) { - HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Bucket not found: " + bucket, new HttpHeaders()); - return new GoogleJsonResponseException(builder, new GoogleJsonError()); + // Everything below this line is not implemented. + + @Override + public Bucket create(BucketInfo bucketInfo, BucketTargetOption... options) { + return null; } - private static GoogleJsonResponseException newObjectNotFoundException(final String object) { - HttpResponseException.Builder builder = new HttpResponseException.Builder(404, "Object not found: " + object, new HttpHeaders()); - return new GoogleJsonResponseException(builder, new GoogleJsonError()); + @Override + public Blob create(BlobInfo blobInfo, BlobTargetOption... options) { + return null; } - /** - * {@link MockedHttpTransport} extends the existing testing transport to analyze the content - * of {@link com.google.api.client.googleapis.batch.BatchRequest} and delete the appropriates - * blobs. We use this because {@link Storage#batch()} is final and there is no other way to - * extend batch requests for testing purposes. 
- */ - static class MockedHttpTransport extends MockHttpTransport { + @Override + public Blob create(BlobInfo blobInfo, InputStream content, BlobWriteOption... options) { + return null; + } - private final ConcurrentMap blobs; + @Override + public Blob get(String bucket, String blob, BlobGetOption... options) { + return null; + } - MockedHttpTransport(final ConcurrentMap blobs) { - this.blobs = blobs; - } + @Override + public Blob get(BlobId blob, BlobGetOption... options) { + return null; + } - @Override - public LowLevelHttpRequest buildRequest(final String method, final String url) throws IOException { - // We analyze the content of the Batch request to detect our custom HTTP header, - // and extract from it the name of the blob to delete. Then we reply a simple - // batch response so that the client parser is happy. - // - // See https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch for the - // format of the batch request body. - if (HttpMethods.POST.equals(method) && url.endsWith("/batch")) { - return new MockLowLevelHttpRequest() { - @Override - public LowLevelHttpResponse execute() throws IOException { - final String contentType = new MultipartContent().getType(); - - final StringBuilder builder = new StringBuilder(); - try (ByteArrayOutputStream out = new ByteArrayOutputStream()) { - getStreamingContent().writeTo(out); - - Streams.readAllLines(new ByteArrayInputStream(out.toByteArray()), line -> { - if (line != null && line.startsWith(DELETION_HEADER)) { - builder.append("--__END_OF_PART__\r\n"); - builder.append("Content-Type: application/http").append("\r\n"); - builder.append("\r\n"); - builder.append("HTTP/1.1 "); - - final String blobName = line.substring(line.indexOf(':') + 1).trim(); - if (blobs.containsKey(blobName)) { - builder.append(RestStatus.OK.getStatus()); - blobs.remove(blobName); - } else { - builder.append(RestStatus.NOT_FOUND.getStatus()); - } - builder.append("\r\n"); - builder.append("Content-Type: application/json; charset=UTF-8").append("\r\n"); - builder.append("Content-Length: 0").append("\r\n"); - builder.append("\r\n"); - } - }); - builder.append("\r\n"); - builder.append("--__END_OF_PART__--"); - } - - MockLowLevelHttpResponse response = new MockLowLevelHttpResponse(); - response.setStatusCode(200); - response.setContent(builder.toString()); - response.setContentType(contentType); - return response; - } - }; - } else { - return super.buildRequest(method, url); - } - } + @Override + public Page list(BucketListOption... options) { + return null; + } + + @Override + public Bucket update(BucketInfo bucketInfo, BucketTargetOption... options) { + return null; + } + + @Override + public Blob update(BlobInfo blobInfo, BlobTargetOption... options) { + return null; + } + + @Override + public Blob update(BlobInfo blobInfo) { + return null; + } + + @Override + public boolean delete(String bucket, BucketSourceOption... options) { + return false; + } + + @Override + public boolean delete(String bucket, String blob, BlobSourceOption... options) { + return false; + } + + @Override + public boolean delete(BlobId blob, BlobSourceOption... options) { + return false; + } + + @Override + public Blob compose(ComposeRequest composeRequest) { + return null; + } + + @Override + public byte[] readAllBytes(String bucket, String blob, BlobSourceOption... options) { + return new byte[0]; + } + + @Override + public byte[] readAllBytes(BlobId blob, BlobSourceOption... 
options) { + return new byte[0]; + } + + @Override + public StorageBatch batch() { + return null; + } + + @Override + public ReadChannel reader(String bucket, String blob, BlobSourceOption... options) { + return null; + } + + @Override + public URL signUrl(BlobInfo blobInfo, long duration, TimeUnit unit, SignUrlOption... options) { + return null; + } + + @Override + public List get(BlobId... blobIds) { + return null; + } + + @Override + public List get(Iterable blobIds) { + return null; + } + + @Override + public List update(BlobInfo... blobInfos) { + return null; + } + + @Override + public List update(Iterable blobInfos) { + return null; + } + + @Override + public List delete(BlobId... blobIds) { + return null; + } + + @Override + public Acl getAcl(String bucket, Acl.Entity entity, BucketSourceOption... options) { + return null; + } + + @Override + public Acl getAcl(String bucket, Acl.Entity entity) { + return null; + } + + @Override + public boolean deleteAcl(String bucket, Acl.Entity entity, BucketSourceOption... options) { + return false; + } + + @Override + public boolean deleteAcl(String bucket, Acl.Entity entity) { + return false; + } + + @Override + public Acl createAcl(String bucket, Acl acl, BucketSourceOption... options) { + return null; + } + + @Override + public Acl createAcl(String bucket, Acl acl) { + return null; + } + + @Override + public Acl updateAcl(String bucket, Acl acl, BucketSourceOption... options) { + return null; + } + + @Override + public Acl updateAcl(String bucket, Acl acl) { + return null; + } + + @Override + public List listAcls(String bucket, BucketSourceOption... options) { + return null; + } + + @Override + public List listAcls(String bucket) { + return null; + } + + @Override + public Acl getDefaultAcl(String bucket, Acl.Entity entity) { + return null; + } + + @Override + public boolean deleteDefaultAcl(String bucket, Acl.Entity entity) { + return false; + } + + @Override + public Acl createDefaultAcl(String bucket, Acl acl) { + return null; + } + + @Override + public Acl updateDefaultAcl(String bucket, Acl acl) { + return null; + } + + @Override + public List listDefaultAcls(String bucket) { + return null; + } + + @Override + public Acl getAcl(BlobId blob, Acl.Entity entity) { + return null; + } + + @Override + public boolean deleteAcl(BlobId blob, Acl.Entity entity) { + return false; + } + + @Override + public Acl createAcl(BlobId blob, Acl acl) { + return null; + } + + @Override + public Acl updateAcl(BlobId blob, Acl acl) { + return null; + } + + @Override + public List listAcls(BlobId blob) { + return null; + } + + @Override + public Policy getIamPolicy(String bucket, BucketSourceOption... options) { + return null; + } + + @Override + public Policy setIamPolicy(String bucket, Policy policy, BucketSourceOption... options) { + return null; + } + + @Override + public List testIamPermissions(String bucket, List permissions, BucketSourceOption... options) { + return null; + } + + @Override + public ServiceAccount getServiceAccount(String projectId) { + return null; + } + + @Override + public StorageOptions getOptions() { + return null; } } From 47fc038918de5bffe90a5cee98893fca2afc5fbb Mon Sep 17 00:00:00 2001 From: Costin Leau Date: Tue, 15 May 2018 22:46:46 +0300 Subject: [PATCH 06/44] SQL: Verify GROUP BY ordering on grouped columns (#30585) Due to the way composite aggregation works, ordering in GROUP BY can be applied only through grouped columns which now the analyzer verifier enforces. 
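For example (illustrative queries, not taken from this change; they mirror the
new verifier tests below):

    SELECT MAX(int) FROM test GROUP BY text ORDER BY text      -- accepted: orders on a grouping key
    SELECT MAX(int) FROM test GROUP BY text ORDER BY MAX(int)  -- rejected: aggregates are not composite group keys
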
Fix 29900
---
 .../xpack/sql/analysis/analyzer/Verifier.java | 23 ++++++++++++++++---
 .../analyzer/VerifierErrorMessagesTests.java  | 19 +++++++++++++--
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
index f5147b84468b7..6f8be61b463fd 100644
--- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
+++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/analysis/analyzer/Verifier.java
@@ -211,12 +211,13 @@ static Collection<Failure> verify(LogicalPlan plan) {
 
     /**
      * Check validity of Aggregate/GroupBy.
-     * This rule is needed for two reasons:
+     * This rule is needed for multiple reasons:
      * 1. a user might specify an invalid aggregate (SELECT foo GROUP BY bar)
      * 2. the order/having might contain a non-grouped attribute. This is typically
      *    caught by the Analyzer however if wrapped in a function (ABS()) it gets resolved
      *    (because the expression gets resolved little by little without being pushed down,
      *    without the Analyzer modifying anything).
+     * 3. composite agg (used for GROUP BY) allows ordering only on the group keys
      */
     private static boolean checkGroupBy(LogicalPlan p, Set<Failure> localFailures,
             Map<String, Function> resolvedFunctions, Set<LogicalPlan> groupingFailures) {
@@ -225,7 +226,7 @@ && checkGroupByOrder(p, localFailures, groupingFailures, resolvedFunctions)
                 && checkGroupByHaving(p, localFailures, groupingFailures, resolvedFunctions);
     }
 
-    // check whether an orderBy failed
+    // check whether an orderBy failed or if it occurs on a non-key
     private static boolean checkGroupByOrder(LogicalPlan p, Set<Failure> localFailures,
             Set<LogicalPlan> groupingFailures, Map<String, Function> functions) {
         if (p instanceof OrderBy) {
@@ -234,7 +235,23 @@ private static boolean checkGroupByOrder(LogicalPlan p, Set<Failure> localFailur
             Aggregate a = (Aggregate) o.child();
 
             Map<Expression, Node<?>> missing = new LinkedHashMap<>();
-            o.order().forEach(oe -> oe.collectFirstChildren(c -> checkGroupMatch(c, oe, a.groupings(), missing, functions)));
+            o.order().forEach(oe -> {
+                Expression e = oe.child();
+                // cannot order by aggregates (not supported by composite)
+                if (Functions.isAggregate(e)) {
+                    missing.put(e, oe);
+                    return;
+                }
+
+                // make sure to compare attributes directly
+                if (Expressions.anyMatch(a.groupings(),
+                        g -> e.semanticEquals(e instanceof Attribute ? Expressions.attribute(g) : g))) {
+                    return;
+                }
+
+                // nothing matched, cannot group by it
+                missing.put(e, oe);
+            });
 
             if (!missing.isEmpty()) {
                 String plural = missing.size() > 1 ?
"s" : StringUtils.EMPTY; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java index 355c4d2f7b763..60875e0194a0c 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/analysis/analyzer/VerifierErrorMessagesTests.java @@ -111,7 +111,7 @@ public void testGroupByOrderByNonGrouped() { } public void testGroupByOrderByScalarOverNonGrouped() { - assertEquals("1:50: Cannot order by non-grouped column [date], expected [text]", + assertEquals("1:50: Cannot order by non-grouped column [YEAR(date [UTC])], expected [text]", verify("SELECT MAX(int) FROM test GROUP BY text ORDER BY YEAR(date)")); } @@ -144,4 +144,19 @@ public void testUnsupportedType() { assertEquals("1:8: Cannot use field [unsupported] type [ip_range] as is unsupported", verify("SELECT unsupported FROM test")); } -} + + public void testGroupByOrderByNonKey() { + assertEquals("1:52: Cannot order by non-grouped column [a], expected [bool]", + verify("SELECT AVG(int) a FROM test GROUP BY bool ORDER BY a")); + } + + public void testGroupByOrderByFunctionOverKey() { + assertEquals("1:44: Cannot order by non-grouped column [MAX(int)], expected [int]", + verify("SELECT int FROM test GROUP BY int ORDER BY MAX(int)")); + } + + public void testGroupByOrderByScore() { + assertEquals("1:44: Cannot order by non-grouped column [SCORE()], expected [int]", + verify("SELECT int FROM test GROUP BY int ORDER BY SCORE()")); + } +} \ No newline at end of file From d45ef008644d9c92b93589a6d7f0aee32506b272 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Tue, 15 May 2018 06:23:31 -0400 Subject: [PATCH 07/44] Revert "Revert "Add deprecation warning for default shards (#30587)"" This reverts commit 93e3e083fcaa0f1c56ead8987ab809ddbefe254e. --- .../test/rest/DefaultShardsIT.java | 51 +++++++++++++++++++ .../metadata/MetaDataCreateIndexService.java | 8 ++- .../metadata/IndexCreationTaskTests.java | 36 +++++++++++++ .../test/rest/yaml/section/DoSection.java | 11 +++- 4 files changed, 100 insertions(+), 6 deletions(-) create mode 100644 distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java diff --git a/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java new file mode 100644 index 0000000000000..de736c84e45b6 --- /dev/null +++ b/distribution/archives/integ-test-zip/src/test/java/org/elasticsearch/test/rest/DefaultShardsIT.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test.rest; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; + +import java.io.IOException; +import java.util.regex.Matcher; + +import static org.elasticsearch.common.logging.DeprecationLogger.WARNING_HEADER_PATTERN; +import static org.hamcrest.Matchers.equalTo; + +public class DefaultShardsIT extends ESRestTestCase { + + public void testDefaultShards() throws IOException { + final Response response = client().performRequest(new Request("PUT", "/index")); + final String warning = response.getHeader("Warning"); + final Matcher matcher = WARNING_HEADER_PATTERN.matcher(warning); + assertTrue(matcher.matches()); + final String message = matcher.group(1); + assertThat(message, equalTo("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template")); + } + + public void testNonDefaultShards() throws IOException { + final Request request = new Request("PUT", "/index"); + request.setJsonEntity("{\"settings\":{\"index.number_of_shards\":1}}"); + final Response response = client().performRequest(request); + assertNull(response.getHeader("Warning")); + } + +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index be7b0b483182c..35883cb208031 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -19,11 +19,9 @@ package org.elasticsearch.cluster.metadata; -import com.carrotsearch.hppc.cursors.ObjectCursor; import com.carrotsearch.hppc.cursors.ObjectObjectCursor; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.Version; @@ -59,7 +57,6 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.regex.Regex; import org.elasticsearch.common.settings.IndexScopedSettings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; @@ -81,12 +78,10 @@ import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import java.io.IOException; import java.io.UnsupportedEncodingException; import java.nio.file.Path; import java.util.ArrayList; import java.util.Collections; -import java.util.Comparator; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -376,6 +371,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { // now, put the request settings, so they override templates indexSettingsBuilder.put(request.settings()); if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) { + deprecationLogger.deprecated("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); 
indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5)); } if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) { diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 5accb3aba3ca4..40673ba6d29cf 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -120,6 +120,10 @@ public void testMatchTemplates() throws Exception { final ClusterState result = executeTask(); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + assertThat(result.metaData().index("test").getAliases(), hasAllKeys("alias_from_template_1", "alias_from_template_2")); assertThat(result.metaData().index("test").getAliases(), not(hasKey("alias_from_template_3"))); } @@ -134,6 +138,10 @@ public void testApplyDataFromTemplate() throws Exception { final ClusterState result = executeTask(); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); @@ -148,6 +156,10 @@ public void testApplyDataFromRequest() throws Exception { final ClusterState result = executeTask(); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + assertThat(result.metaData().index("test").getAliases(), hasKey("alias1")); assertThat(result.metaData().index("test").getCustoms(), hasKey("custom1")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("value1")); @@ -177,6 +189,10 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { final ClusterState result = executeTask(); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + assertThat(result.metaData().index("test").getCustoms().get("custom1"), equalTo(mergedCustom)); assertThat(result.metaData().index("test").getAliases().get("alias1").getSearchRouting(), equalTo("fromReq")); assertThat(result.metaData().index("test").getSettings().get("key1"), equalTo("reqValue")); @@ -186,6 +202,10 @@ public void testRequestDataHavePriorityOverTemplateData() throws Exception { public void testDefaultSettings() throws Exception { final ClusterState result = executeTask(); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), 
equalTo("5")); } @@ -194,6 +214,10 @@ public void testSettingsFromClusterState() throws Exception { final ClusterState result = executeTask(); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + assertThat(result.getMetaData().index("test").getSettings().get(SETTING_NUMBER_OF_SHARDS), equalTo("15")); } @@ -246,6 +270,10 @@ public void testRequestStateOpen() throws Exception { executeTask(); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + verify(allocationService, times(1)).reroute(anyObject(), anyObject()); } @@ -255,6 +283,10 @@ public void testIndexRemovalOnFailure() throws Exception { expectThrows(RuntimeException.class, this::executeTask); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + verify(indicesService, times(1)).removeIndex(anyObject(), anyObject(), anyObject()); } @@ -290,6 +322,10 @@ public void testValidateWaitForActiveShardsFailure() throws Exception { IllegalArgumentException e = expectThrows(IllegalArgumentException.class, this::executeTask); + assertWarnings("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template"); + assertThat(e.getMessage(), containsString("invalid wait_for_active_shards")); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java index 7c6647d65f044..81d5c1d32a94b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java @@ -271,7 +271,16 @@ void checkWarningHeaders(final List warningHeaders) { final boolean matches = matcher.matches(); if (matches) { final String message = matcher.group(1); - if (expected.remove(message) == false) { + // noinspection StatementWithEmptyBody + if (message.equals("the default number of shards will change from [5] to [1] in 7.0.0; " + + "if you wish to continue using the default of [5] shards, " + + "you must manage this on the create index request or with an index template")) { + /* + * This warning header will come back in the vast majority of our tests that create an index. Rather than rewrite our + * tests to assert this warning header, we assume that it is expected. + */ + } + else if (expected.remove(message) == false) { unexpected.add(header); } } else { From 32c7b06bd7526381bbc8fa45528e8839af81862a Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 15 May 2018 17:16:16 -0400 Subject: [PATCH 08/44] QA: System property to override distribution (#30591) This configures all `qa` projects to use the distribution contained in the `tests.distribution` system property if it is set. The goal is to create a simple way to run tests against the default distribution which has x-pack basic features enabled while not forcing these tests on all contributors. 
You run these tests by doing something like:

```
./gradlew -p qa -Dtests.distribution=zip check
```

or

```
./gradlew -p qa -Dtests.distribution=zip bwcTest
```

x-pack basic *shouldn't* get in the way of any of these tests but
nothing is ever perfect, so we have to disable a few when running with
the zip distribution.
---
 qa/build.gradle                                    |  2 +-
 qa/mixed-cluster/build.gradle                      |  9 ++++++++-
 qa/smoke-test-multinode/build.gradle               | 10 ++++++++++
 qa/smoke-test-rank-eval-with-mustache/build.gradle |  8 ++++++++
 .../test/junit/listeners/ReproduceInfoPrinter.java |  1 +
 5 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/qa/build.gradle b/qa/build.gradle
index 494f6e3cd94b7..709c309359ecf 100644
--- a/qa/build.gradle
+++ b/qa/build.gradle
@@ -4,7 +4,7 @@ import org.elasticsearch.gradle.test.RestIntegTestTask
 subprojects { Project subproj ->
   subproj.tasks.withType(RestIntegTestTask) {
     subproj.extensions.configure("${it.name}Cluster") { cluster ->
-      cluster.distribution = 'oss-zip'
+      cluster.distribution = System.getProperty('tests.distribution', 'oss-zip')
     }
   }
 }
diff --git a/qa/mixed-cluster/build.gradle b/qa/mixed-cluster/build.gradle
index 3958d97ac1159..417ef497b902b 100644
--- a/qa/mixed-cluster/build.gradle
+++ b/qa/mixed-cluster/build.gradle
@@ -57,9 +57,16 @@ for (Version version : bwcVersions.wireCompatible) {
     bwcTest.dependsOn(versionBwcTest)
   }
 
-  /* To support taking index snapshots, we have to set path.repo setting */
   tasks.getByName("${baseName}#mixedClusterTestRunner").configure {
+    /* To support taking index snapshots, we have to set path.repo setting */
     systemProperty 'tests.path.repo', new File(buildDir, "cluster/shared/repo")
+    if ('zip'.equals(extension.distribution)) {
+      systemProperty 'tests.rest.blacklist', [
+        'cat.templates/10_basic/No templates',
+        'cat.templates/10_basic/Sort templates',
+        'cat.templates/10_basic/Multiple template',
+      ].join(',')
+    }
   }
 }
diff --git a/qa/smoke-test-multinode/build.gradle b/qa/smoke-test-multinode/build.gradle
index 5df77bd0d9513..9d299e16f0210 100644
--- a/qa/smoke-test-multinode/build.gradle
+++ b/qa/smoke-test-multinode/build.gradle
@@ -27,3 +27,13 @@ integTest {
 integTestCluster {
   numNodes = 2
 }
+
+integTestRunner {
+  if ('zip'.equals(integTestCluster.distribution)) {
+    systemProperty 'tests.rest.blacklist', [
+      'cat.templates/10_basic/No templates',
+      'cat.templates/10_basic/Sort templates',
+      'cat.templates/10_basic/Multiple template',
+    ].join(',')
+  }
+}
diff --git a/qa/smoke-test-rank-eval-with-mustache/build.gradle b/qa/smoke-test-rank-eval-with-mustache/build.gradle
index 7274e65f4e1bd..122c2603719a0 100644
--- a/qa/smoke-test-rank-eval-with-mustache/build.gradle
+++ b/qa/smoke-test-rank-eval-with-mustache/build.gradle
@@ -26,3 +26,11 @@ dependencies {
   testCompile project(path: ':modules:lang-mustache', configuration: 'runtime')
 }
 
+/*
+ * One of the integration tests doesn't work with the zip distribution
+ * and will be fixed later.
+ * Tracked by https://github.com/elastic/elasticsearch/issues/30628 + */ +if ("zip".equals(integTestCluster.distribution)) { + integTestRunner.enabled = false +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java index ca16ac6204a90..877a6f2e98a67 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java +++ b/test/framework/src/main/java/org/elasticsearch/test/junit/listeners/ReproduceInfoPrinter.java @@ -149,6 +149,7 @@ public ReproduceErrorMessageBuilder appendESProperties() { } appendOpt("tests.locale", Locale.getDefault().toLanguageTag()); appendOpt("tests.timezone", TimeZone.getDefault().getID()); + appendOpt("tests.distribution", System.getProperty("tests.distribution")); return this; } From c350c22b5c8cb70c980532a60d15a1b06d40dc73 Mon Sep 17 00:00:00 2001 From: Nik Everett Date: Tue, 15 May 2018 17:48:47 -0400 Subject: [PATCH 09/44] Switch many QA projects to use new style requests (#30574) In #29623 we added `Request` object flavored requests to the low level REST client and in #30315 we deprecated the the old requests. This changes many calls in the `qa` projects to use the new version. --- ...rossClusterSearchUnavailableClusterIT.java | 20 +++-- .../qa/die_with_dignity/DieWithDignityIT.java | 4 +- .../elasticsearch/backwards/IndexingIT.java | 86 +++++++++---------- .../elasticsearch/bwc/QueryBuilderBWCIT.java | 23 +++-- .../http/ContextAndHeaderTransportIT.java | 7 +- .../org/elasticsearch/http/CorsNotSetIT.java | 9 +- .../org/elasticsearch/http/CorsRegexIT.java | 43 ++++++---- .../elasticsearch/http/DeprecationHttpIT.java | 13 +-- .../http/DetailedErrorsDisabledIT.java | 8 +- .../http/DetailedErrorsEnabledIT.java | 8 +- .../elasticsearch/http/HttpCompressionIT.java | 21 +++-- .../org/elasticsearch/http/NoHandlerIT.java | 9 +- .../http/ResponseHeaderPluginIT.java | 7 +- .../http/RestHttpResponseHeadersIT.java | 9 +- 14 files changed, 150 insertions(+), 117 deletions(-) diff --git a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java index 6ea864b9084f3..409a646c4fe36 100644 --- a/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java +++ b/qa/ccs-unavailable-clusters/src/test/java/org/elasticsearch/search/CrossClusterSearchUnavailableClusterIT.java @@ -35,6 +35,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchScrollRequest; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -134,7 +135,7 @@ public void testSearchSkipUnavailable() throws IOException { for (int i = 0; i < 10; i++) { restHighLevelClient.index(new IndexRequest("index", "doc", String.valueOf(i)).source("field", "value")); } - Response refreshResponse = client().performRequest("POST", "/index/_refresh"); + Response refreshResponse = client().performRequest(new Request("POST", "/index/_refresh")); assertEquals(200, refreshResponse.getStatusLine().getStatusCode()); { @@ -223,10 +224,11 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { { 
//check that skip_unavailable alone cannot be set - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody( - Collections.singletonMap("skip_unavailable", randomBoolean())); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody( + Collections.singletonMap("skip_unavailable", randomBoolean()))); ResponseException responseException = expectThrows(ResponseException.class, - () -> client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity)); + () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " + @@ -240,9 +242,10 @@ public void testSkipUnavailableDependsOnSeeds() throws IOException { { //check that seeds cannot be reset alone if skip_unavailable is set - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody(Collections.singletonMap("seeds", null)); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody(Collections.singletonMap("seeds", null))); ResponseException responseException = expectThrows(ResponseException.class, - () -> client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity)); + () -> client().performRequest(request)); assertEquals(400, responseException.getResponse().getStatusLine().getStatusCode()); assertThat(responseException.getMessage(), containsString("Missing required setting [search.remote.remote1.seeds] " + "for setting [search.remote.remote1.skip_unavailable]")); @@ -284,8 +287,9 @@ private static void assertSearchConnectFailure() { private static void updateRemoteClusterSettings(Map settings) throws IOException { - HttpEntity clusterSettingsEntity = buildUpdateSettingsRequestBody(settings); - Response response = client().performRequest("PUT", "/_cluster/settings", Collections.emptyMap(), clusterSettingsEntity); + Request request = new Request("PUT", "/_cluster/settings"); + request.setEntity(buildUpdateSettingsRequestBody(settings)); + Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); } diff --git a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 4e69a478562a7..992d3ce71f623 100644 --- a/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/qa/die-with-dignity/src/test/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -21,6 +21,7 @@ import org.apache.http.ConnectionClosedException; import org.apache.lucene.util.Constants; +import org.elasticsearch.client.Request; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.test.rest.ESRestTestCase; import org.hamcrest.Matcher; @@ -51,7 +52,8 @@ public void testDieWithDignity() throws Exception { assertThat(pidFileLines, hasSize(1)); final int pid = Integer.parseInt(pidFileLines.get(0)); Files.delete(pidFile); - IOException e = expectThrows(IOException.class, () -> client().performRequest("GET", "/_die_with_dignity")); + IOException e = expectThrows(IOException.class, + () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); Matcher failureMatcher = instanceOf(ConnectionClosedException.class); if (Constants.WINDOWS) { /* 
diff --git a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java index 73aa02fd6ccb9..0886edc8ae0d2 100644 --- a/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java +++ b/qa/mixed-cluster/src/test/java/org/elasticsearch/backwards/IndexingIT.java @@ -19,9 +19,8 @@ package org.elasticsearch.backwards; import org.apache.http.HttpHost; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.Version; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -34,28 +33,23 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; -import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiOfLength; -import static java.util.Collections.emptyMap; -import static java.util.Collections.singletonMap; import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED; import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO; -import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; public class IndexingIT extends ESRestTestCase { private int indexDocs(String index, final int idStart, final int numDocs) throws IOException { for (int i = 0; i < numDocs; i++) { final int id = idStart + i; - assertOK(client().performRequest("PUT", index + "/test/" + id, emptyMap(), - new StringEntity("{\"test\": \"test_" + randomAsciiOfLength(2) + "\"}", ContentType.APPLICATION_JSON))); + Request request = new Request("PUT", index + "/test/" + id); + request.setJsonEntity("{\"test\": \"test_" + randomAlphaOfLength(2) + "\"}"); + assertOK(client().performRequest(request)); } return numDocs; } @@ -108,7 +102,7 @@ public void testIndexVersionPropagation() throws Exception { logger.info("allowing shards on all nodes"); updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); List shards = buildShards(index, nodes, newNodeClient); Shard primary = buildShards(index, nodes, newNodeClient).stream().filter(Shard::isPrimary).findFirst().get(); logger.info("primary resolved to: " + primary.getNode().getNodeName()); @@ -120,7 +114,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing docs with [{}] concurrent updates after allowing shards on all nodes", nUpdates); final int finalVersionForDoc2 = indexDocWithConcurrentUpdates(index, 2, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); primary = shards.stream().filter(Shard::isPrimary).findFirst().get(); logger.info("primary resolved to: " + primary.getNode().getNodeName()); @@ -136,7 +130,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing docs with [{}] concurrent updates after moving primary", 
nUpdates); final int finalVersionForDoc3 = indexDocWithConcurrentUpdates(index, 3, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 3, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc3); @@ -149,7 +143,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 0", nUpdates); final int finalVersionForDoc4 = indexDocWithConcurrentUpdates(index, 4, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 4, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc4); @@ -162,7 +156,7 @@ public void testIndexVersionPropagation() throws Exception { nUpdates = randomIntBetween(minUpdates, maxUpdates); logger.info("indexing doc with [{}] concurrent updates after setting number of replicas to 1", nUpdates); final int finalVersionForDoc5 = indexDocWithConcurrentUpdates(index, 5, nUpdates); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); shards = buildShards(index, nodes, newNodeClient); for (Shard shard : shards) { assertVersion(index, 5, "_only_nodes:" + shard.getNode().getNodeName(), finalVersionForDoc5); @@ -197,7 +191,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("allowing shards on all nodes"); updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); for (final String bwcName : bwcNamesList) { assertCount(index, "_only_nodes:" + bwcName, numDocs); } @@ -228,7 +222,7 @@ public void testSeqNoCheckpoints() throws Exception { logger.info("setting number of replicas to 1"); updateIndexSettings(index, Settings.builder().put("index.number_of_replicas", 1)); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); // the number of documents on the primary and on the recovered replica should match the number of indexed documents assertCount(index, "_primary", numDocs); assertCount(index, "_replica", numDocs); @@ -242,20 +236,18 @@ public void testUpdateSnapshotStatus() throws Exception { logger.info("cluster discovered: {}", nodes.toString()); // Create the repository before taking the snapshot. 
- String repoConfig = Strings + Request request = new Request("PUT", "/_snapshot/repo"); + request.setJsonEntity(Strings .toString(JsonXContent.contentBuilder() .startObject() - .field("type", "fs") - .startObject("settings") - .field("compress", randomBoolean()) - .field("location", System.getProperty("tests.path.repo")) - .endObject() - .endObject()); - - assertOK( - client().performRequest("PUT", "/_snapshot/repo", emptyMap(), - new StringEntity(repoConfig, ContentType.APPLICATION_JSON)) - ); + .field("type", "fs") + .startObject("settings") + .field("compress", randomBoolean()) + .field("location", System.getProperty("tests.path.repo")) + .endObject() + .endObject())); + + assertOK(client().performRequest(request)); String bwcNames = nodes.getBWCNodes().stream().map(Node::getNodeName).collect(Collectors.joining(",")); @@ -269,34 +261,36 @@ public void testUpdateSnapshotStatus() throws Exception { createIndex(index, settings.build()); indexDocs(index, 0, between(50, 100)); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); - assertOK( - client().performRequest("PUT", "/_snapshot/repo/bwc-snapshot", singletonMap("wait_for_completion", "true"), - new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON)) - ); + request = new Request("PUT", "/_snapshot/repo/bwc-snapshot"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); + assertOK(client().performRequest(request)); // Allocating shards on all nodes, taking snapshots should happen on all nodes. updateIndexSettings(index, Settings.builder().putNull("index.routing.allocation.include._name")); ensureGreen(index); - assertOK(client().performRequest("POST", index + "/_refresh")); + assertOK(client().performRequest(new Request("POST", index + "/_refresh"))); - assertOK( - client().performRequest("PUT", "/_snapshot/repo/mixed-snapshot", singletonMap("wait_for_completion", "true"), - new StringEntity("{\"indices\": \"" + index + "\"}", ContentType.APPLICATION_JSON)) - ); + request = new Request("PUT", "/_snapshot/repo/mixed-snapshot"); + request.addParameter("wait_for_completion", "true"); + request.setJsonEntity("{\"indices\": \"" + index + "\"}"); } private void assertCount(final String index, final String preference, final int expectedCount) throws IOException { - final Response response = client().performRequest("GET", index + "/_count", Collections.singletonMap("preference", preference)); + Request request = new Request("GET", index + "/_count"); + request.addParameter("preference", preference); + final Response response = client().performRequest(request); assertOK(response); final int actualCount = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("count").toString()); assertThat(actualCount, equalTo(expectedCount)); } private void assertVersion(final String index, final int docId, final String preference, final int expectedVersion) throws IOException { - final Response response = client().performRequest("GET", index + "/test/" + docId, - Collections.singletonMap("preference", preference)); + Request request = new Request("GET", index + "/test/" + docId); + request.addParameter("preference", preference); + final Response response = client().performRequest(request); assertOK(response); final int actualVersion = Integer.parseInt(ObjectPath.createFromResponse(response).evaluate("_version").toString()); assertThat("version 
mismatch for doc [" + docId + "] preference [" + preference + "]", actualVersion, equalTo(expectedVersion)); @@ -339,7 +333,9 @@ private void assertSeqNoOnShards(String index, Nodes nodes, int numDocs, RestCli } private List buildShards(String index, Nodes nodes, RestClient client) throws IOException { - Response response = client.performRequest("GET", index + "/_stats", singletonMap("level", "shards")); + Request request = new Request("GET", index + "/_stats"); + request.addParameter("level", "shards"); + Response response = client.performRequest(request); List shardStats = ObjectPath.createFromResponse(response).evaluate("indices." + index + ".shards.0"); ArrayList shards = new ArrayList<>(); for (Object shard : shardStats) { @@ -361,7 +357,7 @@ private List buildShards(String index, Nodes nodes, RestClient client) th } private Nodes buildNodeAndVersions() throws IOException { - Response response = client().performRequest("GET", "_nodes"); + Response response = client().performRequest(new Request("GET", "_nodes")); ObjectPath objectPath = ObjectPath.createFromResponse(response); Map nodesAsMap = objectPath.evaluate("nodes"); Nodes nodes = new Nodes(); @@ -372,7 +368,7 @@ private Nodes buildNodeAndVersions() throws IOException { Version.fromString(objectPath.evaluate("nodes." + id + ".version")), HttpHost.create(objectPath.evaluate("nodes." + id + ".http.publish_address")))); } - response = client().performRequest("GET", "_cluster/state"); + response = client().performRequest(new Request("GET", "_cluster/state")); nodes.setMasterNodeId(ObjectPath.createFromResponse(response).evaluate("master_node")); return nodes; } diff --git a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java index 29b3deb1cb5de..284a45fa3055e 100644 --- a/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java +++ b/qa/query-builder-bwc/src/test/java/org/elasticsearch/bwc/QueryBuilderBWCIT.java @@ -19,11 +19,13 @@ package org.elasticsearch.bwc; -import org.elasticsearch.Version; -import org.elasticsearch.client.Response; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; import org.apache.http.util.EntityUtils; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.common.Booleans; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -189,13 +191,15 @@ public void testQueryBuilderBWC() throws Exception { mappingsAndSettings.endObject(); } mappingsAndSettings.endObject(); - Response rsp = client().performRequest("PUT", "/" + index, Collections.emptyMap(), - new StringEntity(Strings.toString(mappingsAndSettings), ContentType.APPLICATION_JSON)); + Request request = new Request("PUT", "/" + index); + request.setJsonEntity(Strings.toString(mappingsAndSettings)); + Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); for (int i = 0; i < CANDIDATES.size(); i++) { - rsp = client().performRequest("PUT", "/" + index + "/doc/" + Integer.toString(i), Collections.emptyMap(), - new StringEntity((String) CANDIDATES.get(i)[0], ContentType.APPLICATION_JSON)); + request = new Request("PUT", "/" + index + "/doc/" + Integer.toString(i)); + request.setJsonEntity((String) CANDIDATES.get(i)[0]); + rsp = client().performRequest(request); 
assertEquals(201, rsp.getStatusLine().getStatusCode()); } } else { @@ -204,9 +208,10 @@ public void testQueryBuilderBWC() throws Exception { for (int i = 0; i < CANDIDATES.size(); i++) { QueryBuilder expectedQueryBuilder = (QueryBuilder) CANDIDATES.get(i)[1]; - Response rsp = client().performRequest("GET", "/" + index + "/_search", Collections.emptyMap(), - new StringEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + - "\"docvalue_fields\" : [\"query.query_builder_field\"]}", ContentType.APPLICATION_JSON)); + Request request = new Request("GET", "/" + index + "/_search"); + request.setJsonEntity("{\"query\": {\"ids\": {\"values\": [\"" + Integer.toString(i) + "\"]}}, " + + "\"docvalue_fields\" : [\"query.query_builder_field\"]}"); + Response rsp = client().performRequest(request); assertEquals(200, rsp.getStatusLine().getStatusCode()); Map hitRsp = (Map) ((List) ((Map)toMap(rsp).get("hits")).get("hits")).get(0); String queryBuilderStr = (String) ((List) ((Map) hitRsp.get("fields")).get("query.query_builder_field")).get(0); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java index 9d05ef3f05db2..2d7e618a85664 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ContextAndHeaderTransportIT.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; @@ -222,8 +223,10 @@ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHea public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws IOException { final String IRRELEVANT_HEADER = "SomeIrrelevantHeader"; - Response response = getRestClient().performRequest("GET", "/" + queryIndex + "/_search", - new BasicHeader(CUSTOM_HEADER, randomHeaderValue), new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + Request request = new Request("GET", "/" + queryIndex + "/_search"); + request.setHeaders(new BasicHeader(CUSTOM_HEADER, randomHeaderValue), + new BasicHeader(IRRELEVANT_HEADER, randomHeaderValue)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); List searchRequests = getRequests(SearchRequest.class); assertThat(searchRequests, hasSize(greaterThan(0))); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java index bdda44c1b7118..4ab64abda453b 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsNotSetIT.java @@ -20,8 +20,8 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.test.ESIntegTestCase; import java.io.IOException; @@ -32,15 +32,16 @@ public class CorsNotSetIT extends HttpSmokeTestCase { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws IOException { String corsValue = 
"http://localhost:9200"; - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws IOException { - Response response = getRestClient().performRequest("GET", "/"); + Response response = getRestClient().performRequest(new Request("GET", "/")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java index 441f56a8631dd..306e0d7d0b293 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/CorsRegexIT.java @@ -19,10 +19,11 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -55,21 +56,26 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testThatRegularExpressionWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue)); + Response response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); - corsValue = "https://localhost:9200"; - response = getRestClient().performRequest("GET", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + corsValue = "https://localhost:9201"; + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue)); + response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOException { + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", "http://evil-host:9200")); try { - getRestClient().performRequest("GET", "/", new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200")); + getRestClient().performRequest(request); fail("request should have failed"); } 
catch(ResponseException e) { Response response = e.getResponse(); @@ -80,31 +86,38 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws IOExcep } public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws IOException { - Response response = getRestClient().performRequest("GET", "/", new BasicHeader("User-Agent", "Mozilla Bar")); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar")); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws IOException { - Response response = getRestClient().performRequest("GET", "/"); + Response response = getRestClient().performRequest(new Request("GET", "/")); assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } public void testThatPreFlightRequestWorksOnMatch() throws IOException { String corsValue = "http://localhost:9200"; - Response response = getRestClient().performRequest("OPTIONS", "/", - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue), + Request request = new Request("OPTIONS", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue), new BasicHeader("Access-Control-Request-Method", "GET")); + Response response = getRestClient().performRequest(request); assertResponseWithOriginheader(response, corsValue); assertNotNull(response.getHeader("Access-Control-Allow-Methods")); } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws IOException { + String corsValue = "http://evil-host:9200"; + Request request = new Request("OPTIONS", "/"); + request.setHeaders(new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", corsValue), + new BasicHeader("Access-Control-Request-Method", "GET")); try { - getRestClient().performRequest("OPTIONS", "/", new BasicHeader("User-Agent", "Mozilla Bar"), - new BasicHeader("Origin", "http://evil-host:9200"), - new BasicHeader("Access-Control-Request-Method", "GET")); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java index a795c295d2b1c..9fb7a22b29f3c 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DeprecationHttpIT.java @@ -22,6 +22,7 @@ import org.apache.http.HttpEntity; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.DeprecationLogger; @@ -102,11 +103,10 @@ public void testUniqueDeprecationResponsesMergedTogether() throws IOException { final String commaSeparatedIndices = Stream.of(indices).collect(Collectors.joining(",")); - final String body = "{\"query\":{\"bool\":{\"filter\":[{\"" + TestDeprecatedQueryBuilder.NAME + "\":{}}]}}}"; - // trigger all index deprecations - Response response = 
getRestClient().performRequest("GET", "/" + commaSeparatedIndices + "/_search", - Collections.emptyMap(), new StringEntity(body, ContentType.APPLICATION_JSON)); + Request request = new Request("GET", "/" + commaSeparatedIndices + "/_search"); + request.setJsonEntity("{\"query\":{\"bool\":{\"filter\":[{\"" + TestDeprecatedQueryBuilder.NAME + "\":{}}]}}}"); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); final List deprecatedWarnings = getWarningHeaders(response.getHeaders()); @@ -158,8 +158,9 @@ private void doTestDeprecationWarningsAppearInHeaders() throws IOException { Collections.shuffle(settings, random()); // trigger all deprecations - Response response = getRestClient().performRequest("GET", "/_test_cluster/deprecated_settings", - Collections.emptyMap(), buildSettingsRequest(settings, useDeprecatedField)); + Request request = new Request("GET", "/_test_cluster/deprecated_settings"); + request.setEntity(buildSettingsRequest(settings, useDeprecatedField)); + Response response = getRestClient().performRequest(request); assertThat(response.getStatusLine().getStatusCode(), equalTo(OK.getStatus())); final List deprecatedWarnings = getWarningHeaders(response.getHeaders()); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java index 380937ed010e1..31ad49f95f61d 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsDisabledIT.java @@ -20,13 +20,13 @@ package org.elasticsearch.http; import java.io.IOException; -import java.util.Collections; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; -import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; @@ -49,8 +49,10 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatErrorTraceParamReturns400() throws IOException { + Request request = new Request("DELETE", "/"); + request.addParameter("error_trace", "true"); ResponseException e = expectThrows(ResponseException.class, () -> - getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"))); + getRestClient().performRequest(request)); Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), is("application/json; charset=UTF-8")); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java index d0b80595a26ee..db37034973cf8 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/DetailedErrorsEnabledIT.java @@ -20,11 +20,11 @@ package org.elasticsearch.http; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import java.io.IOException; -import java.util.Collections; import static 
org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; @@ -36,7 +36,9 @@ public class DetailedErrorsEnabledIT extends HttpSmokeTestCase { public void testThatErrorTraceWorksByDefault() throws IOException { try { - getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true")); + Request request = new Request("DELETE", "/"); + request.addParameter("error_trace", "true"); + getRestClient().performRequest(request); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -47,7 +49,7 @@ public void testThatErrorTraceWorksByDefault() throws IOException { } try { - getRestClient().performRequest("DELETE", "/"); + getRestClient().performRequest(new Request("DELETE", "/")); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java index 20ddd0d230ad4..6af08577393d9 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/HttpCompressionIT.java @@ -19,41 +19,40 @@ package org.elasticsearch.http; import org.apache.http.HttpHeaders; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; import org.elasticsearch.test.rest.ESRestTestCase; import java.io.IOException; -import java.util.Collections; public class HttpCompressionIT extends ESRestTestCase { private static final String GZIP_ENCODING = "gzip"; - private static final StringEntity SAMPLE_DOCUMENT = new StringEntity("{\n" + + private static final String SAMPLE_DOCUMENT = "{\n" + " \"name\": {\n" + " \"first name\": \"Steve\",\n" + " \"last name\": \"Jobs\"\n" + " }\n" + - "}", ContentType.APPLICATION_JSON); + "}"; public void testCompressesResponseIfRequested() throws IOException { - RestClient client = client(); - Response response = client.performRequest("GET", "/", new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + Request request = new Request("GET", "/"); + request.setHeaders(new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + Response response = client().performRequest(request); assertEquals(200, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, response.getHeader(HttpHeaders.CONTENT_ENCODING)); } public void testUncompressedResponseByDefault() throws IOException { - RestClient client = client(); - Response response = client.performRequest("GET", "/"); + Response response = client().performRequest(new Request("GET", "/")); assertEquals(200, response.getStatusLine().getStatusCode()); assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING)); - response = client.performRequest("POST", "/company/employees/1", Collections.emptyMap(), SAMPLE_DOCUMENT); + Request request = new Request("POST", "/company/employees/1"); + request.setJsonEntity(SAMPLE_DOCUMENT); + response = client().performRequest(request); assertEquals(201, response.getStatusLine().getStatusCode()); assertNull(response.getHeader(HttpHeaders.CONTENT_ENCODING)); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java 
b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java index 0a2d7ed9b06f2..e1d55afea1b54 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/NoHandlerIT.java @@ -21,6 +21,7 @@ import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -45,10 +46,10 @@ public void testNoHandlerRespectsAcceptHeader() throws IOException { private void runTestNoHandlerRespectsAcceptHeader( final String accept, final String contentType, final String expect) throws IOException { - final ResponseException e = - expectThrows( - ResponseException.class, - () -> getRestClient().performRequest("GET", "/foo/bar/baz/qux/quux", new BasicHeader("Accept", accept))); + Request request = new Request("GET", "/foo/bar/baz/qux/quux"); + request.setHeaders(new BasicHeader("Accept", accept)); + final ResponseException e = expectThrows(ResponseException.class, + () -> getRestClient().performRequest(request)); final Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), equalTo(contentType)); diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java index ffb23f31f4087..13f072dc22d6c 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/ResponseHeaderPluginIT.java @@ -19,6 +19,7 @@ package org.elasticsearch.http; import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; @@ -55,7 +56,7 @@ protected Collection> nodePlugins() { public void testThatSettingHeadersWorks() throws IOException { ensureGreen(); try { - getRestClient().performRequest("GET", "/_protected"); + getRestClient().performRequest(new Request("GET", "/_protected")); fail("request should have failed"); } catch(ResponseException e) { Response response = e.getResponse(); @@ -63,7 +64,9 @@ public void testThatSettingHeadersWorks() throws IOException { assertThat(response.getHeader("Secret"), equalTo("required")); } - Response authResponse = getRestClient().performRequest("GET", "/_protected", new BasicHeader("Secret", "password")); + Request request = new Request("GET", "/_protected"); + request.setHeaders(new BasicHeader("Secret", "password")); + Response authResponse = getRestClient().performRequest(request); assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); } diff --git a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java index c9e7dc451a053..901bffc9553d4 100644 --- a/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java +++ b/qa/smoke-test-http/src/test/java/org/elasticsearch/http/RestHttpResponseHeadersIT.java @@ -18,6 +18,7 @@ package org.elasticsearch.http; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import 
org.elasticsearch.client.ResponseException;
 import org.elasticsearch.test.rest.ESRestTestCase;
 
@@ -46,7 +47,7 @@ public class RestHttpResponseHeadersIT extends ESRestTestCase {
      * - Options).
      */
     public void testValidEndpointOptionsResponseHttpHeader() throws Exception {
-        Response response = client().performRequest("OPTIONS", "/_tasks");
+        Response response = client().performRequest(new Request("OPTIONS", "/_tasks"));
         assertThat(response.getStatusLine().getStatusCode(), is(200));
         assertThat(response.getHeader("Allow"), notNullValue());
         List<String> responseAllowHeaderStringArray =
@@ -64,7 +65,7 @@
      */
     public void testUnsupportedMethodResponseHttpHeader() throws Exception {
         try {
-            client().performRequest("DELETE", "/_tasks");
+            client().performRequest(new Request("DELETE", "/_tasks"));
             fail("Request should have failed with 405 error");
         } catch (ResponseException e) {
             Response response = e.getResponse();
@@ -85,9 +86,9 @@
      * 17853 for more information).
      */
     public void testIndexSettingsPostRequest() throws Exception {
-        client().performRequest("PUT", "/testindex");
+        client().performRequest(new Request("PUT", "/testindex"));
        try {
-            client().performRequest("POST", "/testindex/_settings");
+            client().performRequest(new Request("POST", "/testindex/_settings"));
             fail("Request should have failed with 405 error");
         } catch (ResponseException e) {
             Response response = e.getResponse();

From 7e1241742e2c972e8a5c4a83c26b24d44d9e0260 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Wed, 16 May 2018 00:19:00 -0400
Subject: [PATCH 10/44] Awaits fix a failing test

This test cannot pass on 6.x. It is expecting some deprecation messages
to be brought back on some REST tests, yet it appears these particular
messages never appear in production code. This commit marks these tests
as awaiting a fix.
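For reference, muting a test like this follows the `@AwaitsFix` pattern used throughout the test suite, applied by the diff below. A minimal sketch of that pattern (the class and method names here are placeholders; the issue URL is the one from this patch):

```
import org.apache.lucene.util.LuceneTestCase.AwaitsFix;
import org.elasticsearch.test.ESTestCase;

public class SomeRestDeprecationTests extends ESTestCase {

    // The test runner reports this test as skipped until the linked issue is resolved.
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30638")
    public void testThatCannotPassOn6x() {
        // test body elided in this sketch
    }
}
```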
---
 .../gcs/GoogleCloudStorageRepositoryDeprecationTests.java | 1 +
 1 file changed, 1 insertion(+)

diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java
index d33547c37dce3..daf761a74dd43 100644
--- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java
+++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java
@@ -31,6 +31,7 @@ public class GoogleCloudStorageRepositoryDeprecationTests extends ESTestCase {
 
+    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30638")
     public void testDeprecatedSettings() throws Exception {
         final Settings repositorySettings = Settings.builder()
                 .put("bucket", "test")

From d212b30a55c4a8b9bcdc2b41661134d931925fe4 Mon Sep 17 00:00:00 2001
From: Vladimir Dolzhenko
Date: Wed, 16 May 2018 07:23:25 +0200
Subject: [PATCH 11/44] Allow date math for naming newly-created snapshots (#7939) (#30479)

(cherry picked from commit fe3e025)
---
 docs/reference/modules/snapshots.asciidoc     | 14 +++++++
 .../create/TransportCreateSnapshotAction.java |  7 ++--
 .../DedicatedClusterSnapshotRestoreIT.java    | 38 +++++++++++++++++++
 3 files changed, 56 insertions(+), 3 deletions(-)

diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc
index 693d537d732c1..f70857e66c86f 100644
--- a/docs/reference/modules/snapshots.asciidoc
+++ b/docs/reference/modules/snapshots.asciidoc
@@ -289,6 +289,20 @@ By setting `include_global_state` to false it's possible to prevent the cluster
 the snapshot. By default, the entire snapshot will fail if one or more indices participating in the snapshot don't have
 all primary shards available. This behaviour can be changed by setting `partial` to `true`.
 
+Snapshot names can be automatically derived using <<date-math-index-names,date math expressions>>, similarly as when creating
+new indices. Note that special characters need to be URI encoded.
+
+For example, creating a snapshot with the current day in the name, like `snapshot-2018.05.11`, can be achieved with
+the following command:
+[source,js]
+-----------------------------------
+# PUT /_snapshot/my_backup/<snapshot-{now/d}>
+PUT /_snapshot/my_backup/%3Csnapshot-%7Bnow%2Fd%7D%3E
+-----------------------------------
+// CONSOLE
+// TEST[continued]
+
+
 The index snapshot process is incremental. In the process of making the index snapshot Elasticsearch analyses
 the list of the index files that are already stored in the repository and copies only files that were created or
 changed since the last snapshot. That allows multiple snapshots to be preserved in the repository in a compact form.
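As a companion to the curl example above, a hedged sketch of issuing the same call through the low-level REST client `Request` API adopted in patch 09. The repository name and the client wiring are illustrative; the URI encoding matches the `%3Csnapshot-%7Bnow%2Fd%7D%3E` form shown in the docs:

```
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;

import org.elasticsearch.client.Request;

public class DateMathSnapshotRequest {
    public static void main(String[] args) throws Exception {
        // <snapshot-{now/d}> resolves server side to e.g. snapshot-2018.05.11
        String dateMathName = "<snapshot-{now/d}>";
        String encoded = URLEncoder.encode(dateMathName, StandardCharsets.UTF_8.name());
        // encoded is %3Csnapshot-%7Bnow%2Fd%7D%3E
        Request request = new Request("PUT", "/_snapshot/my_backup/" + encoded);
        request.addParameter("wait_for_completion", "true");
        // a RestClient instance would then send it: client.performRequest(request)
        System.out.println(request.getMethod() + " " + request.getEndpoint());
    }
}
```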
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java index 269edfc401b7a..14bc8624e994d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java @@ -71,8 +71,9 @@ protected ClusterBlockException checkBlock(CreateSnapshotRequest request, Cluste @Override protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener listener) { + final String snapshotName = indexNameExpressionResolver.resolveDateMathExpression(request.snapshot()); SnapshotsService.SnapshotRequest snapshotRequest = - new SnapshotsService.SnapshotRequest(request.repository(), request.snapshot(), "create_snapshot [" + request.snapshot() + "]") + new SnapshotsService.SnapshotRequest(request.repository(), snapshotName, "create_snapshot [" + snapshotName + "]") .indices(request.indices()) .indicesOptions(request.indicesOptions()) .partial(request.partial()) @@ -87,7 +88,7 @@ public void onResponse() { @Override public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) { if (snapshot.getRepository().equals(request.repository()) && - snapshot.getSnapshotId().getName().equals(request.snapshot())) { + snapshot.getSnapshotId().getName().equals(snapshotName)) { listener.onResponse(new CreateSnapshotResponse(snapshotInfo)); snapshotsService.removeListener(this); } @@ -96,7 +97,7 @@ public void onSnapshotCompletion(Snapshot snapshot, SnapshotInfo snapshotInfo) { @Override public void onSnapshotFailure(Snapshot snapshot, Exception e) { if (snapshot.getRepository().equals(request.repository()) && - snapshot.getSnapshotId().getName().equals(request.snapshot())) { + snapshot.getSnapshotId().getName().equals(snapshotName)) { listener.onFailure(e); snapshotsService.removeListener(this); } diff --git a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 5341b268544e7..4349f6940cc6a 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/server/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -32,6 +32,7 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.cluster.ClusterState; @@ -41,6 +42,7 @@ import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.metadata.MetaDataIndexStateService; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -49,6 +51,7 @@ import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Priority; +import 
org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable; @@ -56,6 +59,7 @@ import org.elasticsearch.common.settings.SettingsFilter; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.discovery.zen.ElectMasterService; @@ -68,6 +72,7 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.action.admin.cluster.RestClusterStateAction; import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; import org.elasticsearch.snapshots.mockstore.MockRepository; @@ -96,6 +101,7 @@ import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.lessThan; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; @@ -981,6 +987,38 @@ public void testRestoreShrinkIndex() throws Exception { ensureYellow(); } + public void testSnapshotWithDateMath() { + final String repo = "repo"; + final AdminClient admin = client().admin(); + + final IndexNameExpressionResolver nameExpressionResolver = new IndexNameExpressionResolver(Settings.EMPTY); + final String snapshotName = "<snapshot-{now/d}>"; + + logger.info("--> creating repository"); + assertAcked(admin.cluster().preparePutRepository(repo).setType("fs") + .setSettings(Settings.builder().put("location", randomRepoPath()) + .put("compress", randomBoolean()))); + + final String expression1 = nameExpressionResolver.resolveDateMathExpression(snapshotName); + logger.info("--> creating date math snapshot"); + CreateSnapshotResponse snapshotResponse = + admin.cluster().prepareCreateSnapshot(repo, snapshotName) + .setIncludeGlobalState(true) + .setWaitForCompletion(true) + .get(); + assertThat(snapshotResponse.status(), equalTo(RestStatus.OK)); + // snapshot could be taken before or after a day rollover + final String expression2 = nameExpressionResolver.resolveDateMathExpression(snapshotName); + + SnapshotsStatusResponse response = admin.cluster().prepareSnapshotStatus(repo) + .setSnapshots(Sets.newHashSet(expression1, expression2).toArray(Strings.EMPTY_ARRAY)) + .setIgnoreUnavailable(true) + .get(); + List<SnapshotStatus> snapshots = response.getSnapshots(); + assertThat(snapshots, hasSize(1)); + assertThat(snapshots.get(0).getState().completed(), equalTo(true)); + } + public static class SnapshottableMetadata extends TestCustomMetaData { public static final String TYPE = "test_snapshottable"; From 0b82d6d1675c2a979bc447215757345ad1d8cf00 Mon Sep 17 00:00:00 2001 From: lukens Date: Wed, 16 May 2018 08:23:07 +0100 Subject: [PATCH 12/44] [Docs] Update code snippet in has-child-query.asciidoc (#30510) Changed `InetSocketTransportAddress` to `TransportAddress`, as that seems to be the thing now.
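The hunk below is the whole change; for anyone copying the corrected snippet, a self-contained version might look like the following sketch. The imports and the wrapping class are assumptions inferred from the class names in the snippet, not part of the documented example:

[source,java]
-----------------------------------
import java.net.InetSocketAddress;

import org.elasticsearch.client.transport.TransportClient;
import org.elasticsearch.common.network.InetAddresses;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.transport.client.PreBuiltTransportClient;

public class HasChildClientSketch {
    public static void main(String[] args) {
        Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build();
        // PreBuiltTransportClient loads the parent-join module, which the
        // has_child query needs on the client side.
        TransportClient client = new PreBuiltTransportClient(settings);
        client.addTransportAddress(
                new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300)));
        client.close();
    }
}
-----------------------------------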
--- docs/java-api/query-dsl/has-child-query.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/java-api/query-dsl/has-child-query.asciidoc b/docs/java-api/query-dsl/has-child-query.asciidoc index 300b32e1922b0..f47f3af487dfe 100644 --- a/docs/java-api/query-dsl/has-child-query.asciidoc +++ b/docs/java-api/query-dsl/has-child-query.asciidoc @@ -9,7 +9,7 @@ When using the `has_child` query it is important to use the `PreBuiltTransportCl -------------------------------------------------- Settings settings = Settings.builder().put("cluster.name", "elasticsearch").build(); TransportClient client = new PreBuiltTransportClient(settings); -client.addTransportAddress(new InetSocketTransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); +client.addTransportAddress(new TransportAddress(new InetSocketAddress(InetAddresses.forString("127.0.0.1"), 9300))); -------------------------------------------------- Otherwise the parent-join module doesn't get loaded and the `has_child` query can't be used from the transport client. From 472077819c50481316f56b71afca8ab77f85fe34 Mon Sep 17 00:00:00 2001 From: Yannick Welsch Date: Wed, 16 May 2018 09:57:22 +0200 Subject: [PATCH 13/44] Move allocation awareness attributes to list setting (#30626) Allows the setting to be specified using proper array syntax, for example: "cluster.routing.allocation.awareness.attributes": [ "foo", "bar", "baz" ] Closes #30617 --- .../routing/IndexShardRoutingTable.java | 12 +++++------ .../cluster/routing/OperationRouting.java | 11 +++++----- .../decider/AwarenessAllocationDecider.java | 15 ++++++++------ .../routing/IndexShardRoutingTableTests.java | 6 ++++-- .../structure/RoutingIteratorTests.java | 20 ++++++++++++------- 5 files changed, 38 insertions(+), 26 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java index f80da84a08879..3b3de800194e5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/IndexShardRoutingTable.java @@ -625,20 +625,20 @@ public Set getAllAllocationIds() { static class AttributesKey { - final String[] attributes; + final List attributes; - AttributesKey(String[] attributes) { + AttributesKey(List attributes) { this.attributes = attributes; } @Override public int hashCode() { - return Arrays.hashCode(attributes); + return attributes.hashCode(); } @Override public boolean equals(Object obj) { - return obj instanceof AttributesKey && Arrays.equals(attributes, ((AttributesKey) obj).attributes); + return obj instanceof AttributesKey && attributes.equals(((AttributesKey) obj).attributes); } } @@ -702,11 +702,11 @@ private static List collectAttributeShards(AttributesKey key, Disc return Collections.unmodifiableList(to); } - public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes) { + public ShardIterator preferAttributesActiveInitializingShardsIt(List attributes, DiscoveryNodes nodes) { return preferAttributesActiveInitializingShardsIt(attributes, nodes, shuffler.nextSeed()); } - public ShardIterator preferAttributesActiveInitializingShardsIt(String[] attributes, DiscoveryNodes nodes, int seed) { + public ShardIterator preferAttributesActiveInitializingShardsIt(List attributes, DiscoveryNodes nodes, int seed) { AttributesKey key = new AttributesKey(attributes); 
AttributesRoutings activeRoutings = getActiveAttribute(key, nodes); AttributesRoutings initializingRoutings = getInitializingAttribute(key, nodes); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 477744311f44b..3c1f953e23f69 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -39,6 +39,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; @@ -49,7 +50,7 @@ public class OperationRouting extends AbstractComponent { Setting.boolSetting("cluster.routing.use_adaptive_replica_selection", false, Setting.Property.Dynamic, Setting.Property.NodeScope); - private String[] awarenessAttributes; + private List awarenessAttributes; private boolean useAdaptiveReplicaSelection; public OperationRouting(Settings settings, ClusterSettings clusterSettings) { @@ -65,7 +66,7 @@ void setUseAdaptiveReplicaSelection(boolean useAdaptiveReplicaSelection) { this.useAdaptiveReplicaSelection = useAdaptiveReplicaSelection; } - private void setAwarenessAttributes(String[] awarenessAttributes) { + private void setAwarenessAttributes(List awarenessAttributes) { this.awarenessAttributes = awarenessAttributes; } @@ -139,7 +140,7 @@ private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable index @Nullable ResponseCollectorService collectorService, @Nullable Map nodeCounts) { if (preference == null || preference.isEmpty()) { - if (awarenessAttributes.length == 0) { + if (awarenessAttributes.isEmpty()) { if (useAdaptiveReplicaSelection) { return indexShard.activeInitializingShardsRankedIt(collectorService, nodeCounts); } else { @@ -174,7 +175,7 @@ private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable index } // no more preference if (index == -1 || index == preference.length() - 1) { - if (awarenessAttributes.length == 0) { + if (awarenessAttributes.isEmpty()) { if (useAdaptiveReplicaSelection) { return indexShard.activeInitializingShardsRankedIt(collectorService, nodeCounts); } else { @@ -234,7 +235,7 @@ private ShardIterator preferenceActiveShardIterator(IndexShardRoutingTable index // shard ID into the hash of the user-supplied preference key. 
routingHash = 31 * routingHash + indexShard.shardId.hashCode(); } - if (awarenessAttributes.length == 0) { + if (awarenessAttributes.isEmpty()) { return indexShard.activeInitializingShardsIt(routingHash); } else { return indexShard.preferAttributesActiveInitializingShardsIt(awarenessAttributes, nodes, routingHash); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java index e7e538ae3713f..6105c732d5511 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/AwarenessAllocationDecider.java @@ -22,6 +22,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.function.Function; import com.carrotsearch.hppc.ObjectIntHashMap; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -34,6 +35,8 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; +import static java.util.Collections.emptyList; + /** * This {@link AllocationDecider} controls shard allocation based on * {@code awareness} key-value pairs defined in the node configuration. @@ -78,13 +81,13 @@ public class AwarenessAllocationDecider extends AllocationDecider { public static final String NAME = "awareness"; - public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = - new Setting<>("cluster.routing.allocation.awareness.attributes", "", s -> Strings.tokenizeToStringArray(s, ","), Property.Dynamic, + public static final Setting> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = + Setting.listSetting("cluster.routing.allocation.awareness.attributes", emptyList(), Function.identity(), Property.Dynamic, Property.NodeScope); public static final Setting CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", Property.Dynamic, Property.NodeScope); - private volatile String[] awarenessAttributes; + private volatile List awarenessAttributes; private volatile Map> forcedAwarenessAttributes; @@ -109,7 +112,7 @@ private void setForcedAwarenessAttributes(Settings forceSettings) { this.forcedAwarenessAttributes = forcedAwarenessAttributes; } - private void setAwarenessAttributes(String[] awarenessAttributes) { + private void setAwarenessAttributes(List awarenessAttributes) { this.awarenessAttributes = awarenessAttributes; } @@ -124,7 +127,7 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl } private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation, boolean moveToNode) { - if (awarenessAttributes.length == 0) { + if (awarenessAttributes.isEmpty()) { return allocation.decision(Decision.YES, NAME, "allocation awareness is not enabled, set cluster setting [%s] to enable it", CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey()); @@ -138,7 +141,7 @@ private Decision underCapacity(ShardRouting shardRouting, RoutingNode node, Rout return allocation.decision(Decision.NO, NAME, "node does not contain the awareness attribute [%s]; required attributes cluster setting [%s=%s]", awarenessAttribute, CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), - allocation.debugDecision() ? 
Strings.arrayToCommaDelimitedString(awarenessAttributes) : null); + allocation.debugDecision() ? Strings.collectionToCommaDelimitedString(awarenessAttributes) : null); } // build attr_value -> nodes map diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java index 7823970ff46d8..659d600703690 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/IndexShardRoutingTableTests.java @@ -24,11 +24,13 @@ import org.elasticsearch.test.ESTestCase; import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; public class IndexShardRoutingTableTests extends ESTestCase { public void testEqualsAttributesKey() { - String[] attr1 = {"a"}; - String[] attr2 = {"b"}; + List attr1 = Arrays.asList("a"); + List attr2 = Arrays.asList("b"); IndexShardRoutingTable.AttributesKey attributesKey1 = new IndexShardRoutingTable.AttributesKey(attr1); IndexShardRoutingTable.AttributesKey attributesKey2 = new IndexShardRoutingTable.AttributesKey(attr1); IndexShardRoutingTable.AttributesKey attributesKey3 = new IndexShardRoutingTable.AttributesKey(attr2); diff --git a/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java b/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java index 05d37365b5c9c..0d955a74e9d46 100644 --- a/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/structure/RoutingIteratorTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.shard.ShardId; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -223,11 +224,16 @@ public void testRandomRouting() { } public void testAttributePreferenceRouting() { - AllocationService strategy = createAllocationService(Settings.builder() - .put("cluster.routing.allocation.node_concurrent_recoveries", 10) - .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always") - .put("cluster.routing.allocation.awareness.attributes", "rack_id,zone") - .build()); + Settings.Builder settings = Settings.builder() + .put("cluster.routing.allocation.node_concurrent_recoveries", 10) + .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), "always"); + if (randomBoolean()) { + settings.put("cluster.routing.allocation.awareness.attributes", " rack_id, zone "); + } else { + settings.putList("cluster.routing.allocation.awareness.attributes", "rack_id", "zone"); + } + + AllocationService strategy = createAllocationService(settings.build()); MetaData metaData = MetaData.builder() .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(1).numberOfReplicas(1)) @@ -257,7 +263,7 @@ public void testAttributePreferenceRouting() { clusterState = strategy.applyStartedShards(clusterState, clusterState.getRoutingNodes().shardsWithState(INITIALIZING)); // after all are started, check routing iteration - ShardIterator shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes()); + ShardIterator shardIterator = 
clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(Arrays.asList("rack_id"), clusterState.nodes()); ShardRouting shardRouting = shardIterator.nextOrNull(); assertThat(shardRouting, notNullValue()); assertThat(shardRouting.currentNodeId(), equalTo("node1")); @@ -265,7 +271,7 @@ assertThat(shardRouting, notNullValue()); assertThat(shardRouting.currentNodeId(), equalTo("node2")); - shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(new String[]{"rack_id"}, clusterState.nodes()); + shardIterator = clusterState.routingTable().index("test").shard(0).preferAttributesActiveInitializingShardsIt(Arrays.asList("rack_id"), clusterState.nodes()); shardRouting = shardIterator.nextOrNull(); assertThat(shardRouting, notNullValue()); assertThat(shardRouting.currentNodeId(), equalTo("node1")); From b1eda60e48a4334088d53a032fb30e4f1de9fbd2 Mon Sep 17 00:00:00 2001 From: Alexander Reelsen Date: Wed, 16 May 2018 10:32:22 +0200 Subject: [PATCH 14/44] Watcher: Ensure secrets integration tests also run triggered watch (#30478) When the encryption of sensitive data is enabled, test that a scheduled watch is executed as expected and produces the correct value from a secret in the basic auth header. --- .../support/xcontent/WatcherXContentParser.java | 2 -- .../test/integration/HttpSecretsIntegrationTests.java | 10 +++++++++- .../watcher/trigger/ScheduleTriggerEngineMock.java | 1 + 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java index 7b11c4625bb31..59eedd640a400 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/support/xcontent/WatcherXContentParser.java @@ -5,8 +5,6 @@ */ package org.elasticsearch.xpack.core.watcher.support.xcontent; -import org.apache.lucene.util.BytesRef; -import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.xcontent.DeprecationHandler; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java index b91acc1f969ba..07fb45936e97f 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/test/integration/HttpSecretsIntegrationTests.java @@ -57,7 +57,7 @@ public class HttpSecretsIntegrationTests extends AbstractWatcherIntegrationTestC private MockWebServer webServer = new MockWebServer(); private static Boolean encryptSensitiveData = null; - private static byte[] encryptionKey = CryptoServiceTests.generateKey(); + private static final byte[] encryptionKey = CryptoServiceTests.generateKey(); @Before public void init() throws Exception { @@ -155,6 +155,14 @@ public void testHttpInput() throws Exception { assertThat(webServer.requests(), hasSize(1));
assertThat(webServer.requests().get(0).getHeader("Authorization"), is(ApplicableBasicAuth.headerValue(USERNAME, PASSWORD.toCharArray()))); + + // now trigger the by the scheduler and make sure that the password is also correctly transmitted + webServer.enqueue(new MockResponse().setResponseCode(200).setBody( + BytesReference.bytes(jsonBuilder().startObject().field("key", "value").endObject()).utf8ToString())); + timeWarp().trigger("_id"); + assertThat(webServer.requests(), hasSize(2)); + assertThat(webServer.requests().get(1).getHeader("Authorization"), + is(ApplicableBasicAuth.headerValue(USERNAME, PASSWORD.toCharArray()))); } public void testWebhookAction() throws Exception { diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java index 58f5c8f4a26b0..63f4f95ae2161 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/trigger/ScheduleTriggerEngineMock.java @@ -53,6 +53,7 @@ public ScheduleTriggerEvent parseTriggerEvent(TriggerService service, String wat @Override public void start(Collection jobs) { + jobs.forEach(this::add); } @Override From c2e0061b85640dd3ab361168db36628bb1123b3a Mon Sep 17 00:00:00 2001 From: David Kyle Date: Wed, 16 May 2018 09:52:25 +0100 Subject: [PATCH 15/44] [ML] Wait for ML indices in rolling upgrade tests (#30615) --- .../test/mixed_cluster/30_ml_jobs_crud.yml | 7 +++++++ .../test/old_cluster/30_ml_jobs_crud.yml | 14 ++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml index dbdfa83b96d98..d77cc8436defe 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/mixed_cluster/30_ml_jobs_crud.yml @@ -110,6 +110,13 @@ setup: xpack.ml.close_job: job_id: mixed-cluster-job +# Wait for indices to be fully allocated before +# killing the node + - do: + cluster.health: + index: [".ml-state", ".ml-anomalies-shared"] + wait_for_status: green + --- "Test get job with rules": diff --git a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml index 3ca9793230bcc..b59c0d3ad4c00 100644 --- a/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml +++ b/x-pack/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_ml_jobs_crud.yml @@ -49,6 +49,13 @@ job_id: old-cluster-job - match: { count: 1 } +# Wait for indices to be fully allocated before +# killing the node + - do: + cluster.health: + index: [".ml-state", ".ml-anomalies-shared"] + wait_for_status: green + --- "Put job on the old cluster with the default model memory limit and post some data": - do: @@ -96,6 +103,13 @@ job_id: no-model-memory-limit-job - match: { count: 201 } +# Wait for indices to be fully allocated before +# killing the node + - do: + cluster.health: + index: [".ml-state", ".ml-anomalies-shared"] + wait_for_status: green + --- "Put job with empty strings in the configuration": - 
do: From 24948aac236b4cdb5b1da77e87d977d7ced5fe2e Mon Sep 17 00:00:00 2001 From: Colin Goodheart-Smithe Date: Wed, 16 May 2018 11:38:24 +0100 Subject: [PATCH 16/44] Refactors ClientHelper to combine header logic (#30620) * Refactors ClientHelper to combine header logic This change removes all the `*ClientHelper` classes which were repeating logic between plugins and instead adds `ClientHelper.executeWithHeaders()` and `ClientHelper.executeWithHeadersAsync()` methods to centralise the logic for executing requests with stored security headers. * Removes Watcher headers constant x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/tran sport/actions/put/TransportPutWatchActionTests.java /Users/colings86/dev/work/git/elasticsearch/.git/worktrees/elasticsearch -6.x/CHERRY_PICK_HEAD x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelp er.java x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClien tHelper.java x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetad ata.java x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafee d/DatafeedUpdate.java x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelp erTests.java x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/Transpo rtPreviewDatafeedAction.java x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extra ctor/aggregation/AggregationDataExtractor.java x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extra ctor/chunked/ChunkedDataExtractor.java x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extra ctor/scroll/ScrollDataExtractor.java x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extra ctor/scroll/ScrollDataExtractorFactory.java x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlClientHelper Tests.java x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/Ro llupClientHelper.java x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/Ro llupJobTask.java x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/Ro llupClientHelperTests.java x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watc her.java x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watc herClientHelper.java x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/acti ons/index/ExecutableIndexAction.java x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/inpu t/search/ExecutableSearchInput.java x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/tran sform/search/ExecutableSearchTransform.java x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/tran sport/actions/put/TransportPutWatchAction.java x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/Watc herClientHelperTests.java x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/tran sport/actions/put/TransportPutWatchActionTests.java --- .../xpack/core/ClientHelper.java | 89 +++++++- .../xpack/core/ml/MlClientHelper.java | 71 ------- .../xpack/core/ml/MlMetadata.java | 7 +- .../core/ml/datafeed/DatafeedUpdate.java | 4 +- .../xpack/core/ClientHelperTests.java | 199 +++++++++++++++++- .../TransportPreviewDatafeedAction.java | 4 +- .../aggregation/AggregationDataExtractor.java | 4 +- .../chunked/ChunkedDataExtractor.java | 6 +- .../extractor/scroll/ScrollDataExtractor.java | 10 +- .../scroll/ScrollDataExtractorFactory.java | 6 +- 
.../xpack/ml/MlClientHelperTests.java | 118 ----------- .../xpack/rollup/job/RollupClientHelper.java | 59 ------ .../xpack/rollup/job/RollupJobTask.java | 7 +- .../rollup/job/RollupClientHelperTests.java | 133 ------------ .../elasticsearch/xpack/watcher/Watcher.java | 4 - .../xpack/watcher/WatcherClientHelper.java | 51 ----- .../actions/index/ExecutableIndexAction.java | 6 +- .../input/search/ExecutableSearchInput.java | 6 +- .../search/ExecutableSearchTransform.java | 5 +- .../actions/put/TransportPutWatchAction.java | 4 +- .../watcher/WatcherClientHelperTests.java | 119 ----------- .../put/TransportPutWatchActionTests.java | 4 +- 22 files changed, 324 insertions(+), 592 deletions(-) delete mode 100644 x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClientHelper.java delete mode 100644 x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlClientHelperTests.java delete mode 100644 x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupClientHelper.java delete mode 100644 x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupClientHelperTests.java delete mode 100644 x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherClientHelper.java delete mode 100644 x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherClientHelperTests.java diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java index d657d4df809c4..c73bb8576a7ad 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ClientHelper.java @@ -14,9 +14,15 @@ import org.elasticsearch.client.Client; import org.elasticsearch.client.FilterClient; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.util.set.Sets; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import java.util.Map; +import java.util.Set; import java.util.function.BiConsumer; import java.util.function.Supplier; +import java.util.stream.Collectors; /** * Utility class to help with the execution of requests made using a {@link Client} such that they @@ -24,6 +30,12 @@ */ public final class ClientHelper { + /** + * List of headers that are related to security + */ + public static final Set SECURITY_HEADER_FILTERS = Sets.newHashSet(AuthenticationServiceField.RUN_AS_USER_HEADER, + AuthenticationField.AUTHENTICATION_KEY); + public static final String ACTION_ORIGIN_TRANSIENT_NAME = "action.origin"; public static final String SECURITY_ORIGIN = "security"; public static final String WATCHER_ORIGIN = "watcher"; @@ -78,6 +90,82 @@ RequestBuilder extends ActionRequestBuilder> } } + /** + * Execute a client operation and return the response, try to run an action + * with least privileges, when headers exist + * + * @param headers + * Request headers, ideally including security headers + * @param origin + * The origin to fall back to if there are no security headers + * @param client + * The client used to query + * @param supplier + * The action to run + * @return An instance of the response class + */ + public static T executeWithHeaders(Map headers, String origin, Client client, + Supplier supplier) { + Map filteredHeaders = headers.entrySet().stream().filter(e -> SECURITY_HEADER_FILTERS.contains(e.getKey())) + 
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + // no security headers, we will have to use the xpack internal user for + // our execution by specifying the origin + if (filteredHeaders.isEmpty()) { + try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), origin)) { + return supplier.get(); + } + } else { + try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashContext()) { + client.threadPool().getThreadContext().copyHeaders(filteredHeaders.entrySet()); + return supplier.get(); + } + } + } + + /** + * Execute a client operation asynchronously, try to run an action with + * least privileges, when headers exist + * + * @param headers + * Request headers, ideally including security headers + * @param origin + * The origin to fall back to if there are no security headers + * @param action + * The action to execute + * @param request + * The request object for the action + * @param listener + * The listener to call when the action is complete + */ + public static > void executeWithHeadersAsync( + Map headers, String origin, Client client, Action action, Request request, + ActionListener listener) { + + Map filteredHeaders = headers.entrySet().stream().filter(e -> SECURITY_HEADER_FILTERS.contains(e.getKey())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + + final ThreadContext threadContext = client.threadPool().getThreadContext(); + + // No headers (e.g. security not installed/in use) so execute as origin + if (filteredHeaders.isEmpty()) { + ClientHelper.executeAsyncWithOrigin(client, origin, action, request, listener); + } else { + // Otherwise stash the context and copy in the saved headers before executing + final Supplier supplier = threadContext.newRestorableContext(false); + try (ThreadContext.StoredContext ignore = stashWithHeaders(threadContext, filteredHeaders)) { + client.execute(action, request, new ContextPreservingActionListener<>(supplier, listener)); + } + } + } + + private static ThreadContext.StoredContext stashWithHeaders(ThreadContext threadContext, Map headers) { + final ThreadContext.StoredContext storedContext = threadContext.stashContext(); + threadContext.copyHeaders(headers.entrySet()); + return storedContext; + } + private static final class ClientWithOrigin extends FilterClient { private final String origin; @@ -98,5 +186,4 @@ RequestBuilder extends ActionRequestBuilder> } } } - } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClientHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClientHelper.java deleted file mode 100644 index a76c5c51e8d7f..0000000000000 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlClientHelper.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
- */ -package org.elasticsearch.xpack.core.ml; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; - -import java.util.Map; -import java.util.Set; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -/** - * A helper class for actions which decides if we should run via the _xpack user and set ML as origin - * or if we should use the run_as functionality by setting the correct headers - */ -public class MlClientHelper { - - /** - * List of headers that are related to security - */ - public static final Set SECURITY_HEADER_FILTERS = Sets.newHashSet(AuthenticationServiceField.RUN_AS_USER_HEADER, - AuthenticationField.AUTHENTICATION_KEY); - - /** - * Execute a client operation and return the response, try to run a datafeed search with least privileges, when headers exist - * - * @param datafeedConfig The config for a datafeed - * @param client The client used to query - * @param supplier The action to run - * @return An instance of the response class - */ - public static T execute(DatafeedConfig datafeedConfig, Client client, Supplier supplier) { - return execute(datafeedConfig.getHeaders(), client, supplier); - } - - /** - * Execute a client operation and return the response, try to run an action with least privileges, when headers exist - * - * @param headers Request headers, ideally including security headers - * @param client The client used to query - * @param supplier The action to run - * @return An instance of the response class - */ - public static T execute(Map headers, Client client, Supplier supplier) { - // no headers, we will have to use the xpack internal user for our execution by specifying the ml origin - if (headers == null || headers.isEmpty()) { - try (ThreadContext.StoredContext ignore = ClientHelper.stashWithOrigin(client.threadPool().getThreadContext(), - ClientHelper.ML_ORIGIN)) { - return supplier.get(); - } - } else { - try (ThreadContext.StoredContext ignore = client.threadPool().getThreadContext().stashContext()) { - Map filteredHeaders = headers.entrySet().stream() - .filter(e -> SECURITY_HEADER_FILTERS.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - client.threadPool().getThreadContext().copyHeaders(filteredHeaders.entrySet()); - return supplier.get(); - } - } - } -} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index b09a7463ffdb1..b709e32946ec6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -23,6 +23,9 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData; +import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; +import org.elasticsearch.xpack.core.ClientHelper; import 
org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedJobValidator; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; @@ -35,8 +38,6 @@ import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; import org.elasticsearch.xpack.core.ml.utils.NameResolver; import org.elasticsearch.xpack.core.ml.utils.ToXContentParams; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData; -import org.elasticsearch.persistent.PersistentTasksCustomMetaData.PersistentTask; import java.io.IOException; import java.util.Collection; @@ -303,7 +304,7 @@ public Builder putDatafeed(DatafeedConfig datafeedConfig, ThreadContext threadCo // Adjust the request, adding security headers from the current thread context DatafeedConfig.Builder builder = new DatafeedConfig.Builder(datafeedConfig); Map headers = threadContext.getHeaders().entrySet().stream() - .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); builder.setHeaders(headers); datafeedConfig = builder.build(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 6255be9f4383a..444532a7e3f15 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -21,7 +21,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.xpack.core.ml.MlClientHelper; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -304,7 +304,7 @@ public DatafeedConfig apply(DatafeedConfig datafeedConfig, ThreadContext threadC if (threadContext != null) { // Adjust the request, adding security headers from the current thread context Map headers = threadContext.getHeaders().entrySet().stream() - .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); builder.setHeaders(headers); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index a243b8c995d23..95361dbff42b0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -9,15 +9,33 @@ import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; +import org.elasticsearch.action.search.SearchAction; +import org.elasticsearch.action.search.SearchRequest; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.PlainActionFuture; import 
org.elasticsearch.client.Client; +import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ClientHelper; +import org.elasticsearch.xpack.core.security.authc.AuthenticationField; +import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.function.Consumer; +import static org.elasticsearch.xpack.core.ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; +import static org.mockito.Matchers.any; import static org.mockito.Matchers.anyObject; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -97,7 +115,7 @@ public void testExecuteWithClient() throws Exception { assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); assertNull(threadContext.getHeader(headerName)); latch.countDown(); - ((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); return null; }).when(client).execute(anyObject(), anyObject(), anyObject()); @@ -130,7 +148,7 @@ public void testClientWithOrigin() throws Exception { assertEquals(origin, threadContext.getTransient(ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME)); assertNull(threadContext.getHeader(headerName)); latch.countDown(); - ((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); return null; }).when(client).execute(anyObject(), anyObject(), anyObject()); @@ -139,4 +157,179 @@ public void testClientWithOrigin() throws Exception { clientWithOrigin.execute(null, null, listener); latch.await(); } + + public void testExecuteWithHeadersAsyncNoHeaders() throws InterruptedException { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + + String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); + ClientHelper.executeWithHeadersAsync(Collections.emptyMap(), originName, client, SearchAction.INSTANCE, request, listener); + + latch.await(); + } + + public void testExecuteWithHeadersAsyncWrongHeaders() throws InterruptedException { + final ThreadContext threadContext = new 
ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + Map headers = new HashMap<>(1); + headers.put("foo", "foo"); + headers.put("bar", "bar"); + + String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); + ClientHelper.executeWithHeadersAsync(headers, originName, client, SearchAction.INSTANCE, request, listener); + + latch.await(); + } + + public void testExecuteWithHeadersAsyncWithHeaders() throws Exception { + final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + final Client client = mock(Client.class); + final ThreadPool threadPool = mock(ThreadPool.class); + when(client.threadPool()).thenReturn(threadPool); + when(threadPool.getThreadContext()).thenReturn(threadContext); + + final CountDownLatch latch = new CountDownLatch(2); + final ActionListener listener = ActionListener.wrap(v -> { + assertTrue(threadContext.getHeaders().isEmpty()); + latch.countDown(); + }, e -> fail(e.getMessage())); + + doAnswer(invocationOnMock -> { + assertThat(threadContext.getHeaders().size(), equalTo(2)); + assertThat(threadContext.getHeaders().get("es-security-runas-user"), equalTo("foo")); + assertThat(threadContext.getHeaders().get("_xpack_security_authentication"), equalTo("bar")); + latch.countDown(); + ((ActionListener) invocationOnMock.getArguments()[2]).onResponse(null); + return null; + }).when(client).execute(anyObject(), anyObject(), anyObject()); + + SearchRequest request = new SearchRequest("foo"); + Map headers = new HashMap<>(1); + headers.put("es-security-runas-user", "foo"); + headers.put("_xpack_security_authentication", "bar"); + + String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); + ClientHelper.executeWithHeadersAsync(headers, originName, client, SearchAction.INSTANCE, request, listener); + + latch.await(); + } + + public void testExecuteWithHeadersNoHeaders() { + Client client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + when(client.threadPool()).thenReturn(threadPool); + + PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + searchFuture.onResponse(new SearchResponse()); + when(client.search(any())).thenReturn(searchFuture); + assertExecutionWithOrigin(Collections.emptyMap(), client); + } + + public void testExecuteWithHeaders() { + Client client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + when(client.threadPool()).thenReturn(threadPool); + + PlainActionFuture searchFuture = 
PlainActionFuture.newFuture(); + searchFuture.onResponse(new SearchResponse()); + when(client.search(any())).thenReturn(searchFuture); + Map headers = MapBuilder. newMapBuilder().put(AuthenticationField.AUTHENTICATION_KEY, "anything") + .put(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything").map(); + + assertRunAsExecution(headers, h -> { + assertThat(h.keySet(), hasSize(2)); + assertThat(h, hasEntry(AuthenticationField.AUTHENTICATION_KEY, "anything")); + assertThat(h, hasEntry(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything")); + }, client); + } + + public void testExecuteWithHeadersNoSecurityHeaders() { + Client client = mock(Client.class); + ThreadPool threadPool = mock(ThreadPool.class); + ThreadContext threadContext = new ThreadContext(Settings.EMPTY); + when(threadPool.getThreadContext()).thenReturn(threadContext); + when(client.threadPool()).thenReturn(threadPool); + + PlainActionFuture searchFuture = PlainActionFuture.newFuture(); + searchFuture.onResponse(new SearchResponse()); + when(client.search(any())).thenReturn(searchFuture); + Map unrelatedHeaders = MapBuilder. newMapBuilder().put(randomAlphaOfLength(10), "anything").map(); + + assertExecutionWithOrigin(unrelatedHeaders, client); + } + + /** + * This method executes a search and checks if the thread context was + * enriched with the ml origin + */ + private void assertExecutionWithOrigin(Map storedHeaders, Client client) { + String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); + ClientHelper.executeWithHeaders(storedHeaders, originName, client, () -> { + Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); + assertThat(origin, is(originName)); + + // Check that headers are not set + Map headers = client.threadPool().getThreadContext().getHeaders(); + assertThat(headers, not(hasEntry(AuthenticationField.AUTHENTICATION_KEY, "anything"))); + assertThat(headers, not(hasEntry(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything"))); + + return client.search(new SearchRequest()).actionGet(); + }); + } + + /** + * This method executes a search and ensures no stashed origin thread + * context was created, so that the regular node client was used, to emulate + * a run_as function + */ + public void assertRunAsExecution(Map storedHeaders, Consumer> consumer, Client client) { + String originName = randomFrom(ClientHelper.ML_ORIGIN, ClientHelper.WATCHER_ORIGIN, ClientHelper.ROLLUP_ORIGIN); + ClientHelper.executeWithHeaders(storedHeaders, originName, client, () -> { + Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); + assertThat(origin, is(nullValue())); + + consumer.accept(client.threadPool().getThreadContext().getHeaders()); + return client.search(new SearchRequest()).actionGet(); + }); + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java index 98ba2caa408ed..2ffb318dc4fb2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPreviewDatafeedAction.java @@ -16,8 +16,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import 
org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.MLMetadataField; -import org.elasticsearch.xpack.core.ml.MlClientHelper; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction; import org.elasticsearch.xpack.core.ml.datafeed.ChunkingConfig; @@ -64,7 +64,7 @@ protected void doExecute(PreviewDatafeedAction.Request request, ActionListener

headers = threadPool.getThreadContext().getHeaders().entrySet().stream() - .filter(e -> MlClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) + .filter(e -> ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); previewDatafeed.setHeaders(headers); // NB: this is using the client from the transport layer, NOT the internal client. diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java index f9089b6bc1704..d83865b751f50 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/aggregation/AggregationDataExtractor.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.search.aggregations.Aggregation; import org.elasticsearch.search.aggregations.Aggregations; -import org.elasticsearch.xpack.core.ml.MlClientHelper; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; @@ -112,7 +112,7 @@ private void initAggregationProcessor(Aggregations aggs) throws IOException { } protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { - return MlClientHelper.execute(context.headers, client, searchRequestBuilder::get); + return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get); } private SearchRequestBuilder buildSearchRequest() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java index 61298f16abd14..2e157c3d1e95a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/chunked/ChunkedDataExtractor.java @@ -15,10 +15,10 @@ import org.elasticsearch.search.aggregations.Aggregations; import org.elasticsearch.search.aggregations.metrics.max.Max; import org.elasticsearch.search.aggregations.metrics.min.Min; -import org.elasticsearch.xpack.core.ml.MlClientHelper; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; -import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; +import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import java.io.IOException; import java.io.InputStream; @@ -135,7 +135,7 @@ private DataSummary requestDataSummary() throws IOException { } protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { - return MlClientHelper.execute(context.headers, client, searchRequestBuilder::get); + return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get); } private Optional getNextStream() throws IOException { diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index 57601406e7117..24174730e2d3b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -20,7 +20,7 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.StoredFieldsContext; import org.elasticsearch.search.sort.SortOrder; -import org.elasticsearch.xpack.core.ml.MlClientHelper; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; import org.elasticsearch.xpack.core.ml.datafeed.extractor.ExtractorUtils; import org.elasticsearch.xpack.ml.utils.DomainSplitFunction; @@ -100,7 +100,7 @@ protected InputStream initScroll(long startTimestamp) throws IOException { } protected SearchResponse executeSearchRequest(SearchRequestBuilder searchRequestBuilder) { - return MlClientHelper.execute(context.headers, client, searchRequestBuilder::get); + return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, searchRequestBuilder::get); } private SearchRequestBuilder buildSearchRequest(long start) { @@ -211,7 +211,8 @@ private void markScrollAsErrored() { } protected SearchResponse executeSearchScrollRequest(String scrollId) { - return MlClientHelper.execute(context.headers, client, () -> SearchScrollAction.INSTANCE.newRequestBuilder(client) + return ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, + () -> SearchScrollAction.INSTANCE.newRequestBuilder(client) .setScroll(SCROLL_TIMEOUT) .setScrollId(scrollId) .get()); @@ -226,7 +227,8 @@ private void clearScroll(String scrollId) { if (scrollId != null) { ClearScrollRequest request = new ClearScrollRequest(); request.addScrollId(scrollId); - MlClientHelper.execute(context.headers, client, () -> client.execute(ClearScrollAction.INSTANCE, request).actionGet()); + ClientHelper.executeWithHeaders(context.headers, ClientHelper.ML_ORIGIN, client, + () -> client.execute(ClearScrollAction.INSTANCE, request).actionGet()); } } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java index f4f359580db8e..2c6e0deaebd9f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractorFactory.java @@ -12,12 +12,12 @@ import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.client.Client; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.xpack.core.ml.MlClientHelper; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; import org.elasticsearch.xpack.core.ml.datafeed.extractor.DataExtractor; -import org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import org.elasticsearch.xpack.core.ml.job.config.Job; import org.elasticsearch.xpack.core.ml.utils.MlStrings; +import 
org.elasticsearch.xpack.ml.datafeed.extractor.DataExtractorFactory; import java.util.Objects; @@ -76,7 +76,7 @@ public static void create(Client client, DatafeedConfig datafeed, Job job, Actio String[] requestFields = job.allInputFields().stream().map(f -> MlStrings.getParentField(f) + "*") .toArray(size -> new String[size]); fieldCapabilitiesRequest.fields(requestFields); - MlClientHelper.execute(datafeed, client, () -> { + ClientHelper. executeWithHeaders(datafeed.getHeaders(), ClientHelper.ML_ORIGIN, client, () -> { client.execute(FieldCapabilitiesAction.INSTANCE, fieldCapabilitiesRequest, fieldCapabilitiesHandler); // This response gets discarded - the listener handles the real response return null; diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlClientHelperTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlClientHelperTests.java deleted file mode 100644 index 284e746e67db2..0000000000000 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlClientHelperTests.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.ml; - -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.ml.MlClientHelper; -import org.elasticsearch.xpack.core.ml.datafeed.DatafeedConfig; -import org.elasticsearch.xpack.core.security.authc.AuthenticationField; -import org.elasticsearch.xpack.core.security.authc.AuthenticationServiceField; -import org.junit.Before; - -import java.util.Collections; -import java.util.Map; -import java.util.function.Consumer; - -import static org.elasticsearch.xpack.core.ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME; -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class MlClientHelperTests extends ESTestCase { - - private Client client = mock(Client.class); - - @Before - public void setupMocks() { - ThreadPool threadPool = mock(ThreadPool.class); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - when(threadPool.getThreadContext()).thenReturn(threadContext); - when(client.threadPool()).thenReturn(threadPool); - - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); - searchFuture.onResponse(new SearchResponse()); - when(client.search(any())).thenReturn(searchFuture); - } - - public void testEmptyHeaders() { - DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed-foo", "foo"); - builder.setIndices(Collections.singletonList("foo-index")); - - assertExecutionWithOrigin(builder.build()); - } - - public void 
testWithHeaders() { - DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed-foo", "foo"); - builder.setIndices(Collections.singletonList("foo-index")); - Map headers = MapBuilder.newMapBuilder() - .put(AuthenticationField.AUTHENTICATION_KEY, "anything") - .put(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything") - .map(); - builder.setHeaders(headers); - - assertRunAsExecution(builder.build(), h -> { - assertThat(h.keySet(), hasSize(2)); - assertThat(h, hasEntry(AuthenticationField.AUTHENTICATION_KEY, "anything")); - assertThat(h, hasEntry(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything")); - }); - } - - public void testFilteredHeaders() { - DatafeedConfig.Builder builder = new DatafeedConfig.Builder("datafeed-foo", "foo"); - builder.setIndices(Collections.singletonList("foo-index")); - Map unrelatedHeaders = MapBuilder.newMapBuilder() - .put(randomAlphaOfLength(10), "anything") - .map(); - builder.setHeaders(unrelatedHeaders); - - assertRunAsExecution(builder.build(), h -> assertThat(h.keySet(), hasSize(0))); - } - - /** - * This method executes a search and checks if the thread context was enriched with the ml origin - */ - private void assertExecutionWithOrigin(DatafeedConfig datafeedConfig) { - MlClientHelper.execute(datafeedConfig, client, () -> { - Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); - assertThat(origin, is(ML_ORIGIN)); - - // Check that headers are not set - Map headers = client.threadPool().getThreadContext().getHeaders(); - assertThat(headers, not(hasEntry(AuthenticationField.AUTHENTICATION_KEY, "anything"))); - assertThat(headers, not(hasEntry(AuthenticationServiceField.RUN_AS_USER_HEADER, "anything"))); - - return client.search(new SearchRequest()).actionGet(); - }); - } - - /** - * This method executes a search and ensures no stashed origin thread context was created, so that the regular node - * client was used, to emulate a run_as function - */ - public void assertRunAsExecution(DatafeedConfig datafeedConfig, Consumer> consumer) { - MlClientHelper.execute(datafeedConfig, client, () -> { - Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); - assertThat(origin, is(nullValue())); - - consumer.accept(client.threadPool().getThreadContext().getHeaders()); - return client.search(new SearchRequest()).actionGet(); - }); - } -} diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupClientHelper.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupClientHelper.java deleted file mode 100644 index 20e4ba120cd8c..0000000000000 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupClientHelper.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. 
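The ML, rollup, and watcher helpers removed in this patch all encode the same decision, and the ClientHelper calls introduced at the call sites above inherit it. A minimal sketch of that shared pattern, pieced together from the deleted helpers and the new call sites; this is an illustration of the consolidated behavior, not the exact ClientHelper source:

    // Sketch: run `supplier` with the security headers captured at config time,
    // if any survive the filter; otherwise run under the internal user for the
    // given origin (ML_ORIGIN, ROLLUP_ORIGIN, WATCHER_ORIGIN, ...).
    public static <T> T executeWithHeaders(Map<String, String> headers, String origin,
                                           Client client, Supplier<T> supplier) {
        Map<String, String> filtered = headers.entrySet().stream()
                .filter(e -> SECURITY_HEADER_FILTERS.contains(e.getKey()))
                .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
        ThreadContext threadContext = client.threadPool().getThreadContext();
        if (filtered.isEmpty()) {
            // security not in use: stash the context and mark it with the origin
            try (ThreadContext.StoredContext ignore = stashWithOrigin(threadContext, origin)) {
                return supplier.get();
            }
        } else {
            // replay the stored credentials so the action runs as the original user
            try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
                threadContext.copyHeaders(filtered.entrySet());
                return supplier.get();
            }
        }
    }

The async variant used by the rollup task (executeWithHeadersAsync) follows the same branch structure but wraps the listener in a ContextPreservingActionListener, as the RollupClientHelper deleted below did.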
- */ -package org.elasticsearch.xpack.rollup.job; - -import org.elasticsearch.action.Action; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.ActionRequest; -import org.elasticsearch.action.ActionRequestBuilder; -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.action.support.ContextPreservingActionListener; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.xpack.core.ClientHelper; -import org.elasticsearch.xpack.core.rollup.job.RollupJob; -import org.elasticsearch.xpack.rollup.Rollup; - -import java.util.Map; -import java.util.function.Supplier; -import java.util.stream.Collectors; - - -/** - * Helper class to execute actions with authentication headers cached in the rollup job (if they exist, otherwise Origin) - */ -public class RollupClientHelper { - - @SuppressWarnings("try") - public static > void executeAsync( - Client client, RollupJob job, Action action, Request request, - ActionListener listener) { - - Map filteredHeaders = job.getHeaders().entrySet().stream() - .filter(e -> Rollup.HEADER_FILTERS.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - - final ThreadContext threadContext = client.threadPool().getThreadContext(); - - // No headers (e.g. security not installed/in use) so execute as rollup origin - if (filteredHeaders.isEmpty()) { - ClientHelper.executeAsyncWithOrigin(client, ClientHelper.ROLLUP_ORIGIN, action, request, listener); - } else { - // Otherwise stash the context and copy in the saved headers before executing - final Supplier supplier = threadContext.newRestorableContext(false); - try (ThreadContext.StoredContext ignore = stashWithHeaders(threadContext, filteredHeaders)) { - client.execute(action, request, new ContextPreservingActionListener<>(supplier, listener)); - } - } - } - - private static ThreadContext.StoredContext stashWithHeaders(ThreadContext threadContext, Map headers) { - final ThreadContext.StoredContext storedContext = threadContext.stashContext(); - threadContext.copyHeaders(headers.entrySet()); - return storedContext; - } -} diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index f357d579c82c5..425629c248c9c 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -23,6 +23,7 @@ import org.elasticsearch.persistent.PersistentTasksExecutor; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.rollup.RollupField; import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction; import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction; @@ -103,12 +104,14 @@ protected class ClientRollupPageManager extends RollupIndexer { @Override protected void doNextSearch(SearchRequest request, ActionListener nextPhase) { - RollupClientHelper.executeAsync(client, job, SearchAction.INSTANCE, request, nextPhase); + ClientHelper.executeWithHeadersAsync(job.getHeaders(), ClientHelper.ROLLUP_ORIGIN, client, SearchAction.INSTANCE, request, + nextPhase); } @Override protected void doNextBulk(BulkRequest request, ActionListener nextPhase) { - 
RollupClientHelper.executeAsync(client, job, BulkAction.INSTANCE, request, nextPhase); + ClientHelper.executeWithHeadersAsync(job.getHeaders(), ClientHelper.ROLLUP_ORIGIN, client, BulkAction.INSTANCE, request, + nextPhase); } @Override diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupClientHelperTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupClientHelperTests.java deleted file mode 100644 index b2d098d458ea0..0000000000000 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupClientHelperTests.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.rollup.job; - -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.search.SearchAction; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.rollup.job.RollupJob; -import org.elasticsearch.xpack.core.rollup.job.RollupJobConfig; -import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; - -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.CountDownLatch; - -import static org.hamcrest.Matchers.equalTo; -import static org.mockito.Matchers.anyObject; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class RollupClientHelperTests extends ESTestCase { - - @SuppressWarnings("unchecked") - public void testNoHeaders() throws InterruptedException { - final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final Client client = mock(Client.class); - final ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(threadContext); - - final CountDownLatch latch = new CountDownLatch(2); - final ActionListener listener = ActionListener.wrap(v -> { - assertTrue(threadContext.getHeaders().isEmpty()); - latch.countDown(); - }, e -> fail(e.getMessage())); - - doAnswer(invocationOnMock -> { - assertTrue(threadContext.getHeaders().isEmpty()); - latch.countDown(); - ((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null); - return null; - }).when(client).execute(anyObject(), anyObject(), anyObject()); - - SearchRequest request = new SearchRequest("foo"); - - RollupJobConfig config = ConfigTestHelpers.getRollupJob(randomAlphaOfLength(5)).build(); - RollupJob job = new RollupJob(config, Collections.emptyMap()); - - RollupClientHelper.executeAsync(client, job, SearchAction.INSTANCE, request, listener); - - latch.await(); - } - - @SuppressWarnings("unchecked") - public void testWrongHeaders() throws InterruptedException { - final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final Client client = mock(Client.class); - final ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - 
when(threadPool.getThreadContext()).thenReturn(threadContext); - - final CountDownLatch latch = new CountDownLatch(2); - final ActionListener listener = ActionListener.wrap(v -> { - assertTrue(threadContext.getHeaders().isEmpty()); - latch.countDown(); - }, e -> fail(e.getMessage())); - - doAnswer(invocationOnMock -> { - assertTrue(threadContext.getHeaders().isEmpty()); - latch.countDown(); - ((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null); - return null; - }).when(client).execute(anyObject(), anyObject(), anyObject()); - - SearchRequest request = new SearchRequest("foo"); - Map headers = new HashMap<>(1); - headers.put("foo", "foo"); - headers.put("bar", "bar"); - RollupJobConfig config = ConfigTestHelpers.getRollupJob(randomAlphaOfLength(5)).build(); - RollupJob job = new RollupJob(config, headers); - - RollupClientHelper.executeAsync(client, job, SearchAction.INSTANCE, request, listener); - - latch.await(); - } - - @SuppressWarnings("unchecked") - public void testWithHeaders() throws Exception { - final ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - final Client client = mock(Client.class); - final ThreadPool threadPool = mock(ThreadPool.class); - when(client.threadPool()).thenReturn(threadPool); - when(threadPool.getThreadContext()).thenReturn(threadContext); - - final CountDownLatch latch = new CountDownLatch(2); - final ActionListener listener = ActionListener.wrap(v -> { - assertTrue(threadContext.getHeaders().isEmpty()); - latch.countDown(); - }, e -> fail(e.getMessage())); - - doAnswer(invocationOnMock -> { - assertThat(threadContext.getHeaders().size(), equalTo(2)); - assertThat(threadContext.getHeaders().get("es-security-runas-user"), equalTo("foo")); - assertThat(threadContext.getHeaders().get("_xpack_security_authentication"), equalTo("bar")); - latch.countDown(); - ((ActionListener)invocationOnMock.getArguments()[2]).onResponse(null); - return null; - }).when(client).execute(anyObject(), anyObject(), anyObject()); - - SearchRequest request = new SearchRequest("foo"); - Map headers = new HashMap<>(1); - headers.put("es-security-runas-user", "foo"); - headers.put("_xpack_security_authentication", "bar"); - RollupJobConfig config = ConfigTestHelpers.getRollupJob(randomAlphaOfLength(5)).build(); - RollupJob job = new RollupJob(config, headers); - - RollupClientHelper.executeAsync(client, job, SearchAction.INSTANCE, request, listener); - - latch.await(); - } - -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java index 7f42777bb5faf..39760bb4dc7b9 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java @@ -206,10 +206,6 @@ public class Watcher extends Plugin implements ActionPlugin, ScriptPlugin { public static final Setting MAX_STOP_TIMEOUT_SETTING = Setting.timeSetting("xpack.watcher.stop.timeout", TimeValue.timeValueSeconds(30), Setting.Property.NodeScope); - // list of headers that will be stored when a watch is stored - public static final Set HEADER_FILTERS = - new HashSet<>(Arrays.asList("es-security-runas-user", "_xpack_security_authentication")); - public static final ScriptContext SCRIPT_SEARCH_CONTEXT = new ScriptContext<>("xpack", SearchScript.Factory.class); // TODO: remove this context when each xpack script use case has their own contexts diff --git 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherClientHelper.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherClientHelper.java deleted file mode 100644 index 1019f5a423e98..0000000000000 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherClientHelper.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher; - -import org.elasticsearch.action.ActionResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.xpack.core.watcher.watch.Watch; - -import java.util.Map; -import java.util.function.Supplier; -import java.util.stream.Collectors; - -import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; - -/** - * A helper class which decides if we should run via the xpack user and set watcher as origin or - * if we should use the run_as functionality by setting the correct headers - */ -public class WatcherClientHelper { - - /** - * Execute a client operation and return the response, try to run with least privileges, when headers exist - * - * @param watch The watch in which context this method gets executed in - * @param client The client used to query - * @param supplier The action to run - * @param The client response class this should return - * @return An instance of the response class - */ - public static T execute(Watch watch, Client client, Supplier supplier) { - // no headers, we will have to use the xpack internal user for our execution by specifying the watcher origin - if (watch.status().getHeaders().isEmpty()) { - try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), WATCHER_ORIGIN)) { - return supplier.get(); - } - } else { - try (ThreadContext.StoredContext ignored = client.threadPool().getThreadContext().stashContext()) { - Map filteredHeaders = watch.status().getHeaders().entrySet().stream() - .filter(e -> Watcher.HEADER_FILTERS.contains(e.getKey())) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); - client.threadPool().getThreadContext().copyHeaders(filteredHeaders.entrySet()); - return supplier.get(); - } - } - } -} diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java index e49732f0cb543..a156e68a4b1ed 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/actions/index/ExecutableIndexAction.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.watcher.actions.Action; import org.elasticsearch.xpack.core.watcher.actions.Action.Result.Status; import org.elasticsearch.xpack.core.watcher.actions.ExecutableAction; @@ -24,7 +25,6 @@ import 
org.elasticsearch.xpack.core.watcher.support.WatcherDateTimeUtils; import org.elasticsearch.xpack.core.watcher.support.xcontent.XContentSource; import org.elasticsearch.xpack.core.watcher.watch.Payload; -import org.elasticsearch.xpack.watcher.WatcherClientHelper; import org.elasticsearch.xpack.watcher.support.ArrayObjectIterator; import org.joda.time.DateTime; @@ -96,7 +96,7 @@ public Action.Result execute(String actionId, WatchExecutionContext ctx, Payload new XContentSource(indexRequest.source(), XContentType.JSON)); } - IndexResponse response = WatcherClientHelper.execute(ctx.watch(), client, + IndexResponse response = ClientHelper.executeWithHeaders(ctx.watch().status().getHeaders(), ClientHelper.WATCHER_ORIGIN, client, () -> client.index(indexRequest).actionGet(indexDefaultTimeout)); try (XContentBuilder builder = jsonBuilder()) { indexResponseToXContent(builder, response); @@ -137,7 +137,7 @@ Action.Result indexBulk(Iterable list, String actionId, WatchExecutionContext ct } bulkRequest.add(indexRequest); } - BulkResponse bulkResponse = WatcherClientHelper.execute(ctx.watch(), client, + BulkResponse bulkResponse = ClientHelper.executeWithHeaders(ctx.watch().status().getHeaders(), ClientHelper.WATCHER_ORIGIN, client, () -> client.bulk(bulkRequest).actionGet(bulkDefaultTimeout)); try (XContentBuilder jsonBuilder = jsonBuilder().startArray()) { for (BulkItemResponse item : bulkResponse) { diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java index 83a4f1f85e732..4aced1b6c0398 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/input/search/ExecutableSearchInput.java @@ -20,10 +20,10 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.script.Script; import org.elasticsearch.search.SearchHit; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.input.ExecutableInput; import org.elasticsearch.xpack.core.watcher.watch.Payload; -import org.elasticsearch.xpack.watcher.WatcherClientHelper; import org.elasticsearch.xpack.watcher.support.XContentFilterKeysUtils; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; @@ -71,8 +71,8 @@ SearchInput.Result doExecute(WatchExecutionContext ctx, WatcherSearchTemplateReq } SearchRequest searchRequest = searchTemplateService.toSearchRequest(request); - final SearchResponse response = WatcherClientHelper.execute(ctx.watch(), client, - () -> client.search(searchRequest).actionGet(timeout)); + final SearchResponse response = ClientHelper.executeWithHeaders(ctx.watch().status().getHeaders(), ClientHelper.WATCHER_ORIGIN, + client, () -> client.search(searchRequest).actionGet(timeout)); if (logger.isDebugEnabled()) { logger.debug("[{}] found [{}] hits", ctx.id(), response.getHits().getTotalHits()); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java index 03dbf88fb0d80..1b408bc5e6463 100644 --- 
a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transform/search/ExecutableSearchTransform.java @@ -15,10 +15,10 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.script.Script; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; import org.elasticsearch.xpack.core.watcher.transform.ExecutableTransform; import org.elasticsearch.xpack.core.watcher.watch.Payload; -import org.elasticsearch.xpack.watcher.WatcherClientHelper; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateRequest; import org.elasticsearch.xpack.watcher.support.search.WatcherSearchTemplateService; @@ -49,7 +49,8 @@ public SearchTransform.Result execute(WatchExecutionContext ctx, Payload payload // We need to make a copy, so that we don't modify the original instance that we keep around in a watch: request = new WatcherSearchTemplateRequest(transform.getRequest(), new BytesArray(renderedTemplate)); SearchRequest searchRequest = searchTemplateService.toSearchRequest(request); - SearchResponse resp = WatcherClientHelper.execute(ctx.watch(), client, () -> client.search(searchRequest).actionGet(timeout)); + SearchResponse resp = ClientHelper.executeWithHeaders(ctx.watch().status().getHeaders(), ClientHelper.WATCHER_ORIGIN, client, + () -> client.search(searchRequest).actionGet(timeout)); return new SearchTransform.Result(request, new Payload.XContent(resp)); } catch (Exception e) { logger.error((Supplier) () -> new ParameterizedMessage("failed to execute [{}] transform for [{}]", TYPE, ctx.id()), e); diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java index d836507596b74..7dcca20e2019e 100644 --- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java +++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchAction.java @@ -22,12 +22,12 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.watcher.support.xcontent.WatcherParams; import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction; import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchResponse; import org.elasticsearch.xpack.core.watcher.watch.Watch; -import org.elasticsearch.xpack.watcher.Watcher; import org.elasticsearch.xpack.watcher.transport.actions.WatcherTransportAction; import org.elasticsearch.xpack.watcher.trigger.TriggerService; import org.elasticsearch.xpack.watcher.watch.WatchParser; @@ -90,7 +90,7 @@ protected void masterOperation(PutWatchRequest request, ClusterState state, // ensure we only filter for the allowed headers Map filteredHeaders = threadPool.getThreadContext().getHeaders().entrySet().stream() - .filter(e -> Watcher.HEADER_FILTERS.contains(e.getKey())) + .filter(e -> 
ClientHelper.SECURITY_HEADER_FILTERS.contains(e.getKey())) .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); watch.status().setHeaders(filteredHeaders); diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherClientHelperTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherClientHelperTests.java deleted file mode 100644 index f1908ccefc2ec..0000000000000 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherClientHelperTests.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.watcher; - -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.client.Client; -import org.elasticsearch.common.collect.MapBuilder; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.ThreadContext; -import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.core.watcher.execution.WatchExecutionContext; -import org.elasticsearch.xpack.watcher.test.WatchExecutionContextMockBuilder; -import org.junit.Before; - -import java.util.Collections; -import java.util.Map; -import java.util.function.Consumer; - -import static org.elasticsearch.xpack.core.ClientHelper.ACTION_ORIGIN_TRANSIENT_NAME; -import static org.elasticsearch.xpack.core.ClientHelper.WATCHER_ORIGIN; -import static org.hamcrest.Matchers.hasEntry; -import static org.hamcrest.Matchers.hasSize; -import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; -import static org.hamcrest.Matchers.nullValue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class WatcherClientHelperTests extends ESTestCase { - - private Client client = mock(Client.class); - - @Before - public void setupMocks() { - PlainActionFuture searchFuture = PlainActionFuture.newFuture(); - searchFuture.onResponse(new SearchResponse()); - when(client.search(any())).thenReturn(searchFuture); - - ThreadPool threadPool = mock(ThreadPool.class); - ThreadContext threadContext = new ThreadContext(Settings.EMPTY); - when(threadPool.getThreadContext()).thenReturn(threadContext); - when(client.threadPool()).thenReturn(threadPool); - } - - public void testEmptyHeaders() { - WatchExecutionContext ctx = new WatchExecutionContextMockBuilder("_id").buildMock(); - when(ctx.watch().status().getHeaders()).thenReturn(Collections.emptyMap()); - - assertExecutionWithOrigin(ctx); - } - - public void testWithHeaders() { - WatchExecutionContext ctx = new WatchExecutionContextMockBuilder("_id").buildMock(); - Map watchStatusHeaders = MapBuilder.newMapBuilder() - .put("es-security-runas-user", "anything") - .put("_xpack_security_authentication", "anything") - .map(); - when(ctx.watch().status().getHeaders()).thenReturn(watchStatusHeaders); - - assertRunAsExecution(ctx, headers -> { - assertThat(headers.keySet(), hasSize(2)); - assertThat(headers, hasEntry("es-security-runas-user", "anything")); - assertThat(headers, hasEntry("_xpack_security_authentication", "anything")); - }); - } - - public void 
testFilteredHeaders() { - WatchExecutionContext ctx = new WatchExecutionContextMockBuilder("_id").buildMock(); - Map watchStatusHeaders = MapBuilder.newMapBuilder() - .put(randomAlphaOfLength(10), "anything") - .map(); - when(ctx.watch().status().getHeaders()).thenReturn(watchStatusHeaders); - - assertRunAsExecution(ctx, headers -> { - assertThat(headers.keySet(), hasSize(0)); - }); - } - - /** - * This method executes a search and checks if the thread context was enriched with the watcher origin - */ - private void assertExecutionWithOrigin(WatchExecutionContext ctx) { - WatcherClientHelper.execute(ctx.watch(), client, () -> { - Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); - assertThat(origin, is(WATCHER_ORIGIN)); - - // check that headers are not set - Map headers = client.threadPool().getThreadContext().getHeaders(); - assertThat(headers, not(hasEntry("es-security-runas-user", "anything"))); - assertThat(headers, not(hasEntry("_xpack_security_authentication", "anything"))); - - return client.search(new SearchRequest()).actionGet(); - }); - - } - - /** - * This method executes a search and ensures no stashed origin thread context was created, so that the regular node - * client was used, to emulate a run_as function - */ - public void assertRunAsExecution(WatchExecutionContext ctx, Consumer> consumer) { - WatcherClientHelper.execute(ctx.watch(), client, () -> { - Object origin = client.threadPool().getThreadContext().getTransient(ACTION_ORIGIN_TRANSIENT_NAME); - assertThat(origin, is(nullValue())); - - Map headers = client.threadPool().getThreadContext().getHeaders(); - consumer.accept(headers); - return client.search(new SearchRequest()).actionGet(); - }); - - } -} \ No newline at end of file diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java index 9a41c9b7aabbf..ce223b1c9fd15 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/transport/actions/put/TransportPutWatchActionTests.java @@ -21,10 +21,10 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchRequest; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; import org.elasticsearch.xpack.core.watcher.watch.Watch; -import org.elasticsearch.xpack.watcher.Watcher; import org.elasticsearch.xpack.watcher.test.WatchExecutionContextMockBuilder; import org.elasticsearch.xpack.watcher.trigger.TriggerService; import org.elasticsearch.xpack.watcher.watch.WatchParser; @@ -86,7 +86,7 @@ public void setupAction() throws Exception { public void testHeadersAreFilteredWhenPuttingWatches() throws Exception { ClusterState state = mock(ClusterState.class); // set up threadcontext with some arbitrary info - String headerName = randomFrom(Watcher.HEADER_FILTERS); + String headerName = randomFrom(ClientHelper.SECURITY_HEADER_FILTERS); threadContext.putHeader(headerName, randomAlphaOfLength(10)); threadContext.putHeader(randomAlphaOfLength(10), "doesntmatter"); From ab02bbe71e1bef19ad94cdc1784da51f82fb7fd7 Mon Sep 17 
00:00:00 2001
From: Ioannis Kakavas
Date: Wed, 16 May 2018 14:42:10 +0300
Subject: [PATCH 17/44] Replace custom reloadable Key/TrustManager (#30509)
 (#30639)

Make SSLContext reloadable

This commit replaces all custom KeyManagers and TrustManagers
(ReloadableKeyManager, ReloadableTrustManager, EmptyKeyManager,
EmptyTrustManager) with instances of X509ExtendedKeyManager and
X509ExtendedTrustManager. This change was triggered by the effort to
allow Elasticsearch to run in a FIPS-140 environment. In JVMs running in
FIPS approved mode, only SunJSSE TrustManagers and KeyManagers can be
used. Reloadability is now ensured by a volatile instance of SSLContext
in SSLContextHolder. SSLConfigurationReloaderTests uses the reloadable
SSLContext to initialize HTTP clients and servers and uses these for
testing the key material and trust relations.
---
 .../xpack/core/ssl/SSLService.java | 299 ++--------
 .../ssl/SSLConfigurationReloaderTests.java | 520 +++++++++---------
 .../xpack/core/ssl/SSLServiceTests.java | 23 +-
 3 files changed, 318 insertions(+), 524 deletions(-)

diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java
index c59a2889c28db..e5150e3faadba 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/SSLService.java
@@ -8,7 +8,6 @@
 import org.apache.http.conn.ssl.NoopHostnameVerifier;
 import org.apache.http.nio.conn.ssl.SSLIOSessionStrategy;
 import org.apache.lucene.util.SetOnce;
-import org.bouncycastle.operator.OperatorCreationException;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.CheckedSupplier;
 import org.elasticsearch.common.Strings;
@@ -21,28 +20,24 @@
 import org.elasticsearch.xpack.core.ssl.cert.CertificateInfo;

 import javax.net.ssl.HostnameVerifier;
+import javax.net.ssl.KeyManagerFactory;
 import javax.net.ssl.SSLContext;
 import javax.net.ssl.SSLEngine;
 import javax.net.ssl.SSLParameters;
 import javax.net.ssl.SSLSessionContext;
 import javax.net.ssl.SSLSocket;
 import javax.net.ssl.SSLSocketFactory;
+import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509ExtendedKeyManager;
 import javax.net.ssl.X509ExtendedTrustManager;
-import javax.security.auth.DestroyFailedException;

 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.Socket;
 import java.security.GeneralSecurityException;
 import java.security.KeyManagementException;
-import java.security.KeyStoreException;
+import java.security.KeyStore;
 import java.security.NoSuchAlgorithmException;
-import java.security.Principal;
-import java.security.PrivateKey;
-import java.security.UnrecoverableKeyException;
-import java.security.cert.CertificateException;
-import java.security.cert.X509Certificate;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -54,6 +49,7 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
+import java.util.Optional;
 import java.util.Set;

 /**
@@ -71,8 +67,7 @@ public class SSLService extends AbstractComponent {
      * Create a new SSLService that parses the settings for the ssl contexts that need to be created, creates them, and then caches them
      * for use later
      */
-    public SSLService(Settings settings, Environment environment) throws CertificateException, UnrecoverableKeyException,
-            NoSuchAlgorithmException, IOException, DestroyFailedException, KeyStoreException,
OperatorCreationException { + public SSLService(Settings settings, Environment environment) { super(settings); this.env = environment; this.globalSSLConfiguration = new SSLConfiguration(settings.getByPrefix(XPackSettings.GLOBAL_SSL_PREFIX)); @@ -403,10 +398,8 @@ private SSLContextHolder createSslContext(SSLConfiguration sslConfiguration) { if (logger.isDebugEnabled()) { logger.debug("using ssl settings [{}]", sslConfiguration); } - ReloadableTrustManager trustManager = - new ReloadableTrustManager(sslConfiguration.trustConfig().createTrustManager(env), sslConfiguration.trustConfig()); - ReloadableX509KeyManager keyManager = - new ReloadableX509KeyManager(sslConfiguration.keyConfig().createKeyManager(env), sslConfiguration.keyConfig()); + X509ExtendedTrustManager trustManager = sslConfiguration.trustConfig().createTrustManager(env); + X509ExtendedKeyManager keyManager = sslConfiguration.keyConfig().createKeyManager(env); return createSslContext(keyManager, trustManager, sslConfiguration); } @@ -417,7 +410,7 @@ private SSLContextHolder createSslContext(SSLConfiguration sslConfiguration) { * @param trustManager the trust manager to use * @return the created SSLContext */ - private SSLContextHolder createSslContext(ReloadableX509KeyManager keyManager, ReloadableTrustManager trustManager, + private SSLContextHolder createSslContext(X509ExtendedKeyManager keyManager, X509ExtendedTrustManager trustManager, SSLConfiguration sslConfiguration) { // Initialize sslContext try { @@ -427,7 +420,7 @@ private SSLContextHolder createSslContext(ReloadableX509KeyManager keyManager, R // check the supported ciphers and log them here to prevent spamming logs on every call supportedCiphers(sslContext.getSupportedSSLParameters().getCipherSuites(), sslConfiguration.cipherSuites(), true); - return new SSLContextHolder(sslContext, trustManager, keyManager); + return new SSLContextHolder(sslContext, sslConfiguration); } catch (NoSuchAlgorithmException | KeyManagementException e) { throw new ElasticsearchException("failed to initialize the SSLContext", e); } @@ -436,9 +429,7 @@ private SSLContextHolder createSslContext(ReloadableX509KeyManager keyManager, R /** * Parses the settings to load all SSLConfiguration objects that will be used. 
*/ - Map loadSSLConfigurations() throws CertificateException, - UnrecoverableKeyException, NoSuchAlgorithmException, IOException, DestroyFailedException, KeyStoreException, - OperatorCreationException { + Map loadSSLConfigurations() { Map sslConfigurations = new HashMap<>(); sslConfigurations.put(globalSSLConfiguration, createSslContext(globalSSLConfiguration)); @@ -560,258 +551,70 @@ private static SSLSocket createWithPermissions(CheckedSupplier sessionIds = sslSessionContext.getIds(); while (sessionIds.hasMoreElements()) { byte[] sessionId = sessionIds.nextElement(); sslSessionContext.getSession(sessionId).invalidate(); } } - } - - /** - * This is an empty key manager that is used in case a loaded key manager is null - */ - private static final class EmptyKeyManager extends X509ExtendedKeyManager { - - @Override - public String[] getClientAliases(String s, Principal[] principals) { - return new String[0]; - } - - @Override - public String chooseClientAlias(String[] strings, Principal[] principals, Socket socket) { - return null; - } - - @Override - public String[] getServerAliases(String s, Principal[] principals) { - return new String[0]; - } - - @Override - public String chooseServerAlias(String s, Principal[] principals, Socket socket) { - return null; - } - - @Override - public X509Certificate[] getCertificateChain(String s) { - return new X509Certificate[0]; - } - - @Override - public PrivateKey getPrivateKey(String s) { - return null; - } - } - - /** - * This is an empty trust manager that is used in case a loaded trust manager is null - */ - static final class EmptyX509TrustManager extends X509ExtendedTrustManager { - - @Override - public void checkClientTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { - throw new CertificateException("no certificates are trusted"); - } - - @Override - public void checkServerTrusted(X509Certificate[] x509Certificates, String s, Socket socket) throws CertificateException { - throw new CertificateException("no certificates are trusted"); - } - - @Override - public void checkClientTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { - throw new CertificateException("no certificates are trusted"); - } - - @Override - public void checkServerTrusted(X509Certificate[] x509Certificates, String s, SSLEngine sslEngine) throws CertificateException { - throw new CertificateException("no certificates are trusted"); - } - @Override - public void checkClientTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { - throw new CertificateException("no certificates are trusted"); + synchronized void reload() { + invalidateSessions(context.getClientSessionContext()); + invalidateSessions(context.getServerSessionContext()); + reloadSslContext(); + } + + private void reloadSslContext() { + try { + X509ExtendedKeyManager loadedKeyManager = Optional.ofNullable(keyConfig.createKeyManager(env)). + orElse(getEmptyKeyManager()); + X509ExtendedTrustManager loadedTrustManager = Optional.ofNullable(trustConfig.createTrustManager(env)). 
+ orElse(getEmptyTrustManager()); + SSLContext loadedSslContext = SSLContext.getInstance(sslContextAlgorithm(sslConfiguration.supportedProtocols())); + loadedSslContext.init(new X509ExtendedKeyManager[]{loadedKeyManager}, + new X509ExtendedTrustManager[]{loadedTrustManager}, null); + supportedCiphers(loadedSslContext.getSupportedSSLParameters().getCipherSuites(), sslConfiguration.cipherSuites(), false); + this.context = loadedSslContext; + } catch (GeneralSecurityException | IOException e) { + throw new ElasticsearchException("failed to initialize the SSLContext", e); + } } - - @Override - public void checkServerTrusted(X509Certificate[] x509Certificates, String s) throws CertificateException { - throw new CertificateException("no certificates are trusted"); + X509ExtendedKeyManager getEmptyKeyManager() throws GeneralSecurityException, IOException { + KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + keyStore.load(null, null); + KeyManagerFactory keyManagerFactory = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); + keyManagerFactory.init(keyStore, null); + return (X509ExtendedKeyManager) keyManagerFactory.getKeyManagers()[0]; } - @Override - public X509Certificate[] getAcceptedIssuers() { - return new X509Certificate[0]; + X509ExtendedTrustManager getEmptyTrustManager() throws GeneralSecurityException, IOException { + KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + keyStore.load(null, null); + TrustManagerFactory trustManagerFactory = TrustManagerFactory.getInstance("X509"); + trustManagerFactory.init(keyStore); + return (X509ExtendedTrustManager) trustManagerFactory.getTrustManagers()[0]; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java index f19f13d38b74a..2ccbd549105d9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLConfigurationReloaderTests.java @@ -5,25 +5,33 @@ */ package org.elasticsearch.xpack.core.ssl; -import org.apache.lucene.util.SetOnce; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClients; +import org.apache.http.ssl.SSLContextBuilder; import org.bouncycastle.openssl.jcajce.JcaPEMWriter; import org.bouncycastle.openssl.jcajce.JcePEMEncryptorBuilder; +import org.elasticsearch.common.CheckedRunnable; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.http.MockResponse; +import org.elasticsearch.test.http.MockWebServer; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.watcher.ResourceWatcherService; import org.junit.After; import org.junit.Before; -import javax.net.ssl.X509ExtendedKeyManager; -import javax.net.ssl.X509ExtendedTrustManager; +import javax.net.ssl.SSLContext; +import javax.net.ssl.SSLHandshakeException; import javax.security.auth.x500.X500Principal; +import java.io.BufferedWriter; import java.io.IOException; +import java.io.InputStream; import java.io.OutputStream; import java.io.OutputStreamWriter; 
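Condensing the SSLService hunks above: reload no longer mutates custom manager delegates; the holder instead swaps a volatile SSLContext wholesale, built from plain SunJSSE-provided managers, which is what permits FIPS-approved operation. A simplified sketch of the resulting holder; names follow the diff, but treat this as a reading aid rather than the full class:

    // Simplified holder: readers always see the latest context via the volatile
    // field; reload() invalidates cached TLS sessions and rebuilds the context
    // from the current key/trust material on disk.
    static final class SSLContextHolder {
        private volatile SSLContext context;
        private final SSLConfiguration sslConfiguration;

        SSLContextHolder(SSLContext context, SSLConfiguration sslConfiguration) {
            this.context = context;
            this.sslConfiguration = sslConfiguration;
        }

        SSLContext sslContext() {
            return context;
        }

        synchronized void reload() {
            invalidateSessions(context.getClientSessionContext());
            invalidateSessions(context.getServerSessionContext());
            reloadSslContext();   // re-reads key/trust config, replaces `context`
        }
    }

When no key or trust material is configured, the diff falls back to managers produced by KeyManagerFactory/TrustManagerFactory over an empty KeyStore, replacing the hand-written EmptyKeyManager and EmptyX509TrustManager classes removed above.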
import java.nio.charset.StandardCharsets; @@ -32,20 +40,23 @@ import java.nio.file.Path; import java.nio.file.StandardCopyOption; import java.nio.file.StandardOpenOption; -import java.nio.file.attribute.BasicFileAttributes; +import java.security.AccessController; +import java.security.KeyManagementException; import java.security.KeyPair; import java.security.KeyStore; -import java.security.PrivateKey; +import java.security.KeyStoreException; +import java.security.NoSuchAlgorithmException; +import java.security.PrivilegedActionException; +import java.security.PrivilegedExceptionAction; +import java.security.UnrecoverableKeyException; import java.security.cert.Certificate; +import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.concurrent.CountDownLatch; -import java.util.function.BiConsumer; +import java.util.function.Consumer; import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.sameInstance; -import static org.hamcrest.core.Is.is; /** * Unit tests for the reloading of SSL configuration @@ -71,8 +82,7 @@ public void cleanup() throws Exception { } /** - * Tests reloading a keystore. The contents of the keystore is used for both keystore and truststore material, so both key - * config and trust config is checked. + * Tests reloading a keystore that is used in the KeyManager of SSLContext */ public void testReloadingKeyStore() throws Exception { final Path tempDir = createTempDir(); @@ -86,127 +96,127 @@ public void testReloadingKeyStore() throws Exception { .setSecureSettings(secureSettings) .build(); final Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); - - final BiConsumer keyManagerPreChecks = (keyManager, config) -> { - // key manager checks - String[] aliases = keyManager.getServerAliases("RSA", null); - assertNotNull(aliases); - assertThat(aliases.length, is(1)); - assertThat(aliases[0], is("testnode")); - }; - - final SetOnce trustedCount = new SetOnce<>(); - final BiConsumer trustManagerPreChecks = (trustManager, config) -> { - // trust manager checks - Certificate[] certificates = trustManager.getAcceptedIssuers(); - trustedCount.set(certificates.length); - }; - - final Runnable modifier = () -> { - try { - // modify it - KeyStore keyStore = KeyStore.getInstance("jks"); - keyStore.load(null, null); - final KeyPair keyPair = CertUtils.generateKeyPair(512); - X509Certificate cert = CertUtils.generateSignedCertificate(new X500Principal("CN=testReloadingKeyStore"), null, keyPair, + //Load HTTPClient only once. 
Client uses the same store as a truststore + try (CloseableHttpClient client = getSSLClient(keystorePath, "testnode")) { + final Consumer keyMaterialPreChecks = (context) -> { + try (MockWebServer server = new MockWebServer(context, true)) { + server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + server.start(); + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); + } catch (Exception e) { + throw new RuntimeException("Exception starting or connecting to the mock server", e); + } + }; + + final Runnable modifier = () -> { + try { + // modify the keystore that the KeyManager uses + KeyStore keyStore = KeyStore.getInstance("jks"); + keyStore.load(null, null); + final KeyPair keyPair = CertUtils.generateKeyPair(512); + X509Certificate cert = CertUtils.generateSignedCertificate(new X500Principal("CN=localhost"), null, keyPair, null, null, 365); - keyStore.setKeyEntry("key", keyPair.getPrivate(), "testnode".toCharArray(), new X509Certificate[] { cert }); - Path updated = tempDir.resolve("updated.jks"); - try (OutputStream out = Files.newOutputStream(updated)) { - keyStore.store(out, "testnode".toCharArray()); + keyStore.setKeyEntry("key", keyPair.getPrivate(), "testnode".toCharArray(), new X509Certificate[]{cert}); + Path updated = tempDir.resolve("updated.jks"); + try (OutputStream out = Files.newOutputStream(updated)) { + keyStore.store(out, "testnode".toCharArray()); + } + atomicMoveIfPossible(updated, keystorePath); + } catch (Exception e) { + throw new RuntimeException("modification failed", e); } - atomicMoveIfPossible(updated, keystorePath); - } catch (Exception e) { - throw new RuntimeException("modification failed", e); - } - }; - - final BiConsumer keyManagerPostChecks = (updatedKeyManager, config) -> { - String[] aliases = updatedKeyManager.getServerAliases("RSA", null); - assertNotNull(aliases); - assertThat(aliases.length, is(1)); - assertThat(aliases[0], is("key")); - }; - final BiConsumer trustManagerPostChecks = (updatedTrustManager, config) -> { - assertThat(trustedCount.get() - updatedTrustManager.getAcceptedIssuers().length, is(5)); - }; - validateSSLConfigurationIsReloaded(settings, env, keyManagerPreChecks, trustManagerPreChecks, modifier, keyManagerPostChecks, - trustManagerPostChecks); + }; + // The new server certificate is not in the client's truststore so SSLHandshake should fail + final Consumer keyMaterialPostChecks = (updatedContext) -> { + try (MockWebServer server = new MockWebServer(updatedContext, true)) { + server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + server.start(); + SSLHandshakeException sslException = expectThrows(SSLHandshakeException.class, () -> + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close())); + assertThat(sslException.getCause().getMessage(), containsString("PKIX path building failed")); + } catch (Exception e) { + throw new RuntimeException("Exception starting or connecting to the mock server", e); + } + }; + validateSSLConfigurationIsReloaded(settings, env, keyMaterialPreChecks, modifier, keyMaterialPostChecks); + } } /** - * Tests the reloading of a PEM key config when the key is overwritten. The trust portion is not tested as it is not modified by this - * test. + * Tests the reloading of SSLContext when a PEM key and certificate are used. 
*/ - public void testPEMKeyConfigReloading() throws Exception { - Path tempDir = createTempDir(); - Path keyPath = tempDir.resolve("testnode.pem"); - Path certPath = tempDir.resolve("testnode.crt"); - Path clientCertPath = tempDir.resolve("testclient.crt"); + public void testPEMKeyCertConfigReloading() throws Exception { + final Path tempDir = createTempDir(); + final Path keyPath = tempDir.resolve("testnode.pem"); + final Path certPath = tempDir.resolve("testnode.crt"); + final Path clientTruststorePath = tempDir.resolve("testnode.jks"); + Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"), clientTruststorePath); Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.pem"), keyPath); Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"), certPath); - Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"), clientCertPath); MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("xpack.ssl.secure_key_passphrase", "testnode"); final Settings settings = Settings.builder() - .put("path.home", createTempDir()) - .put("xpack.ssl.key", keyPath) - .put("xpack.ssl.certificate", certPath) - .putList("xpack.ssl.certificate_authorities", certPath.toString(), clientCertPath.toString()) - .setSecureSettings(secureSettings) - .build(); + .put("path.home", createTempDir()) + .put("xpack.ssl.key", keyPath) + .put("xpack.ssl.certificate", certPath) + .setSecureSettings(secureSettings) + .build(); final Environment env = randomBoolean() ? null : - TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); - - final SetOnce privateKey = new SetOnce<>(); - final BiConsumer keyManagerPreChecks = (keyManager, config) -> { - String[] aliases = keyManager.getServerAliases("RSA", null); - assertNotNull(aliases); - assertThat(aliases.length, is(1)); - assertThat(aliases[0], is("key")); - privateKey.set(keyManager.getPrivateKey("key")); - assertNotNull(privateKey.get()); - }; - - final KeyPair keyPair = CertUtils.generateKeyPair(randomFrom(1024, 2048)); - final Runnable modifier = () -> { - try { - // make sure we wait long enough to see a change. if time is within a second the file may not be seen as modified since the - // size is the same! - assertTrue(awaitBusy(() -> { - try { - BasicFileAttributes attributes = Files.readAttributes(keyPath, BasicFileAttributes.class); - return System.currentTimeMillis() - attributes.lastModifiedTime().toMillis() >= 1000L; - } catch (IOException e) { - throw new RuntimeException("io exception while checking time", e); + TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); + // Load HTTPClient once. 
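The getSSLClient helper the rewritten tests rely on is not part of this hunk. Based on the Apache HttpClient imports added above (SSLContextBuilder, HttpClients), it plausibly looks like the following; the helper name is taken from the call sites, but the exact signature is an assumption:

    // Assumed shape: build a client whose trust anchors come from the given
    // keystore, so handshakes succeed only while the server presents a
    // certificate that the (unchanged) truststore still covers.
    private static CloseableHttpClient getSSLClient(Path trustStorePath, String password) throws Exception {
        SSLContext sslContext = SSLContextBuilder.create()
                .loadTrustMaterial(trustStorePath.toFile(), password.toCharArray())
                .build();
        return HttpClients.custom().setSSLContext(sslContext).build();
    }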
Client uses a keystore containing testnode key/cert as a truststore + try (CloseableHttpClient client = getSSLClient(clientTruststorePath, "testnode")) { + final Consumer keyMaterialPreChecks = (context) -> { + try (MockWebServer server = new MockWebServer(context, false)) { + server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + server.start(); + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); + } catch (Exception e) { + throw new RuntimeException("Exception starting or connecting to the mock server", e); + } + }; + final Runnable modifier = () -> { + try { + final KeyPair keyPair = CertUtils.generateKeyPair(512); + X509Certificate cert = CertUtils.generateSignedCertificate(new X500Principal("CN=localhost"), null, keyPair, + null, null, 365); + Path updatedKeyPath = tempDir.resolve("updated.pem"); + Path updatedCertPath = tempDir.resolve("updated.crt"); + try (OutputStream os = Files.newOutputStream(updatedKeyPath); + OutputStreamWriter osWriter = new OutputStreamWriter(os, StandardCharsets.UTF_8); + JcaPEMWriter writer = new JcaPEMWriter(osWriter)) { + writer.writeObject(keyPair, + new JcePEMEncryptorBuilder("DES-EDE3-CBC").setProvider(CertUtils.BC_PROV).build("testnode".toCharArray())); + } + try (BufferedWriter out = Files.newBufferedWriter(updatedCertPath); + JcaPEMWriter pemWriter = new JcaPEMWriter(out)) { + pemWriter.writeObject(cert); } - })); - Path updatedKeyPath = tempDir.resolve("updated.pem"); - try (OutputStream os = Files.newOutputStream(updatedKeyPath); - OutputStreamWriter osWriter = new OutputStreamWriter(os, StandardCharsets.UTF_8); - JcaPEMWriter writer = new JcaPEMWriter(osWriter)) { - writer.writeObject(keyPair, - new JcePEMEncryptorBuilder("DES-EDE3-CBC").setProvider(CertUtils.BC_PROV).build("testnode".toCharArray())); + atomicMoveIfPossible(updatedKeyPath, keyPath); + atomicMoveIfPossible(updatedCertPath, certPath); + } catch (Exception e) { + throw new RuntimeException("failed to modify file", e); } - atomicMoveIfPossible(updatedKeyPath, keyPath); - } catch (Exception e) { - throw new RuntimeException("failed to modify file", e); - } - }; + }; + // The new server certificate is not in the client's truststore so SSLHandshake should fail + final Consumer keyMaterialPostChecks = (updatedContext) -> { + try (MockWebServer server = new MockWebServer(updatedContext, false)) { + server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + server.start(); + SSLHandshakeException sslException = expectThrows(SSLHandshakeException.class, () -> + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close())); + assertThat(sslException.getCause().getMessage(), containsString("PKIX path building failed")); + } catch (Exception e) { + throw new RuntimeException("Exception starting or connecting to the mock server", e); + } + }; - final BiConsumer keyManagerPostChecks = (keyManager, config) -> { - String[] aliases = keyManager.getServerAliases("RSA", null); - assertNotNull(aliases); - assertThat(aliases.length, is(1)); - assertThat(aliases[0], is("key")); - assertThat(keyManager.getPrivateKey(aliases[0]), not(equalTo(privateKey))); - assertThat(keyManager.getPrivateKey(aliases[0]), is(equalTo(keyPair.getPrivate()))); - }; - validateKeyConfigurationIsReloaded(settings, env, keyManagerPreChecks, modifier, keyManagerPostChecks); + validateSSLConfigurationIsReloaded(settings, env, keyMaterialPreChecks, modifier, keyMaterialPostChecks); + } } /** - * Tests 
the reloading of the trust config when the trust store is modified. The key config is not tested as part of this test. + * Tests the reloading of SSLContext when the trust store is modified. The same store is used as a TrustStore (for the + * reloadable SSLContext used in the HTTPClient) and as a KeyStore for the MockWebServer */ public void testReloadingTrustStore() throws Exception { Path tempDir = createTempDir(); @@ -215,80 +225,106 @@ public void testReloadingTrustStore() throws Exception { MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString("xpack.ssl.truststore.secure_password", "testnode"); Settings settings = Settings.builder() - .put("xpack.ssl.truststore.path", trustStorePath) - .put("path.home", createTempDir()) - .setSecureSettings(secureSettings) - .build(); + .put("xpack.ssl.truststore.path", trustStorePath) + .put("path.home", createTempDir()) + .setSecureSettings(secureSettings) + .build(); Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); - - final SetOnce trustedCount = new SetOnce<>(); - final BiConsumer trustManagerPreChecks = (trustManager, config) -> { - // trust manager checks - Certificate[] certificates = trustManager.getAcceptedIssuers(); - trustedCount.set(certificates.length); - }; - - - final Runnable modifier = () -> { - try { - Path updatedTruststore = tempDir.resolve("updated.jks"); - KeyStore keyStore = KeyStore.getInstance("jks"); - keyStore.load(null, null); - try (OutputStream out = Files.newOutputStream(updatedTruststore)) { - keyStore.store(out, "testnode".toCharArray()); + // Create the MockWebServer once for both pre and post checks + try(MockWebServer server = getSslServer(trustStorePath, "testnode")){ + final Consumer trustMaterialPreChecks = (context) -> { + try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()){ + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); + } catch (Exception e) { + throw new RuntimeException("Error connecting to the mock server", e); } - atomicMoveIfPossible(updatedTruststore, trustStorePath); - } catch (Exception e) { - throw new RuntimeException("failed to modify file", e); - } - }; - - final BiConsumer trustManagerPostChecks = (updatedTrustManager, config) -> { - assertThat(trustedCount.get() - updatedTrustManager.getAcceptedIssuers().length, is(6)); - }; + }; + + final Runnable modifier = () -> { + try { + Path updatedTrustStore = tempDir.resolve("updated.jks"); + KeyStore keyStore = KeyStore.getInstance("jks"); + keyStore.load(null, null); + final KeyPair keyPair = CertUtils.generateKeyPair(512); + X509Certificate cert = CertUtils.generateSignedCertificate(new X500Principal("CN=localhost"), null, keyPair, + null, null, 365); + keyStore.setKeyEntry("newKey", keyPair.getPrivate(), "testnode".toCharArray(), new Certificate[]{cert}); + try (OutputStream out = Files.newOutputStream(updatedTrustStore)) { + keyStore.store(out, "testnode".toCharArray()); + } + atomicMoveIfPossible(updatedTrustStore, trustStorePath); + } catch (Exception e) { + throw new RuntimeException("failed to modify file", e); + } + }; + + // Client's truststore doesn't contain the server's certificate anymore so SSLHandshake should fail + final Consumer trustMaterialPostChecks = (updatedContext) -> { + try (CloseableHttpClient client = HttpClients.custom().setSSLContext(updatedContext).build()){ + SSLHandshakeException sslException = expectThrows(SSLHandshakeException.class, () -> + 
privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close())); + assertThat(sslException.getCause().getMessage(), containsString("PKIX path building failed")); + } catch (Exception e) { + throw new RuntimeException("Error closing CloseableHttpClient", e); + } + }; - validateTrustConfigurationIsReloaded(settings, env, trustManagerPreChecks, modifier, trustManagerPostChecks); + validateSSLConfigurationIsReloaded(settings, env, trustMaterialPreChecks, modifier, trustMaterialPostChecks); + } } /** - * Test the reloading of a trust config that is backed by PEM certificate files. The key config is not tested as we only care about the - * trust config in this test. + * Test the reloading of SSLContext whose trust config is backed by PEM certificate files. */ public void testReloadingPEMTrustConfig() throws Exception { Path tempDir = createTempDir(); - Path clientCertPath = tempDir.resolve("testclient.crt"); - Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testclient.crt"), clientCertPath); + Path clientCertPath = tempDir.resolve("testnode.crt"); + Path keyStorePath = tempDir.resolve("testnode.jks"); + Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.jks"), keyStorePath); + Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"), clientCertPath); Settings settings = Settings.builder() .putList("xpack.ssl.certificate_authorities", clientCertPath.toString()) .put("path.home", createTempDir()) .build(); Environment env = randomBoolean() ? null : TestEnvironment.newEnvironment(settings); + // Create the MockWebServer once for both pre and post checks + try(MockWebServer server = getSslServer(keyStorePath, "testnode")){ + final Consumer trustMaterialPreChecks = (context) -> { + try (CloseableHttpClient client = HttpClients.custom().setSSLContext(context).build()){ + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close()); + } catch (Exception e) { + throw new RuntimeException("Exception connecting to the mock server", e); + } + }; - final BiConsumer trustManagerPreChecks = (trustManager, config) -> { - // trust manager checks - Certificate[] certificates = trustManager.getAcceptedIssuers(); - assertThat(certificates.length, is(1)); - assertThat(((X509Certificate)certificates[0]).getSubjectX500Principal().getName(), containsString("Test Client")); - }; - - final Runnable modifier = () -> { - try { - Path updatedCert = tempDir.resolve("updated.crt"); - Files.copy(getDataPath("/org/elasticsearch/xpack/security/transport/ssl/certs/simple/testnode.crt"), updatedCert, - StandardCopyOption.REPLACE_EXISTING); - atomicMoveIfPossible(updatedCert, clientCertPath); - } catch (Exception e) { - throw new RuntimeException("failed to modify file", e); - } - }; - - final BiConsumer trustManagerPostChecks = (updatedTrustManager, config) -> { - Certificate[] updatedCerts = updatedTrustManager.getAcceptedIssuers(); - assertThat(updatedCerts.length, is(1)); - assertThat(((X509Certificate)updatedCerts[0]).getSubjectX500Principal().getName(), containsString("Test Node")); - }; + final Runnable modifier = () -> { + try { + final KeyPair keyPair = CertUtils.generateKeyPair(512); + X509Certificate cert = CertUtils.generateSignedCertificate(new X500Principal("CN=localhost"), null, keyPair, + null, null, 365); + Path updatedCertPath = tempDir.resolve("updated.crt"); + try (BufferedWriter out = 
Files.newBufferedWriter(updatedCertPath); + JcaPEMWriter pemWriter = new JcaPEMWriter(out)) { + pemWriter.writeObject(cert); + } + atomicMoveIfPossible(updatedCertPath, clientCertPath); + } catch (Exception e) { + throw new RuntimeException("failed to modify file", e); + } + }; + // Client doesn't trust the Server certificate anymore so SSLHandshake should fail + final Consumer trustMaterialPostChecks = (updatedContext) -> { + try (CloseableHttpClient client = HttpClients.custom().setSSLContext(updatedContext).build()){ + SSLHandshakeException sslException = expectThrows(SSLHandshakeException.class, () -> + privilegedConnect(() -> client.execute(new HttpGet("https://localhost:" + server.getPort())).close())); + assertThat(sslException.getCause().getMessage(), containsString("PKIX path building failed")); + } catch (Exception e) { + throw new RuntimeException("Error closing CloseableHttpClient", e); + } + }; - validateTrustConfigurationIsReloaded(settings, env, trustManagerPreChecks, modifier, trustManagerPostChecks); + validateSSLConfigurationIsReloaded(settings, env, trustMaterialPreChecks, modifier, trustMaterialPostChecks); + } } /** @@ -316,15 +352,14 @@ void reloadSSLContext(SSLConfiguration configuration) { } }; - // key manager checks - final X509ExtendedKeyManager keyManager = sslService.sslContextHolder(config).keyManager().getKeyManager(); + final SSLContext context = sslService.sslContextHolder(config).sslContext(); // truncate the keystore try (OutputStream out = Files.newOutputStream(keystorePath, StandardOpenOption.TRUNCATE_EXISTING)) { } // we intentionally don't wait here as we rely on concurrency to catch a failure - assertThat(sslService.sslContextHolder(config).keyManager().getKeyManager(), sameInstance(keyManager)); + assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); } /** @@ -358,14 +393,14 @@ void reloadSSLContext(SSLConfiguration configuration) { } }; - final X509ExtendedKeyManager keyManager = sslService.sslContextHolder(config).keyManager().getKeyManager(); + final SSLContext context = sslService.sslContextHolder(config).sslContext(); // truncate the file try (OutputStream os = Files.newOutputStream(keyPath, StandardOpenOption.TRUNCATE_EXISTING)) { } // we intentionally don't wait here as we rely on concurrency to catch a failure - assertThat(sslService.sslContextHolder(config).keyManager().getKeyManager(), sameInstance(keyManager)); + assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); } /** @@ -393,14 +428,14 @@ void reloadSSLContext(SSLConfiguration configuration) { } }; - final X509ExtendedTrustManager trustManager = sslService.sslContextHolder(config).trustManager().getTrustManager(); + final SSLContext context = sslService.sslContextHolder(config).sslContext(); // truncate the truststore try (OutputStream os = Files.newOutputStream(trustStorePath, StandardOpenOption.TRUNCATE_EXISTING)) { } // we intentionally don't wait here as we rely on concurrency to catch a failure - assertThat(sslService.sslContextHolder(config).trustManager().getTrustManager(), sameInstance(trustManager)); + assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); } /** @@ -425,7 +460,7 @@ void reloadSSLContext(SSLConfiguration configuration) { } }; - final X509ExtendedTrustManager trustManager = sslService.sslContextHolder(config).trustManager().getTrustManager(); + final SSLContext context = sslService.sslContextHolder(config).sslContext(); // write bad file Path updatedCert = 
tempDir.resolve("updated.crt"); @@ -435,53 +470,13 @@ void reloadSSLContext(SSLConfiguration configuration) { atomicMoveIfPossible(updatedCert, clientCertPath); // we intentionally don't wait here as we rely on concurrency to catch a failure - assertThat(sslService.sslContextHolder(config).trustManager().getTrustManager(), sameInstance(trustManager)); + assertThat(sslService.sslContextHolder(config).sslContext(), sameInstance(context)); } - /** - * Validates the trust configuration aspect of the SSLConfiguration is reloaded - */ - private void validateTrustConfigurationIsReloaded(Settings settings, Environment env, - BiConsumer trustManagerPreChecks, - Runnable modificationFunction, - BiConsumer trustManagerPostChecks) - throws Exception { - validateSSLConfigurationIsReloaded(settings, env, false, true, null, trustManagerPreChecks, modificationFunction, null, - trustManagerPostChecks); - } - - /** - * Validates the trust configuration aspect of the SSLConfiguration is reloaded - */ - private void validateKeyConfigurationIsReloaded(Settings settings, Environment env, - BiConsumer keyManagerPreChecks, - Runnable modificationFunction, - BiConsumer keyManagerPostChecks) - throws Exception { - validateSSLConfigurationIsReloaded(settings, env, true, false, keyManagerPreChecks, null, modificationFunction, - keyManagerPostChecks, null); - } - - /** - * Validates that both the key and trust configuration aspects of the SSLConfiguration are reloaded - */ private void validateSSLConfigurationIsReloaded(Settings settings, Environment env, - BiConsumer keyManagerPreChecks, - BiConsumer trustManagerPreChecks, + Consumer preChecks, Runnable modificationFunction, - BiConsumer keyManagerPostChecks, - BiConsumer trustManagerPostChecks) - throws Exception { - validateSSLConfigurationIsReloaded(settings, env, true, true, keyManagerPreChecks, trustManagerPreChecks, modificationFunction, - keyManagerPostChecks, trustManagerPostChecks); - } - - private void validateSSLConfigurationIsReloaded(Settings settings, Environment env, boolean checkKeys, boolean checkTrust, - BiConsumer keyManagerPreChecks, - BiConsumer trustManagerPreChecks, - Runnable modificationFunction, - BiConsumer keyManagerPostChecks, - BiConsumer trustManagerPostChecks) + Consumer postChecks) throws Exception { final CountDownLatch reloadLatch = new CountDownLatch(1); @@ -494,50 +489,16 @@ void reloadSSLContext(SSLConfiguration configuration) { reloadLatch.countDown(); } }; - - final X509ExtendedKeyManager keyManager; - if (checkKeys) { - keyManager = sslService.sslContextHolder(config).keyManager().getKeyManager(); - } else { - keyManager = null; - } - - final X509ExtendedTrustManager trustManager; - if (checkTrust) { - trustManager = sslService.sslContextHolder(config).trustManager().getTrustManager(); - } else { - trustManager = null; - } - - // key manager checks - if (checkKeys) { - keyManagerPreChecks.accept(keyManager, config); - } - - // trust manager checks - if (checkTrust) { - trustManagerPreChecks.accept(trustManager, config); - } + // Baseline checks + preChecks.accept(sslService.sslContextHolder(config).sslContext()); assertEquals("nothing should have called reload", 1, reloadLatch.getCount()); // modify modificationFunction.run(); reloadLatch.await(); - - // check key manager - if (checkKeys) { - final X509ExtendedKeyManager updatedKeyManager = sslService.sslContextHolder(config).keyManager().getKeyManager(); - assertThat(updatedKeyManager, not(sameInstance(keyManager))); - keyManagerPostChecks.accept(updatedKeyManager, config); - 
} - - // check trust manager - if (checkTrust) { - final X509ExtendedTrustManager updatedTrustManager = sslService.sslContextHolder(config).trustManager().getTrustManager(); - assertThat(updatedTrustManager, not(sameInstance(trustManager))); - trustManagerPostChecks.accept(updatedTrustManager, config); - } + // checks after reload + postChecks.accept(sslService.sslContextHolder(config).sslContext()); } private static void atomicMoveIfPossible(Path source, Path target) throws IOException { @@ -547,4 +508,41 @@ private static void atomicMoveIfPossible(Path source, Path target) throws IOExce Files.move(source, target, StandardCopyOption.REPLACE_EXISTING); } } + + private static MockWebServer getSslServer(Path keyStorePath, String keyStorePass) throws KeyStoreException, CertificateException, + NoSuchAlgorithmException, IOException, KeyManagementException, UnrecoverableKeyException { + KeyStore keyStore = KeyStore.getInstance(KeyStore.getDefaultType()); + try(InputStream is = Files.newInputStream(keyStorePath)) { + keyStore.load(is, keyStorePass.toCharArray()); + } + final SSLContext sslContext = new SSLContextBuilder().loadKeyMaterial(keyStore, keyStorePass.toCharArray()) + .build(); + MockWebServer server = new MockWebServer(sslContext, false); + server.enqueue(new MockResponse().setResponseCode(200).setBody("body")); + server.start(); + return server; + } + + private static CloseableHttpClient getSSLClient(Path trustStorePath, String trustStorePass) throws KeyStoreException, + NoSuchAlgorithmException, + KeyManagementException, IOException, CertificateException { + KeyStore trustStore = KeyStore.getInstance(KeyStore.getDefaultType()); + try(InputStream is = Files.newInputStream(trustStorePath)) { + trustStore.load(is, trustStorePass.toCharArray()); + } + final SSLContext sslContext = new SSLContextBuilder().loadTrustMaterial(trustStore, null).build(); + return HttpClients.custom().setSSLContext(sslContext).build(); + } + + private static void privilegedConnect(CheckedRunnable runnable) throws Exception { + try { + AccessController.doPrivileged((PrivilegedExceptionAction) () -> { + runnable.run(); + return null; + }); + } catch (PrivilegedActionException e) { + throw (Exception) e.getCause(); + } + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java index 598a0f8a77ada..bcb4b63865432 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/SSLServiceTests.java @@ -32,16 +32,20 @@ import org.mockito.ArgumentCaptor; import javax.net.ssl.HostnameVerifier; +import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLParameters; import javax.net.ssl.SSLSocket; import javax.net.ssl.SSLSocketFactory; +import javax.net.ssl.TrustManager; +import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509ExtendedTrustManager; import java.net.Socket; import java.nio.file.Path; import java.security.AccessController; +import java.security.KeyStore; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; import java.security.cert.CertificateException; @@ -446,22 +450,11 @@ public void testSSLStrategy() { } public void testEmptyTrustManager() throws Exception { - X509ExtendedTrustManager trustManager = new SSLService.EmptyX509TrustManager(); + 
Settings settings = Settings.builder().build();
+ final SSLService sslService = new SSLService(settings, env);
+ SSLConfiguration sslConfig = new SSLConfiguration(settings);
+ X509ExtendedTrustManager trustManager = sslService.sslContextHolder(sslConfig).getEmptyTrustManager();
 assertThat(trustManager.getAcceptedIssuers(), emptyArray());
- final String message = "no certificates are trusted";
- CertificateException ce =
- expectThrows(CertificateException.class, () -> trustManager.checkClientTrusted(null, null, (Socket) null));
- assertEquals(message, ce.getMessage());
- ce = expectThrows(CertificateException.class, () -> trustManager.checkClientTrusted(null, null, (SSLEngine) null));
- assertEquals(message, ce.getMessage());
- ce = expectThrows(CertificateException.class, () -> trustManager.checkClientTrusted(null, null));
- assertEquals(message, ce.getMessage());
- ce = expectThrows(CertificateException.class, () -> trustManager.checkServerTrusted(null, null, (Socket) null));
- assertEquals(message, ce.getMessage());
- ce = expectThrows(CertificateException.class, () -> trustManager.checkServerTrusted(null, null, (SSLEngine) null));
- assertEquals(message, ce.getMessage());
- ce = expectThrows(CertificateException.class, () -> trustManager.checkServerTrusted(null, null));
- assertEquals(message, ce.getMessage());
 }

 public void testReadCertificateInformation() throws Exception {

From 75ecf58a8b7f2dedb6f4efc99293e4f5fe07504e Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas
Date: Wed, 16 May 2018 14:43:02 +0300
Subject: [PATCH 18/44] SAML: Process only signed data (#30420)

In conformance with best practices, this change ensures that if a SAML
Response is signed, we verify the signature before processing it any
further. We were only checking the InResponseTo and Destination
attributes before potential signature validation, but there was no
reason to do that up front either.

---
 .../authc/saml/SamlAuthenticator.java | 26 +++++++++----------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java
index 93bbe2c1a7567..f8826bebcac71 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/saml/SamlAuthenticator.java
@@ -87,6 +87,14 @@ private SamlAttributes authenticateResponse(Element element, Collection
 if (logger.isTraceEnabled()) { logger.trace(SamlUtils.describeSamlObject(response)); }
+ final boolean requireSignedAssertions;
+ if (response.isSigned()) {
+ validateSignature(response.getSignature());
+ requireSignedAssertions = false;
+ } else {
+ requireSignedAssertions = true;
+ }
+
 if (Strings.hasText(response.getInResponseTo()) && allowedSamlRequestIds.contains(response.getInResponseTo()) == false) {
 logger.debug("The SAML Response with ID {} is unsolicited.
A user might have used a stale URL or the Identity Provider " + "incorrectly populates the InResponseTo attribute", response.getID()); @@ -102,10 +110,10 @@ private SamlAttributes authenticateResponse(Element element, Collection throw samlException("SAML Response is not a 'success' response: Code={} Message={} Detail={}", status.getStatusCode().getValue(), getMessage(status), getDetail(status)); } - + checkIssuer(response.getIssuer(), response); checkResponseDestination(response); - Tuple> details = extractDetails(response, allowedSamlRequestIds); + Tuple> details = extractDetails(response, allowedSamlRequestIds, requireSignedAssertions); final Assertion assertion = details.v1(); final SamlNameId nameId = SamlNameId.forSubject(assertion.getSubject()); final String session = getSessionIndex(assertion); @@ -156,17 +164,8 @@ private void checkResponseDestination(Response response) { } } - private Tuple> extractDetails(Response response, Collection allowedSamlRequestIds) { - final boolean requireSignedAssertions; - if (response.isSigned()) { - validateSignature(response.getSignature()); - requireSignedAssertions = false; - } else { - requireSignedAssertions = true; - } - - checkIssuer(response.getIssuer(), response); - + private Tuple> extractDetails(Response response, Collection allowedSamlRequestIds, + boolean requireSignedAssertions) { final int assertionCount = response.getAssertions().size() + response.getEncryptedAssertions().size(); if (assertionCount > 1) { throw samlException("Expecting only 1 assertion, but response contains multiple (" + assertionCount + ")"); @@ -328,5 +327,4 @@ private void checkLifetimeRestrictions(Conditions conditions) { private void checkLifetimeRestrictions(SubjectConfirmationData subjectConfirmationData) { validateNotOnOrAfter(subjectConfirmationData.getNotOnOrAfter()); } - } From 0aafb060c640ae0bb230e69fed088e702ea2a2ec Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Wed, 16 May 2018 14:50:05 +0300 Subject: [PATCH 19/44] Fix the mishandled backport 873d380 (#30638) Closes #30738 --- .../gcs/GoogleCloudStorageRepository.java | 46 ++++++++++++++++++- .../gcs/GoogleCloudStorageService.java | 18 ++++++-- ...eCloudStorageBlobStoreRepositoryTests.java | 6 ++- ...loudStorageRepositoryDeprecationTests.java | 4 +- .../gcs/GoogleCloudStorageServiceTests.java | 33 ++++++++++--- 5 files changed, 91 insertions(+), 16 deletions(-) diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java index 422d7a308f260..d261d738e5eee 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java +++ b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepository.java @@ -19,6 +19,7 @@ package org.elasticsearch.repositories.gcs; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobPath; @@ -28,6 +29,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.repositories.RepositoryException; @@ -46,12 +48,17 @@ 
class GoogleCloudStorageRepository extends BlobStoreRepository { + private final Logger logger = ESLoggerFactory.getLogger(GoogleCloudStorageRepository.class); + private final DeprecationLogger deprecationLogger = new DeprecationLogger(logger); + // package private for testing static final ByteSizeValue MIN_CHUNK_SIZE = new ByteSizeValue(1, ByteSizeUnit.BYTES); static final ByteSizeValue MAX_CHUNK_SIZE = new ByteSizeValue(100, ByteSizeUnit.MB); static final String TYPE = "gcs"; + static final TimeValue NO_TIMEOUT = timeValueMillis(-1); + static final Setting BUCKET = simpleString("bucket", Property.NodeScope, Property.Dynamic); static final Setting BASE_PATH = @@ -62,6 +69,18 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { byteSizeSetting("chunk_size", MAX_CHUNK_SIZE, MIN_CHUNK_SIZE, MAX_CHUNK_SIZE, Property.NodeScope, Property.Dynamic); static final Setting CLIENT_NAME = new Setting<>("client", "default", Function.identity()); + @Deprecated + static final Setting APPLICATION_NAME = + new Setting<>("application_name", "", Function.identity(), Property.NodeScope, Property.Dynamic); + + @Deprecated + static final Setting HTTP_READ_TIMEOUT = + timeSetting("http.read_timeout", NO_TIMEOUT, Property.NodeScope, Property.Dynamic); + + @Deprecated + static final Setting HTTP_CONNECT_TIMEOUT = + timeSetting("http.connect_timeout", NO_TIMEOUT, Property.NodeScope, Property.Dynamic); + private final ByteSizeValue chunkSize; private final boolean compress; private final BlobPath basePath; @@ -90,7 +109,32 @@ class GoogleCloudStorageRepository extends BlobStoreRepository { logger.debug("using bucket [{}], base_path [{}], chunk_size [{}], compress [{}]", bucket, basePath, chunkSize, compress); - Storage client = SocketAccess.doPrivilegedIOException(() -> storageService.createClient(clientName)); + String application = APPLICATION_NAME.get(metadata.settings()); + if (Strings.hasText(application)) { + deprecationLogger.deprecated("Setting [application_name] in repository settings is deprecated, " + + "it must be specified in the client settings instead"); + } + TimeValue connectTimeout = null; + TimeValue readTimeout = null; + + TimeValue timeout = HTTP_CONNECT_TIMEOUT.get(metadata.settings()); + if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { + deprecationLogger.deprecated("Setting [http.connect_timeout] in repository settings is deprecated, " + + "it must be specified in the client settings instead"); + connectTimeout = timeout; + } + timeout = HTTP_READ_TIMEOUT.get(metadata.settings()); + if ((timeout != null) && (timeout.millis() != NO_TIMEOUT.millis())) { + deprecationLogger.deprecated("Setting [http.read_timeout] in repository settings is deprecated, " + + "it must be specified in the client settings instead"); + readTimeout = timeout; + } + + TimeValue finalConnectTimeout = connectTimeout; + TimeValue finalReadTimeout = readTimeout; + + Storage client = SocketAccess.doPrivilegedIOException(() -> + storageService.createClient(clientName, application, finalConnectTimeout, finalReadTimeout)); this.blobStore = new GoogleCloudStorageBlobStore(settings, bucket, client); } diff --git a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java index 5a52fff463499..d3fa18ead0754 100644 --- a/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java +++ 
b/plugins/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageService.java @@ -28,6 +28,7 @@ import com.google.cloud.storage.Storage; import com.google.cloud.storage.StorageOptions; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.MapBuilder; import org.elasticsearch.common.component.AbstractComponent; @@ -55,9 +56,15 @@ public GoogleCloudStorageService(final Environment environment, final Map httpTransport) .build(); final StorageOptions.Builder storageOptionsBuilder = StorageOptions.newBuilder() .setTransportOptions(httpTransportOptions) .setHeaderProvider(() -> { final MapBuilder mapBuilder = MapBuilder.newMapBuilder(); - if (Strings.hasLength(clientSettings.getApplicationName())) { - mapBuilder.put("user-agent", clientSettings.getApplicationName()); + final String applicationName = Strings.hasLength(application) ? application : clientSettings.getApplicationName(); + if (Strings.hasLength(applicationName)) { + mapBuilder.put("user-agent", applicationName); } return mapBuilder.immutableMap(); }); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java index d02100f63cc41..6ed67c1a26947 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStoreRepositoryTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase; @@ -85,7 +86,10 @@ public static class MockGoogleCloudStorageService extends GoogleCloudStorageServ } @Override - public Storage createClient(final String clientName) { + public Storage createClient(final String clientName, + final String application, + final TimeValue connectTimeout, + final TimeValue readTimeout) { return new MockStorage(BUCKET, blobs); } } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java index daf761a74dd43..e1f91eb1d31d3 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageRepositoryDeprecationTests.java @@ -22,6 +22,7 @@ import com.google.cloud.storage.Storage; import org.elasticsearch.cluster.metadata.RepositoryMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; import org.elasticsearch.env.TestEnvironment; @@ -31,7 +32,6 @@ public class GoogleCloudStorageRepositoryDeprecationTests extends ESTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/30638") public void 
testDeprecatedSettings() throws Exception { final Settings repositorySettings = Settings.builder() .put("bucket", "test") @@ -46,7 +46,7 @@ public void testDeprecatedSettings() throws Exception { new GoogleCloudStorageRepository(repositoryMetaData, environment, NamedXContentRegistry.EMPTY, new GoogleCloudStorageService(environment, GoogleCloudStorageClientSettings.load(Settings.EMPTY)) { @Override - public Storage createClient(String clientName) throws Exception { + public Storage createClient(String clientName, String application, TimeValue connect, TimeValue read) throws Exception { return new MockStorage("test", new ConcurrentHashMap<>()); } }); diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index a33ae90c549bc..4e87031a630b2 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -59,19 +59,38 @@ public void testClientInitializer() throws Exception { final GoogleCloudStorageClientSettings clientSettings = GoogleCloudStorageClientSettings.getClientSettings(settings, clientName); final GoogleCloudStorageService service = new GoogleCloudStorageService(environment, Collections.singletonMap(clientName, clientSettings)); - final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> service.createClient("another_client")); + final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, + () -> service.createClient("another_client", null, null, null)); assertThat(e.getMessage(), Matchers.startsWith("Unknown client name")); assertSettingDeprecationsAndWarnings( new Setting[] { GoogleCloudStorageClientSettings.APPLICATION_NAME_SETTING.getConcreteSettingForNamespace(clientName) }); - final Storage storage = service.createClient(clientName); - assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName)); + final String deprecatedApplicationName = randomBoolean() ? null : "deprecated_" + randomAlphaOfLength(4); + final TimeValue deprecatedConnectTimeout = randomBoolean() ? null : TimeValue.timeValueNanos(randomIntBetween(0, 2000000)); + final TimeValue deprecatedReadTimeout = randomBoolean() ? 
null : TimeValue.timeValueNanos(randomIntBetween(0, 2000000));
+ final Storage storage = service.createClient(clientName, deprecatedApplicationName, deprecatedConnectTimeout,
+ deprecatedReadTimeout);
+ if (deprecatedApplicationName != null) {
+ assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(deprecatedApplicationName));
+ } else {
+ assertThat(storage.getOptions().getApplicationName(), Matchers.containsString(applicationName));
+ }
 assertThat(storage.getOptions().getHost(), Matchers.is(hostName));
 assertThat(storage.getOptions().getProjectId(), Matchers.is(projectIdName));
 assertThat(storage.getOptions().getTransportOptions(), Matchers.instanceOf(HttpTransportOptions.class));
- assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(),
- Matchers.is((int) connectTimeValue.millis()));
- assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(),
- Matchers.is((int) readTimeValue.millis()));
+ if (deprecatedConnectTimeout != null) {
+ assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(),
+ Matchers.is((int) deprecatedConnectTimeout.millis()));
+ } else {
+ assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getConnectTimeout(),
+ Matchers.is((int) connectTimeValue.millis()));
+ }
+ if (deprecatedReadTimeout != null) {
+ assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(),
+ Matchers.is((int) deprecatedReadTimeout.millis()));
+ } else {
+ assertThat(((HttpTransportOptions) storage.getOptions().getTransportOptions()).getReadTimeout(),
+ Matchers.is((int) readTimeValue.millis()));
+ }
 assertThat(storage.getOptions().getCredentials(), Matchers.nullValue(Credentials.class));
 }

From 5f16852ccf0d0c048cd22cf60bd7c2925ff30300 Mon Sep 17 00:00:00 2001
From: Boaz Leskes
Date: Wed, 16 May 2018 15:07:28 +0200
Subject: [PATCH 20/44] mute IndicesOptionsTests.testSerialization

See https://github.com/elastic/elasticsearch/pull/30644

---
 .../org/elasticsearch/action/support/IndicesOptionsTests.java | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java
index 315af13133d30..3de71a83f6972 100644
--- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java
+++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java
@@ -28,6 +28,8 @@ import static org.hamcrest.CoreMatchers.equalTo;
 public class IndicesOptionsTests extends ESTestCase {
+
+ @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30644")
 public void testSerialization() throws Exception {
 int iterations = randomIntBetween(5, 20);
 for (int i = 0; i < iterations; i++) {

From 6833f08a94da68917c24797b259941e1791139a1 Mon Sep 17 00:00:00 2001
From: Zachary Tong
Date: Wed, 16 May 2018 10:10:26 -0400
Subject: [PATCH 21/44] Fix bug in BucketMetrics path traversal (#30632)

When processing a top-level sibling pipeline, we destructively sublist
the path by assigning back onto the same variable. But if aggs are
specified as follows:

A. Multi-bucket agg in the first entry of our internal list
B. Regular agg as the immediate child of the multi-bucket in A
C. Regular agg with the same name as B at the top level, listed as the
second entry in our internal list
D. Finally, a pipeline agg with the path down to B

We'll get a class cast exception. The first agg will sublist the path
from [A,B] to [B], and then when we loop around to check agg C, the
sublisted path [B] matches the name of C and it fails.

The fix is simple: we just need to store the sublist in a new object so
that the old path remains valid for the rest of the aggs in the loop.
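As a minimal sketch of the aliasing hazard (hedged: the variable names
mirror doReduce() in the diff below; the surrounding loop and generic
types are elided):

    // buggy: reassigning onto the loop-shared variable means every later
    // iteration compares against the shortened path, so a top-level agg
    // named like the second path element also matches bucketsPath.get(0)
    // and is then cast to InternalMultiBucketAggregation -> ClassCastException
    bucketsPath = bucketsPath.subList(1, bucketsPath.size());

    // fixed: keep the shortened path in its own variable; the original
    // bucketsPath stays intact for the remaining aggs in the loop
    List<String> sublistedPath = bucketsPath.subList(1, bucketsPath.size());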
Closes #30608

---
 .../BucketMetricsPipelineAggregator.java | 4 +-
 .../avg/AvgBucketAggregatorTests.java | 144 ++++++++++++++++++
 2 files changed, 146 insertions(+), 2 deletions(-)
 create mode 100644 server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java

diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java
index 413862d3f1d2b..981b21346ade9 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/BucketMetricsPipelineAggregator.java
@@ -79,11 +79,11 @@ public final InternalAggregation doReduce(Aggregations aggregations, ReduceConte
 List bucketsPath = AggregationPath.parse(bucketsPaths()[0]).getPathElementsAsStringList();
 for (Aggregation aggregation : aggregations) {
 if (aggregation.getName().equals(bucketsPath.get(0))) {
- bucketsPath = bucketsPath.subList(1, bucketsPath.size());
+ List sublistedPath = bucketsPath.subList(1, bucketsPath.size());
 InternalMultiBucketAggregation multiBucketsAgg = (InternalMultiBucketAggregation) aggregation;
 List buckets = multiBucketsAgg.getBuckets();
 for (InternalMultiBucketAggregation.InternalBucket bucket : buckets) {
- Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, bucketsPath, gapPolicy);
+ Double bucketValue = BucketHelpers.resolveBucketValue(multiBucketsAgg, bucket, sublistedPath, gapPolicy);
 if (bucketValue != null && !Double.isNaN(bucketValue)) {
 collectBucketValue(bucket.getKeyAsString(), bucketValue);
 }

diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java
new file mode 100644
index 0000000000000..ba719219ee53b
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/search/aggregations/pipeline/bucketmetrics/avg/AvgBucketAggregatorTests.java
@@ -0,0 +1,144 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package org.elasticsearch.search.aggregations.pipeline.bucketmetrics.avg; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.store.Directory; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.NumberFieldMapper; +import org.elasticsearch.search.aggregations.Aggregation; +import org.elasticsearch.search.aggregations.Aggregations; +import org.elasticsearch.search.aggregations.AggregatorTestCase; +import org.elasticsearch.search.aggregations.InternalAggregation; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.histogram.InternalDateHistogram; +import org.elasticsearch.search.aggregations.metrics.avg.AvgAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.avg.InternalAvg; +import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + + +public class AvgBucketAggregatorTests extends AggregatorTestCase { + private static final String DATE_FIELD = "date"; + private static final String VALUE_FIELD = "value"; + + private static final List dataset = Arrays.asList( + "2010-03-12T01:07:45", + "2010-04-27T03:43:34", + "2012-05-18T04:11:00", + "2013-05-29T05:11:31", + "2013-10-31T08:24:05", + "2015-02-13T13:09:32", + "2015-06-24T13:47:43", + "2015-11-13T16:14:34", + "2016-03-04T17:09:50", + "2017-12-12T22:55:46"); + + /** + * Test for issue #30608. Under the following circumstances: + * + * A. Multi-bucket agg in the first entry of our internal list + * B. Regular agg as the immediate child of the multi-bucket in A + * C. Regular agg with the same name as B at the top level, listed as the second entry in our internal list + * D. Finally, a pipeline agg with the path down to B + * + * BucketMetrics reduction would throw a class cast exception due to bad subpathing. This test ensures + * it is fixed. 
+ * + * Note: we have this test inside of the `avg_bucket` package so that we can get access to the package-private + * `doReduce()` needed for testing this + */ + public void testSameAggNames() throws IOException { + Query query = new MatchAllDocsQuery(); + + AvgAggregationBuilder avgBuilder = new AvgAggregationBuilder("foo").field(VALUE_FIELD); + DateHistogramAggregationBuilder histo = new DateHistogramAggregationBuilder("histo") + .dateHistogramInterval(DateHistogramInterval.YEAR) + .field(DATE_FIELD) + .subAggregation(new AvgAggregationBuilder("foo").field(VALUE_FIELD)); + + AvgBucketPipelineAggregationBuilder avgBucketBuilder + = new AvgBucketPipelineAggregationBuilder("the_avg_bucket", "histo>foo"); + + try (Directory directory = newDirectory()) { + try (RandomIndexWriter indexWriter = new RandomIndexWriter(random(), directory)) { + Document document = new Document(); + for (String date : dataset) { + if (frequently()) { + indexWriter.commit(); + } + + document.add(new SortedNumericDocValuesField(DATE_FIELD, asLong(date))); + document.add(new SortedNumericDocValuesField(VALUE_FIELD, randomInt())); + indexWriter.addDocument(document); + document.clear(); + } + } + + InternalAvg avgResult; + InternalDateHistogram histogramResult; + try (IndexReader indexReader = DirectoryReader.open(directory)) { + IndexSearcher indexSearcher = newSearcher(indexReader, true, true); + + DateFieldMapper.Builder builder = new DateFieldMapper.Builder("histo"); + DateFieldMapper.DateFieldType fieldType = builder.fieldType(); + fieldType.setHasDocValues(true); + fieldType.setName(DATE_FIELD); + + MappedFieldType valueFieldType = new NumberFieldMapper.NumberFieldType(NumberFieldMapper.NumberType.LONG); + valueFieldType.setName(VALUE_FIELD); + valueFieldType.setHasDocValues(true); + + avgResult = searchAndReduce(indexSearcher, query, avgBuilder, 10000, new MappedFieldType[]{fieldType, valueFieldType}); + histogramResult = searchAndReduce(indexSearcher, query, histo, 10000, new MappedFieldType[]{fieldType, valueFieldType}); + } + + // Finally, reduce the pipeline agg + PipelineAggregator avgBucketAgg = avgBucketBuilder.createInternal(Collections.emptyMap()); + List reducedAggs = new ArrayList<>(2); + + // Histo has to go first to exercise the bug + reducedAggs.add(histogramResult); + reducedAggs.add(avgResult); + Aggregations aggregations = new Aggregations(reducedAggs); + InternalAggregation pipelineResult = ((AvgBucketPipelineAggregator)avgBucketAgg).doReduce(aggregations, null); + assertNotNull(pipelineResult); + } + } + + + private static long asLong(String dateTime) { + return DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parser().parseDateTime(dateTime).getMillis(); + } +} From 250783508734de3bf5124ebd79d8a586ec3a45e0 Mon Sep 17 00:00:00 2001 From: Jay Modi Date: Wed, 16 May 2018 08:21:15 -0600 Subject: [PATCH 22/44] Template upgrades should happen in a system context (#30621) The TemplateUpgradeService is a system service that allows for plugins to register templates that need to be upgraded. These template upgrades should always happen in a system context as they are not a user initiated action. For security integrations, the lack of running this in a system context could lead to unexpected failures. The changes in this commit set an empty system context for the execution of the template upgrades performed by this service. 
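As a sketch of the stash-and-mark pattern this commit applies (it mirrors
the change to TemplateUpgradeService in the diff below; `changes` and
`deletions` stand in for the real arguments):

    final ThreadContext threadContext = threadPool.getThreadContext();
    // stashContext() saves the current (possibly user-attributed) context
    // and installs a clean one; closing the StoredContext restores the
    // original context when the block exits
    try (ThreadContext.StoredContext ignore = threadContext.stashContext()) {
        // mark the clean context as system-level before forking the work
        threadContext.markAsSystemContext();
        threadPool.generic().execute(() -> updateTemplates(changes, deletions));
    }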
Relates #30603 --- .../cluster/metadata/TemplateUpgradeService.java | 16 +++++++++++++--- .../metadata/TemplateUpgradeServiceTests.java | 14 +++++++++++++- 2 files changed, 26 insertions(+), 4 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java index c0d8d1ceab6d5..3bdc949752afa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java @@ -41,6 +41,7 @@ import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; @@ -128,12 +129,21 @@ public void clusterChanged(ClusterChangedEvent event) { Version.CURRENT, changes.get().v1().size(), changes.get().v2().size()); - threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2())); + + final ThreadContext threadContext = threadPool.getThreadContext(); + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + threadContext.markAsSystemContext(); + threadPool.generic().execute(() -> updateTemplates(changes.get().v1(), changes.get().v2())); + } } } } void updateTemplates(Map changes, Set deletions) { + if (threadPool.getThreadContext().isSystemContext() == false) { + throw new IllegalStateException("template updates from the template upgrade service should always happen in a system context"); + } + for (Map.Entry change : changes.entrySet()) { PutIndexTemplateRequest request = new PutIndexTemplateRequest(change.getKey()).source(change.getValue(), XContentType.JSON); @@ -141,7 +151,7 @@ void updateTemplates(Map changes, Set deletions) client.admin().indices().putTemplate(request, new ActionListener() { @Override public void onResponse(PutIndexTemplateResponse response) { - if(updatesInProgress.decrementAndGet() == 0) { + if (updatesInProgress.decrementAndGet() == 0) { logger.info("Finished upgrading templates to version {}", Version.CURRENT); } if (response.isAcknowledged() == false) { @@ -151,7 +161,7 @@ public void onResponse(PutIndexTemplateResponse response) { @Override public void onFailure(Exception e) { - if(updatesInProgress.decrementAndGet() == 0) { + if (updatesInProgress.decrementAndGet() == 0) { logger.info("Templates were upgraded to version {}", Version.CURRENT); } logger.warn(new ParameterizedMessage("Error updating template [{}]", change.getKey()), e); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java index 2e82397767fc4..e46f2e06fe16d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java @@ -38,6 +38,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ 
-61,6 +62,7 @@
 import static org.elasticsearch.test.VersionUtils.randomVersion;
 import static org.hamcrest.CoreMatchers.nullValue;
 import static org.hamcrest.CoreMatchers.startsWith;
+import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
@@ -188,9 +190,16 @@ public void testUpdateTemplates() {
 additions.put("add_template_" + i, new BytesArray("{\"index_patterns\" : \"*\", \"order\" : " + i + "}"));
 }
- TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, null,
+ ThreadPool threadPool = mock(ThreadPool.class);
+ ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
+ when(threadPool.getThreadContext()).thenReturn(threadContext);
+ TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, mockClient, clusterService, threadPool,
 Collections.emptyList());
+ IllegalStateException ise = expectThrows(IllegalStateException.class, () -> service.updateTemplates(additions, deletions));
+ assertThat(ise.getMessage(), containsString("template upgrade service should always happen in a system context"));
+
+ threadContext.markAsSystemContext();
 service.updateTemplates(additions, deletions);
 int updatesInProgress = service.getUpdatesInProgress();
@@ -241,11 +250,14 @@ public void testClusterStateUpdate() {
 );
 ThreadPool threadPool = mock(ThreadPool.class);
+ ThreadContext threadContext = new ThreadContext(Settings.EMPTY);
+ when(threadPool.getThreadContext()).thenReturn(threadContext);
 ExecutorService executorService = mock(ExecutorService.class);
 when(threadPool.generic()).thenReturn(executorService);
 doAnswer(invocation -> {
 Object[] args = invocation.getArguments();
 assert args.length == 1;
+ assertTrue(threadContext.isSystemContext());
 Runnable runnable = (Runnable) args[0];
 runnable.run();
 updateInvocation.incrementAndGet();

From 836006a1d226c96114ed66fb198ec725f1fc0254 Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Wed, 16 May 2018 11:24:55 -0400
Subject: [PATCH 23/44] Do not use hard-coded ports in tribe tests (#30637)

This commit addresses two collections of tribe node tests that were
implicitly using hard-coded ports. Since no ports were specified (in
particular, an ephemeral port was not specified), the tribe nodes would
default to the port range 9300--9304, effectively acting as if they were
using hard-coded ports. If we were unfortunate enough to have one of
these tests fail and leave its JVM sitting around, it could cause
failures in other tests that expect those ports to be free. A follow-up
will address the fact that these tests still try to hit ports 9300,
9301, and 9302; that is a much harder problem given that these are docs
tests, so we defer it for now.
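For background (a hedged aside, not part of this change set): passing a
port of 0 delegates port selection to the operating system, which hands
back a free ephemeral port. A minimal Java illustration of the mechanism:

    import java.net.ServerSocket;

    public class EphemeralPortDemo {
        public static void main(String[] args) throws Exception {
            // Binding to port 0 asks the OS for any free ephemeral port,
            // so concurrent test JVMs cannot collide on a fixed port.
            try (ServerSocket socket = new ServerSocket(0)) {
                System.out.println("bound to port " + socket.getLocalPort());
            }
        }
    }

The settings below apply the same idea by passing '0' for the tribe
nodes' transport.tcp.port and http.port.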
--- x-pack/qa/tribe-tests-with-license/build.gradle | 4 ++++ .../org/elasticsearch/license/TribeTransportTestCase.java | 2 ++ x-pack/qa/tribe-tests-with-security/build.gradle | 4 ++++ .../org/elasticsearch/xpack/security/SecurityTribeTests.java | 2 ++ 4 files changed, 12 insertions(+) diff --git a/x-pack/qa/tribe-tests-with-license/build.gradle b/x-pack/qa/tribe-tests-with-license/build.gradle index 92e84b18214da..a8cc44f85b261 100644 --- a/x-pack/qa/tribe-tests-with-license/build.gradle +++ b/x-pack/qa/tribe-tests-with-license/build.gradle @@ -82,6 +82,8 @@ integTestCluster { setting 'tribe.cluster1.cluster.name', 'cluster1' setting 'tribe.cluster1.discovery.zen.ping.unicast.hosts', "'${-> cluster1Nodes.get(0).transportUri()}'" setting 'tribe.cluster1.http.enabled', 'true' + setting 'tribe.cluster1.http.port', '0' + setting 'tribe.cluster1.transport.tcp.port', '0' setting 'tribe.cluster1.xpack.monitoring.enabled', false setting 'tribe.cluster1.xpack.monitoring.enabled', false setting 'tribe.cluster1.xpack.security.enabled', false @@ -92,6 +94,8 @@ integTestCluster { setting 'tribe.cluster2.cluster.name', 'cluster2' setting 'tribe.cluster2.discovery.zen.ping.unicast.hosts', "'${-> cluster2Nodes.get(0).transportUri()}'" setting 'tribe.cluster2.http.enabled', 'true' + setting 'tribe.cluster1.http.port', '0' + setting 'tribe.cluster1.transport.tcp.port', '0' setting 'tribe.cluster2.xpack.monitoring.enabled', false setting 'tribe.cluster2.xpack.monitoring.enabled', false setting 'tribe.cluster2.xpack.security.enabled', false diff --git a/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java b/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java index 5ffba4fa46ed0..f9b836d18cc44 100644 --- a/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java +++ b/x-pack/qa/tribe-tests-with-license/src/test/java/org/elasticsearch/license/TribeTransportTestCase.java @@ -199,6 +199,8 @@ public Collection> transportClientPlugins() { Settings merged = Settings.builder() .put("tribe.t1.cluster.name", internalCluster().getClusterName()) .put("tribe.t2.cluster.name", cluster2.getClusterName()) + .put("tribe.t1.transport.tcp.port", 0) + .put("tribe.t2.transport.tcp.port", 0) .put("tribe.t1.transport.type", getTestTransportType()) .put("tribe.t2.transport.type", getTestTransportType()) .put("tribe.blocks.write", false) diff --git a/x-pack/qa/tribe-tests-with-security/build.gradle b/x-pack/qa/tribe-tests-with-security/build.gradle index 4356e8ea9b0b7..2344ff7ed1fcf 100644 --- a/x-pack/qa/tribe-tests-with-security/build.gradle +++ b/x-pack/qa/tribe-tests-with-security/build.gradle @@ -92,11 +92,15 @@ integTestCluster { setting 'tribe.cluster1.cluster.name', 'cluster1' setting 'tribe.cluster1.discovery.zen.ping.unicast.hosts', "'${-> cluster1Nodes.get(0).transportUri()}'" setting 'tribe.cluster1.http.enabled', 'true' + setting 'tribe.cluster1.http.port', '0' + setting 'tribe.cluster1.transport.tcp.port', '0' setting 'tribe.cluster1.xpack.security.enabled', 'true' setting 'tribe.cluster1.xpack.ml.enabled', 'false' setting 'tribe.cluster2.cluster.name', 'cluster2' setting 'tribe.cluster2.discovery.zen.ping.unicast.hosts', "'${-> cluster2Nodes.get(0).transportUri()}'" setting 'tribe.cluster2.http.enabled', 'true' + setting 'tribe.cluster2.http.port', '0' + setting 'tribe.cluster2.transport.tcp.port', '0' setting 'tribe.cluster2.xpack.security.enabled', 'true' setting 
'tribe.cluster2.xpack.ml.enabled', 'false'
 keystoreSetting 'bootstrap.password', 'x-pack-test-password'

diff --git a/x-pack/qa/tribe-tests-with-security/src/test/java/org/elasticsearch/xpack/security/SecurityTribeTests.java b/x-pack/qa/tribe-tests-with-security/src/test/java/org/elasticsearch/xpack/security/SecurityTribeTests.java
index d14e76f223ffe..ce7d587d10e76 100644
--- a/x-pack/qa/tribe-tests-with-security/src/test/java/org/elasticsearch/xpack/security/SecurityTribeTests.java
+++ b/x-pack/qa/tribe-tests-with-security/src/test/java/org/elasticsearch/xpack/security/SecurityTribeTests.java
@@ -274,7 +274,9 @@ public Settings nodeSettings(int nodeOrdinal) {
 .put(internalCluster().getDefaultSettings())
 .put(tribeSettings, false)
 .put("tribe.t1.cluster.name", internalCluster().getClusterName())
+ .put("tribe.t1.transport.tcp.port", 0)
 .put("tribe.t2.cluster.name", cluster2.getClusterName())
+ .put("tribe.t2.transport.tcp.port", 0)
 .put("tribe.blocks.write", false)
 .put("tribe.on_conflict", "prefer_t1")
 .put(tribe1Defaults.build())

From 83f839f67d5a1167353c6a103e316d52415c4cf9 Mon Sep 17 00:00:00 2001
From: Lee Hinman
Date: Tue, 15 May 2018 15:03:08 -0600
Subject: [PATCH 24/44] Refactor IndicesOptions to not be byte-based (#30586)

* Refactor IndicesOptions to not be byte-based

This refactors IndicesOptions to be enum/EnumSet based rather than using
a byte as a bitmap with one bit per option. This is necessary because
we'd like to add additional options, but we ran out of bits. Backwards
compatibility is kept for earlier versions, so the serialized form of
the options does not change on the wire.

Relates sort of to #30188

---
 .../action/support/IndicesOptions.java | 318 ++++++++++++------
 .../common/io/stream/StreamInput.java | 18 +
 .../common/io/stream/StreamOutput.java | 11 +
 .../ClusterSearchShardsRequestTests.java | 2 +-
 .../action/support/IndicesOptionsTests.java | 82 ++++-
 5 files changed, 328 insertions(+), 103 deletions(-)

diff --git a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
index b4db289148b1c..be9b2e931ffb1 100644
--- a/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
+++ b/server/src/main/java/org/elasticsearch/action/support/IndicesOptions.java
@@ -25,7 +25,11 @@ import org.elasticsearch.rest.RestRequest;
 import java.io.IOException;
+import java.util.Collection;
+import java.util.EnumSet;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
@@ -36,41 +40,155 @@ */
 public class IndicesOptions {
- private static final IndicesOptions[] VALUES;
+ public enum WildcardStates {
+ OPEN,
+ CLOSED;
- private static final byte IGNORE_UNAVAILABLE = 1;
- private static final byte ALLOW_NO_INDICES = 2;
- private static final byte EXPAND_WILDCARDS_OPEN = 4;
- private static final byte EXPAND_WILDCARDS_CLOSED = 8;
- private static final byte FORBID_ALIASES_TO_MULTIPLE_INDICES = 16;
- private static final byte FORBID_CLOSED_INDICES = 32;
- private static final byte IGNORE_ALIASES = 64;
+ public static final EnumSet NONE = EnumSet.noneOf(WildcardStates.class);
- private static final byte STRICT_EXPAND_OPEN = 6;
- private static final byte LENIENT_EXPAND_OPEN = 7;
- private static final byte STRICT_EXPAND_OPEN_CLOSED = 14;
- private static final byte
-    private static final byte STRICT_SINGLE_INDEX_NO_EXPAND_FORBID_CLOSED = 48;
+        public static EnumSet<WildcardStates> parseParameter(Object value, EnumSet<WildcardStates> defaultStates) {
+            if (value == null) {
+                return defaultStates;
+            }

-    static {
-        short max = 1 << 7;
-        VALUES = new IndicesOptions[max];
-        for (short id = 0; id < max; id++) {
-            VALUES[id] = new IndicesOptions((byte)id);
+            Set<WildcardStates> states = new HashSet<>();
+            String[] wildcards = nodeStringArrayValue(value);
+            for (String wildcard : wildcards) {
+                if ("open".equals(wildcard)) {
+                    states.add(OPEN);
+                } else if ("closed".equals(wildcard)) {
+                    states.add(CLOSED);
+                } else if ("none".equals(wildcard)) {
+                    states.clear();
+                } else if ("all".equals(wildcard)) {
+                    states.add(OPEN);
+                    states.add(CLOSED);
+                } else {
+                    throw new IllegalArgumentException("No valid expand wildcard value [" + wildcard + "]");
+                }
+            }
+
+            return states.isEmpty() ? NONE : EnumSet.copyOf(states);
+        }
+    }

-    private final byte id;
+    public enum Option {
+        IGNORE_UNAVAILABLE,
+        IGNORE_ALIASES,
+        ALLOW_NO_INDICES,
+        FORBID_ALIASES_TO_MULTIPLE_INDICES,
+        FORBID_CLOSED_INDICES;
+
+        public static final EnumSet<Option> NONE = EnumSet.noneOf(Option.class);
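To see why enum sets and the old byte constants are interchangeable on the wire, here is a minimal standalone sketch (hypothetical class and enum names; it assumes the bit layout of the constants removed above, e.g. IGNORE_UNAVAILABLE = 1, ALLOW_NO_INDICES = 2, EXPAND_WILDCARDS_OPEN = 4):

import java.util.EnumSet;

public class OptionsBitmapSketch {

    // Hypothetical stand-in for the options above; ordinals mirror the old bit positions.
    enum Opt { IGNORE_UNAVAILABLE, ALLOW_NO_INDICES, EXPAND_WILDCARDS_OPEN, EXPAND_WILDCARDS_CLOSED }

    // Encode an EnumSet into the legacy single-byte bitmap that older nodes expect.
    static byte toByte(EnumSet<Opt> options) {
        byte id = 0;
        for (Opt o : options) {
            id |= 1 << o.ordinal();
        }
        return id;
    }

    // Decode the legacy byte back into an EnumSet.
    static EnumSet<Opt> fromByte(byte id) {
        EnumSet<Opt> options = EnumSet.noneOf(Opt.class);
        for (Opt o : Opt.values()) {
            if ((id & (1 << o.ordinal())) != 0) {
                options.add(o);
            }
        }
        return options;
    }

    public static void main(String[] args) {
        EnumSet<Opt> opts = EnumSet.of(Opt.ALLOW_NO_INDICES, Opt.EXPAND_WILDCARDS_OPEN);
        byte wire = toByte(opts); // 2 | 4 == 6, the value of the old STRICT_EXPAND_OPEN constant
        System.out.println(wire + " -> " + fromByte(wire));
    }
}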

+     * See
+     * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a>
+     */
+    public ListTasksResponse listTasks(ListTasksRequest request, Header... headers) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent,
+                emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously get current tasks using the Task Management API
+     * <p>
+     * See
+     * <a href="https://www.elastic.co/guide/en/elasticsearch/reference/current/tasks.html"> Task Management API on elastic.co</a>
+     */
+    public void listTasksAsync(ListTasksRequest request, ActionListener<ListTasksResponse> listener, Header... headers) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(request, RequestConverters::listTasks, ListTasksResponse::fromXContent,
+                listener, emptySet(), headers);
+    }
 }
diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
index 103e7cd6784d4..facffa4144e35 100644
--- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
+++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java
@@ -29,6 +29,7 @@
 import org.apache.http.entity.ContentType;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.action.DocWriteRequest;
+import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
@@ -79,6 +80,7 @@
 import org.elasticsearch.index.rankeval.RankEvalRequest;
 import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
+import org.elasticsearch.tasks.TaskId;

 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
@@ -580,6 +582,22 @@ static Request clusterPutSettings(ClusterUpdateSettingsRequest clusterUpdateSett
         return request;
     }

+    static Request listTasks(ListTasksRequest listTaskRequest) {
+        if (listTaskRequest.getTaskId() != null && listTaskRequest.getTaskId().isSet()) {
+            throw new IllegalArgumentException("TaskId cannot be used for list tasks request");
+        }
+        Request request = new Request(HttpGet.METHOD_NAME, "/_tasks");
+        Params params = new Params(request);
+        params.withTimeout(listTaskRequest.getTimeout())
+            .withDetailed(listTaskRequest.getDetailed())
+            .withWaitForCompletion(listTaskRequest.getWaitForCompletion())
+            .withParentTaskId(listTaskRequest.getParentTaskId())
+            .withNodes(listTaskRequest.getNodes())
+            .withActions(listTaskRequest.getActions())
+            .putParam("group_by", "none");
+        return request;
+    }
+
     static Request rollover(RolloverRequest rolloverRequest) throws IOException {
         String endpoint = new EndpointBuilder().addPathPart(rolloverRequest.getAlias()).addPathPartAsIs("_rollover")
             .addPathPart(rolloverRequest.getNewIndexName()).build();
@@ -880,6 +898,48 @@ Params withPreserveExisting(boolean preserveExisting) {
         }
         return this;
     }
+
+        Params withDetailed(boolean detailed) {
+            if (detailed) {
+                return putParam("detailed", Boolean.TRUE.toString());
+            }
+            return this;
+        }
+
+        Params withWaitForCompletion(boolean waitForCompletion) {
+            if (waitForCompletion) {
+                return putParam("wait_for_completion", Boolean.TRUE.toString());
+            }
+            return this;
+        }
+
+        Params withNodes(String[] nodes) {
+            if (nodes != null && nodes.length > 0) {
+                return putParam("nodes", String.join(",", nodes));
+            }
+            return this;
+        }
+
+        Params withActions(String[] actions) {
+            if (actions != null && actions.length > 0) {
+                return putParam("actions", String.join(",", actions));
+            }
+            return this;
+        }
+
+        Params withParentTaskId(TaskId parentTaskId) {
+            if (parentTaskId != null && parentTaskId.isSet()) {
+                return putParam("parent_task_id", parentTaskId.toString());
+            }
+            return this;
+        }
+
+        Params withVerify(boolean verify) {
+ if (verify) { + return putParam("verify", Boolean.TRUE.toString()); + } + return this; + } } /** diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java index 9314bb2e36cea..fa3086442f528 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/ClusterClientIT.java @@ -20,6 +20,9 @@ package org.elasticsearch.client; import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; @@ -29,13 +32,16 @@ import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.HashMap; import java.util.Map; +import static java.util.Collections.emptyList; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -105,4 +111,29 @@ public void testClusterUpdateSettingNonExistent() { assertThat(exception.getMessage(), equalTo( "Elasticsearch exception [type=illegal_argument_exception, reason=transient setting [" + setting + "], not recognized]")); } + + public void testListTasks() throws IOException { + ListTasksRequest request = new ListTasksRequest(); + ListTasksResponse response = execute(request, highLevelClient().cluster()::listTasks, highLevelClient().cluster()::listTasksAsync); + + assertThat(response, notNullValue()); + assertThat(response.getNodeFailures(), equalTo(emptyList())); + assertThat(response.getTaskFailures(), equalTo(emptyList())); + // It's possible that there are other tasks except 'cluster:monitor/tasks/lists[n]' and 'action":"cluster:monitor/tasks/lists' + assertThat(response.getTasks().size(), greaterThanOrEqualTo(2)); + boolean listTasksFound = false; + for (TaskGroup taskGroup : response.getTaskGroups()) { + TaskInfo parent = taskGroup.getTaskInfo(); + if ("cluster:monitor/tasks/lists".equals(parent.getAction())) { + assertThat(taskGroup.getChildTasks().size(), equalTo(1)); + TaskGroup childGroup = taskGroup.getChildTasks().iterator().next(); + assertThat(childGroup.getChildTasks().isEmpty(), equalTo(true)); + TaskInfo child = childGroup.getTaskInfo(); + assertThat(child.getAction(), equalTo("cluster:monitor/tasks/lists[n]")); + assertThat(child.getParentTaskId(), equalTo(parent.getTaskId())); + listTasksFound = true; + } + } + assertTrue("List tasks were not found", listTasksFound); + } } diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java index 5e6e6da7ece96..59b3be2796fdd 100644 --- 
a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java @@ -29,6 +29,7 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -103,6 +104,7 @@ import org.elasticsearch.search.rescore.QueryRescorerBuilder; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.completion.CompletionSuggestionBuilder; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.RandomObjects; @@ -130,6 +132,7 @@ import static org.elasticsearch.search.RandomSearchRequestGenerator.randomSearchRequest; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertToXContentEquivalent; import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; public class RequestConvertersTests extends ESTestCase { @@ -176,8 +179,7 @@ public void testMultiGet() throws IOException { int numberOfRequests = randomIntBetween(0, 32); for (int i = 0; i < numberOfRequests; i++) { - MultiGetRequest.Item item = - new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); + MultiGetRequest.Item item = new MultiGetRequest.Item(randomAlphaOfLength(4), randomAlphaOfLength(4), randomAlphaOfLength(4)); if (randomBoolean()) { item.routing(randomAlphaOfLength(4)); } @@ -264,7 +266,7 @@ public void testIndicesExist() { public void testIndicesExistEmptyIndices() { expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest())); - expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[])null))); + expectThrows(IllegalArgumentException.class, () -> RequestConverters.indicesExist(new GetIndexRequest().indices((String[]) null))); } private static void getAndExistsTest(Function requestConverter, String method) { @@ -938,22 +940,21 @@ public void testBulkWithDifferentContentTypes() throws IOException { bulkRequest.add(new IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.SMILE)); bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); - assertEquals("Mismatching content-type found for request with content-type [JSON], " + - "previous requests have content-type [SMILE]", exception.getMessage()); + assertEquals( + "Mismatching content-type found for request with content-type [JSON], " + "previous requests have content-type [SMILE]", + exception.getMessage()); } { BulkRequest bulkRequest = new BulkRequest(); - bulkRequest.add(new IndexRequest("index", "type", "0") - .source(singletonMap("field", "value"), XContentType.JSON)); - bulkRequest.add(new IndexRequest("index", "type", "1") - .source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new 
IndexRequest("index", "type", "0").source(singletonMap("field", "value"), XContentType.JSON)); + bulkRequest.add(new IndexRequest("index", "type", "1").source(singletonMap("field", "value"), XContentType.JSON)); bulkRequest.add(new UpdateRequest("index", "type", "2") .doc(new IndexRequest().source(singletonMap("field", "value"), XContentType.JSON)) - .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE)) - ); + .upsert(new IndexRequest().source(singletonMap("field", "value"), XContentType.SMILE))); IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.bulk(bulkRequest)); - assertEquals("Mismatching content-type found for request with content-type [SMILE], " + - "previous requests have content-type [JSON]", exception.getMessage()); + assertEquals( + "Mismatching content-type found for request with content-type [SMILE], " + "previous requests have content-type [JSON]", + exception.getMessage()); } { XContentType xContentType = randomFrom(XContentType.CBOR, XContentType.YAML); @@ -1023,9 +1024,10 @@ public void testSearch() throws Exception { setRandomIndicesOptions(searchRequest::indicesOptions, searchRequest::indicesOptions, expectedParams); SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - //rarely skip setting the search source completely + // rarely skip setting the search source completely if (frequently()) { - //frequently set the search source to have some content, otherwise leave it empty but still set it + // frequently set the search source to have some content, otherwise leave it + // empty but still set it if (frequently()) { if (randomBoolean()) { searchSourceBuilder.size(randomIntBetween(0, Integer.MAX_VALUE)); @@ -1095,7 +1097,8 @@ public void testMultiSearch() throws IOException { MultiSearchRequest multiSearchRequest = new MultiSearchRequest(); for (int i = 0; i < numberOfSearchRequests; i++) { SearchRequest searchRequest = randomSearchRequest(() -> { - // No need to return a very complex SearchSourceBuilder here, that is tested elsewhere + // No need to return a very complex SearchSourceBuilder here, that is tested + // elsewhere SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); searchSourceBuilder.from(randomInt(10)); searchSourceBuilder.size(randomIntBetween(20, 100)); @@ -1103,14 +1106,13 @@ public void testMultiSearch() throws IOException { }); // scroll is not supported in the current msearch api, so unset it: searchRequest.scroll((Scroll) null); - // only expand_wildcards, ignore_unavailable and allow_no_indices can be specified from msearch api, so unset other options: + // only expand_wildcards, ignore_unavailable and allow_no_indices can be + // specified from msearch api, so unset other options: IndicesOptions randomlyGenerated = searchRequest.indicesOptions(); IndicesOptions msearchDefault = new MultiSearchRequest().indicesOptions(); - searchRequest.indicesOptions(IndicesOptions.fromOptions( - randomlyGenerated.ignoreUnavailable(), randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), - randomlyGenerated.expandWildcardsClosed(), msearchDefault.allowAliasesToMultipleIndices(), - msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases() - )); + searchRequest.indicesOptions(IndicesOptions.fromOptions(randomlyGenerated.ignoreUnavailable(), + randomlyGenerated.allowNoIndices(), randomlyGenerated.expandWildcardsOpen(), randomlyGenerated.expandWildcardsClosed(), + msearchDefault.allowAliasesToMultipleIndices(), 
msearchDefault.forbidClosedIndices(), msearchDefault.ignoreAliases())); multiSearchRequest.add(searchRequest); } @@ -1135,8 +1137,8 @@ public void testMultiSearch() throws IOException { requests.add(searchRequest); }; MultiSearchRequest.readMultiLineFormat(new BytesArray(EntityUtils.toByteArray(request.getEntity())), - REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, - null, xContentRegistry(), true); + REQUEST_BODY_CONTENT_TYPE.xContent(), consumer, null, multiSearchRequest.indicesOptions(), null, null, null, + xContentRegistry(), true); assertEquals(requests, multiSearchRequest.requests()); } @@ -1172,7 +1174,7 @@ public void testExistsAlias() { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); String[] indices = randomBoolean() ? null : randomIndicesNames(0, 5); getAliasesRequest.indices(indices); - //the HEAD endpoint requires at least an alias or an index + // the HEAD endpoint requires at least an alias or an index boolean hasIndices = indices != null && indices.length > 0; String[] aliases; if (hasIndices) { @@ -1203,15 +1205,15 @@ public void testExistsAlias() { public void testExistsAliasNoAliasNoIndex() { { GetAliasesRequest getAliasesRequest = new GetAliasesRequest(); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - RequestConverters.existsAlias(getAliasesRequest)); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } { - GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[])null); - getAliasesRequest.indices((String[])null); - IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> - RequestConverters.existsAlias(getAliasesRequest)); + GetAliasesRequest getAliasesRequest = new GetAliasesRequest((String[]) null); + getAliasesRequest.indices((String[]) null); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> RequestConverters.existsAlias(getAliasesRequest)); assertEquals("existsAlias requires at least an alias or an index", iae.getMessage()); } } @@ -1368,6 +1370,66 @@ public void testIndexPutSettings() throws IOException { assertEquals(expectedParams, request.getParameters()); } + public void testListTasks() { + { + ListTasksRequest request = new ListTasksRequest(); + Map expectedParams = new HashMap<>(); + if (randomBoolean()) { + request.setDetailed(randomBoolean()); + if (request.getDetailed()) { + expectedParams.put("detailed", "true"); + } + } + if (randomBoolean()) { + request.setWaitForCompletion(randomBoolean()); + if (request.getWaitForCompletion()) { + expectedParams.put("wait_for_completion", "true"); + } + } + if (randomBoolean()) { + String timeout = randomTimeValue(); + request.setTimeout(timeout); + expectedParams.put("timeout", timeout); + } + if (randomBoolean()) { + if (randomBoolean()) { + TaskId taskId = new TaskId(randomAlphaOfLength(5), randomNonNegativeLong()); + request.setParentTaskId(taskId); + expectedParams.put("parent_task_id", taskId.toString()); + } else { + request.setParentTask(TaskId.EMPTY_TASK_ID); + } + } + if (randomBoolean()) { + String[] nodes = generateRandomStringArray(10, 8, false); + request.setNodes(nodes); + if (nodes.length > 0) { + expectedParams.put("nodes", String.join(",", nodes)); + } + } + if (randomBoolean()) { + String[] actions = generateRandomStringArray(10, 8, false); + 
request.setActions(actions); + if (actions.length > 0) { + expectedParams.put("actions", String.join(",", actions)); + } + } + expectedParams.put("group_by", "none"); + Request httpRequest = RequestConverters.listTasks(request); + assertThat(httpRequest, notNullValue()); + assertThat(httpRequest.getMethod(), equalTo(HttpGet.METHOD_NAME)); + assertThat(httpRequest.getEntity(), nullValue()); + assertThat(httpRequest.getEndpoint(), equalTo("/_tasks")); + assertThat(httpRequest.getParameters(), equalTo(expectedParams)); + } + { + ListTasksRequest request = new ListTasksRequest(); + request.setTaskId(new TaskId(randomAlphaOfLength(5), randomNonNegativeLong())); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> RequestConverters.listTasks(request)); + assertEquals("TaskId cannot be used for list tasks request", exception.getMessage()); + } + } + public void testGetRepositories() { Map expectedParams = new HashMap<>(); StringBuilder endpoint = new StringBuilder("/_snapshot"); @@ -1377,7 +1439,7 @@ public void testGetRepositories() { setRandomLocal(getRepositoriesRequest, expectedParams); if (randomBoolean()) { - String[] entries = new String[] {"a", "b", "c"}; + String[] entries = new String[] { "a", "b", "c" }; getRepositoriesRequest.repositories(entries); endpoint.append("/" + String.join(",", entries)); } @@ -1395,9 +1457,8 @@ public void testPutTemplateRequest() throws Exception { names.put("-#template", "-%23template"); names.put("foo^bar", "foo%5Ebar"); - PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest() - .name(randomFrom(names.keySet())) - .patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); + PutIndexTemplateRequest putTemplateRequest = new PutIndexTemplateRequest().name(randomFrom(names.keySet())) + .patterns(Arrays.asList(generateRandomStringArray(20, 100, false, false))); if (randomBoolean()) { putTemplateRequest.order(randomInt()); } @@ -1454,14 +1515,12 @@ public void testEndpointBuilder() { assertEquals("/a/b", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b") - .addPathPartAsIs("_create"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a").addPathPart("b").addPathPartAsIs("_create"); assertEquals("/a/b/_create", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c") - .addPathPartAsIs("_create"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("a", "b", "c").addPathPartAsIs("_create"); assertEquals("/a/b/c/_create", endpointBuilder.build()); } { @@ -1520,13 +1579,12 @@ public void testEndpointBuilderEncodeParts() { assertEquals("/foo%5Ebar", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2") - .addPathPartAsIs("_search"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addPathPart("cluster1:index1,index2").addPathPartAsIs("_search"); assertEquals("/cluster1:index1,index2/_search", endpointBuilder.build()); } { - EndpointBuilder endpointBuilder = new EndpointBuilder() - .addCommaSeparatedPathParts(new String[]{"index1", "index2"}).addPathPartAsIs("cache/clear"); + EndpointBuilder endpointBuilder = new EndpointBuilder().addCommaSeparatedPathParts(new String[] { "index1", "index2" }) + .addPathPartAsIs("cache/clear"); assertEquals("/index1,index2/cache/clear", endpointBuilder.build()); } } @@ -1534,12 +1592,12 @@ public void 
testEndpointBuilderEncodeParts() { public void testEndpoint() { assertEquals("/index/type/id", RequestConverters.endpoint("index", "type", "id")); assertEquals("/index/type/id/_endpoint", RequestConverters.endpoint("index", "type", "id", "_endpoint")); - assertEquals("/index1,index2", RequestConverters.endpoint(new String[]{"index1", "index2"})); - assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, "_endpoint")); - assertEquals("/index1,index2/type1,type2/_endpoint", RequestConverters.endpoint(new String[]{"index1", "index2"}, - new String[]{"type1", "type2"}, "_endpoint")); - assertEquals("/index1,index2/_endpoint/suffix1,suffix2", RequestConverters.endpoint(new String[]{"index1", "index2"}, - "_endpoint", new String[]{"suffix1", "suffix2"})); + assertEquals("/index1,index2", RequestConverters.endpoint(new String[] { "index1", "index2" })); + assertEquals("/index1,index2/_endpoint", RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint")); + assertEquals("/index1,index2/type1,type2/_endpoint", + RequestConverters.endpoint(new String[] { "index1", "index2" }, new String[] { "type1", "type2" }, "_endpoint")); + assertEquals("/index1,index2/_endpoint/suffix1,suffix2", + RequestConverters.endpoint(new String[] { "index1", "index2" }, "_endpoint", new String[] { "suffix1", "suffix2" })); } public void testCreateContentType() { @@ -1555,20 +1613,22 @@ public void testEnforceSameContentType() { XContentType bulkContentType = randomBoolean() ? xContentType : null; - IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), bulkContentType)); + IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.CBOR), + bulkContentType)); assertEquals("Unsupported content-type found for request with content-type [CBOR], only JSON and SMILE are supported", exception.getMessage()); - exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), bulkContentType)); + exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), XContentType.YAML), + bulkContentType)); assertEquals("Unsupported content-type found for request with content-type [YAML], only JSON and SMILE are supported", exception.getMessage()); XContentType requestContentType = xContentType == XContentType.JSON ? 
XContentType.SMILE : XContentType.JSON; - exception = expectThrows(IllegalArgumentException.class, () -> - enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); + exception = expectThrows(IllegalArgumentException.class, + () -> enforceSameContentType(new IndexRequest().source(singletonMap("field", "value"), requestContentType), xContentType)); assertEquals("Mismatching content-type found for request with content-type [" + requestContentType + "], " + "previous requests have content-type [" + xContentType + "]", exception.getMessage()); } @@ -1603,11 +1663,10 @@ private static void randomizeFetchSourceContextParams(Consumer setter, Supplier getter, - Map expectedParams) { + Map expectedParams) { if (randomBoolean()) { - setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), - randomBoolean())); + setter.accept(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean())); } expectedParams.put("ignore_unavailable", Boolean.toString(getter.get().ignoreUnavailable())); expectedParams.put("allow_no_indices", Boolean.toString(getter.get().allowNoIndices())); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java index 2e7ea1650f424..d41b11c68fe44 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/ClusterClientDocumentationIT.java @@ -19,8 +19,14 @@ package org.elasticsearch.client.documentation; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.LatchedActionListener; +import org.elasticsearch.action.TaskOperationFailure; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; +import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; +import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskGroup; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; @@ -31,14 +37,20 @@ import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.indices.recovery.RecoverySettings; +import org.elasticsearch.tasks.TaskId; +import org.elasticsearch.tasks.TaskInfo; import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static java.util.Collections.emptyList; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; /** * This class is used to generate the Java Cluster API documentation. 
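The hunks below rely on the docs-build convention already visible in this file: code between paired // tag::name and // end::name comments is inlined verbatim into the reference documentation by an include-tagged macro. A minimal illustration of the pairing, using names from this patch:

    // In ClusterClientDocumentationIT.java, the tagged region:
    // tag::list-tasks-request
    ListTasksRequest request = new ListTasksRequest();
    // end::list-tasks-request

    // In list_tasks.asciidoc, the macro that pulls in exactly that region:
    // include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request]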
@@ -177,4 +189,87 @@ public void onFailure(Exception e) {
         assertTrue(latch.await(30L, TimeUnit.SECONDS));
     }
 }
+
+    public void testListTasks() throws IOException {
+        RestHighLevelClient client = highLevelClient();
+        {
+            // tag::list-tasks-request
+            ListTasksRequest request = new ListTasksRequest();
+            // end::list-tasks-request
+
+            // tag::list-tasks-request-filter
+            request.setActions("cluster:*"); // <1>
+            request.setNodes("nodeId1", "nodeId2"); // <2>
+            request.setParentTaskId(new TaskId("parentTaskId", 42)); // <3>
+            // end::list-tasks-request-filter
+
+            // tag::list-tasks-request-detailed
+            request.setDetailed(true); // <1>
+            // end::list-tasks-request-detailed
+
+            // tag::list-tasks-request-wait-completion
+            request.setWaitForCompletion(true); // <1>
+            request.setTimeout(TimeValue.timeValueSeconds(50)); // <2>
+            request.setTimeout("50s"); // <3>
+            // end::list-tasks-request-wait-completion
+        }
+
+        ListTasksRequest request = new ListTasksRequest();
+
+        // tag::list-tasks-execute
+        ListTasksResponse response = client.cluster().listTasks(request);
+        // end::list-tasks-execute
+
+        assertThat(response, notNullValue());
+
+        // tag::list-tasks-response-tasks
+        List<TaskInfo> tasks = response.getTasks(); // <1>
+        // end::list-tasks-response-tasks
+
+        // tag::list-tasks-response-calc
+        Map<String, List<TaskInfo>> perNodeTasks = response.getPerNodeTasks(); // <1>
+        List<TaskGroup> groups = response.getTaskGroups(); // <2>
+        // end::list-tasks-response-calc
+
+        // tag::list-tasks-response-failures
+        List<ElasticsearchException> nodeFailures = response.getNodeFailures(); // <1>
+        List<TaskOperationFailure> taskFailures = response.getTaskFailures(); // <2>
+        // end::list-tasks-response-failures
+
+        assertThat(response.getNodeFailures(), equalTo(emptyList()));
+        assertThat(response.getTaskFailures(), equalTo(emptyList()));
+        assertThat(response.getTasks().size(), greaterThanOrEqualTo(2));
+    }
+
+    public void testListTasksAsync() throws Exception {
+        RestHighLevelClient client = highLevelClient();
+        {
+            ListTasksRequest request = new ListTasksRequest();
+
+            // tag::list-tasks-execute-listener
+            ActionListener<ListTasksResponse> listener =
+                new ActionListener<ListTasksResponse>() {
+                    @Override
+                    public void onResponse(ListTasksResponse response) {
+                        // <1>
+                    }
+
+                    @Override
+                    public void onFailure(Exception e) {
+                        // <2>
+                    }
+                };
+            // end::list-tasks-execute-listener
+
+            // Replace the empty listener by a blocking listener in test
+            final CountDownLatch latch = new CountDownLatch(1);
+            listener = new LatchedActionListener<>(listener, latch);
+
+            // tag::list-tasks-execute-async
+            client.cluster().listTasksAsync(request, listener); // <1>
+            // end::list-tasks-execute-async
+
+            assertTrue(latch.await(30L, TimeUnit.SECONDS));
+        }
+    }
 }
diff --git a/docs/java-rest/high-level/cluster/list_tasks.asciidoc b/docs/java-rest/high-level/cluster/list_tasks.asciidoc
new file mode 100644
index 0000000000000..1a2117b2e66e6
--- /dev/null
+++ b/docs/java-rest/high-level/cluster/list_tasks.asciidoc
@@ -0,0 +1,101 @@
+[[java-rest-high-cluster-list-tasks]]
+=== List Tasks API
+
+The List Tasks API allows you to get information about the tasks currently executing in the cluster.
+
+[[java-rest-high-cluster-list-tasks-request]]
+==== List Tasks Request
+
+A `ListTasksRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request]
+--------------------------------------------------
+There are no required parameters. By default the client will list all tasks and will not wait
+for task completion.
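Assembled end to end, the tagged snippets above amount to client code along the following lines (a sketch for orientation only, not part of the patch; the client construction, host, and filter values are assumptions):

import org.apache.http.HttpHost;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestHighLevelClient;
import org.elasticsearch.tasks.TaskInfo;

public class ListTasksExample {
    public static void main(String[] args) throws Exception {
        // Host and port are assumptions for this sketch; point it at a real cluster.
        try (RestHighLevelClient client = new RestHighLevelClient(
                RestClient.builder(new HttpHost("localhost", 9200, "http")))) {
            ListTasksRequest request = new ListTasksRequest();
            request.setActions("cluster:*"); // only cluster-level tasks
            request.setDetailed(true);       // include task descriptions (slower to generate)
            ListTasksResponse response = client.cluster().listTasks(request);
            for (TaskInfo task : response.getTasks()) {
                System.out.println(task.getTaskId() + " -> " + task.getAction());
            }
        }
    }
}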
+
+==== Parameters
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-filter]
+--------------------------------------------------
+<1> Request only cluster-related tasks
+<2> Request all tasks running on nodes nodeId1 and nodeId2
+<3> Request only children of a particular task
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-detailed]
+--------------------------------------------------
+<1> Whether the response should include detailed, potentially slow to generate data. Defaults to `false`
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-request-wait-completion]
+--------------------------------------------------
+<1> Whether this request should wait for all found tasks to complete. Defaults to `false`
+<2> Timeout for the request as a `TimeValue`. Applicable only if `setWaitForCompletion` is `true`.
+Defaults to 30 seconds
+<3> Timeout as a `String`
+
+[[java-rest-high-cluster-list-tasks-sync]]
+==== Synchronous Execution
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute]
+--------------------------------------------------
+
+[[java-rest-high-cluster-list-tasks-async]]
+==== Asynchronous Execution
+
+The asynchronous execution of a list tasks request requires both the
+`ListTasksRequest` instance and an `ActionListener` instance to be
+passed to the asynchronous method:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-async]
+--------------------------------------------------
+<1> The `ListTasksRequest` to execute and the `ActionListener` to use
+when the execution completes
+
+The asynchronous method does not block and returns immediately. Once it is
+completed the `ActionListener` is called back using the `onResponse` method
+if the execution successfully completed or using the `onFailure` method if
+it failed.
+
+A typical listener for `ListTasksResponse` looks like:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-execute-listener]
+--------------------------------------------------
+<1> Called when the execution is successfully completed. The response is
+provided as an argument
+<2> Called in case of a failure. The raised exception is provided as an argument
+
+[[java-rest-high-cluster-list-tasks-response]]
+==== List Tasks Response
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-tasks]
+--------------------------------------------------
+<1> List of currently running tasks
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-calc]
+--------------------------------------------------
+<1> List of tasks grouped by node
+<2> List of tasks grouped by parent task
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/ClusterClientDocumentationIT.java[list-tasks-response-failures]
+--------------------------------------------------
+<1> List of node failures
+<2> List of task failures
diff --git a/docs/java-rest/high-level/supported-apis.asciidoc b/docs/java-rest/high-level/supported-apis.asciidoc
index c3988d8b0027e..658d6023caea5 100644
--- a/docs/java-rest/high-level/supported-apis.asciidoc
+++ b/docs/java-rest/high-level/supported-apis.asciidoc
@@ -98,8 +98,10 @@ include::indices/put_template.asciidoc[]

 The Java High Level REST Client supports the following Cluster APIs:

 * <<java-rest-high-cluster-put-settings>>
+* <<java-rest-high-cluster-list-tasks>>

 include::cluster/put_settings.asciidoc[]
+include::cluster/list_tasks.asciidoc[]

 == Snapshot APIs

@@ -107,4 +109,5 @@ The Java High Level REST Client supports the following Snapshot APIs:

 * <<java-rest-high-snapshot-get-repository>>

-include::snapshot/get_repository.asciidoc[]
\ No newline at end of file
+include::snapshot/get_repository.asciidoc[]
+include::snapshot/create_repository.asciidoc[]
diff --git a/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java b/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
index 885647441d01f..8740c446b068e 100644
--- a/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
+++ b/server/src/main/java/org/elasticsearch/action/TaskOperationFailure.java
@@ -21,17 +21,20 @@

 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
+import org.elasticsearch.common.ParseField;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.common.xcontent.ToXContent.Params;
+import org.elasticsearch.common.xcontent.ConstructingObjectParser;
 import org.elasticsearch.common.xcontent.ToXContentFragment;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentParser;
 import org.elasticsearch.rest.RestStatus;

 import java.io.IOException;

 import static org.elasticsearch.ExceptionsHelper.detailedMessage;
+import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;

 /**
  * Information about task operation failures
@@ -39,7 +42,10 @@
  * The class is final due to serialization limitations
  */
 public final class TaskOperationFailure implements Writeable, ToXContentFragment {
-
+    private static final String TASK_ID = "task_id";
+    private static final String NODE_ID = "node_id";
+    private static final String STATUS = "status";
+    private static final String REASON = "reason";
     private final String nodeId;

     private final long taskId;

@@ -48,6 +54,21 @@ public final class TaskOperationFailure
implements Writeable, ToXContentFragment private final RestStatus status; + private static final ConstructingObjectParser PARSER = + new ConstructingObjectParser<>("task_info", true, constructorObjects -> { + int i = 0; + String nodeId = (String) constructorObjects[i++]; + long taskId = (long) constructorObjects[i++]; + ElasticsearchException reason = (ElasticsearchException) constructorObjects[i]; + return new TaskOperationFailure(nodeId, taskId, reason); + }); + + static { + PARSER.declareString(constructorArg(), new ParseField(NODE_ID)); + PARSER.declareLong(constructorArg(), new ParseField(TASK_ID)); + PARSER.declareObject(constructorArg(), (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(REASON)); + } + public TaskOperationFailure(String nodeId, long taskId, Exception e) { this.nodeId = nodeId; this.taskId = taskId; @@ -98,13 +119,17 @@ public String toString() { return "[" + nodeId + "][" + taskId + "] failed, reason [" + getReason() + "]"; } + public static TaskOperationFailure fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field("task_id", getTaskId()); - builder.field("node_id", getNodeId()); - builder.field("status", status.name()); + builder.field(TASK_ID, getTaskId()); + builder.field(NODE_ID, getNodeId()); + builder.field(STATUS, status.name()); if (reason != null) { - builder.field("reason"); + builder.field(REASON); builder.startObject(); ElasticsearchException.generateThrowableXContent(builder, params, reason); builder.endObject(); @@ -112,5 +137,4 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index 88d8ff4679917..1233b7143ab77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -19,16 +19,19 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; -import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; @@ -40,10 +43,16 @@ import java.util.Map; import java.util.stream.Collectors; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + /** * Returns the list of tasks currently running on the nodes */ public class ListTasksResponse extends 
BaseTasksResponse implements ToXContentObject {
+    private static final String TASKS = "tasks";
+    private static final String TASK_FAILURES = "task_failures";
+    private static final String NODE_FAILURES = "node_failures";

     private List<TaskInfo> tasks;

@@ -56,11 +65,31 @@ public ListTasksResponse() {
     }

     public ListTasksResponse(List<TaskInfo> tasks, List<TaskOperationFailure> taskFailures,
-            List<FailedNodeException> nodeFailures) {
+            List<ElasticsearchException> nodeFailures) {
         super(taskFailures, nodeFailures);
         this.tasks = tasks == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(tasks));
     }

+    private static final ConstructingObjectParser<ListTasksResponse, Void> PARSER =
+        new ConstructingObjectParser<>("list_tasks_response", true,
+            constructingObjects -> {
+                int i = 0;
+                @SuppressWarnings("unchecked")
+                List<TaskInfo> tasks = (List<TaskInfo>) constructingObjects[i++];
+                @SuppressWarnings("unchecked")
+                List<TaskOperationFailure> tasksFailures = (List<TaskOperationFailure>) constructingObjects[i++];
+                @SuppressWarnings("unchecked")
+                List<ElasticsearchException> nodeFailures = (List<ElasticsearchException>) constructingObjects[i];
+                return new ListTasksResponse(tasks, tasksFailures, nodeFailures);
+            });
+
+    static {
+        PARSER.declareObjectArray(constructorArg(), TaskInfo.PARSER, new ParseField(TASKS));
+        PARSER.declareObjectArray(optionalConstructorArg(), (p, c) -> TaskOperationFailure.fromXContent(p), new ParseField(TASK_FAILURES));
+        PARSER.declareObjectArray(optionalConstructorArg(),
+            (parser, c) -> ElasticsearchException.fromXContent(parser), new ParseField(NODE_FAILURES));
+    }
+
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -159,7 +188,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p
                 builder.endObject();
             }
         }
-        builder.startObject("tasks");
+        builder.startObject(TASKS);
         for(TaskInfo task : entry.getValue()) {
             builder.startObject(task.getTaskId().toString());
             task.toXContent(builder, params);
@@ -177,7 +206,7 @@ public XContentBuilder toXContentGroupedByNode(XContentBuilder builder, Params p
      */
     public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Params params) throws IOException {
         toXContentCommon(builder, params);
-        builder.startObject("tasks");
+        builder.startObject(TASKS);
         for (TaskGroup group : getTaskGroups()) {
             builder.field(group.getTaskInfo().getTaskId().toString());
             group.toXContent(builder, params);
@@ -191,7 +220,7 @@ public XContentBuilder toXContentGroupedByParents(XContentBuilder builder, Param
      */
     public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params params) throws IOException {
         toXContentCommon(builder, params);
-        builder.startArray("tasks");
+        builder.startArray(TASKS);
         for (TaskInfo taskInfo : getTasks()) {
             builder.startObject();
             taskInfo.toXContent(builder, params);
@@ -204,14 +233,14 @@ public XContentBuilder toXContentGroupedByNone(XContentBuilder builder, Params p
     @Override
     public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
         builder.startObject();
-        toXContentGroupedByParents(builder, params);
+        toXContentGroupedByNone(builder, params);
         builder.endObject();
         return builder;
     }

     private void toXContentCommon(XContentBuilder builder, Params params) throws IOException {
         if (getTaskFailures() != null && getTaskFailures().size() > 0) {
-            builder.startArray("task_failures");
+            builder.startArray(TASK_FAILURES);
             for (TaskOperationFailure ex : getTaskFailures()){
                 builder.startObject();
                 builder.value(ex);
@@ -221,8 +250,8 @@ private void toXContentCommon(XContentBuilder builder, Params params) throws IOE
             }
         }
         if (getNodeFailures() != null && getNodeFailures().size() > 0) {
-            builder.startArray("node_failures");
-            for (FailedNodeException ex : getNodeFailures()) {
+            builder.startArray(NODE_FAILURES);
+            for (ElasticsearchException ex : getNodeFailures()) {
                 builder.startObject();
                 ex.toXContent(builder, params);
                 builder.endObject();
@@ -231,6 +260,10 @@ private void toXContentCommon(XContentBuilder builder, Params params) throws IOE
         }
     }

+    public static ListTasksResponse fromXContent(XContentParser parser) {
+        return PARSER.apply(parser, null);
+    }
+
     @Override
     public String toString() {
         return Strings.toString(this);
diff --git a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java
index fdbd8e6fe708f..1436410bf2046 100644
--- a/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/support/tasks/BaseTasksResponse.java
@@ -42,9 +42,9 @@
  */
 public class BaseTasksResponse extends ActionResponse {
     private List<TaskOperationFailure> taskFailures;
-    private List<FailedNodeException> nodeFailures;
+    private List<ElasticsearchException> nodeFailures;

-    public BaseTasksResponse(List<TaskOperationFailure> taskFailures, List<FailedNodeException> nodeFailures) {
+    public BaseTasksResponse(List<TaskOperationFailure> taskFailures, List<ElasticsearchException> nodeFailures) {
         this.taskFailures = taskFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(taskFailures));
         this.nodeFailures = nodeFailures == null ? Collections.emptyList() : Collections.unmodifiableList(new ArrayList<>(nodeFailures));
     }
@@ -59,7 +59,7 @@ public List<TaskOperationFailure> getTaskFailures() {
     /**
      * The list of node failures exception.
      */
-    public List<FailedNodeException> getNodeFailures() {
+    public List<ElasticsearchException> getNodeFailures() {
         return nodeFailures;
     }
@@ -99,7 +99,7 @@ public void writeTo(StreamOutput out) throws IOException {
             exp.writeTo(out);
         }
         out.writeVInt(nodeFailures.size());
-        for (FailedNodeException exp : nodeFailures) {
+        for (ElasticsearchException exp : nodeFailures) {
             exp.writeTo(out);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java
index 8e6447e0e4980..ec4058fea9d7c 100644
--- a/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java
+++ b/server/src/main/java/org/elasticsearch/rest/action/admin/cluster/RestListTasksAction.java
@@ -103,18 +103,17 @@ public RestResponse buildResponse(T response, XContentBuilder builder) throws Ex
                     return new BytesRestResponse(RestStatus.OK, builder);
                 }
             };
-        } else if ("none".equals(groupBy)) {
+        } else if ("parents".equals(groupBy)) {
             return new RestBuilderListener<T>(channel) {
                 @Override
                 public RestResponse buildResponse(T response, XContentBuilder builder) throws Exception {
                     builder.startObject();
-                    response.toXContentGroupedByNone(builder, channel.request());
+                    response.toXContentGroupedByParents(builder, channel.request());
                     builder.endObject();
                     return new BytesRestResponse(RestStatus.OK, builder);
                 }
             };
-
-        } else if ("parents".equals(groupBy)) {
+        } else if ("none".equals(groupBy)) {
             return new RestToXContentListener<>(channel);
         } else {
             throw new IllegalArgumentException("[group_by] must be one of [nodes], [parents] or [none] but was [" + groupBy + "]");
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
index da4909bb3817f..26aabec3e9fc2 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
+++
b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java @@ -32,6 +32,7 @@ import org.elasticsearch.common.xcontent.ToXContent.Params; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Collections; @@ -214,6 +215,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } + public static TaskInfo fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "task_info", true, a -> { int i = 0; diff --git a/server/src/test/java/org/elasticsearch/action/TaskOperationFailureTests.java b/server/src/test/java/org/elasticsearch/action/TaskOperationFailureTests.java new file mode 100644 index 0000000000000..442cb55def5f2 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/TaskOperationFailureTests.java @@ -0,0 +1,63 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action; + +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractXContentTestCase; + +import java.io.IOException; + +import static org.hamcrest.Matchers.equalTo; + +public class TaskOperationFailureTests extends AbstractXContentTestCase { + + @Override + protected TaskOperationFailure createTestInstance() { + return new TaskOperationFailure(randomAlphaOfLength(5), randomNonNegativeLong(), new IllegalStateException("message")); + } + + @Override + protected TaskOperationFailure doParseInstance(XContentParser parser) throws IOException { + return TaskOperationFailure.fromXContent(parser); + } + + @Override + protected void assertEqualInstances(TaskOperationFailure expectedInstance, TaskOperationFailure newInstance) { + assertNotSame(expectedInstance, newInstance); + assertThat(newInstance.getNodeId(), equalTo(expectedInstance.getNodeId())); + assertThat(newInstance.getTaskId(), equalTo(expectedInstance.getTaskId())); + assertThat(newInstance.getStatus(), equalTo(expectedInstance.getStatus())); + // XContent loses the original exception and wraps it as a message in Elasticsearch exception + assertThat(newInstance.getCause().getMessage(), equalTo("Elasticsearch exception [type=illegal_state_exception, reason=message]")); + // getReason returns Exception class and the message + assertThat(newInstance.getReason(), + equalTo("ElasticsearchException[Elasticsearch exception [type=illegal_state_exception, reason=message]]")); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected boolean assertToXContentEquivalence() { + return false; + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index b04205ed01813..4ab54cdd206be 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -18,6 +18,7 @@ */ package org.elasticsearch.action.admin.cluster.node.tasks; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; @@ -716,7 +717,7 @@ public void testTasksWaitForAllTask() throws Exception { .setTimeout(timeValueSeconds(10)).get(); // It should finish quickly and without complaint and list the list tasks themselves - assertThat(response.getNodeFailures(), emptyCollectionOf(FailedNodeException.class)); + assertThat(response.getNodeFailures(), emptyCollectionOf(ElasticsearchException.class)); assertThat(response.getTaskFailures(), emptyCollectionOf(TaskOperationFailure.class)); assertThat(response.getTasks().size(), greaterThanOrEqualTo(1)); } diff --git a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java index be0624d6bba83..295ff955e41a5 100644 --- a/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/ListTasksResponseTests.java @@ -19,18 +19,33 @@ package org.elasticsearch.tasks; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.FailedNodeException; +import org.elasticsearch.action.TaskOperationFailure; import 
org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; -import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.test.AbstractXContentTestCase; +import java.io.IOException; +import java.util.ArrayList; import java.util.Collections; +import java.util.List; +import java.util.Objects; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.notNullValue; -public class ListTasksResponseTests extends ESTestCase { +public class ListTasksResponseTests extends AbstractXContentTestCase { public void testEmptyToString() { - assertEquals("{\"tasks\":{}}", new ListTasksResponse().toString()); + assertEquals("{\"tasks\":[]}", new ListTasksResponse().toString()); } public void testNonEmptyToString() { @@ -38,8 +53,48 @@ public void testNonEmptyToString() { new TaskId("node1", 1), "dummy-type", "dummy-action", "dummy-description", null, 0, 1, true, new TaskId("node1", 0), Collections.singletonMap("foo", "bar")); ListTasksResponse tasksResponse = new ListTasksResponse(singletonList(info), emptyList(), emptyList()); - assertEquals("{\"tasks\":{\"node1:1\":{\"node\":\"node1\",\"id\":1,\"type\":\"dummy-type\",\"action\":\"dummy-action\"," + assertEquals("{\"tasks\":[{\"node\":\"node1\",\"id\":1,\"type\":\"dummy-type\",\"action\":\"dummy-action\"," + "\"description\":\"dummy-description\",\"start_time_in_millis\":0,\"running_time_in_nanos\":1,\"cancellable\":true," - + "\"parent_task_id\":\"node1:0\",\"headers\":{\"foo\":\"bar\"}}}}", tasksResponse.toString()); + + "\"parent_task_id\":\"node1:0\",\"headers\":{\"foo\":\"bar\"}}]}", tasksResponse.toString()); + } + + @Override + protected ListTasksResponse createTestInstance() { + List tasks = new ArrayList<>(); + for (int i = 0; i < randomInt(10); i++) { + tasks.add(TaskInfoTests.randomTaskInfo()); + } + List taskFailures = new ArrayList<>(); + for (int i = 0; i < randomInt(5); i++) { + taskFailures.add(new TaskOperationFailure( + randomAlphaOfLength(5), randomNonNegativeLong(), new IllegalStateException("message"))); + } + return new ListTasksResponse(tasks, taskFailures, Collections.singletonList(new FailedNodeException("", "message", null))); + } + + @Override + protected ListTasksResponse doParseInstance(XContentParser parser) throws IOException { + return ListTasksResponse.fromXContent(parser); + } + + @Override + protected boolean supportsUnknownFields() { + return false; + } + + @Override + protected void assertEqualInstances(ListTasksResponse expectedInstance, ListTasksResponse newInstance) { + assertNotSame(expectedInstance, newInstance); + assertThat(newInstance.getTasks(), equalTo(expectedInstance.getTasks())); + assertThat(newInstance.getNodeFailures().size(), equalTo(1)); + for (ElasticsearchException failure : newInstance.getNodeFailures()) { + assertThat(failure, notNullValue()); + assertThat(failure.getMessage(), equalTo("Elasticsearch exception [type=failed_node_exception, reason=message]")); + } + } + + @Override + protected boolean assertToXContentEquivalence() { + return false; } } diff --git 
diff --git a/server/src/test/java/org/elasticsearch/tasks/TaskInfoTests.java b/server/src/test/java/org/elasticsearch/tasks/TaskInfoTests.java new file mode 100644 index 0000000000000..616ac1053871e --- /dev/null +++ b/server/src/test/java/org/elasticsearch/tasks/TaskInfoTests.java @@ -0,0 +1,156 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.tasks; + +import org.elasticsearch.client.Requests; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.test.AbstractSerializingTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.function.Predicate; + +public class TaskInfoTests extends AbstractSerializingTestCase<TaskInfo> { + + @Override + protected TaskInfo doParseInstance(XContentParser parser) { + return TaskInfo.fromXContent(parser); + } + + @Override + protected TaskInfo createTestInstance() { + return randomTaskInfo(); + } + + @Override + protected Writeable.Reader<TaskInfo> instanceReader() { + return TaskInfo::new; + } + + @Override + protected NamedWriteableRegistry getNamedWriteableRegistry() { + return new NamedWriteableRegistry(Collections.singletonList( + new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new))); + } + + @Override + protected boolean supportsUnknownFields() { + return true; + } + + @Override + protected Predicate<String> getRandomFieldsExcludeFilter() { + return field -> "status".equals(field) || "headers".equals(field); + } + + @Override + protected TaskInfo mutateInstance(TaskInfo info) throws IOException { + switch (between(0, 9)) { + case 0: + TaskId taskId = new TaskId(info.getTaskId().getNodeId() + randomAlphaOfLength(5), info.getTaskId().getId()); + return new TaskInfo(taskId, info.getType(), info.getAction(), info.getDescription(), info.getStatus(), + info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), info.getHeaders()); + case 1: + return new TaskInfo(info.getTaskId(), info.getType() + randomAlphaOfLength(5), info.getAction(), info.getDescription(), + info.getStatus(), info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), + info.getHeaders()); + case 2: + return new TaskInfo(info.getTaskId(), info.getType(), info.getAction() + randomAlphaOfLength(5), info.getDescription(), + info.getStatus(), info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), + info.getHeaders()); + case 3: + return new TaskInfo(info.getTaskId(),
info.getType(), info.getAction(), info.getDescription() + randomAlphaOfLength(5), + info.getStatus(), info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), + info.getHeaders()); + case 4: + Task.Status newStatus = randomValueOtherThan(info.getStatus(), TaskInfoTests::randomRawTaskStatus); + return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), newStatus, + info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), info.getHeaders()); + case 5: + return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(), + info.getStartTime() + between(1, 100), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), + info.getHeaders()); + case 6: + return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(), + info.getStartTime(), info.getRunningTimeNanos() + between(1, 100), info.isCancellable(), info.getParentTaskId(), + info.getHeaders()); + case 7: + return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(), + info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable() == false, info.getParentTaskId(), + info.getHeaders()); + case 8: + TaskId parentId = new TaskId(info.getParentTaskId().getNodeId() + randomAlphaOfLength(5), info.getParentTaskId().getId()); + return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(), + info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), parentId, info.getHeaders()); + case 9: + Map<String, String> headers = info.getHeaders(); + if (headers == null) { + headers = new HashMap<>(1); + } else { + headers = new HashMap<>(info.getHeaders()); + } + headers.put(randomAlphaOfLength(15), randomAlphaOfLength(15)); + return new TaskInfo(info.getTaskId(), info.getType(), info.getAction(), info.getDescription(), info.getStatus(), + info.getStartTime(), info.getRunningTimeNanos(), info.isCancellable(), info.getParentTaskId(), headers); + default: + throw new IllegalStateException(); + } + } + + static TaskInfo randomTaskInfo() { + TaskId taskId = randomTaskId(); + String type = randomAlphaOfLength(5); + String action = randomAlphaOfLength(5); + Task.Status status = randomBoolean() ? randomRawTaskStatus() : null; + String description = randomBoolean() ? randomAlphaOfLength(5) : null; + long startTime = randomLong(); + long runningTimeNanos = randomLong(); + boolean cancellable = randomBoolean(); + TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId(); + Map<String, String> headers = randomBoolean() ?
+ Collections.emptyMap() : + Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)); + return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId, headers); + } + + private static TaskId randomTaskId() { + return new TaskId(randomAlphaOfLength(5), randomLong()); + } + + private static RawTaskStatus randomRawTaskStatus() { + try (XContentBuilder builder = XContentBuilder.builder(Requests.INDEX_CONTENT_TYPE.xContent())) { + builder.startObject(); + int fields = between(0, 10); + for (int f = 0; f < fields; f++) { + builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5)); + } + builder.endObject(); + return new RawTaskStatus(BytesReference.bytes(builder)); + } catch (IOException e) { + throw new IllegalStateException(e); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java b/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java index 7a481100f1372..71916c0c94435 100644 --- a/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java +++ b/server/src/test/java/org/elasticsearch/tasks/TaskResultTests.java @@ -19,8 +19,6 @@ package org.elasticsearch.tasks; -import org.elasticsearch.client.Requests; -import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -37,6 +35,8 @@ import java.util.Map; import java.util.TreeMap; +import static org.elasticsearch.tasks.TaskInfoTests.randomTaskInfo; + /** * Round trip tests for {@link TaskResult} and those classes that it includes like {@link TaskInfo} and {@link RawTaskStatus}. */ @@ -125,37 +125,6 @@ private static TaskResult randomTaskResult() throws IOException { } } - private static TaskInfo randomTaskInfo() throws IOException { - TaskId taskId = randomTaskId(); - String type = randomAlphaOfLength(5); - String action = randomAlphaOfLength(5); - Task.Status status = randomBoolean() ? randomRawTaskStatus() : null; - String description = randomBoolean() ? randomAlphaOfLength(5) : null; - long startTime = randomLong(); - long runningTimeNanos = randomLong(); - boolean cancellable = randomBoolean(); - TaskId parentTaskId = randomBoolean() ? TaskId.EMPTY_TASK_ID : randomTaskId(); - Map<String, String> headers = - randomBoolean() ?
Collections.emptyMap() : Collections.singletonMap(randomAlphaOfLength(5), randomAlphaOfLength(5)); - return new TaskInfo(taskId, type, action, description, status, startTime, runningTimeNanos, cancellable, parentTaskId, headers); - } - - private static TaskId randomTaskId() { - return new TaskId(randomAlphaOfLength(5), randomLong()); - } - - private static RawTaskStatus randomRawTaskStatus() throws IOException { - try (XContentBuilder builder = XContentBuilder.builder(Requests.INDEX_CONTENT_TYPE.xContent())) { - builder.startObject(); - int fields = between(0, 10); - for (int f = 0; f < fields; f++) { - builder.field(randomAlphaOfLength(5), randomAlphaOfLength(5)); - } - builder.endObject(); - return new RawTaskStatus(BytesReference.bytes(builder)); - } - } - - private static ToXContent randomTaskResponse() { Map<String, String> result = new TreeMap<>(); int fields = between(0, 10); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 4717fc7c1ba31..edc762632b428 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -659,20 +659,20 @@ public static String randomRealisticUnicodeOfCodepointLength(int codePoints) { return RandomizedTest.randomRealisticUnicodeOfCodepointLength(codePoints); } - public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull, boolean allowEmpty) { + public static String[] generateRandomStringArray(int maxArraySize, int stringSize, boolean allowNull, boolean allowEmpty) { if (allowNull && random().nextBoolean()) { return null; } int arraySize = randomIntBetween(allowEmpty ? 0 : 1, maxArraySize); String[] array = new String[arraySize]; for (int i = 0; i < arraySize; i++) { - array[i] = RandomStrings.randomAsciiOfLength(random(), maxStringSize); + array[i] = RandomStrings.randomAsciiOfLength(random(), stringSize); } return array; } - public static String[] generateRandomStringArray(int maxArraySize, int maxStringSize, boolean allowNull) { - return generateRandomStringArray(maxArraySize, maxStringSize, allowNull, true); + public static String[] generateRandomStringArray(int maxArraySize, int stringSize, boolean allowNull) { - return generateRandomStringArray(maxArraySize, stringSize, allowNull, true); } private static final String[] TIME_SUFFIXES = new String[]{"d", "h", "ms", "s", "m", "micros", "nanos"}; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index b53f61e35fcf3..37e41854f7b8b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -5,11 +5,11 @@ */ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.action.Action; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; -import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.TaskOperationFailure; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -297,7 +297,7 @@ public Response(QueryPage<JobStats>
jobsStats) { this.jobsStats = jobsStats; } - public Response(List<TaskOperationFailure> taskFailures, List<FailedNodeException> nodeFailures, + public Response(List<TaskOperationFailure> taskFailures, List<ElasticsearchException> nodeFailures, QueryPage<JobStats> jobsStats) { super(taskFailures, nodeFailures); this.jobsStats = jobsStats; From 989038eb52c78d88c45997915edee9829a2b57e5 Mon Sep 17 00:00:00 2001 From: Albert Zaharovits Date: Wed, 16 May 2018 16:12:24 +0300 Subject: [PATCH 27/44] S3 repo plugin populates SettingsFilter (#30652) The accessKey and secretKey repo settings (in the cluster state) for the s3 client are registered and will populate the SettingsFilter. --- .../repositories/s3/S3RepositoryPlugin.java | 4 +- .../s3/S3BlobStoreRepositoryTests.java | 126 ++++++++++++++++++ 2 files changed, 129 insertions(+), 1 deletion(-) create mode 100644 plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java index 010c4b92c21a0..e31495efc0eef 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RepositoryPlugin.java @@ -90,6 +90,8 @@ public List<Setting<?>> getSettings() { S3ClientSettings.PROXY_PASSWORD_SETTING, S3ClientSettings.READ_TIMEOUT_SETTING, S3ClientSettings.MAX_RETRIES_SETTING, - S3ClientSettings.USE_THROTTLE_RETRIES_SETTING); + S3ClientSettings.USE_THROTTLE_RETRIES_SETTING, + S3Repository.ACCESS_KEY_SETTING, + S3Repository.SECRET_KEY_SETTING); } } diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java new file mode 100644 index 0000000000000..854d159efce6e --- /dev/null +++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobStoreRepositoryTests.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License.
+ */ +package org.elasticsearch.repositories.s3; + +import com.amazonaws.services.s3.AmazonS3; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.rest.AbstractRestChannel; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.RestResponse; +import org.elasticsearch.rest.action.admin.cluster.RestGetRepositoriesAction; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.rest.FakeRestRequest; + +import java.util.Collection; +import java.util.Collections; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; + +import static java.util.Collections.emptyMap; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.not; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import static org.mockito.Matchers.any; + +public class S3BlobStoreRepositoryTests extends ESIntegTestCase { + + private final String bucket = "bucket_" + randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + private final String client = "client_" + randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + private final String accessKey = "accessKey_" + randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + private final String secureKey = "secureKey_" + randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT); + + protected void createTestRepository(final String name) { + assertAcked(client().admin().cluster().preparePutRepository(name) + .setType(S3Repository.TYPE) + .setVerify(false) + .setSettings(Settings.builder() + .put(S3Repository.BUCKET_SETTING.getKey(), bucket) + .put(InternalAwsS3Service.CLIENT_NAME.getKey(), client) + .put(S3Repository.ACCESS_KEY_SETTING.getKey(), accessKey) + .put(S3Repository.SECRET_KEY_SETTING.getKey(), secureKey))); + } + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return Collections.singletonList(EmptyS3RepositoryPlugin.class); + } + + public static class EmptyS3RepositoryPlugin extends S3RepositoryPlugin { + + public EmptyS3RepositoryPlugin(final Settings settings) { + super(settings); + } + + @Override + public Map<String, Repository.Factory> getRepositories(final Environment env, final NamedXContentRegistry registry) { + return Collections.singletonMap(S3Repository.TYPE, (metadata) -> + new S3Repository(metadata, env.settings(), registry, new InternalAwsS3Service(env.settings(), emptyMap()) { + @Override + public synchronized AmazonS3 client(final Settings repositorySettings) { + final AmazonS3 client = mock(AmazonS3.class); + when(client.doesBucketExist(any(String.class))).thenReturn(true); + return client; + } + })); + } + } + + public void testInsecureRepositoryCredentials() throws Exception { + final String repositoryName = "testInsecureRepositoryCredentials"; + createTestRepository(repositoryName); + final NodeClient nodeClient = internalCluster().getInstance(NodeClient.class); + final RestGetRepositoriesAction getRepoAction = new RestGetRepositoriesAction(Settings.EMPTY,
mock(RestController.class), + internalCluster().getInstance(SettingsFilter.class)); + final RestRequest getRepoRequest = new FakeRestRequest(); + getRepoRequest.params().put("repository", repositoryName); + final CountDownLatch getRepoLatch = new CountDownLatch(1); + final AtomicReference<AssertionError> getRepoError = new AtomicReference<>(); + getRepoAction.handleRequest(getRepoRequest, new AbstractRestChannel(getRepoRequest, true) { + @Override + public void sendResponse(RestResponse response) { + try { + final String responseContent = response.content().utf8ToString(); + assertThat(responseContent, containsString(bucket)); + assertThat(responseContent, containsString(client)); + assertThat(responseContent, not(containsString(accessKey))); + assertThat(responseContent, not(containsString(secureKey))); + } catch (final AssertionError ex) { + getRepoError.set(ex); + } + getRepoLatch.countDown(); + } + }, nodeClient); + getRepoLatch.await(); + if (getRepoError.get() != null) { + throw getRepoError.get(); + } + } +} From 53ec89276ec74c47d21887a0af3357a9458ee36f Mon Sep 17 00:00:00 2001 From: Lee Hinman Date: Wed, 16 May 2018 10:21:22 -0600 Subject: [PATCH 28/44] [TEST] Remove AwaitsFix in IndicesOptionsTests#testSerialization --- .../org/elasticsearch/action/support/IndicesOptionsTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java index 45b336af62054..d8c3d896a8843 100644 --- a/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/IndicesOptionsTests.java @@ -33,7 +33,6 @@ public class IndicesOptionsTests extends ESTestCase { - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/30644") public void testSerialization() throws Exception { int iterations = randomIntBetween(5, 20); for (int i = 0; i < iterations; i++) { From a9c8ca8dbbb6c7d14ba2c6b583a676bddff7a0c8 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 16 May 2018 17:06:52 +0200 Subject: [PATCH 29/44] Mitigate date histogram slowdowns with non-fixed timezones. (#30534) Date histograms on non-fixed timezones such as `Europe/Paris` proved much slower than histograms on fixed timezones in #28727. This change mitigates the issue by using a fixed time zone instead when shard data doesn't cross a transition so that all timestamps share the same fixed offset. This should be a common case with daily indices. NOTE: Rewriting the aggregation doesn't work since the timezone is then also used on the coordinating node to create empty buckets, which might be out of the range of data that exists on the shard. NOTE: In order to be able to get a shard context in the tests, I reused code from the base query test case by creating a new parent test case for both queries and aggregations: `AbstractBuilderTestCase`.
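As a rough sketch of the idea (plain Joda-Time, not the patch's code: the hypothetical `minMillis`/`maxMillis` stand in for the shard's doc values, and the extra guard the real change applies to rounded values is omitted):

```java
import org.joda.time.DateTimeZone;

// If every timestamp on a shard falls before the zone's next DST transition,
// the non-fixed zone behaves like a single fixed offset for that shard and
// can be swapped for one, which makes rounding much cheaper.
public class FixedOffsetSketch {
    static DateTimeZone maybeFixed(DateTimeZone tz, long minMillis, long maxMillis) {
        if (tz.isFixed()) {
            return tz; // already fixed, nothing to rewrite
        }
        long nextTransition = tz.nextTransition(minMillis);
        if (nextTransition != minMillis && maxMillis < nextTransition) {
            // all values share one offset, so rounding agrees with the fixed zone
            return DateTimeZone.forOffsetMillis(tz.getOffset(minMillis));
        }
        return tz; // values cross a transition: keep the original zone
    }

    public static void main(String[] args) {
        DateTimeZone paris = DateTimeZone.forID("Europe/Paris");
        long jan5 = 1515150000000L; // 2018-01-05, well inside winter time
        long oneDay = 24L * 60 * 60 * 1000;
        System.out.println(maybeFixed(paris, jan5, jan5 + oneDay)); // prints +01:00
    }
}
```

For time-based daily indices this check should almost always succeed, since a zone transitions at most a handful of times per year.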
Mitigates #28727 --- .../DateHistogramAggregationBuilder.java | 132 +++++- .../histogram/DateHistogramAggregator.java | 11 +- .../DateHistogramAggregatorFactory.java | 21 +- .../{ => histogram}/DateHistogramTests.java | 86 +++- .../aggregations/BaseAggregationTestCase.java | 59 +-- .../test/AbstractBuilderTestCase.java | 399 ++++++++++++++++++ .../test/AbstractQueryTestCase.java | 365 +--------------- 7 files changed, 629 insertions(+), 444 deletions(-) rename server/src/test/java/org/elasticsearch/search/aggregations/bucket/{ => histogram}/DateHistogramTests.java (52%) create mode 100644 test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index fa90e34ef3dd0..c72e9d22dc0ae 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -19,6 +19,10 @@ package org.elasticsearch.search.aggregations.bucket.histogram; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.rounding.DateTimeUnit; @@ -27,8 +31,13 @@ import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.index.fielddata.AtomicNumericFieldData; +import org.elasticsearch.index.fielddata.IndexNumericFieldData; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MappedFieldType.Relation; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.search.DocValueFormat; import org.elasticsearch.search.aggregations.AggregationBuilder; -import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.AggregatorFactories.Builder; import org.elasticsearch.search.aggregations.AggregatorFactory; import org.elasticsearch.search.aggregations.BucketOrder; @@ -44,6 +53,8 @@ import org.elasticsearch.search.aggregations.support.ValuesSourceParserHelper; import org.elasticsearch.search.aggregations.support.ValuesSourceType; import org.elasticsearch.search.internal.SearchContext; +import org.joda.time.DateTimeField; +import org.joda.time.DateTimeZone; import java.io.IOException; import java.util.HashMap; @@ -351,36 +362,121 @@ public String getType() { return NAME; } + /* + * NOTE: this can't be done in rewrite() because the timezone is then also used on the + * coordinating node in order to generate missing buckets, which may cross a transition + * even though data on the shards doesn't. 
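+ * Rewriting per shard is safe, on the other hand: the substituted fixed offset only + * needs to agree with the original zone for values that actually exist on the shard, + * which the transition check below guarantees.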
+ */ + DateTimeZone rewriteTimeZone(QueryShardContext context) throws IOException { + final DateTimeZone tz = timeZone(); + if (field() != null && + tz != null && + tz.isFixed() == false && + field() != null && + script() == null) { + final MappedFieldType ft = context.fieldMapper(field()); + final IndexReader reader = context.getIndexReader(); + if (ft != null && reader != null) { + Long anyInstant = null; + final IndexNumericFieldData fieldData = context.getForField(ft); + for (LeafReaderContext ctx : reader.leaves()) { + AtomicNumericFieldData leafFD = ((IndexNumericFieldData) fieldData).load(ctx); + SortedNumericDocValues values = leafFD.getLongValues(); + if (values.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) { + anyInstant = values.nextValue(); + break; + } + } + + if (anyInstant != null) { + final long prevTransition = tz.previousTransition(anyInstant); + final long nextTransition = tz.nextTransition(anyInstant); + + // We need all not only values but also rounded values to be within + // [prevTransition, nextTransition]. + final long low; + DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); + if (intervalAsUnit != null) { + final DateTimeField dateTimeField = intervalAsUnit.field(tz); + low = dateTimeField.roundCeiling(prevTransition); + } else { + final TimeValue intervalAsMillis = getIntervalAsTimeValue(); + low = Math.addExact(prevTransition, intervalAsMillis.millis()); + } + // rounding rounds down, so 'nextTransition' is a good upper bound + final long high = nextTransition; + + final DocValueFormat format = ft.docValueFormat(null, null); + final String formattedLow = format.format(low); + final String formattedHigh = format.format(high); + if (ft.isFieldWithinQuery(reader, formattedLow, formattedHigh, + true, false, tz, null, context) == Relation.WITHIN) { + // All values in this reader have the same offset despite daylight saving times. + // This is very common for location-based timezones such as Europe/Paris in + // combination with time-based indices. + return DateTimeZone.forOffsetMillis(tz.getOffset(anyInstant)); + } + } + } + } + return tz; + } + @Override protected ValuesSourceAggregatorFactory innerBuild(SearchContext context, ValuesSourceConfig config, AggregatorFactory parent, Builder subFactoriesBuilder) throws IOException { - Rounding rounding = createRounding(); + final DateTimeZone tz = timeZone(); + final Rounding rounding = createRounding(tz); + final DateTimeZone rewrittenTimeZone = rewriteTimeZone(context.getQueryShardContext()); + final Rounding shardRounding; + if (tz == rewrittenTimeZone) { + shardRounding = rounding; + } else { + shardRounding = createRounding(rewrittenTimeZone); + } + ExtendedBounds roundedBounds = null; if (this.extendedBounds != null) { // parse any string bounds to longs and round roundedBounds = this.extendedBounds.parseAndValidate(name, context, config.format()).round(rounding); } - return new DateHistogramAggregatorFactory(name, config, interval, dateHistogramInterval, offset, order, keyed, minDocCount, - rounding, roundedBounds, context, parent, subFactoriesBuilder, metaData); + return new DateHistogramAggregatorFactory(name, config, offset, order, keyed, minDocCount, + rounding, shardRounding, roundedBounds, context, parent, subFactoriesBuilder, metaData); } - private Rounding createRounding() { - Rounding.Builder tzRoundingBuilder; + /** Return the interval as a date time unit if applicable. 
If this returns + * {@code null} then it means that the interval is expressed as a fixed + * {@link TimeValue} and may be accessed via + * {@link #getIntervalAsTimeValue()}. */ + private DateTimeUnit getIntervalAsDateTimeUnit() { if (dateHistogramInterval != null) { - DateTimeUnit dateTimeUnit = DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); - if (dateTimeUnit != null) { - tzRoundingBuilder = Rounding.builder(dateTimeUnit); - } else { - // the interval is a time value? - tzRoundingBuilder = Rounding.builder( - TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval")); - } + return DATE_FIELD_UNITS.get(dateHistogramInterval.toString()); + } + return null; + } + + /** + * Get the interval as a {@link TimeValue}. Should only be called if + * {@link #getIntervalAsDateTimeUnit()} returned {@code null}. + */ + private TimeValue getIntervalAsTimeValue() { + if (dateHistogramInterval != null) { + return TimeValue.parseTimeValue(dateHistogramInterval.toString(), null, getClass().getSimpleName() + ".interval"); + } else { + return TimeValue.timeValueMillis(interval); + } + } + + private Rounding createRounding(DateTimeZone timeZone) { + Rounding.Builder tzRoundingBuilder; + DateTimeUnit intervalAsUnit = getIntervalAsDateTimeUnit(); + if (intervalAsUnit != null) { + tzRoundingBuilder = Rounding.builder(intervalAsUnit); } else { - // the interval is an integer time value in millis? - tzRoundingBuilder = Rounding.builder(TimeValue.timeValueMillis(interval)); + tzRoundingBuilder = Rounding.builder(getIntervalAsTimeValue()); } - if (timeZone() != null) { - tzRoundingBuilder.timeZone(timeZone()); + if (timeZone != null) { + tzRoundingBuilder.timeZone(timeZone); } Rounding rounding = tzRoundingBuilder.build(); return rounding; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java index c32cedb4427e8..94dc18eae63e2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregator.java @@ -55,6 +55,7 @@ class DateHistogramAggregator extends BucketsAggregator { private final ValuesSource.Numeric valuesSource; private final DocValueFormat formatter; private final Rounding rounding; + private final Rounding shardRounding; private final BucketOrder order; private final boolean keyed; @@ -64,14 +65,15 @@ class DateHistogramAggregator extends BucketsAggregator { private final LongHash bucketOrds; private long offset; - DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, long offset, BucketOrder order, - boolean keyed, + DateHistogramAggregator(String name, AggregatorFactories factories, Rounding rounding, Rounding shardRounding, + long offset, BucketOrder order, boolean keyed, long minDocCount, @Nullable ExtendedBounds extendedBounds, @Nullable ValuesSource.Numeric valuesSource, DocValueFormat formatter, SearchContext aggregationContext, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { super(name, factories, aggregationContext, parent, pipelineAggregators, metaData); this.rounding = rounding; + this.shardRounding = shardRounding; this.offset = offset; this.order = InternalOrder.validate(order, this);; this.keyed = keyed; @@ -105,7 +107,9 @@ public void collect(int doc, 
long bucket) throws IOException { long previousRounded = Long.MIN_VALUE; for (int i = 0; i < valuesCount; ++i) { long value = values.nextValue(); - long rounded = rounding.round(value - offset) + offset; + // We can use shardRounding here, which is sometimes more efficient + // if daylight saving times are involved. + long rounded = shardRounding.round(value - offset) + offset; assert rounded >= previousRounded; if (rounded == previousRounded) { continue; @@ -138,6 +142,7 @@ public InternalAggregation buildAggregation(long owningBucketOrdinal) throws IOE CollectionUtil.introSort(buckets, BucketOrder.key(true).comparator(this)); // value source will be null for unmapped fields + // Important: use `rounding` here, not `shardRounding` InternalDateHistogram.EmptyBucketInfo emptyBucketInfo = minDocCount == 0 ? new InternalDateHistogram.EmptyBucketInfo(rounding, buildEmptySubAggregations(), extendedBounds) : null; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java index a64e018288879..c7ad6de7e0d72 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorFactory.java @@ -38,28 +38,27 @@ public final class DateHistogramAggregatorFactory extends ValuesSourceAggregatorFactory { - private final DateHistogramInterval dateHistogramInterval; - private final long interval; private final long offset; private final BucketOrder order; private final boolean keyed; private final long minDocCount; private final ExtendedBounds extendedBounds; - private Rounding rounding; + private final Rounding rounding; + private final Rounding shardRounding; - public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, long interval, - DateHistogramInterval dateHistogramInterval, long offset, BucketOrder order, boolean keyed, long minDocCount, - Rounding rounding, ExtendedBounds extendedBounds, SearchContext context, AggregatorFactory parent, - AggregatorFactories.Builder subFactoriesBuilder, Map metaData) throws IOException { + public DateHistogramAggregatorFactory(String name, ValuesSourceConfig config, + long offset, BucketOrder order, boolean keyed, long minDocCount, + Rounding rounding, Rounding shardRounding, ExtendedBounds extendedBounds, SearchContext context, + AggregatorFactory parent, AggregatorFactories.Builder subFactoriesBuilder, + Map metaData) throws IOException { super(name, config, context, parent, subFactoriesBuilder, metaData); - this.interval = interval; - this.dateHistogramInterval = dateHistogramInterval; this.offset = offset; this.order = order; this.keyed = keyed; this.minDocCount = minDocCount; this.extendedBounds = extendedBounds; this.rounding = rounding; + this.shardRounding = shardRounding; } public long minDocCount() { @@ -77,8 +76,8 @@ protected Aggregator doCreateInternal(ValuesSource.Numeric valuesSource, Aggrega private Aggregator createAggregator(ValuesSource.Numeric valuesSource, Aggregator parent, List pipelineAggregators, Map metaData) throws IOException { - return new DateHistogramAggregator(name, factories, rounding, offset, order, keyed, minDocCount, extendedBounds, valuesSource, - config.format(), context, parent, pipelineAggregators, metaData); + return new DateHistogramAggregator(name, factories, rounding, 
shardRounding, offset, order, keyed, minDocCount, extendedBounds, + valuesSource, config.format(), context, parent, pipelineAggregators, metaData); } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java similarity index 52% rename from server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java rename to server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java index e86b3a553e9c4..1f83842eab24f 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/DateHistogramTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramTests.java @@ -17,14 +17,27 @@ * under the License. */ -package org.elasticsearch.search.aggregations.bucket; +package org.elasticsearch.search.aggregations.bucket.histogram; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.LongPoint; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.store.Directory; +import org.elasticsearch.common.joda.FormatDateTimeFormatter; +import org.elasticsearch.common.joda.Joda; +import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.search.aggregations.BaseAggregationTestCase; +import org.elasticsearch.search.aggregations.BucketOrder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.search.aggregations.bucket.histogram.ExtendedBoundsTests; -import org.elasticsearch.search.aggregations.BucketOrder; +import org.joda.time.DateTimeZone; +import org.junit.Assume; +import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -120,4 +133,73 @@ private List randomOrder() { return orders; } + private static Document documentForDate(String field, long millis) { + Document doc = new Document(); + doc.add(new LongPoint(field, millis)); + doc.add(new SortedNumericDocValuesField(field, millis)); + return doc; + } + + public void testRewriteTimeZone() throws IOException { + Assume.assumeTrue(getCurrentTypes().length > 0); // we need mappings + FormatDateTimeFormatter format = Joda.forPattern("strict_date_optional_time"); + + try (Directory dir = newDirectory(); + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig())) { + + w.addDocument(documentForDate(DATE_FIELD_NAME, format.parser().parseDateTime("2018-03-11T11:55:00").getMillis())); + w.addDocument(documentForDate(DATE_FIELD_NAME, format.parser().parseDateTime("2017-10-30T18:13:00").getMillis())); + + try (IndexReader readerThatDoesntCross = DirectoryReader.open(w)) { + + w.addDocument(documentForDate(DATE_FIELD_NAME, format.parser().parseDateTime("2018-03-25T02:44:00").getMillis())); + + try (IndexReader readerThatCrosses = DirectoryReader.open(w)) { + + QueryShardContext shardContextThatDoesntCross = createShardContext(readerThatDoesntCross); + QueryShardContext shardContextThatCrosses = createShardContext(readerThatCrosses); + + DateHistogramAggregationBuilder builder = new DateHistogramAggregationBuilder("my_date_histo"); + builder.field(DATE_FIELD_NAME); + 
builder.dateHistogramInterval(DateHistogramInterval.DAY); + + // no timeZone => no rewrite + assertNull(builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertNull(builder.rewriteTimeZone(shardContextThatCrosses)); + + // fixed timeZone => no rewrite + DateTimeZone tz = DateTimeZone.forOffsetHours(1); + builder.timeZone(tz); + assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); + + // daylight-saving-times => rewrite if doesn't cross + tz = DateTimeZone.forID("Europe/Paris"); + builder.timeZone(tz); + assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); + + // Rounded values are no longer all within the same transitions => no rewrite + builder.dateHistogramInterval(DateHistogramInterval.MONTH); + assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); + + builder = new DateHistogramAggregationBuilder("my_date_histo"); + builder.field(DATE_FIELD_NAME); + builder.timeZone(tz); + + builder.interval(1000L * 60 * 60 * 24); // ~ 1 day + assertEquals(DateTimeZone.forOffsetHours(1), builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); + + // Because the interval is large, rounded values are not + // within the same transitions as the values => no rewrite + builder.interval(1000L * 60 * 60 * 24 * 30); // ~ 1 month + assertSame(tz, builder.rewriteTimeZone(shardContextThatDoesntCross)); + assertSame(tz, builder.rewriteTimeZone(shardContextThatCrosses)); + } + } + } + } + } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java index 3213ce526ce12..de5e238199693 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/BaseAggregationTestCase.java @@ -38,6 +38,7 @@ import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.support.ValuesSourceAggregationBuilder; +import org.elasticsearch.test.AbstractBuilderTestCase; import org.elasticsearch.test.AbstractQueryTestCase; import org.elasticsearch.test.ESTestCase; @@ -50,60 +51,12 @@ import static org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.hamcrest.Matchers.hasSize; -public abstract class BaseAggregationTestCase> extends ESTestCase { +public abstract class BaseAggregationTestCase> extends AbstractBuilderTestCase { - protected static final String STRING_FIELD_NAME = "mapped_string"; - protected static final String INT_FIELD_NAME = "mapped_int"; - protected static final String DOUBLE_FIELD_NAME = "mapped_double"; - protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean"; - protected static final String DATE_FIELD_NAME = "mapped_date"; protected static final String IP_FIELD_NAME = "mapped_ip"; - private String[] currentTypes; - - protected String[] getCurrentTypes() { - return currentTypes; - } - - private NamedWriteableRegistry namedWriteableRegistry; - private NamedXContentRegistry xContentRegistry; protected abstract AB createTestAggregatorBuilder(); - protected Collection> getPlugins() { - 
return Collections.emptyList(); - } - - /** - * Setup for the whole base test class. - */ - @Override - public void setUp() throws Exception { - super.setUp(); - Settings settings = Settings.builder() - .put("node.name", AbstractQueryTestCase.class.toString()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - IndicesModule indicesModule = new IndicesModule(Collections.emptyList()); - PluginsService pluginsService = new PluginsService(settings, null, null, null, getPlugins()); - SearchModule searchModule = new SearchModule(settings, false, pluginsService.filterPlugins(SearchPlugin.class)); - List entries = new ArrayList<>(); - entries.addAll(indicesModule.getNamedWriteables()); - entries.addAll(searchModule.getNamedWriteables()); - namedWriteableRegistry = new NamedWriteableRegistry(entries); - xContentRegistry = new NamedXContentRegistry(searchModule.getNamedXContents()); - //create some random type with some default field, those types will stick around for all of the subclasses - currentTypes = new String[randomIntBetween(0, 5)]; - for (int i = 0; i < currentTypes.length; i++) { - String type = randomAlphaOfLengthBetween(1, 10); - currentTypes[i] = type; - } - } - - @Override - protected NamedXContentRegistry xContentRegistry() { - return xContentRegistry; - } - /** * Generic test that creates new AggregatorFactory from the test * AggregatorFactory and checks both for equality and asserts equality on @@ -157,7 +110,7 @@ public void testSerialization() throws IOException { AB testAgg = createTestAggregatorBuilder(); try (BytesStreamOutput output = new BytesStreamOutput()) { output.writeNamedWriteable(testAgg); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry())) { AggregationBuilder deserialized = in.readNamedWriteable(AggregationBuilder.class); assertEquals(testAgg, deserialized); assertEquals(testAgg.hashCode(), deserialized.hashCode()); @@ -181,12 +134,12 @@ public void testShallowCopy() { // we use the streaming infra to create a copy of the query provided as // argument - private AB copyAggregation(AB agg) throws IOException { + protected AB copyAggregation(AB agg) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { agg.writeTo(output); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry())) { @SuppressWarnings("unchecked") - AB secondAgg = (AB) namedWriteableRegistry.getReader(AggregationBuilder.class, agg.getWriteableName()).read(in); + AB secondAgg = (AB) namedWriteableRegistry().getReader(AggregationBuilder.class, agg.getWriteableName()).read(in); return secondAgg; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java new file mode 100644 index 0000000000000..2eb08f8d06514 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractBuilderTestCase.java @@ -0,0 +1,399 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.test; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.util.Accountable; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; +import org.elasticsearch.action.get.GetRequest; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; +import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.IndexAnalyzers; +import org.elasticsearch.index.cache.bitset.BitsetFilterCache; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.fielddata.IndexFieldDataService; +import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.similarity.SimilarityService; +import org.elasticsearch.indices.IndicesModule; +import org.elasticsearch.indices.analysis.AnalysisModule; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; +import org.elasticsearch.indices.mapper.MapperRegistry; +import org.elasticsearch.node.InternalSettingsPreparer; +import org.elasticsearch.plugins.MapperPlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.PluginsService; +import org.elasticsearch.plugins.ScriptPlugin; +import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.script.ScriptModule; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.search.SearchModule; +import org.elasticsearch.search.internal.SearchContext; +import org.junit.After; +import org.junit.AfterClass; +import org.junit.Before; +import org.junit.BeforeClass; + +import java.io.Closeable; 
+import java.io.IOException; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.function.Function; +import java.util.stream.Stream; + +import static java.util.Collections.emptyList; +import static java.util.stream.Collectors.toList; + +public abstract class AbstractBuilderTestCase extends ESTestCase { + + public static final String STRING_FIELD_NAME = "mapped_string"; + protected static final String STRING_FIELD_NAME_2 = "mapped_string_2"; + protected static final String INT_FIELD_NAME = "mapped_int"; + protected static final String INT_RANGE_FIELD_NAME = "mapped_int_range"; + protected static final String DOUBLE_FIELD_NAME = "mapped_double"; + protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean"; + protected static final String DATE_FIELD_NAME = "mapped_date"; + protected static final String DATE_RANGE_FIELD_NAME = "mapped_date_range"; + protected static final String OBJECT_FIELD_NAME = "mapped_object"; + protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; + protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; + protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, + GEO_SHAPE_FIELD_NAME}; + protected static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, + DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; + + protected static Version indexVersionCreated; + + private static ServiceHolder serviceHolder; + private static int queryNameId = 0; + private static Settings nodeSettings; + private static Index index; + private static String[] currentTypes; + protected static String[] randomTypes; + + protected static Index getIndex() { + return index; + } + + protected static String[] getCurrentTypes() { + return currentTypes; + } + + protected Collection> getPlugins() { + return Collections.emptyList(); + } + + protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { + } + + @BeforeClass + public static void beforeClass() { + nodeSettings = Settings.builder() + .put("node.name", AbstractQueryTestCase.class.toString()) + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) + .build(); + + index = new Index(randomAlphaOfLengthBetween(1, 10), "_na_"); + + // Set a single type in the index + switch (random().nextInt(3)) { + case 0: + currentTypes = new String[0]; // no types + break; + default: + currentTypes = new String[] { "_doc" }; + break; + } + randomTypes = getRandomTypes(); + } + + private static String[] getRandomTypes() { + String[] types; + if (currentTypes.length > 0 && randomBoolean()) { + int numberOfQueryTypes = randomIntBetween(1, currentTypes.length); + types = new String[numberOfQueryTypes]; + for (int i = 0; i < numberOfQueryTypes; i++) { + types[i] = randomFrom(currentTypes); + } + } else { + if (randomBoolean()) { + types = new String[]{MetaData.ALL}; + } else { + types = new String[0]; + } + } + return types; + } + + @Override + protected NamedXContentRegistry xContentRegistry() { + return serviceHolder.xContentRegistry; + } + + protected 
NamedWriteableRegistry namedWriteableRegistry() { + return serviceHolder.namedWriteableRegistry; + } + + /** + * make sure query names are unique by suffixing them with increasing counter + */ + protected static String createUniqueRandomName() { + String queryName = randomAlphaOfLengthBetween(1, 10) + queryNameId; + queryNameId++; + return queryName; + } + + protected Settings indexSettings() { + // we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually. + indexVersionCreated = randomBoolean() ? Version.CURRENT + : VersionUtils.randomVersionBetween(random(), null, Version.CURRENT); + return Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated) + .build(); + } + + @AfterClass + public static void afterClass() throws Exception { + IOUtils.close(serviceHolder); + serviceHolder = null; + } + + @Before + public void beforeTest() throws IOException { + if (serviceHolder == null) { + serviceHolder = new ServiceHolder(nodeSettings, indexSettings(), getPlugins(), this); + } + serviceHolder.clientInvocationHandler.delegate = this; + } + + protected static SearchContext getSearchContext(String[] types, QueryShardContext context) { + TestSearchContext testSearchContext = new TestSearchContext(context) { + @Override + public MapperService mapperService() { + return serviceHolder.mapperService; // need to build / parse inner hits sort fields + } + + @Override + public > IFD getForField(MappedFieldType fieldType) { + return serviceHolder.indexFieldDataService.getForField(fieldType); // need to build / parse inner hits sort fields + } + + }; + testSearchContext.getQueryShardContext().setTypes(types); + return testSearchContext; + } + + @After + public void afterTest() { + serviceHolder.clientInvocationHandler.delegate = null; + } + + /** + * Override this to handle {@link Client#get(GetRequest)} calls from parsers / builders + */ + protected GetResponse executeGet(GetRequest getRequest) { + throw new UnsupportedOperationException("this test can't handle GET requests"); + } + + /** + * Override this to handle {@link Client#get(GetRequest)} calls from parsers / builders + */ + protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsRequest mtvRequest) { + throw new UnsupportedOperationException("this test can't handle MultiTermVector requests"); + } + + /** + * @return a new {@link QueryShardContext} with the provided reader + */ + protected static QueryShardContext createShardContext(IndexReader reader) { + return serviceHolder.createShardContext(reader); + } + + /** + * @return a new {@link QueryShardContext} based on the base test index and queryParserService + */ + protected static QueryShardContext createShardContext() { + return createShardContext(null); + } + + private static class ClientInvocationHandler implements InvocationHandler { + AbstractBuilderTestCase delegate; + + @Override + public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { + if (method.equals(Client.class.getMethod("get", GetRequest.class, ActionListener.class))){ + GetResponse getResponse = delegate.executeGet((GetRequest) args[0]); + ActionListener listener = (ActionListener) args[1]; + if (randomBoolean()) { + listener.onResponse(getResponse); + } else { + new Thread(() -> listener.onResponse(getResponse)).start(); + } + return null; + } else if (method.equals(Client.class.getMethod + ("multiTermVectors", MultiTermVectorsRequest.class))) { + return new PlainActionFuture() { + @Override + 
public MultiTermVectorsResponse get() throws InterruptedException, ExecutionException { + return delegate.executeMultiTermVectors((MultiTermVectorsRequest) args[0]); + } + }; + } else if (method.equals(Object.class.getMethod("toString"))) { + return "MockClient"; + } + throw new UnsupportedOperationException("this test can't handle calls to: " + method); + } + + } + + private static class ServiceHolder implements Closeable { + private final IndexFieldDataService indexFieldDataService; + private final SearchModule searchModule; + private final NamedWriteableRegistry namedWriteableRegistry; + private final NamedXContentRegistry xContentRegistry; + private final ClientInvocationHandler clientInvocationHandler = new ClientInvocationHandler(); + private final IndexSettings idxSettings; + private final SimilarityService similarityService; + private final MapperService mapperService; + private final BitsetFilterCache bitsetFilterCache; + private final ScriptService scriptService; + private final Client client; + private final long nowInMillis = randomNonNegativeLong(); + + ServiceHolder(Settings nodeSettings, Settings indexSettings, + Collection> plugins, AbstractBuilderTestCase testCase) throws IOException { + Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, null); + PluginsService pluginsService; + pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins); + + client = (Client) Proxy.newProxyInstance( + Client.class.getClassLoader(), + new Class[]{Client.class}, + clientInvocationHandler); + ScriptModule scriptModule = createScriptModule(pluginsService.filterPlugins(ScriptPlugin.class)); + List> additionalSettings = pluginsService.getPluginSettings(); + additionalSettings.add(InternalSettingsPlugin.VERSION_CREATED); + SettingsModule settingsModule = new SettingsModule(nodeSettings, additionalSettings, pluginsService.getPluginSettingsFilter()); + searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)); + IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)); + List entries = new ArrayList<>(); + entries.addAll(indicesModule.getNamedWriteables()); + entries.addAll(searchModule.getNamedWriteables()); + namedWriteableRegistry = new NamedWriteableRegistry(entries); + xContentRegistry = new NamedXContentRegistry(Stream.of( + searchModule.getNamedXContents().stream() + ).flatMap(Function.identity()).collect(toList())); + IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings(); + idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings); + AnalysisModule analysisModule = new AnalysisModule(TestEnvironment.newEnvironment(nodeSettings), emptyList()); + IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings); + scriptService = scriptModule.getScriptService(); + similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap()); + MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); + mapperService = new MapperService(idxSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry, + () -> createShardContext(null)); + IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() { + }); + indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache, + new NoneCircuitBreakerService(), mapperService); + bitsetFilterCache = new 
BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() { + @Override + public void onCache(ShardId shardId, Accountable accountable) { + + } + + @Override + public void onRemoval(ShardId shardId, Accountable accountable) { + + } + }); + + for (String type : currentTypes) { + mapperService.merge(type, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(type, + STRING_FIELD_NAME, "type=text", + STRING_FIELD_NAME_2, "type=keyword", + INT_FIELD_NAME, "type=integer", + INT_RANGE_FIELD_NAME, "type=integer_range", + DOUBLE_FIELD_NAME, "type=double", + BOOLEAN_FIELD_NAME, "type=boolean", + DATE_FIELD_NAME, "type=date", + DATE_RANGE_FIELD_NAME, "type=date_range", + OBJECT_FIELD_NAME, "type=object", + GEO_POINT_FIELD_NAME, "type=geo_point", + GEO_SHAPE_FIELD_NAME, "type=geo_shape" + ))), MapperService.MergeReason.MAPPING_UPDATE, false); + // also add mappings for two inner field in the object field + mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\"," + + "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" + + INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), + MapperService.MergeReason.MAPPING_UPDATE, false); + } + testCase.initializeAdditionalMappings(mapperService); + } + + @Override + public void close() throws IOException { + } + + QueryShardContext createShardContext(IndexReader reader) { + return new QueryShardContext(0, idxSettings, bitsetFilterCache, indexFieldDataService::getForField, mapperService, + similarityService, scriptService, xContentRegistry, namedWriteableRegistry, this.client, reader, () -> nowInMillis, null); + } + + ScriptModule createScriptModule(List scriptPlugins) { + if (scriptPlugins == null || scriptPlugins.isEmpty()) { + return newTestScriptModule(); + } + return new ScriptModule(Settings.EMPTY, scriptPlugins); + } + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index f8c73c69b9f29..d2f3a56aebe3d 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -25,33 +25,17 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.spans.SpanBoostQuery; -import org.apache.lucene.util.Accountable; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.Version; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; import org.elasticsearch.action.support.PlainActionFuture; -import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; -import org.elasticsearch.action.termvectors.MultiTermVectorsResponse; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; -import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import 
org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable.Reader; -import org.elasticsearch.common.settings.IndexScopedSettings; -import org.elasticsearch.common.settings.Setting; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.common.unit.Fuzziness; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; @@ -64,55 +48,18 @@ import org.elasticsearch.common.xcontent.XContentParser; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; -import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.env.Environment; -import org.elasticsearch.env.TestEnvironment; -import org.elasticsearch.index.Index; -import org.elasticsearch.index.IndexSettings; -import org.elasticsearch.index.analysis.IndexAnalyzers; -import org.elasticsearch.index.cache.bitset.BitsetFilterCache; -import org.elasticsearch.index.fielddata.IndexFieldData; -import org.elasticsearch.index.fielddata.IndexFieldDataCache; -import org.elasticsearch.index.fielddata.IndexFieldDataService; -import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.AbstractQueryBuilder; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryRewriteContext; import org.elasticsearch.index.query.QueryShardContext; import org.elasticsearch.index.query.Rewriteable; import org.elasticsearch.index.query.support.QueryParsers; -import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.index.similarity.SimilarityService; -import org.elasticsearch.indices.IndicesModule; -import org.elasticsearch.indices.analysis.AnalysisModule; -import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; -import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache; -import org.elasticsearch.indices.mapper.MapperRegistry; -import org.elasticsearch.node.InternalSettingsPreparer; -import org.elasticsearch.plugins.MapperPlugin; -import org.elasticsearch.plugins.Plugin; -import org.elasticsearch.plugins.PluginsService; -import org.elasticsearch.plugins.ScriptPlugin; -import org.elasticsearch.plugins.SearchPlugin; -import org.elasticsearch.script.ScriptModule; -import org.elasticsearch.script.ScriptService; -import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.internal.SearchContext; import org.joda.time.DateTime; import org.joda.time.DateTimeZone; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Before; -import org.junit.BeforeClass; -import java.io.Closeable; import java.io.IOException; -import java.lang.reflect.InvocationHandler; -import java.lang.reflect.Method; -import java.lang.reflect.Proxy; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.Deque; import java.util.HashSet; @@ -121,12 +68,7 @@ import java.util.Locale; import java.util.Map; import java.util.Set; -import java.util.concurrent.ExecutionException; -import java.util.function.Function; -import java.util.stream.Stream; -import static java.util.Collections.emptyList; -import static java.util.stream.Collectors.toList; import static org.elasticsearch.index.query.AbstractQueryBuilder.parseInnerQueryBuilder; import static 
org.elasticsearch.test.EqualsHashCodeTestUtils.checkEqualsAndHashCode; import static org.hamcrest.CoreMatchers.equalTo; @@ -136,115 +78,9 @@ import static org.hamcrest.Matchers.instanceOf; -public abstract class AbstractQueryTestCase> extends ESTestCase { - - public static final String STRING_FIELD_NAME = "mapped_string"; - protected static final String STRING_FIELD_NAME_2 = "mapped_string_2"; - protected static final String INT_FIELD_NAME = "mapped_int"; - protected static final String INT_RANGE_FIELD_NAME = "mapped_int_range"; - protected static final String DOUBLE_FIELD_NAME = "mapped_double"; - protected static final String BOOLEAN_FIELD_NAME = "mapped_boolean"; - protected static final String DATE_FIELD_NAME = "mapped_date"; - protected static final String DATE_RANGE_FIELD_NAME = "mapped_date_range"; - protected static final String OBJECT_FIELD_NAME = "mapped_object"; - protected static final String GEO_POINT_FIELD_NAME = "mapped_geo_point"; - protected static final String GEO_SHAPE_FIELD_NAME = "mapped_geo_shape"; - protected static final String[] MAPPED_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, OBJECT_FIELD_NAME, GEO_POINT_FIELD_NAME, - GEO_SHAPE_FIELD_NAME}; - private static final String[] MAPPED_LEAF_FIELD_NAMES = new String[]{STRING_FIELD_NAME, INT_FIELD_NAME, INT_RANGE_FIELD_NAME, - DOUBLE_FIELD_NAME, BOOLEAN_FIELD_NAME, DATE_FIELD_NAME, DATE_RANGE_FIELD_NAME, GEO_POINT_FIELD_NAME, }; - private static final int NUMBER_OF_TESTQUERIES = 20; - - protected static Version indexVersionCreated; - - private static ServiceHolder serviceHolder; - private static int queryNameId = 0; - private static Settings nodeSettings; - private static Index index; - private static String[] currentTypes; - private static String[] randomTypes; - - protected static Index getIndex() { - return index; - } - - protected static String[] getCurrentTypes() { - return currentTypes; - } - - protected Collection> getPlugins() { - return Collections.emptyList(); - } - - protected void initializeAdditionalMappings(MapperService mapperService) throws IOException { - } - - @BeforeClass - public static void beforeClass() { - nodeSettings = Settings.builder() - .put("node.name", AbstractQueryTestCase.class.toString()) - .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) - .build(); - - index = new Index(randomAlphaOfLengthBetween(1, 10), "_na_"); - - // Set a single type in the index - switch (random().nextInt(3)) { - case 0: - currentTypes = new String[0]; // no types - break; - default: - currentTypes = new String[] { "_doc" }; - break; - } - randomTypes = getRandomTypes(); - } - - protected Settings indexSettings() { - // we have to prefer CURRENT since with the range of versions we support it's rather unlikely to get the current actually. - indexVersionCreated = randomBoolean() ? 
Version.CURRENT - : VersionUtils.randomVersionBetween(random(), null, Version.CURRENT); - return Settings.builder() - .put(IndexMetaData.SETTING_VERSION_CREATED, indexVersionCreated) - .build(); - } - - @AfterClass - public static void afterClass() throws Exception { - IOUtils.close(serviceHolder); - serviceHolder = null; - } - - @Before - public void beforeTest() throws IOException { - if (serviceHolder == null) { - serviceHolder = new ServiceHolder(nodeSettings, indexSettings(), getPlugins(), this); - } - serviceHolder.clientInvocationHandler.delegate = this; - } +public abstract class AbstractQueryTestCase> extends AbstractBuilderTestCase { - private static SearchContext getSearchContext(String[] types, QueryShardContext context) { - TestSearchContext testSearchContext = new TestSearchContext(context) { - @Override - public MapperService mapperService() { - return serviceHolder.mapperService; // need to build / parse inner hits sort fields - } - - @Override - public > IFD getForField(MappedFieldType fieldType) { - return serviceHolder.indexFieldDataService.getForField(fieldType); // need to build / parse inner hits sort fields - } - - }; - testSearchContext.getQueryShardContext().setTypes(types); - return testSearchContext; - } - - @After - public void afterTest() { - serviceHolder.clientInvocationHandler.delegate = null; - } + private static final int NUMBER_OF_TESTQUERIES = 20; public final QB createTestQueryBuilder() { QB query = doCreateTestQueryBuilder(); @@ -260,15 +96,6 @@ public final QB createTestQueryBuilder() { return query; } - /** - * make sure query names are unique by suffixing them with increasing counter - */ - private static String createUniqueRandomName() { - String queryName = randomAlphaOfLengthBetween(1, 10) + queryNameId; - queryNameId++; - return queryName; - } - /** * Create the query that is being tested */ @@ -717,18 +544,18 @@ public void testSerialization() throws IOException { } } - protected static QueryBuilder assertSerialization(QueryBuilder testQuery) throws IOException { + protected QueryBuilder assertSerialization(QueryBuilder testQuery) throws IOException { return assertSerialization(testQuery, Version.CURRENT); } /** * Serialize the given query builder and asserts that both are equal */ - protected static QueryBuilder assertSerialization(QueryBuilder testQuery, Version version) throws IOException { + protected QueryBuilder assertSerialization(QueryBuilder testQuery, Version version) throws IOException { try (BytesStreamOutput output = new BytesStreamOutput()) { output.setVersion(version); output.writeNamedWriteable(testQuery); - try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), serviceHolder.namedWriteableRegistry)) { + try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry())) { in.setVersion(version); QueryBuilder deserializedQuery = in.readNamedWriteable(QueryBuilder.class); assertEquals(testQuery, deserializedQuery); @@ -780,15 +607,8 @@ protected QB changeNameOrBoost(QB original) throws IOException { //we use the streaming infra to create a copy of the query provided as argument @SuppressWarnings("unchecked") private QB copyQuery(QB query) throws IOException { - Reader reader = (Reader) serviceHolder.namedWriteableRegistry.getReader(QueryBuilder.class, query.getWriteableName()); - return copyWriteable(query, serviceHolder.namedWriteableRegistry, reader); - } - - /** - * @return a new {@link QueryShardContext} based on the base test index and 
queryParserService - */ - protected static QueryShardContext createShardContext() { - return serviceHolder.createShardContext(); + Reader reader = (Reader) namedWriteableRegistry().getReader(QueryBuilder.class, query.getWriteableName()); + return copyWriteable(query, namedWriteableRegistry(), reader); } /** @@ -840,7 +660,7 @@ protected static String getRandomQueryText() { */ protected static String getRandomFieldName() { // if no type is set then return a random field name - if (currentTypes.length == 0 || randomBoolean()) { + if (getCurrentTypes().length == 0 || randomBoolean()) { return randomAlphaOfLengthBetween(1, 10); } return randomFrom(MAPPED_LEAF_FIELD_NAMES); @@ -863,24 +683,6 @@ protected static String getRandomRewriteMethod() { return rewrite; } - private static String[] getRandomTypes() { - String[] types; - if (currentTypes.length > 0 && randomBoolean()) { - int numberOfQueryTypes = randomIntBetween(1, currentTypes.length); - types = new String[numberOfQueryTypes]; - for (int i = 0; i < numberOfQueryTypes; i++) { - types[i] = randomFrom(currentTypes); - } - } else { - if (randomBoolean()) { - types = new String[]{MetaData.ALL}; - } else { - types = new String[0]; - } - } - return types; - } - protected static Fuzziness randomFuzziness(String fieldName) { switch (fieldName) { case INT_FIELD_NAME: @@ -905,50 +707,6 @@ protected static String randomMinimumShouldMatch() { return randomFrom("1", "-1", "75%", "-25%", "2<75%", "2<-25%"); } - private static class ClientInvocationHandler implements InvocationHandler { - AbstractQueryTestCase delegate; - - @Override - public Object invoke(Object proxy, Method method, Object[] args) throws Throwable { - if (method.equals(Client.class.getMethod("get", GetRequest.class, ActionListener.class))){ - GetResponse getResponse = delegate.executeGet((GetRequest) args[0]); - ActionListener listener = (ActionListener) args[1]; - if (randomBoolean()) { - listener.onResponse(getResponse); - } else { - new Thread(() -> listener.onResponse(getResponse)).start(); - } - return null; - } else if (method.equals(Client.class.getMethod - ("multiTermVectors", MultiTermVectorsRequest.class))) { - return new PlainActionFuture() { - @Override - public MultiTermVectorsResponse get() throws InterruptedException, ExecutionException { - return delegate.executeMultiTermVectors((MultiTermVectorsRequest) args[0]); - } - }; - } else if (method.equals(Object.class.getMethod("toString"))) { - return "MockClient"; - } - throw new UnsupportedOperationException("this test can't handle calls to: " + method); - } - - } - - /** - * Override this to handle {@link Client#get(GetRequest)} calls from parsers / builders - */ - protected GetResponse executeGet(GetRequest getRequest) { - throw new UnsupportedOperationException("this test can't handle GET requests"); - } - - /** - * Override this to handle {@link Client#get(GetRequest)} calls from parsers / builders - */ - protected MultiTermVectorsResponse executeMultiTermVectors(MultiTermVectorsRequest mtvRequest) { - throw new UnsupportedOperationException("this test can't handle MultiTermVector requests"); - } - /** * Call this method to check a valid json string representing the query under test against * it's generated json. 
@@ -1015,113 +773,6 @@ protected Query rewrite(Query query) throws IOException { return query; } - @Override - protected NamedXContentRegistry xContentRegistry() { - return serviceHolder.xContentRegistry; - } - - private static class ServiceHolder implements Closeable { - private final IndexFieldDataService indexFieldDataService; - private final SearchModule searchModule; - private final NamedWriteableRegistry namedWriteableRegistry; - private final NamedXContentRegistry xContentRegistry; - private final ClientInvocationHandler clientInvocationHandler = new ClientInvocationHandler(); - private final IndexSettings idxSettings; - private final SimilarityService similarityService; - private final MapperService mapperService; - private final BitsetFilterCache bitsetFilterCache; - private final ScriptService scriptService; - private final Client client; - private final long nowInMillis = randomNonNegativeLong(); - - ServiceHolder(Settings nodeSettings, Settings indexSettings, - Collection> plugins, AbstractQueryTestCase testCase) throws IOException { - Environment env = InternalSettingsPreparer.prepareEnvironment(nodeSettings, null); - PluginsService pluginsService; - pluginsService = new PluginsService(nodeSettings, null, env.modulesFile(), env.pluginsFile(), plugins); - - client = (Client) Proxy.newProxyInstance( - Client.class.getClassLoader(), - new Class[]{Client.class}, - clientInvocationHandler); - ScriptModule scriptModule = createScriptModule(pluginsService.filterPlugins(ScriptPlugin.class)); - List> additionalSettings = pluginsService.getPluginSettings(); - additionalSettings.add(InternalSettingsPlugin.VERSION_CREATED); - SettingsModule settingsModule = new SettingsModule(nodeSettings, additionalSettings, pluginsService.getPluginSettingsFilter()); - searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)); - IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)); - List entries = new ArrayList<>(); - entries.addAll(indicesModule.getNamedWriteables()); - entries.addAll(searchModule.getNamedWriteables()); - namedWriteableRegistry = new NamedWriteableRegistry(entries); - xContentRegistry = new NamedXContentRegistry(Stream.of( - searchModule.getNamedXContents().stream() - ).flatMap(Function.identity()).collect(toList())); - IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings(); - idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings); - AnalysisModule analysisModule = new AnalysisModule(TestEnvironment.newEnvironment(nodeSettings), emptyList()); - IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings); - scriptService = scriptModule.getScriptService(); - similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap()); - MapperRegistry mapperRegistry = indicesModule.getMapperRegistry(); - mapperService = new MapperService(idxSettings, indexAnalyzers, xContentRegistry, similarityService, mapperRegistry, - this::createShardContext); - IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() { - }); - indexFieldDataService = new IndexFieldDataService(idxSettings, indicesFieldDataCache, - new NoneCircuitBreakerService(), mapperService); - bitsetFilterCache = new BitsetFilterCache(idxSettings, new BitsetFilterCache.Listener() { - @Override - public void onCache(ShardId shardId, Accountable accountable) { - - } - - @Override - 
public void onRemoval(ShardId shardId, Accountable accountable) { - - } - }); - - for (String type : currentTypes) { - mapperService.merge(type, new CompressedXContent(Strings.toString(PutMappingRequest.buildFromSimplifiedDef(type, - STRING_FIELD_NAME, "type=text", - STRING_FIELD_NAME_2, "type=keyword", - INT_FIELD_NAME, "type=integer", - INT_RANGE_FIELD_NAME, "type=integer_range", - DOUBLE_FIELD_NAME, "type=double", - BOOLEAN_FIELD_NAME, "type=boolean", - DATE_FIELD_NAME, "type=date", - DATE_RANGE_FIELD_NAME, "type=date_range", - OBJECT_FIELD_NAME, "type=object", - GEO_POINT_FIELD_NAME, "type=geo_point", - GEO_SHAPE_FIELD_NAME, "type=geo_shape" - ))), MapperService.MergeReason.MAPPING_UPDATE, false); - // also add mappings for two inner field in the object field - mapperService.merge(type, new CompressedXContent("{\"properties\":{\"" + OBJECT_FIELD_NAME + "\":{\"type\":\"object\"," - + "\"properties\":{\"" + DATE_FIELD_NAME + "\":{\"type\":\"date\"},\"" + - INT_FIELD_NAME + "\":{\"type\":\"integer\"}}}}}"), - MapperService.MergeReason.MAPPING_UPDATE, false); - } - testCase.initializeAdditionalMappings(mapperService); - } - - @Override - public void close() throws IOException { - } - - QueryShardContext createShardContext() { - return new QueryShardContext(0, idxSettings, bitsetFilterCache, indexFieldDataService::getForField, mapperService, - similarityService, scriptService, xContentRegistry, namedWriteableRegistry, this.client, null, () -> nowInMillis, null); - } - - ScriptModule createScriptModule(List scriptPlugins) { - if (scriptPlugins == null || scriptPlugins.isEmpty()) { - return newTestScriptModule(); - } - return new ScriptModule(Settings.EMPTY, scriptPlugins); - } - } - protected QueryBuilder rewriteAndFetch(QueryBuilder builder, QueryRewriteContext context) throws IOException { PlainActionFuture future = new PlainActionFuture<>(); Rewriteable.rewriteAndFetch(builder, context, future); From 0536c72383254a7c46381687ef14021e93780620 Mon Sep 17 00:00:00 2001 From: Adrien Grand Date: Wed, 16 May 2018 17:07:13 +0200 Subject: [PATCH 30/44] Remove unused DirectoryUtils class. (#30582) --- .../index/store/DirectoryUtils.java | 83 ------------------ .../index/store/DirectoryUtilsTests.java | 86 ------------------- 2 files changed, 169 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/index/store/DirectoryUtils.java delete mode 100644 server/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTests.java diff --git a/server/src/main/java/org/elasticsearch/index/store/DirectoryUtils.java b/server/src/main/java/org/elasticsearch/index/store/DirectoryUtils.java deleted file mode 100644 index 11203a8198770..0000000000000 --- a/server/src/main/java/org/elasticsearch/index/store/DirectoryUtils.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.index.store; - -import org.apache.lucene.store.Directory; -import org.apache.lucene.store.FileSwitchDirectory; -import org.apache.lucene.store.FilterDirectory; - -/** - * Utils for working with {@link Directory} classes. - */ -public final class DirectoryUtils { - - private DirectoryUtils() {} // no instance - - static Directory getLeafDirectory(FilterDirectory dir, Class targetClass) { - Directory current = dir.getDelegate(); - while (true) { - if ((current instanceof FilterDirectory)) { - if (targetClass != null && targetClass.isAssignableFrom(current.getClass())) { - break; - } - current = ((FilterDirectory) current).getDelegate(); - } else { - break; - } - } - return current; - } - - /** - * Tries to extract the leaf of the {@link Directory} if the directory is a {@link FilterDirectory} and cast - * it to the given target class or returns null if the leaf is not assignable to the target class. - * If the given {@link Directory} is a concrete directory it will treated as a leaf and the above applies. - */ - public static T getLeaf(Directory dir, Class targetClass) { - return getLeaf(dir, targetClass, null); - } - /** - * Tries to extract the leaf of the {@link Directory} if the directory is a {@link FilterDirectory} and cast - * it to the given target class or returns the given default value, if the leaf is not assignable to the target class. - * If the given {@link Directory} is a concrete directory it will treated as a leaf and the above applies. - */ - public static T getLeaf(Directory dir, Class targetClass, T defaultValue) { - Directory d = dir; - if (dir instanceof FilterDirectory) { - d = getLeafDirectory((FilterDirectory) dir, targetClass); - } - if (d instanceof FileSwitchDirectory) { - T leaf = getLeaf(((FileSwitchDirectory) d).getPrimaryDir(), targetClass); - if (leaf == null) { - d = getLeaf(((FileSwitchDirectory) d).getSecondaryDir(), targetClass, defaultValue); - } else { - d = leaf; - } - } - - if (d != null && targetClass.isAssignableFrom(d.getClass())) { - return targetClass.cast(d); - } else { - return defaultValue; - } - } - - -} diff --git a/server/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTests.java b/server/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTests.java deleted file mode 100644 index 57265872c4c79..0000000000000 --- a/server/src/test/java/org/elasticsearch/index/store/DirectoryUtilsTests.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.index.store; - -import org.apache.lucene.store.BaseDirectoryWrapper; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.store.FileSwitchDirectory; -import org.apache.lucene.store.FilterDirectory; -import org.apache.lucene.store.RAMDirectory; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; -import java.nio.file.Path; -import java.util.Collections; -import java.util.Set; - -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; -import static org.hamcrest.CoreMatchers.sameInstance; - -public class DirectoryUtilsTests extends ESTestCase { - public void testGetLeave() throws IOException { - Path file = createTempDir(); - final int iters = scaledRandomIntBetween(10, 100); - for (int i = 0; i < iters; i++) { - { - BaseDirectoryWrapper dir = newFSDirectory(file); - FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(dir) {}, FSDirectory.class, null); - assertThat(directory, notNullValue()); - assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null))); - dir.close(); - } - - { - BaseDirectoryWrapper dir = newFSDirectory(file); - FSDirectory directory = DirectoryUtils.getLeaf(dir, FSDirectory.class, null); - assertThat(directory, notNullValue()); - assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null))); - dir.close(); - } - - { - Set stringSet = Collections.emptySet(); - BaseDirectoryWrapper dir = newFSDirectory(file); - FSDirectory directory = DirectoryUtils.getLeaf(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean()), FSDirectory.class, null); - assertThat(directory, notNullValue()); - assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null))); - dir.close(); - } - - { - Set stringSet = Collections.emptySet(); - BaseDirectoryWrapper dir = newFSDirectory(file); - FSDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {}, FSDirectory.class, null); - assertThat(directory, notNullValue()); - assertThat(directory, sameInstance(DirectoryUtils.getLeafDirectory(dir, null))); - dir.close(); - } - - { - Set stringSet = Collections.emptySet(); - BaseDirectoryWrapper dir = newFSDirectory(file); - RAMDirectory directory = DirectoryUtils.getLeaf(new FilterDirectory(new FileSwitchDirectory(stringSet, dir, dir, random().nextBoolean())) {}, RAMDirectory.class, null); - assertThat(directory, nullValue()); - dir.close(); - } - - } - } -} From 26ca19e13ac235280da4738d51911292c5c73384 Mon Sep 17 00:00:00 2001 From: Jack Conradson Date: Mon, 14 May 2018 13:32:09 -0700 Subject: [PATCH 31/44] Deprecate Empty Templates (#30194) Deprecate the use of empty templates. Bug fix allows empty templates/scripts to be loaded on start up for upgrades/restarts, but empty templates can no longer be created. 
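In isolation, the new policy can be sketched as the following self-contained snippet. This is an illustrative distillation only, under assumed names (validateSource is invented here); the real logic lives in StoredScriptSource#build(boolean ignoreEmpty) in the diff below and reports through DeprecationLogger rather than java.util.logging:

import java.util.logging.Logger;

final class EmptySourcePolicySketch {

    private static final Logger DEPRECATION = Logger.getLogger("deprecation");
    private static final String TEMPLATE_LANG = "mustache"; // mirrors Script.DEFAULT_TEMPLATE_LANG

    // Empty sources are tolerated with a deprecation warning when loading existing
    // cluster state (ignoreEmpty == true) or when the script is a template; new
    // empty scripts are rejected outright.
    static void validateSource(String lang, String source, boolean ignoreEmpty) {
        if (source != null && source.isEmpty() == false) {
            return; // non-empty sources always pass
        }
        if (ignoreEmpty || TEMPLATE_LANG.equals(lang)) {
            if (TEMPLATE_LANG.equals(lang)) {
                DEPRECATION.warning("empty templates should no longer be used");
            } else {
                DEPRECATION.warning("empty scripts should no longer be used");
            }
        } else {
            throw new IllegalArgumentException("must specify source for stored script");
        }
    }

    public static void main(String[] args) {
        validateSource("mustache", "", false); // warns, still accepted (template)
        validateSource("painless", "", true);  // warns, still accepted (startup/upgrade path)
        try {
            validateSource("painless", "", false); // new empty script: rejected
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage());
        }
    }
}

The two call sites in the diff differ only in that flag: ScriptMetaData.fromXContent builds with true when reloading cluster state, while StoredScriptSource.parse, used when storing a new script, builds with false.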
--- .../elasticsearch/script/ScriptMetaData.java | 21 ++++++- .../script/StoredScriptSource.java | 62 ++++++++++++++--- .../script/ScriptMetaDataTests.java | 41 ++++++++++++ .../script/StoredScriptSourceTests.java | 2 +- .../script/StoredScriptTests.java | 36 ++++++++++- 5 files changed, 148 insertions(+), 14 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java index dca17ce486607..9505875ae1ebc 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetaData.java @@ -29,6 +29,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContentFragment; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; @@ -46,6 +48,11 @@ */ public final class ScriptMetaData implements MetaData.Custom, Writeable, ToXContentFragment { + /** + * Standard deprecation logger used to deprecate allowance of empty templates. + */ + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(ScriptMetaData.class)); + /** * A builder used to modify the currently stored scripts data held within * the {@link ClusterState}. Scripts can be added or deleted, then built @@ -161,8 +168,8 @@ static ScriptMetaData deleteStoredScript(ScriptMetaData previous, String id) { * * {@code * { - * "" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", - * "" : "<{@link StoredScriptSource#fromXContent(XContentParser)}>", + * "" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", + * "" : "<{@link StoredScriptSource#fromXContent(XContentParser, boolean)}>", * ...
* } * } @@ -209,6 +216,14 @@ public static ScriptMetaData fromXContent(XContentParser parser) throws IOExcept lang = id.substring(0, split); id = id.substring(split + 1); source = new StoredScriptSource(lang, parser.text(), Collections.emptyMap()); + + if (source.getSource().isEmpty()) { + if (source.getLang().equals(Script.DEFAULT_TEMPLATE_LANG)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } } exists = scripts.get(id); @@ -231,7 +246,7 @@ public static ScriptMetaData fromXContent(XContentParser parser) throws IOExcept } exists = scripts.get(id); - source = StoredScriptSource.fromXContent(parser); + source = StoredScriptSource.fromXContent(parser, true); if (exists == null) { scripts.put(id, source); diff --git a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java index 9c52ff943d2a1..da6dad1dff384 100644 --- a/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java +++ b/server/src/main/java/org/elasticsearch/script/StoredScriptSource.java @@ -32,6 +32,8 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.ObjectParser; @@ -57,6 +59,11 @@ */ public class StoredScriptSource extends AbstractDiffable implements Writeable, ToXContentObject { + /** + * Standard deprecation logger used to deprecate allowance of empty templates. + */ + private static final DeprecationLogger DEPRECATION_LOGGER = new DeprecationLogger(Loggers.getLogger(StoredScriptSource.class)); + /** * Standard {@link ParseField} for outer level of stored script source. */ @@ -109,7 +116,7 @@ private void setLang(String lang) { private void setSource(XContentParser parser) { try { if (parser.currentToken() == Token.START_OBJECT) { - //this is really for search templates, that need to be converted to json format + // this is really for search templates, that need to be converted to json format XContentBuilder builder = XContentFactory.jsonBuilder(); source = Strings.toString(builder.copyCurrentStructure(parser)); options.put(Script.CONTENT_TYPE_OPTION, XContentType.JSON.mediaType()); @@ -131,8 +138,12 @@ private void setOptions(Map options) { /** * Validates the parameters and creates an {@link StoredScriptSource}. + * + * @param ignoreEmpty Specify as {@code true} to ignore the empty source check. + * This allows empty templates to be loaded for backwards compatibility.
*/ - private StoredScriptSource build() { + private StoredScriptSource build(boolean ignoreEmpty) { if (lang == null) { throw new IllegalArgumentException("must specify lang for stored script"); } else if (lang.isEmpty()) { @@ -140,9 +151,25 @@ private StoredScriptSource build() { } if (source == null) { - throw new IllegalArgumentException("must specify source for stored script"); + if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } else { + throw new IllegalArgumentException("must specify source for stored script"); + } } else if (source.isEmpty()) { - throw new IllegalArgumentException("source cannot be empty"); + if (ignoreEmpty || Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + if (Script.DEFAULT_TEMPLATE_LANG.equals(lang)) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } else { + DEPRECATION_LOGGER.deprecated("empty scripts should no longer be used"); + } + } else { + throw new IllegalArgumentException("source cannot be empty"); + } } if (options.size() > 1 || options.size() == 1 && options.get(Script.CONTENT_TYPE_OPTION) == null) { @@ -257,6 +284,8 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon token = parser.nextToken(); if (token == Token.END_OBJECT) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); } @@ -271,7 +300,7 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon token = parser.nextToken(); if (token == Token.START_OBJECT) { - return PARSER.apply(parser, null).build(); + return PARSER.apply(parser, null).build(false); } else { throw new ParsingException(parser.getTokenLocation(), "unexpected token [" + token + "], expected [{, ]"); } @@ -280,7 +309,13 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon token = parser.nextToken(); if (token == Token.VALUE_STRING) { - return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, parser.text(), Collections.emptyMap()); + String source = parser.text(); + + if (source == null || source.isEmpty()) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } + + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap()); } } @@ -293,7 +328,13 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon builder.copyCurrentStructure(parser); } - return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, Strings.toString(builder), Collections.emptyMap()); + String source = Strings.toString(builder); + + if (source == null || source.isEmpty()) { + DEPRECATION_LOGGER.deprecated("empty templates should no longer be used"); + } + + return new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, source, Collections.emptyMap()); } } } catch (IOException ioe) { @@ -320,9 +361,12 @@ public static StoredScriptSource parse(BytesReference content, XContentType xCon * * Note that the "source" parameter can also handle template parsing including from * a complex JSON object. + * + * @param ignoreEmpty Specify as {@code true} to ignore the empty source check. + * This allows empty templates to be loaded for backwards compatibility.
*/ - public static StoredScriptSource fromXContent(XContentParser parser) { - return PARSER.apply(parser, null).build(); + public static StoredScriptSource fromXContent(XContentParser parser, boolean ignoreEmpty) { + return PARSER.apply(parser, null).build(ignoreEmpty); } /** diff --git a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java index d5769cd192b75..32d4d48a44810 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java +++ b/server/src/test/java/org/elasticsearch/script/ScriptMetaDataTests.java @@ -22,6 +22,8 @@ import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.DeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -130,6 +132,45 @@ public void testBuilder() { assertEquals("1 + 1", result.getStoredScript("_id").getSource()); } + public void testLoadEmptyScripts() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject().field("mustache#empty", "").endObject(); + XContentParser parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty templates should no longer be used"); + + builder = XContentFactory.jsonBuilder(); + builder.startObject().field("lang#empty", "").endObject(); + parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty scripts should no longer be used"); + + builder = XContentFactory.jsonBuilder(); + builder.startObject().startObject("script").field("lang", "lang").field("source", "").endObject().endObject(); + parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty scripts should no longer be used"); + + builder = XContentFactory.jsonBuilder(); + builder.startObject().startObject("script").field("lang", "mustache").field("source", "").endObject().endObject(); + parser = XContentType.JSON.xContent() + .createParser(NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, + BytesReference.bytes(builder).streamInput()); + ScriptMetaData.fromXContent(parser); + assertWarnings("empty templates should no longer be used"); + } + + @Override + protected boolean enableWarningsCheck() { + return true; + } + private ScriptMetaData randomScriptMetaData(XContentType sourceContentType, int minNumberScripts) throws IOException { ScriptMetaData.Builder builder = new ScriptMetaData.Builder(null); int numScripts = scaledRandomIntBetween(minNumberScripts, 32); diff --git a/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java b/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java index 168ec4fc553b9..8aa4ca57acfed 100644 --- 
a/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java +++ b/server/src/test/java/org/elasticsearch/script/StoredScriptSourceTests.java @@ -58,7 +58,7 @@ protected StoredScriptSource createTestInstance() { @Override protected StoredScriptSource doParseInstance(XContentParser parser) { - return StoredScriptSource.fromXContent(parser); + return StoredScriptSource.fromXContent(parser, false); } @Override diff --git a/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java b/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java index 2bf0216c546ec..79e3195f3d923 100644 --- a/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java +++ b/server/src/test/java/org/elasticsearch/script/StoredScriptTests.java @@ -29,6 +29,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.test.AbstractSerializingTestCase; +import java.io.IOException; import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -204,6 +205,39 @@ public void testSourceParsingErrors() throws Exception { } } + public void testEmptyTemplateDeprecations() throws IOException { + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject().endObject(); + + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); + + assertThat(parsed, equalTo(source)); + assertWarnings("empty templates should no longer be used"); + } + + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject().field("template", "").endObject(); + + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); + + assertThat(parsed, equalTo(source)); + assertWarnings("empty templates should no longer be used"); + } + + try (XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON)) { + builder.startObject().field("script").startObject().field("lang", "mustache") + .field("source", "").endObject().endObject(); + + StoredScriptSource parsed = StoredScriptSource.parse(BytesReference.bytes(builder), XContentType.JSON); + StoredScriptSource source = new StoredScriptSource(Script.DEFAULT_TEMPLATE_LANG, "", Collections.emptyMap()); + + assertThat(parsed, equalTo(source)); + assertWarnings("empty templates should no longer be used"); + } + } + @Override protected StoredScriptSource createTestInstance() { return new StoredScriptSource( @@ -219,7 +253,7 @@ protected Writeable.Reader instanceReader() { @Override protected StoredScriptSource doParseInstance(XContentParser parser) { - return StoredScriptSource.fromXContent(parser); + return StoredScriptSource.fromXContent(parser, false); } @Override From b2c88df7f49d71eeb40dc997334c3a45e7ee94da Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Wed, 16 May 2018 19:30:18 +0200 Subject: [PATCH 32/44] Delay _uid field data deprecation warning (#30651) A deprecation warning is printed when creating the fielddata builder for the `_uid` field. This change moves the deprecation logging to the building of the fielddata since otherwise APIs like `_field_caps` can emit a deprecation warning when they just test the capabilities of the `_uid` field.
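The shape of the fix, reduced to a standalone sketch (hypothetical names and types; in the real change below, the warning moves from UidFieldType#fielddataBuilder into the returned builder's build method):

import java.util.function.Supplier;

final class LazyDeprecationSketch {

    static int warningsEmitted = 0; // stand-in for DeprecationLogger output

    // Before: warning emitted as soon as the builder is created, so even a
    // capability check such as _field_caps triggers it.
    static Supplier<String> eagerBuilder() {
        warningsEmitted++;
        return () -> "fielddata";
    }

    // After: warning emitted only when the builder actually builds, that is,
    // on real fielddata access.
    static Supplier<String> lazyBuilder() {
        return () -> {
            warningsEmitted++;
            return "fielddata";
        };
    }

    public static void main(String[] args) {
        lazyBuilder();                        // probing the field: no warning
        lazyBuilder().get();                  // actual access: one warning
        System.out.println(warningsEmitted);  // prints 1
    }
}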
Closes #30625 --- .../index/mapper/UidFieldMapper.java | 2 +- .../index/mapper/UidFieldTypeTests.java | 40 +++++++++++++++++++ 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java index 95dc40bca637a..6b2c584663ba5 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/UidFieldMapper.java @@ -113,11 +113,11 @@ public String typeName() { @Override public IndexFieldData.Builder fielddataBuilder(String fullyQualifiedIndexName) { if (indexOptions() == IndexOptions.NONE) { - DEPRECATION_LOGGER.deprecated("Fielddata access on the _uid field is deprecated, use _id instead"); return new IndexFieldData.Builder() { @Override public IndexFieldData build(IndexSettings indexSettings, MappedFieldType fieldType, IndexFieldDataCache cache, CircuitBreakerService breakerService, MapperService mapperService) { + DEPRECATION_LOGGER.deprecated("Fielddata access on the _uid field is deprecated, use _id instead"); MappedFieldType idFieldType = mapperService.fullName(IdFieldMapper.NAME); IndexFieldData idFieldData = idFieldType.fielddataBuilder(fullyQualifiedIndexName) .build(indexSettings, idFieldType, cache, breakerService, mapperService); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java index 9b2e0ceb0721f..174f09a2eee90 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/UidFieldTypeTests.java @@ -25,16 +25,25 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.breaker.CircuitBreaker; +import org.elasticsearch.common.breaker.NoopCircuitBreaker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.fielddata.IndexFieldData; +import org.elasticsearch.index.fielddata.IndexFieldDataCache; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.UidFieldMapper; import org.elasticsearch.index.query.QueryShardContext; +import org.elasticsearch.indices.breaker.CircuitBreakerService; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.mockito.Mockito; +import java.io.IOException; import java.util.Collection; import java.util.Collections; +import static org.mockito.Matchers.any; + public class UidFieldTypeTests extends FieldTypeTestCase { @Override protected MappedFieldType createDefaultFieldType() { @@ -132,4 +141,35 @@ public void testTermsQuery() throws Exception { query = ft.termQuery("type2#id", context); assertEquals(new TermInSetQuery("_id"), query); } + + public void testIsAggregatable() { + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); + IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + MappedFieldType ft = 
UidFieldMapper.defaultFieldType(mockSettings); + assertTrue(ft.isAggregatable()); + } + + public void testFieldDataDeprecation() { + Settings indexSettings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + IndexMetaData indexMetaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(indexSettings).build(); + IndexSettings mockSettings = new IndexSettings(indexMetaData, Settings.EMPTY); + MappedFieldType ft = UidFieldMapper.defaultFieldType(mockSettings); + IndexFieldData.Builder builder = ft.fielddataBuilder(""); + MapperService mockMapper = Mockito.mock(MapperService.class); + Mockito.when(mockMapper.fullName(any())).thenReturn(new IdFieldMapper.IdFieldType()); + Mockito.when(mockMapper.types()).thenReturn(Collections.singleton("doc")); + builder.build(mockSettings, ft, null, new NoneCircuitBreakerService(), mockMapper); + assertWarnings("Fielddata access on the _uid field is deprecated, use _id instead"); + } } From 1e03189c800a438a22a49a52ba883f530b075b89 Mon Sep 17 00:00:00 2001 From: jaymode Date: Wed, 16 May 2018 11:59:49 -0600 Subject: [PATCH 33/44] Test: increase search logging for LicensingTests This commit increases the logging level around search to aid in debugging failures in LicensingTests#testSecurityActionsByLicenseType where we are seeing all shards failed error while trying to search the security index. See #30301 --- .../test/java/org/elasticsearch/license/LicensingTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java index 351cf91bf9428..c92b311faae85 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/license/LicensingTests.java @@ -53,7 +53,8 @@ import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; -@TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE") +@TestLogging("org.elasticsearch.cluster.service:TRACE,org.elasticsearch.discovery.zen:TRACE,org.elasticsearch.action.search:TRACE," + + "org.elasticsearch.search:TRACE") public class LicensingTests extends SecurityIntegTestCase { public static final String ROLES = SecuritySettingsSource.TEST_ROLE + ":\n" + From a0ef529f28d89c0351ad3386580ed7dd165ebbdd Mon Sep 17 00:00:00 2001 From: lcawl Date: Mon, 14 May 2018 16:45:09 -0700 Subject: [PATCH 34/44] [DOCS] Reorganizes RBAC documentation --- .../authorization/built-in-roles.asciidoc | 114 +++++++ .../authorization/managing-roles.asciidoc | 175 +++++++++++ .../security/authorization/overview.asciidoc | 290 +----------------- .../privileges.asciidoc | 9 +- x-pack/docs/en/security/reference.asciidoc | 2 - 5 files changed, 297 insertions(+), 293 deletions(-) create mode 100644 x-pack/docs/en/security/authorization/built-in-roles.asciidoc create mode 100644 x-pack/docs/en/security/authorization/managing-roles.asciidoc rename x-pack/docs/en/security/{reference => authorization}/privileges.asciidoc (97%) diff --git a/x-pack/docs/en/security/authorization/built-in-roles.asciidoc b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc new file mode 100644 index 
0000000000000..f336393d81d3d --- /dev/null +++ b/x-pack/docs/en/security/authorization/built-in-roles.asciidoc @@ -0,0 +1,114 @@ +[role="xpack"] +[[built-in-roles]] +=== Built-in roles + +{security} applies a default role to all users, including +<>. The default role enables users to access +the authenticate endpoint, change their own passwords, and get information about +themselves. + +{security} also provides a set of built-in roles you can explicitly assign +to users. These roles have a fixed set of privileges and cannot be updated. + +[[built-in-roles-ingest-user]] `ingest_admin` :: +Grants access to manage *all* index templates and *all* ingest pipeline configurations. ++ +NOTE: This role does *not* provide the ability to create indices; those privileges +must be defined in a separate role. + +[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` :: +Grants access to the {kib} Dashboard and read-only permissions on the `.kibana` +index. This role does not have access to editing tools in {kib}. For more +information, see +{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode]. + +[[built-in-roles-kibana-system]] `kibana_system` :: +Grants access necessary for the {kib} system user to read from and write to the +{kib} indices, manage index templates, and check the availability of the {es} cluster. +This role grants read access to the `.monitoring-*` indices and read and write access +to the `.reporting-*` indices. For more information, see +{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. + +[[built-in-roles-kibana-user]] `kibana_user`:: +Grants the minimum privileges required for any user of {kib}. This role grants +access to the {kib} indices and grants monitoring privileges for the cluster. + +[[built-in-roles-logstash-admin]] `logstash_admin` :: +Grants access to the `.logstash*` indices for managing configurations. + +[[built-in-roles-logstash-system]] `logstash_system` :: +Grants access necessary for the Logstash system user to send system-level data +(such as monitoring) to {es}. For more information, see +{logstash-ref}/ls-security.html[Configuring Security in Logstash]. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. ++ +NOTE: This role does not provide access to the logstash indices and is not +suitable for use within a Logstash pipeline. + +[[built-in-roles-beats-system]] `beats_system` :: +Grants access necessary for the Beats system user to send system-level data +(such as monitoring) to {es}. ++ +NOTE: This role should not be assigned to users as the granted permissions may +change between releases. ++ +NOTE: This role does not provide access to the beats indices and is not +suitable for writing beats output to {es}. + +[[built-in-roles-ml-admin]] `machine_learning_admin`:: +Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices. + +[[built-in-roles-ml-user]] `machine_learning_user`:: +Grants the minimum privileges required to view {xpackml} configuration, +status, and results. This role grants `monitor_ml` cluster privileges and +read access to the `.ml-notifications` and `.ml-anomalies*` indices, +which store {ml} results. + +[[built-in-roles-monitoring-user]] `monitoring_user`:: +Grants the minimum privileges required for any user of {monitoring} other than those +required to use {kib}. 
This role grants access to the monitoring indices and grants +privileges necessary for reading basic cluster information. Monitoring users should +also be assigned the `kibana_user` role. + +[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`:: +Grants the minimum privileges required for a remote monitoring agent to write data +into this cluster. + +[[built-in-roles-reporting-user]] `reporting_user`:: +Grants the specific privileges required for users of {reporting} other than those +required to use {kib}. This role grants access to the reporting indices. Reporting +users should also be assigned the `kibana_user` role and a role that grants them +access to the data that will be used to generate reports with. + +[[built-in-roles-superuser]] `superuser`:: +Grants full access to the cluster, including all indices and data. A user with +the `superuser` role can also manage users and roles and +<> any other user in the system. Due to the +permissive nature of this role, take extra care when assigning it to a user. + +[[built-in-roles-transport-client]] `transport_client`:: +Grants the privileges required to access the cluster through the Java Transport +Client. The Java Transport Client fetches information about the nodes in the +cluster using the _Node Liveness API_ and the _Cluster State API_ (when +sniffing is enabled). Assign your users this role if they use the +Transport Client. ++ +NOTE: Using the Transport Client effectively means the users are granted access +to the cluster state. This means users can view the metadata over all indices, +index templates, mappings, node and basically everything about the cluster. +However, this role does not grant permission to view the data in all indices. + +[[built-in-roles-watcher-admin]] `watcher_admin`:: ++ +Grants write access to the `.watches` index, read access to the watch history and +the triggered watches index and allows to execute all watcher actions. + +[[built-in-roles-watcher-user]] `watcher_user`:: ++ +Grants read access to the `.watches` index, the get watch action and the watcher +stats. \ No newline at end of file diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc new file mode 100644 index 0000000000000..83edef1a67ba4 --- /dev/null +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -0,0 +1,175 @@ +[role="xpack"] +[[defining-roles]] +=== Defining roles + +A role is defined by the following JSON structure: + +[source,js] +----- +{ + "run_as": [ ... ], <1> + "cluster": [ ... ], <2> + "indices": [ ... ] <3> +} +----- +<1> A list of usernames the owners of this role can <>. +<2> A list of cluster privileges. These privileges define the + cluster level actions users with this role are able to execute. This field + is optional (missing `cluster` privileges effectively mean no cluster level + permissions). +<3> A list of indices permissions entries. This field is optional (missing `indices` + privileges effectively mean no index level permissions). + +[[valid-role-name]] +NOTE: Role names must be at least 1 and no more than 1024 characters. They can + contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, + punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. + Leading or trailing whitespace is not allowed. + +The following describes the structure of an indices permissions entry: + +[source,js] +------- +{ + "names": [ ... 
+  "privileges": [ ... ], <2>
+  "field_security" : { ... }, <3>
+  "query": "..." <4>
+}
+-------
+<1> A list of indices (or index name patterns) to which the permissions in this
+    entry apply.
+<2> The index level privileges the owners of the role have on the associated
+    indices (those indices that are specified in the `names` field).
+<3> Specification for document fields the owners of the role have read access to.
+    See <> for details.
+<4> A search query that defines the documents the owners of the role have read
+    access to. A document within the associated indices must match this query
+    in order for it to be accessible by the owners of the role.
+
+[TIP]
+==============================================================================
+When specifying index names, you can use indices and aliases with their full
+names or regular expressions that refer to multiple indices.
+
+* Wildcard (default) - simple wildcard matching where `*` is a placeholder
+  for zero or more characters, `?` is a placeholder for a single character,
+  and `\` may be used as an escape character.
+
+* Regular Expressions - A more powerful syntax for matching more complex
+  patterns. This regular expression is based on Lucene's regexp automaton
+  syntax. To enable this syntax, it must be wrapped within a pair of
+  forward slashes (`/`). Any pattern starting with `/` and not ending with
+  `/` is considered to be malformed.
+
+.Example Regular Expressions
+[source,yaml]
+------------------------------------------------------------------------------
+"foo-bar":             # match the literal `foo-bar`
+"foo-*":               # match anything beginning with "foo-"
+"logstash-201?-*":     # ? matches any one character
+"/.*-201[0-9]-.*/":    # use a regex to match anything containing 2010-2019
+"/foo":                # syntax error - missing final /
+------------------------------------------------------------------------------
+==============================================================================
+
+The following snippet shows an example definition of a `clicks_admin` role:
+
+[source,js]
+-----------
+{
+  "run_as": [ "clicks_watcher_1" ],
+  "cluster": [ "monitor" ],
+  "indices": [
+    {
+      "names": [ "events-*" ],
+      "privileges": [ "read" ],
+      "field_security" : {
+        "grant" : [ "category", "@timestamp", "message" ]
+      },
+      "query": "{\"match\": {\"category\": \"click\"}}"
+    }
+  ]
+}
+-----------
+
+Based on the above definition, users owning the `clicks_admin` role can:
+
+  * Impersonate the `clicks_watcher_1` user and execute requests on its behalf.
+  * Monitor the {es} cluster
+  * Read data from all indices prefixed with `events-`
+  * Within these indices, only read the events of the `click` category
+  * Within these documents, only read the `category`, `@timestamp` and `message`
+    fields.
+
+TIP: For a complete list of available <>
+
+There are two available mechanisms to define roles: using the _Role Management APIs_
+or in local files on the {es} nodes. {security} also supports implementing
+custom roles providers. If you need to integrate with another system to retrieve
+user roles, you can build a custom roles provider plugin. For more information,
+see <>.
+
+[float]
+[[roles-management-ui]]
+=== Role management UI
+
+{security} enables you to easily manage users and roles from within {kib}. To
+manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*.
+
+[float]
+[[roles-management-api]]
+=== Role management API
+
+The _Role Management APIs_ enable you to add, update, remove and retrieve roles
+dynamically. When you use the APIs to manage roles in the `native` realm, the
+roles are stored in an internal {es} index. For more information and examples,
+see {ref}/security-api-roles.html[Role Management APIs].
+
+[float]
+[[roles-management-file]]
+=== File-based role management
+
+Apart from the _Role Management APIs_, roles can also be defined in a local
+`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each
+role definition is keyed by its name.
+
+[IMPORTANT]
+==============================
+If the same role name is used in the `roles.yml` file and through the
+_Role Management APIs_, the role found in the file will be used.
+==============================
+
+While the _Role Management APIs_ are the preferred mechanism to define roles,
+using the `roles.yml` file becomes useful if you want to define fixed roles that
+no one (besides an administrator with physical access to the {es} nodes)
+would be able to change.
+
+[IMPORTANT]
+==============================
+The `roles.yml` file is managed locally by each node and is not managed globally
+by the cluster. This means that with a typical multi-node cluster, the exact same
+changes need to be applied on each and every node in the cluster.
+
+A safer approach would be to apply the change on one of the nodes and have the
+`roles.yml` distributed/copied to all other nodes in the cluster (either
+manually or using a configuration management system such as Puppet or Chef).
+==============================
+
+The following snippet shows an example of the `roles.yml` file configuration:
+
+[source,yaml]
+-----------------------------------
+click_admins:
+  run_as: [ 'clicks_watcher_1' ]
+  cluster: [ 'monitor' ]
+  indices:
+    - names: [ 'events-*' ]
+      privileges: [ 'read' ]
+      field_security:
+        grant: ['category', '@timestamp', 'message' ]
+      query: '{"match": {"category": "click"}}'
+-----------------------------------
+
+{security} continuously monitors the `roles.yml` file and automatically picks
+up and applies any changes to it.
diff --git a/x-pack/docs/en/security/authorization/overview.asciidoc b/x-pack/docs/en/security/authorization/overview.asciidoc
index 9dc8185db4d34..98a1ad8b786b6 100644
--- a/x-pack/docs/en/security/authorization/overview.asciidoc
+++ b/x-pack/docs/en/security/authorization/overview.asciidoc
@@ -49,295 +49,11 @@
 As an administrator, you will need to define the roles that you want to use,
 then assign users to the roles. These can be assigned to users in a number of
 ways depending on the realms by which the users are authenticated.
 
-[[built-in-roles]]
-=== Built-in roles
+include::built-in-roles.asciidoc[]
 
-{security} applies a default role to all users, including
-<>. The default role enables users to access
-the authenticate endpoint, change their own passwords, and get information about
-themselves.
+include::managing-roles.asciidoc[]
 
-{security} also provides a set of built-in roles you can explicitly assign
-to users. These roles have a fixed set of privileges and cannot be updated.
-
-[[built-in-roles-ingest-user]] `ingest_admin` ::
-Grants access to manage *all* index templates and *all* ingest pipeline configurations.
-+
-NOTE: This role does *not* provide the ability to create indices; those privileges
-must be defined in a separate role.
-
-[[built-in-roles-kibana-dashboard]] `kibana_dashboard_only_user` ::
-Grants access to the {kib} Dashboard and read-only permissions on the `.kibana`
-index. This role does not have access to editing tools in {kib}.
For more -information, see -{kibana-ref}/xpack-dashboard-only-mode.html[{kib} Dashboard Only Mode]. - -[[built-in-roles-kibana-system]] `kibana_system` :: -Grants access necessary for the {kib} system user to read from and write to the -{kib} indices, manage index templates, and check the availability of the {es} cluster. -This role grants read access to the `.monitoring-*` indices and read and write access -to the `.reporting-*` indices. For more information, see -{kibana-ref}/using-kibana-with-security.html[Configuring Security in {kib}]. -+ -NOTE: This role should not be assigned to users as the granted permissions may -change between releases. - -[[built-in-roles-kibana-user]] `kibana_user`:: -Grants the minimum privileges required for any user of {kib}. This role grants -access to the {kib} indices and grants monitoring privileges for the cluster. - -[[built-in-roles-logstash-admin]] `logstash_admin` :: -Grants access to the `.logstash*` indices for managing configurations. - -[[built-in-roles-logstash-system]] `logstash_system` :: -Grants access necessary for the Logstash system user to send system-level data -(such as monitoring) to {es}. For more information, see -{logstash-ref}/ls-security.html[Configuring Security in Logstash]. -+ -NOTE: This role should not be assigned to users as the granted permissions may -change between releases. -+ -NOTE: This role does not provide access to the logstash indices and is not -suitable for use within a Logstash pipeline. - -[[built-in-roles-beats-system]] `beats_system` :: -Grants access necessary for the Beats system user to send system-level data -(such as monitoring) to {es}. -+ -NOTE: This role should not be assigned to users as the granted permissions may -change between releases. -+ -NOTE: This role does not provide access to the beats indices and is not -suitable for writing beats output to {es}. - -[[built-in-roles-ml-admin]] `machine_learning_admin`:: -Grants `manage_ml` cluster privileges and read access to the `.ml-*` indices. - -[[built-in-roles-ml-user]] `machine_learning_user`:: -Grants the minimum privileges required to view {xpackml} configuration, -status, and results. This role grants `monitor_ml` cluster privileges and -read access to the `.ml-notifications` and `.ml-anomalies*` indices, -which store {ml} results. - -[[built-in-roles-monitoring-user]] `monitoring_user`:: -Grants the minimum privileges required for any user of {monitoring} other than those -required to use {kib}. This role grants access to the monitoring indices and grants -privileges necessary for reading basic cluster information. Monitoring users should -also be assigned the `kibana_user` role. - -[[built-in-roles-remote-monitoring-agent]] `remote_monitoring_agent`:: -Grants the minimum privileges required for a remote monitoring agent to write data -into this cluster. - -[[built-in-roles-reporting-user]] `reporting_user`:: -Grants the specific privileges required for users of {reporting} other than those -required to use {kib}. This role grants access to the reporting indices. Reporting -users should also be assigned the `kibana_user` role and a role that grants them -access to the data that will be used to generate reports with. - -[[built-in-roles-superuser]] `superuser`:: -Grants full access to the cluster, including all indices and data. A user with -the `superuser` role can also manage users and roles and -<> any other user in the system. Due to the -permissive nature of this role, take extra care when assigning it to a user. 
- -[[built-in-roles-transport-client]] `transport_client`:: -Grants the privileges required to access the cluster through the Java Transport -Client. The Java Transport Client fetches information about the nodes in the -cluster using the _Node Liveness API_ and the _Cluster State API_ (when -sniffing is enabled). Assign your users this role if they use the -Transport Client. -+ -NOTE: Using the Transport Client effectively means the users are granted access -to the cluster state. This means users can view the metadata over all indices, -index templates, mappings, node and basically everything about the cluster. -However, this role does not grant permission to view the data in all indices. - -[[built-in-roles-watcher-admin]] `watcher_admin`:: -+ -Grants write access to the `.watches` index, read access to the watch history and -the triggered watches index and allows to execute all watcher actions. - -[[built-in-roles-watcher-user]] `watcher_user`:: -+ -Grants read access to the `.watches` index, the get watch action and the watcher -stats. - - -[[defining-roles]] -=== Defining roles - -A role is defined by the following JSON structure: - -[source,js] ------ -{ - "run_as": [ ... ], <1> - "cluster": [ ... ], <2> - "indices": [ ... ] <3> -} ------ -<1> A list of usernames the owners of this role can <>. -<2> A list of cluster privileges. These privileges define the - cluster level actions users with this role are able to execute. This field - is optional (missing `cluster` privileges effectively mean no cluster level - permissions). -<3> A list of indices permissions entries. This field is optional (missing `indices` - privileges effectively mean no index level permissions). - -[[valid-role-name]] -NOTE: Role names must be at least 1 and no more than 1024 characters. They can - contain alphanumeric characters (`a-z`, `A-Z`, `0-9`), spaces, - punctuation, and printable symbols in the https://en.wikipedia.org/wiki/Basic_Latin_(Unicode_block)[Basic Latin (ASCII) block]. - Leading or trailing whitespace is not allowed. - -The following describes the structure of an indices permissions entry: - -[source,js] -------- -{ - "names": [ ... ], <1> - "privileges": [ ... ], <2> - "field_security" : { ... }, <3> - "query": "..." <4> -} -------- -<1> A list of indices (or index name patterns) to which the permissions in this - entry apply. -<2> The index level privileges the owners of the role have on the associated - indices (those indices that are specified in the `name` field) -<3> Specification for document fields the owners of the role have read access to. - See <> for details. -<4> A search query that defines the documents the owners of the role have read - access to. A document within the associated indices must match this query - in order for it to be accessible by the owners of the role. - -[TIP] -============================================================================== -When specifying index names, you can use indices and aliases with their full -names or regular expressions that refer to multiple indices. - -* Wildcard (default) - simple wildcard matching where `*` is a placeholder - for zero or more characters, `?` is a placeholder for a single character - and `\` may be used as an escape character. - -* Regular Expressions - A more powerful syntax for matching more complex - patterns. This regular expression is based on Lucene's regexp automaton - syntax. To enable this syntax, it must be wrapped within a pair of - forward slashes (`/`). 
Any pattern starting with `/` and not ending with - `/` is considered to be malformed. - -.Example Regular Expressions -[source,yaml] ------------------------------------------------------------------------------- -"foo-bar": # match the literal `foo-bar` -"foo-*": # match anything beginning with "foo-" -"logstash-201?-*": # ? matches any one character -"/.*-201[0-9]-.*/": # use a regex to match anything containing 2010-2019 -"/foo": # syntax error - missing final / ------------------------------------------------------------------------------- -============================================================================== - -The following snippet shows an example definition of a `clicks_admin` role: - -[source,js] ------------ -{ - "run_as": [ "clicks_watcher_1" ] - "cluster": [ "monitor" ], - "indices": [ - { - "names": [ "events-*" ], - "privileges": [ "read" ], - "field_security" : { - "grant" : [ "category", "@timestamp", "message" ] - }, - "query": "{\"match\": {\"category\": \"click\"}}" - } - ] -} ------------ - -Based on the above definition, users owning the `clicks_admin` role can: - - * Impersonate the `clicks_watcher_1` user and execute requests on its behalf. - * Monitor the {es} cluster - * Read data from all indices prefixed with `events-` - * Within these indices, only read the events of the `click` category - * Within these document, only read the `category`, `@timestamp` and `message` - fields. - -TIP: For a complete list of available <> - -There are two available mechanisms to define roles: using the _Role Management APIs_ -or in local files on the {es} nodes. {security} also supports implementing -custom roles providers. If you need to integrate with another system to retrieve -user roles, you can build a custom roles provider plugin. For more information, -see <>. - -[float] -[[roles-management-ui]] -=== Role management UI - -{security} enables you to easily manage users and roles from within {kib}. To -manage roles, log in to {kib} and go to *Management / Elasticsearch / Roles*. - -[float] -[[roles-management-api]] -=== Role management API - -The _Role Management APIs_ enable you to add, update, remove and retrieve roles -dynamically. When you use the APIs to manage roles in the `native` realm, the -roles are stored in an internal {es} index. For more information and examples, -see {ref}/security-api-roles.html[Role Management APIs]. - -[float] -[[roles-management-file]] -=== File-based role management - -Apart from the _Role Management APIs_, roles can also be defined in local -`roles.yml` file located in `CONFIG_DIR`. This is a YAML file where each -role definition is keyed by its name. - -[IMPORTANT] -============================== -If the same role name is used in the `roles.yml` file and through the -_Role Management APIs_, the role found in the file will be used. -============================== - -While the _Role Management APIs_ is the preferred mechanism to define roles, -using the `roles.yml` file becomes useful if you want to define fixed roles that -no one (beside an administrator having physical access to the {es} nodes) -would be able to change. - -[IMPORTANT] -============================== -The `roles.yml` file is managed locally by the node and is not globally by the -cluster. This means that with a typical multi-node cluster, the exact same -changes need to be applied on each and every node in the cluster. 
- -A safer approach would be to apply the change on one of the nodes and have the -`roles.yml` distributed/copied to all other nodes in the cluster (either -manually or using a configuration management system such as Puppet or Chef). -============================== - -The following snippet shows an example of the `roles.yml` file configuration: - -[source,yaml] ------------------------------------ -click_admins: - run_as: [ 'clicks_watcher_1' ] - cluster: [ 'monitor' ] - indices: - - names: [ 'events-*' ] - privileges: [ 'read' ] - field_security: - grant: ['category', '@timestamp', 'message' ] - query: '{"match": {"category": "click"}}' ------------------------------------ - -{security} continuously monitors the `roles.yml` file and automatically picks -up and applies any changes to it. +include::privileges.asciidoc[] include::alias-privileges.asciidoc[] diff --git a/x-pack/docs/en/security/reference/privileges.asciidoc b/x-pack/docs/en/security/authorization/privileges.asciidoc similarity index 97% rename from x-pack/docs/en/security/reference/privileges.asciidoc rename to x-pack/docs/en/security/authorization/privileges.asciidoc index 7bdd1b5211bc8..107600183a8e0 100644 --- a/x-pack/docs/en/security/reference/privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/privileges.asciidoc @@ -1,10 +1,11 @@ +[role="xpack"] [[security-privileges]] -=== Security Privileges +=== Security privileges This section lists the privileges that you can assign to a role. [[privileges-list-cluster]] -==== Cluster Privileges +==== Cluster privileges [horizontal] `all`:: @@ -66,7 +67,7 @@ All privileges necessary for a transport client to connect. Required by the rem cluster to enable <>. [[privileges-list-indices]] -==== Indices Privileges +==== Indices privileges [horizontal] `all`:: @@ -125,7 +126,7 @@ Privilege to create an index. A create index request may contain aliases to be added to the index once created. In that case the request requires the `manage` privilege as well, on both the index and the aliases names. -==== Run As Privilege +==== Run as privilege The `run_as` permission enables an authenticated user to submit requests on behalf of another user. The value can be a user name or a comma-separated list diff --git a/x-pack/docs/en/security/reference.asciidoc b/x-pack/docs/en/security/reference.asciidoc index 21138138cfbf9..9c65fd6479a4f 100644 --- a/x-pack/docs/en/security/reference.asciidoc +++ b/x-pack/docs/en/security/reference.asciidoc @@ -7,6 +7,4 @@ * {ref}/security-api.html[Security API] * {ref}/xpack-commands.html[Security Commands] -include::reference/privileges.asciidoc[] - include::reference/files.asciidoc[] From 42070785c595939c1f0e9806e795e23b5bbbb1b5 Mon Sep 17 00:00:00 2001 From: Michael Basnight Date: Tue, 15 May 2018 21:21:11 -0500 Subject: [PATCH 35/44] Add Create Repository High Level REST API (#30501) This commit adds Create Repository, the associated docs and tests for the high level REST API client. A few small changes to the PutRepository Request and Response went into the commit as well. 
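For context, a minimal usage sketch of the API this commit adds (illustrative only, not part of the diff). It assumes an already-configured `RestHighLevelClient` named `client`; the repository name and settings values are placeholders, and the types and methods are the ones introduced below:

    PutRepositoryRequest request = new PutRepositoryRequest("my_repository");
    request.type(FsRepository.TYPE);
    request.settings(Settings.builder()
        .put(FsRepository.LOCATION_SETTING.getKey(), ".")
        .put(FsRepository.COMPRESS_SETTING.getKey(), true));

    PutRepositoryResponse response = client.snapshot().createRepository(request);
    boolean acknowledged = response.isAcknowledged();

The same operation is also exposed asynchronously as `createRepositoryAsync(request, listener)`, taking an `ActionListener<PutRepositoryResponse>`.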
--- .../client/RequestConverters.java | 14 ++ .../elasticsearch/client/SnapshotClient.java | 27 +++- .../client/RequestConvertersTests.java | 26 ++++ .../org/elasticsearch/client/SnapshotIT.java | 58 ++++---- .../SnapshotClientDocumentationIT.java | 133 +++++++++++++++-- .../snapshot/create_repository.asciidoc | 139 ++++++++++++++++++ .../put/PutRepositoryRequest.java | 18 ++- .../put/PutRepositoryResponse.java | 12 ++ .../put/PutRepositoryRequestTests.java | 72 +++++++++ .../put/PutRepositoryResponseTests.java | 48 ++++++ 10 files changed, 503 insertions(+), 44 deletions(-) create mode 100644 docs/java-rest/high-level/snapshot/create_repository.asciidoc create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java create mode 100644 server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponseTests.java diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java index facffa4144e35..27b237c5302cb 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RequestConverters.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest; @@ -656,6 +657,19 @@ static Request getRepositories(GetRepositoriesRequest getRepositoriesRequest) { return request; } + static Request createRepository(PutRepositoryRequest putRepositoryRequest) throws IOException { + String endpoint = new EndpointBuilder().addPathPart("_snapshot").addPathPart(putRepositoryRequest.name()).build(); + Request request = new Request(HttpPut.METHOD_NAME, endpoint); + + Params parameters = new Params(request); + parameters.withMasterTimeout(putRepositoryRequest.masterNodeTimeout()); + parameters.withTimeout(putRepositoryRequest.timeout()); + parameters.withVerify(putRepositoryRequest.verify()); + + request.setEntity(createEntity(putRepositoryRequest, REQUEST_BODY_CONTENT_TYPE)); + return request; + } + static Request putTemplate(PutIndexTemplateRequest putIndexTemplateRequest) throws IOException { String endpoint = new EndpointBuilder().addPathPartAsIs("_template").addPathPart(putIndexTemplateRequest.name()).build(); Request request = new Request(HttpPut.METHOD_NAME, endpoint); diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java index e526fbe7164f9..aec94586bee30 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/SnapshotClient.java @@ -23,8 +23,8 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; -import 
org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
-import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
 
 import java.io.IOException;
 
@@ -67,4 +67,27 @@ public void getRepositoriesAsync(GetRepositoriesRequest getRepositoriesRequest,
         restHighLevelClient.performRequestAsyncAndParseEntity(getRepositoriesRequest, RequestConverters::getRepositories,
             GetRepositoriesResponse::fromXContent, listener, emptySet(), headers);
     }
+
+    /**
+     * Creates a snapshot repository.
+     *
+     * See Snapshot and Restore
+     * API on elastic.co
+     */
+    public PutRepositoryResponse createRepository(PutRepositoryRequest putRepositoryRequest, Header... headers) throws IOException {
+        return restHighLevelClient.performRequestAndParseEntity(putRepositoryRequest, RequestConverters::createRepository,
+            PutRepositoryResponse::fromXContent, emptySet(), headers);
+    }
+
+    /**
+     * Asynchronously creates a snapshot repository.
+     *
+     * See Snapshot and Restore
+     * API on elastic.co
+     */
+    public void createRepositoryAsync(PutRepositoryRequest putRepositoryRequest,
+                                      ActionListener<PutRepositoryResponse> listener, Header... headers) {
+        restHighLevelClient.performRequestAsyncAndParseEntity(putRepositoryRequest, RequestConverters::createRepository,
+            PutRepositoryResponse::fromXContent, listener, emptySet(), headers);
+    }
 }
diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
index 59b3be2796fdd..af01590c0a1cf 100644
--- a/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
+++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/RequestConvertersTests.java
@@ -31,6 +31,7 @@
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksRequest;
 import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
 import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest;
 import org.elasticsearch.action.admin.indices.alias.Alias;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
@@ -76,9 +77,11 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.PathUtils;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -93,6 +96,7 @@
 import org.elasticsearch.index.rankeval.RankEvalSpec;
 import org.elasticsearch.index.rankeval.RatedRequest;
 import org.elasticsearch.index.rankeval.RestRankEvalAction;
+import org.elasticsearch.repositories.fs.FsRepository;
 import org.elasticsearch.rest.action.search.RestSearchAction;
 import org.elasticsearch.search.Scroll;
 import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder;
@@ -110,6 +114,7 @@
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -1450,6 +1455,27 @@ public void testGetRepositories() {
         assertThat(expectedParams, equalTo(request.getParameters()));
     }
 
+    public void testCreateRepository() throws IOException {
+        String repository = "repo";
+        String endpoint = "/_snapshot/" + repository;
+        Path repositoryLocation = PathUtils.get(".");
+        PutRepositoryRequest putRepositoryRequest = new PutRepositoryRequest(repository);
+        putRepositoryRequest.type(FsRepository.TYPE);
+        putRepositoryRequest.verify(randomBoolean());
+
+        putRepositoryRequest.settings(
+            Settings.builder()
+                .put(FsRepository.LOCATION_SETTING.getKey(), repositoryLocation)
+                .put(FsRepository.COMPRESS_SETTING.getKey(), randomBoolean())
+                .put(FsRepository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+                .build());
+
+        Request request = RequestConverters.createRepository(putRepositoryRequest);
+        assertThat(endpoint, equalTo(request.getEndpoint()));
+        assertThat(HttpPut.METHOD_NAME, equalTo(request.getMethod()));
+
assertToXContentBody(putRepositoryRequest, request.getEntity()); + } + public void testPutTemplateRequest() throws Exception { Map names = new HashMap<>(); names.put("log", "log"); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java index ab2c632bfeb58..1d0ea953cd5c1 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/SnapshotIT.java @@ -19,56 +19,56 @@ package org.elasticsearch.client; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.repositories.fs.FsRepository; import org.elasticsearch.rest.RestStatus; import java.io.IOException; -import java.util.Collections; import static org.hamcrest.Matchers.equalTo; public class SnapshotIT extends ESRestHighLevelClientTestCase { - public void testModulesGetRepositoriesUsingParams() throws IOException { - String repository = "test"; - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository, Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); - - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + repository + "_other", Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + private PutRepositoryResponse createTestRepository(String repository, String type, String settings) throws IOException { + PutRepositoryRequest request = new PutRepositoryRequest(repository); + request.settings(settings, XContentType.JSON); + request.type(type); + return execute(request, highLevelClient().snapshot()::createRepository, + highLevelClient().snapshot()::createRepositoryAsync); - { - GetRepositoriesRequest request = new GetRepositoriesRequest(); - request.repositories(new String[]{repository}); - GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, - highLevelClient().snapshot()::getRepositoriesAsync); - assertThat(1, equalTo(response.repositories().size())); - } - { - GetRepositoriesRequest request = new GetRepositoriesRequest(); - GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, - highLevelClient().snapshot()::getRepositoriesAsync); - assertThat(2, equalTo(response.repositories().size())); - } } - public void testModulesGetDefaultRepositories() throws IOException { - String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}"; - GetRepositoriesRequest request = new GetRepositoriesRequest(); + public void testCreateRepository() throws IOException { + PutRepositoryResponse response = createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}"); + assertTrue(response.isAcknowledged()); + } - highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/test", 
Collections.emptyMap(), - new StringEntity(repositorySettings, ContentType.APPLICATION_JSON)); + public void testModulesGetRepositoriesUsingParams() throws IOException { + String testRepository = "test"; + assertTrue(createTestRepository(testRepository, FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + GetRepositoriesRequest request = new GetRepositoriesRequest(); + request.repositories(new String[]{testRepository}); GetRepositoriesResponse response = execute(request, highLevelClient().snapshot()::getRepositories, highLevelClient().snapshot()::getRepositoriesAsync); assertThat(1, equalTo(response.repositories().size())); } + public void testModulesGetDefaultRepositories() throws IOException { + assertTrue(createTestRepository("other", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + assertTrue(createTestRepository("test", FsRepository.TYPE, "{\"location\": \".\"}").isAcknowledged()); + + GetRepositoriesResponse response = execute(new GetRepositoriesRequest(), highLevelClient().snapshot()::getRepositories, + highLevelClient().snapshot()::getRepositoriesAsync); + assertThat(2, equalTo(response.repositories().size())); + } + public void testModulesGetRepositoriesNonExistent() throws IOException { String repository = "doesnotexist"; GetRepositoriesRequest request = new GetRepositoriesRequest(new String[]{repository}); diff --git a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java index 1044cc9da3332..c57f8e2a2fbd5 100644 --- a/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java +++ b/client/rest-high-level/src/test/java/org/elasticsearch/client/documentation/SnapshotClientDocumentationIT.java @@ -19,20 +19,24 @@ package org.elasticsearch.client.documentation; -import org.apache.http.entity.ContentType; -import org.apache.http.entity.StringEntity; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesRequest; import org.elasticsearch.action.admin.cluster.repositories.get.GetRepositoriesResponse; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest; +import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse; import org.elasticsearch.client.ESRestHighLevelClientTestCase; import org.elasticsearch.client.RestHighLevelClient; import org.elasticsearch.cluster.metadata.RepositoryMetaData; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.repositories.fs.FsRepository; import java.io.IOException; -import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -58,7 +62,114 @@ */ public class SnapshotClientDocumentationIT extends ESRestHighLevelClientTestCase { - private static final String testRepository = "test_repository"; + private static final String repositoryName = "test_repository"; + + public void testSnapshotCreateRepository() throws IOException { + RestHighLevelClient client = highLevelClient(); 
+ + // tag::create-repository-request + PutRepositoryRequest request = new PutRepositoryRequest(); + // end::create-repository-request + + // tag::create-repository-create-settings + String locationKey = FsRepository.LOCATION_SETTING.getKey(); + String locationValue = "."; + String compressKey = FsRepository.COMPRESS_SETTING.getKey(); + boolean compressValue = true; + + Settings settings = Settings.builder() + .put(locationKey, locationValue) + .put(compressKey, compressValue) + .build(); // <1> + // end::create-repository-create-settings + + // tag::create-repository-request-repository-settings + request.settings(settings); // <1> + // end::create-repository-request-repository-settings + + { + // tag::create-repository-settings-builder + Settings.Builder settingsBuilder = Settings.builder() + .put(locationKey, locationValue) + .put(compressKey, compressValue); + request.settings(settingsBuilder); // <1> + // end::create-repository-settings-builder + } + { + // tag::create-repository-settings-map + Map map = new HashMap<>(); + map.put(locationKey, locationValue); + map.put(compressKey, compressValue); + request.settings(map); // <1> + // end::create-repository-settings-map + } + { + // tag::create-repository-settings-source + request.settings("{\"location\": \".\", \"compress\": \"true\"}", + XContentType.JSON); // <1> + // end::create-repository-settings-source + } + + // tag::create-repository-request-name + request.name(repositoryName); // <1> + // end::create-repository-request-name + // tag::create-repository-request-type + request.type(FsRepository.TYPE); // <1> + // end::create-repository-request-type + + // tag::create-repository-request-masterTimeout + request.masterNodeTimeout(TimeValue.timeValueMinutes(1)); // <1> + request.masterNodeTimeout("1m"); // <2> + // end::create-repository-request-masterTimeout + // tag::create-repository-request-timeout + request.timeout(TimeValue.timeValueMinutes(1)); // <1> + request.timeout("1m"); // <2> + // end::create-repository-request-timeout + // tag::create-repository-request-verify + request.verify(true); // <1> + // end::create-repository-request-verify + + // tag::create-repository-execute + PutRepositoryResponse response = client.snapshot().createRepository(request); + // end::create-repository-execute + + // tag::create-repository-response + boolean acknowledged = response.isAcknowledged(); // <1> + // end::create-repository-response + assertTrue(acknowledged); + } + + public void testSnapshotCreateRepositoryAsync() throws InterruptedException { + RestHighLevelClient client = highLevelClient(); + { + PutRepositoryRequest request = new PutRepositoryRequest(repositoryName); + + // tag::create-repository-execute-listener + ActionListener listener = + new ActionListener() { + @Override + public void onResponse(PutRepositoryResponse putRepositoryResponse) { + // <1> + } + + @Override + public void onFailure(Exception e) { + // <2> + } + }; + // end::create-repository-execute-listener + + // Replace the empty listener by a blocking listener in test + final CountDownLatch latch = new CountDownLatch(1); + listener = new LatchedActionListener<>(listener, latch); + + // tag::create-repository-execute-async + client.snapshot().createRepositoryAsync(request, listener); // <1> + // end::create-repository-execute-async + + assertTrue(latch.await(30L, TimeUnit.SECONDS)); + } + } public void testSnapshotGetRepository() throws IOException { RestHighLevelClient client = highLevelClient(); @@ -70,7 +181,7 @@ public void testSnapshotGetRepository() throws 
IOException {
         // end::get-repository-request
         // tag::get-repository-request-repositories
-        String [] repositories = new String[] { testRepository };
+        String [] repositories = new String[] {repositoryName};
         request.repositories(repositories); // <1>
         // end::get-repository-request-repositories
         // tag::get-repository-request-local
@@ -89,7 +200,7 @@ public void testSnapshotGetRepository() throws IOException {
         List<RepositoryMetaData> repositoryMetaDataResponse = response.repositories();
         // end::get-repository-response
         assertThat(1, equalTo(repositoryMetaDataResponse.size()));
-        assertThat(testRepository, equalTo(repositoryMetaDataResponse.get(0).name()));
+        assertThat(repositoryName, equalTo(repositoryMetaDataResponse.get(0).name()));
     }
 
     public void testSnapshotGetRepositoryAsync() throws InterruptedException {
@@ -122,14 +233,12 @@ public void onFailure(Exception e) {
 
         assertTrue(latch.await(30L, TimeUnit.SECONDS));
     }
-
     }
 
     private void createTestRepositories() throws IOException {
-        RestHighLevelClient client = highLevelClient();
-        String repositorySettings = "{\"type\":\"fs\", \"settings\":{\"location\": \".\"}}";
-        highLevelClient().getLowLevelClient().performRequest("put", "_snapshot/" + testRepository, Collections.emptyMap(),
-            new StringEntity(repositorySettings, ContentType.APPLICATION_JSON));
-
+        PutRepositoryRequest request = new PutRepositoryRequest(repositoryName);
+        request.type(FsRepository.TYPE);
+        request.settings("{\"location\": \".\"}", XContentType.JSON);
+        assertTrue(highLevelClient().snapshot().createRepository(request).isAcknowledged());
     }
 }
diff --git a/docs/java-rest/high-level/snapshot/create_repository.asciidoc b/docs/java-rest/high-level/snapshot/create_repository.asciidoc
new file mode 100644
index 0000000000000..5c54529209720
--- /dev/null
+++ b/docs/java-rest/high-level/snapshot/create_repository.asciidoc
@@ -0,0 +1,139 @@
+[[java-rest-high-snapshot-create-repository]]
+=== Snapshot Create Repository API
+
+The Snapshot Create Repository API allows you to register a snapshot repository.
+
+[[java-rest-high-snapshot-create-repository-request]]
+==== Snapshot Create Repository Request
+
+A `PutRepositoryRequest`:
+
+["source","java",subs="attributes,callouts,macros"]
+--------------------------------------------------
+include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request]
+--------------------------------------------------
+
+==== Repository Settings
+Settings requirements will differ based on the repository backend chosen.
+ +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-repository-settings] +-------------------------------------------------- +<1> Sets the repository settings + +==== Providing the Settings +The settings to be applied can be provided in different ways: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-create-settings] +-------------------------------------------------- +<1> Settings provided as `Settings` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-builder] +-------------------------------------------------- +<1> Settings provided as `Settings.Builder` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-source] +-------------------------------------------------- +<1> Settings provided as `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-settings-map] +-------------------------------------------------- +<1> Settings provided as a `Map` + +==== Required Arguments +The following arguments must be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-name] +-------------------------------------------------- +<1> The name of the repository + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-type] +-------------------------------------------------- +<1> The type of the repository + +==== Optional Arguments +The following arguments can optionally be provided: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-timeout] +-------------------------------------------------- +<1> Timeout to wait for the all the nodes to acknowledge the settings were applied +as a `TimeValue` +<2> Timeout to wait for the all the nodes to acknowledge the settings were applied +as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-masterTimeout] +-------------------------------------------------- +<1> Timeout to connect to the master node as a `TimeValue` +<2> Timeout to connect to the master node as a `String` + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-request-verify] +-------------------------------------------------- +<1> Verify after creation as a `Boolean` + +[[java-rest-high-snapshot-create-repository-sync]] +==== Synchronous Execution + 
+["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute] +-------------------------------------------------- + +[[java-rest-high-snapshot-create-repository-async]] +==== Asynchronous Execution + +The asynchronous execution of a repository put settings requires both the +`PutRepositoryRequest` instance and an `ActionListener` instance to be +passed to the asynchronous method: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute-async] +-------------------------------------------------- +<1> The `PutRepositoryRequest` to execute and the `ActionListener` +to use when the execution completes + +The asynchronous method does not block and returns immediately. Once it is +completed the `ActionListener` is called back using the `onResponse` method +if the execution successfully completed or using the `onFailure` method if +it failed. + +A typical listener for `PutRepositoryResponse` looks like: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-execute-listener] +-------------------------------------------------- +<1> Called when the execution is successfully completed. The response is +provided as an argument +<2> Called in case of a failure. The raised exception is provided as an argument + +[[java-rest-high-snapshot-create-repository-response]] +==== Snapshot Create RepositoryResponse + +The returned `PutRepositoryResponse` allows to retrieve information about the +executed operation as follows: + +["source","java",subs="attributes,callouts,macros"] +-------------------------------------------------- +include-tagged::{doc-tests}/SnapshotClientDocumentationIT.java[create-repository-response] +-------------------------------------------------- +<1> Indicates the node has acknowledged the request diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java index ad81302918eb3..82f0e38572e77 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequest.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; import org.elasticsearch.common.xcontent.XContentType; @@ -44,7 +45,7 @@ * Registers a repository with given name, type and settings. If the repository with the same name already * exists in the cluster, the new repository will replace the existing repository. 
*/ -public class PutRepositoryRequest extends AcknowledgedRequest { +public class PutRepositoryRequest extends AcknowledgedRequest implements ToXContentObject { private String name; @@ -232,4 +233,19 @@ public void writeTo(StreamOutput out) throws IOException { writeSettingsToStream(settings, out); out.writeBoolean(verify); } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + builder.field("name", name); + builder.field("type", type); + + builder.startObject("settings"); + settings.toXContent(builder, params); + builder.endObject(); + + builder.field("verify", verify); + builder.endObject(); + return builder; + } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java index c2b45743447f2..e58a1d9d147f9 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponse.java @@ -22,6 +22,8 @@ import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; @@ -30,6 +32,13 @@ */ public class PutRepositoryResponse extends AcknowledgedResponse { + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>("put_repository", + true, args -> new PutRepositoryResponse((boolean) args[0])); + + static { + declareAcknowledgedField(PARSER); + } + PutRepositoryResponse() { } @@ -49,4 +58,7 @@ public void writeTo(StreamOutput out) throws IOException { writeAcknowledged(out); } + public static PutRepositoryResponse fromXContent(XContentParser parser) { + return PARSER.apply(parser, null); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java new file mode 100644 index 0000000000000..9b88659a307f8 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryRequestTests.java @@ -0,0 +1,72 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.action.admin.cluster.repositories.put; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ToXContent; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.equalTo; + +public class PutRepositoryRequestTests extends ESTestCase { + + public void testCreateRepositoryToXContent() throws IOException { + Map mapParams = new HashMap<>(); + PutRepositoryRequest request = new PutRepositoryRequest(); + String repoName = "test"; + request.name(repoName); + mapParams.put("name", repoName); + Boolean verify = randomBoolean(); + request.verify(verify); + mapParams.put("verify", verify.toString()); + String type = FsRepository.TYPE; + request.type(type); + mapParams.put("type", type); + + Boolean addSettings = randomBoolean(); + if (addSettings) { + request.settings(Settings.builder().put(FsRepository.LOCATION_SETTING.getKey(), ".").build()); + } + + XContentBuilder builder = jsonBuilder(); + request.toXContent(builder, new ToXContent.MapParams(mapParams)); + builder.flush(); + + Map outputMap = XContentHelper.convertToMap(BytesReference.bytes(builder), false, builder.contentType()).v2(); + + assertThat(outputMap.get("name"), equalTo(request.name())); + assertThat(outputMap.get("verify"), equalTo(request.verify())); + assertThat(outputMap.get("type"), equalTo(request.type())); + Map settings = (Map) outputMap.get("settings"); + if (addSettings) { + assertThat(settings.get(FsRepository.LOCATION_SETTING.getKey()), equalTo(".")); + } else { + assertTrue(((Map) outputMap.get("settings")).isEmpty()); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponseTests.java new file mode 100644 index 0000000000000..30fbe61bb172a --- /dev/null +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/repositories/put/PutRepositoryResponseTests.java @@ -0,0 +1,48 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */
+package org.elasticsearch.action.admin.cluster.repositories.put;
+
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.common.xcontent.XContentParser;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.test.AbstractStreamableXContentTestCase;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+
+import static org.elasticsearch.test.ESTestCase.randomBoolean;
+import static org.hamcrest.Matchers.equalTo;
+
+public class PutRepositoryResponseTests extends AbstractStreamableXContentTestCase<PutRepositoryResponse> {
+
+    @Override
+    protected PutRepositoryResponse doParseInstance(XContentParser parser) throws IOException {
+        return PutRepositoryResponse.fromXContent(parser);
+    }
+
+    @Override
+    protected PutRepositoryResponse createBlankInstance() {
+        return new PutRepositoryResponse();
+    }
+
+    @Override
+    protected PutRepositoryResponse createTestInstance() {
+        return new PutRepositoryResponse(randomBoolean());
+    }
+}

From 22749e305765b5dcf51b3855346c3787ea30db35 Mon Sep 17 00:00:00 2001
From: Ioannis Kakavas
Date: Wed, 16 May 2018 23:02:48 +0300
Subject: [PATCH 36/44] Use readFully() to read bytes from CipherInputStream
 (#30640)

Changes how data is read from CipherInputStream in KeyStoreWrapper.
Instead of using `read()` and checking that the bytes read are what we
expect, use `readFully()`, which reads exactly the requested number of
bytes, continuing to read until the end of the stream if necessary, and
throws an `EOFException` if not all bytes can be read. This approach
keeps the simplicity of using CipherInputStream while working as
expected with both JCE and BCFIPS Security Providers.

See also: #28515
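Background for reviewers (illustrative only, not part of the diff): `InputStream.read(byte[])` may return fewer bytes than requested even when more data is available (for `CipherInputStream`, how often this happens depends on the security provider, e.g. JCE vs BCFIPS), while `DataInputStream.readFully(byte[])` keeps reading until the buffer is full and throws `EOFException` only if the stream genuinely ends early. A small self-contained sketch of the difference, where the hypothetical `TrickleStream` stands in for a cipher stream that returns data in small chunks:

    import java.io.DataInputStream;
    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    public class ReadFullyDemo {
        // A stream that legally returns at most one byte per read(byte[]) call.
        static final class TrickleStream extends InputStream {
            private final byte[] data;
            private int pos;
            TrickleStream(byte[] data) { this.data = data; }
            @Override public int read() {
                return pos < data.length ? data[pos++] & 0xFF : -1;
            }
            @Override public int read(byte[] b, int off, int len) {
                int c = read();
                if (c == -1) return -1;
                b[off] = (byte) c;
                return 1; // fewer bytes than requested, but not end-of-stream
            }
        }

        public static void main(String[] args) throws IOException {
            byte[] payload = {1, 2, 3, 4};

            // Fragile: a single read() may fill only part of the buffer, so
            // read(buf) != buf.length does not prove the data is corrupt.
            int n = new TrickleStream(payload).read(new byte[4]);
            System.out.println("read() returned " + n + " of 4 bytes");

            // Robust: readFully() loops until the buffer is full ...
            DataInputStream in = new DataInputStream(new TrickleStream(payload));
            in.readFully(new byte[4]); // succeeds

            // ... and throws EOFException only on a genuinely short stream.
            DataInputStream truncated = new DataInputStream(new TrickleStream(new byte[] { 9 }));
            try {
                truncated.readFully(new byte[4]);
            } catch (EOFException e) {
                System.out.println("truncated stream detected");
            }
        }
    }

This is why the `input.read(x) != expectedLength` checks replaced below could spuriously reject a valid keystore, while `readFully` cannot.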
ByteArrayInputStream(encryptedBytes); CipherInputStream cipherStream = new CipherInputStream(bytesStream, cipher); DataInputStream input = new DataInputStream(cipherStream)) { - entries.set(new HashMap<>()); int numEntries = input.readInt(); while (numEntries-- > 0) { @@ -345,11 +344,14 @@ public void decrypt(char[] password) throws GeneralSecurityException, IOExceptio EntryType entryType = EntryType.valueOf(input.readUTF()); int entrySize = input.readInt(); byte[] entryBytes = new byte[entrySize]; - if (input.read(entryBytes) != entrySize) { - throw new SecurityException("Keystore has been corrupted or tampered with"); - } + input.readFully(entryBytes); entries.get().put(setting, new Entry(entryType, entryBytes)); } + if (input.read() != -1) { + throw new SecurityException("Keystore has been corrupted or tampered with"); + } + } catch (EOFException e) { + throw new SecurityException("Keystore has been corrupted or tampered with", e); } } @@ -361,7 +363,6 @@ private byte[] encrypt(char[] password, byte[] salt, byte[] iv) throws GeneralSe Cipher cipher = createCipher(Cipher.ENCRYPT_MODE, password, salt, iv); try (CipherOutputStream cipherStream = new CipherOutputStream(bytes, cipher); DataOutputStream output = new DataOutputStream(cipherStream)) { - output.writeInt(entries.get().size()); for (Map.Entry mapEntry : entries.get().entrySet()) { output.writeUTF(mapEntry.getKey()); @@ -371,7 +372,6 @@ private byte[] encrypt(char[] password, byte[] salt, byte[] iv) throws GeneralSe output.write(entry.bytes); } } - return bytes.toByteArray(); } diff --git a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java index bc7756f6ce633..849841943ecc6 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/KeyStoreWrapperTests.java @@ -19,30 +19,33 @@ package org.elasticsearch.common.settings; +import javax.crypto.Cipher; +import javax.crypto.CipherOutputStream; import javax.crypto.SecretKey; import javax.crypto.SecretKeyFactory; +import javax.crypto.spec.GCMParameterSpec; import javax.crypto.spec.PBEKeySpec; +import javax.crypto.spec.SecretKeySpec; import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.EOFException; import java.io.IOException; import java.io.InputStream; -import java.nio.CharBuffer; -import java.nio.charset.CharsetEncoder; import java.nio.charset.StandardCharsets; import java.nio.file.FileSystem; import java.nio.file.Path; import java.security.KeyStore; +import java.security.SecureRandom; import java.util.ArrayList; import java.util.Base64; import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; import org.apache.lucene.codecs.CodecUtil; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.SimpleFSDirectory; +import org.elasticsearch.common.Randomness; import org.elasticsearch.core.internal.io.IOUtils; -import org.elasticsearch.bootstrap.BootstrapSettings; import org.elasticsearch.env.Environment; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -121,6 +124,149 @@ public void testUpgradeNoop() throws Exception { assertEquals(seed.toString(), keystore.getString(KeyStoreWrapper.SEED_SETTING.getKey()).toString()); } + public void testFailWhenCannotConsumeSecretStream() throws Exception { + Path configDir = env.configFile(); + 
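
To make the read() versus readFully() distinction above concrete, here is a standalone sketch using plain JDK streams (illustrative class and variable names, not code from this patch). read(byte[]) can return a short count without any error, which is exactly the case the old length checks had to catch; readFully(byte[]) keeps reading until the buffer is full and throws EOFException if the stream ends first. With a CipherInputStream a single read() can legitimately return fewer bytes than requested, which is why the patch needs a primitive that either fills the buffer or fails:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;

public class ReadFullyDemo {
    public static void main(String[] args) throws IOException {
        byte[] shortData = new byte[3]; // fewer bytes than the reader expects

        // read(byte[]) may fill only part of the buffer and report it via the
        // return value; nothing forces the caller to notice a partial read.
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(shortData))) {
            byte[] buf = new byte[8];
            int n = in.read(buf);
            System.out.println("read() returned " + n + " of " + buf.length + " bytes");
        }

        // readFully(byte[]) blocks until the buffer is completely filled and
        // throws EOFException if the stream ends first, which decrypt() above
        // translates into "Keystore has been corrupted or tampered with".
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(shortData))) {
            byte[] buf = new byte[8];
            try {
                in.readFully(buf);
            } catch (EOFException e) {
                System.out.println("readFully() threw EOFException on the short stream");
            }
        }
    }
}
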
SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); + indexOutput.writeByte((byte) 0); // No password + SecureRandom random = Randomness.createSecure(); + byte[] salt = new byte[64]; + random.nextBytes(salt); + byte[] iv = new byte[12]; + random.nextBytes(iv); + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + CipherOutputStream cipherStream = getCipherStream(bytes, salt, iv); + DataOutputStream output = new DataOutputStream(cipherStream); + // Indicate that the secret string is longer than it is so readFully() fails + possiblyAlterSecretString(output, -4); + cipherStream.close(); + final byte[] encryptedBytes = bytes.toByteArray(); + possiblyAlterEncryptedBytes(indexOutput, salt, iv, encryptedBytes, 0); + CodecUtil.writeFooter(indexOutput); + } + + KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir); + SecurityException e = expectThrows(SecurityException.class, () -> keystore.decrypt(new char[0])); + assertThat(e.getMessage(), containsString("Keystore has been corrupted or tampered with")); + assertThat(e.getCause(), instanceOf(EOFException.class)); + } + + public void testFailWhenCannotConsumeEncryptedBytesStream() throws Exception { + Path configDir = env.configFile(); + SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); + indexOutput.writeByte((byte) 0); // No password + SecureRandom random = Randomness.createSecure(); + byte[] salt = new byte[64]; + random.nextBytes(salt); + byte[] iv = new byte[12]; + random.nextBytes(iv); + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + CipherOutputStream cipherStream = getCipherStream(bytes, salt, iv); + DataOutputStream output = new DataOutputStream(cipherStream); + + possiblyAlterSecretString(output, 0); + cipherStream.close(); + final byte[] encryptedBytes = bytes.toByteArray(); + // Indicate that the encryptedBytes is larger than it is so readFully() fails + possiblyAlterEncryptedBytes(indexOutput, salt, iv, encryptedBytes, -12); + CodecUtil.writeFooter(indexOutput); + } + + KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir); + SecurityException e = expectThrows(SecurityException.class, () -> keystore.decrypt(new char[0])); + assertThat(e.getMessage(), containsString("Keystore has been corrupted or tampered with")); + assertThat(e.getCause(), instanceOf(EOFException.class)); + } + + public void testFailWhenSecretStreamNotConsumed() throws Exception { + Path configDir = env.configFile(); + SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); + indexOutput.writeByte((byte) 0); // No password + SecureRandom random = Randomness.createSecure(); + byte[] salt = new byte[64]; + random.nextBytes(salt); + byte[] iv = new byte[12]; + random.nextBytes(iv); + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + CipherOutputStream cipherStream = getCipherStream(bytes, salt, iv); + DataOutputStream output = new DataOutputStream(cipherStream); + // So that readFully during decryption will not consume the entire stream + possiblyAlterSecretString(output, 4); + 
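
These tests assemble a keystore by hand with the same cipher construction the production code uses (see the getCipherStream helper below). Pulled out of the test harness, the scheme is: derive an AES key with PBKDF2WithHmacSHA512, encrypt with AES/GCM/NoPadding using a 128-bit tag, and authenticate the salt as associated data. A self-contained sketch, with illustrative names and a non-empty password standing in for the empty one the tests use:

import javax.crypto.Cipher;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.GCMParameterSpec;
import javax.crypto.spec.PBEKeySpec;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.security.SecureRandom;

public class GcmRoundTripDemo {
    public static void main(String[] args) throws Exception {
        SecureRandom random = new SecureRandom();
        byte[] salt = new byte[64];
        random.nextBytes(salt);
        byte[] iv = new byte[12];
        random.nextBytes(iv);

        // Derive a 128-bit AES key from a password (the tests above use an empty one).
        PBEKeySpec keySpec = new PBEKeySpec("changeme".toCharArray(), salt, 10000, 128);
        SecretKey derived = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512").generateSecret(keySpec);
        SecretKeySpec key = new SecretKeySpec(derived.getEncoded(), "AES");

        Cipher encrypt = Cipher.getInstance("AES/GCM/NoPadding");
        encrypt.init(Cipher.ENCRYPT_MODE, key, new GCMParameterSpec(128, iv));
        encrypt.updateAAD(salt); // the salt is authenticated alongside the ciphertext
        byte[] ciphertext = encrypt.doFinal("secret".getBytes(StandardCharsets.UTF_8));

        Cipher decrypt = Cipher.getInstance("AES/GCM/NoPadding");
        decrypt.init(Cipher.DECRYPT_MODE, key, new GCMParameterSpec(128, iv));
        decrypt.updateAAD(salt);
        // doFinal verifies the GCM tag; altering the ciphertext, salt, or IV
        // makes it throw AEADBadTagException instead of returning plaintext.
        byte[] plaintext = decrypt.doFinal(ciphertext);
        System.out.println(new String(plaintext, StandardCharsets.UTF_8));
    }
}
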
cipherStream.close(); + final byte[] encryptedBytes = bytes.toByteArray(); + possiblyAlterEncryptedBytes(indexOutput, salt, iv, encryptedBytes, 0); + CodecUtil.writeFooter(indexOutput); + } + + KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir); + SecurityException e = expectThrows(SecurityException.class, () -> keystore.decrypt(new char[0])); + assertThat(e.getMessage(), containsString("Keystore has been corrupted or tampered with")); + } + + public void testFailWhenEncryptedBytesStreamIsNotConsumed() throws Exception { + Path configDir = env.configFile(); + SimpleFSDirectory directory = new SimpleFSDirectory(configDir); + try (IndexOutput indexOutput = directory.createOutput("elasticsearch.keystore", IOContext.DEFAULT)) { + CodecUtil.writeHeader(indexOutput, "elasticsearch.keystore", 3); + indexOutput.writeByte((byte) 0); // No password + SecureRandom random = Randomness.createSecure(); + byte[] salt = new byte[64]; + random.nextBytes(salt); + byte[] iv = new byte[12]; + random.nextBytes(iv); + ByteArrayOutputStream bytes = new ByteArrayOutputStream(); + CipherOutputStream cipherStream = getCipherStream(bytes, salt, iv); + DataOutputStream output = new DataOutputStream(cipherStream); + possiblyAlterSecretString(output, 0); + cipherStream.close(); + final byte[] encryptedBytes = bytes.toByteArray(); + possiblyAlterEncryptedBytes(indexOutput, salt, iv, encryptedBytes, randomIntBetween(2, encryptedBytes.length)); + CodecUtil.writeFooter(indexOutput); + } + + KeyStoreWrapper keystore = KeyStoreWrapper.load(configDir); + SecurityException e = expectThrows(SecurityException.class, () -> keystore.decrypt(new char[0])); + assertThat(e.getMessage(), containsString("Keystore has been corrupted or tampered with")); + } + + private CipherOutputStream getCipherStream(ByteArrayOutputStream bytes, byte[] salt, byte[] iv) throws Exception { + PBEKeySpec keySpec = new PBEKeySpec(new char[0], salt, 10000, 128); + SecretKeyFactory keyFactory = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512"); + SecretKey secretKey = keyFactory.generateSecret(keySpec); + SecretKeySpec secret = new SecretKeySpec(secretKey.getEncoded(), "AES"); + GCMParameterSpec spec = new GCMParameterSpec(128, iv); + Cipher cipher = Cipher.getInstance("AES/GCM/NoPadding"); + cipher.init(Cipher.ENCRYPT_MODE, secret, spec); + cipher.updateAAD(salt); + return new CipherOutputStream(bytes, cipher); + } + + private void possiblyAlterSecretString(DataOutputStream output, int truncLength) throws Exception { + byte[] secret_value = "super_secret_value".getBytes(StandardCharsets.UTF_8); + output.writeInt(1); // One entry + output.writeUTF("string_setting"); + output.writeUTF("STRING"); + output.writeInt(secret_value.length - truncLength); + output.write(secret_value); + } + + private void possiblyAlterEncryptedBytes(IndexOutput indexOutput, byte[] salt, byte[] iv, byte[] encryptedBytes, int + truncEncryptedDataLength) + throws Exception { + indexOutput.writeInt(4 + salt.length + 4 + iv.length + 4 + encryptedBytes.length); + indexOutput.writeInt(salt.length); + indexOutput.writeBytes(salt, salt.length); + indexOutput.writeInt(iv.length); + indexOutput.writeBytes(iv, iv.length); + indexOutput.writeInt(encryptedBytes.length - truncEncryptedDataLength); + indexOutput.writeBytes(encryptedBytes, encryptedBytes.length); + } + public void testUpgradeAddsSeed() throws Exception { KeyStoreWrapper keystore = KeyStoreWrapper.create(); keystore.remove(KeyStoreWrapper.SEED_SETTING.getKey()); From 87874d001dd1609c40218876643b64d885702143 Mon Sep 17 
00:00:00 2001 From: lcawl Date: Wed, 16 May 2018 13:11:06 -0700 Subject: [PATCH 37/44] [DOCS] Fixes list of unconverted snippets in build.gradle --- x-pack/docs/build.gradle | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index bf2dd26def63f..8df72aaf3e7fe 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -81,7 +81,7 @@ buildRestTests.expectedUnconvertedCandidates = [ 'en/rest-api/ml/validate-job.asciidoc', 'en/rest-api/security/authenticate.asciidoc', 'en/rest-api/watcher/stats.asciidoc', - 'en/security/authorization/overview.asciidoc', + 'en/security/authorization/managing-roles.asciidoc', 'en/watcher/example-watches/watching-time-series-data.asciidoc', ] From 81f29fb298ebe6358b5fec201bb139636d9484c2 Mon Sep 17 00:00:00 2001 From: Shashwat Anand Date: Thu, 17 May 2018 01:56:23 +0530 Subject: [PATCH 38/44] Reindex: Fixed typo in assertion failure message (#30619) Fix a typo in an assertion failure message. --- .../java/org/elasticsearch/index/reindex/RestReindexAction.java | 2 +- .../org/elasticsearch/index/reindex/RestReindexActionTests.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java index f218d6ae8dfaa..f1ac681b59fdf 100644 --- a/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java +++ b/modules/reindex/src/main/java/org/elasticsearch/index/reindex/RestReindexAction.java @@ -115,7 +115,7 @@ public RestChannelConsumer prepareRequest(RestRequest request, NodeClient client @Override protected ReindexRequest buildRequest(RestRequest request) throws IOException { if (request.hasParam("pipeline")) { - throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parmaeter. " + throw new IllegalArgumentException("_reindex doesn't support [pipeline] as a query parameter. " + "Specify it in the [dest] object instead."); } ReindexRequest internal = new ReindexRequest(new SearchRequest(), new IndexRequest()); diff --git a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java index 1c33ccdaaa289..88fa31f423a21 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/index/reindex/RestReindexActionTests.java @@ -149,7 +149,7 @@ public void testPipelineQueryParameterIsError() throws IOException { request.withParams(singletonMap("pipeline", "doesn't matter")); Exception e = expectThrows(IllegalArgumentException.class, () -> action.buildRequest(request.build())); - assertEquals("_reindex doesn't support [pipeline] as a query parmaeter. Specify it in the [dest] object instead.", e.getMessage()); + assertEquals("_reindex doesn't support [pipeline] as a query parameter. Specify it in the [dest] object instead.", e.getMessage()); } public void testSetScrollTimeout() throws IOException { From db4085195c9ad39ba1257fe334b1471f1951b035 Mon Sep 17 00:00:00 2001 From: Dimitris Athanasiou Date: Wed, 16 May 2018 23:35:23 +0300 Subject: [PATCH 39/44] [ML] DeleteExpiredDataAction should use client with origin (#30646) This is an admin action that should be allowed to operate on ML indices with full permissions. 
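
The fix itself is one line in the transport action below, but the pattern deserves a note: wrapping the client once in the constructor means every request subsequently issued through it is attributed to the ML origin and authorized as internal maintenance work, not with the permissions of whoever triggered the action. A rough sketch of the shape (hypothetical class; clientWithOrigin and ML_ORIGIN are the real helpers used in the diff):

import org.elasticsearch.client.Client;
import org.elasticsearch.xpack.core.ClientHelper;

// Hypothetical component; only clientWithOrigin and ML_ORIGIN are taken
// from the diff below, the surrounding class is made up for illustration.
public class ExpiredDataCleaner {

    private final Client client;

    public ExpiredDataCleaner(Client client) {
        // Every request sent through the wrapped client carries the ML origin,
        // so it is authorized as internal ML maintenance with full access to
        // the ML indices, not with the permissions of the calling user.
        this.client = ClientHelper.clientWithOrigin(client, ClientHelper.ML_ORIGIN);
    }
}

The javadoc added to each remover below makes the matching assumption explicit: the client handed to them is expected to already carry the ML origin.
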
--- .../ml/action/TransportDeleteExpiredDataAction.java | 3 ++- .../xpack/ml/job/retention/ExpiredForecastsRemover.java | 4 ++++ .../ml/job/retention/ExpiredModelSnapshotsRemover.java | 4 ++++ .../xpack/ml/job/retention/ExpiredResultsRemover.java | 9 +++++---- 4 files changed, 15 insertions(+), 5 deletions(-) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java index 6cf06695c7c91..0e1ca9dd9aec3 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteExpiredDataAction.java @@ -15,6 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.job.retention.ExpiredForecastsRemover; @@ -40,7 +41,7 @@ public TransportDeleteExpiredDataAction(Settings settings, ThreadPool threadPool Client client, ClusterService clusterService) { super(settings, DeleteExpiredDataAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, DeleteExpiredDataAction.Request::new); - this.client = client; + this.client = ClientHelper.clientWithOrigin(client, ClientHelper.ML_ORIGIN); this.clusterService = clusterService; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java index 30c49a834be58..75deb7bf0ae6d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredForecastsRemover.java @@ -45,6 +45,10 @@ * Removes up to {@link #MAX_FORECASTS} forecasts (stats + forecasts docs) that have expired. * A forecast is deleted if its expiration timestamp is earlier * than the start of the current day (local time-zone). + * + * This is expected to be used by actions requiring admin rights. Thus, + * it is also expected that the provided client will be a client with the + * ML origin so that permissions to manage ML indices are met. */ public class ExpiredForecastsRemover implements MlDataRemover { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java index 3b1105774ea66..8808ed34277a4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredModelSnapshotsRemover.java @@ -34,6 +34,10 @@ * of their respective job with the exception of the currently used snapshot. * A snapshot is deleted if its timestamp is earlier than the start of the * current day (local time-zone) minus the retention period. + * + * This is expected to be used by actions requiring admin rights. 
Thus, + * it is also expected that the provided client will be a client with the + * ML origin so that permissions to manage ML indices are met. */ public class ExpiredModelSnapshotsRemover extends AbstractExpiredJobDataRemover { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java index 3f0ca4558b570..f59fdddedecdb 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/retention/ExpiredResultsRemover.java @@ -33,14 +33,15 @@ import java.time.format.DateTimeFormatter; import java.util.Objects; -import static org.elasticsearch.xpack.core.ClientHelper.ML_ORIGIN; -import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; - /** * Removes all results that have expired the configured retention time * of their respective job. A result is deleted if its timestamp is earlier * than the start of the current day (local time-zone) minus the retention * period. + * + * This is expected to be used by actions requiring admin rights. Thus, + * it is also expected that the provided client will be a client with the + * ML origin so that permissions to manage ML indices are met. */ public class ExpiredResultsRemover extends AbstractExpiredJobDataRemover { @@ -65,7 +66,7 @@ protected void removeDataBefore(Job job, long cutoffEpochMs, ActionListener() { + client.execute(DeleteByQueryAction.INSTANCE, request, new ActionListener() { @Override public void onResponse(BulkByScrollResponse bulkByScrollResponse) { try { From f1073f5868c649c0beaea1be1417d02794fb99be Mon Sep 17 00:00:00 2001 From: Nhat Nguyen Date: Wed, 16 May 2018 16:34:48 -0400 Subject: [PATCH 40/44] Mute ShrinkIndexIT This is tracked at https://issues.apache.org/jira/browse/LUCENE-8318 --- .../action/admin/indices/create/ShrinkIndexIT.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index e48f151081f62..d89a8a134ff7c 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/server/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -23,6 +23,7 @@ import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; +import org.apache.lucene.util.LuceneTestCase.AwaitsFix; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -83,6 +84,7 @@ protected Collection> nodePlugins() { return Arrays.asList(InternalSettingsPlugin.class); } + @AwaitsFix(bugUrl = "https://issues.apache.org/jira/browse/LUCENE-8318") public void testCreateShrinkIndexToN() { int[][] possibleShardSplits = new int[][] {{8,4,2}, {9, 3, 1}, {4, 2, 1}, {15,5,1}}; int[] shardSplits = randomFrom(possibleShardSplits); From ef427c5429409cabe40c2fef5d86b709ed007e18 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 15 May 2018 16:09:15 -0700 Subject: [PATCH 41/44] Build: Add task interdependencies for ssl configuration (#30633) This commit fixes the tasks creating ssl certs for tests to have correct dependsOn to ensure the right 
tasks are run before tests run. --- x-pack/qa/smoke-test-plugins-ssl/build.gradle | 10 +++------- x-pack/qa/sql/security/ssl/build.gradle | 11 +++++------ 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/x-pack/qa/smoke-test-plugins-ssl/build.gradle b/x-pack/qa/smoke-test-plugins-ssl/build.gradle index 0a9fc79d4836b..bf26831fae82c 100644 --- a/x-pack/qa/smoke-test-plugins-ssl/build.gradle +++ b/x-pack/qa/smoke-test-plugins-ssl/build.gradle @@ -105,7 +105,7 @@ task exportNodeCertificate(type: LoggedExec) { // Import the node certificate in the client's keystore task importNodeCertificateInClientKeyStore(type: LoggedExec) { - dependsOn exportNodeCertificate + dependsOn createClientKeyStore, exportNodeCertificate executable = new File(project.runtimeJavaHome, 'bin/keytool') args '-import', '-alias', 'test-node', @@ -137,7 +137,7 @@ task exportClientCertificate(type: LoggedExec) { // Import the client certificate in the node's keystore task importClientCertificateInNodeKeyStore(type: LoggedExec) { - dependsOn exportClientCertificate + dependsOn createNodeKeyStore, exportClientCertificate executable = new File(project.runtimeJavaHome, 'bin/keytool') args '-import', '-alias', 'test-client', @@ -153,14 +153,10 @@ forbiddenPatterns { // Add keystores to test classpath: it expects it there sourceSets.test.resources.srcDir(keystoreDir) -processTestResources.dependsOn( - createNodeKeyStore, createClientKeyStore, - importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore -) +processTestResources.dependsOn(importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore) integTestCluster.dependsOn(importClientCertificateInNodeKeyStore, importNodeCertificateInClientKeyStore) - ext.pluginsCount = 0 project(':plugins').getChildProjects().each { pluginName, pluginProject -> // need to get a non-decorated project object, so must re-lookup the project by path diff --git a/x-pack/qa/sql/security/ssl/build.gradle b/x-pack/qa/sql/security/ssl/build.gradle index 8c19ba0303f78..fe8aaeaff2b64 100644 --- a/x-pack/qa/sql/security/ssl/build.gradle +++ b/x-pack/qa/sql/security/ssl/build.gradle @@ -74,6 +74,7 @@ task createClientKeyStore(type: LoggedExec) { // Export the node's certificate File nodeCertificate = new File(keystoreDir, 'test-node.cert') task exportNodeCertificate(type: LoggedExec) { + dependsOn createNodeKeyStore doFirst { if (nodeCertificate.parentFile.exists() == false) { nodeCertificate.parentFile.mkdirs() @@ -92,7 +93,7 @@ task exportNodeCertificate(type: LoggedExec) { // Import the node certificate in the client's keystore task importNodeCertificateInClientKeyStore(type: LoggedExec) { - dependsOn exportNodeCertificate + dependsOn createClientKeyStore, exportNodeCertificate executable = new File(project.runtimeJavaHome, 'bin/keytool') args '-import', '-alias', 'test-node', @@ -105,6 +106,7 @@ task importNodeCertificateInClientKeyStore(type: LoggedExec) { // Export the client's certificate File clientCertificate = new File(keystoreDir, 'test-client.cert') task exportClientCertificate(type: LoggedExec) { + dependsOn createClientKeyStore doFirst { if (clientCertificate.parentFile.exists() == false) { clientCertificate.parentFile.mkdirs() @@ -123,7 +125,7 @@ task exportClientCertificate(type: LoggedExec) { // Import the client certificate in the node's keystore task importClientCertificateInNodeKeyStore(type: LoggedExec) { - dependsOn exportClientCertificate + dependsOn createNodeKeyStore, exportClientCertificate executable = new File(project.runtimeJavaHome, 
'bin/keytool') args '-import', '-alias', 'test-client', @@ -139,10 +141,7 @@ forbiddenPatterns { // Add keystores to test classpath: it expects it there sourceSets.test.resources.srcDir(keystoreDir) -processTestResources.dependsOn( - createNodeKeyStore, createClientKeyStore, - importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore -) +processTestResources.dependsOn(importNodeCertificateInClientKeyStore, importClientCertificateInNodeKeyStore) integTestCluster.dependsOn(importClientCertificateInNodeKeyStore) From 4f1a5474be8c91f38305e7e53a79e3f743542608 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Tue, 15 May 2018 12:13:24 -0700 Subject: [PATCH 42/44] Security: Remove SecurityLifecycleService (#30526) This commit removes the SecurityLifecycleService, relegating its former functions of listening for cluster state updates to SecurityIndexManager and IndexAuditTrail. --- .../xpack/security/Security.java | 33 ++--- .../security/SecurityLifecycleService.java | 126 ------------------ .../security/audit/index/IndexAuditTrail.java | 35 ++++- .../security/authc/ExpiredTokenRemover.java | 4 +- .../xpack/security/authc/InternalRealms.java | 6 +- .../xpack/security/authc/TokenService.java | 50 ++++--- .../authc/esnative/NativeUsersStore.java | 38 +++--- .../authc/esnative/ReservedRealm.java | 12 +- .../mapper/NativeRoleMappingStore.java | 31 ++--- .../security/authz/AuthorizationService.java | 7 +- .../security/authz/AuthorizedIndices.java | 4 +- .../authz/store/CompositeRolesStore.java | 3 - .../authz/store/NativeRolesStore.java | 42 +++--- .../support/SecurityIndexManager.java | 23 +++- .../integration/ClearRolesCacheTests.java | 5 +- .../test/SecurityIntegTestCase.java | 2 +- .../xpack/security/SecurityTests.java | 2 +- ...sportSamlInvalidateSessionActionTests.java | 5 +- .../saml/TransportSamlLogoutActionTests.java | 5 +- .../user/TransportGetUsersActionTests.java | 13 +- .../user/TransportPutUserActionTests.java | 5 +- .../authc/AuthenticationServiceTests.java | 8 +- .../security/authc/InternalRealmsTests.java | 8 +- .../security/authc/TokenAuthIntegTests.java | 10 +- .../security/authc/TokenServiceTests.java | 41 +++--- .../esnative/ESNativeMigrateToolTests.java | 6 +- .../authc/esnative/NativeRealmIntegTests.java | 2 +- .../authc/esnative/NativeUsersStoreTests.java | 12 +- .../authc/esnative/ReservedRealmTests.java | 32 ++--- .../mapper/NativeRoleMappingStoreTests.java | 7 +- .../authz/AuthorizationServiceTests.java | 2 +- .../authz/AuthorizedIndicesTests.java | 8 +- .../authz/IndicesAndAliasesResolverTests.java | 13 +- .../authz/store/NativeRolesStoreTests.java | 13 +- .../support/SecurityIndexManagerTests.java | 6 +- .../security/test/SecurityTestUtils.java | 2 +- .../xpack/security/user/XPackUserTests.java | 8 +- 37 files changed, 248 insertions(+), 381 deletions(-) delete mode 100644 x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityLifecycleService.java diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index b3489bd86b83c..041d783adcafc 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -7,6 +7,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; +import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; @@ -16,6 +17,7 @@ import org.elasticsearch.bootstrap.BootstrapCheck; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; @@ -235,7 +237,7 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.xpack.core.XPackSettings.HTTP_SSL_ENABLED; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_TEMPLATE_NAME; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_INDEX_FORMAT; public class Security extends Plugin implements ActionPlugin, IngestPlugin, NetworkPlugin, ClusterPlugin, DiscoveryPlugin, MapperPlugin, @@ -271,6 +273,8 @@ public class Security extends Plugin implements ActionPlugin, IngestPlugin, Netw private final SetOnce threadContext = new SetOnce<>(); private final SetOnce tokenService = new SetOnce<>(); private final SetOnce securityActionFilter = new SetOnce<>(); + private final SetOnce securityIndex = new SetOnce<>(); + private final SetOnce indexAuditTrail = new SetOnce<>(); private final List bootstrapChecks; private final List securityExtensions = new ArrayList<>(); private volatile boolean indicesAdminFilteredFields; @@ -386,7 +390,6 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste components.add(securityContext.get()); // audit trails construction - IndexAuditTrail indexAuditTrail = null; Set auditTrails = new LinkedHashSet<>(); if (XPackSettings.AUDIT_ENABLED.get(settings)) { List outputs = AUDIT_OUTPUTS_SETTING.get(settings); @@ -401,8 +404,8 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste auditTrails.add(new LoggingAuditTrail(settings, clusterService, threadPool)); break; case IndexAuditTrail.NAME: - indexAuditTrail = new IndexAuditTrail(settings, client, threadPool, clusterService); - auditTrails.add(indexAuditTrail); + indexAuditTrail.set(new IndexAuditTrail(settings, client, threadPool, clusterService)); + auditTrails.add(indexAuditTrail.get()); break; default: throw new IllegalArgumentException("Unknown audit trail output [" + output + "]"); @@ -414,20 +417,20 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste components.add(auditTrailService); this.auditTrailService.set(auditTrailService); - final SecurityLifecycleService securityLifecycleService = - new SecurityLifecycleService(settings, clusterService, threadPool, client, indexAuditTrail); - final TokenService tokenService = new TokenService(settings, Clock.systemUTC(), client, securityLifecycleService, clusterService); + securityIndex.set(new SecurityIndexManager(settings, client, SecurityIndexManager.SECURITY_INDEX_NAME, clusterService)); + + final TokenService tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex.get(), clusterService); this.tokenService.set(tokenService); components.add(tokenService); // realms construction - final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, 
securityLifecycleService); - final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityLifecycleService); + final NativeUsersStore nativeUsersStore = new NativeUsersStore(settings, client, securityIndex.get()); + final NativeRoleMappingStore nativeRoleMappingStore = new NativeRoleMappingStore(settings, client, securityIndex.get()); final AnonymousUser anonymousUser = new AnonymousUser(settings); final ReservedRealm reservedRealm = new ReservedRealm(env, settings, nativeUsersStore, - anonymousUser, securityLifecycleService, threadPool.getThreadContext()); + anonymousUser, securityIndex.get(), threadPool.getThreadContext()); Map realmFactories = new HashMap<>(InternalRealms.getFactories(threadPool, resourceWatcherService, - getSslService(), nativeUsersStore, nativeRoleMappingStore, securityLifecycleService)); + getSslService(), nativeUsersStore, nativeRoleMappingStore, securityIndex.get())); for (SecurityExtension extension : securityExtensions) { Map newRealms = extension.getRealms(resourceWatcherService); for (Map.Entry entry : newRealms.entrySet()) { @@ -442,7 +445,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste components.add(realms); components.add(reservedRealm); - securityLifecycleService.securityIndex().addIndexStateListener(nativeRoleMappingStore::onSecurityIndexStateChange); + securityIndex.get().addIndexStateListener(nativeRoleMappingStore::onSecurityIndexStateChange); AuthenticationFailureHandler failureHandler = null; String extensionName = null; @@ -466,7 +469,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste components.add(authcService.get()); final FileRolesStore fileRolesStore = new FileRolesStore(settings, env, resourceWatcherService, getLicenseState()); - final NativeRolesStore nativeRolesStore = new NativeRolesStore(settings, client, getLicenseState(), securityLifecycleService); + final NativeRolesStore nativeRolesStore = new NativeRolesStore(settings, client, getLicenseState(), securityIndex.get()); final ReservedRolesStore reservedRolesStore = new ReservedRolesStore(); List, ActionListener>>> rolesProviders = new ArrayList<>(); for (SecurityExtension extension : securityExtensions) { @@ -474,7 +477,7 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste } final CompositeRolesStore allRolesStore = new CompositeRolesStore(settings, fileRolesStore, nativeRolesStore, reservedRolesStore, rolesProviders, threadPool.getThreadContext(), getLicenseState()); - securityLifecycleService.securityIndex().addIndexStateListener(allRolesStore::onSecurityIndexStateChange); + securityIndex.get().addIndexStateListener(allRolesStore::onSecurityIndexStateChange); // to keep things simple, just invalidate all cached entries on license change. 
this happens so rarely that the impact should be // minimal getLicenseState().addListener(allRolesStore::invalidateAll); @@ -485,8 +488,6 @@ Collection createComponents(Client client, ThreadPool threadPool, Cluste components.add(allRolesStore); // for SecurityFeatureSet and clear roles cache components.add(authzService); - components.add(securityLifecycleService); - ipFilter.set(new IPFilter(settings, auditTrailService, clusterService.getClusterSettings(), getLicenseState())); components.add(ipFilter.get()); DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterService.getClusterSettings()); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityLifecycleService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityLifecycleService.java deleted file mode 100644 index d4ad757ff4cab..0000000000000 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/SecurityLifecycleService.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License; - * you may not use this file except in compliance with the Elastic License. - */ -package org.elasticsearch.xpack.security; - -import org.apache.logging.log4j.Logger; -import org.elasticsearch.Version; -import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterChangedEvent; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.ClusterStateListener; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.health.ClusterIndexHealth; -import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; -import org.elasticsearch.common.component.AbstractComponent; -import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; -import org.elasticsearch.xpack.security.support.SecurityIndexManager; - -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -/** - * This class is used to provide a lifecycle for services that is based on the cluster's state - * rather than the typical lifecycle that is used to start services as part of the node startup. - * - * This type of lifecycle is necessary for services that need to perform actions that require the - * cluster to be in a certain state; some examples are storing index templates and creating indices. - * These actions would most likely fail from within a plugin if executed in the - * {@link org.elasticsearch.common.component.AbstractLifecycleComponent#doStart()} method. - * However, if the startup of these services waits for the cluster to form and recover indices then - * it will be successful. This lifecycle service allows for this to happen by listening for - * {@link ClusterChangedEvent} and checking if the services can start. Additionally, the service - * also provides hooks for stop and close functionality. 
- */ -public class SecurityLifecycleService extends AbstractComponent implements ClusterStateListener { - - public static final String INTERNAL_SECURITY_INDEX = SecurityIndexManager.INTERNAL_SECURITY_INDEX; - public static final String SECURITY_INDEX_NAME = ".security"; - - private final Settings settings; - private final ThreadPool threadPool; - private final IndexAuditTrail indexAuditTrail; - - private final SecurityIndexManager securityIndex; - - public SecurityLifecycleService(Settings settings, ClusterService clusterService, - ThreadPool threadPool, Client client, - @Nullable IndexAuditTrail indexAuditTrail) { - super(settings); - this.settings = settings; - this.threadPool = threadPool; - this.indexAuditTrail = indexAuditTrail; - this.securityIndex = new SecurityIndexManager(settings, client, SECURITY_INDEX_NAME); - clusterService.addListener(this); - clusterService.addLifecycleListener(new LifecycleListener() { - @Override - public void beforeStop() { - close(); - } - }); - } - - @Override - public void clusterChanged(ClusterChangedEvent event) { - final ClusterState state = event.state(); - if (state.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { - // wait until the gateway has recovered from disk, otherwise we think we don't have the - // .security index but they may not have been restored from the cluster state on disk - logger.debug("lifecycle service waiting until state has been recovered"); - return; - } - - securityIndex.clusterChanged(event); - - try { - if (Security.indexAuditLoggingEnabled(settings) && - indexAuditTrail.state() == IndexAuditTrail.State.INITIALIZED) { - if (indexAuditTrail.canStart(event)) { - threadPool.generic().execute(new AbstractRunnable() { - - @Override - public void onFailure(Exception throwable) { - logger.error("failed to start index audit trail services", throwable); - assert false : "security lifecycle services startup failed"; - } - - @Override - public void doRun() { - indexAuditTrail.start(); - } - }); - } - } - } catch (Exception e) { - logger.error("failed to start index audit trail", e); - } - } - - public SecurityIndexManager securityIndex() { - return securityIndex; - } - - // this is called in a lifecycle listener beforeStop on the cluster service - private void close() { - if (indexAuditTrail != null) { - try { - indexAuditTrail.stop(); - } catch (Exception e) { - logger.error("failed to stop audit trail module", e); - } - } - } - - public static List indexNames() { - return Collections.unmodifiableList(Arrays.asList(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)); - } -} diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java index 590c2bc5ecd4e..db7475a89727f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/index/IndexAuditTrail.java @@ -20,6 +20,7 @@ import org.elasticsearch.client.transport.TransportClient; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.metadata.AliasOrIndex; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; @@ -29,12 +30,14 @@ import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; +import org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.xcontent.XContentBuilder; @@ -110,7 +113,7 @@ /** * Audit trail implementation that writes events into an index. */ -public class IndexAuditTrail extends AbstractComponent implements AuditTrail { +public class IndexAuditTrail extends AbstractComponent implements AuditTrail, ClusterStateListener { public static final String NAME = "index"; public static final String DOC_TYPE = "doc"; @@ -199,6 +202,13 @@ public IndexAuditTrail(Settings settings, Client client, ThreadPool threadPool, } else { this.client = initializeRemoteClient(settings, logger); } + clusterService.addListener(this); + clusterService.addLifecycleListener(new LifecycleListener() { + @Override + public void beforeStop() { + stop(); + } + }); } @@ -206,6 +216,29 @@ public State state() { return state.get(); } + @Override + public void clusterChanged(ClusterChangedEvent event) { + try { + if (state() == IndexAuditTrail.State.INITIALIZED && canStart(event)) { + threadPool.generic().execute(new AbstractRunnable() { + + @Override + public void onFailure(Exception throwable) { + logger.error("failed to start index audit trail services", throwable); + assert false : "security lifecycle services startup failed"; + } + + @Override + public void doRun() { + start(); + } + }); + } + } catch (Exception e) { + logger.error("failed to start index audit trail", e); + } + } + /** * This method determines if this service can be started based on the state in the {@link ClusterChangedEvent} and * if the node is the master or not. When using remote indexing, a call to the remote cluster will be made to retrieve diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java index 6d897e3c64f1a..b8ae5c944419a 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ExpiredTokenRemover.java @@ -22,6 +22,7 @@ import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.threadpool.ThreadPool.Names; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -30,7 +31,6 @@ import static org.elasticsearch.action.support.TransportActions.isShardNotAvailableException; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; /** * Responsible for cleaning the invalidated tokens from the invalidated tokens index. 
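
The IndexAuditTrail hunk above is the template for how the deleted SecurityLifecycleService's duties are redistributed in this commit: each component registers itself as a cluster-state listener, starts once the state allows it, and hooks its own shutdown. A minimal standalone sketch of that shape (hypothetical component; ClusterStateListener, LifecycleListener, and the recovery-block check are the real APIs used above):

import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.gateway.GatewayService;

import java.util.concurrent.atomic.AtomicBoolean;

// Hypothetical component that manages its own lifecycle from cluster state,
// the way IndexAuditTrail does after this patch; not code from the commit.
public class SelfStartingComponent implements ClusterStateListener {

    private final AtomicBoolean started = new AtomicBoolean(false);

    public SelfStartingComponent(ClusterService clusterService) {
        clusterService.addListener(this);
        clusterService.addLifecycleListener(new LifecycleListener() {
            @Override
            public void beforeStop() {
                stop(); // release resources before the node shuts down
            }
        });
    }

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        // Creating indices or templates from a plugin's doStart() would fail;
        // waiting for a recovered cluster state makes the startup reliable.
        if (canStart(event) && started.compareAndSet(false, true)) {
            start();
        }
    }

    private boolean canStart(ClusterChangedEvent event) {
        // don't start before the cluster state has been recovered from disk
        return event.state().blocks()
                .hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false;
    }

    private void start() { /* put templates, create indices, ... */ }

    private void stop() { /* flush buffers, stop workers, ... */ }
}
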
@@ -50,7 +50,7 @@ final class ExpiredTokenRemover extends AbstractRunnable { @Override public void doRun() { - SearchRequest searchRequest = new SearchRequest(SECURITY_INDEX_NAME); + SearchRequest searchRequest = new SearchRequest(SecurityIndexManager.SECURITY_INDEX_NAME); DeleteByQueryRequest expiredDbq = new DeleteByQueryRequest(searchRequest); if (timeout != TimeValue.MINUS_ONE) { expiredDbq.setTimeout(timeout); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java index b50264a73e949..1e38e6fd10391 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/InternalRealms.java @@ -20,7 +20,6 @@ import org.elasticsearch.xpack.core.security.authc.pki.PkiRealmSettings; import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.esnative.NativeRealm; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; @@ -30,6 +29,7 @@ import org.elasticsearch.xpack.security.authc.saml.SamlRealm; import org.elasticsearch.xpack.security.authc.support.RoleMappingFileBootstrapCheck; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; import java.util.Arrays; @@ -90,13 +90,13 @@ static boolean isStandardRealm(String type) { public static Map getFactories(ThreadPool threadPool, ResourceWatcherService resourceWatcherService, SSLService sslService, NativeUsersStore nativeUsersStore, NativeRoleMappingStore nativeRoleMappingStore, - SecurityLifecycleService securityLifecycleService) { + SecurityIndexManager securityIndex) { Map map = new HashMap<>(); map.put(FileRealmSettings.TYPE, config -> new FileRealm(config, resourceWatcherService)); map.put(NativeRealmSettings.TYPE, config -> { final NativeRealm nativeRealm = new NativeRealm(config, nativeUsersStore); - securityLifecycleService.securityIndex().addIndexStateListener(nativeRealm::onSecurityIndexStateChange); + securityIndex.addIndexStateListener(nativeRealm::onSecurityIndexStateChange); return nativeRealm; }); map.put(LdapRealmSettings.AD_TYPE, config -> new LdapRealm(LdapRealmSettings.AD_TYPE, config, sslService, diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index b1ae7a7506a1f..dc6b9c485f5a6 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -9,7 +9,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.core.internal.io.IOUtils; -import org.apache.lucene.util.StringHelper; import org.apache.lucene.util.UnicodeUtil; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; @@ -71,7 +70,7 @@ import org.elasticsearch.xpack.core.security.authc.Authentication; import 
org.elasticsearch.xpack.core.security.authc.KeyAndTimestamp; import org.elasticsearch.xpack.core.security.authc.TokenMetaData; -import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import javax.crypto.Cipher; import javax.crypto.CipherInputStream; @@ -117,7 +116,6 @@ import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; /** * Service responsible for the creation, validation, and other management of {@link UserToken} @@ -165,7 +163,7 @@ public final class TokenService extends AbstractComponent { private final TimeValue expirationDelay; private final TimeValue deleteInterval; private final Client client; - private final SecurityLifecycleService lifecycleService; + private final SecurityIndexManager securityIndex; private final ExpiredTokenRemover expiredTokenRemover; private final boolean enabled; private volatile TokenKeys keyCache; @@ -180,7 +178,7 @@ public final class TokenService extends AbstractComponent { * @param client the client to use when checking for revocations */ public TokenService(Settings settings, Clock clock, Client client, - SecurityLifecycleService lifecycleService, ClusterService clusterService) throws GeneralSecurityException { + SecurityIndexManager securityIndex, ClusterService clusterService) throws GeneralSecurityException { super(settings); byte[] saltArr = new byte[SALT_BYTES]; secureRandom.nextBytes(saltArr); @@ -196,7 +194,7 @@ public TokenService(Settings settings, Clock clock, Client client, this.clock = clock.withZone(ZoneOffset.UTC); this.expirationDelay = TOKEN_EXPIRATION.get(settings); this.client = client; - this.lifecycleService = lifecycleService; + this.securityIndex = securityIndex; this.lastExpirationRunMs = client.threadPool().relativeTimeInMillis(); this.deleteInterval = DELETE_INTERVAL.get(settings); this.enabled = isTokenServiceEnabled(settings); @@ -256,12 +254,12 @@ public void createUserToken(Authentication authentication, Authentication origin .endObject(); builder.endObject(); IndexRequest request = - client.prepareIndex(SECURITY_INDEX_NAME, TYPE, getTokenDocumentId(userToken)) + client.prepareIndex(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, getTokenDocumentId(userToken)) .setOpType(OpType.CREATE) .setSource(builder) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .request(); - lifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client, SECURITY_ORIGIN, IndexAction.INSTANCE, request, ActionListener.wrap(indexResponse -> listener.onResponse(new Tuple<>(userToken, refreshToken)), listener::onFailure)) @@ -370,9 +368,9 @@ void decodeToken(String token, ActionListener listener) throws IOExce if (version.onOrAfter(Version.V_6_2_0)) { // we only have the id and need to get the token from the doc! 
decryptTokenId(in, cipher, version, ActionListener.wrap(tokenId -> - lifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { final GetRequest getRequest = - client.prepareGet(SECURITY_INDEX_NAME, TYPE, + client.prepareGet(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, getTokenDocumentId(tokenId)).request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, ActionListener.wrap(response -> { @@ -533,14 +531,14 @@ private void indexBwcInvalidation(UserToken userToken, ActionListener l listener.onFailure(invalidGrantException("failed to invalidate token")); } else { final String invalidatedTokenId = getInvalidatedTokenDocumentId(userToken); - IndexRequest indexRequest = client.prepareIndex(SECURITY_INDEX_NAME, TYPE, invalidatedTokenId) + IndexRequest indexRequest = client.prepareIndex(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, invalidatedTokenId) .setOpType(OpType.CREATE) .setSource("doc_type", INVALIDATED_TOKEN_DOC_TYPE, "expiration_time", expirationEpochMilli) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .request(); final String tokenDocId = getTokenDocumentId(userToken); final Version version = userToken.getVersion(); - lifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, indexRequest, ActionListener.wrap(indexResponse -> { ActionListener wrappedListener = @@ -577,12 +575,12 @@ private void indexInvalidation(String tokenDocId, Version version, ActionListene if (attemptCount.get() > 5) { listener.onFailure(invalidGrantException("failed to invalidate token")); } else { - UpdateRequest request = client.prepareUpdate(SECURITY_INDEX_NAME, TYPE, tokenDocId) + UpdateRequest request = client.prepareUpdate(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, tokenDocId) .setDoc(srcPrefix, Collections.singletonMap("invalidated", true)) .setVersion(documentVersion) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) .request(); - lifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, ActionListener.wrap(updateResponse -> { if (updateResponse.getGetResult() != null @@ -609,7 +607,7 @@ private void indexInvalidation(String tokenDocId, Version version, ActionListene || isShardNotAvailableException(cause)) { attemptCount.incrementAndGet(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareGet(SECURITY_INDEX_NAME, TYPE, tokenDocId).request(), + client.prepareGet(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, tokenDocId).request(), ActionListener.wrap(getResult -> { if (getResult.isExists()) { Map source = getResult.getSource(); @@ -674,14 +672,14 @@ private void findTokenFromRefreshToken(String refreshToken, ActionListener 5) { listener.onFailure(invalidGrantException("could not refresh the requested token")); } else { - SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) + SearchRequest request = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setQuery(QueryBuilders.boolQuery() .filter(QueryBuilders.termQuery("doc_type", "token")) .filter(QueryBuilders.termQuery("refresh_token.token", 
refreshToken))) .setVersion(true) .request(); - lifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, ActionListener.wrap(searchResponse -> { if (searchResponse.isTimedOut()) { @@ -718,7 +716,7 @@ private void innerRefresh(String tokenDocId, Authentication userAuth, ActionList if (attemptCount.getAndIncrement() > 5) { listener.onFailure(invalidGrantException("could not refresh the requested token")); } else { - GetRequest getRequest = client.prepareGet(SECURITY_INDEX_NAME, TYPE, tokenDocId).request(); + GetRequest getRequest = client.prepareGet(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, tokenDocId).request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, getRequest, ActionListener.wrap(response -> { if (response.isExists()) { @@ -739,7 +737,7 @@ private void innerRefresh(String tokenDocId, Authentication userAuth, ActionList in.setVersion(authVersion); Authentication authentication = new Authentication(in); UpdateRequest updateRequest = - client.prepareUpdate(SECURITY_INDEX_NAME, TYPE, tokenDocId) + client.prepareUpdate(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, tokenDocId) .setVersion(response.getVersion()) .setDoc("refresh_token", Collections.singletonMap("refreshed", true)) .setRefreshPolicy(RefreshPolicy.WAIT_UNTIL) @@ -854,7 +852,7 @@ public void findActiveTokensForRealm(String realmName, ActionListener supplier = client.threadPool().getThreadContext().newRestorableContext(false); - lifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> ScrollHelper.fetchAllByEntity(client, request, new ContextPreservingActionListener<>(supplier, listener), this::parseHit)); } @@ -930,14 +928,14 @@ private void ensureEnabled() { * have been explicitly cleared. */ private void checkIfTokenIsRevoked(UserToken userToken, ActionListener listener) { - if (lifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { // index doesn't exist so the token is considered valid. 
listener.onResponse(userToken); } else { - lifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { MultiGetRequest mGetRequest = client.prepareMultiGet() - .add(SECURITY_INDEX_NAME, TYPE, getInvalidatedTokenDocumentId(userToken)) - .add(SECURITY_INDEX_NAME, TYPE, getTokenDocumentId(userToken)) + .add(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, getInvalidatedTokenDocumentId(userToken)) + .add(SecurityIndexManager.SECURITY_INDEX_NAME, TYPE, getTokenDocumentId(userToken)) .request(); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, mGetRequest, @@ -1005,7 +1003,7 @@ private Instant getExpirationTime(Instant now) { } private void maybeStartTokenRemover() { - if (lifecycleService.securityIndex().isAvailable()) { + if (securityIndex.isAvailable()) { if (client.threadPool().relativeTimeInMillis() - lastExpirationRunMs > deleteInterval.getMillis()) { expiredTokenRemover.submit(client.threadPool()); lastExpirationRunMs = client.threadPool().relativeTimeInMillis(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java index 13c248f10218f..5e51a5d675541 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStore.java @@ -52,7 +52,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.User.Fields; import org.elasticsearch.xpack.core.security.user.XPackUser; -import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.Arrays; import java.util.Collection; @@ -66,7 +66,7 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; /** * NativeUsersStore is a store for users that reads from an Elasticsearch index. This store is responsible for fetching the full @@ -84,13 +84,13 @@ public class NativeUsersStore extends AbstractComponent { private final Client client; private final boolean isTribeNode; - private volatile SecurityLifecycleService securityLifecycleService; + private final SecurityIndexManager securityIndex; - public NativeUsersStore(Settings settings, Client client, SecurityLifecycleService securityLifecycleService) { + public NativeUsersStore(Settings settings, Client client, SecurityIndexManager securityIndex) { super(settings); this.client = client; this.isTribeNode = XPackClientActionPlugin.isTribeNode(settings); - this.securityLifecycleService = securityLifecycleService; + this.securityIndex = securityIndex; } /** @@ -116,7 +116,7 @@ public void getUsers(String[] userNames, final ActionListener> } }; - if (securityLifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! 
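
NativeUsersStore.getUsers (continuing below) picks one of three strategies depending on its input: a missing index short-circuits to an empty result, a single name goes through the cheaper getUserAndPassword GET, and anything else becomes one search. A hedged sketch of that dispatch, where fetchUsers is a hypothetical stand-in for the search plumbing in the hunk:

    public void getUsers(String[] userNames, ActionListener<Collection<User>> listener) {
        if (securityIndex.indexExists() == false) {
            listener.onResponse(Collections.emptyList()); // nothing indexed yet, nothing to return
        } else if (userNames.length == 1) {
            // optimization for single user lookup: a GET instead of a search
            getUserAndPassword(userNames[0], ActionListener.wrap(
                    uap -> listener.onResponse(uap == null ? Collections.emptyList()
                                                           : Collections.singletonList(uap.user())),
                    listener::onFailure));
        } else {
            securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure,
                    () -> fetchUsers(userNames, listener)); // hypothetical helper wrapping the search
        }
    }
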
listener.onResponse(Collections.emptyList()); } else if (userNames.length == 1) { // optimization for single user lookup @@ -125,7 +125,7 @@ public void getUsers(String[] userNames, final ActionListener> (uap) -> listener.onResponse(uap == null ? Collections.emptyList() : Collections.singletonList(uap.user())), handleException)); } else { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { final QueryBuilder query; if (userNames == null || userNames.length == 0) { query = QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), USER_DOC_TYPE); @@ -156,11 +156,11 @@ public void getUsers(String[] userNames, final ActionListener> * Async method to retrieve a user and their password */ private void getUserAndPassword(final String user, final ActionListener listener) { - if (securityLifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! listener.onResponse(null); } else { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareGet(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(USER_DOC_TYPE, user)).request(), @@ -204,7 +204,7 @@ public void changePassword(final ChangePasswordRequest request, final ActionList docType = USER_DOC_TYPE; } - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareUpdate(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(docType, username)) .setDoc(Requests.INDEX_CONTENT_TYPE, Fields.PASSWORD.getPreferredName(), @@ -243,7 +243,7 @@ public void onFailure(Exception e) { * has been indexed */ private void createReservedUser(String username, char[] passwordHash, RefreshPolicy refresh, ActionListener listener) { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareIndex(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(NativeUserStoreField.RESERVED_USER_TYPE, username)) @@ -287,7 +287,7 @@ public void putUser(final PutUserRequest request, final ActionListener private void updateUserWithoutPassword(final PutUserRequest putUserRequest, final ActionListener listener) { assert putUserRequest.passwordHash() == null; // We must have an existing document - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareUpdate(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(USER_DOC_TYPE, putUserRequest.username())) @@ -330,7 +330,7 @@ public void onFailure(Exception e) { private void indexUser(final PutUserRequest putUserRequest, final ActionListener listener) { assert putUserRequest.passwordHash() != null; - 
securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareIndex(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(USER_DOC_TYPE, putUserRequest.username())) @@ -376,7 +376,7 @@ public void setEnabled(final String username, final boolean enabled, final Refre private void setRegularUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy, final ActionListener listener) { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareUpdate(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(USER_DOC_TYPE, username)) @@ -411,7 +411,7 @@ public void onFailure(Exception e) { private void setReservedUserEnabled(final String username, final boolean enabled, final RefreshPolicy refreshPolicy, boolean clearCache, final ActionListener listener) { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareUpdate(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(NativeUserStoreField.RESERVED_USER_TYPE, username)) @@ -444,7 +444,7 @@ public void deleteUser(final DeleteUserRequest deleteUserRequest, final ActionLi if (isTribeNode) { listener.onFailure(new UnsupportedOperationException("users may not be deleted using a tribe node")); } else { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { DeleteRequest request = client.prepareDelete(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(USER_DOC_TYPE, deleteUserRequest.username())).request(); request.setRefreshPolicy(deleteUserRequest.getRefreshPolicy()); @@ -484,11 +484,11 @@ void verifyPassword(String username, final SecureString password, ActionListener } void getReservedUserInfo(String username, ActionListener listener) { - if (securityLifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! 
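
Every write path in this store funnels through the same two-argument hook; judging from the calls in these hunks and the mocks later in the patch, its shape is prepareIndexIfNeededThenExecute(Consumer<Exception>, Runnable): the consumer receives failures from template or mapping setup, and the runnable executes once the index is usable. A minimal sketch of a write using it (docId and source are placeholders):

    securityIndex.prepareIndexIfNeededThenExecute(
            listener::onFailure, // template/mapping problems surface here
            () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN,
                    client.prepareIndex(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, docId)
                            .setSource(source)
                            .request(),
                    listener, client::index));
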
listener.onResponse(null); } else { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareGet(SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, getIdForUser(NativeUserStoreField.RESERVED_USER_TYPE, username)).request(), @@ -528,7 +528,7 @@ public void onFailure(Exception e) { } void getAllReservedUserInfo(ActionListener> listener) { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareSearch(SECURITY_INDEX_NAME) .setQuery(QueryBuilders.termQuery(Fields.TYPE.getPreferredName(), NativeUserStoreField.RESERVED_USER_TYPE)) diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java index 199a1c1968408..7dbcea908722c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealm.java @@ -30,9 +30,9 @@ import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; import java.util.Arrays; @@ -63,16 +63,16 @@ public class ReservedRealm extends CachingUsernamePasswordRealm { private final AnonymousUser anonymousUser; private final boolean realmEnabled; private final boolean anonymousEnabled; - private final SecurityLifecycleService securityLifecycleService; + private final SecurityIndexManager securityIndex; public ReservedRealm(Environment env, Settings settings, NativeUsersStore nativeUsersStore, AnonymousUser anonymousUser, - SecurityLifecycleService securityLifecycleService, ThreadContext threadContext) { + SecurityIndexManager securityIndex, ThreadContext threadContext) { super(TYPE, new RealmConfig(TYPE, Settings.EMPTY, settings, env, threadContext)); this.nativeUsersStore = nativeUsersStore; this.realmEnabled = XPackSettings.RESERVED_REALM_ENABLED_SETTING.get(settings); this.anonymousUser = anonymousUser; this.anonymousEnabled = AnonymousUser.isAnonymousEnabled(settings); - this.securityLifecycleService = securityLifecycleService; + this.securityIndex = securityIndex; final char[] hash = BOOTSTRAP_ELASTIC_PASSWORD.get(settings).length() == 0 ? 
EMPTY_PASSWORD_HASH : Hasher.BCRYPT.hash(BOOTSTRAP_ELASTIC_PASSWORD.get(settings)); bootstrapUserInfo = new ReservedUserInfo(hash, true, hash == EMPTY_PASSWORD_HASH); @@ -191,7 +191,7 @@ private void getUserInfo(final String username, ActionListener if (userIsDefinedForCurrentSecurityMapping(username) == false) { logger.debug("Marking user [{}] as disabled because the security mapping is not at the required version", username); listener.onResponse(DISABLED_DEFAULT_USER_INFO.deepClone()); - } else if (securityLifecycleService.securityIndex().indexExists() == false) { + } else if (securityIndex.indexExists() == false) { listener.onResponse(getDefaultUserInfo(username)); } else { nativeUsersStore.getReservedUserInfo(username, ActionListener.wrap((userInfo) -> { @@ -218,7 +218,7 @@ private ReservedUserInfo getDefaultUserInfo(String username) { private boolean userIsDefinedForCurrentSecurityMapping(String username) { final Version requiredVersion = getDefinedVersion(username); - return securityLifecycleService.securityIndex().checkMappingVersion(requiredVersion::onOrBefore); + return securityIndex.checkMappingVersion(requiredVersion::onOrBefore); } private Version getDefinedVersion(String username) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java index 59d3a9d87442c..66a490ef959a9 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStore.java @@ -12,8 +12,6 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.health.ClusterIndexHealth; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.component.AbstractComponent; @@ -37,7 +35,6 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.ExpressionRoleMapping; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.ExpressionModel; import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -63,13 +60,13 @@ import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isIndexDeleted; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.isMoveFromRedToNonRed; /** * This store reads + writes {@link ExpressionRoleMapping role mappings} in an Elasticsearch - * {@link 
SecurityLifecycleService#SECURITY_INDEX_NAME index}. + * {@link SecurityIndexManager#SECURITY_INDEX_NAME index}. *
* The store is responsible for all read and write operations as well as * {@link #resolveRoles(UserData, ActionListener) resolving roles}. @@ -101,14 +98,14 @@ public void onFailure(Exception e) { private final Client client; private final boolean isTribeNode; - private final SecurityLifecycleService securityLifecycleService; + private final SecurityIndexManager securityIndex; private final List realmsToRefresh = new CopyOnWriteArrayList<>(); - public NativeRoleMappingStore(Settings settings, Client client, SecurityLifecycleService securityLifecycleService) { + public NativeRoleMappingStore(Settings settings, Client client, SecurityIndexManager securityIndex) { super(settings); this.client = client; this.isTribeNode = XPackClientActionPlugin.isTribeNode(settings); - this.securityLifecycleService = securityLifecycleService; + this.securityIndex = securityIndex; } private String getNameFromId(String id) { @@ -125,7 +122,7 @@ private String getIdForName(String name) { * package private for unit testing */ void loadMappings(ActionListener> listener) { - if (securityLifecycleService.securityIndex().isIndexUpToDate() == false) { + if (securityIndex.isIndexUpToDate() == false) { listener.onFailure(new IllegalStateException( "Security index is not on the current version - the native realm will not be operational until " + "the upgrade API is run on the security index")); @@ -183,7 +180,7 @@ private void modifyMapping(String name, CheckedBiConsumer listener) { if (isTribeNode) { listener.onFailure(new UnsupportedOperationException("role-mappings may not be modified using a tribe node")); - } else if (securityLifecycleService.securityIndex().isIndexUpToDate() == false) { + } else if (securityIndex.isIndexUpToDate() == false) { listener.onFailure(new IllegalStateException( "Security index is not on the current version - the native realm will not be operational until " + "the upgrade API is run on the security index")); @@ -199,7 +196,7 @@ private void modifyMapping(String name, CheckedBiConsumer listener) { final ExpressionRoleMapping mapping = request.getMapping(); - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { final XContentBuilder xContentBuilder; try { xContentBuilder = mapping.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true); @@ -229,7 +226,7 @@ public void onFailure(Exception e) { } private void innerDeleteMapping(DeleteRoleMappingRequest request, ActionListener listener) throws IOException { - if (securityLifecycleService.securityIndex().isIndexUpToDate() == false) { + if (securityIndex.isIndexUpToDate() == false) { listener.onFailure(new IllegalStateException( "Security index is not on the current version - the native realm will not be operational until " + "the upgrade API is run on the security index")); @@ -283,16 +280,16 @@ public void onFailure(Exception e) { } private void getMappings(ActionListener> listener) { - if (securityLifecycleService.securityIndex().isAvailable()) { + if (securityIndex.isAvailable()) { loadMappings(listener); } else { logger.info("The security index is not yet available - no role mappings can be loaded"); if (logger.isDebugEnabled()) { logger.debug("Security Index [{}] [exists: {}] [available: {}] [mapping up to date: {}]", SECURITY_INDEX_NAME, - securityLifecycleService.securityIndex().indexExists(), - securityLifecycleService.securityIndex().isAvailable(), - securityLifecycleService.securityIndex().isMappingUpToDate() + 
securityIndex.indexExists(), + securityIndex.isAvailable(), + securityIndex.isMappingUpToDate() ); } listener.onResponse(Collections.emptyList()); @@ -309,7 +306,7 @@ private void getMappings(ActionListener> listener) { * */ public void usageStats(ActionListener> listener) { - if (securityLifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { reportStats(listener, Collections.emptyList()); } else { getMappings(ActionListener.wrap(mappings -> reportStats(listener, mappings), listener::onFailure)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 36c0987f50de9..19760ccab0202 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -60,11 +60,11 @@ import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackSecurityUser; import org.elasticsearch.xpack.core.security.user.XPackUser; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.elasticsearch.xpack.security.authz.IndicesAndAliasesResolver.ResolvedIndices; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.Arrays; import java.util.Collections; @@ -77,7 +77,6 @@ import static org.elasticsearch.xpack.core.security.SecurityField.setting; import static org.elasticsearch.xpack.core.security.support.Exceptions.authorizationError; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; public class AuthorizationService extends AbstractComponent { public static final Setting ANONYMOUS_AUTHORIZATION_EXCEPTION_SETTING = @@ -302,7 +301,7 @@ && isSuperuser(authentication.getUser()) == false) { // only the XPackUser is allowed to work with this index, but we should allow indices monitoring actions through for debugging // purposes. 
These monitor requests also sometimes resolve indices concretely and then requests them logger.debug("user [{}] attempted to directly perform [{}] against the security index [{}]", - authentication.getUser().principal(), action, SECURITY_INDEX_NAME); + authentication.getUser().principal(), action, SecurityIndexManager.SECURITY_INDEX_NAME); throw denial(authentication, action, request, permission.names()); } else { putTransientIfNonExisting(AuthorizationServiceField.INDICES_PERMISSIONS_KEY, indicesAccessControl); @@ -338,7 +337,7 @@ && isSuperuser(authentication.getUser()) == false) { } private boolean hasSecurityIndexAccess(IndicesAccessControl indicesAccessControl) { - for (String index : SecurityLifecycleService.indexNames()) { + for (String index : SecurityIndexManager.indexNames()) { final IndicesAccessControl.IndexAccessControl indexPermissions = indicesAccessControl.getIndexPermissions(index); if (indexPermissions != null && indexPermissions.isGranted()) { return true; diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java index 3f257b7f0ce91..3068a3993d309 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizedIndices.java @@ -9,7 +9,7 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.xpack.core.security.authz.permission.Role; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; import java.util.Collections; @@ -58,7 +58,7 @@ private List load() { if (isSuperuser(user) == false) { // we should filter out all of the security indices from wildcards - indicesAndAliases.removeAll(SecurityLifecycleService.indexNames()); + indicesAndAliases.removeAll(SecurityIndexManager.indexNames()); } return Collections.unmodifiableList(indicesAndAliases); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java index 5a005d7445b36..c31c7ea7d578c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStore.java @@ -8,8 +8,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.elasticsearch.action.ActionListener; -import org.elasticsearch.cluster.health.ClusterHealthStatus; -import org.elasticsearch.cluster.health.ClusterIndexHealth; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -35,7 +33,6 @@ import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.authz.privilege.Privilege; import org.elasticsearch.xpack.core.security.authz.store.ReservedRolesStore; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.ArrayList; diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java index a4b465b4f520c..fd49fff0f587c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStore.java @@ -44,7 +44,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; import org.elasticsearch.xpack.core.security.client.SecurityClient; -import org.elasticsearch.xpack.security.SecurityLifecycleService; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.io.IOException; import java.util.ArrayList; @@ -64,7 +64,6 @@ import static org.elasticsearch.xpack.core.ClientHelper.stashWithOrigin; import static org.elasticsearch.xpack.core.security.SecurityField.setting; import static org.elasticsearch.xpack.core.security.authz.RoleDescriptor.ROLE_TYPE; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; /** * NativeRolesStore is a {@code RolesStore} that, instead of reading from a @@ -88,23 +87,22 @@ public class NativeRolesStore extends AbstractComponent { private final boolean isTribeNode; private SecurityClient securityClient; - private final SecurityLifecycleService securityLifecycleService; + private final SecurityIndexManager securityIndex; - public NativeRolesStore(Settings settings, Client client, XPackLicenseState licenseState, - SecurityLifecycleService securityLifecycleService) { + public NativeRolesStore(Settings settings, Client client, XPackLicenseState licenseState, SecurityIndexManager securityIndex) { super(settings); this.client = client; this.isTribeNode = XPackClientActionPlugin.isTribeNode(settings); this.securityClient = new SecurityClient(client); this.licenseState = licenseState; - this.securityLifecycleService = securityLifecycleService; + this.securityIndex = securityIndex; } /** * Retrieve a list of roles, if rolesToGet is null or empty, fetch all roles */ public void getRoleDescriptors(String[] names, final ActionListener> listener) { - if (securityLifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! 
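
The bulk read in getRoleDescriptors (continuing below) avoids one unbounded search by paging through every role document with a short-lived scroll, while restoring the caller's thread context for the response. Consolidated from the surrounding hunk, with the hit parser left as a placeholder:

    final Supplier<ThreadContext.StoredContext> supplier =
            client.threadPool().getThreadContext().newRestorableContext(false);
    try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) {
        SearchRequest request = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME)
                .setScroll(TimeValue.timeValueSeconds(10L)) // keep each scroll context short-lived
                .setQuery(query)
                .setSize(1000)
                .request();
        ScrollHelper.fetchAllByEntity(client, request,
                new ContextPreservingActionListener<>(supplier, listener),
                hit -> parseRole(hit)); // hypothetical parser mapping hits back to RoleDescriptor
    }
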
listener.onResponse(Collections.emptyList()); } else if (names != null && names.length == 1) { @@ -112,7 +110,7 @@ public void getRoleDescriptors(String[] names, final ActionListener { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { QueryBuilder query; if (names == null || names.length == 0) { query = QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE); @@ -122,7 +120,7 @@ public void getRoleDescriptors(String[] names, final ActionListener supplier = client.threadPool().getThreadContext().newRestorableContext(false); try (ThreadContext.StoredContext ignore = stashWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN)) { - SearchRequest request = client.prepareSearch(SECURITY_INDEX_NAME) + SearchRequest request = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setScroll(TimeValue.timeValueSeconds(10L)) .setQuery(query) .setSize(1000) @@ -140,8 +138,8 @@ public void deleteRole(final DeleteRoleRequest deleteRoleRequest, final ActionLi if (isTribeNode) { listener.onFailure(new UnsupportedOperationException("roles may not be deleted using a tribe node")); } else { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { - DeleteRequest request = client.prepareDelete(SECURITY_INDEX_NAME, + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + DeleteRequest request = client.prepareDelete(SecurityIndexManager.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(deleteRoleRequest.name())).request(); request.setRefreshPolicy(deleteRoleRequest.getRefreshPolicy()); executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, request, @@ -176,7 +174,7 @@ public void putRole(final PutRoleRequest request, final RoleDescriptor role, fin // pkg-private for testing void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> { + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> { final XContentBuilder xContentBuilder; try { xContentBuilder = role.toXContent(jsonBuilder(), ToXContent.EMPTY_PARAMS, true); @@ -185,7 +183,7 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final return; } executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareIndex(SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role.getName())) + client.prepareIndex(SecurityIndexManager.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role.getName())) .setSource(xContentBuilder) .setRefreshPolicy(request.getRefreshPolicy()) .request(), @@ -207,19 +205,19 @@ public void onFailure(Exception e) { public void usageStats(ActionListener> listener) { Map usageStats = new HashMap<>(); - if (securityLifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { usageStats.put("size", 0L); usageStats.put("fls", false); usageStats.put("dls", false); listener.onResponse(usageStats); } else { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, client.prepareMultiSearch() - .add(client.prepareSearch(SECURITY_INDEX_NAME) + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) 
.setQuery(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) .setSize(0)) - .add(client.prepareSearch(SECURITY_INDEX_NAME) + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setQuery(QueryBuilders.boolQuery() .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) .must(QueryBuilders.boolQuery() @@ -229,7 +227,7 @@ public void usageStats(ActionListener> listener) { .should(existsQuery("indices.fields")))) .setSize(0) .setTerminateAfter(1)) - .add(client.prepareSearch(SECURITY_INDEX_NAME) + .add(client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setQuery(QueryBuilders.boolQuery() .must(QueryBuilders.termQuery(RoleDescriptor.Fields.TYPE.getPreferredName(), ROLE_TYPE)) .filter(existsQuery("indices.query"))) @@ -269,11 +267,11 @@ public void onFailure(Exception e) { } private void getRoleDescriptor(final String roleId, ActionListener roleActionListener) { - if (securityLifecycleService.securityIndex().indexExists() == false) { + if (securityIndex.indexExists() == false) { // TODO remove this short circuiting and fix tests that fail without this! roleActionListener.onResponse(null); } else { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(roleActionListener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(roleActionListener::onFailure, () -> executeGetRoleRequest(roleId, new ActionListener() { @Override public void onResponse(GetResponse response) { @@ -298,9 +296,9 @@ public void onFailure(Exception e) { } private void executeGetRoleRequest(String role, ActionListener listener) { - securityLifecycleService.securityIndex().prepareIndexIfNeededThenExecute(listener::onFailure, () -> + securityIndex.prepareIndexIfNeededThenExecute(listener::onFailure, () -> executeAsyncWithOrigin(client.threadPool().getThreadContext(), SECURITY_ORIGIN, - client.prepareGet(SECURITY_INDEX_NAME, + client.prepareGet(SecurityIndexManager.SECURITY_INDEX_NAME, ROLE_DOC_TYPE, getIdForUser(role)).request(), listener, client::get)); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java index 4bcfb779b0d50..45c55c633d923 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/support/SecurityIndexManager.java @@ -23,6 +23,7 @@ import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterChangedEvent; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.health.ClusterIndexHealth; import org.elasticsearch.cluster.metadata.AliasOrIndex; @@ -30,15 +31,19 @@ import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.component.AbstractComponent; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.mapper.MapperService; import 
org.elasticsearch.xpack.core.template.TemplateUtils; import org.elasticsearch.xpack.core.upgrade.IndexUpgradeCheckVersion; import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -54,18 +59,18 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; /** * Manages the lifecycle of a single index, its template, mapping and and data upgrades/migrations. */ -public class SecurityIndexManager extends AbstractComponent { +public class SecurityIndexManager extends AbstractComponent implements ClusterStateListener { public static final String INTERNAL_SECURITY_INDEX = ".security-" + IndexUpgradeCheckVersion.UPRADE_VERSION; public static final int INTERNAL_INDEX_FORMAT = 6; public static final String SECURITY_VERSION_STRING = "security-version"; public static final String TEMPLATE_VERSION_PATTERN = Pattern.quote("${security.template.version}"); public static final String SECURITY_TEMPLATE_NAME = "security-index-template"; + public static final String SECURITY_INDEX_NAME = ".security"; private final String indexName; private final Client client; @@ -74,10 +79,15 @@ public class SecurityIndexManager extends AbstractComponent { private volatile State indexState = new State(false, false, false, false, null, null); - public SecurityIndexManager(Settings settings, Client client, String indexName) { + public SecurityIndexManager(Settings settings, Client client, String indexName, ClusterService clusterService) { super(settings); this.client = client; this.indexName = indexName; + clusterService.addListener(this); + } + + public static List indexNames() { + return Collections.unmodifiableList(Arrays.asList(SECURITY_INDEX_NAME, INTERNAL_SECURITY_INDEX)); } public boolean checkMappingVersion(Predicate requiredVersion) { @@ -115,7 +125,14 @@ public void addIndexStateListener(BiConsumer listener) { stateChangeListeners.add(listener); } + @Override public void clusterChanged(ClusterChangedEvent event) { + if (event.state().blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) { + // wait until the gateway has recovered from disk, otherwise we think we don't have the + // .security index but they may not have been restored from the cluster state on disk + logger.debug("security index manager waiting until state has been recovered"); + return; + } final State previousState = indexState; final IndexMetaData indexMetaData = resolveConcreteIndex(indexName, event.state().metaData()); final boolean indexExists = indexMetaData != null; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java index 54b8d72260ac7..e45325f08234d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/integration/ClearRolesCacheTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.client.SecurityClient; import org.elasticsearch.xpack.security.authz.store.NativeRolesStore; +import 
org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; import org.junit.BeforeClass; @@ -25,11 +26,9 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; - /** * Test for the clear roles API */ @@ -57,7 +56,7 @@ public void setupForTests() { logger.debug("--> created role [{}]", role); } - ensureGreen(SECURITY_INDEX_NAME); + ensureGreen(SecurityIndexManager.SECURITY_INDEX_NAME); // warm up the caches on every node for (NativeRolesStore rolesStore : internalCluster().getInstances(NativeRolesStore.class)) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java index e8dd50ac7330c..5b93545b912c9 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/test/SecurityIntegTestCase.java @@ -64,7 +64,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.is; import static org.hamcrest.core.IsCollectionContaining.hasItem; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java index 274f7b1129aed..1cc758c13eaf8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityTests.java @@ -62,7 +62,7 @@ import java.util.function.Predicate; import static org.elasticsearch.cluster.metadata.IndexMetaData.INDEX_FORMAT_SETTING; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_INDEX_FORMAT; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java index 52a2e537d8db5..09a48a0eb1370 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlInvalidateSessionActionTests.java @@ -57,7 +57,6 @@ import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; import 
org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.UserToken; @@ -161,16 +160,14 @@ void doExecute(Action action, Request request } }; - final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(lifecycleService.securityIndex()).thenReturn(securityIndex); doAnswer(inv -> { ((Runnable) inv.getArguments()[1]).run(); return null; }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - tokenService = new TokenService(settings, Clock.systemUTC(), client, lifecycleService, clusterService); + tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService); final TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java index 93e6ebf2861cf..eca52831d9adc 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/saml/TransportSamlLogoutActionTests.java @@ -47,7 +47,6 @@ import org.elasticsearch.xpack.core.security.authc.saml.SamlRealmSettings; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.Realms; import org.elasticsearch.xpack.security.authc.TokenService; import org.elasticsearch.xpack.security.authc.UserToken; @@ -173,16 +172,14 @@ public void setup() throws Exception { return Void.TYPE; }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); - final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); final SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(lifecycleService.securityIndex()).thenReturn(securityIndex); doAnswer(inv -> { ((Runnable) inv.getArguments()[1]).run(); return null; }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); final ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - tokenService = new TokenService(settings, Clock.systemUTC(), client, lifecycleService, clusterService); + tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService); final TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java 
b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java index 02af431f8978b..6750560b0b0d2 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -24,7 +24,6 @@ import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealmTests; @@ -76,13 +75,11 @@ public void maybeEnableAnonymous() { public void testAnonymousUser() { NativeUsersStore usersStore = mock(NativeUsersStore.class); - SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityLifecycleService.securityIndex()).thenReturn(securityIndex); when(securityIndex.isAvailable()).thenReturn(true); AnonymousUser anonymousUser = new AnonymousUser(settings); ReservedRealm reservedRealm = - new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser, securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser, securityIndex, new ThreadContext(Settings.EMPTY)); TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), @@ -148,15 +145,13 @@ public void onFailure(Exception e) { public void testReservedUsersOnly() { NativeUsersStore usersStore = mock(NativeUsersStore.class); - SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityLifecycleService.securityIndex()).thenReturn(securityIndex); when(securityIndex.isAvailable()).thenReturn(true); when(securityIndex.checkMappingVersion(any())).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); ReservedRealm reservedRealm = - new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture> userFuture = new PlainActionFuture<>(); reservedRealm.users(userFuture); final Collection allReservedUsers = userFuture.actionGet(); @@ -198,13 +193,11 @@ public void testGetAllUsers() { final List storeUsers = randomFrom(Collections.emptyList(), Collections.singletonList(new User("joe")), Arrays.asList(new User("jane"), new User("fred")), randomUsers()); NativeUsersStore usersStore = mock(NativeUsersStore.class); - SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - 
when(securityLifecycleService.securityIndex()).thenReturn(securityIndex); when(securityIndex.isAvailable()).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings), - securityLifecycleService, new ThreadContext(Settings.EMPTY)); + securityIndex, new ThreadContext(Settings.EMPTY)); TransportService transportService = new TransportService(Settings.EMPTY, null, null, TransportService.NOOP_TRANSPORT_INTERCEPTOR, x -> null, null, Collections.emptySet()); TransportGetUsersAction action = new TransportGetUsersAction(Settings.EMPTY, mock(ThreadPool.class), mock(ActionFilters.class), diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java index 7b26e605207a2..65cf74971a55c 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportPutUserActionTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.XPackUser; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealmTests; @@ -118,14 +117,12 @@ public void onFailure(Exception e) { public void testReservedUser() { NativeUsersStore usersStore = mock(NativeUsersStore.class); - SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityLifecycleService.securityIndex()).thenReturn(securityIndex); when(securityIndex.isAvailable()).thenReturn(true); ReservedRealmTests.mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); Settings settings = Settings.builder().put("path.home", createTempDir()).build(); ReservedRealm reservedRealm = new ReservedRealm(TestEnvironment.newEnvironment(settings), settings, usersStore, - new AnonymousUser(settings), securityLifecycleService, new ThreadContext(settings)); + new AnonymousUser(settings), securityIndex, new ThreadContext(settings)); PlainActionFuture> userFuture = new PlainActionFuture<>(); reservedRealm.users(userFuture); final User reserved = randomFrom(userFuture.actionGet().toArray(new User[0])); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java index 41b765cb33322..cd685b8f34c28 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/AuthenticationServiceTests.java @@ -64,7 +64,6 @@ import org.elasticsearch.xpack.core.security.user.AnonymousUser; import org.elasticsearch.xpack.core.security.user.SystemUser; import org.elasticsearch.xpack.core.security.user.User; -import 
org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.authc.AuthenticationService.Authenticator; import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm; @@ -125,7 +124,6 @@ public class AuthenticationServiceTests extends ESTestCase { private ThreadPool threadPool; private ThreadContext threadContext; private TokenService tokenService; - private SecurityLifecycleService lifecycleService; private SecurityIndexManager securityIndex; private Client client; private InetSocketAddress remoteAddress; @@ -182,16 +180,14 @@ licenseState, threadContext, mock(ReservedRealm.class), Arrays.asList(firstRealm .setId((String) invocationOnMock.getArguments()[2]); return builder; }).when(client).prepareGet(anyString(), anyString(), anyString()); - lifecycleService = mock(SecurityLifecycleService.class); securityIndex = mock(SecurityIndexManager.class); - when(lifecycleService.securityIndex()).thenReturn(securityIndex); doAnswer(invocationOnMock -> { Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; runnable.run(); return null; }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool); - tokenService = new TokenService(settings, Clock.systemUTC(), client, lifecycleService, clusterService); + tokenService = new TokenService(settings, Clock.systemUTC(), client, securityIndex, clusterService); service = new AuthenticationService(settings, realms, auditTrail, new DefaultAuthenticationFailureHandler(), threadPool, new AnonymousUser(settings), tokenService); } @@ -929,7 +925,7 @@ public void testInvalidToken() throws Exception { public void testExpiredToken() throws Exception { when(securityIndex.isAvailable()).thenReturn(true); - when(lifecycleService.securityIndex().indexExists()).thenReturn(true); + when(securityIndex.indexExists()).thenReturn(true); User user = new User("_username", "r1"); final Authentication expected = new Authentication(user, new RealmRef("realm", "custom", "node"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java index 47eb1eabae159..0cbeced00b2ab 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/InternalRealmsTests.java @@ -15,7 +15,6 @@ import org.elasticsearch.xpack.core.security.authc.RealmConfig; import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings; import org.elasticsearch.xpack.core.ssl.SSLService; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore; import org.elasticsearch.xpack.security.authc.support.mapper.NativeRoleMappingStore; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -31,18 +30,15 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyZeroInteractions; -import static org.mockito.Mockito.when; public class InternalRealmsTests extends ESTestCase { public void testNativeRealmRegistersIndexHealthChangeListener() throws Exception { - 
SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(lifecycleService.securityIndex()).thenReturn(securityIndex); Map factories = InternalRealms.getFactories(mock(ThreadPool.class), mock(ResourceWatcherService.class), - mock(SSLService.class), mock(NativeUsersStore.class), mock(NativeRoleMappingStore.class), lifecycleService); + mock(SSLService.class), mock(NativeUsersStore.class), mock(NativeRoleMappingStore.class), securityIndex); assertThat(factories, hasEntry(is(NativeRealmSettings.TYPE), any(Realm.Factory.class))); - verifyZeroInteractions(lifecycleService); + verifyZeroInteractions(securityIndex); Settings settings = Settings.builder().put("path.home", createTempDir()).build(); factories.get(NativeRealmSettings.TYPE).create(new RealmConfig("test", Settings.EMPTY, settings, diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index aa6b9cab99467..0fe27731b31d5 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -32,6 +32,7 @@ import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.After; import org.junit.Before; @@ -43,7 +44,6 @@ import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.equalTo; public class TokenAuthIntegTests extends SecurityIntegTestCase { @@ -146,7 +146,7 @@ public void testExpiredTokensDeletedAfterExpiration() throws Exception { assertTrue(invalidateResponse.isCreated()); AtomicReference docId = new AtomicReference<>(); assertBusy(() -> { - SearchResponse searchResponse = client.prepareSearch(SECURITY_INDEX_NAME) + SearchResponse searchResponse = client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setSource(SearchSourceBuilder.searchSource() .query(QueryBuilders.termQuery("doc_type", TokenService.INVALIDATED_TOKEN_DOC_TYPE))) .setSize(1) @@ -159,7 +159,7 @@ public void testExpiredTokensDeletedAfterExpiration() throws Exception { // hack doc to modify the time to the day before Instant dayBefore = created.minus(1L, ChronoUnit.DAYS); assertTrue(Instant.now().isAfter(dayBefore)); - client.prepareUpdate(SECURITY_INDEX_NAME, "doc", docId.get()) + client.prepareUpdate(SecurityIndexManager.SECURITY_INDEX_NAME, "doc", docId.get()) .setDoc("expiration_time", dayBefore.toEpochMilli()) .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) .get(); @@ -177,8 +177,8 @@ public void testExpiredTokensDeletedAfterExpiration() throws Exception { assertEquals("token malformed", e.getMessage()); } } - client.admin().indices().prepareRefresh(SECURITY_INDEX_NAME).get(); - SearchResponse searchResponse = client.prepareSearch(SECURITY_INDEX_NAME) + client.admin().indices().prepareRefresh(SecurityIndexManager.SECURITY_INDEX_NAME).get(); + SearchResponse searchResponse = 
client.prepareSearch(SecurityIndexManager.SECURITY_INDEX_NAME) .setSource(SearchSourceBuilder.searchSource() .query(QueryBuilders.termQuery("doc_type", TokenService.INVALIDATED_TOKEN_DOC_TYPE))) .setSize(0) diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 3cc7247ad7910..07276e33b4efe 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -52,7 +52,6 @@ import org.elasticsearch.xpack.core.security.authc.TokenMetaData; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.watcher.watch.ClockMock; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.AfterClass; import org.junit.Before; @@ -90,7 +89,6 @@ public class TokenServiceTests extends ESTestCase { .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), true).build(); private Client client; - private SecurityLifecycleService lifecycleService; private SecurityIndexManager securityIndex; private ClusterService clusterService; private Settings tokenServiceEnabledSettings = Settings.builder() @@ -136,9 +134,7 @@ public void setupClient() { }).when(client).execute(eq(IndexAction.INSTANCE), any(IndexRequest.class), any(ActionListener.class)); // setup lifecycle service - lifecycleService = mock(SecurityLifecycleService.class); securityIndex = mock(SecurityIndexManager.class); - when(lifecycleService.securityIndex()).thenReturn(securityIndex); doAnswer(invocationOnMock -> { Runnable runnable = (Runnable) invocationOnMock.getArguments()[1]; runnable.run(); @@ -161,8 +157,7 @@ public static void shutdownThreadpool() throws InterruptedException { } public void testAttachAndGetToken() throws Exception { - TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); @@ -182,7 +177,7 @@ public void testAttachAndGetToken() throws Exception { try (ThreadContext.StoredContext ignore = requestContext.newStoredContext(true)) { // verify a second separate token service with its own salt can also verify - TokenService anotherService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService + TokenService anotherService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex , clusterService); anotherService.refreshMetaData(tokenService.getTokenMetaData()); PlainActionFuture future = new PlainActionFuture<>(); @@ -193,8 +188,7 @@ public void testAttachAndGetToken() throws Exception { } public void testRotateKey() throws Exception { - TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), 
client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); @@ -246,13 +240,12 @@ private void rotateKeys(TokenService tokenService) { } public void testKeyExchange() throws Exception { - TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); int numRotations = randomIntBetween(1, 5); for (int i = 0; i < numRotations; i++) { rotateKeys(tokenService); } - TokenService otherTokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, + TokenService otherTokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); otherTokenService.refreshMetaData(tokenService.getTokenMetaData()); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); @@ -284,8 +277,7 @@ public void testKeyExchange() throws Exception { } public void testPruneKeys() throws Exception { - TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); @@ -346,8 +338,7 @@ public void testPruneKeys() throws Exception { } public void testPassphraseWorks() throws Exception { - TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture<Tuple<UserToken, String>> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); @@ -370,7 +361,7 @@ public void testPassphraseWorks() throws Exception { MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString(TokenService.TOKEN_PASSPHRASE.getKey(), randomAlphaOfLengthBetween(8, 30)); Settings settings = Settings.builder().setSecureSettings(secureSettings).build(); - TokenService anotherService = new TokenService(settings, systemUTC(), client, lifecycleService, clusterService); + TokenService anotherService = new TokenService(settings, systemUTC(), client, securityIndex, clusterService); PlainActionFuture<UserToken> future = new PlainActionFuture<>(); anotherService.getAndValidateToken(requestContext, future); assertNull(future.get()); @@ -380,7 +371,7 @@ } public void testGetTokenWhenKeyCacheHasExpired() throws Exception { - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, 
systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); @@ -395,7 +386,7 @@ public void testGetTokenWhenKeyCacheHasExpired() throws Exception { public void testInvalidatedToken() throws Exception { when(securityIndex.indexExists()).thenReturn(true); TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); @@ -449,7 +440,7 @@ public void testComputeSecretKeyIsConsistent() throws Exception { public void testTokenExpiry() throws Exception { ClockMock clock = ClockMock.frozen(); - TokenService tokenService = new TokenService(tokenServiceEnabledSettings, clock, client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(tokenServiceEnabledSettings, clock, client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); @@ -501,7 +492,7 @@ public void testTokenServiceDisabled() throws Exception { TokenService tokenService = new TokenService(Settings.builder() .put(XPackSettings.TOKEN_SERVICE_ENABLED_SETTING.getKey(), false) .build(), - systemUTC(), client, lifecycleService, clusterService); + systemUTC(), client, securityIndex, clusterService); IllegalStateException e = expectThrows(IllegalStateException.class, () -> tokenService.createUserToken(null, null, null, null)); assertEquals("tokens are not enabled", e.getMessage()); @@ -543,7 +534,7 @@ public void testMalformedToken() throws Exception { final int numBytes = randomIntBetween(1, TokenService.MINIMUM_BYTES + 32); final byte[] randomBytes = new byte[numBytes]; random().nextBytes(randomBytes); - TokenService tokenService = new TokenService(Settings.EMPTY, systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(Settings.EMPTY, systemUTC(), client, securityIndex, clusterService); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); requestContext.putHeader("Authorization", "Bearer " + Base64.getEncoder().encodeToString(randomBytes)); @@ -557,7 +548,7 @@ public void testMalformedToken() throws Exception { public void testIndexNotAvailable() throws Exception { TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, systemUTC(), client, lifecycleService, clusterService); + new TokenService(tokenServiceEnabledSettings, systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); PlainActionFuture> tokenFuture = new PlainActionFuture<>(); 
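(An editorial aside, picking up at the tokenFuture declaration just above: nearly every test in this file hands an asynchronous TokenService call a PlainActionFuture, which serves both as the callback and as a blocking handle the test then waits on with future.get(). For readers outside the Elasticsearch test framework, here is a minimal JDK-only sketch of that adapter pattern; createTokenAsync is an invented stand-in for a callback-style call such as createUserToken, not the real API. The diff resumes below.)

    import java.util.concurrent.CompletableFuture;
    import java.util.function.Consumer;

    public class FutureAdapterSketch {
        // Invented stand-in for a callback-style call like tokenService.createUserToken(...).
        static void createTokenAsync(Consumer<String> listener) {
            new Thread(() -> listener.accept("token-value")).start();
        }

        public static void main(String[] args) throws Exception {
            // The future is handed in as the listener, then blocked on for the result,
            // mirroring how the tests pass a PlainActionFuture and call future.get().
            CompletableFuture<String> future = new CompletableFuture<>();
            createTokenAsync(future::complete);
            System.out.println(future.get());
        }
    }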
tokenService.createUserToken(authentication, authentication, tokenFuture, Collections.emptyMap()); @@ -594,7 +585,7 @@ public void testDecodePre6xToken() throws GeneralSecurityException, ExecutionExc MockSecureSettings secureSettings = new MockSecureSettings(); secureSettings.setString(TokenService.TOKEN_PASSPHRASE.getKey(), "xpack_token_passpharse"); Settings settings = Settings.builder().put(XPackSettings.HTTP_SSL_ENABLED.getKey(), true).setSecureSettings(secureSettings).build(); - TokenService tokenService = new TokenService(settings, systemUTC(), client, lifecycleService, clusterService); + TokenService tokenService = new TokenService(settings, systemUTC(), client, securityIndex, clusterService); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); requestContext.putHeader("Authorization", "Bearer " + token); @@ -612,7 +603,7 @@ public void testDecodePre6xToken() throws GeneralSecurityException, ExecutionExc } public void testGetAuthenticationWorksWithExpiredToken() throws Exception { TokenService tokenService = - new TokenService(tokenServiceEnabledSettings, Clock.systemUTC(), client, lifecycleService, clusterService); + new TokenService(tokenServiceEnabledSettings, Clock.systemUTC(), client, securityIndex, clusterService); Authentication authentication = new Authentication(new User("joe", "admin"), new RealmRef("native_realm", "native", "node1"), null); UserToken expired = new UserToken(authentication, Instant.now().minus(3L, ChronoUnit.DAYS)); mockGetTokenFromId(expired); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java index da9699e9ecfcb..0b96c55b5ff35 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ESNativeMigrateToolTests.java @@ -16,6 +16,7 @@ import org.elasticsearch.test.SecuritySettingsSource; import org.elasticsearch.xpack.core.security.authc.support.CharArrays; import org.elasticsearch.xpack.core.security.client.SecurityClient; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.BeforeClass; import java.nio.charset.StandardCharsets; @@ -23,7 +24,6 @@ import java.util.HashSet; import java.util.Set; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.is; /** @@ -78,7 +78,7 @@ public void testRetrieveUsers() throws Exception { addedUsers.add(uname); } logger.error("--> waiting for .security index"); - ensureGreen(SECURITY_INDEX_NAME); + ensureGreen(SecurityIndexManager.SECURITY_INDEX_NAME); MockTerminal t = new MockTerminal(); String username = nodeClientUsername(); @@ -123,7 +123,7 @@ public void testRetrieveRoles() throws Exception { addedRoles.add(rname); } logger.error("--> waiting for .security index"); - ensureGreen(SECURITY_INDEX_NAME); + ensureGreen(SecurityIndexManager.SECURITY_INDEX_NAME); MockTerminal t = new MockTerminal(); String username = nodeClientUsername(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java index 2c11411955a0f..a238576e41323 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeRealmIntegTests.java @@ -54,7 +54,7 @@ import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout; import static org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken.basicAuthHeaderValue; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.INTERNAL_SECURITY_INDEX; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.containsString; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java index 6fa9cb868e909..ba5c2ce26b487 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/NativeUsersStoreTests.java @@ -33,7 +33,6 @@ import org.elasticsearch.xpack.core.security.user.KibanaUser; import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; @@ -46,7 +45,6 @@ import java.util.function.Consumer; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.notNullValue; @@ -115,7 +113,7 @@ public void testBlankPasswordInIndexImpliesDefaultPassword() throws Exception { values.put(PASSWORD_FIELD, BLANK_PASSWORD); final GetResult result = new GetResult( - SECURITY_INDEX_NAME, + SecurityIndexManager.SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, NativeUsersStore.getIdForUser(NativeUserStoreField.RESERVED_USER_TYPE, randomAlphaOfLength(12)), 1L, @@ -184,7 +182,7 @@ public void testVerifyNonExistentUser() throws Exception { nativeUsersStore.verifyPassword(username, password, future); final GetResult getResult = new GetResult( - SECURITY_INDEX_NAME, + SecurityIndexManager.SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, NativeUsersStore.getIdForUser(NativeUsersStore.USER_DOC_TYPE, username), 1L, @@ -225,7 +223,7 @@ private void respondToGetUserRequest(String username, SecureString password, Str values.put(User.Fields.TYPE.getPreferredName(), NativeUsersStore.USER_DOC_TYPE); final BytesReference source = BytesReference.bytes(jsonBuilder().map(values)); final GetResult getResult = new GetResult( - SECURITY_INDEX_NAME, + SecurityIndexManager.SECURITY_INDEX_NAME, NativeUserStoreField.INDEX_TYPE, NativeUsersStore.getIdForUser(NativeUsersStore.USER_DOC_TYPE, username), 1L, @@ -238,9 +236,7 @@ private void respondToGetUserRequest(String username, SecureString password, Str } private NativeUsersStore 
startNativeUsersStore() { - SecurityLifecycleService securityLifecycleService = mock(SecurityLifecycleService.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(securityLifecycleService.securityIndex()).thenReturn(securityIndex); when(securityIndex.isAvailable()).thenReturn(true); when(securityIndex.indexExists()).thenReturn(true); when(securityIndex.isMappingUpToDate()).thenReturn(true); @@ -250,7 +246,7 @@ private NativeUsersStore startNativeUsersStore() { action.run(); return null; }).when(securityIndex).prepareIndexIfNeededThenExecute(any(Consumer.class), any(Runnable.class)); - return new NativeUsersStore(Settings.EMPTY, client, securityLifecycleService); + return new NativeUsersStore(Settings.EMPTY, client, securityIndex); } } \ No newline at end of file diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java index e71ed093d39da..5266703dfd843 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/esnative/ReservedRealmTests.java @@ -27,7 +27,6 @@ import org.elasticsearch.xpack.core.security.user.LogstashSystemUser; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.core.security.user.UsernamesField; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.esnative.NativeUsersStore.ReservedUserInfo; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.junit.Before; @@ -63,15 +62,12 @@ public class ReservedRealmTests extends ESTestCase { private static final SecureString EMPTY_PASSWORD = new SecureString("".toCharArray()); private NativeUsersStore usersStore; - private SecurityLifecycleService securityLifecycleService; private SecurityIndexManager securityIndex; @Before public void setupMocks() throws Exception { usersStore = mock(NativeUsersStore.class); - securityLifecycleService = mock(SecurityLifecycleService.class); securityIndex = mock(SecurityIndexManager.class); - when(securityLifecycleService.securityIndex()).thenReturn(securityIndex); when(securityIndex.isAvailable()).thenReturn(true); when(securityIndex.checkMappingVersion(any())).thenReturn(true); mockGetAllReservedUserInfo(usersStore, Collections.emptyMap()); @@ -82,7 +78,7 @@ public void testReservedUserEmptyPasswordAuthenticationFails() throws Throwable UsernamesField.BEATS_NAME); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture listener = new PlainActionFuture<>(); @@ -98,7 +94,7 @@ public void testAuthenticationDisabled() throws Throwable { } final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, - new AnonymousUser(settings), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(settings), securityIndex, new ThreadContext(Settings.EMPTY)); final User expected = randomReservedUser(true); final String principal = expected.principal(); @@ -120,7 +116,7 @@ public void testAuthenticationDisabledUserWithStoredPassword() 
throws Throwable private void verifySuccessfulAuthentication(boolean enabled) throws Exception { final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); final User expectedUser = randomReservedUser(enabled); final String principal = expectedUser.principal(); final SecureString newPassword = new SecureString("foobar".toCharArray()); @@ -161,7 +157,7 @@ private void verifySuccessfulAuthentication(boolean enabled) throws Exception { public void testLookup() throws Exception { final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); final User expectedUser = randomReservedUser(true); final String principal = expectedUser.principal(); @@ -186,7 +182,7 @@ public void testLookupDisabled() throws Exception { Settings settings = Settings.builder().put(XPackSettings.RESERVED_REALM_ENABLED_SETTING.getKey(), false).build(); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, new AnonymousUser(settings), - securityLifecycleService, new ThreadContext(Settings.EMPTY)); + securityIndex, new ThreadContext(Settings.EMPTY)); final User expectedUser = randomReservedUser(true); final String principal = expectedUser.principal(); @@ -200,7 +196,7 @@ public void testLookupDisabled() throws Exception { public void testLookupThrows() throws Exception { final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); final User expectedUser = randomReservedUser(true); final String principal = expectedUser.principal(); when(securityIndex.indexExists()).thenReturn(true); @@ -247,7 +243,7 @@ public void testIsReservedDisabled() { public void testGetUsers() { final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture> userFuture = new PlainActionFuture<>(); reservedRealm.users(userFuture); assertThat(userFuture.actionGet(), @@ -262,7 +258,7 @@ public void testGetUsersDisabled() { .build(); final AnonymousUser anonymousUser = new AnonymousUser(settings); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, anonymousUser, - securityLifecycleService, new ThreadContext(Settings.EMPTY)); + securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture> userFuture = new PlainActionFuture<>(); reservedRealm.users(userFuture); if (anonymousEnabled) { @@ -279,7 +275,7 @@ public void testFailedAuthentication() throws Exception { ReservedUserInfo userInfo = new ReservedUserInfo(hash, true, false); mockGetAllReservedUserInfo(usersStore, Collections.singletonMap("elastic", userInfo)); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), Settings.EMPTY, usersStore, - 
new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); if (randomBoolean()) { PlainActionFuture future = new PlainActionFuture<>(); @@ -309,7 +305,7 @@ public void testBootstrapElasticPasswordWorksOnceSecurityIndexExists() throws Ex when(securityIndex.indexExists()).thenReturn(true); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture listener = new PlainActionFuture<>(); doAnswer((i) -> { @@ -331,7 +327,7 @@ public void testBootstrapElasticPasswordFailsOnceElasticUserExists() throws Exce when(securityIndex.indexExists()).thenReturn(true); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture listener = new PlainActionFuture<>(); SecureString password = new SecureString("password".toCharArray()); doAnswer((i) -> { @@ -358,7 +354,7 @@ public void testBootstrapElasticPasswordWorksBeforeSecurityIndexExists() throws when(securityIndex.indexExists()).thenReturn(false); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture listener = new PlainActionFuture<>(); reservedRealm.doAuthenticate(new UsernamePasswordToken(new ElasticUser(true).principal(), @@ -376,7 +372,7 @@ public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexExists when(securityIndex.indexExists()).thenReturn(true); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture listener = new PlainActionFuture<>(); final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); @@ -398,7 +394,7 @@ public void testNonElasticUsersCannotUseBootstrapPasswordWhenSecurityIndexDoesNo when(securityIndex.indexExists()).thenReturn(false); final ReservedRealm reservedRealm = new ReservedRealm(mock(Environment.class), settings, usersStore, - new AnonymousUser(Settings.EMPTY), securityLifecycleService, new ThreadContext(Settings.EMPTY)); + new AnonymousUser(Settings.EMPTY), securityIndex, new ThreadContext(Settings.EMPTY)); PlainActionFuture listener = new PlainActionFuture<>(); final String principal = randomFrom(KibanaUser.NAME, LogstashSystemUser.NAME, BeatsSystemUser.NAME); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java index 693118c21bde5..2a1c2dabe30b7 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/NativeRoleMappingStoreTests.java @@ -26,7 +26,6 @@ import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression; import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.FieldExpression.FieldValue; import org.elasticsearch.xpack.core.security.user.User; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.authc.support.CachingUsernamePasswordRealm; import org.elasticsearch.xpack.security.authc.support.UserRoleMapper; import org.elasticsearch.xpack.security.support.SecurityIndexManager; @@ -73,12 +72,10 @@ public void testResolveRoles() throws Exception { Arrays.asList("mutants"), Collections.emptyMap(), false); final Client client = mock(Client.class); - final SecurityLifecycleService lifecycleService = mock(SecurityLifecycleService.class); SecurityIndexManager securityIndex = mock(SecurityIndexManager.class); - when(lifecycleService.securityIndex()).thenReturn(securityIndex); when(securityIndex.isAvailable()).thenReturn(true); - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, lifecycleService) { + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, securityIndex) { @Override protected void loadMappings(ActionListener> listener) { final List mappings = Arrays.asList(mapping1, mapping2, mapping3, mapping4); @@ -212,7 +209,7 @@ protected void doLookupUser(String username, ActionListener listener) { listener.onResponse(null); } }; - final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityLifecycleService.class)); + final NativeRoleMappingStore store = new NativeRoleMappingStore(Settings.EMPTY, client, mock(SecurityIndexManager.class)); store.refreshRealmOnChange(mockRealm); return store; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 3013a7c41c2ac..bcd31c32f7f78 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -140,7 +140,7 @@ import static org.elasticsearch.test.SecurityTestsUtils.assertAuthenticationException; import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationException; import static org.elasticsearch.test.SecurityTestsUtils.assertThrowsAuthorizationExceptionRunAs; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.endsWith; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java index b8f94c8134371..1d0e5c179a9cd 100644 --- 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizedIndicesTests.java @@ -21,10 +21,10 @@ import org.elasticsearch.xpack.core.security.authz.privilege.IndexPrivilege; import org.elasticsearch.xpack.core.security.user.User; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import java.util.List; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.containsInAnyOrder; public class AuthorizedIndicesTests extends ESTestCase { @@ -81,7 +81,7 @@ public void testSecurityIndicesAreRemovedFromRegularUser() { MetaData metaData = MetaData.builder() .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) - .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME).settings(indexSettings) + .put(new IndexMetaData.Builder(SecurityIndexManager.SECURITY_INDEX_NAME).settings(indexSettings) .numberOfShards(1).numberOfReplicas(0).build(), true) .build(); @@ -97,12 +97,12 @@ public void testSecurityIndicesAreNotRemovedFromSuperUsers() { MetaData metaData = MetaData.builder() .put(new IndexMetaData.Builder("an-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) .put(new IndexMetaData.Builder("another-index").settings(indexSettings).numberOfShards(1).numberOfReplicas(0).build(), true) - .put(new IndexMetaData.Builder(SECURITY_INDEX_NAME).settings(indexSettings) + .put(new IndexMetaData.Builder(SecurityIndexManager.SECURITY_INDEX_NAME).settings(indexSettings) .numberOfShards(1).numberOfReplicas(0).build(), true) .build(); AuthorizedIndices authorizedIndices = new AuthorizedIndices(user, role, SearchAction.NAME, metaData); List list = authorizedIndices.get(); - assertThat(list, containsInAnyOrder("an-index", "another-index", SECURITY_INDEX_NAME)); + assertThat(list, containsInAnyOrder("an-index", "another-index", SecurityIndexManager.SECURITY_INDEX_NAME)); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java index d03389f5ddbbb..b080b5924ce7a 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/IndicesAndAliasesResolverTests.java @@ -64,6 +64,7 @@ import org.elasticsearch.xpack.security.audit.AuditTrailService; import org.elasticsearch.xpack.security.authz.IndicesAndAliasesResolver.ResolvedIndices; import org.elasticsearch.xpack.security.authz.store.CompositeRolesStore; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.junit.Before; @@ -74,7 +75,7 @@ import java.util.Map; import java.util.Set; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.arrayContaining; import static 
org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -1198,14 +1199,14 @@ public void testXPackSecurityUserHasAccessToSecurityIndex() { { final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(XPackSecurityUser.INSTANCE, SearchAction.NAME); List indices = resolveIndices(request, authorizedIndices).getLocal(); - assertThat(indices, hasItem(SECURITY_INDEX_NAME)); + assertThat(indices, hasItem(SecurityIndexManager.SECURITY_INDEX_NAME)); } { IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest(); aliasesRequest.addAliasAction(AliasActions.add().alias("security_alias").index(SECURITY_INDEX_NAME)); final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(XPackSecurityUser.INSTANCE, IndicesAliasesAction.NAME); List indices = resolveIndices(aliasesRequest, authorizedIndices).getLocal(); - assertThat(indices, hasItem(SECURITY_INDEX_NAME)); + assertThat(indices, hasItem(SecurityIndexManager.SECURITY_INDEX_NAME)); } } @@ -1213,7 +1214,7 @@ public void testXPackUserDoesNotHaveAccessToSecurityIndex() { SearchRequest request = new SearchRequest(); final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(XPackUser.INSTANCE, SearchAction.NAME); List indices = resolveIndices(request, authorizedIndices).getLocal(); - assertThat(indices, not(hasItem(SECURITY_INDEX_NAME))); + assertThat(indices, not(hasItem(SecurityIndexManager.SECURITY_INDEX_NAME))); } public void testNonXPackUserAccessingSecurityIndex() { @@ -1225,7 +1226,7 @@ public void testNonXPackUserAccessingSecurityIndex() { SearchRequest request = new SearchRequest(); final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(allAccessUser, SearchAction.NAME); List indices = resolveIndices(request, authorizedIndices).getLocal(); - assertThat(indices, not(hasItem(SECURITY_INDEX_NAME))); + assertThat(indices, not(hasItem(SecurityIndexManager.SECURITY_INDEX_NAME))); } { @@ -1233,7 +1234,7 @@ public void testNonXPackUserAccessingSecurityIndex() { aliasesRequest.addAliasAction(AliasActions.add().alias("security_alias1").index("*")); final AuthorizedIndices authorizedIndices = buildAuthorizedIndices(allAccessUser, IndicesAliasesAction.NAME); List indices = resolveIndices(aliasesRequest, authorizedIndices).getLocal(); - assertThat(indices, not(hasItem(SECURITY_INDEX_NAME))); + assertThat(indices, not(hasItem(SecurityIndexManager.SECURITY_INDEX_NAME))); } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index ab6664b53b0fb..a2c70db3b63e8 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -41,8 +41,6 @@ import org.elasticsearch.xpack.core.security.action.role.PutRoleRequest; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor.IndicesPrivileges; -import org.elasticsearch.xpack.security.SecurityLifecycleService; -import org.elasticsearch.xpack.security.audit.index.IndexAuditTrail; import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.elasticsearch.xpack.security.test.SecurityTestUtils; import org.junit.After; @@ -58,7 +56,7 @@ import java.util.concurrent.atomic.AtomicBoolean; import 
static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; @@ -189,10 +187,9 @@ public void testPutOfRoleWithFlsDlsUnlicensed() throws IOException { final ClusterService clusterService = mock(ClusterService.class); final XPackLicenseState licenseState = mock(XPackLicenseState.class); final AtomicBoolean methodCalled = new AtomicBoolean(false); - final SecurityLifecycleService securityLifecycleService = - new SecurityLifecycleService(Settings.EMPTY, clusterService, threadPool, client, - mock(IndexAuditTrail.class)); - final NativeRolesStore rolesStore = new NativeRolesStore(Settings.EMPTY, client, licenseState, securityLifecycleService) { + final SecurityIndexManager securityIndex = + new SecurityIndexManager(Settings.EMPTY, client, SecurityIndexManager.SECURITY_INDEX_NAME, clusterService); + final NativeRolesStore rolesStore = new NativeRolesStore(Settings.EMPTY, client, licenseState, securityIndex) { @Override void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final ActionListener listener) { if (methodCalled.compareAndSet(false, true)) { @@ -203,7 +200,7 @@ void innerPutRole(final PutRoleRequest request, final RoleDescriptor role, final } }; // setup the roles store so the security index exists - securityLifecycleService.clusterChanged(new ClusterChangedEvent( + securityIndex.clusterChanged(new ClusterChangedEvent( "fls_dls_license", getClusterStateWithSecurityIndex(), getEmptyClusterState())); PutRoleRequest putRoleRequest = new PutRoleRequest(); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java index fe51f2beca34d..b5b67c7e7b2c1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/support/SecurityIndexManagerTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; @@ -51,7 +52,7 @@ import org.junit.Before; import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_TEMPLATE_NAME; import static org.elasticsearch.xpack.security.support.SecurityIndexManager.TEMPLATE_VERSION_PATTERN; import static org.hamcrest.Matchers.equalTo; @@ -74,6 +75,7 @@ public void setUpManager() { when(threadPool.getThreadContext()).thenReturn(new ThreadContext(Settings.EMPTY)); 
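(A structural note on the hunk above: with SecurityLifecycleService gone, SecurityIndexManager is now constructed with a ClusterService and consumes ClusterChangedEvent updates itself, which is why the roles-store test can drive index state by calling securityIndex.clusterChanged(...) directly. The sketch below mirrors that listener shape with invented types; the real event carries full cluster metadata, for which a boolean "does the index exist" stands in here.)

    // Invented miniature of a component that derives cached index state from cluster updates.
    public class ClusterTrackedIndexSketch {
        interface StateListener<S> {
            void clusterChanged(S previousState, S currentState);
        }

        static final class IndexTracker implements StateListener<Boolean> {
            private volatile boolean indexExists;

            @Override
            public void clusterChanged(Boolean previousState, Boolean currentState) {
                // Recompute the cached view from the latest cluster state.
                indexExists = currentState;
            }

            boolean indexExists() {
                return indexExists;
            }
        }

        public static void main(String[] args) {
            IndexTracker tracker = new IndexTracker();
            tracker.clusterChanged(false, true); // simulate the security index appearing
            System.out.println(tracker.indexExists()); // true
        }
    }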
when(mockClient.threadPool()).thenReturn(threadPool); when(mockClient.settings()).thenReturn(Settings.EMPTY); + final ClusterService clusterService = mock(ClusterService.class); actions = new LinkedHashMap<>(); final Client client = new FilterClient(mockClient) { @@ -88,7 +90,7 @@ void doExecute(Action action, Request request actions.put(action, map); } }; - manager = new SecurityIndexManager(Settings.EMPTY, client, INDEX_NAME); + manager = new SecurityIndexManager(Settings.EMPTY, client, INDEX_NAME, clusterService); } public void testIndexWithUpToDateMappingAndTemplate() throws IOException { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java index 63c267eb816fc..aa4982cce3f84 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/test/SecurityTestUtils.java @@ -40,7 +40,7 @@ import static java.nio.file.StandardOpenOption.TRUNCATE_EXISTING; import static java.nio.file.StandardOpenOption.WRITE; import static org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; +import static org.elasticsearch.xpack.security.support.SecurityIndexManager.SECURITY_INDEX_NAME; import static org.junit.Assert.assertEquals; public class SecurityTestUtils { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java index 414d04e42323a..e7b31d88eda19 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/user/XPackUserTests.java @@ -12,15 +12,13 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.security.index.IndexAuditTrailField; import org.elasticsearch.xpack.core.security.user.XPackUser; -import org.elasticsearch.xpack.security.SecurityLifecycleService; import org.elasticsearch.xpack.security.audit.index.IndexNameResolver; +import org.elasticsearch.xpack.security.support.SecurityIndexManager; import org.hamcrest.Matchers; import org.joda.time.DateTime; import java.util.function.Predicate; -import static org.elasticsearch.xpack.security.SecurityLifecycleService.SECURITY_INDEX_NAME; - public class XPackUserTests extends ESTestCase { public void testXPackUserCanAccessNonSecurityIndices() { @@ -33,8 +31,8 @@ public void testXPackUserCanAccessNonSecurityIndices() { public void testXPackUserCannotAccessSecurityIndex() { final String action = randomFrom(GetAction.NAME, SearchAction.NAME, IndexAction.NAME); final Predicate predicate = XPackUser.ROLE.indices().allowedIndicesMatcher(action); - assertThat(predicate.test(SECURITY_INDEX_NAME), Matchers.is(false)); - assertThat(predicate.test(SecurityLifecycleService.INTERNAL_SECURITY_INDEX), Matchers.is(false)); + assertThat(predicate.test(SecurityIndexManager.SECURITY_INDEX_NAME), Matchers.is(false)); + assertThat(predicate.test(SecurityIndexManager.INTERNAL_SECURITY_INDEX), Matchers.is(false)); } public void testXPackUserCanReadAuditTrail() { From 3844e3fd03e3322ec63b1140f9f9278e8dff4199 Mon Sep 17 00:00:00 2001 From: Ryan Ernst Date: Wed, 16 May 2018 
15:35:57 -0700 Subject: [PATCH 43/44] Make xpack modules instead of a meta plugin (#30589) This commit removes xpack from being a meta-plugin-as-a-module. It also fixes a couple tests which were missing task dependencies, which failed once the gradle execution order changed. --- distribution/archives/build.gradle | 2 +- distribution/build.gradle | 11 +++--- x-pack/plugin/build.gradle | 10 ++---- x-pack/plugin/core/src/main/bin/x-pack-env | 2 +- .../plugin/core/src/main/bin/x-pack-env.bat | 2 +- .../security/src/main/bin/x-pack-security-env | 2 +- .../src/main/bin/x-pack-security-env.bat | 2 +- .../watcher/src/main/bin/x-pack-watcher-env | 2 +- .../src/main/bin/x-pack-watcher-env.bat | 2 +- x-pack/qa/vagrant/build.gradle | 35 ------------------- .../test/resources/packaging/utils/xpack.bash | 9 ----- 11 files changed, 13 insertions(+), 66 deletions(-) diff --git a/distribution/archives/build.gradle b/distribution/archives/build.gradle index 9fa06021236a2..5d1703399aad4 100644 --- a/distribution/archives/build.gradle +++ b/distribution/archives/build.gradle @@ -224,7 +224,7 @@ subprojects { doLast { // this is just a small sample from the C++ notices, the idea being that if we've added these lines we've probably added all the required lines final List expectedLines = Arrays.asList("Apache log4cxx", "Boost Software License - Version 1.0 - August 17th, 2003") - final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack/x-pack-ml/NOTICE.txt") + final Path noticePath = archiveExtractionDir.toPath().resolve("elasticsearch-${VersionProperties.elasticsearch}/modules/x-pack-ml/NOTICE.txt") final List actualLines = Files.readAllLines(noticePath) for (final String expectedLine : expectedLines) { if (actualLines.contains(expectedLine) == false) { diff --git a/distribution/build.gradle b/distribution/build.gradle index 266cb8f8b270a..d2e2810bc7eec 100644 --- a/distribution/build.gradle +++ b/distribution/build.gradle @@ -201,17 +201,14 @@ project.rootProject.subprojects.findAll { it.parent.path == ':modules' }.each { // use licenses from each of the bundled xpack plugins Project xpack = project(':x-pack:plugin') -xpack.subprojects.findAll { it.name != 'bwc' }.each { Project xpackSubproject -> - File licenses = new File(xpackSubproject.projectDir, 'licenses') +xpack.subprojects.findAll { it.parent == xpack }.each { Project xpackModule -> + File licenses = new File(xpackModule.projectDir, 'licenses') if (licenses.exists()) { buildDefaultNotice.licensesDir licenses } + copyModule(processDefaultOutputs, xpackModule) + copyLog4jProperties(buildDefaultLog4jConfig, xpackModule) } -// but copy just the top level meta plugin to the default modules -copyModule(processDefaultOutputs, xpack) -copyLog4jProperties(buildDefaultLog4jConfig, xpack) - -// // make sure we have a clean task since we aren't a java project, but we have tasks that // put stuff in the build dir diff --git a/x-pack/plugin/build.gradle b/x-pack/plugin/build.gradle index 75266a3cef45d..dbe35445638a2 100644 --- a/x-pack/plugin/build.gradle +++ b/x-pack/plugin/build.gradle @@ -8,17 +8,11 @@ import java.nio.file.Path import java.nio.file.StandardCopyOption import org.elasticsearch.gradle.test.RunTask; -apply plugin: 'elasticsearch.es-meta-plugin' +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' archivesBaseName = 'x-pack' -es_meta_plugin { - name = 'x-pack' - description = 'Elasticsearch Expanded Pack Plugin' - plugins = ['core', 
'deprecation', 'graph', 'logstash', - 'ml', 'monitoring', 'security', 'upgrade', 'watcher', 'sql', 'rollup'] -} - dependencies { testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') } diff --git a/x-pack/plugin/core/src/main/bin/x-pack-env b/x-pack/plugin/core/src/main/bin/x-pack-env index 7cfb04a9f2e11..d18d0a12ded42 100644 --- a/x-pack/plugin/core/src/main/bin/x-pack-env +++ b/x-pack/plugin/core/src/main/bin/x-pack-env @@ -4,4 +4,4 @@ # or more contributor license agreements. Licensed under the Elastic License; # you may not use this file except in compliance with the Elastic License. -ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-core/*" +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-core/*" diff --git a/x-pack/plugin/core/src/main/bin/x-pack-env.bat b/x-pack/plugin/core/src/main/bin/x-pack-env.bat index de45a53c9269c..fc97721a737d1 100644 --- a/x-pack/plugin/core/src/main/bin/x-pack-env.bat +++ b/x-pack/plugin/core/src/main/bin/x-pack-env.bat @@ -2,4 +2,4 @@ rem Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one rem or more contributor license agreements. Licensed under the Elastic License; rem you may not use this file except in compliance with the Elastic License. -set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-core/* +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-core/* diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env b/x-pack/plugin/security/src/main/bin/x-pack-security-env index fd35535be8cca..3a2b15e13fa4a 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env @@ -7,4 +7,4 @@ source "`dirname "$0"`"/x-pack-env # include x-pack-security jars in classpath -ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-security/*" +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-security/*" diff --git a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat index 610f5835d28c2..035f1c965ffb6 100644 --- a/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat +++ b/x-pack/plugin/security/src/main/bin/x-pack-security-env.bat @@ -4,4 +4,4 @@ rem you may not use this file except in compliance with the Elastic License. call "%~dp0x-pack-env.bat" || exit /b 1 -set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-security/* +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-security/* diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env index 4abe3d8c60761..13718a01b4330 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env @@ -7,4 +7,4 @@ source "`dirname "$0"`"/x-pack-env # include x-pack-security jars in classpath -ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack/x-pack-watcher/*" +ES_CLASSPATH="$ES_CLASSPATH:$ES_HOME/modules/x-pack-watcher/*" diff --git a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat index 9e43ffaa0521f..010c154eb5a39 100644 --- a/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat +++ b/x-pack/plugin/watcher/src/main/bin/x-pack-watcher-env.bat @@ -4,4 +4,4 @@ rem you may not use this file except in compliance with the Elastic License. 
call "%~dp0x-pack-env.bat" || exit /b 1 -set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack/x-pack-watcher/* +set ES_CLASSPATH=!ES_CLASSPATH!;!ES_HOME!/modules/x-pack-watcher/* diff --git a/x-pack/qa/vagrant/build.gradle b/x-pack/qa/vagrant/build.gradle index 0c3428f258c0e..c69214578fd16 100644 --- a/x-pack/qa/vagrant/build.gradle +++ b/x-pack/qa/vagrant/build.gradle @@ -11,41 +11,6 @@ esvagrant { } dependencies { - // Packaging tests use the x-pack meta plugin - packaging project(path: xpackProject('plugin').path, configuration: 'zip') - // Inherit Bats test utils from :qa:vagrant project packaging project(path: ':qa:vagrant', configuration: 'packaging') } - -Map> metaPlugins = [:] -for (Project metaPlugin : project.rootProject.subprojects) { - if (metaPlugin.plugins.hasPlugin(MetaPluginBuildPlugin)) { - MetaPluginPropertiesExtension extension = metaPlugin.extensions.findByName('es_meta_plugin') - if (extension != null) { - List plugins = [] - metaPlugin.subprojects.each { - if (extension.plugins.contains(it.name)) { - Project plugin = (Project) it - if (plugin.plugins.hasPlugin(PluginBuildPlugin)) { - PluginPropertiesExtension esplugin = plugin.extensions.findByName('esplugin') - if (esplugin != null) { - plugins.add(esplugin.name) - } - } - } - } - metaPlugins.put(extension.name, plugins.toSorted()) - } - } -} - -setupPackagingTest { - doLast { - metaPlugins.each{ name, plugins -> - File expectedMetaPlugins = file("build/plugins/${name}.expected") - expectedMetaPlugins.parentFile.mkdirs() - expectedMetaPlugins.setText(plugins.join('\n'), 'UTF-8') - } - } -} diff --git a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash index 3caf28c2450b7..a595861ba1aa2 100644 --- a/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash +++ b/x-pack/qa/vagrant/src/test/resources/packaging/utils/xpack.bash @@ -69,15 +69,6 @@ verify_xpack_installation() { done # nocommit: decide whether to check the files added by the distribution, not part of xpack... #assert_number_of_files "$ESCONFIG/" $configFilesCount - - # Read the $name.expected file that contains all the expected - # plugins for the meta plugin - while read plugin; do - assert_module_or_plugin_directory "$ESMODULES/$name/$plugin" - assert_file_exist "$ESMODULES/$name/$plugin/$plugin"*".jar" - assert_file_exist "$ESMODULES/$name/$plugin/plugin-descriptor.properties" - assert_file_exist "$ESMODULES/$name/$plugin/plugin-security.policy" - done Date: Wed, 16 May 2018 15:07:14 -0400 Subject: [PATCH 44/44] SQL: Remove dependency for server's version from JDBC driver (#30631) Removes dependency for server's version from the JDBC driver code. This should allow us to dramatically reduce driver's size by removing the server dependency from the driver. 
Relates #29856 --- .../xpack/sql/jdbc/net/client/JdbcHttpClient.java | 4 +++- .../xpack/sql/cli/command/CliSession.java | 3 ++- .../xpack/sql/cli/command/ServerInfoCliCommand.java | 2 +- .../elasticsearch/xpack/sql/cli/CliSessionTests.java | 4 ++-- .../sql/cli/command/ServerInfoCliCommandTests.java | 2 +- .../elasticsearch/xpack/sql/proto/MainResponse.java | 10 ++++------ 6 files changed, 13 insertions(+), 12 deletions(-) diff --git a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java index 89ee78e0bae9e..17afc34efffe6 100644 --- a/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java +++ b/x-pack/plugin/sql/jdbc/src/main/java/org/elasticsearch/xpack/sql/jdbc/net/client/JdbcHttpClient.java @@ -8,6 +8,7 @@ import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.xpack.sql.client.HttpClient; +import org.elasticsearch.xpack.sql.client.shared.Version; import org.elasticsearch.xpack.sql.jdbc.jdbc.JdbcConfiguration; import org.elasticsearch.xpack.sql.jdbc.net.protocol.ColumnInfo; import org.elasticsearch.xpack.sql.jdbc.net.protocol.InfoResponse; @@ -79,7 +80,8 @@ public InfoResponse serverInfo() throws SQLException { private InfoResponse fetchServerInfo() throws SQLException { MainResponse mainResponse = httpClient.serverInfo(); - return new InfoResponse(mainResponse.getClusterName(), mainResponse.getVersion().major, mainResponse.getVersion().minor); + Version version = Version.fromString(mainResponse.getVersion()); + return new InfoResponse(mainResponse.getClusterName(), version.major, version.minor); } /** diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java index 8e030f36dd042..fc89e3939cc35 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/CliSession.java @@ -65,8 +65,9 @@ public void checkConnection() throws ClientException { } catch (SQLException ex) { throw new ClientException(ex); } + Version version = Version.fromString(response.getVersion()); // TODO: We can relax compatibility requirement later when we have a better idea about protocol compatibility guarantees - if (response.getVersion().major != Version.CURRENT.major || response.getVersion().minor != Version.CURRENT.minor) { + if (version.major != Version.CURRENT.major || version.minor != Version.CURRENT.minor) { throw new ClientException("This alpha version of CLI is only compatible with Elasticsearch version " + Version.CURRENT.toString()); } diff --git a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java index e637386f9798f..9e7b75102ec6f 100644 --- a/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java +++ b/x-pack/plugin/sql/sql-cli/src/main/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommand.java @@ -31,7 +31,7 @@ public boolean doHandle(CliTerminal terminal, CliSession cliSession, String line terminal.line() .text("Node:").em(info.getNodeName()) 
.text(" Cluster:").em(info.getClusterName()) - .text(" Version:").em(info.getVersion().toString()) + .text(" Version:").em(info.getVersion()) .ln(); return true; } diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java index e5643ad443a59..265051a5a58df 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/CliSessionTests.java @@ -27,7 +27,7 @@ public class CliSessionTests extends ESTestCase { public void testProperConnection() throws Exception { HttpClient httpClient = mock(HttpClient.class); - when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT, + when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), org.elasticsearch.Version.CURRENT.toString(), ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); cliSession.checkConnection(); @@ -57,7 +57,7 @@ public void testWrongServerVersion() throws Exception { } when(httpClient.serverInfo()).thenReturn(new MainResponse(randomAlphaOfLength(5), - org.elasticsearch.Version.fromString(major + "." + minor + ".23"), + org.elasticsearch.Version.fromString(major + "." + minor + ".23").toString(), ClusterName.DEFAULT.value(), UUIDs.randomBase64UUID(), Build.CURRENT)); CliSession cliSession = new CliSession(httpClient); expectThrows(ClientException.class, cliSession::checkConnection); diff --git a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java index e99cb2fb7f7e2..6c9d4933a9912 100644 --- a/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java +++ b/x-pack/plugin/sql/sql-cli/src/test/java/org/elasticsearch/xpack/sql/cli/command/ServerInfoCliCommandTests.java @@ -35,7 +35,7 @@ public void testShowInfo() throws Exception { TestTerminal testTerminal = new TestTerminal(); HttpClient client = mock(HttpClient.class); CliSession cliSession = new CliSession(client); - when(client.serverInfo()).thenReturn(new MainResponse("my_node", org.elasticsearch.Version.fromString("1.2.3"), + when(client.serverInfo()).thenReturn(new MainResponse("my_node", "1.2.3", new ClusterName("my_cluster").value(), UUIDs.randomBase64UUID(), Build.CURRENT)); ServerInfoCliCommand cliCommand = new ServerInfoCliCommand(); assertTrue(cliCommand.handle(testTerminal, cliSession, "info")); diff --git a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java index 73b6cbc529ec6..c8bb0c51f7fe7 100644 --- a/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java +++ b/x-pack/plugin/sql/sql-proto/src/main/java/org/elasticsearch/xpack/sql/proto/MainResponse.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.sql.proto; import org.elasticsearch.Build; -import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ObjectParser; import org.elasticsearch.common.xcontent.XContentParser; @@ -19,8 +18,7 @@ */ public class 
MainResponse { private String nodeName; - // TODO: Add parser for Version - private Version version; + private String version; private String clusterName; private String clusterUuid; // TODO: Add parser for Build @@ -29,7 +27,7 @@ public class MainResponse { private MainResponse() { } - public MainResponse(String nodeName, Version version, String clusterName, String clusterUuid, Build build) { + public MainResponse(String nodeName, String version, String clusterName, String clusterUuid, Build build) { this.nodeName = nodeName; this.version = version; this.clusterName = clusterName; @@ -41,7 +39,7 @@ public String getNodeName() { return nodeName; } - public Version getVersion() { + public String getVersion() { return version; } @@ -76,7 +74,7 @@ public Build getBuild() { (String) value.get("build_hash"), (String) value.get("build_date"), (boolean) value.get("build_snapshot")); - response.version = Version.fromString((String) value.get("number")); + response.version = (String) value.get("number"); }, (parser, context) -> parser.map(), new ParseField("version")); }
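
For context, a self-contained sketch of what a client-side version parser in
the spirit of the shared Version.fromString used above could look like. This
is illustrative only: the real org.elasticsearch.xpack.sql.client.shared.Version
class is not part of this patch and may differ, and ClientVersion is a
hypothetical name.

    // Hypothetical stand-in for a client-side version holder: parses
    // "6.3.0" or "6.3.0-SNAPSHOT" style strings without any server classes.
    public final class ClientVersion {
        public final byte major;
        public final byte minor;
        public final byte revision;

        private ClientVersion(byte major, byte minor, byte revision) {
            this.major = major;
            this.minor = minor;
            this.revision = revision;
        }

        public static ClientVersion fromString(String version) {
            // strip a qualifier such as "-SNAPSHOT" before splitting
            int dash = version.indexOf('-');
            String plain = dash < 0 ? version : version.substring(0, dash);
            String[] parts = plain.split("\\.");
            if (parts.length < 2) {
                throw new IllegalArgumentException("invalid version " + version);
            }
            byte major = Byte.parseByte(parts[0]);
            byte minor = Byte.parseByte(parts[1]);
            byte revision = parts.length > 2 ? Byte.parseByte(parts[2]) : 0;
            return new ClientVersion(major, minor, revision);
        }

        @Override
        public String toString() {
            return major + "." + minor + "." + revision;
        }
    }

With a parser like this in place, the compatibility check in CliSession
reduces to comparing the parsed major/minor fields against the client's own
compile-time constant, with no server types on the classpath.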