From 964016e2288198543e97332407fa9fc2260b1a16 Mon Sep 17 00:00:00 2001 From: Chris Earle Date: Sat, 4 Nov 2017 12:29:00 +0000 Subject: [PATCH 01/25] Fix RestGetAction name typo This changes the name from docuemnt_get_action to document_get_action. Relates #27266 --- .../org/elasticsearch/rest/action/document/RestGetAction.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java index 857af483325aa..3265a59692b61 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/document/RestGetAction.java @@ -50,7 +50,7 @@ public RestGetAction(final Settings settings, final RestController controller) { @Override public String getName() { - return "docuemnt_get_action"; + return "document_get_action"; } @Override From 749c3ec716c0c26d59dfed24e79bd96d380ddecb Mon Sep 17 00:00:00 2001 From: David Roberts Date: Sat, 4 Nov 2017 13:25:09 +0000 Subject: [PATCH 02/25] Remove the single argument Environment constructor (#27235) Only tests should use the single argument Environment constructor. To enforce this the single arg Environment constructor has been replaced with a test framework factory method. Production code (beyond initial Bootstrap) should always use the same Environment object that Node.getEnvironment() returns. This Environment is also available via dependency injection. --- .../org/elasticsearch/env/Environment.java | 4 -- .../indices/TransportAnalyzeActionTests.java | 3 +- .../settings/KeyStoreCommandTestCase.java | 4 +- .../elasticsearch/env/EnvironmentTests.java | 10 ++--- .../env/NodeEnvironmentTests.java | 12 +++--- .../elasticsearch/index/IndexModuleTests.java | 3 +- .../index/analysis/AnalysisRegistryTests.java | 9 +++-- .../index/analysis/AnalysisTests.java | 7 ++-- .../index/shard/NewPathForShardTests.java | 10 ++--- .../indices/analysis/AnalysisModuleTests.java | 19 ++++++---- .../indices/cluster/ClusterStateChanges.java | 3 +- .../plugins/PluginsServiceTests.java | 3 +- .../plugins/InstallPluginCommandTests.java | 5 ++- .../plugins/ListPluginsCommandTests.java | 3 +- .../plugins/RemovePluginCommandTests.java | 5 ++- .../common/CompoundAnalysisTests.java | 3 +- .../repositories/url/URLRepositoryTests.java | 10 +++-- .../analysis/AnalysisPolishFactoryTests.java | 3 +- .../FileBasedUnicastHostsProviderTests.java | 3 +- .../azure/AzureRepositorySettingsTests.java | 5 ++- .../gcs/GoogleCloudStorageServiceTests.java | 9 ++--- .../bootstrap/EvilSecurityTests.java | 5 ++- .../env/NodeEnvironmentEvilTests.java | 6 +-- .../bootstrap/SpawnerNoBootstrapTests.java | 9 +++-- .../elasticsearch/env/TestEnvironment.java | 37 +++++++++++++++++++ .../test/AbstractQueryTestCase.java | 3 +- .../elasticsearch/test/ESIntegTestCase.java | 3 +- .../org/elasticsearch/test/ESTestCase.java | 5 ++- 28 files changed, 129 insertions(+), 72 deletions(-) create mode 100644 test/framework/src/main/java/org/elasticsearch/env/TestEnvironment.java diff --git a/core/src/main/java/org/elasticsearch/env/Environment.java b/core/src/main/java/org/elasticsearch/env/Environment.java index 31a67333a810f..721cdcf9ba6db 100644 --- a/core/src/main/java/org/elasticsearch/env/Environment.java +++ b/core/src/main/java/org/elasticsearch/env/Environment.java @@ -85,10 +85,6 @@ public class Environment { /** Path to the temporary file directory used by the JDK */ private final Path 
tmpFile = PathUtils.get(System.getProperty("java.io.tmpdir")); - public Environment(Settings settings) { - this(settings, null); - } - public Environment(final Settings settings, final Path configPath) { final Path homeFile; if (PATH_HOME_SETTING.exists(settings)) { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java index fc41770b37766..90857da0be089 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/TransportAnalyzeActionTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AbstractCharFilterFactory; import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; @@ -74,7 +75,7 @@ public void setUp() throws Exception { .put("index.analysis.normalizer.my_normalizer.type", "custom") .putList("index.analysis.normalizer.my_normalizer.filter", "lowercase").build(); IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", indexSettings); - environment = new Environment(settings); + environment = TestEnvironment.newEnvironment(settings); AnalysisPlugin plugin = new AnalysisPlugin() { class MockFactory extends AbstractTokenFilterFactory { MockFactory(IndexSettings indexSettings, Environment env, String name, Settings settings) { diff --git a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java index 3d853404664b1..c1118b3bc6513 100644 --- a/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java +++ b/core/src/test/java/org/elasticsearch/common/settings/KeyStoreCommandTestCase.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.FileSystem; -import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; import java.util.ArrayList; @@ -35,6 +34,7 @@ import org.elasticsearch.cli.CommandTestCase; import org.elasticsearch.common.io.PathUtilsForTesting; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.junit.After; import org.junit.Before; @@ -70,7 +70,7 @@ public static Environment setupEnv(boolean posix, List fileSystems) PathUtilsForTesting.installMock(fs); // restored by restoreFileSystem in ESTestCase Path home = fs.getPath("/", "test-home"); Files.createDirectories(home.resolve("config")); - return new Environment(Settings.builder().put("path.home", home).build()); + return TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); } KeyStoreWrapper createKeystore(String password, String... 
settings) throws Exception { diff --git a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java index 70df7d33f291c..6ddf6b3ba73b1 100644 --- a/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/EnvironmentTests.java @@ -43,7 +43,7 @@ public Environment newEnvironment(Settings settings) throws IOException { .put(settings) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()).build(); - return new Environment(build); + return new Environment(build, null); } public void testRepositoryResolution() throws IOException { @@ -76,21 +76,21 @@ public void testRepositoryResolution() throws IOException { public void testPathDataWhenNotSet() { final Path pathHome = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", pathHome).build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, null); assertThat(environment.dataFiles(), equalTo(new Path[]{pathHome.resolve("data")})); } public void testPathDataNotSetInEnvironmentIfNotSet() { final Settings settings = Settings.builder().put("path.home", createTempDir().toAbsolutePath()).build(); assertFalse(Environment.PATH_DATA_SETTING.exists(settings)); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, null); assertFalse(Environment.PATH_DATA_SETTING.exists(environment.settings())); } public void testPathLogsWhenNotSet() { final Path pathHome = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", pathHome).build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, null); assertThat(environment.logsFile(), equalTo(pathHome.resolve("logs"))); } @@ -111,7 +111,7 @@ public void testConfigPath() { public void testConfigPathWhenNotSet() { final Path pathHome = createTempDir().toAbsolutePath(); final Settings settings = Settings.builder().put("path.home", pathHome).build(); - final Environment environment = new Environment(settings); + final Environment environment = new Environment(settings, null); assertThat(environment.configFile(), equalTo(pathHome.resolve("config"))); } diff --git a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java index 615a75dda025a..90161e5faaf9f 100644 --- a/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java +++ b/core/src/test/java/org/elasticsearch/env/NodeEnvironmentTests.java @@ -80,12 +80,12 @@ public void testNodeLockSingleEnvironment() throws IOException { // Reuse the same location and attempt to lock again IllegalStateException ex = - expectThrows(IllegalStateException.class, () -> new NodeEnvironment(settings, new Environment(settings))); + expectThrows(IllegalStateException.class, () -> new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings))); assertThat(ex.getMessage(), containsString("failed to obtain node lock")); // Close the environment that holds the lock and make sure we can get the lock after release env.close(); - env = new NodeEnvironment(settings, new Environment(settings)); + env = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); assertThat(env.nodeDataPaths(), 
arrayWithSize(dataPaths.size())); for (int i = 0; i < dataPaths.size(); i++) { @@ -120,7 +120,7 @@ public void testNodeLockMultipleEnvironment() throws IOException { final Settings settings = buildEnvSettings(Settings.builder().put("node.max_local_storage_nodes", 2).build()); final NodeEnvironment first = newNodeEnvironment(settings); List dataPaths = Environment.PATH_DATA_SETTING.get(settings); - NodeEnvironment second = new NodeEnvironment(settings, new Environment(settings)); + NodeEnvironment second = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); assertEquals(first.nodeDataPaths().length, dataPaths.size()); assertEquals(second.nodeDataPaths().length, dataPaths.size()); for (int i = 0; i < dataPaths.size(); i++) { @@ -477,7 +477,7 @@ public NodeEnvironment newNodeEnvironment() throws IOException { @Override public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException { Settings build = buildEnvSettings(settings); - return new NodeEnvironment(build, new Environment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); } public Settings buildEnvSettings(Settings settings) { @@ -492,7 +492,7 @@ public NodeEnvironment newNodeEnvironment(String[] dataPaths, Settings settings) .put(settings) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build(); - return new NodeEnvironment(build, new Environment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); } public NodeEnvironment newNodeEnvironment(String[] dataPaths, String sharedDataPath, Settings settings) throws IOException { @@ -501,6 +501,6 @@ public NodeEnvironment newNodeEnvironment(String[] dataPaths, String sharedDataP .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .put(Environment.PATH_SHARED_DATA_SETTING.getKey(), sharedDataPath) .putList(Environment.PATH_DATA_SETTING.getKey(), dataPaths).build(); - return new NodeEnvironment(build, new Environment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); } } diff --git a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java index 609ed02eb2e89..f1037d67ff4aa 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexModuleTests.java @@ -42,6 +42,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.ShardLock; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.index.cache.query.DisabledQueryCache; import org.elasticsearch.index.cache.query.IndexQueryCache; @@ -118,7 +119,7 @@ public void setUp() throws Exception { indicesQueryCache = new IndicesQueryCache(settings); indexSettings = IndexSettingsModule.newIndexSettings("foo", settings); index = indexSettings.getIndex(); - environment = new Environment(settings); + environment = TestEnvironment.newEnvironment(settings); emptyAnalysisRegistry = new AnalysisRegistry(environment, emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap()); threadPool = new TestThreadPool("test"); diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java 
b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java index d93533ffc80d3..9c0f2b3c7a550 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisRegistryTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.indices.analysis.AnalysisModule; import org.elasticsearch.indices.analysis.AnalysisModule.AnalysisProvider; @@ -56,8 +57,8 @@ private static AnalyzerProvider analyzerProvider(final String name) { } private static AnalysisRegistry emptyAnalysisRegistry(Settings settings) { - return new AnalysisRegistry(new Environment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), - emptyMap(), emptyMap()); + return new AnalysisRegistry(TestEnvironment.newEnvironment(settings), emptyMap(), emptyMap(), emptyMap(), emptyMap(), emptyMap(), + emptyMap(), emptyMap(), emptyMap()); } private static IndexSettings indexSettingsOfCurrentVersion(Settings.Builder settings) { @@ -157,8 +158,8 @@ public Map> getTokenFilters() { return singletonMap("mock", MockFactory::new); } }; - IndexAnalyzers indexAnalyzers = new AnalysisModule(new Environment(settings), singletonList(plugin)).getAnalysisRegistry() - .build(idxSettings); + IndexAnalyzers indexAnalyzers = new AnalysisModule(TestEnvironment.newEnvironment(settings), + singletonList(plugin)).getAnalysisRegistry().build(idxSettings); // This shouldn't contain English stopwords try (NamedAnalyzer custom_analyser = indexAnalyzers.get("custom_analyzer_with_camel_case")) { diff --git a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java index e07b4e5b9d435..4ed2202f585ea 100644 --- a/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java +++ b/core/src/test/java/org/elasticsearch/index/analysis/AnalysisTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.analysis.CharArraySet; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import java.io.BufferedWriter; @@ -61,7 +62,7 @@ public void testParseNonExistingFile() { Settings nodeSettings = Settings.builder() .put("foo.bar_path", tempDir.resolve("foo.dict")) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir).build(); - Environment env = new Environment(nodeSettings); + Environment env = TestEnvironment.newEnvironment(nodeSettings); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> Analysis.getWordList(env, nodeSettings, "foo.bar")); assertEquals("IOException while reading foo.bar_path: " + tempDir.resolve("foo.dict").toString(), ex.getMessage()); @@ -80,7 +81,7 @@ public void testParseFalseEncodedFile() throws IOException { writer.write(new byte[]{(byte) 0xff, 0x00, 0x00}); // some invalid UTF-8 writer.write('\n'); } - Environment env = new Environment(nodeSettings); + Environment env = TestEnvironment.newEnvironment(nodeSettings); IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> Analysis.getWordList(env, nodeSettings, "foo.bar")); assertEquals("Unsupported character encoding detected while reading foo.bar_path: " + 
tempDir.resolve("foo.dict").toString() @@ -101,7 +102,7 @@ public void testParseWordList() throws IOException { writer.write("world"); writer.write('\n'); } - Environment env = new Environment(nodeSettings); + Environment env = TestEnvironment.newEnvironment(nodeSettings); List wordList = Analysis.getWordList(env, nodeSettings, "foo.bar"); assertEquals(Arrays.asList("hello", "world"), wordList); diff --git a/core/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java index 7e0328641e319..4e6e3036f4c40 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/NewPathForShardTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; import org.elasticsearch.env.NodeEnvironment.NodePath; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; @@ -34,7 +35,6 @@ import org.junit.BeforeClass; import java.io.IOException; -import java.math.BigInteger; import java.nio.file.FileStore; import java.nio.file.FileSystem; import java.nio.file.Files; @@ -178,7 +178,7 @@ public void testSelectNewPathForShard() throws Exception { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); // Make sure all our mocking above actually worked: NodePath[] nodePaths = nodeEnv.nodePaths(); @@ -233,7 +233,7 @@ public void testSelectNewPathForShardEvenly() throws Exception { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); // Make sure all our mocking above actually worked: NodePath[] nodePaths = nodeEnv.nodePaths(); @@ -290,7 +290,7 @@ public void testGettingPathWithMostFreeSpace() throws Exception { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); aFileStore.usableSpace = 100000; bFileStore.usableSpace = 1000; @@ -315,7 +315,7 @@ public void testTieBreakWithMostShards() throws Exception { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), path) .putList(Environment.PATH_DATA_SETTING.getKey(), paths).build(); - NodeEnvironment nodeEnv = new NodeEnvironment(settings, new Environment(settings)); + NodeEnvironment nodeEnv = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); // Make sure all our mocking above actually worked: NodePath[] nodePaths = nodeEnv.nodePaths(); diff --git a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java index 
2abb4a5dedb0b..2bc98885f9096 100644 --- a/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java +++ b/core/src/test/java/org/elasticsearch/indices/analysis/AnalysisModuleTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.Analysis; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -91,7 +92,7 @@ public IndexAnalyzers getIndexAnalyzers(AnalysisRegistry registry, Settings sett public AnalysisRegistry getNewRegistry(Settings settings) { try { - return new AnalysisModule(new Environment(settings), singletonList(new AnalysisPlugin() { + return new AnalysisModule(TestEnvironment.newEnvironment(settings), singletonList(new AnalysisPlugin() { @Override public Map> getTokenFilters() { return singletonMap("myfilter", MyFilterTokenFilterFactory::new); @@ -162,7 +163,8 @@ public void testVersionedAnalyzers() throws Exception { indexAnalyzers.get("thai").analyzer().getVersion()); assertThat(indexAnalyzers.get("custom7").analyzer(), is(instanceOf(StandardAnalyzer.class))); - assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), indexAnalyzers.get("custom7").analyzer().getVersion()); + assertEquals(org.apache.lucene.util.Version.fromBits(3,6,0), + indexAnalyzers.get("custom7").analyzer().getVersion()); } private void testSimpleConfiguration(Settings settings) throws IOException { @@ -194,7 +196,7 @@ public void testWordListPath() throws Exception { Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - Environment env = new Environment(settings); + Environment env = TestEnvironment.newEnvironment(settings); String[] words = new String[]{"donau", "dampf", "schiff", "spargel", "creme", "suppe"}; Path wordListFile = generateWordList(words); @@ -241,7 +243,8 @@ public void testPluginPreConfiguredCharFilters() throws IOException { boolean noVersionSupportsMultiTerm = randomBoolean(); boolean luceneVersionSupportsMultiTerm = randomBoolean(); boolean elasticsearchVersionSupportsMultiTerm = randomBoolean(); - AnalysisRegistry registry = new AnalysisModule(new Environment(emptyNodeSettings), singletonList(new AnalysisPlugin() { + AnalysisRegistry registry = new AnalysisModule(TestEnvironment.newEnvironment(emptyNodeSettings), + singletonList(new AnalysisPlugin() { @Override public List getPreConfiguredCharFilters() { return Arrays.asList( @@ -285,7 +288,8 @@ public void testPluginPreConfiguredTokenFilters() throws IOException { boolean noVersionSupportsMultiTerm = randomBoolean(); boolean luceneVersionSupportsMultiTerm = randomBoolean(); boolean elasticsearchVersionSupportsMultiTerm = randomBoolean(); - AnalysisRegistry registry = new AnalysisModule(new Environment(emptyNodeSettings), singletonList(new AnalysisPlugin() { + AnalysisRegistry registry = new AnalysisModule(TestEnvironment.newEnvironment(emptyNodeSettings), + singletonList(new AnalysisPlugin() { @Override public List getPreConfiguredTokenFilters() { return Arrays.asList( @@ -359,7 +363,8 @@ public void reset() throws IOException { read = false; } } - AnalysisRegistry registry = new AnalysisModule(new Environment(emptyNodeSettings), singletonList(new AnalysisPlugin() { + AnalysisRegistry registry = new AnalysisModule(TestEnvironment.newEnvironment(emptyNodeSettings), + singletonList(new 
AnalysisPlugin() { @Override public List getPreConfiguredTokenizers() { return Arrays.asList( @@ -402,7 +407,7 @@ public void testRegisterHunspellDictionary() throws Exception { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .build(); - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); InputStream aff = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.aff"); InputStream dic = getClass().getResourceAsStream("/indices/analyze/conf_dir/hunspell/en_US/en_US.dic"); Dictionary dictionary; diff --git a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 61bc09fb0f1bb..6e6eaf726a599 100644 --- a/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/core/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -72,6 +72,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexService; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.shard.IndexEventListener; @@ -130,7 +131,7 @@ public ClusterStateChanges(NamedXContentRegistry xContentRegistry, ThreadPool th ActionFilters actionFilters = new ActionFilters(Collections.emptySet()); IndexNameExpressionResolver indexNameExpressionResolver = new IndexNameExpressionResolver(settings); DestructiveOperations destructiveOperations = new DestructiveOperations(settings, clusterSettings); - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); Transport transport = null; // it's not used // mocks diff --git a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java index 2f4644c858935..3bd31097dcae6 100644 --- a/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java +++ b/core/src/test/java/org/elasticsearch/plugins/PluginsServiceTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexModule; import org.elasticsearch.test.ESTestCase; @@ -59,7 +60,7 @@ public Settings additionalSettings() { public static class FilterablePlugin extends Plugin implements ScriptPlugin {} static PluginsService newPluginsService(Settings settings, Class... 
classpathPlugins) { - return new PluginsService(settings, null, null, new Environment(settings).pluginsFile(), Arrays.asList(classpathPlugins)); + return new PluginsService(settings, null, null, TestEnvironment.newEnvironment(settings).pluginsFile(), Arrays.asList(classpathPlugins)); } public void testAdditionalSettings() { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java index f5f8cdb32ef5f..8e37b10efc83f 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/InstallPluginCommandTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.common.settings.KeyStoreWrapper; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.PosixPermissionsResetter; import org.junit.After; @@ -176,7 +177,7 @@ static Tuple createEnv(FileSystem fs, Function Settings settings = Settings.builder() .put("path.home", home) .build(); - return Tuple.tuple(home, new Environment(settings)); + return Tuple.tuple(home, TestEnvironment.newEnvironment(settings)); } static Path createPluginDir(Function temp) throws IOException { @@ -236,7 +237,7 @@ MockTerminal installPlugin(String pluginUrl, Path home) throws Exception { } MockTerminal installPlugin(String pluginUrl, Path home, InstallPluginCommand command) throws Exception { - Environment env = new Environment(Settings.builder().put("path.home", home).build()); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); MockTerminal terminal = new MockTerminal(); command.execute(terminal, pluginUrl, true, env); return terminal; diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java index 2b98a13f4fd55..9a1f61c0d889c 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/ListPluginsCommandTests.java @@ -37,6 +37,7 @@ import org.elasticsearch.cli.UserException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -54,7 +55,7 @@ public void setUp() throws Exception { Settings settings = Settings.builder() .put("path.home", home) .build(); - env = new Environment(settings); + env = TestEnvironment.newEnvironment(settings); } static MockTerminal listPlugins(Path home) throws Exception { diff --git a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java index 3a78da6b28404..6c462d39e5775 100644 --- a/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java +++ b/distribution/tools/plugin-cli/src/test/java/org/elasticsearch/plugins/RemovePluginCommandTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cli.UserException; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -73,11 +74,11 @@ public void setUp() throws Exception { Settings settings = Settings.builder() .put("path.home", home) .build(); - env = new Environment(settings); + env = TestEnvironment.newEnvironment(settings); } static MockTerminal removePlugin(String name, Path home, boolean purge) throws Exception { - Environment env = new Environment(Settings.builder().put("path.home", home).build()); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", home).build()); MockTerminal terminal = new MockTerminal(); new MockRemovePluginCommand(env).execute(terminal, env, name, purge); return terminal; diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java index 88d218f907a52..9a7bf5eb91570 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CompoundAnalysisTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.MyFilterTokenFilterFactory; @@ -87,7 +88,7 @@ private List analyze(Settings settings, String analyzerName, String text private AnalysisModule createAnalysisModule(Settings settings) throws IOException { CommonAnalysisPlugin commonAnalysisPlugin = new CommonAnalysisPlugin(); - return new AnalysisModule(new Environment(settings), Arrays.asList(commonAnalysisPlugin, new AnalysisPlugin() { + return new AnalysisModule(TestEnvironment.newEnvironment(settings), Arrays.asList(commonAnalysisPlugin, new AnalysisPlugin() { @Override public Map> getTokenFilters() { return singletonMap("myfilter", MyFilterTokenFilterFactory::new); diff --git a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java index ea274eeae602a..1af4c1eaba9ad 100644 --- a/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java +++ b/modules/repository-url/src/test/java/org/elasticsearch/repositories/url/URLRepositoryTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.repositories.RepositoryException; import org.elasticsearch.test.ESTestCase; @@ -40,7 +41,8 @@ public void testWhiteListingRepoURL() throws IOException { .put(URLRepository.REPOSITORIES_URL_SETTING.getKey(), repoPath) .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); - new URLRepository(repositoryMetaData, new Environment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); + new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new 
NamedXContentRegistry(Collections.emptyList())); } public void testIfNotWhiteListedMustSetRepoURL() throws IOException { @@ -51,7 +53,8 @@ public void testIfNotWhiteListedMustSetRepoURL() throws IOException { .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); try { - new URLRepository(repositoryMetaData, new Environment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); + new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { String msg = "[url] file url [" + repoPath @@ -71,7 +74,8 @@ public void testMustBeSupportedProtocol() throws IOException { .build(); RepositoryMetaData repositoryMetaData = new RepositoryMetaData("url", URLRepository.TYPE, baseSettings); try { - new URLRepository(repositoryMetaData, new Environment(baseSettings), new NamedXContentRegistry(Collections.emptyList())); + new URLRepository(repositoryMetaData, TestEnvironment.newEnvironment(baseSettings), + new NamedXContentRegistry(Collections.emptyList())); fail("RepositoryException should have been thrown."); } catch (RepositoryException e) { assertEquals("[url] unsupported url protocol [file] from URL [" + repoPath +"]", e.getMessage()); diff --git a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/AnalysisPolishFactoryTests.java b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/AnalysisPolishFactoryTests.java index ae78b9c01b3f8..f13d7b01149b5 100644 --- a/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/AnalysisPolishFactoryTests.java +++ b/plugins/analysis-stempel/src/test/java/org/elasticsearch/index/analysis/AnalysisPolishFactoryTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.pl.PolishStemTokenFilterFactory; import org.elasticsearch.indices.analysis.AnalysisFactoryTestCase; @@ -59,7 +60,7 @@ public void testThreadSafety() throws IOException { .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString()) .build(); - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); IndexMetaData metaData = IndexMetaData.builder(IndexMetaData.INDEX_UUID_NA_VALUE).settings(settings).build(); IndexSettings indexSettings = new IndexSettings(metaData, Settings.EMPTY); testThreadSafety(new PolishStemTokenFilterFactory(indexSettings, environment, "stempelpolishstem", settings)); diff --git a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java index db56f9c2f8341..3ddd15a7b4cf3 100644 --- a/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java +++ b/plugins/discovery-file/src/test/java/org/elasticsearch/discovery/file/FileBasedUnicastHostsProviderTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.BigArrays; import 
org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.transport.MockTransportService; @@ -126,7 +127,7 @@ public void testUnicastHostsDoesNotExist() throws Exception { final Settings settings = Settings.builder() .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir()) .build(); - final Environment environment = new Environment(settings); + final Environment environment = TestEnvironment.newEnvironment(settings); final FileBasedUnicastHostsProvider provider = new FileBasedUnicastHostsProvider(environment, transportService, executorService); final List nodes = provider.buildDynamicNodes(); assertEquals(0, nodes.size()); diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java index 6d609bd08d2c6..75025332889a7 100644 --- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java +++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureRepositorySettingsTests.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import java.io.IOException; @@ -42,8 +43,8 @@ private AzureRepository azureRepository(Settings settings) throws StorageExcepti .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) .put(settings) .build(); - return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), new Environment(internalSettings), - NamedXContentRegistry.EMPTY, null); + return new AzureRepository(new RepositoryMetaData("foo", "azure", internalSettings), + TestEnvironment.newEnvironment(internalSettings), NamedXContentRegistry.EMPTY, null); } diff --git a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java index 5353f1c28e649..a12cd4fdb5c94 100644 --- a/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java +++ b/plugins/repository-gcs/src/test/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageServiceTests.java @@ -21,19 +21,16 @@ import java.io.IOException; import java.io.InputStream; -import java.nio.file.Files; -import java.nio.file.Path; import java.util.Collections; import java.util.Map; import com.google.api.client.googleapis.auth.oauth2.GoogleCredential; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.repositories.gcs.GoogleCloudStorageService.InternalGoogleCloudStorageService; import org.elasticsearch.test.ESTestCase; -import static org.hamcrest.Matchers.containsString; - public class GoogleCloudStorageServiceTests extends ESTestCase { private InputStream getDummyCredentialStream() throws IOException { @@ -41,7 +38,7 @@ private InputStream getDummyCredentialStream() throws IOException { } public void testDefaultCredential() throws Exception { - Environment env = new 
Environment(Settings.builder().put("path.home", createTempDir()).build()); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); GoogleCredential cred = GoogleCredential.fromStream(getDummyCredentialStream()); InternalGoogleCloudStorageService service = new InternalGoogleCloudStorageService(env, Collections.emptyMap()) { @Override @@ -55,7 +52,7 @@ GoogleCredential getDefaultCredential() throws IOException { public void testClientCredential() throws Exception { GoogleCredential cred = GoogleCredential.fromStream(getDummyCredentialStream()); Map credentials = Collections.singletonMap("clientname", cred); - Environment env = new Environment(Settings.builder().put("path.home", createTempDir()).build()); + Environment env = TestEnvironment.newEnvironment(Settings.builder().put("path.home", createTempDir()).build()); InternalGoogleCloudStorageService service = new InternalGoogleCloudStorageService(env, credentials); assertSame(cred, service.getCredential("clientname")); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java index 9ba59f8d49727..aa753f6d4509a 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/bootstrap/EvilSecurityTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.test.ESTestCase; import java.io.FilePermission; @@ -54,7 +55,7 @@ public void testGeneratedPermissions() throws Exception { Permissions permissions; try { System.setProperty("java.io.tmpdir", fakeTmpDir.toString()); - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); permissions = Security.createPermissions(environment); } finally { System.setProperty("java.io.tmpdir", realTmpDir); @@ -156,7 +157,7 @@ public void testDuplicateDataPaths() throws IOException { .putList(Environment.PATH_DATA_SETTING.getKey(), data.toString(), duplicate.toString()) .build(); - final Environment environment = new Environment(settings); + final Environment environment = TestEnvironment.newEnvironment(settings); final IllegalStateException e = expectThrows(IllegalStateException.class, () -> Security.createPermissions(environment)); assertThat(e, hasToString(containsString("path [" + duplicate.toRealPath() + "] is duplicated by [" + duplicate + "]"))); } diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java index 8192a8c8a29c5..57d4a363cc8c7 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/env/NodeEnvironmentEvilTests.java @@ -52,7 +52,7 @@ public void testMissingWritePermission() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); IOException ioException = expectThrows(IOException.class, () -> { - new NodeEnvironment(build, new Environment(build)); + new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); }); assertTrue(ioException.getMessage(), 
ioException.getMessage().startsWith(path.toString())); } @@ -72,7 +72,7 @@ public void testMissingWritePermissionOnIndex() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); IOException ioException = expectThrows(IOException.class, () -> { - new NodeEnvironment(build, new Environment(build)); + new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); }); assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory")); } @@ -97,7 +97,7 @@ public void testMissingWritePermissionOnShard() throws IOException { .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString()) .putList(Environment.PATH_DATA_SETTING.getKey(), tempPaths).build(); IOException ioException = expectThrows(IOException.class, () -> { - new NodeEnvironment(build, new Environment(build)); + new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); }); assertTrue(ioException.getMessage(), ioException.getMessage().startsWith("failed to test writes in data directory")); } diff --git a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java index f4e2f0cb7b0c3..d9d4ab5c3aca9 100644 --- a/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java +++ b/qa/no-bootstrap-tests/src/test/java/org/elasticsearch/bootstrap/SpawnerNoBootstrapTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.plugins.PluginTestUtil; import org.elasticsearch.plugins.Platforms; @@ -72,7 +73,7 @@ public void testNoControllerSpawn() throws IOException, InterruptedException { settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.toString()); Settings settings = settingsBuilder.build(); - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); // This plugin will NOT have a controller daemon Path plugin = environment.pluginsFile().resolve("a_plugin"); @@ -108,7 +109,7 @@ public void testControllerSpawn() throws IOException, InterruptedException { settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.toString()); Settings settings = settingsBuilder.build(); - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); // this plugin will have a controller daemon Path plugin = environment.pluginsFile().resolve("test_plugin"); @@ -169,7 +170,7 @@ public void testControllerSpawnWithIncorrectDescriptor() throws IOException { settingsBuilder.put(Environment.PATH_HOME_SETTING.getKey(), esHome.toString()); Settings settings = settingsBuilder.build(); - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); Path plugin = environment.pluginsFile().resolve("test_plugin"); Files.createDirectories(plugin); @@ -198,7 +199,7 @@ public void testSpawnerHandlingOfDesktopServicesStoreFiles() throws IOException final Path esHome = createTempDir().resolve("home"); final Settings settings = Settings.builder().put(Environment.PATH_HOME_SETTING.getKey(), esHome.toString()).build(); - final Environment 
environment = new Environment(settings); + final Environment environment = TestEnvironment.newEnvironment(settings); Files.createDirectories(environment.pluginsFile()); diff --git a/test/framework/src/main/java/org/elasticsearch/env/TestEnvironment.java b/test/framework/src/main/java/org/elasticsearch/env/TestEnvironment.java new file mode 100644 index 0000000000000..aa2e03ae22ac1 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/env/TestEnvironment.java @@ -0,0 +1,37 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.env; + +import org.elasticsearch.common.settings.Settings; + +/** + * Provides a convenience method for tests to construct an Environment when the config path does not matter. + * This is in the test framework to force people who construct an Environment in production code to think + * about what the config path needs to be set to. + */ +public class TestEnvironment { + + private TestEnvironment() { + } + + public static Environment newEnvironment(Settings settings) { + return new Environment(settings, null); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java index d56db722def82..43904d1f1f9eb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/AbstractQueryTestCase.java @@ -63,6 +63,7 @@ import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.env.Environment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.IndexAnalyzers; @@ -1048,7 +1049,7 @@ private static class ServiceHolder implements Closeable { ).flatMap(Function.identity()).collect(toList())); IndexScopedSettings indexScopedSettings = settingsModule.getIndexScopedSettings(); idxSettings = IndexSettingsModule.newIndexSettings(index, indexSettings, indexScopedSettings); - AnalysisModule analysisModule = new AnalysisModule(new Environment(nodeSettings), emptyList()); + AnalysisModule analysisModule = new AnalysisModule(TestEnvironment.newEnvironment(nodeSettings), emptyList()); IndexAnalyzers indexAnalyzers = analysisModule.getAnalysisRegistry().build(idxSettings); scriptService = scriptModule.getScriptService(); similarityService = new SimilarityService(idxSettings, null, Collections.emptyMap()); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index b0b0ffc9df8ad..4a25e95dbc304 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -110,6 +110,7 @@ import org.elasticsearch.discovery.zen.ZenDiscovery; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexService; @@ -1971,7 +1972,7 @@ public Path randomRepoPath() { * Returns path to a random directory that can be used to create a temporary file system repo */ public static Path randomRepoPath(Settings settings) { - Environment environment = new Environment(settings); + Environment environment = TestEnvironment.newEnvironment(settings); Path[] repoFiles = environment.repoFiles(); assert repoFiles.length > 0; Path path; diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 121471bdb0783..db43b5c9c599a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -77,6 +77,7 @@ import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.analysis.AnalysisRegistry; @@ -811,7 +812,7 @@ public NodeEnvironment newNodeEnvironment(Settings settings) throws IOException .put(settings) .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()).build(); - return new NodeEnvironment(build, new Environment(build)); + return new NodeEnvironment(build, TestEnvironment.newEnvironment(build)); } /** Return consistent index settings for the provided index version. */ @@ -1205,7 +1206,7 @@ public static TestAnalysis createTestAnalysis(Index index, Settings nodeSettings */ public static TestAnalysis createTestAnalysis(IndexSettings indexSettings, Settings nodeSettings, AnalysisPlugin... analysisPlugins) throws IOException { - Environment env = new Environment(nodeSettings); + Environment env = TestEnvironment.newEnvironment(nodeSettings); AnalysisModule analysisModule = new AnalysisModule(env, Arrays.asList(analysisPlugins)); AnalysisRegistry analysisRegistry = analysisModule.getAnalysisRegistry(); return new TestAnalysis(analysisRegistry.build(indexSettings), From c7ce5a07f26f09ec4e5e92d07aa08f338fbb41b8 Mon Sep 17 00:00:00 2001 From: Nhat Date: Sat, 4 Nov 2017 19:51:48 -0400 Subject: [PATCH 03/25] Add size-based condition to the index rollover API (#27160) This adds a max_size condition to the index rollover API. The condition is evaluated using the totalSizeInBytes from DocsStats.
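As an illustration only (not part of this patch), a minimal sketch of requesting the new condition through the Java API; the client variable, the "logs-write" alias, and the 5gb threshold are assumptions made up for the example:

    // Hypothetical usage of the builder method added below. Assumes an
    // initialized Client plus imports of org.elasticsearch.common.unit.ByteSizeValue
    // and org.elasticsearch.common.unit.ByteSizeUnit.
    client.admin().indices()
        .prepareRolloverIndex("logs-write")                              // alias pointing at the write index
        .addMaxIndexSizeCondition(new ByteSizeValue(5, ByteSizeUnit.GB)) // roll over once the index reaches 5gb
        .get();

This mirrors the existing addMaxIndexAgeCondition and addMaxIndexDocsCondition builder methods.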
Closes #27004 --- .../admin/indices/rollover/Condition.java | 17 ++++- .../indices/rollover/MaxSizeCondition.java | 66 ++++++++++++++++++ .../indices/rollover/RolloverRequest.java | 12 +++- .../rollover/RolloverRequestBuilder.java | 6 ++ .../rollover/TransportRolloverAction.java | 4 +- .../elasticsearch/indices/IndicesModule.java | 2 + .../indices/rollover/ConditionTests.java | 30 ++++++-- .../admin/indices/rollover/RolloverIT.java | 69 +++++++++++++++++-- .../rollover/RolloverRequestTests.java | 56 ++++++++++++++- .../TransportRolloverActionTests.java | 54 +++++++++++---- .../reference/indices/rollover-index.asciidoc | 17 +++-- .../30_max_size_condition.yml | 60 ++++++++++++++++ 12 files changed, 362 insertions(+), 31 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java index d6bfaf0a48cec..83dc73f9e94b3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/Condition.java @@ -19,8 +19,10 @@ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.Version; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.NamedWriteable; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; @@ -38,6 +40,9 @@ public abstract class Condition implements NamedWriteable { new ParseField(MaxAgeCondition.NAME)); PARSER.declareLong((conditions, value) -> conditions.add(new MaxDocsCondition(value)), new ParseField(MaxDocsCondition.NAME)); + PARSER.declareString((conditions, s) -> + conditions.add(new MaxSizeCondition(ByteSizeValue.parseBytesSizeValue(s, MaxSizeCondition.NAME))), + new ParseField(MaxSizeCondition.NAME)); } protected T value; @@ -49,6 +54,14 @@ protected Condition(String name) { public abstract Result evaluate(Stats stats); + /** + * Checks if this condition is available in a specific version. + * This ensures backwards compatibility (BWC) when introducing a new condition which is not recognized by older versions. + */ + boolean includedInVersion(Version version) { + return true; + } + @Override public final String toString() { return "[" + name + ": " + value + "]"; @@ -60,10 +73,12 @@ public final String toString() { public static class Stats { public final long numDocs; public final long indexCreated; + public final ByteSizeValue indexSize; - public Stats(long numDocs, long indexCreated) { + public Stats(long numDocs, long indexCreated, ByteSizeValue indexSize) { this.numDocs = numDocs; this.indexCreated = indexCreated; + this.indexSize = indexSize; } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java new file mode 100644 index 0000000000000..3d6496cbae496 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -0,0 +1,66 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements.
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.rollover; + +import org.elasticsearch.Version; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; + +import java.io.IOException; + +/** + * A size-based condition for an index size. + * Evaluates to true if the index size is at least {@link #value}. + */ +public class MaxSizeCondition extends Condition { + public static final String NAME = "max_size"; + + public MaxSizeCondition(ByteSizeValue value) { + super(NAME); + this.value = value; + } + + public MaxSizeCondition(StreamInput in) throws IOException { + super(NAME); + this.value = new ByteSizeValue(in.readVLong(), ByteSizeUnit.BYTES); + } + + @Override + public Result evaluate(Stats stats) { + return new Result(this, stats.indexSize.getBytes() >= value.getBytes()); + } + + @Override + boolean includedInVersion(Version version) { + return version.onOrAfter(Version.V_7_0_0_alpha1); + } + + @Override + public String getWriteableName() { + return NAME; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeVLong(value.getBytes()); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java index 4804bc577fc58..c25fc7eb537d3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequest.java @@ -27,6 +27,7 @@ import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.ObjectParser; @@ -106,7 +107,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(dryRun); out.writeVInt(conditions.size()); for (Condition condition : conditions) { - out.writeNamedWriteable(condition); + if (condition.includedInVersion(out.getVersion())) { + out.writeNamedWriteable(condition); + } } createIndexRequest.writeTo(out); } @@ -155,6 +158,13 @@ public void addMaxIndexDocsCondition(long numDocs) { this.conditions.add(new MaxDocsCondition(numDocs)); } + /** + * Adds a size-based condition to check if the index size is at least size. 
+ */ + public void addMaxIndexSizeCondition(ByteSizeValue size) { + this.conditions.add(new MaxSizeCondition(size)); + } + /** * Sets rollover index creation request to override index settings when * the rolled over index has to be created diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java index 35890d1d3a6fd..55df220ec0700 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestBuilder.java @@ -23,6 +23,7 @@ import org.elasticsearch.action.support.master.MasterNodeOperationRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; @@ -52,6 +53,11 @@ public RolloverRequestBuilder addMaxIndexDocsCondition(long docs) { return this; } + public RolloverRequestBuilder addMaxIndexSizeCondition(ByteSizeValue size){ + this.request.addMaxIndexSizeCondition(size); + return this; + } + public RolloverRequestBuilder dryRun(boolean dryRun) { this.request.dryRun(dryRun); return this; diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java index 430e0ff198e8a..c66f534bd8130 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverAction.java @@ -43,6 +43,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -195,7 +196,8 @@ static String generateRolloverIndexName(String sourceIndexName, IndexNameExpress static Set evaluateConditions(final Set conditions, final DocsStats docsStats, final IndexMetaData metaData) { final long numDocs = docsStats == null ? 0 : docsStats.getCount(); - final Condition.Stats stats = new Condition.Stats(numDocs, metaData.getCreationDate()); + final long indexSize = docsStats == null ? 
0 : docsStats.getTotalSizeInBytes(); + final Condition.Stats stats = new Condition.Stats(numDocs, metaData.getCreationDate(), new ByteSizeValue(indexSize)); return conditions.stream() .map(condition -> condition.evaluate(stats)) .collect(Collectors.toSet()); diff --git a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java index 2751c34a7f314..e446ec7e6d3ea 100644 --- a/core/src/main/java/org/elasticsearch/indices/IndicesModule.java +++ b/core/src/main/java/org/elasticsearch/indices/IndicesModule.java @@ -22,6 +22,7 @@ import org.elasticsearch.action.admin.indices.rollover.Condition; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; import org.elasticsearch.action.resync.TransportResyncReplicationAction; import org.elasticsearch.index.shard.PrimaryReplicaSyncer; import org.elasticsearch.common.geo.ShapesAvailability; @@ -79,6 +80,7 @@ public IndicesModule(List mapperPlugins) { private void registerBuiltinWritables() { namedWritables.add(new Entry(Condition.class, MaxAgeCondition.NAME, MaxAgeCondition::new)); namedWritables.add(new Entry(Condition.class, MaxDocsCondition.NAME, MaxDocsCondition::new)); + namedWritables.add(new Entry(Condition.class, MaxSizeCondition.NAME, MaxSizeCondition::new)); } public List getNamedWriteables() { diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java index 95f186ba0e566..a4e6cdfade7ef 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/ConditionTests.java @@ -19,6 +19,8 @@ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -30,12 +32,12 @@ public void testMaxAge() throws Exception { final MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(1)); long indexCreatedMatch = System.currentTimeMillis() - TimeValue.timeValueMinutes(61).getMillis(); - Condition.Result evaluate = maxAgeCondition.evaluate(new Condition.Stats(0, indexCreatedMatch)); + Condition.Result evaluate = maxAgeCondition.evaluate(new Condition.Stats(0, indexCreatedMatch, randomByteSize())); assertThat(evaluate.condition, equalTo(maxAgeCondition)); assertThat(evaluate.matched, equalTo(true)); long indexCreatedNotMatch = System.currentTimeMillis() - TimeValue.timeValueMinutes(59).getMillis(); - evaluate = maxAgeCondition.evaluate(new Condition.Stats(0, indexCreatedNotMatch)); + evaluate = maxAgeCondition.evaluate(new Condition.Stats(0, indexCreatedNotMatch, randomByteSize())); assertThat(evaluate.condition, equalTo(maxAgeCondition)); assertThat(evaluate.matched, equalTo(false)); } @@ -44,13 +46,33 @@ public void testMaxDocs() throws Exception { final MaxDocsCondition maxDocsCondition = new MaxDocsCondition(100L); long maxDocsMatch = randomIntBetween(100, 1000); - Condition.Result evaluate = maxDocsCondition.evaluate(new Condition.Stats(maxDocsMatch, 0)); + Condition.Result evaluate = maxDocsCondition.evaluate(new Condition.Stats(maxDocsMatch, 0, randomByteSize())); 
assertThat(evaluate.condition, equalTo(maxDocsCondition)); assertThat(evaluate.matched, equalTo(true)); long maxDocsNotMatch = randomIntBetween(0, 99); - evaluate = maxDocsCondition.evaluate(new Condition.Stats(0, maxDocsNotMatch)); + evaluate = maxDocsCondition.evaluate(new Condition.Stats(0, maxDocsNotMatch, randomByteSize())); assertThat(evaluate.condition, equalTo(maxDocsCondition)); assertThat(evaluate.matched, equalTo(false)); } + + public void testMaxSize() throws Exception { + MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomIntBetween(10, 20), ByteSizeUnit.MB)); + + Condition.Result result = maxSizeCondition.evaluate(new Condition.Stats(randomNonNegativeLong(), randomNonNegativeLong(), + new ByteSizeValue(0, ByteSizeUnit.MB))); + assertThat(result.matched, equalTo(false)); + + result = maxSizeCondition.evaluate(new Condition.Stats(randomNonNegativeLong(), randomNonNegativeLong(), + new ByteSizeValue(randomIntBetween(0, 9), ByteSizeUnit.MB))); + assertThat(result.matched, equalTo(false)); + + result = maxSizeCondition.evaluate(new Condition.Stats(randomNonNegativeLong(), randomNonNegativeLong(), + new ByteSizeValue(randomIntBetween(20, 1000), ByteSizeUnit.MB))); + assertThat(result.matched, equalTo(true)); + } + + private ByteSizeValue randomByteSize() { + return new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES); + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java index c449147cbbd5e..c047611f71932 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverIT.java @@ -19,13 +19,15 @@ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; -import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; @@ -36,9 +38,15 @@ import java.util.Collection; import java.util.Collections; import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.is; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) public class RolloverIT extends ESIntegTestCase { @@ -128,15 +136,23 @@ public void testRolloverConditionsNotMet() throws Exception { index("test_index-0", "type1", "1", "field", "value"); flush("test_index-0"); final RolloverResponse response = client().admin().indices().prepareRolloverIndex("test_alias") + .addMaxIndexSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB)) 
.addMaxIndexAgeCondition(TimeValue.timeValueHours(4)).get(); assertThat(response.getOldIndex(), equalTo("test_index-0")); assertThat(response.getNewIndex(), equalTo("test_index-000001")); assertThat(response.isDryRun(), equalTo(false)); assertThat(response.isRolledOver(), equalTo(false)); - assertThat(response.getConditionStatus().size(), equalTo(1)); - final Map.Entry conditionEntry = response.getConditionStatus().iterator().next(); - assertThat(conditionEntry.getKey(), equalTo(new MaxAgeCondition(TimeValue.timeValueHours(4)).toString())); - assertThat(conditionEntry.getValue(), equalTo(false)); + assertThat(response.getConditionStatus().size(), equalTo(2)); + + + assertThat(response.getConditionStatus(), everyItem(hasProperty("value", is(false)))); + Set conditions = response.getConditionStatus().stream() + .map(Map.Entry::getKey) + .collect(Collectors.toSet()); + assertThat(conditions, containsInAnyOrder( + new MaxSizeCondition(new ByteSizeValue(10, ByteSizeUnit.MB)).toString(), + new MaxAgeCondition(TimeValue.timeValueHours(4)).toString())); + final ClusterState state = client().admin().cluster().prepareState().get().getState(); final IndexMetaData oldIndex = state.metaData().index("test_index-0"); assertTrue(oldIndex.getAliases().containsKey("test_alias")); @@ -218,4 +234,47 @@ public void testRolloverWithDateMath() { assertThat(response.isRolledOver(), equalTo(true)); assertThat(response.getConditionStatus().size(), equalTo(0)); } + + public void testRolloverMaxSize() throws Exception { + assertAcked(prepareCreate("test-1").addAlias(new Alias("test_alias")).get()); + int numDocs = randomIntBetween(10, 20); + for (int i = 0; i < numDocs; i++) { + index("test-1", "doc", Integer.toString(i), "field", "foo-" + i); + } + flush("test-1"); + refresh("test_alias"); + + // A large max_size + { + final RolloverResponse response = client().admin().indices() + .prepareRolloverIndex("test_alias") + .addMaxIndexSizeCondition(new ByteSizeValue(randomIntBetween(100, 50 * 1024), ByteSizeUnit.MB)) + .get(); + assertThat(response.getOldIndex(), equalTo("test-1")); + assertThat(response.getNewIndex(), equalTo("test-000002")); + assertThat("No rollover with a large max_size condition", response.isRolledOver(), equalTo(false)); + } + + // A small max_size + { + final RolloverResponse response = client().admin().indices() + .prepareRolloverIndex("test_alias") + .addMaxIndexSizeCondition(new ByteSizeValue(randomIntBetween(1, 20), ByteSizeUnit.BYTES)) + .get(); + assertThat(response.getOldIndex(), equalTo("test-1")); + assertThat(response.getNewIndex(), equalTo("test-000002")); + assertThat("Should rollover with a small max_size condition", response.isRolledOver(), equalTo(true)); + } + + // An empty index + { + final RolloverResponse response = client().admin().indices() + .prepareRolloverIndex("test_alias") + .addMaxIndexSizeCondition(new ByteSizeValue(randomNonNegativeLong(), ByteSizeUnit.BYTES)) + .get(); + assertThat(response.getOldIndex(), equalTo("test-000002")); + assertThat(response.getNewIndex(), equalTo("test-000003")); + assertThat("No rollover with an empty index", response.isRolledOver(), equalTo(false)); + } + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java index 920ba2e9715f0..290ba79af0738 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java +++ 
b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/RolloverRequestTests.java @@ -19,17 +19,38 @@ package org.elasticsearch.action.admin.indices.rollover; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentFactory; +import org.elasticsearch.indices.IndicesModule; import org.elasticsearch.test.ESTestCase; +import org.junit.Before; +import java.util.Collections; +import java.util.List; import java.util.Set; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.equalTo; public class RolloverRequestTests extends ESTestCase { + private NamedWriteableRegistry writeableRegistry; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + writeableRegistry = new NamedWriteableRegistry(new IndicesModule(Collections.emptyList()).getNamedWriteables()); + } + public void testConditionsParsing() throws Exception { final RolloverRequest request = new RolloverRequest(randomAlphaOfLength(10), randomAlphaOfLength(10)); final XContentBuilder builder = XContentFactory.jsonBuilder() @@ -37,11 +58,12 @@ public void testConditionsParsing() throws Exception { .startObject("conditions") .field("max_age", "10d") .field("max_docs", 100) + .field("max_size", "45gb") .endObject() .endObject(); RolloverRequest.PARSER.parse(createParser(builder), request, null); Set conditions = request.getConditions(); - assertThat(conditions.size(), equalTo(2)); + assertThat(conditions.size(), equalTo(3)); for (Condition condition : conditions) { if (condition instanceof MaxAgeCondition) { MaxAgeCondition maxAgeCondition = (MaxAgeCondition) condition; @@ -49,6 +71,9 @@ public void testConditionsParsing() throws Exception { } else if (condition instanceof MaxDocsCondition) { MaxDocsCondition maxDocsCondition = (MaxDocsCondition) condition; assertThat(maxDocsCondition.value, equalTo(100L)); + } else if (condition instanceof MaxSizeCondition) { + MaxSizeCondition maxSizeCondition = (MaxSizeCondition) condition; + assertThat(maxSizeCondition.value.getBytes(), equalTo(ByteSizeUnit.GB.toBytes(45))); } else { fail("unexpected condition " + condition); } @@ -87,4 +112,33 @@ public void testParsingWithIndexSettings() throws Exception { assertThat(request.getCreateIndexRequest().aliases().size(), equalTo(1)); assertThat(request.getCreateIndexRequest().settings().getAsInt("number_of_shards", 0), equalTo(10)); } + + public void testSerialize() throws Exception { + RolloverRequest originalRequest = new RolloverRequest("alias-index", "new-index-name"); + originalRequest.addMaxIndexDocsCondition(randomNonNegativeLong()); + originalRequest.addMaxIndexAgeCondition(TimeValue.timeValueNanos(randomNonNegativeLong())); + originalRequest.addMaxIndexSizeCondition(new ByteSizeValue(randomNonNegativeLong())); + try (BytesStreamOutput out = new BytesStreamOutput()) { + originalRequest.writeTo(out); + BytesReference bytes = out.bytes(); + try (StreamInput in = new NamedWriteableAwareStreamInput(bytes.streamInput(), writeableRegistry)) { + RolloverRequest cloneRequest = new 
RolloverRequest(); + cloneRequest.readFrom(in); + assertThat(cloneRequest.getNewIndexName(), equalTo(originalRequest.getNewIndexName())); + assertThat(cloneRequest.getAlias(), equalTo(originalRequest.getAlias())); + + List originalConditions = originalRequest.getConditions().stream() + .map(Condition::toString) + .sorted() + .collect(Collectors.toList()); + + List cloneConditions = cloneRequest.getConditions().stream() + .map(Condition::toString) + .sorted() + .collect(Collectors.toList()); + + assertThat(originalConditions, equalTo(cloneConditions)); + } + } + } } diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java index b625b6c10aa34..dcb3a87df74f4 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/rollover/TransportRolloverActionTests.java @@ -32,23 +32,24 @@ import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.unit.TimeValue; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.index.shard.DocsStats; import org.elasticsearch.test.ESTestCase; +import org.mockito.ArgumentCaptor; -import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Set; -import org.mockito.ArgumentCaptor; import static org.elasticsearch.action.admin.indices.rollover.TransportRolloverAction.evaluateConditions; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.mockito.Matchers.any; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -59,7 +60,7 @@ public void testDocStatsSelectionFromPrimariesOnly() throws Exception { long docsInShards = 200; final Condition condition = createTestCondition(); - evaluateConditions(Sets.newHashSet(condition), createMetaData(), createIndecesStatResponse(docsInShards, docsInPrimaryShards)); + evaluateConditions(Sets.newHashSet(condition), createMetaData(), createIndicesStatResponse(docsInShards, docsInPrimaryShards)); final ArgumentCaptor argument = ArgumentCaptor.forClass(Condition.Stats.class); verify(condition).evaluate(argument.capture()); @@ -69,8 +70,11 @@ public void testDocStatsSelectionFromPrimariesOnly() throws Exception { public void testEvaluateConditions() throws Exception { MaxDocsCondition maxDocsCondition = new MaxDocsCondition(100L); MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(2)); + MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomIntBetween(10, 100), ByteSizeUnit.MB)); + long matchMaxDocs = randomIntBetween(100, 1000); long notMatchMaxDocs = randomIntBetween(0, 99); + ByteSizeValue notMatchMaxSize = new ByteSizeValue(randomIntBetween(0, 9), ByteSizeUnit.MB); final Settings settings = Settings.builder() .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) @@ -81,30 +85,56 @@ public void testEvaluateConditions() throws Exception { .creationDate(System.currentTimeMillis() - 
TimeValue.timeValueHours(3).getMillis()) .settings(settings) .build(); - final HashSet conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition); - Set results = evaluateConditions(conditions, new DocsStats(matchMaxDocs, 0L, between(1, 10000)), metaData); - assertThat(results.size(), equalTo(2)); + final Set conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition, maxSizeCondition); + Set results = evaluateConditions(conditions, + new DocsStats(matchMaxDocs, 0L, ByteSizeUnit.MB.toBytes(120)), metaData); + assertThat(results.size(), equalTo(3)); for (Condition.Result result : results) { assertThat(result.matched, equalTo(true)); } - results = evaluateConditions(conditions, new DocsStats(notMatchMaxDocs, 0, between(1, 10000)), metaData); - assertThat(results.size(), equalTo(2)); + + results = evaluateConditions(conditions, new DocsStats(notMatchMaxDocs, 0, notMatchMaxSize.getBytes()), metaData); + assertThat(results.size(), equalTo(3)); for (Condition.Result result : results) { if (result.condition instanceof MaxAgeCondition) { assertThat(result.matched, equalTo(true)); } else if (result.condition instanceof MaxDocsCondition) { assertThat(result.matched, equalTo(false)); + } else if (result.condition instanceof MaxSizeCondition) { + assertThat(result.matched, equalTo(false)); } else { fail("unknown condition result found " + result.condition); } } - results = evaluateConditions(conditions, null, metaData); - assertThat(results.size(), equalTo(2)); + } + + public void testEvaluateWithoutDocStats() throws Exception { + MaxDocsCondition maxDocsCondition = new MaxDocsCondition(randomNonNegativeLong()); + MaxAgeCondition maxAgeCondition = new MaxAgeCondition(TimeValue.timeValueHours(randomIntBetween(1, 3))); + MaxSizeCondition maxSizeCondition = new MaxSizeCondition(new ByteSizeValue(randomNonNegativeLong())); + + Set conditions = Sets.newHashSet(maxDocsCondition, maxAgeCondition, maxSizeCondition); + final Settings settings = Settings.builder() + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 1000)) + .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, randomInt(10)) + .build(); + + final IndexMetaData metaData = IndexMetaData.builder(randomAlphaOfLength(10)) + .creationDate(System.currentTimeMillis() - TimeValue.timeValueHours(randomIntBetween(5, 10)).getMillis()) + .settings(settings) + .build(); + Set results = evaluateConditions(conditions, null, metaData); + assertThat(results.size(), equalTo(3)); + for (Condition.Result result : results) { if (result.condition instanceof MaxAgeCondition) { assertThat(result.matched, equalTo(true)); } else if (result.condition instanceof MaxDocsCondition) { assertThat(result.matched, equalTo(false)); + } else if (result.condition instanceof MaxSizeCondition) { + assertThat(result.matched, equalTo(false)); } else { fail("unknown condition result found " + result.condition); } @@ -211,7 +241,7 @@ public void testCreateIndexRequest() throws Exception { assertThat(createIndexRequest.cause(), equalTo("rollover_index")); } - private IndicesStatsResponse createIndecesStatResponse(long totalDocs, long primaryDocs) { + private IndicesStatsResponse createIndicesStatResponse(long totalDocs, long primaryDocs) { final CommonStats primaryStats = mock(CommonStats.class); when(primaryStats.getDocs()).thenReturn(new DocsStats(primaryDocs, 0, between(1, 10000))); diff --git a/docs/reference/indices/rollover-index.asciidoc 
b/docs/reference/indices/rollover-index.asciidoc index 9aec8243af3f9..33bb09a1ef662 100644 --- a/docs/reference/indices/rollover-index.asciidoc +++ b/docs/reference/indices/rollover-index.asciidoc @@ -25,7 +25,8 @@ POST /logs_write/_rollover <2> { "conditions": { "max_age": "7d", - "max_docs": 1000 + "max_docs": 1000, + "max_size": "5gb" } } -------------------------------------------------- @@ -34,7 +35,7 @@ POST /logs_write/_rollover <2> // TEST[s/# Add > 1000 documents to logs-000001/POST _reindex?refresh\n{"source":{"index":"twitter"},"dest":{"index":"logs-000001"}}/] <1> Creates an index called `logs-0000001` with the alias `logs_write`. <2> If the index pointed to by `logs_write` was created 7 or more days ago, or - contains 1,000 or more documents, then the `logs-000002` index is created + contains 1,000 or more documents, or has an index size of roughly 5GB or more, then the `logs-000002` index is created and the `logs_write` alias is updated to point to `logs-000002`. The above request might return the following response: @@ -50,7 +51,8 @@ The above request might return the following response: "dry_run": false, <2> "conditions": { <3> "[max_age: 7d]": false, - "[max_docs: 1000]": true + "[max_docs: 1000]": true, + "[max_size: 5gb]": false } } -------------------------------------------------- @@ -76,7 +78,8 @@ POST /my_alias/_rollover/my_new_index_name { "conditions": { "max_age": "7d", - "max_docs": 1000 + "max_docs": 1000, + "max_size": "5gb" } } -------------------------------------------------- @@ -186,7 +189,8 @@ POST /logs_write/_rollover { "conditions" : { "max_age": "7d", - "max_docs": 1000 + "max_docs": 1000, + "max_size": "5gb" }, "settings": { "index.number_of_shards": 2 @@ -214,7 +218,8 @@ POST /logs_write/_rollover?dry_run { "conditions" : { "max_age": "7d", - "max_docs": 1000 + "max_docs": 1000, + "max_size": "5gb" } } -------------------------------------------------- diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml new file mode 100644 index 0000000000000..6804c51162aa1 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml @@ -0,0 +1,60 @@ +--- +"Rollover with max_size condition": + + - skip: + version: " - 6.99.99" + reason: max_size condition is introduced in v7 + + # create index with alias and replica + - do: + indices.create: + index: logs-1 + wait_for_active_shards: 1 + body: + aliases: + logs_search: {} + + # index a document + - do: + index: + index: logs-1 + type: doc + id: "1" + body: { "foo": "hello world" } + refresh: true + + # perform alias rollover with a large max_size, no action. + - do: + indices.rollover: + alias: "logs_search" + wait_for_active_shards: 1 + body: + conditions: + max_size: 100mb + + - match: { conditions: { "[max_size: 100mb]": false } } + - match: { rolled_over: false } + + # perform alias rollover with a small max_size, got action. + - do: + indices.rollover: + alias: "logs_search" + wait_for_active_shards: 1 + body: + conditions: + max_size: 10b + + - match: { conditions: { "[max_size: 10b]": true } } + - match: { rolled_over: true } + + # perform alias rollover on an empty index, no action.
+ - do: + indices.rollover: + alias: "logs_search" + wait_for_active_shards: 1 + body: + conditions: + max_size: 1b + + - match: { conditions: { "[max_size: 1b]": false } } + - match: { rolled_over: false } From fd3fac95656df87b655d29600ffdb8471119db1c Mon Sep 17 00:00:00 2001 From: Nhat Date: Sat, 4 Nov 2017 20:09:40 -0400 Subject: [PATCH 04/25] Backport the size-based index rollover to v6.1.0 Relates #27004 --- .../action/admin/indices/rollover/MaxSizeCondition.java | 2 +- .../test/indices.rollover/30_max_size_condition.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java index 3d6496cbae496..91b18bc050623 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxSizeCondition.java @@ -51,7 +51,7 @@ public Result evaluate(Stats stats) { @Override boolean includedInVersion(Version version) { - return version.onOrAfter(Version.V_7_0_0_alpha1); + return version.onOrAfter(Version.V_6_1_0); } @Override diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml index 6804c51162aa1..6e4df0f292915 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.rollover/30_max_size_condition.yml @@ -2,8 +2,8 @@ "Rollover with max_size condition": - skip: - version: " - 6.99.99" - reason: max_size condition is introduced in v7 + version: " - 6.0.99" + reason: max_size condition is introduced in 6.1.0 # create index with alias and replica - do: From 429275a773bfb78c712c8a092fcdc85a1a75b469 Mon Sep 17 00:00:00 2001 From: Jim Ferenczi Date: Mon, 6 Nov 2017 08:26:24 +0100 Subject: [PATCH 05/25] Remove ElasticsearchQueryCachingPolicy (#27190) We have a hidden setting called `index.queries.cache.term_queries` that disables caching of term queries in the query cache. However, term queries have not been cached by the Lucene UsageTrackingQueryCachingPolicy since Lucene 6.5. This makes the es policy useless, and it also makes it impossible to re-enable caching for term queries. The Lucene change appeared in Lucene 6.5, so this setting has been a no-op since version 5.4 of Elasticsearch. The change in this PR removes the setting and the custom policy.
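For background, a small self-contained sketch of the Lucene behavior described above (assumes Lucene 6.5 or later on the classpath; the wrapper class and printed output are illustrative only):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.QueryCachingPolicy;
import org.apache.lucene.search.TermQuery;
import org.apache.lucene.search.UsageTrackingQueryCachingPolicy;

import java.io.IOException;

public class TermQueryCachingSketch {
    public static void main(String[] args) throws IOException {
        QueryCachingPolicy policy = new UsageTrackingQueryCachingPolicy();
        // Since Lucene 6.5 the usage-tracking policy refuses to cache plain term
        // queries on its own, which is what made the custom ES wrapper redundant.
        boolean cacheable = policy.shouldCache(new TermQuery(new Term("field", "value")));
        System.out.println("term query cacheable: " + cacheable); // expected: false
    }
}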
--- .../common/settings/IndexScopedSettings.java | 1 - .../org/elasticsearch/index/IndexModule.java | 5 -- .../ElasticsearchQueryCachingPolicy.java | 56 ----------------- .../elasticsearch/index/shard/IndexShard.java | 6 +- .../ElasticsearchQueryCachingPolicyTests.java | 61 ------------------- .../elasticsearch/test/ESIntegTestCase.java | 3 - 6 files changed, 1 insertion(+), 131 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicy.java delete mode 100644 core/src/test/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicyTests.java diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index ed686bf9236e5..235300b8267f6 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -145,7 +145,6 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexModule.INDEX_STORE_PRE_LOAD_SETTING, IndexModule.INDEX_QUERY_CACHE_ENABLED_SETTING, IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING, - IndexModule.INDEX_QUERY_CACHE_TERM_QUERIES_SETTING, FsDirectoryService.INDEX_LOCK_FACTOR_SETTING, EngineConfig.INDEX_CODEC_SETTING, EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS, diff --git a/core/src/main/java/org/elasticsearch/index/IndexModule.java b/core/src/main/java/org/elasticsearch/index/IndexModule.java index f806c210f0014..869f8c9ca72db 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexModule.java +++ b/core/src/main/java/org/elasticsearch/index/IndexModule.java @@ -101,11 +101,6 @@ public final class IndexModule { public static final Setting INDEX_QUERY_CACHE_EVERYTHING_SETTING = Setting.boolSetting("index.queries.cache.everything", false, Property.IndexScope); - // This setting is an escape hatch in case not caching term queries would slow some users down - // Do not document. - public static final Setting INDEX_QUERY_CACHE_TERM_QUERIES_SETTING = - Setting.boolSetting("index.queries.cache.term_queries", false, Property.IndexScope); - private final IndexSettings indexSettings; private final AnalysisRegistry analysisRegistry; // pkg private so tests can mock diff --git a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicy.java b/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicy.java deleted file mode 100644 index 3ea3955a1f416..0000000000000 --- a/core/src/main/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicy.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.shard; - -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.TermQuery; - -import java.io.IOException; - -/** - * A {@link QueryCachingPolicy} that does not cache {@link TermQuery}s. - */ -final class ElasticsearchQueryCachingPolicy implements QueryCachingPolicy { - - private final QueryCachingPolicy in; - - ElasticsearchQueryCachingPolicy(QueryCachingPolicy in) { - this.in = in; - } - - @Override - public void onUse(Query query) { - if (query.getClass() != TermQuery.class) { - // Do not waste space in the history for term queries. The assumption - // is that these queries are very fast so not worth caching - in.onUse(query); - } - } - - @Override - public boolean shouldCache(Query query) throws IOException { - if (query.getClass() == TermQuery.class) { - return false; - } - return in.shouldCache(query); - } - -} diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index d27a8a0ada446..fc47c71573c1f 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -291,11 +291,7 @@ public IndexShard( if (IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.get(settings)) { cachingPolicy = QueryCachingPolicy.ALWAYS_CACHE; } else { - QueryCachingPolicy cachingPolicy = new UsageTrackingQueryCachingPolicy(); - if (IndexModule.INDEX_QUERY_CACHE_TERM_QUERIES_SETTING.get(settings) == false) { - cachingPolicy = new ElasticsearchQueryCachingPolicy(cachingPolicy); - } - this.cachingPolicy = cachingPolicy; + cachingPolicy = new UsageTrackingQueryCachingPolicy(); } indexShardOperationPermits = new IndexShardOperationPermits(shardId, logger, threadPool); searcherWrapper = indexSearcherWrapper; diff --git a/core/src/test/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicyTests.java b/core/src/test/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicyTests.java deleted file mode 100644 index 0344a15810f3b..0000000000000 --- a/core/src/test/java/org/elasticsearch/index/shard/ElasticsearchQueryCachingPolicyTests.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.shard; - -import org.apache.lucene.index.Term; -import org.apache.lucene.search.PhraseQuery; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.QueryCachingPolicy; -import org.apache.lucene.search.TermQuery; -import org.elasticsearch.test.ESTestCase; - -import java.io.IOException; - -public class ElasticsearchQueryCachingPolicyTests extends ESTestCase { - - public void testDoesNotCacheTermQueries() throws IOException { - QueryCachingPolicy policy = QueryCachingPolicy.ALWAYS_CACHE; - assertTrue(policy.shouldCache(new TermQuery(new Term("foo", "bar")))); - assertTrue(policy.shouldCache(new PhraseQuery("foo", "bar", "baz"))); - policy = new ElasticsearchQueryCachingPolicy(policy); - assertFalse(policy.shouldCache(new TermQuery(new Term("foo", "bar")))); - assertTrue(policy.shouldCache(new PhraseQuery("foo", "bar", "baz"))); - } - - public void testDoesNotPutTermQueriesIntoTheHistory() { - boolean[] used = new boolean[1]; - QueryCachingPolicy policy = new QueryCachingPolicy() { - @Override - public boolean shouldCache(Query query) throws IOException { - throw new UnsupportedOperationException(); - } - @Override - public void onUse(Query query) { - used[0] = true; - } - }; - policy = new ElasticsearchQueryCachingPolicy(policy); - policy.onUse(new TermQuery(new Term("foo", "bar"))); - assertFalse(used[0]); - policy.onUse(new PhraseQuery("foo", "bar", "baz")); - assertTrue(used[0]); - } - -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 4a25e95dbc304..eb9998d420878 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -429,9 +429,6 @@ public void randomIndexTemplate() throws IOException { if (randomBoolean()) { randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_EVERYTHING_SETTING.getKey(), randomBoolean()); } - if (randomBoolean()) { - randomSettingsBuilder.put(IndexModule.INDEX_QUERY_CACHE_TERM_QUERIES_SETTING.getKey(), randomBoolean()); - } PutIndexTemplateRequestBuilder putTemplate = client().admin().indices() .preparePutTemplate("random_index_template") .setPatterns(Collections.singletonList("*")) From 5d661df1749483637c46ca8b464dcec98bffc3f5 Mon Sep 17 00:00:00 2001 From: kel Date: Mon, 6 Nov 2017 02:40:28 -0600 Subject: [PATCH 06/25] Add more information on `_failed_to_convert_` exception (#27034) --- .../java/org/elasticsearch/index/IndexingSlowLog.java | 6 +++--- .../org/elasticsearch/index/IndexingSlowLogTests.java | 11 +++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java index b1d8ac188626f..94c3892ef361e 100644 --- a/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java +++ b/core/src/main/java/org/elasticsearch/index/IndexingSlowLog.java @@ -181,9 +181,9 @@ public String toString() { sb.append("type[").append(doc.type()).append("], "); sb.append("id[").append(doc.id()).append("], "); if (doc.routing() == null) { - sb.append("routing[] "); + sb.append("routing[]"); } else { - sb.append("routing[").append(doc.routing()).append("] "); + sb.append("routing[").append(doc.routing()).append("]"); } if (maxSourceCharsToLog == 0 || doc.source() == null || doc.source().length() == 0) { @@ -193,7 +193,7 @@ public String toString() { String 
source = XContentHelper.convertToJson(doc.source(), reformat, doc.getXContentType()); sb.append(", source[").append(Strings.cleanTruncate(source, maxSourceCharsToLog)).append("]"); } catch (IOException e) { - sb.append(", source[_failed_to_convert_]"); + sb.append(", source[_failed_to_convert_[").append(e.getMessage()).append("]]"); } return sb.toString(); } diff --git a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java index a3d14fc518499..45b0d0aa2475c 100644 --- a/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java +++ b/core/src/test/java/org/elasticsearch/index/IndexingSlowLogTests.java @@ -22,6 +22,7 @@ import org.apache.lucene.document.NumericDocValuesField; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -62,6 +63,16 @@ public void testSlowLogParsedDocumentPrinterSourceToLog() throws IOException { p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); assertThat(p.toString(), containsString("source[{\"f]")); assertThat(p.toString(), startsWith("[foo/123] took")); + + // Throwing a error if source cannot be converted + source = new BytesArray("invalid"); + pd = new ParsedDocument(new NumericDocValuesField("version", 1), SeqNoFieldMapper.SequenceIDFields.emptySeqID(), "id", + "test", null, null, source, XContentType.JSON, null); + p = new SlowLogParsedDocumentPrinter(index, pd, 10, true, 3); + + assertThat(p.toString(), containsString("_failed_to_convert_[Unrecognized token 'invalid':" + + " was expecting ('true', 'false' or 'null')\n" + + " at [Source: org.elasticsearch.common.bytes.BytesReference$MarkSupportingStreamInputWrapper")); } public void testReformatSetting() { From 76f81e002c52af1da6854e902e91922bdf2503c7 Mon Sep 17 00:00:00 2001 From: kel Date: Mon, 6 Nov 2017 02:48:57 -0600 Subject: [PATCH 07/25] Remove unused parameters in AnalysisRegistry (#27232) Removes unused parameters for AnalysisRegistry#processAnalyzerFactory and AnalysisRegistry#processNormalizerFactory. 
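The cleanup below also swaps containsKey/get pairs for Map.getOrDefault; a tiny standalone illustration of that equivalence for non-null values (map contents are hypothetical):

import java.util.HashMap;
import java.util.Map;

public class GetOrDefaultSketch {
    public static void main(String[] args) {
        Map<String, String> analyzers = new HashMap<>();
        String fallback = "default-analyzer";

        // Before: an explicit containsKey/get pair.
        String before = analyzers.containsKey("default_search")
                ? analyzers.get("default_search") : fallback;
        // After: one lookup with the same fallback semantics (the maps involved
        // never store null values, so the two forms are interchangeable).
        String after = analyzers.getOrDefault("default_search", fallback);

        System.out.println(before.equals(after)); // true
    }
}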
--- .../index/analysis/AnalysisRegistry.java | 42 +++++-------------- 1 file changed, 10 insertions(+), 32 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java index 2e5be06347721..039aaba5a2490 100644 --- a/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java +++ b/core/src/main/java/org/elasticsearch/index/analysis/AnalysisRegistry.java @@ -18,16 +18,12 @@ */ package org.elasticsearch.index.analysis; -import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.util.IOUtils; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.common.logging.DeprecationLogger; -import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.env.Environment; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; @@ -42,7 +38,6 @@ import java.util.HashMap; import java.util.Locale; import java.util.Map; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; @@ -188,9 +183,9 @@ public Map> buildAnalyzerFactories(IndexSettings ind } public Map> buildNormalizerFactories(IndexSettings indexSettings) throws IOException { - final Map noralizersSettings = indexSettings.getSettings().getGroups("index.analysis.normalizer"); + final Map normalizersSettings = indexSettings.getSettings().getGroups("index.analysis.normalizer"); // TODO: Have pre-built normalizers - return buildMapping(Component.NORMALIZER, indexSettings, noralizersSettings, normalizers, Collections.emptyMap()); + return buildMapping(Component.NORMALIZER, indexSettings, normalizersSettings, normalizers, Collections.emptyMap()); } /** @@ -455,33 +450,20 @@ public IndexAnalyzers build(IndexSettings indexSettings, Index index = indexSettings.getIndex(); analyzerProviders = new HashMap<>(analyzerProviders); - Logger logger = Loggers.getLogger(getClass(), indexSettings.getSettings()); - DeprecationLogger deprecationLogger = new DeprecationLogger(logger); - Map analyzerAliases = new HashMap<>(); Map analyzers = new HashMap<>(); Map normalizers = new HashMap<>(); for (Map.Entry> entry : analyzerProviders.entrySet()) { - processAnalyzerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), analyzerAliases, analyzers, + processAnalyzerFactory(indexSettings, entry.getKey(), entry.getValue(), analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); } for (Map.Entry> entry : normalizerProviders.entrySet()) { - processNormalizerFactory(deprecationLogger, indexSettings, entry.getKey(), entry.getValue(), normalizers, + processNormalizerFactory(entry.getKey(), entry.getValue(), normalizers, tokenizerFactoryFactories.get("keyword"), tokenFilterFactoryFactories, charFilterFactoryFactories); } - for (Map.Entry entry : analyzerAliases.entrySet()) { - String key = entry.getKey(); - if (analyzers.containsKey(key) && - ("default".equals(key) || "default_search".equals(key) || "default_search_quoted".equals(key)) == false) { - throw new IllegalStateException("already registered analyzer with name: " + key); - } else { - NamedAnalyzer configured = entry.getValue(); - analyzers.put(key, configured); - } - } if 
(!analyzers.containsKey("default")) { - processAnalyzerFactory(deprecationLogger, indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS), - analyzerAliases, analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); + processAnalyzerFactory(indexSettings, "default", new StandardAnalyzerProvider(indexSettings, null, "default", Settings.Builder.EMPTY_SETTINGS), + analyzers, tokenFilterFactoryFactories, charFilterFactoryFactories, tokenizerFactoryFactories); } if (!analyzers.containsKey("default_search")) { analyzers.put("default_search", analyzers.get("default")); @@ -490,7 +472,6 @@ public IndexAnalyzers build(IndexSettings indexSettings, analyzers.put("default_search_quoted", analyzers.get("default_search")); } - NamedAnalyzer defaultAnalyzer = analyzers.get("default"); if (defaultAnalyzer == null) { throw new IllegalArgumentException("no default analyzer configured"); @@ -498,8 +479,8 @@ public IndexAnalyzers build(IndexSettings indexSettings, if (analyzers.containsKey("default_index")) { throw new IllegalArgumentException("setting [index.analysis.analyzer.default_index] is not supported anymore, use [index.analysis.analyzer.default] instead for index [" + index.getName() + "]"); } - NamedAnalyzer defaultSearchAnalyzer = analyzers.containsKey("default_search") ? analyzers.get("default_search") : defaultAnalyzer; - NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.containsKey("default_search_quote") ? analyzers.get("default_search_quote") : defaultSearchAnalyzer; + NamedAnalyzer defaultSearchAnalyzer = analyzers.getOrDefault("default_search", defaultAnalyzer); + NamedAnalyzer defaultSearchQuoteAnalyzer = analyzers.getOrDefault("default_search_quote", defaultSearchAnalyzer); for (Map.Entry analyzer : analyzers.entrySet()) { if (analyzer.getKey().startsWith("_")) { @@ -510,11 +491,9 @@ public IndexAnalyzers build(IndexSettings indexSettings, unmodifiableMap(analyzers), unmodifiableMap(normalizers)); } - private void processAnalyzerFactory(DeprecationLogger deprecationLogger, - IndexSettings indexSettings, + private void processAnalyzerFactory(IndexSettings indexSettings, String name, AnalyzerProvider analyzerFactory, - Map analyzerAliases, Map analyzers, Map tokenFilters, Map charFilters, Map tokenizers) { /* @@ -561,8 +540,7 @@ private void processAnalyzerFactory(DeprecationLogger deprecationLogger, } } - private void processNormalizerFactory(DeprecationLogger deprecationLogger, - IndexSettings indexSettings, + private void processNormalizerFactory( String name, AnalyzerProvider normalizerFactory, Map normalizers, From 5d7d01ba7525d4bde8d2b2d3b22e83353d929aef Mon Sep 17 00:00:00 2001 From: Luca Cavanna Date: Mon, 6 Nov 2017 10:05:40 +0100 Subject: [PATCH 08/25] Adjust RestHighLevelClient method modifiers (#27238) RestHighLevelClient can be subclassed to add support for additional methods, but its public and protected methods should be final. 
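To make that contract concrete, a sketch of the extension pattern the final modifiers preserve (the subclass and its endpoint are made up; it lives in the org.elasticsearch.client package so it can reach the package-private Request helpers, and the builder-based constructor is assumed):

package org.elasticsearch.client;

import org.apache.http.Header;
import org.elasticsearch.action.main.MainRequest;
import org.elasticsearch.action.main.MainResponse;

import java.io.IOException;
import java.util.Collections;

// Hypothetical subclass: inherited public methods are now final, while new
// endpoints can still be layered on top of the protected (also final) plumbing.
public class CustomRestHighLevelClient extends RestHighLevelClient {

    public CustomRestHighLevelClient(RestClientBuilder builder) {
        super(builder); // assumed: the builder-based constructor of this era
    }

    // A made-up convenience method that reuses the generic request machinery.
    public MainResponse customInfo(Header... headers) throws IOException {
        return performRequestAndParseEntity(new MainRequest(), request -> Request.info(),
                MainResponse::fromXContent, Collections.emptySet(), headers);
    }
}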
--- .../client/RestHighLevelClient.java | 56 ++++++++++--------- 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java index bc3538930d3d0..e4827cf31c00d 100755 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/RestHighLevelClient.java @@ -227,7 +227,7 @@ public final void close() throws IOException { * * See Indices API on elastic.co */ - public IndicesClient indices() { + public final IndicesClient indices() { return indicesClient; } @@ -236,7 +236,7 @@ public IndicesClient indices() { * * See Bulk API on elastic.co */ - public BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException { + public final BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOException { return performRequestAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, emptySet(), headers); } @@ -245,14 +245,14 @@ public BulkResponse bulk(BulkRequest bulkRequest, Header... headers) throws IOEx * * See Bulk API on elastic.co */ - public void bulkAsync(BulkRequest bulkRequest, ActionListener listener, Header... headers) { + public final void bulkAsync(BulkRequest bulkRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(bulkRequest, Request::bulk, BulkResponse::fromXContent, listener, emptySet(), headers); } /** * Pings the remote Elasticsearch cluster and returns true if the ping succeeded, false otherwise */ - public boolean ping(Header... headers) throws IOException { + public final boolean ping(Header... headers) throws IOException { return performRequest(new MainRequest(), (request) -> Request.ping(), RestHighLevelClient::convertExistsResponse, emptySet(), headers); } @@ -260,7 +260,7 @@ public boolean ping(Header... headers) throws IOException { /** * Get the cluster info otherwise provided when sending an HTTP request to port 9200 */ - public MainResponse info(Header... headers) throws IOException { + public final MainResponse info(Header... headers) throws IOException { return performRequestAndParseEntity(new MainRequest(), (request) -> Request.info(), MainResponse::fromXContent, emptySet(), headers); } @@ -270,7 +270,7 @@ public MainResponse info(Header... headers) throws IOException { * * See Get API on elastic.co */ - public GetResponse get(GetRequest getRequest, Header... headers) throws IOException { + public final GetResponse get(GetRequest getRequest, Header... headers) throws IOException { return performRequestAndParseEntity(getRequest, Request::get, GetResponse::fromXContent, singleton(404), headers); } @@ -288,7 +288,7 @@ public void getAsync(GetRequest getRequest, ActionListener listener * * See Get API on elastic.co */ - public boolean exists(GetRequest getRequest, Header... headers) throws IOException { + public final boolean exists(GetRequest getRequest, Header... headers) throws IOException { return performRequest(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, emptySet(), headers); } @@ -297,7 +297,7 @@ public boolean exists(GetRequest getRequest, Header... headers) throws IOExcepti * * See Get API on elastic.co */ - public void existsAsync(GetRequest getRequest, ActionListener listener, Header... 
headers) { + public final void existsAsync(GetRequest getRequest, ActionListener listener, Header... headers) { performRequestAsync(getRequest, Request::exists, RestHighLevelClient::convertExistsResponse, listener, emptySet(), headers); } @@ -306,7 +306,7 @@ public void existsAsync(GetRequest getRequest, ActionListener listener, * * See Index API on elastic.co */ - public IndexResponse index(IndexRequest indexRequest, Header... headers) throws IOException { + public final IndexResponse index(IndexRequest indexRequest, Header... headers) throws IOException { return performRequestAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, emptySet(), headers); } @@ -315,7 +315,7 @@ public IndexResponse index(IndexRequest indexRequest, Header... headers) throws * * See Index API on elastic.co */ - public void indexAsync(IndexRequest indexRequest, ActionListener listener, Header... headers) { + public final void indexAsync(IndexRequest indexRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(indexRequest, Request::index, IndexResponse::fromXContent, listener, emptySet(), headers); } @@ -324,7 +324,7 @@ public void indexAsync(IndexRequest indexRequest, ActionListener *
* See Update API on elastic.co */ - public UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException { + public final UpdateResponse update(UpdateRequest updateRequest, Header... headers) throws IOException { return performRequestAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, emptySet(), headers); } @@ -333,7 +333,7 @@ public UpdateResponse update(UpdateRequest updateRequest, Header... headers) thr *

* See Update API on elastic.co */ - public void updateAsync(UpdateRequest updateRequest, ActionListener listener, Header... headers) { + public final void updateAsync(UpdateRequest updateRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(updateRequest, Request::update, UpdateResponse::fromXContent, listener, emptySet(), headers); } @@ -342,7 +342,7 @@ public void updateAsync(UpdateRequest updateRequest, ActionListenerDelete API on elastic.co */ - public DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException { + public final DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) throws IOException { return performRequestAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, Collections.singleton(404), headers); } @@ -352,7 +352,7 @@ public DeleteResponse delete(DeleteRequest deleteRequest, Header... headers) thr * * See Delete API on elastic.co */ - public void deleteAsync(DeleteRequest deleteRequest, ActionListener listener, Header... headers) { + public final void deleteAsync(DeleteRequest deleteRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(deleteRequest, Request::delete, DeleteResponse::fromXContent, listener, Collections.singleton(404), headers); } @@ -362,7 +362,7 @@ public void deleteAsync(DeleteRequest deleteRequest, ActionListenerSearch API on elastic.co */ - public SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException { + public final SearchResponse search(SearchRequest searchRequest, Header... headers) throws IOException { return performRequestAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, emptySet(), headers); } @@ -371,7 +371,7 @@ public SearchResponse search(SearchRequest searchRequest, Header... headers) thr * * See Search API on elastic.co */ - public void searchAsync(SearchRequest searchRequest, ActionListener listener, Header... headers) { + public final void searchAsync(SearchRequest searchRequest, ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(searchRequest, Request::search, SearchResponse::fromXContent, listener, emptySet(), headers); } @@ -381,7 +381,7 @@ public void searchAsync(SearchRequest searchRequest, ActionListenerSearch Scroll * API on elastic.co */ - public SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException { + public final SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Header... headers) throws IOException { return performRequestAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent, emptySet(), headers); } @@ -391,7 +391,8 @@ public SearchResponse searchScroll(SearchScrollRequest searchScrollRequest, Head * See Search Scroll * API on elastic.co */ - public void searchScrollAsync(SearchScrollRequest searchScrollRequest, ActionListener listener, Header... headers) { + public final void searchScrollAsync(SearchScrollRequest searchScrollRequest, + ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(searchScrollRequest, Request::searchScroll, SearchResponse::fromXContent, listener, emptySet(), headers); } @@ -402,7 +403,7 @@ public void searchScrollAsync(SearchScrollRequest searchScrollRequest, ActionLis * See * Clear Scroll API on elastic.co */ - public ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... 
headers) throws IOException { + public final ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, Header... headers) throws IOException { return performRequestAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent, emptySet(), headers); } @@ -413,19 +414,20 @@ public ClearScrollResponse clearScroll(ClearScrollRequest clearScrollRequest, He * See * Clear Scroll API on elastic.co */ - public void clearScrollAsync(ClearScrollRequest clearScrollRequest, ActionListener listener, Header... headers) { + public final void clearScrollAsync(ClearScrollRequest clearScrollRequest, + ActionListener listener, Header... headers) { performRequestAsyncAndParseEntity(clearScrollRequest, Request::clearScroll, ClearScrollResponse::fromXContent, listener, emptySet(), headers); } - protected Resp performRequestAndParseEntity(Req request, + protected final Resp performRequestAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, Set ignores, Header... headers) throws IOException { return performRequest(request, requestConverter, (response) -> parseEntity(response.getEntity(), entityParser), ignores, headers); } - protected Resp performRequest(Req request, + protected final Resp performRequest(Req request, CheckedFunction requestConverter, CheckedFunction responseConverter, Set ignores, Header... headers) throws IOException { @@ -459,7 +461,7 @@ protected Resp performRequest(Req request, } } - protected void performRequestAsyncAndParseEntity(Req request, + protected final void performRequestAsyncAndParseEntity(Req request, CheckedFunction requestConverter, CheckedFunction entityParser, ActionListener listener, Set ignores, Header... headers) { @@ -467,7 +469,7 @@ protected void performRequestAsyncAndParseEnti listener, ignores, headers); } - protected void performRequestAsync(Req request, + protected final void performRequestAsync(Req request, CheckedFunction requestConverter, CheckedFunction responseConverter, ActionListener listener, Set ignores, Header... headers) { @@ -488,7 +490,7 @@ protected void performRequestAsync(Req request client.performRequestAsync(req.getMethod(), req.getEndpoint(), req.getParameters(), req.getEntity(), responseListener, headers); } - ResponseListener wrapResponseListener(CheckedFunction responseConverter, + final ResponseListener wrapResponseListener(CheckedFunction responseConverter, ActionListener actionListener, Set ignores) { return new ResponseListener() { @Override @@ -533,7 +535,7 @@ public void onFailure(Exception exception) { * that wraps the original {@link ResponseException}. The potential exception obtained while parsing is added to the returned * exception as a suppressed exception. This method is guaranteed to not throw any exception eventually thrown while parsing. 
*/ - protected ElasticsearchStatusException parseResponseException(ResponseException responseException) { + protected final ElasticsearchStatusException parseResponseException(ResponseException responseException) { Response response = responseException.getResponse(); HttpEntity entity = response.getEntity(); ElasticsearchStatusException elasticsearchException; @@ -553,7 +555,7 @@ protected ElasticsearchStatusException parseResponseException(ResponseException return elasticsearchException; } - protected Resp parseEntity(final HttpEntity entity, + protected final Resp parseEntity(final HttpEntity entity, final CheckedFunction entityParser) throws IOException { if (entity == null) { throw new IllegalStateException("Response body expected but not returned");
From e440e23ad140a9b61c81ab164761c91be9595efc Mon Sep 17 00:00:00 2001 From: olcbean Date: Mon, 6 Nov 2017 10:11:25 +0100 Subject: [PATCH 09/25] Fix inconsistencies in the rest api specs for `tasks` (#27163) Modify parameter names to reflect the changes done in the code base. --- .../src/main/resources/rest-api-spec/api/tasks.cancel.json | 4 ++-- .../src/main/resources/rest-api-spec/api/tasks.list.json | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json index 69d21f4ec1def..cffa74934bccc 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.cancel.json @@ -12,7 +12,7 @@ } }, "params": { - "node_id": { + "nodes": { "type": "list", "description": "A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes" }, @@ -24,7 +24,7 @@ "type": "string", "description": "Cancel tasks with specified parent node." }, - "parent_task": { + "parent_task_id": { "type" : "string", "description" : "Cancel tasks with specified parent task id (node_id:task_number). Set to -1 to cancel all." } diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json index a966cb0e50716..fbe355ee164b0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.list.json @@ -7,7 +7,7 @@ "paths": ["/_tasks"], "parts": {}, "params": { - "node_id": { + "nodes": { "type": "list", "description": "A comma-separated list of node IDs or names to limit the returned information; use `_local` to return information from the node you're connecting to, leave empty to get information from all nodes" }, @@ -23,7 +23,7 @@ "type": "string", "description": "Return tasks with specified parent node." }, - "parent_task": { + "parent_task_id": { "type" : "string", "description" : "Return tasks with specified parent task id (node_id:task_number). Set to -1 to return all." },
From 43e7a4a3495b411ac95aabfd18e94185d01c3207 Mon Sep 17 00:00:00 2001 From: Tanguy Leroux Date: Mon, 6 Nov 2017 10:20:05 +0100 Subject: [PATCH 10/25] Upgrade to Jackson 2.8.10 (#27230) While it's not possible to upgrade the Jackson dependencies to their latest versions yet (see #27032 (comment) for more), it's still possible to upgrade to the latest 2.8.x version.
--- buildSrc/version.properties | 4 +- .../licenses/jackson-core-2.8.10.jar.sha1 | 1 + .../licenses/jackson-core-2.8.6.jar.sha1 | 1 - core/licenses/jackson-core-2.8.10.jar.sha1 | 1 + core/licenses/jackson-core-2.8.6.jar.sha1 | 1 - .../jackson-dataformat-cbor-2.8.10.jar.sha1 | 1 + .../jackson-dataformat-cbor-2.8.6.jar.sha1 | 1 - .../jackson-dataformat-smile-2.8.10.jar.sha1 | 1 + .../jackson-dataformat-smile-2.8.6.jar.sha1 | 1 - .../jackson-dataformat-yaml-2.8.10.jar.sha1 | 1 + .../jackson-dataformat-yaml-2.8.6.jar.sha1 | 1 - core/licenses/snakeyaml-1.15.jar.sha1 | 1 - core/licenses/snakeyaml-1.17.jar.sha1 | 1 + .../common/io/FastStringReader.java | 6 ++- .../common/xcontent/XContentParserTests.java | 42 +++++++++++++++++++ .../org/elasticsearch/test/RandomObjects.java | 2 +- 16 files changed, 55 insertions(+), 11 deletions(-) create mode 100644 client/sniffer/licenses/jackson-core-2.8.10.jar.sha1 delete mode 100644 client/sniffer/licenses/jackson-core-2.8.6.jar.sha1 create mode 100644 core/licenses/jackson-core-2.8.10.jar.sha1 delete mode 100644 core/licenses/jackson-core-2.8.6.jar.sha1 create mode 100644 core/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 delete mode 100644 core/licenses/jackson-dataformat-cbor-2.8.6.jar.sha1 create mode 100644 core/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 delete mode 100644 core/licenses/jackson-dataformat-smile-2.8.6.jar.sha1 create mode 100644 core/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 delete mode 100644 core/licenses/jackson-dataformat-yaml-2.8.6.jar.sha1 delete mode 100644 core/licenses/snakeyaml-1.15.jar.sha1 create mode 100644 core/licenses/snakeyaml-1.17.jar.sha1 diff --git a/buildSrc/version.properties b/buildSrc/version.properties index 3cdbf099ec1d0..020ff236d9bf3 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -5,8 +5,8 @@ lucene = 7.1.0 # optional dependencies spatial4j = 0.6 jts = 1.13 -jackson = 2.8.6 -snakeyaml = 1.15 +jackson = 2.8.10 +snakeyaml = 1.17 # when updating log4j, please update also docs/java-api/index.asciidoc log4j = 2.9.1 slf4j = 1.6.2 diff --git a/client/sniffer/licenses/jackson-core-2.8.10.jar.sha1 b/client/sniffer/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..a322d371e265e --- /dev/null +++ b/client/sniffer/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/client/sniffer/licenses/jackson-core-2.8.6.jar.sha1 b/client/sniffer/licenses/jackson-core-2.8.6.jar.sha1 deleted file mode 100644 index af7677d13c28c..0000000000000 --- a/client/sniffer/licenses/jackson-core-2.8.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2ef7b1cc34de149600f5e75bc2d5bf40de894e60 \ No newline at end of file diff --git a/core/licenses/jackson-core-2.8.10.jar.sha1 b/core/licenses/jackson-core-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..a322d371e265e --- /dev/null +++ b/core/licenses/jackson-core-2.8.10.jar.sha1 @@ -0,0 +1 @@ +eb21a035c66ad307e66ec8fce37f5d50fd62d039 \ No newline at end of file diff --git a/core/licenses/jackson-core-2.8.6.jar.sha1 b/core/licenses/jackson-core-2.8.6.jar.sha1 deleted file mode 100644 index af7677d13c28c..0000000000000 --- a/core/licenses/jackson-core-2.8.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -2ef7b1cc34de149600f5e75bc2d5bf40de894e60 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 b/core/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..1d3e18e21a694 --- 
/dev/null +++ b/core/licenses/jackson-dataformat-cbor-2.8.10.jar.sha1 @@ -0,0 +1 @@ +1c58cc9313ddf19f0900cd61ed044874278ce320 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-cbor-2.8.6.jar.sha1 b/core/licenses/jackson-dataformat-cbor-2.8.6.jar.sha1 deleted file mode 100644 index 6a2e980235381..0000000000000 --- a/core/licenses/jackson-dataformat-cbor-2.8.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -b88721371cfa2d7242bb5e52fe70861aa061c050 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 b/core/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..4f4cacde22079 --- /dev/null +++ b/core/licenses/jackson-dataformat-smile-2.8.10.jar.sha1 @@ -0,0 +1 @@ +e853081fadaad3e98ed801937acc3d8f77580686 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-smile-2.8.6.jar.sha1 b/core/licenses/jackson-dataformat-smile-2.8.6.jar.sha1 deleted file mode 100644 index 19be9a2040bed..0000000000000 --- a/core/licenses/jackson-dataformat-smile-2.8.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -71590ad45cee21249774e2f93e5eca66e446cef3 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 b/core/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 new file mode 100644 index 0000000000000..40bcb05f69795 --- /dev/null +++ b/core/licenses/jackson-dataformat-yaml-2.8.10.jar.sha1 @@ -0,0 +1 @@ +1e08caf1d787c825307d8cc6362452086020d853 \ No newline at end of file diff --git a/core/licenses/jackson-dataformat-yaml-2.8.6.jar.sha1 b/core/licenses/jackson-dataformat-yaml-2.8.6.jar.sha1 deleted file mode 100644 index c61dad3bbcdd7..0000000000000 --- a/core/licenses/jackson-dataformat-yaml-2.8.6.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -8bd44d50f9a6cdff9c7578ea39d524eb519e35ab \ No newline at end of file diff --git a/core/licenses/snakeyaml-1.15.jar.sha1 b/core/licenses/snakeyaml-1.15.jar.sha1 deleted file mode 100644 index 48391d6d9e1a7..0000000000000 --- a/core/licenses/snakeyaml-1.15.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -3b132bea69e8ee099f416044970997bde80f4ea6 \ No newline at end of file diff --git a/core/licenses/snakeyaml-1.17.jar.sha1 b/core/licenses/snakeyaml-1.17.jar.sha1 new file mode 100644 index 0000000000000..9ac6e87f2244a --- /dev/null +++ b/core/licenses/snakeyaml-1.17.jar.sha1 @@ -0,0 +1 @@ +7a27ea250c5130b2922b86dea63cbb1cc10a660c \ No newline at end of file diff --git a/core/src/main/java/org/elasticsearch/common/io/FastStringReader.java b/core/src/main/java/org/elasticsearch/common/io/FastStringReader.java index 17398b7139b67..2ac7e9022e687 100644 --- a/core/src/main/java/org/elasticsearch/common/io/FastStringReader.java +++ b/core/src/main/java/org/elasticsearch/common/io/FastStringReader.java @@ -34,6 +34,7 @@ public class FastStringReader extends Reader implements CharSequence { private int length; private int next = 0; private int mark = 0; + private boolean closed = false; /** * Creates a new string reader. 
@@ -49,8 +50,9 @@ public FastStringReader(String s) { * Check to make sure that the stream has not been closed */ private void ensureOpen() throws IOException { - if (length == -1) + if (closed) { throw new IOException("Stream closed"); + } } @Override @@ -196,7 +198,7 @@ public void reset() throws IOException { */ @Override public void close() { - length = -1; + closed = true; } @Override diff --git a/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java index 3397a463ea823..8e3246d8b8a59 100644 --- a/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java +++ b/core/src/test/java/org/elasticsearch/common/xcontent/XContentParserTests.java @@ -42,6 +42,48 @@ public class XContentParserTests extends ESTestCase { + public void testFloat() throws IOException { + final XContentType xContentType = randomFrom(XContentType.values()); + + final String field = randomAlphaOfLengthBetween(1, 5); + final Float value = randomFloat(); + + try (XContentBuilder builder = XContentBuilder.builder(xContentType.xContent())) { + builder.startObject(); + if (randomBoolean()) { + builder.field(field, value); + } else { + builder.field(field).value(value); + } + builder.endObject(); + + final Number number; + try (XContentParser parser = createParser(xContentType.xContent(), builder.bytes())) { + assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); + assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken()); + assertEquals(field, parser.currentName()); + assertEquals(XContentParser.Token.VALUE_NUMBER, parser.nextToken()); + + number = parser.numberValue(); + + assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken()); + assertNull(parser.nextToken()); + } + + assertEquals(value, number.floatValue(), 0.0f); + + if (xContentType == XContentType.CBOR) { + // CBOR parses back a float + assertTrue(number instanceof Float); + } else { + // JSON, YAML and SMILE parses back the float value as a double + // This will change for SMILE in Jackson 2.9 where all binary based + // formats will return a float + assertTrue(number instanceof Double); + } + } + } + public void testReadList() throws IOException { assertThat(readList("{\"foo\": [\"bar\"]}"), contains("bar")); assertThat(readList("{\"foo\": [\"bar\",\"baz\"]}"), contains("bar", "baz")); diff --git a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java index 67999b82a2fe6..1868fc34a991f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java +++ b/test/framework/src/main/java/org/elasticsearch/test/RandomObjects.java @@ -108,7 +108,7 @@ public static Tuple, List> randomStoredFieldValues(Random r //with CBOR we get back a float expectedParsedValues.add(randomFloat); } else if (xContentType == XContentType.SMILE) { - //with SMILE we get back a double + //with SMILE we get back a double (this will change in Jackson 2.9 where it will return a Float) expectedParsedValues.add(randomFloat.doubleValue()); } else { //with JSON AND YAML we get back a double, but with float precision. 
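The FastStringReader change above is worth spelling out: the old code overloaded the length field as a close sentinel (length == -1), but the class also implements CharSequence, so length() would report -1 on a closed reader. Tracking the state in a dedicated closed flag keeps the stream-state check independent of the character data the class exposes. A minimal sketch of the pattern on a simplified reader (not the actual Elasticsearch class):

    import java.io.IOException;
    import java.io.Reader;

    // Simplified illustration: an explicit "closed" flag instead of a sentinel
    // value smuggled into another field, so the CharSequence accessors stay
    // valid even after close().
    final class SimpleStringReader extends Reader implements CharSequence {
        private final String str;
        private int next = 0;
        private boolean closed = false;

        SimpleStringReader(String s) {
            this.str = s;
        }

        private void ensureOpen() throws IOException {
            if (closed) {
                throw new IOException("Stream closed");
            }
        }

        @Override
        public int read(char[] cbuf, int off, int len) throws IOException {
            ensureOpen();
            if (len == 0) {
                return 0;
            }
            if (next >= str.length()) {
                return -1;
            }
            int n = Math.min(str.length() - next, len);
            str.getChars(next, next + n, cbuf, off);
            next += n;
            return n;
        }

        @Override
        public void close() {
            closed = true; // length() below is unaffected
        }

        @Override
        public int length() {
            return str.length();
        }

        @Override
        public char charAt(int index) {
            return str.charAt(index);
        }

        @Override
        public CharSequence subSequence(int start, int end) {
            return str.subSequence(start, end);
        }

        @Override
        public String toString() {
            return str;
        }
    }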
From 7b03d68f9f08965236cd560a10eb856089a67d66 Mon Sep 17 00:00:00 2001 From: Pablo Musa Date: Mon, 6 Nov 2017 10:16:52 +0100 Subject: [PATCH 11/25] [Docs] Fix minor paragraph indentation error for multiple Indices params (#25535) --- docs/reference/api-conventions.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/api-conventions.asciidoc b/docs/reference/api-conventions.asciidoc index d4e06b9d05248..472c48e523229 100644 --- a/docs/reference/api-conventions.asciidoc +++ b/docs/reference/api-conventions.asciidoc @@ -47,7 +47,7 @@ to. If `open` is specified then the wildcard expression is expanded to only open indices and if `closed` is specified then the wildcard expression is expanded only to closed indices. Also both values (`open,closed`) can be specified to expand to all indices. - ++ If `none` is specified then wildcard expansion will be disabled and if `all` is specified, wildcard expressions will expand to all indices (this is equivalent to specifying `open,closed`).
From bd7efa908ab69831757d45ca7519c286097777dc Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 6 Nov 2017 10:37:55 +0000 Subject: [PATCH 12/25] Add ability to split shards (#26931)

This change adds a new `_split` API that allows splitting indices into a new index with a power of two more shards than the source index. This API works alongside the `_shrink` API but doesn't require any shard relocation before indices can be split.

The split operation is conceptually an inverse `_shrink` operation, since we initialize the index with a _synthetic_ number of routing shards that are used for the consistent hashing at index time. Compared to indices created with earlier versions this might produce slightly different shard distributions, but it has no impact on the per-index backwards compatibility. For now, the user is required to prepare an index to be splittable by setting `index.number_of_routing_shards` at index creation time. The setting allows the user to prepare the index to be splittable in factors of `index.number_of_routing_shards`, i.e. if the index is created with `index.number_of_routing_shards: 16` and `index.number_of_shards: 2` it can be split into `4, 8, 16` shards. This is an intermediate step until we can make this the default. This also allows us to safely backport this change to 6.x.

The `_split` operation is implemented internally as a DeleteByQuery at the Lucene level that is executed while the primary shards execute their initial recovery. Subsequent merges that are triggered due to this operation will not be executed immediately. All merges will be deferred until the shards are started and will then be throttled accordingly.

This change is intended for the 6.1 feature release but will not support splitting pre-6.1 indices unless these indices have been shrunk before. In that case these indices can be split backwards into their original number of shards.
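The factor rule quoted in the message can be made concrete with a small sketch. This is illustrative only (the real validation lives in IndexMetaData.selectSplitShard, referenced in the diff below): a legal split target must be a multiple of the current shard count and must divide index.number_of_routing_shards evenly.

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative helper (not an Elasticsearch API): enumerate the shard
    // counts an index can be split into, given the constraint described in
    // the commit message.
    final class SplitTargets {
        static List<Integer> validSplitTargets(int numShards, int numRoutingShards) {
            List<Integer> targets = new ArrayList<>();
            for (int target = numShards * 2; target <= numRoutingShards; target++) {
                // the target must divide the routing shards evenly and be a
                // multiple of the current shard count
                if (numRoutingShards % target == 0 && target % numShards == 0) {
                    targets.add(target);
                }
            }
            return targets;
        }

        public static void main(String[] args) {
            // index.number_of_shards: 2, index.number_of_routing_shards: 16
            System.out.println(validSplitTargets(2, 16)); // prints [4, 8, 16]
        }
    }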
--- .../elasticsearch/action/ActionModule.java | 8 +- .../CreateIndexClusterStateUpdateRequest.java | 25 +- .../admin/indices/shrink/ResizeAction.java | 45 ++ ...{ShrinkRequest.java => ResizeRequest.java} | 74 ++- ...Builder.java => ResizeRequestBuilder.java} | 30 +- ...hrinkResponse.java => ResizeResponse.java} | 6 +- .../admin/indices/shrink/ResizeType.java | 27 + .../admin/indices/shrink/ShrinkAction.java | 10 +- .../indices/shrink/TransportResizeAction.java | 201 ++++++++ .../indices/shrink/TransportShrinkAction.java | 127 +---- .../master/TransportMasterNodeAction.java | 14 +- .../client/IndicesAdminClient.java | 19 +- .../client/support/AbstractClient.java | 20 +- .../transport/TransportProxyClient.java | 1 + .../elasticsearch/cluster/ClusterModule.java | 2 + .../cluster/metadata/IndexMetaData.java | 111 ++++- .../metadata/MetaDataCreateIndexService.java | 139 ++++-- .../cluster/routing/IndexRoutingTable.java | 2 +- .../cluster/routing/OperationRouting.java | 2 +- .../decider/DiskThresholdDecider.java | 6 +- .../decider/ResizeAllocationDecider.java | 102 ++++ .../common/settings/IndexScopedSettings.java | 3 + .../common/settings/Setting.java | 11 + .../elasticsearch/index/shard/IndexShard.java | 26 +- .../index/shard/ShardSplittingQuery.java | 245 ++++++++++ .../index/shard/StoreRecovery.java | 30 +- .../admin/indices/RestShrinkIndexAction.java | 14 +- .../admin/indices/RestSplitIndexAction.java | 69 +++ .../admin/indices/create/ShrinkIndexIT.java | 16 +- .../admin/indices/create/SplitIndexIT.java | 462 ++++++++++++++++++ ...s.java => TransportResizeActionTests.java} | 32 +- .../cluster/ClusterModuleTests.java | 2 + .../metadata/IndexCreationTaskTests.java | 5 +- .../cluster/metadata/IndexMetaDataTests.java | 98 +++- .../MetaDataCreateIndexServiceTests.java | 108 +++- .../routing/OperationRoutingTests.java | 40 +- .../ResizeAllocationDeciderTests.java | 287 +++++++++++ .../index/shard/ShardSplittingQueryTests.java | 193 ++++++++ .../index/shard/StoreRecoveryTests.java | 108 +++- .../routing/PartitionedRoutingIT.java | 2 +- .../DedicatedClusterSnapshotRestoreIT.java | 2 +- .../SharedSignificantTermsTestMethods.java | 2 +- docs/reference/indices.asciidoc | 1 + docs/reference/indices/split-index.asciidoc | 165 +++++++ .../rest-api-spec/api/indices.split.json | 39 ++ .../test/indices.split/10_basic.yml | 101 ++++ .../test/indices.split/20_source_mapping.yml | 72 +++ .../cluster/routing/TestShardRouting.java | 4 + 48 files changed, 2749 insertions(+), 359 deletions(-) create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java rename core/src/main/java/org/elasticsearch/action/admin/indices/shrink/{ShrinkRequest.java => ResizeRequest.java} (65%) rename core/src/main/java/org/elasticsearch/action/admin/indices/shrink/{ShrinkRequestBuilder.java => ResizeRequestBuilder.java} (73%) rename core/src/main/java/org/elasticsearch/action/admin/indices/shrink/{ShrinkResponse.java => ResizeResponse.java} (86%) create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java create mode 100644 core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java create mode 100644 core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java create mode 100644 core/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java create mode 100644 core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java create mode 100644 
core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java rename core/src/test/java/org/elasticsearch/action/admin/indices/shrink/{TransportShrinkActionTests.java => TransportResizeActionTests.java} (87%) create mode 100644 core/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java create mode 100644 core/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java create mode 100644 docs/reference/indices/split-index.asciidoc create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml create mode 100644 rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml diff --git a/core/src/main/java/org/elasticsearch/action/ActionModule.java b/core/src/main/java/org/elasticsearch/action/ActionModule.java index 86582e9b8d046..28fd3458b902a 100644 --- a/core/src/main/java/org/elasticsearch/action/ActionModule.java +++ b/core/src/main/java/org/elasticsearch/action/ActionModule.java @@ -128,7 +128,9 @@ import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeAction; import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; +import org.elasticsearch.action.admin.indices.shrink.TransportResizeAction; import org.elasticsearch.action.admin.indices.shrink.TransportShrinkAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction; @@ -181,7 +183,6 @@ import org.elasticsearch.action.search.TransportMultiSearchAction; import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.search.TransportSearchScrollAction; -import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.AutoCreateIndex; import org.elasticsearch.action.support.DestructiveOperations; @@ -199,7 +200,6 @@ import org.elasticsearch.common.NamedRegistry; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.multibindings.MapBinder; -import org.elasticsearch.common.inject.multibindings.Multibinder; import org.elasticsearch.common.logging.ESLoggerFactory; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.IndexScopedSettings; @@ -271,6 +271,7 @@ import org.elasticsearch.rest.action.admin.indices.RestRefreshAction; import org.elasticsearch.rest.action.admin.indices.RestRolloverIndexAction; import org.elasticsearch.rest.action.admin.indices.RestShrinkIndexAction; +import org.elasticsearch.rest.action.admin.indices.RestSplitIndexAction; import org.elasticsearch.rest.action.admin.indices.RestSyncedFlushAction; import org.elasticsearch.rest.action.admin.indices.RestUpdateSettingsAction; import org.elasticsearch.rest.action.admin.indices.RestUpgradeAction; @@ -324,7 +325,6 @@ import java.util.function.UnaryOperator; import java.util.stream.Collectors; -import static java.util.Collections.unmodifiableList; import static java.util.Collections.unmodifiableMap; /** @@ -438,6 +438,7 @@ public void reg actions.register(IndicesShardStoresAction.INSTANCE, 
TransportIndicesShardStoresAction.class); actions.register(CreateIndexAction.INSTANCE, TransportCreateIndexAction.class); actions.register(ShrinkAction.INSTANCE, TransportShrinkAction.class); + actions.register(ResizeAction.INSTANCE, TransportResizeAction.class); actions.register(RolloverAction.INSTANCE, TransportRolloverAction.class); actions.register(DeleteIndexAction.INSTANCE, TransportDeleteIndexAction.class); actions.register(GetIndexAction.INSTANCE, TransportGetIndexAction.class); @@ -554,6 +555,7 @@ public void initRestHandlers(Supplier nodesInCluster) { registerHandler.accept(new RestIndicesAliasesAction(settings, restController)); registerHandler.accept(new RestCreateIndexAction(settings, restController)); registerHandler.accept(new RestShrinkIndexAction(settings, restController)); + registerHandler.accept(new RestSplitIndexAction(settings, restController)); registerHandler.accept(new RestRolloverIndexAction(settings, restController)); registerHandler.accept(new RestDeleteIndexAction(settings, restController)); registerHandler.accept(new RestCloseIndexAction(settings, restController)); diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java index a2290a5e2556e..1734c340bd4ef 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexClusterStateUpdateRequest.java @@ -20,6 +20,7 @@ package org.elasticsearch.action.admin.indices.create; import org.elasticsearch.action.admin.indices.alias.Alias; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ack.ClusterStateUpdateRequest; import org.elasticsearch.cluster.block.ClusterBlock; @@ -43,7 +44,8 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private final String index; private final String providedName; private final boolean updateAllTypes; - private Index shrinkFrom; + private Index recoverFrom; + private ResizeType resizeType; private IndexMetaData.State state = IndexMetaData.State.OPEN; @@ -59,7 +61,6 @@ public class CreateIndexClusterStateUpdateRequest extends ClusterStateUpdateRequ private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT; - public CreateIndexClusterStateUpdateRequest(TransportMessage originalMessage, String cause, String index, String providedName, boolean updateAllTypes) { this.originalMessage = originalMessage; @@ -99,8 +100,8 @@ public CreateIndexClusterStateUpdateRequest state(IndexMetaData.State state) { return this; } - public CreateIndexClusterStateUpdateRequest shrinkFrom(Index shrinkFrom) { - this.shrinkFrom = shrinkFrom; + public CreateIndexClusterStateUpdateRequest recoverFrom(Index recoverFrom) { + this.recoverFrom = recoverFrom; return this; } @@ -109,6 +110,11 @@ public CreateIndexClusterStateUpdateRequest waitForActiveShards(ActiveShardCount return this; } + public CreateIndexClusterStateUpdateRequest resizeType(ResizeType resizeType) { + this.resizeType = resizeType; + return this; + } + public TransportMessage originalMessage() { return originalMessage; } @@ -145,8 +151,8 @@ public Set blocks() { return blocks; } - public Index shrinkFrom() { - return shrinkFrom; + public Index recoverFrom() { + return recoverFrom; } /** True if all 
fields that span multiple types should be updated, false otherwise */ @@ -165,4 +171,11 @@ public String getProvidedName() { public ActiveShardCount waitForActiveShards() { return waitForActiveShards; } + + /** + * Returns the resize type or null if this is an ordinary create index request + */ + public ResizeType resizeType() { + return resizeType; + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java new file mode 100644 index 0000000000000..9447e0803e2ba --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeAction.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.shrink; + +import org.elasticsearch.Version; +import org.elasticsearch.action.Action; +import org.elasticsearch.client.ElasticsearchClient; + +public class ResizeAction extends Action { + + public static final ResizeAction INSTANCE = new ResizeAction(); + public static final String NAME = "indices:admin/resize"; + public static final Version COMPATIBILITY_VERSION = Version.V_7_0_0_alpha1; // TODO remove this once it's backported + + private ResizeAction() { + super(NAME); + } + + @Override + public ResizeResponse newResponse() { + return new ResizeResponse(); + } + + @Override + public ResizeRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ResizeRequestBuilder(client, this); + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java similarity index 65% rename from core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java index 6ea58200a4500..f2f648f70ffa9 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequest.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequest.java @@ -18,12 +18,14 @@ */ package org.elasticsearch.action.admin.indices.shrink; +import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; @@ -37,37 +39,41 @@ /** * Request class to shrink an index into a single shard */ -public class ShrinkRequest extends AcknowledgedRequest implements IndicesRequest { +public class ResizeRequest extends AcknowledgedRequest implements IndicesRequest { - public static final ObjectParser PARSER = new ObjectParser<>("shrink_request", null); + public static final ObjectParser PARSER = new ObjectParser<>("resize_request", null); static { - PARSER.declareField((parser, request, context) -> request.getShrinkIndexRequest().settings(parser.map()), + PARSER.declareField((parser, request, context) -> request.getTargetIndexRequest().settings(parser.map()), new ParseField("settings"), ObjectParser.ValueType.OBJECT); - PARSER.declareField((parser, request, context) -> request.getShrinkIndexRequest().aliases(parser.map()), + PARSER.declareField((parser, request, context) -> request.getTargetIndexRequest().aliases(parser.map()), new ParseField("aliases"), ObjectParser.ValueType.OBJECT); } - private CreateIndexRequest shrinkIndexRequest; + private CreateIndexRequest targetIndexRequest; private String sourceIndex; + private ResizeType type = ResizeType.SHRINK; - ShrinkRequest() {} + ResizeRequest() {} - public ShrinkRequest(String targetIndex, String sourceindex) { - this.shrinkIndexRequest = new CreateIndexRequest(targetIndex); - this.sourceIndex = sourceindex; + public ResizeRequest(String targetIndex, String sourceIndex) { + this.targetIndexRequest = new CreateIndexRequest(targetIndex); + this.sourceIndex = sourceIndex; } @Override public ActionRequestValidationException validate() { - ActionRequestValidationException validationException = shrinkIndexRequest == null ? null : shrinkIndexRequest.validate(); + ActionRequestValidationException validationException = targetIndexRequest == null ? 
null : targetIndexRequest.validate(); if (sourceIndex == null) { validationException = addValidationError("source index is missing", validationException); } - if (shrinkIndexRequest == null) { - validationException = addValidationError("shrink index request is missing", validationException); + if (targetIndexRequest == null) { + validationException = addValidationError("target index request is missing", validationException); } - if (shrinkIndexRequest.settings().getByPrefix("index.sort.").isEmpty() == false) { - validationException = addValidationError("can't override index sort when shrinking index", validationException); + if (targetIndexRequest.settings().getByPrefix("index.sort.").isEmpty() == false) { + validationException = addValidationError("can't override index sort when resizing an index", validationException); + } + if (type == ResizeType.SPLIT && IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexRequest.settings()) == false) { + validationException = addValidationError("index.number_of_shards is required for split operations", validationException); } return validationException; } @@ -79,16 +85,24 @@ public void setSourceIndex(String index) { @Override public void readFrom(StreamInput in) throws IOException { super.readFrom(in); - shrinkIndexRequest = new CreateIndexRequest(); - shrinkIndexRequest.readFrom(in); + targetIndexRequest = new CreateIndexRequest(); + targetIndexRequest.readFrom(in); sourceIndex = in.readString(); + if (in.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { + type = in.readEnum(ResizeType.class); + } else { + type = ResizeType.SHRINK; // BWC this used to be shrink only + } } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - shrinkIndexRequest.writeTo(out); + targetIndexRequest.writeTo(out); out.writeString(sourceIndex); + if (out.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { + out.writeEnum(type); + } } @Override @@ -101,15 +115,15 @@ public IndicesOptions indicesOptions() { return IndicesOptions.lenientExpandOpen(); } - public void setShrinkIndex(CreateIndexRequest shrinkIndexRequest) { - this.shrinkIndexRequest = Objects.requireNonNull(shrinkIndexRequest, "shrink index request must not be null"); + public void setTargetIndex(CreateIndexRequest targetIndexRequest) { + this.targetIndexRequest = Objects.requireNonNull(targetIndexRequest, "target index request must not be null"); } /** * Returns the {@link CreateIndexRequest} for the shrink index */ - public CreateIndexRequest getShrinkIndexRequest() { - return shrinkIndexRequest; + public CreateIndexRequest getTargetIndexRequest() { + return targetIndexRequest; } /** @@ -128,13 +142,13 @@ public String getSourceIndex() { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to + * to be active before returning. Check {@link ResizeResponse#isShardsAcked()} to * determine if the requisite shard copies were all started before returning or timing out. 
* * @param waitForActiveShards number of active shard copies to wait on */ public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) { - this.getShrinkIndexRequest().waitForActiveShards(waitForActiveShards); + this.getTargetIndexRequest().waitForActiveShards(waitForActiveShards); } /** @@ -145,4 +159,18 @@ public void setWaitForActiveShards(ActiveShardCount waitForActiveShards) { public void setWaitForActiveShards(final int waitForActiveShards) { setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards)); } + + /** + * The type of the resize operation + */ + public void setResizeType(ResizeType type) { + this.type = Objects.requireNonNull(type); + } + + /** + * Returns the type of the resize operation + */ + public ResizeType getResizeType() { + return type; + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequestBuilder.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java similarity index 73% rename from core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequestBuilder.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java index 2bd10397193d5..6d8d98c0d75f0 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkRequestBuilder.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeRequestBuilder.java @@ -18,31 +18,32 @@ */ package org.elasticsearch.action.admin.indices.shrink; +import org.elasticsearch.action.Action; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.client.ElasticsearchClient; import org.elasticsearch.common.settings.Settings; -public class ShrinkRequestBuilder extends AcknowledgedRequestBuilder { - public ShrinkRequestBuilder(ElasticsearchClient client, ShrinkAction action) { - super(client, action, new ShrinkRequest()); +public class ResizeRequestBuilder extends AcknowledgedRequestBuilder { + public ResizeRequestBuilder(ElasticsearchClient client, Action action) { + super(client, action, new ResizeRequest()); } - public ShrinkRequestBuilder setTargetIndex(CreateIndexRequest request) { - this.request.setShrinkIndex(request); + public ResizeRequestBuilder setTargetIndex(CreateIndexRequest request) { + this.request.setTargetIndex(request); return this; } - public ShrinkRequestBuilder setSourceIndex(String index) { + public ResizeRequestBuilder setSourceIndex(String index) { this.request.setSourceIndex(index); return this; } - public ShrinkRequestBuilder setSettings(Settings settings) { - this.request.getShrinkIndexRequest().settings(settings); + public ResizeRequestBuilder setSettings(Settings settings) { + this.request.getTargetIndexRequest().settings(settings); return this; } @@ -55,12 +56,12 @@ public ShrinkRequestBuilder setSettings(Settings settings) { * non-negative integer, up to the number of copies per shard (number of replicas + 1), * to wait for the desired amount of shard copies to become active before returning. * Index creation will only wait up until the timeout value for the number of shard copies - * to be active before returning. Check {@link ShrinkResponse#isShardsAcked()} to + * to be active before returning. Check {@link ResizeResponse#isShardsAcked()} to * determine if the requisite shard copies were all started before returning or timing out. 
* * @param waitForActiveShards number of active shard copies to wait on */ - public ShrinkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) { + public ResizeRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiveShards) { this.request.setWaitForActiveShards(waitForActiveShards); return this; } @@ -70,7 +71,12 @@ public ShrinkRequestBuilder setWaitForActiveShards(ActiveShardCount waitForActiv * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)} * to get the ActiveShardCount. */ - public ShrinkRequestBuilder setWaitForActiveShards(final int waitForActiveShards) { + public ResizeRequestBuilder setWaitForActiveShards(final int waitForActiveShards) { return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards)); } + + public ResizeRequestBuilder setResizeType(ResizeType type) { + this.request.setResizeType(type); + return this; + } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java similarity index 86% rename from core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java rename to core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java index 0c5149f6bf353..cea74ced69cfc 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkResponse.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeResponse.java @@ -21,11 +21,11 @@ import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; -public final class ShrinkResponse extends CreateIndexResponse { - ShrinkResponse() { +public final class ResizeResponse extends CreateIndexResponse { + ResizeResponse() { } - ShrinkResponse(boolean acknowledged, boolean shardsAcked, String index) { + ResizeResponse(boolean acknowledged, boolean shardsAcked, String index) { super(acknowledged, shardsAcked, index); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java new file mode 100644 index 0000000000000..bca386a9567d6 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ResizeType.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.shrink; + +/** + * The type of the resize operation + */ +public enum ResizeType { + SHRINK, SPLIT; +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java index 8b5b4670e3c4d..48c23d643ba4c 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/ShrinkAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.action.Action; import org.elasticsearch.client.ElasticsearchClient; -public class ShrinkAction extends Action { +public class ShrinkAction extends Action { public static final ShrinkAction INSTANCE = new ShrinkAction(); public static final String NAME = "indices:admin/shrink"; @@ -32,12 +32,12 @@ private ShrinkAction() { } @Override - public ShrinkResponse newResponse() { - return new ShrinkResponse(); + public ResizeResponse newResponse() { + return new ResizeResponse(); } @Override - public ShrinkRequestBuilder newRequestBuilder(ElasticsearchClient client) { - return new ShrinkRequestBuilder(client, this); + public ResizeRequestBuilder newRequestBuilder(ElasticsearchClient client) { + return new ResizeRequestBuilder(client, this); } } diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java new file mode 100644 index 0000000000000..87dd9f9fa2d21 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeAction.java @@ -0,0 +1,201 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.action.admin.indices.shrink; + +import org.apache.lucene.index.IndexWriter; +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; +import org.elasticsearch.action.admin.indices.stats.IndexShardStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.DocsStats; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.util.Locale; +import java.util.Objects; +import java.util.Set; +import java.util.function.IntFunction; + +/** + * Main class to initiate resizing (shrink / split) an index into a new index + */ +public class TransportResizeAction extends TransportMasterNodeAction { + private final MetaDataCreateIndexService createIndexService; + private final Client client; + + @Inject + public TransportResizeAction(Settings settings, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, MetaDataCreateIndexService createIndexService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) { + this(settings, ResizeAction.NAME, transportService, clusterService, threadPool, createIndexService, actionFilters, + indexNameExpressionResolver, client); + } + + protected TransportResizeAction(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, + ThreadPool threadPool, MetaDataCreateIndexService createIndexService, + ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) { + super(settings, actionName, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, + ResizeRequest::new); + this.createIndexService = createIndexService; + this.client = client; + } + + + @Override + protected String executor() { + // we go async right away + return ThreadPool.Names.SAME; + } + + @Override + protected ResizeResponse newResponse() { + return new ResizeResponse(); + } + + @Override + protected ClusterBlockException checkBlock(ResizeRequest request, ClusterState state) { + return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getTargetIndexRequest().index()); + } + + @Override + protected void masterOperation(final ResizeRequest resizeRequest, final ClusterState state, + final ActionListener listener) { + + // there is no need to fetch docs stats for split but we keep it simple and do it 
anyway + final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getSourceIndex()); + final String targetIndex = indexNameExpressionResolver.resolveDateMathExpression(resizeRequest.getTargetIndexRequest().index()); + client.admin().indices().prepareStats(sourceIndex).clear().setDocs(true).execute(new ActionListener() { + @Override + public void onResponse(IndicesStatsResponse indicesStatsResponse) { + CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(resizeRequest, state, + (i) -> { + IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); + return shard == null ? null : shard.getPrimary().getDocs(); + }, sourceIndex, targetIndex); + createIndexService.createIndex( + updateRequest, + ActionListener.wrap(response -> + listener.onResponse(new ResizeResponse(response.isAcknowledged(), response.isShardsAcked(), + updateRequest.index())), listener::onFailure + ) + ); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + + } + + // static for unit testing this method + static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ResizeRequest resizeRequest, final ClusterState state + , final IntFunction perShardDocStats, String sourceIndexName, String targetIndexName) { + final CreateIndexRequest targetIndex = resizeRequest.getTargetIndexRequest(); + final IndexMetaData metaData = state.metaData().index(sourceIndexName); + if (metaData == null) { + throw new IndexNotFoundException(sourceIndexName); + } + final Settings targetIndexSettings = Settings.builder().put(targetIndex.settings()) + .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); + final int numShards; + if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { + numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings); + } else { + assert resizeRequest.getResizeType() == ResizeType.SHRINK : "split must specify the number of shards explicitly"; + numShards = 1; + } + + for (int i = 0; i < numShards; i++) { + if (resizeRequest.getResizeType() == ResizeType.SHRINK) { + Set shardIds = IndexMetaData.selectShrinkShards(i, metaData, numShards); + long count = 0; + for (ShardId id : shardIds) { + DocsStats docsStats = perShardDocStats.apply(id.id()); + if (docsStats != null) { + count += docsStats.getCount(); + } + if (count > IndexWriter.MAX_DOCS) { + throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS + + "] docs - too many documents in shards " + shardIds); + } + } + } else { + Objects.requireNonNull(IndexMetaData.selectSplitShard(i, metaData, numShards)); + // we just execute this to ensure we get the right exceptions if the number of shards is wrong, too small, etc.
+ } + } + + if (IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) { + throw new IllegalArgumentException("cannot provide a routing partition size value when resizing an index"); + } + if (IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(targetIndexSettings)) { + throw new IllegalArgumentException("cannot provide index.number_of_routing_shards on resize"); + } + String cause = resizeRequest.getResizeType().name().toLowerCase(Locale.ROOT) + "_index"; + targetIndex.cause(cause); + Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings); + settingsBuilder.put("index.number_of_shards", numShards); + targetIndex.settings(settingsBuilder); + + return new CreateIndexClusterStateUpdateRequest(targetIndex, + cause, targetIndex.index(), targetIndexName, true) + // mappings are updated on the node when the shards are created; this prevents race conditions, since all mappings must be + // applied once we have taken the snapshot. If somebody switched the index back to read/write and added docs in the meantime, + // the target would miss those mappings and end up corrupted and hard to debug + .ackTimeout(targetIndex.timeout()) + .masterNodeTimeout(targetIndex.masterNodeTimeout()) + .settings(targetIndex.settings()) + .aliases(targetIndex.aliases()) + .customs(targetIndex.customs()) + .waitForActiveShards(targetIndex.waitForActiveShards()) + .recoverFrom(metaData.getIndex()) + .resizeType(resizeRequest.getResizeType()); + } + + @Override + protected String getMasterActionName(DiscoveryNode node) { + if (node.getVersion().onOrAfter(ResizeAction.COMPATIBILITY_VERSION)) { + return super.getMasterActionName(node); + } else { + // this is for BWC - when we send this to a version that doesn't have ResizeAction.NAME registered + // we have to send the shrink action name instead. 
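+ // for example (version numbers are illustrative): if COMPATIBILITY_VERSION were 6.1.0, a 6.0.x master would + // only have the shrink action registered, so the resize request is routed to it under the old action name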
+ return ShrinkAction.NAME; + } + } +} diff --git a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java index 2555299709cda..acc88251970f3 100644 --- a/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java +++ b/core/src/main/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkAction.java @@ -19,143 +19,28 @@ package org.elasticsearch.action.admin.indices.shrink; -import org.apache.lucene.index.IndexWriter; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; -import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; -import org.elasticsearch.action.admin.indices.stats.IndexShardStats; -import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; import org.elasticsearch.action.support.ActionFilters; -import org.elasticsearch.action.support.master.TransportMasterNodeAction; import org.elasticsearch.client.Client; -import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.block.ClusterBlockException; -import org.elasticsearch.cluster.block.ClusterBlockLevel; -import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.shard.DocsStats; -import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; -import java.util.Set; -import java.util.function.IntFunction; - /** - * Main class to initiate shrinking an index into a new index with a single shard + * Main class to initiate shrinking an index into a new index + * This class is only here for backwards compatibility. 
It will be replaced by + * TransportResizeAction in 7.x once this is backported */ -public class TransportShrinkAction extends TransportMasterNodeAction { - - private final MetaDataCreateIndexService createIndexService; - private final Client client; +public class TransportShrinkAction extends TransportResizeAction { @Inject public TransportShrinkAction(Settings settings, TransportService transportService, ClusterService clusterService, ThreadPool threadPool, MetaDataCreateIndexService createIndexService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver, Client client) { - super(settings, ShrinkAction.NAME, transportService, clusterService, threadPool, actionFilters, indexNameExpressionResolver, - ShrinkRequest::new); - this.createIndexService = createIndexService; - this.client = client; + super(settings, ShrinkAction.NAME, transportService, clusterService, threadPool, createIndexService, actionFilters, + indexNameExpressionResolver, client); } - - @Override - protected String executor() { - // we go async right away - return ThreadPool.Names.SAME; - } - - @Override - protected ShrinkResponse newResponse() { - return new ShrinkResponse(); - } - - @Override - protected ClusterBlockException checkBlock(ShrinkRequest request, ClusterState state) { - return state.blocks().indexBlockedException(ClusterBlockLevel.METADATA_WRITE, request.getShrinkIndexRequest().index()); - } - - @Override - protected void masterOperation(final ShrinkRequest shrinkRequest, final ClusterState state, - final ActionListener listener) { - final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex()); - client.admin().indices().prepareStats(sourceIndex).clear().setDocs(true).execute(new ActionListener() { - @Override - public void onResponse(IndicesStatsResponse indicesStatsResponse) { - CreateIndexClusterStateUpdateRequest updateRequest = prepareCreateIndexRequest(shrinkRequest, state, - (i) -> { - IndexShardStats shard = indicesStatsResponse.getIndex(sourceIndex).getIndexShards().get(i); - return shard == null ? 
null : shard.getPrimary().getDocs(); - }, indexNameExpressionResolver); - createIndexService.createIndex( - updateRequest, - ActionListener.wrap(response -> - listener.onResponse(new ShrinkResponse(response.isAcknowledged(), response.isShardsAcked(), updateRequest.index())), - listener::onFailure - ) - ); - } - - @Override - public void onFailure(Exception e) { - listener.onFailure(e); - } - }); - - } - - // static for unittesting this method - static CreateIndexClusterStateUpdateRequest prepareCreateIndexRequest(final ShrinkRequest shrinkRequest, final ClusterState state - , final IntFunction perShardDocStats, IndexNameExpressionResolver indexNameExpressionResolver) { - final String sourceIndex = indexNameExpressionResolver.resolveDateMathExpression(shrinkRequest.getSourceIndex()); - final CreateIndexRequest targetIndex = shrinkRequest.getShrinkIndexRequest(); - final String targetIndexName = indexNameExpressionResolver.resolveDateMathExpression(targetIndex.index()); - final IndexMetaData metaData = state.metaData().index(sourceIndex); - final Settings targetIndexSettings = Settings.builder().put(targetIndex.settings()) - .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build(); - int numShards = 1; - if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { - numShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings); - } - for (int i = 0; i < numShards; i++) { - Set shardIds = IndexMetaData.selectShrinkShards(i, metaData, numShards); - long count = 0; - for (ShardId id : shardIds) { - DocsStats docsStats = perShardDocStats.apply(id.id()); - if (docsStats != null) { - count += docsStats.getCount(); - } - if (count > IndexWriter.MAX_DOCS) { - throw new IllegalStateException("Can't merge index with more than [" + IndexWriter.MAX_DOCS - + "] docs - too many documents in shards " + shardIds); - } - } - - } - if (IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING.exists(targetIndexSettings)) { - throw new IllegalArgumentException("cannot provide a routing partition size value when shrinking an index"); - } - targetIndex.cause("shrink_index"); - Settings.Builder settingsBuilder = Settings.builder().put(targetIndexSettings); - settingsBuilder.put("index.number_of_shards", numShards); - targetIndex.settings(settingsBuilder); - - return new CreateIndexClusterStateUpdateRequest(targetIndex, - "shrink_index", targetIndex.index(), targetIndexName, true) - // mappings are updated on the node when merging in the shards, this prevents race-conditions since all mapping must be - // applied once we took the snapshot and if somebody fucks things up and switches the index read/write and adds docs we miss - // the mappings for everything is corrupted and hard to debug - .ackTimeout(targetIndex.timeout()) - .masterNodeTimeout(targetIndex.masterNodeTimeout()) - .settings(targetIndex.settings()) - .aliases(targetIndex.aliases()) - .customs(targetIndex.customs()) - .waitForActiveShards(targetIndex.waitForActiveShards()) - .shrinkFrom(metaData.getIndex()); - } - } diff --git a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java index 8cff2213c2111..feb47aa34fd86 100644 --- a/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java +++ b/core/src/main/java/org/elasticsearch/action/support/master/TransportMasterNodeAction.java @@ -32,6 +32,7 @@ import org.elasticsearch.cluster.NotMasterException; import 
org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; @@ -189,7 +190,10 @@ protected void doRun() throws Exception { logger.debug("no known master node, scheduling a retry"); retry(null, masterChangePredicate); } else { - transportService.sendRequest(nodes.getMasterNode(), actionName, request, new ActionListenerResponseHandler(listener, TransportMasterNodeAction.this::newResponse) { + DiscoveryNode masterNode = nodes.getMasterNode(); + final String actionName = getMasterActionName(masterNode); + transportService.sendRequest(masterNode, actionName, request, new ActionListenerResponseHandler(listener, + TransportMasterNodeAction.this::newResponse) { @Override public void handleException(final TransportException exp) { Throwable cause = exp.unwrapCause(); @@ -229,4 +233,12 @@ public void onTimeout(TimeValue timeout) { ); } } + + /** + * Allows a subclass to conditionally return a different master node action name in case an action gets renamed. + * This is mainly for backwards compatibility and should be used rarely + */ + protected String getMasterActionName(DiscoveryNode node) { + return actionName; + } } diff --git a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java index b254039910c01..81de57f91afee 100644 --- a/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java +++ b/core/src/main/java/org/elasticsearch/client/IndicesAdminClient.java @@ -50,9 +50,6 @@ import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequest; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsRequestBuilder; import org.elasticsearch.action.admin.indices.exists.types.TypesExistsResponse; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequestBuilder; -import org.elasticsearch.action.fieldcaps.FieldCapabilitiesResponse; import org.elasticsearch.action.admin.indices.flush.FlushRequest; import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder; import org.elasticsearch.action.admin.indices.flush.FlushResponse; @@ -98,9 +95,9 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoreRequestBuilder; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; -import org.elasticsearch.action.admin.indices.shrink.ShrinkRequest; -import org.elasticsearch.action.admin.indices.shrink.ShrinkRequestBuilder; -import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; +import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; @@ -792,19 +789,19 @@ public interface IndicesAdminClient extends ElasticsearchClient { GetSettingsRequestBuilder prepareGetSettings(String... 
indices); /** - * Shrinks an index using an explicit request allowing to specify the settings, mappings and aliases of the target index of the index. + * Resizes an index using an explicit request allowing to specify the settings, mappings and aliases of the target index. */ - ShrinkRequestBuilder prepareShrinkIndex(String sourceIndex, String targetIndex); + ResizeRequestBuilder prepareResizeIndex(String sourceIndex, String targetIndex); /** - * Shrinks an index using an explicit request allowing to specify the settings, mappings and aliases of the target index of the index. + * Resizes an index using an explicit request allowing to specify the settings, mappings and aliases of the target index. */ - ActionFuture shrinkIndex(ShrinkRequest request); + ActionFuture resizeIndex(ResizeRequest request); /** * Shrinks an index using an explicit request allowing to specify the settings, mappings and aliases of the target index of the index. */ - void shrinkIndex(ShrinkRequest request, ActionListener listener); + void resizeIndex(ResizeRequest request, ActionListener listener); /** * Swaps the index pointed to by an alias given all provided conditions are satisfied diff --git a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java index c2b813d3d659e..c0da35a307981 100644 --- a/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java +++ b/core/src/main/java/org/elasticsearch/client/support/AbstractClient.java @@ -232,10 +232,10 @@ import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresRequest; import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresResponse; -import org.elasticsearch.action.admin.indices.shrink.ShrinkAction; -import org.elasticsearch.action.admin.indices.shrink.ShrinkRequest; -import org.elasticsearch.action.admin.indices.shrink.ShrinkRequestBuilder; -import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse; +import org.elasticsearch.action.admin.indices.shrink.ResizeAction; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequestBuilder; +import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest; import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequestBuilder; @@ -1730,19 +1730,19 @@ public GetSettingsRequestBuilder prepareGetSettings(String... 
indices) { } @Override - public ShrinkRequestBuilder prepareShrinkIndex(String sourceIndex, String targetIndex) { - return new ShrinkRequestBuilder(this, ShrinkAction.INSTANCE).setSourceIndex(sourceIndex) + public ResizeRequestBuilder prepareResizeIndex(String sourceIndex, String targetIndex) { + return new ResizeRequestBuilder(this, ResizeAction.INSTANCE).setSourceIndex(sourceIndex) .setTargetIndex(new CreateIndexRequest(targetIndex)); } @Override - public ActionFuture shrinkIndex(ShrinkRequest request) { - return execute(ShrinkAction.INSTANCE, request); + public ActionFuture resizeIndex(ResizeRequest request) { + return execute(ResizeAction.INSTANCE, request); } @Override - public void shrinkIndex(ShrinkRequest request, ActionListener listener) { - execute(ShrinkAction.INSTANCE, request, listener); + public void resizeIndex(ResizeRequest request, ActionListener listener) { + execute(ResizeAction.INSTANCE, request, listener); } @Override diff --git a/core/src/main/java/org/elasticsearch/client/transport/TransportProxyClient.java b/core/src/main/java/org/elasticsearch/client/transport/TransportProxyClient.java index 5436bef172a47..e07fab0092d0e 100644 --- a/core/src/main/java/org/elasticsearch/client/transport/TransportProxyClient.java +++ b/core/src/main/java/org/elasticsearch/client/transport/TransportProxyClient.java @@ -56,6 +56,7 @@ final class TransportProxyClient { ActionRequestBuilder> void execute(final Action action, final Request request, ActionListener listener) { final TransportActionNodeProxy proxy = proxies.get(action); + assert proxy != null : "no proxy found for action: " + action; nodesService.execute((n, l) -> proxy.execute(n, request, l), listener); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java index a5b7f422a9322..a4bb6a559254c 100644 --- a/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/core/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -50,6 +50,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ResizeAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; @@ -182,6 +183,7 @@ public static Collection createAllocationDeciders(Settings se // collect deciders by class so that we can detect duplicates Map deciders = new LinkedHashMap<>(); addAllocationDecider(deciders, new MaxRetryAllocationDecider(settings)); + addAllocationDecider(deciders, new ResizeAllocationDecider(settings)); addAllocationDecider(deciders, new ReplicaAfterPrimaryActiveAllocationDecider(settings)); addAllocationDecider(deciders, new RebalanceOnlyWhenActiveAllocationDecider(settings)); addAllocationDecider(deciders, new ClusterRebalanceAllocationDecider(settings, clusterSettings)); diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java index 06f203595b313..3d14670e52771 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/IndexMetaData.java @@ -65,6 +65,7 @@ import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.Locale; import java.util.Map; import java.util.Set; @@ -195,6 +196,24 @@ static Setting buildNumberOfShardsSetting() { public static final Setting INDEX_ROUTING_PARTITION_SIZE_SETTING = Setting.intSetting(SETTING_ROUTING_PARTITION_SIZE, 1, 1, Property.IndexScope); + public static final Setting INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING = + Setting.intSetting("index.number_of_routing_shards", INDEX_NUMBER_OF_SHARDS_SETTING, 1, new Setting.Validator() { + @Override + public void validate(Integer numRoutingShards, Map, Integer> settings) { + Integer numShards = settings.get(INDEX_NUMBER_OF_SHARDS_SETTING); + if (numRoutingShards < numShards) { + throw new IllegalArgumentException("index.number_of_routing_shards [" + numRoutingShards + + "] must be >= index.number_of_shards [" + numShards + "]"); + } + getRoutingFactor(numShards, numRoutingShards); + } + + @Override + public Iterator> settings() { + return Collections.singleton(INDEX_NUMBER_OF_SHARDS_SETTING).iterator(); + } + }, Property.IndexScope); + public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas"; public static final Setting INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING; public static final String SETTING_READ_ONLY = "index.blocks.read_only"; @@ -453,14 +472,22 @@ public MappingMetaData mapping(String mappingType) { return mappings.get(mappingType); } + // we keep the shrink settings for BWC - this can be removed in 8.0 + // we can't remove in 7 since this setting might be baked into an index coming in via a full cluster restart from 6.0 public static final String INDEX_SHRINK_SOURCE_UUID_KEY = "index.shrink.source.uuid"; public static final String INDEX_SHRINK_SOURCE_NAME_KEY = "index.shrink.source.name"; + public static final String INDEX_RESIZE_SOURCE_UUID_KEY = "index.resize.source.uuid"; + public static final String INDEX_RESIZE_SOURCE_NAME_KEY = "index.resize.source.name"; public static final Setting INDEX_SHRINK_SOURCE_UUID = Setting.simpleString(INDEX_SHRINK_SOURCE_UUID_KEY); public static final Setting INDEX_SHRINK_SOURCE_NAME = Setting.simpleString(INDEX_SHRINK_SOURCE_NAME_KEY); - - - public Index getMergeSourceIndex() { - return INDEX_SHRINK_SOURCE_UUID.exists(settings) ? new Index(INDEX_SHRINK_SOURCE_NAME.get(settings), INDEX_SHRINK_SOURCE_UUID.get(settings)) : null; + public static final Setting INDEX_RESIZE_SOURCE_UUID = Setting.simpleString(INDEX_RESIZE_SOURCE_UUID_KEY, + INDEX_SHRINK_SOURCE_UUID); + public static final Setting INDEX_RESIZE_SOURCE_NAME = Setting.simpleString(INDEX_RESIZE_SOURCE_NAME_KEY, + INDEX_SHRINK_SOURCE_NAME); + + public Index getResizeSourceIndex() { + return INDEX_RESIZE_SOURCE_UUID.exists(settings) || INDEX_SHRINK_SOURCE_UUID.exists(settings) + ? 
new Index(INDEX_RESIZE_SOURCE_NAME.get(settings), INDEX_RESIZE_SOURCE_UUID.get(settings)) : null; } /** @@ -1006,7 +1033,6 @@ public IndexMetaData build() { throw new IllegalArgumentException("routing partition size [" + routingPartitionSize + "] should be a positive number" + " less than the number of shards [" + getRoutingNumShards() + "] for [" + index + "]"); } - // fill missing slots in inSyncAllocationIds with empty set if needed and make all entries immutable ImmutableOpenIntMap.Builder> filledInSyncAllocationIds = ImmutableOpenIntMap.builder(); for (int i = 0; i < numberOfShards; i++) { @@ -1293,12 +1319,50 @@ public int getRoutingNumShards() { /** * Returns the routing factor for this index. The default is 1. * - * @see #getRoutingFactor(IndexMetaData, int) for details + * @see #getRoutingFactor(int, int) for details */ public int getRoutingFactor() { return routingFactor; } + /** + * Returns the source shard ID to split the given target shard off + * @param shardId the id of the target shard to split into + * @param sourceIndexMetadata the source index metadata + * @param numTargetShards the total number of shards in the target index + * @return the source shard ID to split off from + */ + public static ShardId selectSplitShard(int shardId, IndexMetaData sourceIndexMetadata, int numTargetShards) { + if (shardId >= numTargetShards) { + throw new IllegalArgumentException("the number of target shards (" + numTargetShards + ") must be greater than the shard id: " + + shardId); + } + int numSourceShards = sourceIndexMetadata.getNumberOfShards(); + if (numSourceShards > numTargetShards) { + throw new IllegalArgumentException("the number of source shards [" + numSourceShards + + "] must be less than the number of target shards [" + numTargetShards + "]"); + } + int routingFactor = getRoutingFactor(numSourceShards, numTargetShards); + // this is just an additional assertion that ensures we are a factor of the routing num shards. + assert getRoutingFactor(numTargetShards, sourceIndexMetadata.getRoutingNumShards()) >= 0; + return new ShardId(sourceIndexMetadata.getIndex(), shardId / routingFactor); + } + + /** + * Selects the source shards for a local shard recovery. This might either be a split or a shrink operation. + * @param shardId the target shard ID to select the source shards for + * @param sourceIndexMetadata the source metadata + * @param numTargetShards the number of target shards + */ + public static Set selectRecoverFromShards(int shardId, IndexMetaData sourceIndexMetadata, int numTargetShards) { + if (sourceIndexMetadata.getNumberOfShards() > numTargetShards) { + return selectShrinkShards(shardId, sourceIndexMetadata, numTargetShards); + } else if (sourceIndexMetadata.getNumberOfShards() < numTargetShards) { + return Collections.singleton(selectSplitShard(shardId, sourceIndexMetadata, numTargetShards)); + } + throw new IllegalArgumentException("can't select recover from shards if both indices have the same number of shards"); + } + /** * Returns the source shard ids to shrink into the given shard id. 
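* For example (illustrative): shrinking 8 source shards into 2 target shards gives a routing factor of 4, so * target shard 0 is built from source shards [0, 1, 2, 3] and target shard 1 from [4, 5, 6, 7]. 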
* @param shardId the id of the target shard to shrink to @@ -1311,7 +1375,11 @@ public static Set selectShrinkShards(int shardId, IndexMetaData sourceI throw new IllegalArgumentException("the number of target shards (" + numTargetShards + ") must be greater than the shard id: " + shardId); } - int routingFactor = getRoutingFactor(sourceIndexMetadata, numTargetShards); + if (sourceIndexMetadata.getNumberOfShards() < numTargetShards) { + throw new IllegalArgumentException("the number of target shards [" + numTargetShards + +"] must be less than the number of source shards [" + sourceIndexMetadata.getNumberOfShards() + "]"); + } + int routingFactor = getRoutingFactor(sourceIndexMetadata.getNumberOfShards(), numTargetShards); Set shards = new HashSet<>(routingFactor); for (int i = shardId * routingFactor; i < routingFactor*shardId + routingFactor; i++) { shards.add(new ShardId(sourceIndexMetadata.getIndex(), i)); @@ -1325,21 +1393,30 @@ public static Set selectShrinkShards(int shardId, IndexMetaData sourceI * {@link org.elasticsearch.cluster.routing.OperationRouting#generateShardId(IndexMetaData, String, String)} to guarantee consistent * hashing / routing of documents even if the number of shards changed (ie. a shrunk index). * - * @param sourceIndexMetadata the metadata of the source index + * @param sourceNumberOfShards the total number of shards in the source index * @param targetNumberOfShards the total number of shards in the target index * @return the routing factor for a shrunk index with the given number of target shards. * @throws IllegalArgumentException if the number of source shards is less than the number of target shards or if the source shards * are not divisible by the number of target shards. */ - public static int getRoutingFactor(IndexMetaData sourceIndexMetadata, int targetNumberOfShards) { - int sourceNumberOfShards = sourceIndexMetadata.getNumberOfShards(); - if (sourceNumberOfShards < targetNumberOfShards) { - throw new IllegalArgumentException("the number of target shards must be less that the number of source shards"); - } - int factor = sourceNumberOfShards / targetNumberOfShards; - if (factor * targetNumberOfShards != sourceNumberOfShards || factor <= 1) { - throw new IllegalArgumentException("the number of source shards [" + sourceNumberOfShards + "] must be a must be a multiple of [" - + targetNumberOfShards + "]"); + public static int getRoutingFactor(int sourceNumberOfShards, int targetNumberOfShards) { + final int factor; + if (sourceNumberOfShards < targetNumberOfShards) { // split + factor = targetNumberOfShards / sourceNumberOfShards; + if (factor * sourceNumberOfShards != targetNumberOfShards || factor <= 1) { + throw new IllegalArgumentException("the number of source shards [" + sourceNumberOfShards + "] must be a " + "factor of [" + + targetNumberOfShards + "]"); + } + } else if (sourceNumberOfShards > targetNumberOfShards) { // shrink + factor = sourceNumberOfShards / targetNumberOfShards; + if (factor * targetNumberOfShards != sourceNumberOfShards || factor <= 1) { + throw new IllegalArgumentException("the number of source shards [" + sourceNumberOfShards + "] must be a " + "multiple of [" + + targetNumberOfShards + "]"); + } + } else { + factor = 1; } return factor; } diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java index 643987862ff2f..49568ab300f03 100644 --- 
a/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java +++ b/core/src/main/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexService.java @@ -31,6 +31,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.action.support.ActiveShardsObserver; import org.elasticsearch.cluster.AckedClusterStateUpdateTask; @@ -116,7 +117,6 @@ public class MetaDataCreateIndexService extends AbstractComponent { private final IndexScopedSettings indexScopedSettings; private final ActiveShardsObserver activeShardsObserver; private final NamedXContentRegistry xContentRegistry; - private final ThreadPool threadPool; @Inject public MetaDataCreateIndexService(Settings settings, ClusterService clusterService, @@ -132,7 +132,6 @@ public MetaDataCreateIndexService(Settings settings, ClusterService clusterServi this.env = env; this.indexScopedSettings = indexScopedSettings; this.activeShardsObserver = new ActiveShardsObserver(settings, clusterService, threadPool); - this.threadPool = threadPool; this.xContentRegistry = xContentRegistry; } @@ -298,9 +297,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { customs.put(entry.getKey(), entry.getValue()); } - final Index shrinkFromIndex = request.shrinkFrom(); + final Index recoverFromIndex = request.recoverFrom(); - if (shrinkFromIndex == null) { + if (recoverFromIndex == null) { // apply templates, merging the mappings into the request mapping if exists for (IndexTemplateMetaData template : templates) { templateNames.add(template.getName()); @@ -351,7 +350,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { } } Settings.Builder indexSettingsBuilder = Settings.builder(); - if (shrinkFromIndex == null) { + if (recoverFromIndex == null) { // apply templates, here, in reverse order, since first ones are better matching for (int i = templates.size() - 1; i >= 0; i--) { indexSettingsBuilder.put(templates.get(i).settings()); @@ -383,28 +382,34 @@ public ClusterState execute(ClusterState currentState) throws Exception { final IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()); final int routingNumShards; - if (shrinkFromIndex == null) { - routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build()); + if (recoverFromIndex == null) { + Settings idxSettings = indexSettingsBuilder.build(); + routingNumShards = IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(idxSettings); } else { - final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex); + assert IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.exists(indexSettingsBuilder.build()) == false + : "index.number_of_routing_shards should not be present on the target index on resize"; + final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(recoverFromIndex); routingNumShards = sourceMetaData.getRoutingNumShards(); } + // remove the setting; it's temporary and only relevant while we create the index + indexSettingsBuilder.remove(IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.getKey()); tmpImdBuilder.setRoutingNumShards(routingNumShards); - if (shrinkFromIndex != null) { - prepareShrinkIndexSettings( - currentState, 
mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, request.index()); + if (recoverFromIndex != null) { + assert request.resizeType() != null; + prepareResizeIndexSettings( + currentState, mappings.keySet(), indexSettingsBuilder, recoverFromIndex, request.index(), request.resizeType()); } final Settings actualIndexSettings = indexSettingsBuilder.build(); tmpImdBuilder.settings(actualIndexSettings); - if (shrinkFromIndex != null) { + if (recoverFromIndex != null) { /* * We need to arrange that the primary term on all the shards in the shrunken index is at least as large as * the maximum primary term on all the shards in the source index. This ensures that we have correct * document-level semantics regarding sequence numbers in the shrunken index. */ - final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex); + final IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(recoverFromIndex); final long primaryTerm = IntStream .range(0, sourceMetaData.getNumberOfShards()) @@ -439,7 +444,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { throw e; } - if (request.shrinkFrom() == null) { + if (request.recoverFrom() == null) { // now that the mapping is merged we can validate the index sort. // we cannot validate for index shrinking since the mapping is empty // at this point. The validation will take place later in the process @@ -606,35 +611,14 @@ List getIndexSettingsValidationErrors(Settings settings) { static List validateShrinkIndex(ClusterState state, String sourceIndex, Set targetIndexMappingsTypes, String targetIndexName, Settings targetIndexSettings) { - if (state.metaData().hasIndex(targetIndexName)) { - throw new ResourceAlreadyExistsException(state.metaData().index(targetIndexName).getIndex()); - } - final IndexMetaData sourceMetaData = state.metaData().index(sourceIndex); - if (sourceMetaData == null) { - throw new IndexNotFoundException(sourceIndex); - } - // ensure index is read-only - if (state.blocks().indexBlocked(ClusterBlockLevel.WRITE, sourceIndex) == false) { - throw new IllegalStateException("index " + sourceIndex + " must be read-only to shrink index. use \"index.blocks.write=true\""); - } + IndexMetaData sourceMetaData = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings); + assert IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings); + IndexMetaData.selectShrinkShards(0, sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); if (sourceMetaData.getNumberOfShards() == 1) { throw new IllegalArgumentException("can't shrink an index with only one shard"); } - - if ((targetIndexMappingsTypes.size() > 1 || - (targetIndexMappingsTypes.isEmpty() || targetIndexMappingsTypes.contains(MapperService.DEFAULT_MAPPING)) == false)) { - throw new IllegalArgumentException("mappings are not allowed when shrinking indices" + - ", all mappings are copied from the source index"); - } - - if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { - // this method applies all necessary checks ie. 
if the target shards are less than the source shards - // of if the source shards are divisible by the number of target shards - IndexMetaData.getRoutingFactor(sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); - } - // now check that index is all on one node final IndexRoutingTable table = state.routingTable().index(sourceIndex); Map nodesToNumRouting = new HashMap<>(); @@ -657,27 +641,82 @@ static List validateShrinkIndex(ClusterState state, String sourceIndex, return nodesToAllocateOn; } - static void prepareShrinkIndexSettings(ClusterState currentState, Set mappingKeys, Settings.Builder indexSettingsBuilder, Index shrinkFromIndex, String shrinkIntoName) { - final IndexMetaData sourceMetaData = currentState.metaData().index(shrinkFromIndex.getName()); + static void validateSplitIndex(ClusterState state, String sourceIndex, + Set targetIndexMappingsTypes, String targetIndexName, + Settings targetIndexSettings) { + IndexMetaData sourceMetaData = validateResize(state, sourceIndex, targetIndexMappingsTypes, targetIndexName, targetIndexSettings); + IndexMetaData.selectSplitShard(0, sourceMetaData, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); + if (sourceMetaData.getCreationVersion().before(Version.V_6_0_0_alpha1)) { + // ensure we have a single type since this would make the splitting code considerably more complex + // and a 5.x index would not be splittable unless it has been shrunk before so rather opt out of the complexity + // since in 5.x we don't have a setting to artificially set the number of routing shards + throw new IllegalStateException("source index created version is too old to apply a split operation"); + } + + } + + static IndexMetaData validateResize(ClusterState state, String sourceIndex, + Set targetIndexMappingsTypes, String targetIndexName, + Settings targetIndexSettings) { + if (state.metaData().hasIndex(targetIndexName)) { + throw new ResourceAlreadyExistsException(state.metaData().index(targetIndexName).getIndex()); + } + final IndexMetaData sourceMetaData = state.metaData().index(sourceIndex); + if (sourceMetaData == null) { + throw new IndexNotFoundException(sourceIndex); + } + // ensure index is read-only + if (state.blocks().indexBlocked(ClusterBlockLevel.WRITE, sourceIndex) == false) { + throw new IllegalStateException("index " + sourceIndex + " must be read-only to resize index. use \"index.blocks.write=true\""); + } + + if ((targetIndexMappingsTypes.size() > 1 || + (targetIndexMappingsTypes.isEmpty() || targetIndexMappingsTypes.contains(MapperService.DEFAULT_MAPPING)) == false)) { + throw new IllegalArgumentException("mappings are not allowed when resizing indices" + + ", all mappings are copied from the source index"); + } + + if (IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.exists(targetIndexSettings)) { + // this method applies all necessary checks ie. 
if the target shards are less than the source shards + // or if the source shards are divisible by the number of target shards + IndexMetaData.getRoutingFactor(sourceMetaData.getNumberOfShards(), + IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(targetIndexSettings)); + } + return sourceMetaData; + } + + static void prepareResizeIndexSettings(ClusterState currentState, Set mappingKeys, Settings.Builder indexSettingsBuilder, + Index resizeSourceIndex, String resizeIntoName, ResizeType type) { + final IndexMetaData sourceMetaData = currentState.metaData().index(resizeSourceIndex.getName()); + if (type == ResizeType.SHRINK) { + final List nodesToAllocateOn = validateShrinkIndex(currentState, resizeSourceIndex.getName(), + mappingKeys, resizeIntoName, indexSettingsBuilder.build()); + indexSettingsBuilder + // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away + // once we are allocated. + .put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id", + Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())) + // we only try once and then give up with a shrink index + .put("index.allocation.max_retries", 1) + // we add the legacy way of specifying it here for BWC. We can remove this once it's backported to 6.x + .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), resizeSourceIndex.getName()) + .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID()); + } else if (type == ResizeType.SPLIT) { + validateSplitIndex(currentState, resizeSourceIndex.getName(), mappingKeys, resizeIntoName, indexSettingsBuilder.build()); + } else { + throw new IllegalStateException("unknown resize type is " + type); + } - final List nodesToAllocateOn = validateShrinkIndex(currentState, shrinkFromIndex.getName(), - mappingKeys, shrinkIntoName, indexSettingsBuilder.build()); final Predicate sourceSettingsPredicate = (s) -> s.startsWith("index.similarity.") || s.startsWith("index.analysis.") || s.startsWith("index.sort."); indexSettingsBuilder - // we use "i.r.a.initial_recovery" rather than "i.r.a.require|include" since we want the replica to allocate right away - // once we are allocated. 
- .put(IndexMetaData.INDEX_ROUTING_INITIAL_RECOVERY_GROUP_SETTING.getKey() + "_id", - Strings.arrayToCommaDelimitedString(nodesToAllocateOn.toArray())) - // we only try once and then give up with a shrink index - .put("index.allocation.max_retries", 1) // now copy all similarity / analysis / sort settings - this overrides all settings from the user unless they // wanna add extra settings .put(IndexMetaData.SETTING_VERSION_CREATED, sourceMetaData.getCreationVersion()) .put(IndexMetaData.SETTING_VERSION_UPGRADED, sourceMetaData.getUpgradedVersion()) .put(sourceMetaData.getSettings().filter(sourceSettingsPredicate)) .put(IndexMetaData.SETTING_ROUTING_PARTITION_SIZE, sourceMetaData.getRoutingPartitionSize()) - .put(IndexMetaData.INDEX_SHRINK_SOURCE_NAME.getKey(), shrinkFromIndex.getName()) - .put(IndexMetaData.INDEX_SHRINK_SOURCE_UUID.getKey(), shrinkFromIndex.getUUID()); + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), resizeSourceIndex.getName()) + .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID.getKey(), resizeSourceIndex.getUUID()); } } diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java index 5a0bd0d426313..5a4e0c78414dd 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java @@ -411,7 +411,7 @@ private Builder initializeEmpty(IndexMetaData indexMetaData, UnassignedInfo unas if (indexMetaData.inSyncAllocationIds(shardNumber).isEmpty() == false) { // we have previous valid copies for this shard. use them for recovery primaryRecoverySource = StoreRecoverySource.EXISTING_STORE_INSTANCE; - } else if (indexMetaData.getMergeSourceIndex() != null) { + } else if (indexMetaData.getResizeSourceIndex() != null) { // this is a new index but the initial shards should merged from another index primaryRecoverySource = LocalShardsRecoverySource.INSTANCE; } else { diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java index 87adb55704a25..005600ceb4431 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/OperationRouting.java @@ -259,7 +259,7 @@ public ShardId shardId(ClusterState clusterState, String index, String id, @Null return new ShardId(indexMetaData.getIndex(), generateShardId(indexMetaData, id, routing)); } - static int generateShardId(IndexMetaData indexMetaData, @Nullable String id, @Nullable String routing) { + public static int generateShardId(IndexMetaData indexMetaData, @Nullable String id, @Nullable String routing) { final String effectiveRouting; final int partitionOffset; diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java index 56663be1ef427..2a323af5f8435 100644 --- a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java @@ -403,14 +403,14 @@ private Decision earlyTerminate(RoutingAllocation allocation, ImmutableOpenMap shardIds = IndexMetaData.selectShrinkShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards()); + final Set shardIds = 
IndexMetaData.selectRecoverFromShards(shard.id(), sourceIndexMeta, metaData.getNumberOfShards()); for (IndexShardRoutingTable shardRoutingTable : allocation.routingTable().index(mergeSourceIndex.getName())) { if (shardIds.contains(shardRoutingTable.shardId())) { targetShardSize += info.getShardSize(shardRoutingTable.primaryShard(), 0); diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java new file mode 100644 index 0000000000000..a0ebf7ddba923 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/ResizeAllocationDecider.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.cluster.routing.allocation.decider; + +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.shrink.ResizeAction; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.shard.ShardId; + +import java.util.Set; + +/** + * An allocation decider that ensures we allocate the shards of a target index for resize operations next to the source primaries + */ +public class ResizeAllocationDecider extends AllocationDecider { + + public static final String NAME = "resize"; + + /** + * Initializes a new {@link ResizeAllocationDecider} + * + * @param settings {@link Settings} used by this {@link AllocationDecider} + */ + public ResizeAllocationDecider(Settings settings) { + super(settings); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocation) { + return canAllocate(shardRouting, null, allocation); + } + + @Override + public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); + if (unassignedInfo != null && shardRouting.recoverySource().getType() == RecoverySource.Type.LOCAL_SHARDS) { + // we only make decisions here if we have an unassigned info and we have to recover from another index ie. 
split / shrink + final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index()); + Index resizeSourceIndex = indexMetaData.getResizeSourceIndex(); + assert resizeSourceIndex != null; + if (allocation.metaData().index(resizeSourceIndex) == null) { + return allocation.decision(Decision.NO, NAME, "resize source index [%s] does not exist", resizeSourceIndex.toString()); + } + IndexMetaData sourceIndexMetaData = allocation.metaData().getIndexSafe(resizeSourceIndex); + if (indexMetaData.getNumberOfShards() < sourceIndexMetaData.getNumberOfShards()) { + // so far this decider only constrains splits; shrink placement is already enforced via the initial_recovery setting + return Decision.ALWAYS; + } + + ShardId shardId = IndexMetaData.selectSplitShard(shardRouting.id(), sourceIndexMetaData, indexMetaData.getNumberOfShards()); + ShardRouting sourceShardRouting = allocation.routingNodes().activePrimary(shardId); + if (sourceShardRouting == null) { + return allocation.decision(Decision.NO, NAME, "source primary shard [%s] is not active", shardId); + } + if (node != null) { // we might get called from the two-argument canAllocate method + if (node.node().getVersion().before(ResizeAction.COMPATIBILITY_VERSION)) { + return allocation.decision(Decision.NO, NAME, "node [%s] is too old to split a shard", node.nodeId()); + } + if (sourceShardRouting.currentNodeId().equals(node.nodeId())) { + return allocation.decision(Decision.YES, NAME, "source primary is allocated on this node"); + } else { + return allocation.decision(Decision.NO, NAME, "source primary is allocated on another node"); + } + } else { + return allocation.decision(Decision.YES, NAME, "source primary is active"); + } + } + return super.canAllocate(shardRouting, node, allocation); + } + + @Override + public Decision canForceAllocatePrimary(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + assert shardRouting.primary() : "must not call canForceAllocatePrimary on a non-primary shard " + shardRouting; + return canAllocate(shardRouting, node, allocation); + } +} diff --git a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 235300b8267f6..b37fbb0dce65c 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/core/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -70,6 +70,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING, IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING, IndexMetaData.INDEX_ROUTING_PARTITION_SIZE_SETTING, + IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING, IndexMetaData.INDEX_READ_ONLY_SETTING, IndexMetaData.INDEX_BLOCKS_READ_SETTING, IndexMetaData.INDEX_BLOCKS_WRITE_SETTING, @@ -197,6 +198,8 @@ protected boolean isPrivateSetting(String key) { case MergePolicyConfig.INDEX_MERGE_ENABLED: case IndexMetaData.INDEX_SHRINK_SOURCE_UUID_KEY: case IndexMetaData.INDEX_SHRINK_SOURCE_NAME_KEY: + case IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY: + case IndexMetaData.INDEX_RESIZE_SOURCE_NAME_KEY: case IndexSettings.INDEX_MAPPING_SINGLE_TYPE_SETTING_KEY: // this was settable in 5.x but not anymore in 6.x so we have to preserve the value ie. 
make it read-only // this can be removed in later versions diff --git a/core/src/main/java/org/elasticsearch/common/settings/Setting.java b/core/src/main/java/org/elasticsearch/common/settings/Setting.java index f35df27e3b338..9b99e67c8c4da 100644 --- a/core/src/main/java/org/elasticsearch/common/settings/Setting.java +++ b/core/src/main/java/org/elasticsearch/common/settings/Setting.java @@ -51,6 +51,7 @@ import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.IntConsumer; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; @@ -907,6 +908,12 @@ public static Setting intSetting(String key, Setting fallbackS return new Setting<>(key, fallbackSetting, (s) -> parseInt(s, minValue, key), properties); } + public static Setting intSetting(String key, Setting fallbackSetting, int minValue, Validator validator, + Property... properties) { + return new Setting<>(new SimpleKey(key), fallbackSetting, fallbackSetting::getRaw, (s) -> parseInt(s, minValue, key),validator, + properties); + } + public static Setting longSetting(String key, long defaultValue, long minValue, Property... properties) { return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), properties); } @@ -915,6 +922,10 @@ public static Setting simpleString(String key, Property... properties) { return new Setting<>(key, s -> "", Function.identity(), properties); } + public static Setting simpleString(String key, Setting fallback, Property... properties) { + return new Setting<>(key, fallback, Function.identity(), properties); + } + public static Setting simpleString(String key, Validator validator, Property... properties) { return new Setting<>(new SimpleKey(key), null, s -> "", Function.identity(), validator, properties); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java index fc47c71573c1f..f2aab70e81920 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -139,6 +139,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.Locale; @@ -1996,25 +1997,32 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService break; case LOCAL_SHARDS: final IndexMetaData indexMetaData = indexSettings().getIndexMetaData(); - final Index mergeSourceIndex = indexMetaData.getMergeSourceIndex(); + final Index resizeSourceIndex = indexMetaData.getResizeSourceIndex(); final List startedShards = new ArrayList<>(); - final IndexService sourceIndexService = indicesService.indexService(mergeSourceIndex); - final int numShards = sourceIndexService != null ? 
sourceIndexService.getIndexSettings().getNumberOfShards() : -1; + final IndexService sourceIndexService = indicesService.indexService(resizeSourceIndex); + final Set requiredShards; + final int numShards; if (sourceIndexService != null) { + requiredShards = IndexMetaData.selectRecoverFromShards(shardId().id(), + sourceIndexService.getMetaData(), indexMetaData.getNumberOfShards()); for (IndexShard shard : sourceIndexService) { - if (shard.state() == IndexShardState.STARTED) { + if (shard.state() == IndexShardState.STARTED && requiredShards.contains(shard.shardId())) { startedShards.add(shard); } } + numShards = requiredShards.size(); + } else { + numShards = -1; + requiredShards = Collections.emptySet(); } + if (numShards == startedShards.size()) { + assert requiredShards.isEmpty() == false; markAsRecovering("from local shards", recoveryState); // mark the shard as recovering on the cluster state thread threadPool.generic().execute(() -> { try { - final Set shards = IndexMetaData.selectShrinkShards(shardId().id(), sourceIndexService.getMetaData(), - +indexMetaData.getNumberOfShards()); if (recoverFromLocalShards(mappingUpdateConsumer, startedShards.stream() - .filter((s) -> shards.contains(s.shardId())).collect(Collectors.toList()))) { + .filter((s) -> requiredShards.contains(s.shardId())).collect(Collectors.toList()))) { recoveryListener.onRecoveryDone(recoveryState); } } catch (Exception e) { @@ -2025,9 +2033,9 @@ public void startRecovery(RecoveryState recoveryState, PeerRecoveryTargetService } else { final RuntimeException e; if (numShards == -1) { - e = new IndexNotFoundException(mergeSourceIndex); + e = new IndexNotFoundException(resizeSourceIndex); } else { - e = new IllegalStateException("not all shards from index " + mergeSourceIndex + e = new IllegalStateException("not all required shards of index " + resizeSourceIndex + " are started yet, expected " + numShards + " found " + startedShards.size() + " can't recover shard " + shardId()); } diff --git a/core/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java b/core/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java new file mode 100644 index 0000000000000..94aee085175a0 --- /dev/null +++ b/core/src/main/java/org/elasticsearch/index/shard/ShardSplittingQuery.java @@ -0,0 +1,245 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.shard; + +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.PostingsEnum; +import org.apache.lucene.index.StoredFieldVisitor; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; +import org.apache.lucene.search.ConstantScoreScorer; +import org.apache.lucene.search.ConstantScoreWeight; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.TwoPhaseIterator; +import org.apache.lucene.search.Weight; +import org.apache.lucene.util.BitSetIterator; +import org.apache.lucene.util.Bits; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.FixedBitSet; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.Uid; + +import java.io.IOException; +import java.util.function.IntConsumer; +import java.util.function.Predicate; + +/** + * A query that selects all docs that do NOT belong to the current shard this query is executed on. + * It can be used to split a shard into N shards, marking every document that doesn't belong to the target shard + * as deleted. See {@link org.apache.lucene.index.IndexWriter#deleteDocuments(Query...)} + */ +final class ShardSplittingQuery extends Query { + private final IndexMetaData indexMetaData; + private final int shardId; + + ShardSplittingQuery(IndexMetaData indexMetaData, int shardId) { + if (indexMetaData.getCreationVersion().before(Version.V_6_0_0_rc2)) { + throw new IllegalArgumentException("Splitting query can only be executed on an index created with version " + + Version.V_6_0_0_rc2 + " or higher"); + } + this.indexMetaData = indexMetaData; + this.shardId = shardId; + } + + @Override + public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) { + return new ConstantScoreWeight(this, boost) { + @Override + public String toString() { + return "weight(delete docs query)"; + } + + @Override + public Scorer scorer(LeafReaderContext context) throws IOException { + LeafReader leafReader = context.reader(); + FixedBitSet bitSet = new FixedBitSet(leafReader.maxDoc()); + Terms terms = leafReader.terms(RoutingFieldMapper.NAME); + Predicate<BytesRef> includeInShard = ref -> { + int targetShardId = OperationRouting.generateShardId(indexMetaData, + Uid.decodeId(ref.bytes, ref.offset, ref.length), null); + return shardId == targetShardId; + }; + if (terms == null) { // this is the common case - no partitioning and no _routing values + assert indexMetaData.isRoutingPartitionedIndex() == false; + findSplitDocs(IdFieldMapper.NAME, includeInShard, leafReader, bitSet::set); + } else { + if (indexMetaData.isRoutingPartitionedIndex()) { + // this is the heaviest case: we have to visit every doc's stored fields to extract _id and _routing + // because this index is routing partitioned.
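Shard membership, which this query tests per document, is plain arithmetic: the routing key (the _routing value if present, otherwise the _id) is hashed, reduced modulo the number of routing shards, and scaled down by the routing factor. A toy model of that membership test, with String.hashCode standing in for Elasticsearch's Murmur3HashFunction and routing partitioning ignored:

// Toy model of split-time shard membership; String.hashCode stands in for the
// Murmur3 hash used by OperationRouting, and routing partitioning is ignored.
final class ShardMembershipSketch {
    static int targetShard(String routingKey, int routingNumShards, int numShards) {
        int routingFactor = routingNumShards / numShards; // counts are validated to divide evenly
        return Math.floorMod(routingKey.hashCode(), routingNumShards) / routingFactor;
    }

    public static void main(String[] args) {
        // splitting a 2-shard index (4 routing shards) into 4 shards: every doc
        // lands in exactly one target shard and is deleted from all the others
        for (String id : new String[] {"a", "b", "c", "d"}) {
            System.out.printf("%s: shard %d -> shard %d%n", id, targetShard(id, 4, 2), targetShard(id, 4, 4));
        }
    }
}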
+ Visitor visitor = new Visitor(); + return new ConstantScoreScorer(this, score(), + new RoutingPartitionedDocIdSetIterator(leafReader, visitor)); + } else { + // in the _routing case we first go and find all docs that have a routing value and mark the ones we have to delete + findSplitDocs(RoutingFieldMapper.NAME, ref -> { + int targetShardId = OperationRouting.generateShardId(indexMetaData, null, ref.utf8ToString()); + return shardId == targetShardId; + }, leafReader, bitSet::set); + // now if we have a mixed index where some docs have a _routing value and some don't we have to exclude the ones + // with a routing value from the next iteration and delete / select based on the ID. + if (terms.getDocCount() != leafReader.maxDoc()) { + // this is a special case where some of the docs have no routing values; unfortunate, but still possible today + FixedBitSet hasRoutingValue = new FixedBitSet(leafReader.maxDoc()); + findSplitDocs(RoutingFieldMapper.NAME, ref -> false, leafReader, + hasRoutingValue::set); + findSplitDocs(IdFieldMapper.NAME, includeInShard, leafReader, docId -> { + if (hasRoutingValue.get(docId) == false) { + bitSet.set(docId); + } + }); + } + } + } + return new ConstantScoreScorer(this, score(), new BitSetIterator(bitSet, bitSet.length())); + } + + }; + } + + @Override + public String toString(String field) { + return "shard_splitting_query"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ShardSplittingQuery that = (ShardSplittingQuery) o; + + if (shardId != that.shardId) return false; + return indexMetaData.equals(that.indexMetaData); + } + + @Override + public int hashCode() { + int result = indexMetaData.hashCode(); + result = 31 * result + shardId; + return classHash() ^ result; + } + + private static void findSplitDocs(String idField, Predicate<BytesRef> includeInShard, + LeafReader leafReader, IntConsumer consumer) throws IOException { + Terms terms = leafReader.terms(idField); + TermsEnum iterator = terms.iterator(); + BytesRef idTerm; + PostingsEnum postingsEnum = null; + while ((idTerm = iterator.next()) != null) { + if (includeInShard.test(idTerm) == false) { + postingsEnum = iterator.postings(postingsEnum); + int doc; + while ((doc = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + consumer.accept(doc); + } + } + } + } + + private static final class Visitor extends StoredFieldVisitor { + int leftToVisit = 2; + final BytesRef spare = new BytesRef(); + String routing; + String id; + + void reset() { + routing = id = null; + leftToVisit = 2; + } + + @Override + public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { + switch (fieldInfo.name) { + case IdFieldMapper.NAME: + id = Uid.decodeId(value); + break; + default: + throw new IllegalStateException("Unexpected field: " + fieldInfo.name); + } + } + + @Override + public void stringField(FieldInfo fieldInfo, byte[] value) throws IOException { + spare.bytes = value; + spare.offset = 0; + spare.length = value.length; + switch (fieldInfo.name) { + case RoutingFieldMapper.NAME: + routing = spare.utf8ToString(); + break; + default: + throw new IllegalStateException("Unexpected field: " + fieldInfo.name); + } + } + + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + // we don't support 5.x so no need for the uid field + switch (fieldInfo.name) { + case IdFieldMapper.NAME: + case RoutingFieldMapper.NAME: + leftToVisit--; + return Status.YES; + default: + return
leftToVisit == 0 ? Status.STOP : Status.NO; + } + } + + /** + * This two-phase iterator visits every live doc and selects all docs that don't belong to this + * shard based on their id and routing value. This is only used for a routing partitioned index. + */ + private final class RoutingPartitionedDocIdSetIterator extends TwoPhaseIterator { + private final LeafReader leafReader; + private final Visitor visitor; + + RoutingPartitionedDocIdSetIterator(LeafReader leafReader, Visitor visitor) { + super(DocIdSetIterator.all(leafReader.maxDoc())); // we iterate all live-docs + this.leafReader = leafReader; + this.visitor = visitor; + } + + @Override + public boolean matches() throws IOException { + int doc = approximation.docID(); + visitor.reset(); + leafReader.document(doc, visitor); + int targetShardId = OperationRouting.generateShardId(indexMetaData, visitor.id, visitor.routing); + return targetShardId != shardId; + } + + @Override + public float matchCost() { + return 42; // arbitrary but non-trivial: matches() loads stored fields for every doc, which is expensive + } + } +} + + diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index e5053fc7882e0..b59ab14961769 100644 --- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -31,6 +31,7 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetaData; import org.elasticsearch.cluster.metadata.MappingMetaData; import org.elasticsearch.cluster.routing.RecoverySource; @@ -107,13 +108,16 @@ boolean recoverFromLocalShards(BiConsumer<String, MappingMetaData> mappingUpdate if (indices.size() > 1) { throw new IllegalArgumentException("can't add shards from more than one index"); } - IndexMetaData indexMetaData = shards.get(0).getIndexMetaData(); - for (ObjectObjectCursor<String, MappingMetaData> mapping : indexMetaData.getMappings()) { + IndexMetaData sourceMetaData = shards.get(0).getIndexMetaData(); + for (ObjectObjectCursor<String, MappingMetaData> mapping : sourceMetaData.getMappings()) { + mappingUpdateConsumer.accept(mapping.key, mapping.value); } - indexShard.mapperService().merge(indexMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); + indexShard.mapperService().merge(sourceMetaData, MapperService.MergeReason.MAPPING_RECOVERY, true); // now that the mapping is merged we can validate the index sort configuration.
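The StoreRecovery change below reduces a split to Lucene's add-then-delete pattern: pull every source segment into the target directory, then delete the documents that do not belong to the target shard. A minimal, self-contained sketch of that pattern; the directories and the query are placeholders rather than the actual recovery wiring:

// Minimal sketch of the add-then-delete pattern (placeholder inputs, simplified config).
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.search.Query;
import org.apache.lucene.store.Directory;

final class AddThenDeleteSketch {
    static void recoverSplitTarget(Directory target, Directory[] sources, Query nonLocalDocs) throws Exception {
        IndexWriterConfig iwc = new IndexWriterConfig(null); // no analyzer needed to copy segments
        try (IndexWriter writer = new IndexWriter(target, iwc)) {
            writer.addIndexes(sources);           // hard-link or copy segments from every source shard
            writer.deleteDocuments(nonLocalDocs); // e.g. a ShardSplittingQuery for this shard id
            writer.commit();
        }
    }
}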
Sort indexSort = indexShard.getIndexSort(); + final boolean isSplit = sourceMetaData.getNumberOfShards() < indexShard.indexSettings().getNumberOfShards(); + assert isSplit == false || sourceMetaData.getCreationVersion().onOrAfter(Version.V_6_0_0_alpha1) : "for split we require a " + + "single type but the index is created before 6.0.0"; return executeRecovery(indexShard, () -> { logger.debug("starting recovery from local shards {}", shards); try { @@ -122,7 +126,8 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate final long maxSeqNo = shards.stream().mapToLong(LocalShardSnapshot::maxSeqNo).max().getAsLong(); final long maxUnsafeAutoIdTimestamp = shards.stream().mapToLong(LocalShardSnapshot::maxUnsafeAutoIdTimestamp).max().getAsLong(); - addIndices(indexShard.recoveryState().getIndex(), directory, indexSort, sources, maxSeqNo, maxUnsafeAutoIdTimestamp); + addIndices(indexShard.recoveryState().getIndex(), directory, indexSort, sources, maxSeqNo, maxUnsafeAutoIdTimestamp, + indexShard.indexSettings().getIndexMetaData(), indexShard.shardId().id(), isSplit); internalRecoverFromStore(indexShard); // just trigger a merge to do housekeeping on the // copied segments - we will also see them in stats etc. @@ -136,13 +141,9 @@ boolean recoverFromLocalShards(BiConsumer mappingUpdate return false; } - void addIndices( - final RecoveryState.Index indexRecoveryStats, - final Directory target, - final Sort indexSort, - final Directory[] sources, - final long maxSeqNo, - final long maxUnsafeAutoIdTimestamp) throws IOException { + void addIndices(final RecoveryState.Index indexRecoveryStats, final Directory target, final Sort indexSort, final Directory[] sources, + final long maxSeqNo, final long maxUnsafeAutoIdTimestamp, IndexMetaData indexMetaData, int shardId, boolean split) + throws IOException { final Directory hardLinkOrCopyTarget = new org.apache.lucene.store.HardlinkCopyDirectoryWrapper(target); IndexWriterConfig iwc = new IndexWriterConfig(null) .setCommitOnClose(false) @@ -154,8 +155,13 @@ void addIndices( if (indexSort != null) { iwc.setIndexSort(indexSort); } + try (IndexWriter writer = new IndexWriter(new StatsDirectoryWrapper(hardLinkOrCopyTarget, indexRecoveryStats), iwc)) { writer.addIndexes(sources); + + if (split) { + writer.deleteDocuments(new ShardSplittingQuery(indexMetaData, shardId)); + } /* * We set the maximum sequence number and the local checkpoint on the target to the maximum of the maximum sequence numbers on * the source shards. 
This ensures that history after this maximum sequence number can advance and we have correct @@ -272,7 +278,7 @@ private boolean canRecover(IndexShard indexShard) { // got closed on us, just ignore this recovery return false; } - if (!indexShard.routingEntry().primary()) { + if (indexShard.routingEntry().primary() == false) { throw new IndexShardRecoveryException(shardId, "Trying to recover when the shard is in backup state", null); } return true; diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java index 10b46be6760bb..a0071d70758af 100644 --- a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestShrinkIndexAction.java @@ -19,8 +19,9 @@ package org.elasticsearch.rest.action.admin.indices; -import org.elasticsearch.action.admin.indices.shrink.ShrinkRequest; -import org.elasticsearch.action.admin.indices.shrink.ShrinkResponse; +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.client.node.NodeClient; import org.elasticsearch.common.settings.Settings; @@ -52,14 +53,15 @@ public RestChannelConsumer prepareRequest(final RestRequest request, final NodeC if (request.param("index") == null) { throw new IllegalArgumentException("no source index"); } - ShrinkRequest shrinkIndexRequest = new ShrinkRequest(request.param("target"), request.param("index")); - request.applyContentParser(parser -> ShrinkRequest.PARSER.parse(parser, shrinkIndexRequest, null)); + ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); + shrinkIndexRequest.setResizeType(ResizeType.SHRINK); + request.applyContentParser(parser -> ResizeRequest.PARSER.parse(parser, shrinkIndexRequest, null)); shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); - return channel -> client.admin().indices().shrinkIndex(shrinkIndexRequest, new AcknowledgedRestListener(channel) { + return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new AcknowledgedRestListener(channel) { @Override - public void addCustomFields(XContentBuilder builder, ShrinkResponse response) throws IOException { + public void addCustomFields(XContentBuilder builder, ResizeResponse response) throws IOException { response.addCustomFields(builder); } }); diff --git a/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java new file mode 100644 index 0000000000000..dcc811bd0177b --- /dev/null +++ b/core/src/main/java/org/elasticsearch/rest/action/admin/indices/RestSplitIndexAction.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.rest.action.admin.indices; + +import org.elasticsearch.action.admin.indices.shrink.ResizeRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeResponse; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.support.ActiveShardCount; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.AcknowledgedRestListener; + +import java.io.IOException; + +public class RestSplitIndexAction extends BaseRestHandler { + public RestSplitIndexAction(Settings settings, RestController controller) { + super(settings); + controller.registerHandler(RestRequest.Method.PUT, "/{index}/_split/{target}", this); + controller.registerHandler(RestRequest.Method.POST, "/{index}/_split/{target}", this); + } + + @Override + public String getName() { + return "split_index_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException { + if (request.param("target") == null) { + throw new IllegalArgumentException("no target index"); + } + if (request.param("index") == null) { + throw new IllegalArgumentException("no source index"); + } + ResizeRequest shrinkIndexRequest = new ResizeRequest(request.param("target"), request.param("index")); + shrinkIndexRequest.setResizeType(ResizeType.SPLIT); + request.applyContentParser(parser -> ResizeRequest.PARSER.parse(parser, shrinkIndexRequest, null)); + shrinkIndexRequest.timeout(request.paramAsTime("timeout", shrinkIndexRequest.timeout())); + shrinkIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", shrinkIndexRequest.masterNodeTimeout())); + shrinkIndexRequest.setWaitForActiveShards(ActiveShardCount.parseString(request.param("wait_for_active_shards"))); + return channel -> client.admin().indices().resizeIndex(shrinkIndexRequest, new AcknowledgedRestListener(channel) { + @Override + public void addCustomFields(XContentBuilder builder, ResizeResponse response) throws IOException { + response.addCustomFields(builder); + } + }); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java index 3c2e10d181b58..982b9456b8cdf 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/create/ShrinkIndexIT.java @@ -105,7 +105,7 @@ public void testCreateShrinkIndexToN() { .put("index.blocks.write", true)).get(); ensureGreen(); // now merge source into a 4 shard index - 
assertAcked(client().admin().indices().prepareShrinkIndex("source", "first_shrink") + assertAcked(client().admin().indices().prepareResizeIndex("source", "first_shrink") .setSettings(Settings.builder() .put("index.number_of_replicas", 0) .put("index.number_of_shards", shardSplits[1]).build()).get()); @@ -127,7 +127,7 @@ public void testCreateShrinkIndexToN() { .put("index.blocks.write", true)).get(); ensureGreen(); // now merge source into a 2 shard index - assertAcked(client().admin().indices().prepareShrinkIndex("first_shrink", "second_shrink") + assertAcked(client().admin().indices().prepareResizeIndex("first_shrink", "second_shrink") .setSettings(Settings.builder() .put("index.number_of_replicas", 0) .put("index.number_of_shards", shardSplits[2]).build()).get()); @@ -211,7 +211,7 @@ public void testShrinkIndexPrimaryTerm() throws Exception { // now merge source into target final Settings shrinkSettings = Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); - assertAcked(client().admin().indices().prepareShrinkIndex("source", "target").setSettings(shrinkSettings).get()); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target").setSettings(shrinkSettings).get()); ensureGreen(); @@ -264,7 +264,7 @@ public void testCreateShrinkIndex() { // now merge source into a single shard index final boolean createWithReplicas = randomBoolean(); - assertAcked(client().admin().indices().prepareShrinkIndex("source", "target") + assertAcked(client().admin().indices().prepareResizeIndex("source", "target") .setSettings(Settings.builder().put("index.number_of_replicas", createWithReplicas ? 1 : 0).build()).get()); ensureGreen(); @@ -350,7 +350,7 @@ public void testCreateShrinkIndexFails() throws Exception { ensureGreen(); // now merge source into a single shard index - client().admin().indices().prepareShrinkIndex("source", "target") + client().admin().indices().prepareResizeIndex("source", "target") .setWaitForActiveShards(ActiveShardCount.NONE) .setSettings(Settings.builder() .put("index.routing.allocation.exclude._name", mergeNode) // we manually exclude the merge node to forcefully break it @@ -436,16 +436,16 @@ public void testCreateShrinkWithIndexSort() throws Exception { // check that index sort cannot be set on the target index IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, - () -> client().admin().indices().prepareShrinkIndex("source", "target") + () -> client().admin().indices().prepareResizeIndex("source", "target") .setSettings(Settings.builder() .put("index.number_of_replicas", 0) .put("index.number_of_shards", "2") .put("index.sort.field", "foo") .build()).get()); - assertThat(exc.getMessage(), containsString("can't override index sort when shrinking index")); + assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); // check that the index sort order of `source` is correctly applied to the `target` - assertAcked(client().admin().indices().prepareShrinkIndex("source", "target") + assertAcked(client().admin().indices().prepareResizeIndex("source", "target") .setSettings(Settings.builder() .put("index.number_of_replicas", 0) .put("index.number_of_shards", "2").build()).get()); diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java b/core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java new file mode 100644 index 0000000000000..8f24edf8577e4 --- /dev/null +++
b/core/src/test/java/org/elasticsearch/action/admin/indices/create/SplitIndexIT.java @@ -0,0 +1,462 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.action.admin.indices.create; + +import org.apache.lucene.search.Sort; +import org.apache.lucene.search.SortField; +import org.apache.lucene.search.SortedSetSelector; +import org.apache.lucene.search.SortedSetSortField; +import org.elasticsearch.Version; +import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; +import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; +import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; +import org.elasticsearch.action.admin.indices.stats.CommonStats; +import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse; +import org.elasticsearch.action.admin.indices.stats.ShardStats; +import org.elasticsearch.action.get.GetResponse; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.index.IndexRequestBuilder; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.routing.Murmur3HashFunction; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; +import org.elasticsearch.common.collect.ImmutableOpenMap; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexService; +import org.elasticsearch.index.engine.SegmentsStats; +import org.elasticsearch.index.query.TermsQueryBuilder; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.test.InternalSettingsPlugin; +import org.elasticsearch.test.VersionUtils; + +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.stream.IntStream; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; + + +public class SplitIndexIT 
extends ESIntegTestCase { + + @Override + protected Collection> nodePlugins() { + return Arrays.asList(InternalSettingsPlugin.class); + } + + public void testCreateSplitIndexToN() { + int[][] possibleShardSplits = new int[][] {{2,4,8}, {3, 6, 12}, {1, 2, 4}}; + int[] shardSplits = randomFrom(possibleShardSplits); + assertEquals(shardSplits[0], (shardSplits[0] * shardSplits[1]) / shardSplits[1]); + assertEquals(shardSplits[1], (shardSplits[1] * shardSplits[2]) / shardSplits[2]); + internalCluster().ensureAtLeastNumDataNodes(2); + final boolean useRouting = randomBoolean(); + final boolean useMixedRouting = useRouting ? randomBoolean() : false; + CreateIndexRequestBuilder createInitialIndex = prepareCreate("source"); + Settings.Builder settings = Settings.builder().put(indexSettings()) + .put("number_of_shards", shardSplits[0]) + .put("index.number_of_routing_shards", shardSplits[2] * randomIntBetween(1, 10)); + if (useRouting && useMixedRouting == false && randomBoolean()) { + settings.put("index.routing_partition_size", randomIntBetween(1, 10)); + createInitialIndex.addMapping("t1", "_routing", "required=true"); + } + logger.info("use routing {} use mixed routing {}", useRouting, useMixedRouting); + createInitialIndex.setSettings(settings).get(); + + int numDocs = randomIntBetween(10, 50); + String[] routingValue = new String[numDocs]; + for (int i = 0; i < numDocs; i++) { + IndexRequestBuilder builder = client().prepareIndex("source", "t1", Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON); + if (useRouting) { + String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 10); + if (useMixedRouting && randomBoolean()) { + routingValue[i] = null; + } else { + routingValue[i] = routing; + } + builder.setRouting(routingValue[i]); + } + builder.get(); + } + + if (randomBoolean()) { + for (int i = 0; i < numDocs; i++) { // let's introduce some updates / deletes on the index + if (randomBoolean()) { + IndexRequestBuilder builder = client().prepareIndex("source", "t1", Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + } + } + + ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() + .getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + ensureYellow(); + client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.blocks.write", true)).get(); + ensureGreen(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "first_split") + .setResizeType(ResizeType.SPLIT) + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[1]).build()).get()); + ensureGreen(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = client().prepareIndex("first_split", "t1", Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + 
assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("first_split", "t1", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + + client().admin().indices().prepareUpdateSettings("first_split") + .setSettings(Settings.builder() + .put("index.blocks.write", true)).get(); + ensureGreen(); + // now split source into a new index + assertAcked(client().admin().indices().prepareResizeIndex("first_split", "second_split") + .setResizeType(ResizeType.SPLIT) + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", shardSplits[2]).build()).get()); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + // let it be allocated anywhere and bump replicas + client().admin().indices().prepareUpdateSettings("second_split") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 1)).get(); + ensureGreen(); + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + for (int i = 0; i < numDocs; i++) { // now update + IndexRequestBuilder builder = client().prepareIndex("second_split", "t1", Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON); + if (useRouting) { + builder.setRouting(routingValue[i]); + } + builder.get(); + } + flushAndRefresh(); + for (int i = 0; i < numDocs; i++) { + GetResponse getResponse = client().prepareGet("second_split", "t1", Integer.toString(i)).setRouting(routingValue[i]).get(); + assertTrue(getResponse.isExists()); + } + assertHitCount(client().prepareSearch("second_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("first_split").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertHitCount(client().prepareSearch("source").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + assertAllUniqueDocs(client().prepareSearch("second_split").setSize(100) + .setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertAllUniqueDocs(client().prepareSearch("first_split").setSize(100) + .setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + assertAllUniqueDocs(client().prepareSearch("source").setSize(100) + .setQuery(new TermsQueryBuilder("foo", "bar")).get(), numDocs); + + } + + public void assertAllUniqueDocs(SearchResponse response, int numDocs) { + Set ids = new HashSet<>(); + for (int i = 0; i < response.getHits().getHits().length; i++) { + String id = response.getHits().getHits()[i].getId(); + assertTrue("found ID "+ id + " more than once", ids.add(id)); + } + assertEquals(numDocs, ids.size()); + } + + public void testSplitIndexPrimaryTerm() throws Exception { + final List factors = Arrays.asList(1, 2, 4, 8); + final List numberOfShardsFactors = randomSubsetOf(scaledRandomIntBetween(1, factors.size()), factors); + final int numberOfShards = randomSubsetOf(numberOfShardsFactors).stream().reduce(1, (x, y) -> x * y); + final int numberOfTargetShards = numberOfShardsFactors.stream().reduce(2, (x, y) -> x * y); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) + .put("number_of_shards", 
numberOfShards) + .put("index.number_of_routing_shards", numberOfTargetShards)).get(); + + final ImmutableOpenMap dataNodes = + client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertThat(dataNodes.size(), greaterThanOrEqualTo(2)); + ensureYellow(); + + // fail random primary shards to force primary terms to increase + final Index source = resolveIndex("source"); + final int iterations = scaledRandomIntBetween(0, 16); + for (int i = 0; i < iterations; i++) { + final String node = randomSubsetOf(1, internalCluster().nodesInclude("source")).get(0); + final IndicesService indexServices = internalCluster().getInstance(IndicesService.class, node); + final IndexService indexShards = indexServices.indexServiceSafe(source); + for (final Integer shardId : indexShards.shardIds()) { + final IndexShard shard = indexShards.getShard(shardId); + if (shard.routingEntry().primary() && randomBoolean()) { + disableAllocation("source"); + shard.failShard("test", new Exception("test")); + // this can not succeed until the shard is failed and a replica is promoted + int id = 0; + while (true) { + // find an ID that routes to the right shard, we will only index to the shard that saw a primary failure + final String s = Integer.toString(id); + final int hash = Math.floorMod(Murmur3HashFunction.hash(s), numberOfShards); + if (hash == shardId) { + final IndexRequest request = + new IndexRequest("source", "type", s).source("{ \"f\": \"" + s + "\"}", XContentType.JSON); + client().index(request).get(); + break; + } else { + id++; + } + } + enableAllocation("source"); + ensureGreen(); + } + } + } + + final Settings.Builder prepareSplitSettings = Settings.builder().put("index.blocks.write", true); + client().admin().indices().prepareUpdateSettings("source").setSettings(prepareSplitSettings).get(); + ensureYellow(); + + final IndexMetaData indexMetaData = indexMetaData(client(), "source"); + final long beforeSplitPrimaryTerm = IntStream.range(0, numberOfShards).mapToLong(indexMetaData::primaryTerm).max().getAsLong(); + + // now split source into target + final Settings splitSettings = + Settings.builder().put("index.number_of_replicas", 0).put("index.number_of_shards", numberOfTargetShards).build(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(splitSettings).get()); + + ensureGreen(); + + final IndexMetaData aftersplitIndexMetaData = indexMetaData(client(), "target"); + for (int shardId = 0; shardId < numberOfTargetShards; shardId++) { + assertThat(aftersplitIndexMetaData.primaryTerm(shardId), equalTo(beforeSplitPrimaryTerm + 1)); + } + } + + private static IndexMetaData indexMetaData(final Client client, final String index) { + final ClusterStateResponse clusterStateResponse = client.admin().cluster().state(new ClusterStateRequest()).actionGet(); + return clusterStateResponse.getState().metaData().index(index); + } + + public void testCreateSplitIndex() { + internalCluster().ensureAtLeastNumDataNodes(2); + Version version = VersionUtils.randomVersionBetween(random(), Version.V_6_0_0_rc2, Version.CURRENT); + prepareCreate("source").setSettings(Settings.builder().put(indexSettings()) + .put("number_of_shards", 1) + .put("index.version.created", version) + .put("index.number_of_routing_shards", 2) + ).get(); + final int docs = randomIntBetween(0, 128); + for (int i = 0; i < docs; i++) { + client().prepareIndex("source", "type") + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", 
XContentType.JSON).get(); + } + ImmutableOpenMap dataNodes = + client().admin().cluster().prepareState().get().getState().nodes().getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. + ensureGreen(); + // relocate all shards to one node such that we can merge it. + client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.blocks.write", true)).get(); + ensureGreen(); + + final IndicesStatsResponse sourceStats = client().admin().indices().prepareStats("source").setSegments(true).get(); + + // disable rebalancing to be able to capture the right stats. balancing can move the target primary + // making it hard to pin point the source shards. + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), "none" + )).get(); + try { + + final boolean createWithReplicas = randomBoolean(); + assertAcked(client().admin().indices().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(Settings.builder() + .put("index.number_of_replicas", createWithReplicas ? 1 : 0) + .put("index.number_of_shards", 2).build()).get()); + ensureGreen(); + + final ClusterState state = client().admin().cluster().prepareState().get().getState(); + DiscoveryNode mergeNode = state.nodes().get(state.getRoutingTable().index("target").shard(0).primaryShard().currentNodeId()); + logger.info("split node {}", mergeNode); + + final long maxSeqNo = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getSeqNoStats).mapToLong(SeqNoStats::getMaxSeqNo).max().getAsLong(); + final long maxUnsafeAutoIdTimestamp = Arrays.stream(sourceStats.getShards()) + .filter(shard -> shard.getShardRouting().currentNodeId().equals(mergeNode.getId())) + .map(ShardStats::getStats) + .map(CommonStats::getSegments) + .mapToLong(SegmentsStats::getMaxUnsafeAutoIdTimestamp) + .max() + .getAsLong(); + + final IndicesStatsResponse targetStats = client().admin().indices().prepareStats("target").get(); + for (final ShardStats shardStats : targetStats.getShards()) { + final SeqNoStats seqNoStats = shardStats.getSeqNoStats(); + final ShardRouting shardRouting = shardStats.getShardRouting(); + assertThat("failed on " + shardRouting, seqNoStats.getMaxSeqNo(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, seqNoStats.getLocalCheckpoint(), equalTo(maxSeqNo)); + assertThat("failed on " + shardRouting, + shardStats.getStats().getSegments().getMaxUnsafeAutoIdTimestamp(), equalTo(maxUnsafeAutoIdTimestamp)); + } + + final int size = docs > 0 ? 
2 * docs : 1; + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + + if (createWithReplicas == false) { + // bump replicas + client().admin().indices().prepareUpdateSettings("target") + .setSettings(Settings.builder() + .put("index.number_of_replicas", 1)).get(); + ensureGreen(); + assertHitCount(client().prepareSearch("target").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + } + + for (int i = docs; i < 2 * docs; i++) { + client().prepareIndex("target", "type") + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + flushAndRefresh(); + assertHitCount(client().prepareSearch("target").setSize(2 * size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), + 2 * docs); + assertHitCount(client().prepareSearch("source").setSize(size).setQuery(new TermsQueryBuilder("foo", "bar")).get(), docs); + GetSettingsResponse target = client().admin().indices().prepareGetSettings("target").get(); + assertEquals(version, target.getIndexToSettings().get("target").getAsVersion("index.version.created", null)); + } finally { + // clean up + client().admin().cluster().prepareUpdateSettings().setTransientSettings(Settings.builder().put( + EnableAllocationDecider.CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), (String)null + )).get(); + } + + } + + public void testCreateSplitWithIndexSort() throws Exception { + SortField expectedSortField = new SortedSetSortField("id", true, SortedSetSelector.Type.MAX); + expectedSortField.setMissingValue(SortedSetSortField.STRING_FIRST); + Sort expectedIndexSort = new Sort(expectedSortField); + internalCluster().ensureAtLeastNumDataNodes(2); + prepareCreate("source") + .setSettings( + Settings.builder() + .put(indexSettings()) + .put("sort.field", "id") + .put("index.number_of_routing_shards", 16) + .put("sort.order", "desc") + .put("number_of_shards", 2) + .put("number_of_replicas", 0) + ) + .addMapping("type", "id", "type=keyword,doc_values=true") + .get(); + for (int i = 0; i < 20; i++) { + client().prepareIndex("source", "type", Integer.toString(i)) + .setSource("{\"foo\" : \"bar\", \"id\" : " + i + "}", XContentType.JSON).get(); + } + ImmutableOpenMap dataNodes = client().admin().cluster().prepareState().get().getState().nodes() + .getDataNodes(); + assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2); + DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class); + String mergeNode = discoveryNodes[0].getName(); + // ensure all shards are allocated otherwise the ensure green below might not succeed since we require the merge node + // if we change the setting too quickly we will end up with one replica unassigned which can't be assigned anymore due + // to the require._name below. 
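Stripped of assertions, the split tests in this file all follow the same client-side workflow; condensed here with arbitrary shard counts:

// Condensed split workflow as exercised by the tests in this file (counts arbitrary):
prepareCreate("source").setSettings(Settings.builder()
        .put("number_of_shards", 2)
        .put("index.number_of_routing_shards", 8)).get(); // fixes the legal split chain 2 -> 4 -> 8
// ... index documents into "source" ...
client().admin().indices().prepareUpdateSettings("source") // a resize source must be read-only
        .setSettings(Settings.builder().put("index.blocks.write", true)).get();
ensureGreen();
assertAcked(client().admin().indices().prepareResizeIndex("source", "target")
        .setResizeType(ResizeType.SPLIT)
        .setSettings(Settings.builder()
                .put("index.number_of_shards", 4)
                .put("index.number_of_replicas", 0).build()).get());
ensureGreen();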
+ ensureGreen(); + + flushAndRefresh(); + assertSortedSegments("source", expectedIndexSort); + + client().admin().indices().prepareUpdateSettings("source") + .setSettings(Settings.builder() + .put("index.blocks.write", true)).get(); + ensureYellow(); + + // check that index sort cannot be set on the target index + IllegalArgumentException exc = expectThrows(IllegalArgumentException.class, + () -> client().admin().indices().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 4) + .put("index.sort.field", "foo") + .build()).get()); + assertThat(exc.getMessage(), containsString("can't override index sort when resizing an index")); + + // check that the index sort order of `source` is correctly applied to the `target` + assertAcked(client().admin().indices().prepareResizeIndex("source", "target") + .setResizeType(ResizeType.SPLIT) + .setSettings(Settings.builder() + .put("index.number_of_replicas", 0) + .put("index.number_of_shards", 4).build()).get()); + ensureGreen(); + flushAndRefresh(); + GetSettingsResponse settingsResponse = + client().admin().indices().prepareGetSettings("target").execute().actionGet(); + assertEquals(settingsResponse.getSetting("target", "index.sort.field"), "id"); + assertEquals(settingsResponse.getSetting("target", "index.sort.order"), "desc"); + assertSortedSegments("target", expectedIndexSort); + + // ... and that the index sort is also applied to updates + for (int i = 20; i < 40; i++) { + client().prepareIndex("target", "type") + .setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get(); + } + flushAndRefresh(); + assertSortedSegments("target", expectedIndexSort); + } +} diff --git a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java similarity index 87% rename from core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java rename to core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java index 83e9cf89d9c75..b03b043f03e14 100644 --- a/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportShrinkActionTests.java +++ b/core/src/test/java/org/elasticsearch/action/admin/indices/shrink/TransportResizeActionTests.java @@ -28,7 +28,6 @@ import org.elasticsearch.cluster.EmptyClusterInfoService; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MetaData; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -49,7 +48,7 @@ import static java.util.Collections.emptyMap; -public class TransportShrinkActionTests extends ESTestCase { +public class TransportResizeActionTests extends ESTestCase { private ClusterState createClusterState(String name, int numShards, int numReplicas, Settings settings) { MetaData.Builder metaBuilder = MetaData.builder(); @@ -72,20 +71,20 @@ public void testErrorCondition() { Settings.builder().put("index.blocks.write", true).build()); assertTrue( expectThrows(IllegalStateException.class, () -> - TransportShrinkAction.prepareCreateIndexRequest(new ShrinkRequest("target", "source"), state, - (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 
1000), between(1, 100)), new IndexNameExpressionResolver(Settings.EMPTY)) + TransportResizeAction.prepareCreateIndexRequest(new ResizeRequest("target", "source"), state, + (i) -> new DocsStats(Integer.MAX_VALUE, between(1, 1000), between(1, 100)), "source", "target") ).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards ")); assertTrue( expectThrows(IllegalStateException.class, () -> { - ShrinkRequest req = new ShrinkRequest("target", "source"); - req.getShrinkIndexRequest().settings(Settings.builder().put("index.number_of_shards", 4)); + ResizeRequest req = new ResizeRequest("target", "source"); + req.getTargetIndexRequest().settings(Settings.builder().put("index.number_of_shards", 4)); ClusterState clusterState = createClusterState("source", 8, 1, Settings.builder().put("index.blocks.write", true).build()); - TransportShrinkAction.prepareCreateIndexRequest(req, clusterState, - (i) -> i == 2 || i == 3 ? new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null, - new IndexNameExpressionResolver(Settings.EMPTY)); + TransportResizeAction.prepareCreateIndexRequest(req, clusterState, + (i) -> i == 2 || i == 3 ? new DocsStats(Integer.MAX_VALUE / 2, between(1, 1000), between(1, 10000)) : null + , "source", "target"); } ).getMessage().startsWith("Can't merge index with more than [2147483519] docs - too many documents in shards ")); @@ -105,8 +104,8 @@ public void testErrorCondition() { routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); - TransportShrinkAction.prepareCreateIndexRequest(new ShrinkRequest("target", "source"), clusterState, - (i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), new IndexNameExpressionResolver(Settings.EMPTY)); + TransportResizeAction.prepareCreateIndexRequest(new ResizeRequest("target", "source"), clusterState, + (i) -> new DocsStats(between(1, 1000), between(1, 1000), between(0, 10000)), "source", "target"); } public void testShrinkIndexSettings() { @@ -129,14 +128,13 @@ public void testShrinkIndexSettings() { clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); int numSourceShards = clusterState.metaData().index(indexName).getNumberOfShards(); DocsStats stats = new DocsStats(between(0, (IndexWriter.MAX_DOCS) / numSourceShards), between(1, 1000), between(1, 10000)); - ShrinkRequest target = new ShrinkRequest("target", indexName); + ResizeRequest target = new ResizeRequest("target", indexName); final ActiveShardCount activeShardCount = randomBoolean() ? 
ActiveShardCount.ALL : ActiveShardCount.ONE; target.setWaitForActiveShards(activeShardCount); - CreateIndexClusterStateUpdateRequest request = TransportShrinkAction.prepareCreateIndexRequest( - target, clusterState, (i) -> stats, - new IndexNameExpressionResolver(Settings.EMPTY)); - assertNotNull(request.shrinkFrom()); - assertEquals(indexName, request.shrinkFrom().getName()); + CreateIndexClusterStateUpdateRequest request = TransportResizeAction.prepareCreateIndexRequest( + target, clusterState, (i) -> stats, indexName, "target"); + assertNotNull(request.recoverFrom()); + assertEquals(indexName, request.recoverFrom().getName()); assertEquals("1", request.settings().get("index.number_of_shards")); assertEquals("shrink_index", request.cause()); assertEquals(request.waitForActiveShards(), activeShardCount); diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index 81acd138d26fb..6fd3d66c8f81b 100644 --- a/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -34,6 +34,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.NodeVersionAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.RebalanceOnlyWhenActiveAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ReplicaAfterPrimaryActiveAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ResizeAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.SnapshotInProgressAllocationDecider; @@ -174,6 +175,7 @@ public void testShardsAllocatorFactoryNull() { public void testAllocationDeciderOrder() { List> expectedDeciders = Arrays.asList( MaxRetryAllocationDecider.class, + ResizeAllocationDecider.class, ReplicaAfterPrimaryActiveAllocationDecider.class, RebalanceOnlyWhenActiveAllocationDecider.class, ClusterRebalanceAllocationDecider.class, diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java index 4dd757c140311..f44d0b7c4036e 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexCreationTaskTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.indices.alias.Alias; import org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.action.support.ActiveShardCount; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlock; @@ -258,8 +259,8 @@ public void testIndexRemovalOnFailure() throws Exception { public void testShrinkIndexIgnoresTemplates() throws Exception { final Index source = new Index("source_idx", "aaa111bbb222"); - when(request.shrinkFrom()).thenReturn(source); - + when(request.recoverFrom()).thenReturn(source); + when(request.resizeType()).thenReturn(ResizeType.SHRINK); currentStateMetaDataBuilder.put(createIndexMetaDataBuilder("source_idx", "aaa111bbb222", 2, 2)); 
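The document-count guard exercised in TransportResizeActionTests above exists because each target shard of a shrink becomes a single Lucene index, and Lucene caps an index at IndexWriter.MAX_DOCS (2147483519) documents. A sketch of the shape of that check; docsInShard is a hypothetical stand-in for the per-shard DocsStats function the action receives:

// Sketch of the shrink doc-count guard; docsInShard is a hypothetical stand-in
// for the per-shard DocsStats lookup handed to prepareCreateIndexRequest.
static void checkDocLimit(int[] sourceShardsForTarget, java.util.function.IntToLongFunction docsInShard) {
    long count = 0;
    for (int shard : sourceShardsForTarget) { // every source shard that collapses into one target shard
        count += docsInShard.applyAsLong(shard);
    }
    if (count > org.apache.lucene.index.IndexWriter.MAX_DOCS) { // 2147483519
        throw new IllegalStateException("Can't merge index with more than ["
                + org.apache.lucene.index.IndexWriter.MAX_DOCS + "] docs - too many documents in shards");
    }
}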
routingTableBuilder.add(createIndexRoutingTableWithStartedShards(source)); diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java index fa56c756fcc35..e83d1fa706cfd 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/IndexMetaDataTests.java @@ -30,6 +30,7 @@ import org.elasticsearch.test.ESTestCase; import java.io.IOException; +import java.util.Collections; import java.util.Set; import static org.hamcrest.Matchers.is; @@ -84,21 +85,12 @@ public void testIndexMetaDataSerialization() throws IOException { } public void testGetRoutingFactor() { - int numberOfReplicas = randomIntBetween(0, 10); - IndexMetaData metaData = IndexMetaData.builder("foo") - .settings(Settings.builder() - .put("index.version.created", 1) - .put("index.number_of_shards", 32) - .put("index.number_of_replicas", numberOfReplicas) - .build()) - .creationDate(randomLong()) - .build(); Integer numShard = randomFrom(1, 2, 4, 8, 16); - int routingFactor = IndexMetaData.getRoutingFactor(metaData, numShard); - assertEquals(routingFactor * numShard, metaData.getNumberOfShards()); + int routingFactor = IndexMetaData.getRoutingFactor(32, numShard); + assertEquals(routingFactor * numShard, 32); - Integer brokenNumShards = randomFrom(3, 5, 9, 12, 29, 42, 64); - expectThrows(IllegalArgumentException.class, () -> IndexMetaData.getRoutingFactor(metaData, brokenNumShards)); + Integer brokenNumShards = randomFrom(3, 5, 9, 12, 29, 42); + expectThrows(IllegalArgumentException.class, () -> IndexMetaData.getRoutingFactor(32, brokenNumShards)); } public void testSelectShrinkShards() { @@ -125,6 +117,64 @@ public void testSelectShrinkShards() { expectThrows(IllegalArgumentException.class, () -> IndexMetaData.selectShrinkShards(8, metaData, 8)).getMessage()); } + public void testSelectResizeShards() { + IndexMetaData split = IndexMetaData.builder("foo") + .settings(Settings.builder() + .put("index.version.created", 1) + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 0) + .build()) + .creationDate(randomLong()) + .build(); + + IndexMetaData shrink = IndexMetaData.builder("foo") + .settings(Settings.builder() + .put("index.version.created", 1) + .put("index.number_of_shards", 32) + .put("index.number_of_replicas", 0) + .build()) + .creationDate(randomLong()) + .build(); + int numTargetShards = randomFrom(4, 6, 8, 12); + int shard = randomIntBetween(0, numTargetShards-1); + assertEquals(Collections.singleton(IndexMetaData.selectSplitShard(shard, split, numTargetShards)), + IndexMetaData.selectRecoverFromShards(shard, split, numTargetShards)); + + numTargetShards = randomFrom(1, 2, 4, 8, 16); + shard = randomIntBetween(0, numTargetShards-1); + assertEquals(IndexMetaData.selectShrinkShards(shard, shrink, numTargetShards), + IndexMetaData.selectRecoverFromShards(shard, shrink, numTargetShards)); + + assertEquals("can't select recover from shards if both indices have the same number of shards", + expectThrows(IllegalArgumentException.class, () -> IndexMetaData.selectRecoverFromShards(0, shrink, 32)).getMessage()); + } + + public void testSelectSplitShard() { + IndexMetaData metaData = IndexMetaData.builder("foo") + .settings(Settings.builder() + .put("index.version.created", 1) + .put("index.number_of_shards", 2) + .put("index.number_of_replicas", 0) + .build()) + .creationDate(randomLong()) + .setRoutingNumShards(4) + 
.build(); + ShardId shardId = IndexMetaData.selectSplitShard(0, metaData, 4); + assertEquals(0, shardId.getId()); + shardId = IndexMetaData.selectSplitShard(1, metaData, 4); + assertEquals(0, shardId.getId()); + shardId = IndexMetaData.selectSplitShard(2, metaData, 4); + assertEquals(1, shardId.getId()); + shardId = IndexMetaData.selectSplitShard(3, metaData, 4); + assertEquals(1, shardId.getId()); + + assertEquals("the number of target shards (0) must be greater than the shard id: 0", + expectThrows(IllegalArgumentException.class, () -> IndexMetaData.selectSplitShard(0, metaData, 0)).getMessage()); + + assertEquals("the number of source shards [2] must be a must be a factor of [3]", + expectThrows(IllegalArgumentException.class, () -> IndexMetaData.selectSplitShard(0, metaData, 3)).getMessage()); + } + public void testIndexFormat() { Settings defaultSettings = Settings.builder() .put("index.version.created", 1) @@ -156,4 +206,26 @@ public void testIndexFormat() { assertThat(metaData.getSettings().getAsInt(IndexMetaData.INDEX_FORMAT_SETTING.getKey(), 0), is(0)); } } + + public void testNumberOfRoutingShards() { + Settings build = Settings.builder().put("index.number_of_shards", 5).put("index.number_of_routing_shards", 10).build(); + assertEquals(10, IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(build).intValue()); + + build = Settings.builder().put("index.number_of_shards", 5).put("index.number_of_routing_shards", 5).build(); + assertEquals(5, IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(build).intValue()); + + int numShards = randomIntBetween(1, 10); + build = Settings.builder().put("index.number_of_shards", numShards).build(); + assertEquals(numShards, IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(build).intValue()); + + Settings lessThanSettings = Settings.builder().put("index.number_of_shards", 8).put("index.number_of_routing_shards", 4).build(); + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, + () -> IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(lessThanSettings)); + assertEquals("index.number_of_routing_shards [4] must be >= index.number_of_shards [8]", iae.getMessage()); + + Settings notAFactorySettings = Settings.builder().put("index.number_of_shards", 2).put("index.number_of_routing_shards", 3).build(); + iae = expectThrows(IllegalArgumentException.class, + () -> IndexMetaData.INDEX_NUMBER_OF_ROUTING_SHARDS_SETTING.get(notAFactorySettings)); + assertEquals("the number of source shards [2] must be a must be a factor of [3]", iae.getMessage()); + } } diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java index 7bfc7872f816a..39e4a18440931 100644 --- a/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/metadata/MetaDataCreateIndexServiceTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.Version; +import org.elasticsearch.action.admin.indices.shrink.ResizeType; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.EmptyClusterInfoService; @@ -33,6 +34,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.common.Strings; 
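testSelectSplitShard above pins down which source shard a split target recovers from: the mapping is contiguous and comes down to integer division. A toy equivalent:

// Toy equivalent of the mapping testSelectSplitShard verifies: in an N -> M split,
// target shards [k*M/N, (k+1)*M/N) all recover from source shard k.
static int sourceShardForSplitTarget(int targetShardId, int numSourceShards, int numTargetShards) {
    int shardsPerSource = numTargetShards / numSourceShards; // validated to divide evenly
    return targetShardId / shardsPerSource;
}
// the 2 -> 4 case asserted above: targets 0,1 -> source 0; targets 2,3 -> source 1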
+import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.ResourceAlreadyExistsException; @@ -43,6 +45,7 @@ import java.util.Arrays; import java.util.Collections; +import java.util.Comparator; import java.util.HashSet; import java.util.List; @@ -75,6 +78,12 @@ public static boolean isShrinkable(int source, int target) { return target * x == source; } + public static boolean isSplitable(int source, int target) { + int x = target / source; + assert source < target : source + " >= " + target; + return source * x == target; + } + public void testValidateShrinkIndex() { int numShards = randomIntBetween(2, 42); ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10), @@ -90,29 +99,28 @@ public void testValidateShrinkIndex() { MetaDataCreateIndexService.validateShrinkIndex(state, "no such index", Collections.emptySet(), "target", Settings.EMPTY) ).getMessage()); + Settings targetSettings = Settings.builder().put("index.number_of_shards", 1).build(); assertEquals("can't shrink an index with only one shard", expectThrows(IllegalArgumentException.class, () -> MetaDataCreateIndexService.validateShrinkIndex(createClusterState("source", - 1, 0, Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), - "target", Settings.EMPTY) - ).getMessage()); + 1, 0, Settings.builder().put("index.blocks.write", true).build()), "source", + Collections.emptySet(), "target", targetSettings)).getMessage()); - assertEquals("the number of target shards must be less that the number of source shards", + assertEquals("the number of target shards [10] must be less that the number of source shards [5]", expectThrows(IllegalArgumentException.class, () -> MetaDataCreateIndexService.validateShrinkIndex(createClusterState("source", - 5, 0, Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), - "target", Settings.builder().put("index.number_of_shards", 10).build()) - ).getMessage()); + 5, 0, Settings.builder().put("index.blocks.write", true).build()), "source", + Collections.emptySet(), "target", Settings.builder().put("index.number_of_shards", 10).build())).getMessage()); - assertEquals("index source must be read-only to shrink index. use \"index.blocks.write=true\"", + assertEquals("index source must be read-only to resize index. 
use \"index.blocks.write=true\"", expectThrows(IllegalStateException.class, () -> MetaDataCreateIndexService.validateShrinkIndex( createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY) - , "source", Collections.emptySet(), "target", Settings.EMPTY) + , "source", Collections.emptySet(), "target", targetSettings) ).getMessage()); assertEquals("index source must have all shards allocated on the same node to shrink index", expectThrows(IllegalStateException.class, () -> - MetaDataCreateIndexService.validateShrinkIndex(state, "source", Collections.emptySet(), "target", Settings.EMPTY) + MetaDataCreateIndexService.validateShrinkIndex(state, "source", Collections.emptySet(), "target", targetSettings) ).getMessage()); assertEquals("the number of source shards [8] must be a must be a multiple of [3]", @@ -122,10 +130,10 @@ public void testValidateShrinkIndex() { Settings.builder().put("index.number_of_shards", 3).build()) ).getMessage()); - assertEquals("mappings are not allowed when shrinking indices, all mappings are copied from the source index", + assertEquals("mappings are not allowed when resizing indices, all mappings are copied from the source index", expectThrows(IllegalArgumentException.class, () -> { MetaDataCreateIndexService.validateShrinkIndex(state, "source", Collections.singleton("foo"), - "target", Settings.EMPTY); + "target", targetSettings); } ).getMessage()); @@ -151,11 +159,78 @@ public void testValidateShrinkIndex() { Settings.builder().put("index.number_of_shards", targetShards).build()); } - public void testShrinkIndexSettings() { + public void testValidateSplitIndex() { + int numShards = randomIntBetween(1, 42); + Settings targetSettings = Settings.builder().put("index.number_of_shards", numShards * 2).build(); + ClusterState state = createClusterState("source", numShards, randomIntBetween(0, 10), + Settings.builder().put("index.blocks.write", true).build()); + + assertEquals("index [source] already exists", + expectThrows(ResourceAlreadyExistsException.class, () -> + MetaDataCreateIndexService.validateSplitIndex(state, "target", Collections.emptySet(), "source", targetSettings) + ).getMessage()); + + assertEquals("no such index", + expectThrows(IndexNotFoundException.class, () -> + MetaDataCreateIndexService.validateSplitIndex(state, "no such index", Collections.emptySet(), "target", targetSettings) + ).getMessage()); + + assertEquals("the number of source shards [10] must be less that the number of target shards [5]", + expectThrows(IllegalArgumentException.class, () -> MetaDataCreateIndexService.validateSplitIndex(createClusterState("source", + 10, 0, Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), + "target", Settings.builder().put("index.number_of_shards", 5).build()) + ).getMessage()); + + + assertEquals("index source must be read-only to resize index. 
use \"index.blocks.write=true\"", + expectThrows(IllegalStateException.class, () -> + MetaDataCreateIndexService.validateSplitIndex( + createClusterState("source", randomIntBetween(2, 100), randomIntBetween(0, 10), Settings.EMPTY) + , "source", Collections.emptySet(), "target", targetSettings) + ).getMessage()); + + + assertEquals("the number of source shards [3] must be a must be a factor of [4]", + expectThrows(IllegalArgumentException.class, () -> + MetaDataCreateIndexService.validateSplitIndex(createClusterState("source", 3, randomIntBetween(0, 10), + Settings.builder().put("index.blocks.write", true).build()), "source", Collections.emptySet(), "target", + Settings.builder().put("index.number_of_shards", 4).build()) + ).getMessage()); + + assertEquals("mappings are not allowed when resizing indices, all mappings are copied from the source index", + expectThrows(IllegalArgumentException.class, () -> { + MetaDataCreateIndexService.validateSplitIndex(state, "source", Collections.singleton("foo"), + "target", targetSettings); + } + ).getMessage()); + + + ClusterState clusterState = ClusterState.builder(createClusterState("source", numShards, 0, + Settings.builder().put("index.blocks.write", true).build())).nodes(DiscoveryNodes.builder().add(newNode("node1"))) + .build(); + AllocationService service = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, + Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))), + new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); + + RoutingTable routingTable = service.reroute(clusterState, "reroute").routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + // now we start the shard + routingTable = service.applyStartedShards(clusterState, + routingTable.index("source").shardsWithState(ShardRoutingState.INITIALIZING)).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + int targetShards; + do { + targetShards = randomIntBetween(numShards+1, 100); + } while (isSplitable(numShards, targetShards) == false); + MetaDataCreateIndexService.validateSplitIndex(clusterState, "source", Collections.emptySet(), "target", + Settings.builder().put("index.number_of_shards", targetShards).build()); + } + + public void testResizeIndexSettings() { String indexName = randomAlphaOfLength(10); List versions = Arrays.asList(VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random()), VersionUtils.randomVersion(random())); - versions.sort((l, r) -> Long.compare(l.id, r.id)); + versions.sort(Comparator.comparingLong(l -> l.id)); Version version = versions.get(0); Version minCompat = versions.get(1); Version upgraded = versions.get(2); @@ -182,8 +257,9 @@ public void testShrinkIndexSettings() { clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); Settings.Builder builder = Settings.builder(); - MetaDataCreateIndexService.prepareShrinkIndexSettings( - clusterState, Collections.emptySet(), builder, clusterState.metaData().index(indexName).getIndex(), "target"); + builder.put("index.number_of_shards", 1); + MetaDataCreateIndexService.prepareResizeIndexSettings(clusterState, Collections.emptySet(), builder, + clusterState.metaData().index(indexName).getIndex(), "target", ResizeType.SHRINK); assertEquals("similarity settings must be copied", "BM25", builder.build().get("index.similarity.default.type")); assertEquals("analysis settings 
must be copied", "keyword", builder.build().get("index.analysis.analyzer.my_analyzer.tokenizer")); diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java index 498edee12f90a..1f8de1ca02fd7 100644 --- a/core/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java +++ b/core/src/test/java/org/elasticsearch/cluster/routing/OperationRoutingTests.java @@ -23,9 +23,7 @@ import org.elasticsearch.action.support.replication.ClusterStateCreationUtils; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.TimeValue; @@ -54,6 +52,7 @@ public class OperationRoutingTests extends ESTestCase{ + public void testGenerateShardId() { int[][] possibleValues = new int[][] { {8,4,2}, {20, 10, 2}, {36, 12, 3}, {15,5,1} @@ -70,6 +69,7 @@ public void testGenerateShardId() { .numberOfReplicas(1) .setRoutingNumShards(shardSplits[0]).build(); int shrunkShard = OperationRouting.generateShardId(shrunk, term, null); + Set shardIds = IndexMetaData.selectShrinkShards(shrunkShard, metaData, shrunk.getNumberOfShards()); assertEquals(1, shardIds.stream().filter((sid) -> sid.id() == shard).count()); @@ -81,6 +81,36 @@ public void testGenerateShardId() { } } + public void testGenerateShardIdSplit() { + int[][] possibleValues = new int[][] { + {2,4,8}, {2, 10, 20}, {3, 12, 36}, {1,5,15} + }; + for (int i = 0; i < 10; i++) { + int[] shardSplits = randomFrom(possibleValues); + assertEquals(shardSplits[0], (shardSplits[0] * shardSplits[1]) / shardSplits[1]); + assertEquals(shardSplits[1], (shardSplits[1] * shardSplits[2]) / shardSplits[2]); + IndexMetaData metaData = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(shardSplits[0]) + .numberOfReplicas(1).setRoutingNumShards(shardSplits[2]).build(); + String term = randomAlphaOfLength(10); + final int shard = OperationRouting.generateShardId(metaData, term, null); + IndexMetaData split = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(shardSplits[1]) + .numberOfReplicas(1) + .setRoutingNumShards(shardSplits[2]).build(); + int shrunkShard = OperationRouting.generateShardId(split, term, null); + + ShardId shardId = IndexMetaData.selectSplitShard(shrunkShard, metaData, split.getNumberOfShards()); + assertNotNull(shardId); + assertEquals(shard, shardId.getId()); + + split = IndexMetaData.builder("test").settings(settings(Version.CURRENT)).numberOfShards(shardSplits[2]).numberOfReplicas(1) + .setRoutingNumShards(shardSplits[2]).build(); + shrunkShard = OperationRouting.generateShardId(split, term, null); + shardId = IndexMetaData.selectSplitShard(shrunkShard, metaData, split.getNumberOfShards()); + assertNotNull(shardId); + assertEquals(shard, shardId.getId()); + } + } + public void testPartitionedIndex() { // make sure the same routing value always has each _id fall within the configured partition size for (int shards = 1; shards < 5; shards++) { @@ -373,7 +403,7 @@ public void testPreferNodes() throws InterruptedException, IOException { terminate(threadPool); } } - + public void testFairSessionIdPreferences() throws InterruptedException, 
IOException { // Ensure that a user session is re-routed back to same nodes for // subsequent searches and that the nodes are selected fairly i.e. @@ -424,13 +454,13 @@ public void testFairSessionIdPreferences() throws InterruptedException, IOExcept assertThat("Search should use more than one of the nodes", selectedNodes.size(), greaterThan(1)); } } - + // Regression test for the routing logic - implements same hashing logic private ShardIterator duelGetShards(ClusterState clusterState, ShardId shardId, String sessionId) { final IndexShardRoutingTable indexShard = clusterState.getRoutingTable().shardRoutingTable(shardId.getIndexName(), shardId.getId()); int routingHash = Murmur3HashFunction.hash(sessionId); routingHash = 31 * routingHash + indexShard.shardId.hashCode(); - return indexShard.activeInitializingShardsIt(routingHash); + return indexShard.activeInitializingShardsIt(routingHash); } public void testThatOnlyNodesSupportNodeIds() throws InterruptedException, IOException { diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java new file mode 100644 index 0000000000000..cb9919216bdd0 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/ResizeAllocationDeciderTests.java @@ -0,0 +1,287 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ESAllocationTestCase; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.metadata.MetaData; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.ResizeAllocationDecider; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.gateway.TestGatewayAllocator; + +import java.util.Arrays; +import java.util.Collections; + +import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING; +import static org.elasticsearch.cluster.routing.ShardRoutingState.STARTED; +import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED; + + +public class ResizeAllocationDeciderTests extends ESAllocationTestCase { + + private AllocationService strategy; + + @Override + public void setUp() throws Exception { + super.setUp(); + strategy = new AllocationService(Settings.builder().build(), new AllocationDeciders(Settings.EMPTY, + Collections.singleton(new ResizeAllocationDecider(Settings.EMPTY))), + new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE); + } + + private ClusterState createInitialClusterState(boolean startShards) { + return createInitialClusterState(startShards, Version.CURRENT); + } + + private ClusterState createInitialClusterState(boolean startShards, Version nodeVersion) { + MetaData.Builder metaBuilder = MetaData.builder(); + metaBuilder.put(IndexMetaData.builder("source").settings(settings(Version.CURRENT)) + .numberOfShards(2).numberOfReplicas(0).setRoutingNumShards(16)); + MetaData metaData = metaBuilder.build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(); + routingTableBuilder.addAsNew(metaData.index("source")); + + RoutingTable routingTable = routingTableBuilder.build(); + ClusterState clusterState = ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY)) + .metaData(metaData).routingTable(routingTable).build(); + clusterState = ClusterState.builder(clusterState).nodes(DiscoveryNodes.builder().add(newNode("node1", nodeVersion)).add(newNode + ("node2", nodeVersion))) + .build(); + RoutingTable prevRoutingTable = routingTable; + routingTable = strategy.reroute(clusterState, "reroute", false).routingTable(); + clusterState = ClusterState.builder(clusterState).routingTable(routingTable).build(); + + assertEquals(prevRoutingTable.index("source").shards().size(), 2); + assertEquals(prevRoutingTable.index("source").shard(0).shards().get(0).state(), UNASSIGNED); + 
assertEquals(prevRoutingTable.index("source").shard(1).shards().get(0).state(), UNASSIGNED); + + + assertEquals(routingTable.index("source").shards().size(), 2); + + assertEquals(routingTable.index("source").shard(0).shards().get(0).state(), INITIALIZING); + assertEquals(routingTable.index("source").shard(1).shards().get(0).state(), INITIALIZING); + + + if (startShards) { + clusterState = strategy.applyStartedShards(clusterState, + Arrays.asList(routingTable.index("source").shard(0).shards().get(0), + routingTable.index("source").shard(1).shards().get(0))); + routingTable = clusterState.routingTable(); + assertEquals(routingTable.index("source").shards().size(), 2); + assertEquals(routingTable.index("source").shard(0).shards().get(0).state(), STARTED); + assertEquals(routingTable.index("source").shard(1).shards().get(0).state(), STARTED); + + } + return clusterState; + } + + public void testNonResizeRouting() { + ClusterState clusterState = createInitialClusterState(true); + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, 0); + ShardRouting shardRouting = TestShardRouting.newShardRouting("non-resize", 0, null, true, ShardRoutingState.UNASSIGNED); + assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); + assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation)); + } + + public void testShrink() { // we don't handle shrink yet + ClusterState clusterState = createInitialClusterState(true); + MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData()); + metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), "source") + .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, IndexMetaData.INDEX_UUID_NA_VALUE)) + .numberOfShards(1).numberOfReplicas(0)); + MetaData metaData = metaBuilder.build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + routingTableBuilder.addAsNew(metaData.index("target")); + + clusterState = ClusterState.builder(clusterState) + .routingTable(routingTableBuilder.build()) + .metaData(metaData).build(); + Index idx = clusterState.metaData().index("target").getIndex(); + + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + RoutingAllocation routingAllocation = new RoutingAllocation(null, null, clusterState, null, 0); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, 0), null, true, RecoverySource + .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); + assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation)); + assertEquals(Decision.ALWAYS, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation)); + } + + public void testSourceNotActive() { + ClusterState clusterState = createInitialClusterState(false); + MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData()); + metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), 
"source") + .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, IndexMetaData.INDEX_UUID_NA_VALUE)) + .numberOfShards(4).numberOfReplicas(0)); + MetaData metaData = metaBuilder.build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + routingTableBuilder.addAsNew(metaData.index("target")); + + clusterState = ClusterState.builder(clusterState) + .routingTable(routingTableBuilder.build()) + .metaData(metaData).build(); + Index idx = clusterState.metaData().index("target").getIndex(); + + + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); + int shardId = randomIntBetween(0, 3); + int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource + .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); + assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation)); + assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation)); + + routingAllocation.debugDecision(true); + assertEquals("source primary shard [[source][" + sourceShardId + "]] is not active", + resizeAllocationDecider.canAllocate(shardRouting, routingAllocation).getExplanation()); + assertEquals("source primary shard [[source][" + sourceShardId + "]] is not active", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node0"), + routingAllocation).getExplanation()); + assertEquals("source primary shard [[source][" + sourceShardId + "]] is not active", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation).getExplanation()); + } + + public void testSourcePrimaryActive() { + ClusterState clusterState = createInitialClusterState(true); + MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData()); + metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), "source") + .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, IndexMetaData.INDEX_UUID_NA_VALUE)) + .numberOfShards(4).numberOfReplicas(0)); + MetaData metaData = metaBuilder.build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + routingTableBuilder.addAsNew(metaData.index("target")); + + clusterState = ClusterState.builder(clusterState) + .routingTable(routingTableBuilder.build()) + .metaData(metaData).build(); + Index idx = clusterState.metaData().index("target").getIndex(); + + + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); + int shardId = randomIntBetween(0, 3); + int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource + .LocalShardsRecoverySource.INSTANCE, 
ShardRoutingState.UNASSIGNED); + assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); + + String allowedNode = clusterState.getRoutingTable().index("source").shard(sourceShardId).primaryShard().currentNodeId(); + + if ("node1".equals(allowedNode)) { + assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation)); + assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation)); + } else { + assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation)); + assertEquals(Decision.YES, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation)); + } + + routingAllocation.debugDecision(true); + assertEquals("source primary is active", resizeAllocationDecider.canAllocate(shardRouting, routingAllocation).getExplanation()); + + if ("node1".equals(allowedNode)) { + assertEquals("source primary is allocated on this node", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation).getExplanation()); + assertEquals("source primary is allocated on another node", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation).getExplanation()); + } else { + assertEquals("source primary is allocated on another node", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation).getExplanation()); + assertEquals("source primary is allocated on this node", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation).getExplanation()); + } + } + + public void testAllocateOnOldNode() { + Version version = VersionUtils.randomVersionBetween(random(), Version.V_5_0_0, + VersionUtils.getPreviousVersion(Version.V_7_0_0_alpha1)); + ClusterState clusterState = createInitialClusterState(true, version); + MetaData.Builder metaBuilder = MetaData.builder(clusterState.metaData()); + metaBuilder.put(IndexMetaData.builder("target").settings(settings(Version.CURRENT) + .put(IndexMetaData.INDEX_RESIZE_SOURCE_NAME.getKey(), "source") + .put(IndexMetaData.INDEX_RESIZE_SOURCE_UUID_KEY, IndexMetaData.INDEX_UUID_NA_VALUE)) + .numberOfShards(4).numberOfReplicas(0)); + MetaData metaData = metaBuilder.build(); + RoutingTable.Builder routingTableBuilder = RoutingTable.builder(clusterState.routingTable()); + routingTableBuilder.addAsNew(metaData.index("target")); + + clusterState = ClusterState.builder(clusterState) + .routingTable(routingTableBuilder.build()) + .metaData(metaData).build(); + Index idx = clusterState.metaData().index("target").getIndex(); + + + ResizeAllocationDecider resizeAllocationDecider = new ResizeAllocationDecider(Settings.EMPTY); + RoutingAllocation routingAllocation = new RoutingAllocation(null, clusterState.getRoutingNodes(), clusterState, null, 0); + int shardId = randomIntBetween(0, 3); + int sourceShardId = IndexMetaData.selectSplitShard(shardId, clusterState.metaData().index("source"), 4).id(); + ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId(idx, shardId), null, true, RecoverySource + .LocalShardsRecoverySource.INSTANCE, ShardRoutingState.UNASSIGNED); + assertEquals(Decision.YES, 
resizeAllocationDecider.canAllocate(shardRouting, routingAllocation)); + + assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation)); + assertEquals(Decision.NO, resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation)); + + routingAllocation.debugDecision(true); + assertEquals("source primary is active", resizeAllocationDecider.canAllocate(shardRouting, routingAllocation).getExplanation()); + assertEquals("node [node1] is too old to split a shard", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node1"), + routingAllocation).getExplanation()); + assertEquals("node [node2] is too old to split a shard", + resizeAllocationDecider.canAllocate(shardRouting, clusterState.getRoutingNodes().node("node2"), + routingAllocation).getExplanation()); + } +} diff --git a/core/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java b/core/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java new file mode 100644 index 0000000000000..7351372620fc9 --- /dev/null +++ b/core/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java @@ -0,0 +1,193 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +package org.elasticsearch.index.shard; + +import org.apache.lucene.document.Field; +import org.apache.lucene.document.SortedNumericDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.SortedNumericDocValues; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.Weight; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.RoutingFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.test.ESTestCase; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; + +public class ShardSplittingQueryTests extends ESTestCase { + + public void testSplitOnID() throws IOException { + Directory dir = newFSDirectory(createTempDir()); + final int numDocs = randomIntBetween(50, 100); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + int numShards = randomIntBetween(2, 10); + IndexMetaData metaData = IndexMetaData.builder("test") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(numShards) + .setRoutingNumShards(numShards * 1000000) + .numberOfReplicas(0).build(); + int targetShardId = randomIntBetween(0, numShards-1); + for (int j = 0; j < numDocs; j++) { + int shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), null); + writer.addDocument(Arrays.asList( + new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), + new SortedNumericDocValuesField("shard_id", shardId) + )); + } + writer.commit(); + writer.close(); + + + assertSplit(dir, metaData, targetShardId); + dir.close(); + } + + public void testSplitOnRouting() throws IOException { + Directory dir = newFSDirectory(createTempDir()); + final int numDocs = randomIntBetween(50, 100); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + int numShards = randomIntBetween(2, 10); + IndexMetaData metaData = IndexMetaData.builder("test") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(numShards) + .setRoutingNumShards(numShards * 1000000) + .numberOfReplicas(0).build(); + int targetShardId = randomIntBetween(0, numShards-1); + for (int j = 0; j < numDocs; j++) { + String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5); + final int shardId = OperationRouting.generateShardId(metaData, null, routing); + writer.addDocument(Arrays.asList( + new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), + new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES), + new SortedNumericDocValuesField("shard_id", shardId) + )); + } + writer.commit(); + writer.close(); + assertSplit(dir, metaData, targetShardId); + dir.close(); + } + + public void testSplitOnIdOrRouting() throws IOException { + Directory dir = newFSDirectory(createTempDir()); + final int numDocs = randomIntBetween(50, 
100); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + int numShards = randomIntBetween(2, 10); + IndexMetaData metaData = IndexMetaData.builder("test") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(numShards) + .setRoutingNumShards(numShards * 1000000) + .numberOfReplicas(0).build(); + int targetShardId = randomIntBetween(0, numShards-1); + for (int j = 0; j < numDocs; j++) { + if (randomBoolean()) { + String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5); + final int shardId = OperationRouting.generateShardId(metaData, null, routing); + writer.addDocument(Arrays.asList( + new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), + new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES), + new SortedNumericDocValuesField("shard_id", shardId) + )); + } else { + int shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), null); + writer.addDocument(Arrays.asList( + new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), + new SortedNumericDocValuesField("shard_id", shardId) + )); + } + } + writer.commit(); + writer.close(); + assertSplit(dir, metaData, targetShardId); + dir.close(); + } + + + public void testSplitOnRoutingPartitioned() throws IOException { + Directory dir = newFSDirectory(createTempDir()); + final int numDocs = randomIntBetween(50, 100); + RandomIndexWriter writer = new RandomIndexWriter(random(), dir); + int numShards = randomIntBetween(2, 10); + IndexMetaData metaData = IndexMetaData.builder("test") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(numShards) + .setRoutingNumShards(numShards * 1000000) + .routingPartitionSize(randomIntBetween(1, 10)) + .numberOfReplicas(0).build(); + int targetShardId = randomIntBetween(0, numShards-1); + for (int j = 0; j < numDocs; j++) { + String routing = randomRealisticUnicodeOfCodepointLengthBetween(1, 5); + final int shardId = OperationRouting.generateShardId(metaData, Integer.toString(j), routing); + writer.addDocument(Arrays.asList( + new StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), + new StringField(RoutingFieldMapper.NAME, routing, Field.Store.YES), + new SortedNumericDocValuesField("shard_id", shardId) + )); + } + writer.commit(); + writer.close(); + assertSplit(dir, metaData, targetShardId); + dir.close(); + } + + + + + void assertSplit(Directory dir, IndexMetaData metaData, int targetShardId) throws IOException { + try (IndexReader reader = DirectoryReader.open(dir)) { + IndexSearcher searcher = new IndexSearcher(reader); + searcher.setQueryCache(null); + final boolean needsScores = false; + final Weight splitWeight = searcher.createNormalizedWeight(new ShardSplittingQuery(metaData, targetShardId), needsScores); + final List leaves = reader.leaves(); + for (final LeafReaderContext ctx : leaves) { + Scorer scorer = splitWeight.scorer(ctx); + DocIdSetIterator iterator = scorer.iterator(); + SortedNumericDocValues shard_id = ctx.reader().getSortedNumericDocValues("shard_id"); + int doc; + while ((doc = iterator.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + while (shard_id.nextDoc() < doc) { + long shardID = shard_id.nextValue(); + assertEquals(shardID, targetShardId); + } + assertEquals(shard_id.docID(), doc); + long shardID = shard_id.nextValue(); + BytesRef id = reader.document(doc).getBinaryValue("_id"); + String actualId = 
Uid.decodeId(id.bytes, id.offset, id.length); + assertNotEquals(ctx.reader() + " docID: " + doc + " actualID: " + actualId, shardID, targetShardId); + } + } + } + } +} diff --git a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java index 8d3ac8433d17d..05b092ff3a461 100644 --- a/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java +++ b/core/src/test/java/org/elasticsearch/index/shard/StoreRecoveryTests.java @@ -25,17 +25,28 @@ import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexWriter; import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.SegmentCommitInfo; import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.Terms; +import org.apache.lucene.index.TermsEnum; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.store.Directory; import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.IOUtils; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.OperationRouting; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.engine.InternalEngine; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.test.ESTestCase; @@ -46,7 +57,9 @@ import java.nio.file.attribute.BasicFileAttributes; import java.security.AccessControlException; import java.util.Arrays; +import java.util.HashSet; import java.util.Map; +import java.util.Set; import java.util.function.Predicate; import static org.hamcrest.CoreMatchers.equalTo; @@ -87,7 +100,7 @@ public void testAddIndices() throws IOException { Directory target = newFSDirectory(createTempDir()); final long maxSeqNo = randomNonNegativeLong(); final long maxUnsafeAutoIdTimestamp = randomNonNegativeLong(); - storeRecovery.addIndices(indexStats, target, indexSort, dirs, maxSeqNo, maxUnsafeAutoIdTimestamp); + storeRecovery.addIndices(indexStats, target, indexSort, dirs, maxSeqNo, maxUnsafeAutoIdTimestamp, null, 0, false); int numFiles = 0; Predicate filesFilter = (f) -> f.startsWith("segments") == false && f.equals("write.lock") == false && f.startsWith("extra") == false; @@ -122,6 +135,99 @@ public void testAddIndices() throws IOException { IOUtils.close(dirs); } + public void testSplitShard() throws IOException { + Directory dir = newFSDirectory(createTempDir()); + final int numDocs = randomIntBetween(50, 100); + final Sort indexSort; + if (randomBoolean()) { + indexSort = new Sort(new SortedNumericSortField("num", SortField.Type.LONG, true)); + } else { + indexSort = null; + } + int id = 0; + IndexWriterConfig iwc = newIndexWriterConfig() + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.CREATE); + if (indexSort != null) { + iwc.setIndexSort(indexSort); + } + IndexWriter writer = new IndexWriter(dir, iwc); + for (int j = 0; j < numDocs; j++) { + writer.addDocument(Arrays.asList( + new 
StringField(IdFieldMapper.NAME, Uid.encodeId(Integer.toString(j)), Field.Store.YES), + new SortedNumericDocValuesField("num", randomLong()) + )); + } + + writer.commit(); + writer.close(); + StoreRecovery storeRecovery = new StoreRecovery(new ShardId("foo", "bar", 1), logger); + RecoveryState.Index indexStats = new RecoveryState.Index(); + Directory target = newFSDirectory(createTempDir()); + final long maxSeqNo = randomNonNegativeLong(); + final long maxUnsafeAutoIdTimestamp = randomNonNegativeLong(); + int numShards = randomIntBetween(2, 10); + int targetShardId = randomIntBetween(0, numShards-1); + IndexMetaData metaData = IndexMetaData.builder("test") + .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)) + .numberOfShards(numShards) + .setRoutingNumShards(numShards * 1000000) + .numberOfReplicas(0).build(); + storeRecovery.addIndices(indexStats, target, indexSort, new Directory[] {dir}, maxSeqNo, maxUnsafeAutoIdTimestamp, metaData, + targetShardId, true); + + + SegmentInfos segmentCommitInfos = SegmentInfos.readLatestCommit(target); + final Map userData = segmentCommitInfos.getUserData(); + assertThat(userData.get(SequenceNumbers.MAX_SEQ_NO), equalTo(Long.toString(maxSeqNo))); + assertThat(userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY), equalTo(Long.toString(maxSeqNo))); + assertThat(userData.get(InternalEngine.MAX_UNSAFE_AUTO_ID_TIMESTAMP_COMMIT_ID), equalTo(Long.toString(maxUnsafeAutoIdTimestamp))); + for (SegmentCommitInfo info : segmentCommitInfos) { // check that we didn't merge + assertEquals("all sources must be flush", + info.info.getDiagnostics().get("source"), "flush"); + if (indexSort != null) { + assertEquals(indexSort, info.info.getIndexSort()); + } + } + + iwc = newIndexWriterConfig() + .setMergePolicy(NoMergePolicy.INSTANCE) + .setOpenMode(IndexWriterConfig.OpenMode.CREATE); + if (indexSort != null) { + iwc.setIndexSort(indexSort); + } + writer = new IndexWriter(target, iwc); + writer.forceMerge(1, true); + writer.commit(); + writer.close(); + + DirectoryReader reader = DirectoryReader.open(target); + for (LeafReaderContext ctx : reader.leaves()) { + LeafReader leafReader = ctx.reader(); + Terms terms = leafReader.terms(IdFieldMapper.NAME); + TermsEnum iterator = terms.iterator(); + BytesRef ref; + while((ref = iterator.next()) != null) { + String value = ref.utf8ToString(); + assertEquals("value has wrong shards: " + value, targetShardId, OperationRouting.generateShardId(metaData, value, null)); + } + for (int i = 0; i < numDocs; i++) { + ref = new BytesRef(Integer.toString(i)); + int shardId = OperationRouting.generateShardId(metaData, ref.utf8ToString(), null); + if (shardId == targetShardId) { + assertTrue(ref.utf8ToString() + " is missing", terms.iterator().seekExact(ref)); + } else { + assertFalse(ref.utf8ToString() + " was found but shouldn't", terms.iterator().seekExact(ref)); + } + + } + } + + reader.close(); + target.close(); + IOUtils.close(dir); + } + public void testStatsDirWrapper() throws IOException { Directory dir = newDirectory(); Directory target = newDirectory(); diff --git a/core/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java b/core/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java index b23ce6a9286bb..07a73a09f4ab4 100644 --- a/core/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java +++ b/core/src/test/java/org/elasticsearch/routing/PartitionedRoutingIT.java @@ -105,7 +105,7 @@ public void testShrinking() throws Exception { index = "index_" + currentShards; 
logger.info("--> shrinking index [" + previousIndex + "] to [" + index + "]"); - client().admin().indices().prepareShrinkIndex(previousIndex, index) + client().admin().indices().prepareResizeIndex(previousIndex, index) .setSettings(Settings.builder() .put("index.number_of_shards", currentShards) .put("index.number_of_replicas", numberOfReplicas()) diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java index 17412f8f724e4..5341b268544e7 100644 --- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java +++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java @@ -950,7 +950,7 @@ public void testRestoreShrinkIndex() throws Exception { logger.info("--> shrink the index"); assertAcked(client.admin().indices().prepareUpdateSettings(sourceIdx) .setSettings(Settings.builder().put("index.blocks.write", true)).get()); - assertAcked(client.admin().indices().prepareShrinkIndex(sourceIdx, shrunkIdx).get()); + assertAcked(client.admin().indices().prepareResizeIndex(sourceIdx, shrunkIdx).get()); logger.info("--> snapshot the shrunk index"); CreateSnapshotResponse createResponse = client.admin().cluster() diff --git a/core/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java b/core/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java index 6dd4fa384e99b..e5081481859ab 100644 --- a/core/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java +++ b/core/src/test/java/org/elasticsearch/test/search/aggregations/bucket/SharedSignificantTermsTestMethods.java @@ -50,7 +50,7 @@ public class SharedSignificantTermsTestMethods { public static void aggregateAndCheckFromSeveralShards(ESIntegTestCase testCase) throws ExecutionException, InterruptedException { String type = ESTestCase.randomBoolean() ? "text" : "keyword"; - String settings = "{\"index.number_of_shards\": 5, \"index.number_of_replicas\": 0}"; + String settings = "{\"index.number_of_shards\": 7, \"index.number_of_replicas\": 0}"; index01Docs(type, settings, testCase); testCase.ensureGreen(); testCase.logClusterState(); diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index 873021c420636..70d3b19d3fd59 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -16,6 +16,7 @@ index settings, aliases, mappings, and index templates. * <> * <> * <> +* <> * <> [float] diff --git a/docs/reference/indices/split-index.asciidoc b/docs/reference/indices/split-index.asciidoc new file mode 100644 index 0000000000000..467c09baa2432 --- /dev/null +++ b/docs/reference/indices/split-index.asciidoc @@ -0,0 +1,165 @@ +[[indices-split-index]] +== Split Index + +number_of_routing_shards + +The split index API allows you to split an existing index into a new index +with multiple of it's primary shards. Similarly to the <> +where the number of primary shards in the shrunk index must be a factor of the source index. +The `_split` API requires the source index to be created with a specific number of routing shards +in order to be split in the future. 
+(Note: this requirement might be removed in future releases.)
+The number of routing shards specifies the hashing space that is used internally to distribute documents
+across shards, in order to have a consistent hashing that is compatible with the method Elasticsearch
+uses today.
+For example, an index with `8` primary shards and an `index.number_of_routing_shards` of `32`
+can be split into `16` or `32` primary shards. An index with `1` primary shard
+and `index.number_of_routing_shards` of `64` can be split into `2`, `4`, `8`, `16`, `32` or `64`.
+The same works for non-power-of-two routing shards, i.e. an index with `1` primary shard and
+`index.number_of_routing_shards` set to `15` can be split into `3` and then `15`, or alternatively `5` and then `15`.
+The number of shards in the split index must always be a factor of `index.number_of_routing_shards`
+in the source index. Before splitting, a (primary) copy of every shard in the index must be active in the cluster.
+
+Splitting works as follows:
+
+* First, it creates a new target index with the same definition as the source
+  index, but with a larger number of primary shards.
+
+* Then it hard-links segments from the source index into the target index. (If
+  the file system doesn't support hard-linking, then all segments are copied
+  into the new index, which is a much more time-consuming process.)
+
+* Once the low-level files are created, all documents are `hashed` again to delete
+  documents that belong to a different shard (see the sketch below).
+
+* Finally, it recovers the target index as though it were a closed index which
+  had just been re-opened.
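+
+As an illustration of the re-hashing step, the following is a rough sketch of the
+logic, not the actual implementation: `OperationRouting.generateShardId` and
+`ShardSplittingQuery` are real classes used by this change, while `allDocuments`,
+`doc`, and `deleteDocument` are hypothetical helpers:
+
+[source,java]
+--------------------------------------------------
+// simplified sketch of the cleanup performed by ShardSplittingQuery: every document is
+// re-hashed against the fixed number_of_routing_shards hash space, and documents whose
+// shard id does not match the target shard are deleted from the hard-linked copy
+for (Document doc : allDocuments) {                // hypothetical iteration over the copy
+    int shardId = OperationRouting.generateShardId(targetIndexMetaData, doc.id(), doc.routing());
+    if (shardId != targetShardId) {
+        deleteDocument(doc);                       // hypothetical helper
+    }
+}
+--------------------------------------------------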
+
+[float]
+=== Preparing an index for splitting
+
+Create an index with a routing shards factor:
+
+[source,js]
+--------------------------------------------------
+PUT my_source_index
+{
+    "settings": {
+        "index.number_of_shards" : 1,
+        "index.number_of_routing_shards" : 2 <1>
+    }
+}
+--------------------------------------------------
+// CONSOLE
+
+<1> Allows the index to be split into two shards or, in other words, allows
+    for a single split operation.
+
+In order to split an index, the index must be marked as read-only,
+and have a <<cluster-health,cluster health>> status of `green`.
+
+This can be achieved with the following request:
+
+[source,js]
+--------------------------------------------------
+PUT /my_source_index/_settings
+{
+  "settings": {
+    "index.blocks.write": true <1>
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+<1> Prevents write operations to this index while still allowing metadata
+    changes like deleting the index.
+
+[float]
+=== Splitting an index
+
+To split `my_source_index` into a new index called `my_target_index`, issue
+the following request:
+
+[source,js]
+--------------------------------------------------
+POST my_source_index/_split/my_target_index
+{
+  "settings": {
+    "index.number_of_shards": 2
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[continued]
+
+The above request returns immediately once the target index has been added to
+the cluster state -- it doesn't wait for the split operation to start.
+
+[IMPORTANT]
+=====================================
+
+Indices can only be split if they satisfy the following requirements:
+
+* The target index must not exist.
+
+* The source index must have fewer primary shards than the target index.
+
+* The number of primary shards in the target index must be a multiple of the
+  number of primary shards in the source index.
+
+* The node handling the split process must have sufficient free disk space to
+  accommodate a second copy of the existing index.
+
+=====================================
+
+The `_split` API is similar to the <<indices-create-index,`create index` API>>
+and accepts `settings` and `aliases` parameters for the target index:
+
+[source,js]
+--------------------------------------------------
+POST my_source_index/_split/my_target_index
+{
+  "settings": {
+    "index.number_of_shards": 5 <1>
+  },
+  "aliases": {
+    "my_search_indices": {}
+  }
+}
+--------------------------------------------------
+// CONSOLE
+// TEST[s/^/PUT my_source_index\n{"settings": {"index.blocks.write": true, "index.number_of_routing_shards" : 5, "index.number_of_shards": "1"}}\n/]
+
+<1> The number of shards in the target index. This must be a multiple of the
+    number of shards in the source index.
+
+
+NOTE: Mappings may not be specified in the `_split` request, and all
+`index.analysis.*` and `index.similarity.*` settings will be overwritten with
+the settings from the source index.
+
+[float]
+=== Monitoring the split process
+
+The split process can be monitored with the <<cat-recovery,`_cat recovery`
+API>>, or the <<cluster-health,`cluster health` API>> can be used to wait
+until all primary shards have been allocated by setting the `wait_for_status`
+parameter to `yellow`.
+
+The `_split` API returns as soon as the target index has been added to the
+cluster state, before any shards have been allocated. At this point, all
+shards are in the state `unassigned`. If, for any reason, the target index
+can't be allocated, its primary shard will remain `unassigned` until it
+can be allocated.
+
+Once the primary shard is allocated, it moves to state `initializing`, and the
+split process begins. When the split operation completes, the shard will
+become `active`. At that point, Elasticsearch will try to allocate any
+replicas and may decide to relocate the primary shard to another node.
+
+[float]
+=== Wait For Active Shards
+
+Because the split operation creates a new index to split the shards to,
+the <<create-index-wait-for-active-shards,`wait_for_active_shards`>> setting
+on index creation applies to the split index action as well.
diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json
new file mode 100644
index 0000000000000..a79fa7b708269
--- /dev/null
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.split.json
@@ -0,0 +1,39 @@
+{
+  "indices.split": {
+    "documentation": "http://www.elastic.co/guide/en/elasticsearch/reference/master/indices-split-index.html",
+    "methods": ["PUT", "POST"],
+    "url": {
+      "path": "/{index}/_split/{target}",
+      "paths": ["/{index}/_split/{target}"],
+      "parts": {
+        "index": {
+          "type" : "string",
+          "required" : true,
+          "description" : "The name of the source index to split"
+        },
+        "target": {
+          "type" : "string",
+          "required" : true,
+          "description" : "The name of the target index to split into"
+        }
+      },
+      "params": {
+        "timeout": {
+          "type" : "time",
+          "description" : "Explicit operation timeout"
+        },
+        "master_timeout": {
+          "type" : "time",
+          "description" : "Specify timeout for connection to master"
+        },
+        "wait_for_active_shards": {
+          "type" : "string",
+          "description" : "Set the number of active shards to wait for on the split index before the operation returns."
+ } + } + }, + "body": { + "description" : "The configuration for the target index (`settings` and `aliases`)" + } + } +} diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml new file mode 100644 index 0000000000000..f51fc808b4623 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/10_basic.yml @@ -0,0 +1,101 @@ +--- +"Split index via API": + - skip: + version: " - 6.99.99" + reason: Added in 7.0.0 + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + index.number_of_shards: 1 + index.number_of_replicas: 0 + index.number_of_routing_shards: 2 + - do: + index: + index: source + type: doc + id: "1" + body: { "foo": "hello world" } + + - do: + index: + index: source + type: doc + id: "2" + body: { "foo": "hello world 2" } + + - do: + index: + index: source + type: doc + id: "3" + body: { "foo": "hello world 3" } + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_replicas: 0 + index.number_of_shards: 2 + + - do: + cluster.health: + wait_for_status: green + + - do: + get: + index: target + type: doc + id: "1" + + - match: { _index: target } + - match: { _type: doc } + - match: { _id: "1" } + - match: { _source: { foo: "hello world" } } + + + - do: + get: + index: target + type: doc + id: "2" + + - match: { _index: target } + - match: { _type: doc } + - match: { _id: "2" } + - match: { _source: { foo: "hello world 2" } } + + + - do: + get: + index: target + type: doc + id: "3" + + - match: { _index: target } + - match: { _type: doc } + - match: { _id: "3" } + - match: { _source: { foo: "hello world 3" } } + + + diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml new file mode 100644 index 0000000000000..ffd7ffe7a2946 --- /dev/null +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/indices.split/20_source_mapping.yml @@ -0,0 +1,72 @@ +--- +"Split index ignores target template mapping": + - skip: + version: " - 6.99.99" + reason: added in 7.0.0 + + # create index + - do: + indices.create: + index: source + wait_for_active_shards: 1 + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + index.number_of_routing_shards: 2 + mappings: + test: + properties: + count: + type: text + + # index document + - do: + index: + index: source + type: test + id: "1" + body: { "count": "1" } + + # create template matching shrink target + - do: + indices.put_template: + name: tpl1 + body: + index_patterns: targ* + mappings: + test: + properties: + count: + type: integer + + # make it read-only + - do: + indices.put_settings: + index: source + body: + index.blocks.write: true + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: green + index: source + + # now we do the actual split + - do: + indices.split: + index: "source" + target: "target" + wait_for_active_shards: 1 + master_timeout: 10s + body: + settings: + index.number_of_shards: 2 + index.number_of_replicas: 0 + + - do: + cluster.health: + wait_for_status: 
green + + diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java index a4ac6fad241a5..2291c3d39e200 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/routing/TestShardRouting.java @@ -39,6 +39,10 @@ public static ShardRouting newShardRouting(String index, int shardId, String cur return newShardRouting(new ShardId(index, IndexMetaData.INDEX_UUID_NA_VALUE, shardId), currentNodeId, primary, state); } + public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, RecoverySource recoverySource, ShardRoutingState state) { + return new ShardRouting(shardId, currentNodeId, null, primary, state, recoverySource, buildUnassignedInfo(state), buildAllocationId(state), -1); + } + public static ShardRouting newShardRouting(ShardId shardId, String currentNodeId, boolean primary, ShardRoutingState state) { return new ShardRouting(shardId, currentNodeId, null, primary, state, buildRecoveryTarget(primary, state), buildUnassignedInfo(state), buildAllocationId(state), -1); } From a8ff4960f3d6876bf359de005c5c3528619389dc Mon Sep 17 00:00:00 2001 From: Boaz Leskes Date: Mon, 6 Nov 2017 12:55:41 +0100 Subject: [PATCH 13/25] add split index reference in indices.asciidoc Relates to #26931 --- docs/reference/indices.asciidoc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/reference/indices.asciidoc b/docs/reference/indices.asciidoc index 70d3b19d3fd59..cda7c41cb42d1 100644 --- a/docs/reference/indices.asciidoc +++ b/docs/reference/indices.asciidoc @@ -71,6 +71,8 @@ include::indices/open-close.asciidoc[] include::indices/shrink-index.asciidoc[] +include::indices/split-index.asciidoc[] + include::indices/rollover-index.asciidoc[] include::indices/put-mapping.asciidoc[] From d7fa09153a01ef6695622bb735ff4bd1e91d59f1 Mon Sep 17 00:00:00 2001 From: kel Date: Mon, 6 Nov 2017 09:19:16 -0600 Subject: [PATCH 14/25] Remove duplicated SnapshotStatus (#27276) --- .../index/shard/SnapshotStatus.java | 142 ------------------ 1 file changed, 142 deletions(-) delete mode 100644 core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java diff --git a/core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java b/core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java deleted file mode 100644 index 32ddcd1c2733b..0000000000000 --- a/core/src/main/java/org/elasticsearch/index/shard/SnapshotStatus.java +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.index.shard; - -public class SnapshotStatus { - - public enum Stage { - NONE, - INDEX, - TRANSLOG, - FINALIZE, - DONE, - FAILURE - } - - private Stage stage = Stage.NONE; - - private long startTime; - - private long time; - - private Index index = new Index(); - - private Translog translog = new Translog(); - - public Stage stage() { - return this.stage; - } - - public SnapshotStatus updateStage(Stage stage) { - this.stage = stage; - return this; - } - - public long startTime() { - return this.startTime; - } - - public void startTime(long startTime) { - this.startTime = startTime; - } - - public long time() { - return this.time; - } - - public void time(long time) { - this.time = time; - } - - public Index index() { - return index; - } - - public Translog translog() { - return translog; - } - - public static class Index { - private long startTime; - private long time; - - private int numberOfFiles; - private long totalSize; - - public long startTime() { - return this.startTime; - } - - public void startTime(long startTime) { - this.startTime = startTime; - } - - public long time() { - return this.time; - } - - public void time(long time) { - this.time = time; - } - - public void files(int numberOfFiles, long totalSize) { - this.numberOfFiles = numberOfFiles; - this.totalSize = totalSize; - } - - public int numberOfFiles() { - return numberOfFiles; - } - - public long totalSize() { - return totalSize; - } - } - - public static class Translog { - private long startTime; - private long time; - private int expectedNumberOfOperations; - - public long startTime() { - return this.startTime; - } - - public void startTime(long startTime) { - this.startTime = startTime; - } - - public long time() { - return this.time; - } - - public void time(long time) { - this.time = time; - } - - public int expectedNumberOfOperations() { - return expectedNumberOfOperations; - } - - public void expectedNumberOfOperations(int expectedNumberOfOperations) { - this.expectedNumberOfOperations = expectedNumberOfOperations; - } - } -} From 8e9b30417c14334d870dedbc71b8991ddac7f37f Mon Sep 17 00:00:00 2001 From: Boris Tyukin Date: Mon, 6 Nov 2017 11:29:48 -0500 Subject: [PATCH 15/25] Update to support bulk updates by query (#27172) Getting started doc stated that bulk updates by query are not supported but they are now --- docs/reference/getting-started.asciidoc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/getting-started.asciidoc b/docs/reference/getting-started.asciidoc index f72d08397a150..b738c1c6186a2 100755 --- a/docs/reference/getting-started.asciidoc +++ b/docs/reference/getting-started.asciidoc @@ -586,7 +586,7 @@ POST /customer/doc/1/_update?pretty In the above example, `ctx._source` refers to the current source document that is about to be updated. -Note that as of this writing, updates can only be performed on a single document at a time. In the future, Elasticsearch might provide the ability to update multiple documents given a query condition (like an `SQL UPDATE-WHERE` statement). +Elasticsearch provides the ability to update multiple documents given a query condition (like an `SQL UPDATE-WHERE` statement). 
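+For example, a minimal sketch of such an update by query, reusing the
+`customer` index from the earlier examples (the script itself is purely
+illustrative and not part of the original example set):
+
+[source,js]
+--------------------------------------------------
+POST /customer/_update_by_query?pretty
+{
+  "script": {
+    "source": "ctx._source.name = ctx._source.name + ' (updated)'",
+    "lang": "painless"
+  },
+  "query": {
+    "match_all": {}
+  }
+}
+--------------------------------------------------
+// CONSOLE
+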
See {ref}/docs-update-by-query.html[`docs-update-by-query` API]

=== Deleting Documents
From a0bdedb143f937b69a498799d07dd767cec0c798 Mon Sep 17 00:00:00 2001
From: Russ Cam
Date: Tue, 7 Nov 2017 03:34:22 +1100
Subject: [PATCH 16/25] Align routing param type with search.json (#26958)

Relates https://github.com/elastic/elasticsearch-net/issues/2869
---
 rest-api-spec/src/main/resources/rest-api-spec/api/count.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json
index 1275983ef238f..96fa4daf12b95 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/api/count.json
+++ b/rest-api-spec/src/main/resources/rest-api-spec/api/count.json
@@ -39,8 +39,8 @@
         "description" : "Specify the node or shard the operation should be performed on (default: random)"
       },
       "routing": {
-        "type" : "string",
-        "description" : "Specific routing value"
+        "type" : "list",
+        "description" : "A comma-separated list of specific routing values"
      },
       "q": {
         "type" : "string",
From 09294a9b9a143cc954e8b888e8504580f05523dc Mon Sep 17 00:00:00 2001
From: Nick Lang
Date: Mon, 6 Nov 2017 09:45:04 -0700
Subject: [PATCH 17/25] keys in aggs percentiles need to be in quotes. (#26905)

More strongly typed languages will fail when comparing these results.
---
 .../180_percentiles_tdigest_metric.yml        | 140 +++++++--------
 .../190_percentiles_hdr_metric.yml            | 166 +++++++++---------
 2 files changed, 153 insertions(+), 153 deletions(-)

diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml
index 1b985c668933f..4301e6824e16a 100644
--- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml
+++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/180_percentiles_tdigest_metric.yml
@@ -67,22 +67,22 @@ setup:
   - length: { hits.hits: 4 }
   - match:
       aggregations.percentiles_int.values:
-        1.0: 2.5
-        5.0: 8.500000000000002
-        25.0: 38.5
-        50.0: 76.0
-        75.0: 113.5
-        95.0: 143.49999999999997
-        99.0: 149.5
+        "1.0": 2.5
+        "5.0": 8.500000000000002
+        "25.0": 38.5
+        "50.0": 76.0
+        "75.0": 113.5
+        "95.0": 143.49999999999997
+        "99.0": 149.5
   - match:
       aggregations.percentiles_double.values:
-        1.0: 2.5
-        5.0: 8.500000000000002
-        25.0: 38.5
-        50.0: 76.0
-        75.0: 113.5
-        95.0: 143.49999999999997
-        99.0: 149.5
+        "1.0": 2.5
+        "5.0": 8.500000000000002
+        "25.0": 38.5
+        "50.0": 76.0
+        "75.0": 113.5
+        "95.0": 143.49999999999997
+        "99.0": 149.5

   - do:
       search:
@@ -104,22 +104,22 @@ setup:
   - length: { hits.hits: 4 }
   - match:
       aggregations.percentiles_int.values:
-        1.0: 2.5
-        5.0: 8.500000000000002
-        25.0: 38.5
-        50.0: 76.0
-        75.0: 113.5
-        95.0: 143.49999999999997
-        99.0: 149.5
+        "1.0": 2.5
+        "5.0": 8.500000000000002
+        "25.0": 38.5
+        "50.0": 76.0
+        "75.0": 113.5
+        "95.0": 143.49999999999997
+        "99.0": 149.5
   - match:
       aggregations.percentiles_double.values:
-        1.0: 2.5
-        5.0: 8.500000000000002
-        25.0: 38.5
-        50.0: 76.0
-        75.0: 113.5
-        95.0: 143.49999999999997
-        99.0: 149.5
+        "1.0": 2.5
+        "5.0": 8.500000000000002
+        "25.0": 38.5
+        "50.0": 76.0
+        "75.0": 113.5
+        "95.0": 143.49999999999997
+        "99.0": 149.5

 ---
 "Only aggs test":
@@ -140,22 +140,22 @@ setup:
   - length: { hits.hits: 0 }
   - match:
       aggregations.percentiles_int.values:
-        1.0: 2.5
-        5.0: 8.500000000000002
-        25.0: 38.5
-        50.0: 76.0
-
75.0: 113.5 - 95.0: 143.49999999999997 - 99.0: 149.5 + "1.0": 2.5 + "5.0": 8.500000000000002 + "25.0": 38.5 + "50.0": 76.0 + "75.0": 113.5 + "95.0": 143.49999999999997 + "99.0": 149.5 - match: aggregations.percentiles_double.values: - 1.0: 2.5 - 5.0: 8.500000000000002 - 25.0: 38.5 - 50.0: 76.0 - 75.0: 113.5 - 95.0: 143.49999999999997 - 99.0: 149.5 + "1.0": 2.5 + "5.0": 8.500000000000002 + "25.0": 38.5 + "50.0": 76.0 + "75.0": 113.5 + "95.0": 143.49999999999997 + "99.0": 149.5 --- "Filtered test": @@ -181,22 +181,22 @@ setup: - length: { hits.hits: 3 } - match: aggregations.percentiles_int.values: - 1.0: 52.0 - 5.0: 56.0 - 25.0: 76.0 - 50.0: 101.0 - 75.0: 126.0 - 95.0: 146.0 - 99.0: 150.0 + "1.0": 52.0 + "5.0": 56.0 + "25.0": 76.0 + "50.0": 101.0 + "75.0": 126.0 + "95.0": 146.0 + "99.0": 150.0 - match: aggregations.percentiles_double.values: - 1.0: 52.0 - 5.0: 56.0 - 25.0: 76.0 - 50.0: 101.0 - 75.0: 126.0 - 95.0: 146.0 - 99.0: 150.0 + "1.0": 52.0 + "5.0": 56.0 + "25.0": 76.0 + "50.0": 101.0 + "75.0": 126.0 + "95.0": 146.0 + "99.0": 150.0 --- "Missing field with missing param": @@ -214,13 +214,13 @@ setup: - length: { hits.hits: 4 } - match: aggregations.percentiles_missing.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 1.0 - 75.0: 1.0 - 95.0: 1.0 - 99.0: 1.0 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 1.0 + "75.0": 1.0 + "95.0": 1.0 + "99.0": 1.0 --- "Missing field without missing param": @@ -255,13 +255,13 @@ setup: - match: { aggregations.percentiles_int.meta.foo: "bar" } - match: aggregations.percentiles_int.values: - 1.0: 2.5 - 5.0: 8.500000000000002 - 25.0: 38.5 - 50.0: 76.0 - 75.0: 113.5 - 95.0: 143.49999999999997 - 99.0: 149.5 + "1.0": 2.5 + "5.0": 8.500000000000002 + "25.0": 38.5 + "50.0": 76.0 + "75.0": 113.5 + "95.0": 143.49999999999997 + "99.0": 149.5 --- "Invalid params test": diff --git a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml index 1d527efbf8a29..426faae503517 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml +++ b/rest-api-spec/src/main/resources/rest-api-spec/test/search.aggregation/190_percentiles_hdr_metric.yml @@ -69,22 +69,22 @@ setup: - length: { hits.hits: 4 } - match: aggregations.percentiles_int.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 - match: aggregations.percentiles_double.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 - do: search: @@ -106,22 +106,22 @@ setup: - length: { hits.hits: 4 } - match: aggregations.percentiles_int.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 - match: aggregations.percentiles_double.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + 
"5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 --- "Only aggs test": @@ -144,22 +144,22 @@ setup: - length: { hits.hits: 0 } - match: aggregations.percentiles_int.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 - match: aggregations.percentiles_double.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 --- "Filtered test": @@ -187,22 +187,22 @@ setup: - length: { hits.hits: 3 } - match: aggregations.percentiles_int.values: - 1.0: 51.0 - 5.0: 51.0 - 25.0: 51.0 - 50.0: 101.03125 - 75.0: 101.03125 - 95.0: 151.09375 - 99.0: 151.09375 + "1.0": 51.0 + "5.0": 51.0 + "25.0": 51.0 + "50.0": 101.03125 + "75.0": 101.03125 + "95.0": 151.09375 + "99.0": 151.09375 - match: aggregations.percentiles_double.values: - 1.0: 51.0 - 5.0: 51.0 - 25.0: 51.0 - 50.0: 101.03125 - 75.0: 101.03125 - 95.0: 151.09375 - 99.0: 151.09375 + "1.0": 51.0 + "5.0": 51.0 + "25.0": 51.0 + "50.0": 101.03125 + "75.0": 101.03125 + "95.0": 151.09375 + "99.0": 151.09375 --- "Missing field with missing param": @@ -221,13 +221,13 @@ setup: - length: { hits.hits: 4 } - match: aggregations.percentiles_missing.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 1.0 - 75.0: 1.0 - 95.0: 1.0 - 99.0: 1.0 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 1.0 + "75.0": 1.0 + "95.0": 1.0 + "99.0": 1.0 --- "Missing field without missing param": @@ -264,13 +264,13 @@ setup: - match: { aggregations.percentiles_int.meta.foo: "bar" } - match: aggregations.percentiles_int.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 --- "Invalid params test": @@ -374,14 +374,14 @@ setup: - length: { hits.hits: 4 } - match: aggregations.percentiles_int.values: - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 - match: aggregations.percentiles_double.values: - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 --- "Non-keyed test": @@ -435,13 +435,13 @@ setup: - length: { hits.hits: 4 } - match: aggregations.percentiles_int.values: - 1.0: 1.0 - 5.0: 1.0 - 25.0: 1.0 - 50.0: 51.0302734375 - 75.0: 101.0615234375 - 95.0: 151.1240234375 - 99.0: 151.1240234375 + "1.0": 1.0 + "5.0": 1.0 + "25.0": 1.0 + "50.0": 51.0302734375 + "75.0": 101.0615234375 + "95.0": 151.1240234375 + "99.0": 151.1240234375 - match: { _shards.failures.0.reason.type: array_index_out_of_bounds_exception } From 7f593a26a3ff5a0bab978cb39b9276a8ae910d12 Mon Sep 17 00:00:00 2001 From: olcbean Date: Mon, 6 Nov 2017 17:58:27 +0100 Subject: [PATCH 18/25] Setting url parts as required to reflect the code base (#27263) --- .../main/resources/rest-api-spec/api/indices.exists_alias.json | 1 + .../src/main/resources/rest-api-spec/api/reindex_rethrottle.json | 1 + .../src/main/resources/rest-api-spec/api/tasks.get.json | 1 + 3 files changed, 3 insertions(+) diff --git 
a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json index 8891aebd223ec..aea20b2b634d0 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/indices.exists_alias.json @@ -12,6 +12,7 @@ }, "name": { "type" : "list", + "required" : true, "description" : "A comma-separated list of alias names to return" } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json index 4bba41d37d504..4004409ab6883 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/reindex_rethrottle.json @@ -8,6 +8,7 @@ "parts": { "task_id": { "type": "string", + "required" : true, "description": "The task id to rethrottle" } }, diff --git a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json index f97206cd16f72..e17acb0512c9b 100644 --- a/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json +++ b/rest-api-spec/src/main/resources/rest-api-spec/api/tasks.get.json @@ -8,6 +8,7 @@ "parts": { "task_id": { "type": "string", + "required" : true, "description": "Return the task with specified id (node_id:task_number)" } }, From 4b7b1e270605b9375b6c1f65633155594bdf6736 Mon Sep 17 00:00:00 2001 From: Patrice Bourgougnon Date: Mon, 6 Nov 2017 18:13:27 +0100 Subject: [PATCH 19/25] Add an active Elasticsearch WordPress plugin link (#27279) --- docs/plugins/integrations.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/plugins/integrations.asciidoc b/docs/plugins/integrations.asciidoc index 4cfc5ab7539a8..9ece2febde180 100644 --- a/docs/plugins/integrations.asciidoc +++ b/docs/plugins/integrations.asciidoc @@ -17,6 +17,9 @@ Integrations are not plugins, but are external tools or modules that make it eas * https://drupal.org/project/elasticsearch_connector[Drupal]: Drupal Elasticsearch integration. +* https://wordpress.org/plugins/wpsolr-search-engine/[WPSOLR]: + Elasticsearch (and Apache Solr) WordPress Plugin + * http://searchbox-io.github.com/wp-elasticsearch/[Wp-Elasticsearch]: Elasticsearch WordPress Plugin From 5a925cd40cf97bdb6c3b94db0fbb0fa19f12388f Mon Sep 17 00:00:00 2001 From: Shubham Aggarwal Date: Mon, 6 Nov 2017 23:45:36 +0530 Subject: [PATCH 20/25] Fixed references to Multi Index Syntax (#27283) --- docs/reference/modules/snapshots.asciidoc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/reference/modules/snapshots.asciidoc b/docs/reference/modules/snapshots.asciidoc index 59d951f6aeacd..f5b561600492b 100644 --- a/docs/reference/modules/snapshots.asciidoc +++ b/docs/reference/modules/snapshots.asciidoc @@ -282,7 +282,7 @@ PUT /_snapshot/my_backup/snapshot_2?wait_for_completion=true // TEST[continued] The list of indices that should be included into the snapshot can be specified using the `indices` parameter that -supports <>. The snapshot request also supports the +supports <>. The snapshot request also supports the `ignore_unavailable` option. Setting it to `true` will cause indices that do not exist to be ignored during snapshot creation. By default, when `ignore_unavailable` option is not set and an index is missing the snapshot request will fail. 
By setting `include_global_state` to false it's possible to prevent the cluster global state to be stored as part of
@@ -429,7 +429,7 @@ By default, all indices in the snapshot are restored, and the cluster state is
 *not* restored. It's possible to select indices that should be restored as well
 as to allow the global cluster state from being restored by using `indices` and
 `include_global_state` options in the restore request body. The list of indices
-supports <>. The `rename_pattern`
+supports <>. The `rename_pattern`
 and `rename_replacement` options can be also used to rename indices on restore
 using regular expression that supports referencing the original text as
 explained
From 766d29e7cf195443559421a2eb0bae3453b7316d Mon Sep 17 00:00:00 2001
From: Jason Tedor
Date: Mon, 6 Nov 2017 13:20:30 -0500
Subject: [PATCH 21/25] Correctly encode warning headers

The warning headers have a fairly limited set of valid characters (cf.
quoted-text in RFC 7230). While we have assertions that we adhere to
this set of valid characters, ensuring that our warning messages do not
violate the specification, we were neglecting the possibility that
arbitrary user input would trickle into these warning headers. What was
missing were tests for these situations and encoding of characters that
appear outside the set of valid characters. This commit addresses this
by encoding any characters in a deprecation message that are not from
the set of valid characters.

Relates #27269
---
 .../common/logging/DeprecationLogger.java     | 94 ++++++++++++++++++-
 .../logging/DeprecationLoggerTests.java       | 77 +++++++++++++--
 .../common/logging/EvilLoggerTests.java       |  5 +-
 .../org/elasticsearch/test/ESTestCase.java    |  2 +-
 .../test/rest/yaml/section/DoSection.java     |  2 +-
 5 files changed, 165 insertions(+), 15 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
index 3ed1d9d30ac1a..1c559cf64fbb7 100644
--- a/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
+++ b/core/src/main/java/org/elasticsearch/common/logging/DeprecationLogger.java
@@ -26,11 +26,14 @@
 import org.elasticsearch.common.SuppressLoggerChecks;
 import org.elasticsearch.common.util.concurrent.ThreadContext;

+import java.io.CharArrayWriter;
+import java.nio.charset.Charset;
 import java.time.ZoneId;
 import java.time.ZonedDateTime;
 import java.time.format.DateTimeFormatter;
 import java.time.format.DateTimeFormatterBuilder;
 import java.time.format.SignStyle;
+import java.util.BitSet;
 import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -228,7 +231,7 @@ public void deprecatedAndMaybeLog(final String key, final String msg, final Obje
     public static Pattern WARNING_HEADER_PATTERN = Pattern.compile(
             "299 " + // warn code
             "Elasticsearch-\\d+\\.\\d+\\.\\d+(?:-(?:alpha|beta|rc)\\d+)?(?:-SNAPSHOT)?-(?:[a-f0-9]{7}|Unknown) " + // warn agent
-            "\"((?:\t| |!|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x80-\\xff]|\\\\|\\\\\")*)\" " + // quoted warning value, captured
+            "\"((?:\t| |!|[\\x23-\\x5B]|[\\x5D-\\x7E]|[\\x80-\\xFF]|\\\\|\\\\\")*)\" " + // quoted warning value, captured
             // quoted RFC 1123 date format
             "\"" + // opening quote
             "(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun), " + // weekday
@@ -304,7 +307,7 @@ void deprecated(final Set threadContexts, final String message, f
             final String formattedMessage = LoggerMessageFormat.format(message, params);
             final String warningHeaderValue = formatWarning(formattedMessage);
             assert
WARNING_HEADER_PATTERN.matcher(warningHeaderValue).matches(); - assert extractWarningValueFromWarningHeader(warningHeaderValue).equals(escape(formattedMessage)); + assert extractWarningValueFromWarningHeader(warningHeaderValue).equals(escapeAndEncode(formattedMessage)); while (iterator.hasNext()) { try { final ThreadContext next = iterator.next(); @@ -328,7 +331,17 @@ void deprecated(final Set threadContexts, final String message, f * @return a warning value formatted according to RFC 7234 */ public static String formatWarning(final String s) { - return String.format(Locale.ROOT, WARNING_FORMAT, escape(s), RFC_7231_DATE_TIME.format(ZonedDateTime.now(GMT))); + return String.format(Locale.ROOT, WARNING_FORMAT, escapeAndEncode(s), RFC_7231_DATE_TIME.format(ZonedDateTime.now(GMT))); + } + + /** + * Escape and encode a string as a valid RFC 7230 quoted-string. + * + * @param s the string to escape and encode + * @return the escaped and encoded string + */ + public static String escapeAndEncode(final String s) { + return encode(escapeBackslashesAndQuotes(s)); } /** @@ -337,8 +350,81 @@ public static String formatWarning(final String s) { * @param s the string to escape * @return the escaped string */ - public static String escape(String s) { + static String escapeBackslashesAndQuotes(final String s) { return s.replaceAll("([\"\\\\])", "\\\\$1"); } + private static BitSet doesNotNeedEncoding; + + static { + doesNotNeedEncoding = new BitSet(1 + 0xFF); + doesNotNeedEncoding.set('\t'); + doesNotNeedEncoding.set(' '); + doesNotNeedEncoding.set('!'); + doesNotNeedEncoding.set('\\'); + doesNotNeedEncoding.set('"'); + // we have to skip '%' which is 0x25 so that it is percent-encoded too + for (int i = 0x23; i <= 0x24; i++) { + doesNotNeedEncoding.set(i); + } + for (int i = 0x26; i <= 0x5B; i++) { + doesNotNeedEncoding.set(i); + } + for (int i = 0x5D; i <= 0x7E; i++) { + doesNotNeedEncoding.set(i); + } + for (int i = 0x80; i <= 0xFF; i++) { + doesNotNeedEncoding.set(i); + } + assert !doesNotNeedEncoding.get('%'); + } + + private static final Charset UTF_8 = Charset.forName("UTF-8"); + + /** + * Encode a string containing characters outside of the legal characters for an RFC 7230 quoted-string. + * + * @param s the string to encode + * @return the encoded string + */ + static String encode(final String s) { + final StringBuilder sb = new StringBuilder(s.length()); + boolean encodingNeeded = false; + for (int i = 0; i < s.length();) { + int current = (int) s.charAt(i); + /* + * Either the character does not need encoding or it does; when the character does not need encoding we append the character to + * a buffer and move to the next character and when the character does need encoding, we peel off as many characters as possible + * which we encode using UTF-8 until we encounter another character that does not need encoding. + */ + if (doesNotNeedEncoding.get(current)) { + // append directly and move to the next character + sb.append((char) current); + i++; + } else { + int startIndex = i; + do { + i++; + } while (i < s.length() && !doesNotNeedEncoding.get(s.charAt(i))); + + final byte[] bytes = s.substring(startIndex, i).getBytes(UTF_8); + // noinspection ForLoopReplaceableByForEach + for (int j = 0; j < bytes.length; j++) { + sb.append('%').append(hex(bytes[j] >> 4)).append(hex(bytes[j])); + } + encodingNeeded = true; + } + } + return encodingNeeded ? 
sb.toString() : s; + } + + private static char hex(int b) { + final char ch = Character.forDigit(b & 0xF, 16); + if (Character.isLetter(ch)) { + return Character.toUpperCase(ch); + } else { + return ch; + } + } + } diff --git a/core/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java b/core/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java index 3f2274321a249..fdb530749e105 100644 --- a/core/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java +++ b/core/src/test/java/org/elasticsearch/common/logging/DeprecationLoggerTests.java @@ -23,11 +23,13 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.hamcrest.RegexMatcher; +import org.hamcrest.core.IsSame; import java.io.IOException; import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.stream.IntStream; @@ -71,6 +73,54 @@ public void testAddsHeaderWithThreadContext() throws IOException { } } + public void testContainingNewline() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + final Set threadContexts = Collections.singleton(threadContext); + + logger.deprecated(threadContexts, "this message contains a newline\n"); + + final Map> responseHeaders = threadContext.getResponseHeaders(); + + assertThat(responseHeaders.size(), equalTo(1)); + final List responses = responseHeaders.get("Warning"); + assertThat(responses, hasSize(1)); + assertThat(responses.get(0), warningValueMatcher); + assertThat(responses.get(0), containsString("\"this message contains a newline%0A\"")); + } + } + + public void testSurrogatePair() throws IOException { + try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { + final Set threadContexts = Collections.singleton(threadContext); + + logger.deprecated(threadContexts, "this message contains a surrogate pair 😱"); + + final Map> responseHeaders = threadContext.getResponseHeaders(); + + assertThat(responseHeaders.size(), equalTo(1)); + final List responses = responseHeaders.get("Warning"); + assertThat(responses, hasSize(1)); + assertThat(responses.get(0), warningValueMatcher); + + // convert UTF-16 to UTF-8 by hand to show the hard-coded constant below is correct + assertThat("😱", equalTo("\uD83D\uDE31")); + final int code = 0x10000 + ((0xD83D & 0x3FF) << 10) + (0xDE31 & 0x3FF); + @SuppressWarnings("PointlessBitwiseExpression") + final int[] points = new int[] { + (code >> 18) & 0x07 | 0xF0, + (code >> 12) & 0x3F | 0x80, + (code >> 6) & 0x3F | 0x80, + (code >> 0) & 0x3F | 0x80}; + final StringBuilder sb = new StringBuilder(); + // noinspection ForLoopReplaceableByForEach + for (int i = 0; i < points.length; i++) { + sb.append("%").append(Integer.toString(points[i], 16).toUpperCase(Locale.ROOT)); + } + assertThat(sb.toString(), equalTo("%F0%9F%98%B1")); + assertThat(responses.get(0), containsString("\"this message contains a surrogate pair %F0%9F%98%B1\"")); + } + } + public void testAddsCombinedHeaderWithThreadContext() throws IOException { try (ThreadContext threadContext = new ThreadContext(Settings.EMPTY)) { final Set threadContexts = Collections.singleton(threadContext); @@ -172,15 +222,28 @@ public void testWarningValueFromWarningHeader() throws InterruptedException { assertThat(DeprecationLogger.extractWarningValueFromWarningHeader(first), equalTo(s)); } - public void testEscape() { - 
assertThat(DeprecationLogger.escape("\\"), equalTo("\\\\")); - assertThat(DeprecationLogger.escape("\""), equalTo("\\\"")); - assertThat(DeprecationLogger.escape("\\\""), equalTo("\\\\\\\"")); - assertThat(DeprecationLogger.escape("\"foo\\bar\""),equalTo("\\\"foo\\\\bar\\\"")); + public void testEscapeBackslashesAndQuotes() { + assertThat(DeprecationLogger.escapeBackslashesAndQuotes("\\"), equalTo("\\\\")); + assertThat(DeprecationLogger.escapeBackslashesAndQuotes("\""), equalTo("\\\"")); + assertThat(DeprecationLogger.escapeBackslashesAndQuotes("\\\""), equalTo("\\\\\\\"")); + assertThat(DeprecationLogger.escapeBackslashesAndQuotes("\"foo\\bar\""),equalTo("\\\"foo\\\\bar\\\"")); // test that characters other than '\' and '"' are left unchanged - String chars = "\t !" + range(0x23, 0x5b) + range(0x5d, 0x73) + range(0x80, 0xff); + String chars = "\t !" + range(0x23, 0x24) + range(0x26, 0x5b) + range(0x5d, 0x73) + range(0x80, 0xff); + final String s = new CodepointSetGenerator(chars.toCharArray()).ofCodePointsLength(random(), 16, 16); + assertThat(DeprecationLogger.escapeBackslashesAndQuotes(s), equalTo(s)); + } + + public void testEncode() { + assertThat(DeprecationLogger.encode("\n"), equalTo("%0A")); + assertThat(DeprecationLogger.encode("😱"), equalTo("%F0%9F%98%B1")); + assertThat(DeprecationLogger.encode("福島深雪"), equalTo("%E7%A6%8F%E5%B3%B6%E6%B7%B1%E9%9B%AA")); + assertThat(DeprecationLogger.encode("100%\n"), equalTo("100%25%0A")); + // test that valid characters are left unchanged + String chars = "\t !" + range(0x23, 0x24) + range(0x26, 0x5b) + range(0x5d, 0x73) + range(0x80, 0xff) + '\\' + '"'; final String s = new CodepointSetGenerator(chars.toCharArray()).ofCodePointsLength(random(), 16, 16); - assertThat(DeprecationLogger.escape(s), equalTo(s)); + assertThat(DeprecationLogger.encode(s), equalTo(s)); + // when no encoding is needed, the original string is returned (optimization) + assertThat(DeprecationLogger.encode(s), IsSame.sameInstance(s)); } private String range(int lowerInclusive, int upperInclusive) { diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java index 97692e5ea6b6e..d4bc754689e68 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/common/logging/EvilLoggerTests.java @@ -28,7 +28,6 @@ import org.apache.logging.log4j.core.appender.CountingNoOpAppender; import org.apache.logging.log4j.core.config.Configurator; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.util.Constants; import org.elasticsearch.cli.UserException; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.common.Randomness; @@ -165,7 +164,9 @@ public void testConcurrentDeprecationLogger() throws IOException, UserException, final Set actualWarningValues = warnings.stream().map(DeprecationLogger::extractWarningValueFromWarningHeader).collect(Collectors.toSet()); for (int j = 0; j < 128; j++) { - assertThat(actualWarningValues, hasItem(DeprecationLogger.escape("This is a maybe logged deprecation message" + j))); + assertThat( + actualWarningValues, + hasItem(DeprecationLogger.escapeAndEncode("This is a maybe logged deprecation message" + j))); } try { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index db43b5c9c599a..e10411e5a435e 
100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java
@@ -341,7 +341,7 @@ protected final void assertWarnings(String... expectedWarnings) {
         final Set actualWarningValues =
                 actualWarnings.stream().map(DeprecationLogger::extractWarningValueFromWarningHeader).collect(Collectors.toSet());
         for (String msg : expectedWarnings) {
-            assertThat(actualWarningValues, hasItem(DeprecationLogger.escape(msg)));
+            assertThat(actualWarningValues, hasItem(DeprecationLogger.escapeAndEncode(msg)));
         }
         assertEquals("Expected " + expectedWarnings.length + " warnings but found " + actualWarnings.size() + "\nExpected: "
                 + Arrays.asList(expectedWarnings) + "\nActual: " + actualWarnings,
diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java
index d509b6685a290..082040fb1eb47 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/rest/yaml/section/DoSection.java
@@ -263,7 +263,7 @@ void checkWarningHeaders(final List warningHeaders) {
         final List missing = new ArrayList<>();
         // LinkedHashSet so that missing expected warnings come back in a predictable order which is nice for testing
         final Set expected =
-                new LinkedHashSet<>(expectedWarningHeaders.stream().map(DeprecationLogger::escape).collect(Collectors.toList()));
+                new LinkedHashSet<>(expectedWarningHeaders.stream().map(DeprecationLogger::escapeAndEncode).collect(Collectors.toList()));
         for (final String header : warningHeaders) {
             final Matcher matcher = WARNING_HEADER_PATTERN.matcher(header);
             final boolean matches = matcher.matches();
From 6e9e07d6f89536a9f97b29bcc7905de9bc863a86 Mon Sep 17 00:00:00 2001
From: Zachary Tong
Date: Mon, 6 Nov 2017 16:37:33 -0500
Subject: [PATCH 22/25] Fix profiling naming issues (#27133)

Some code-paths use anonymous classes (such as NonCollectingAggregator
in the terms agg), which messes up the display names in the profiler
output. If we encounter an anonymous class, we need to grab the super's
name instead.

Another naming issue was that ProfileAggs were not delegating to the
wrapped agg's name for toString(), leading to ugly display output.

This PR also fixes up the profile documentation. Some of the examples
were executing against empty indices, which showed different profile
results than a populated index (and made for confusing examples).

Finally, I switched the agg display names from the fully qualified name
to the simple name, so that it's similar to how the query profiles
work.
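Roughly, the fallback for anonymous classes amounts to this (a
simplified sketch of the actual change to the profile trees below):

    String name = element.getClass().getSimpleName();
    if (name.isEmpty()) {
        // anonymous classes have an empty simple name, so report the
        // superclass's simple name instead
        name = element.getClass().getSuperclass().getSimpleName();
    }
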
Closes #26405 --- .../InternalAggregationProfileTree.java | 10 +- .../aggregation/ProfilingAggregator.java | 4 + .../query/InternalQueryProfileTree.java | 5 + .../aggregation/AggregationProfilerIT.java | 38 ++-- docs/reference/search/profile.asciidoc | 197 +++++++++--------- 5 files changed, 139 insertions(+), 115 deletions(-) diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java index f367595c84c87..f3e66c1a9fda9 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/InternalAggregationProfileTree.java @@ -32,10 +32,16 @@ protected AggregationProfileBreakdown createProfileBreakdown() { @Override protected String getTypeFromElement(Aggregator element) { + + // Anonymous classes (such as NonCollectingAggregator in TermsAgg) won't have a name, + // we need to get the super class + if (element.getClass().getSimpleName().isEmpty() == true) { + return element.getClass().getSuperclass().getSimpleName(); + } if (element instanceof MultiBucketAggregatorWrapper) { - return ((MultiBucketAggregatorWrapper) element).getWrappedClass().getName(); + return ((MultiBucketAggregatorWrapper) element).getWrappedClass().getSimpleName(); } - return element.getClass().getName(); + return element.getClass().getSimpleName(); } @Override diff --git a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java index d96fbe0d86697..522910e0ab9eb 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java +++ b/core/src/main/java/org/elasticsearch/search/profile/aggregation/ProfilingAggregator.java @@ -110,4 +110,8 @@ public void postCollection() throws IOException { delegate.postCollection(); } + @Override + public String toString() { + return delegate.toString(); + } } diff --git a/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java b/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java index 013b7d3a506cd..6a69ea968f0bd 100644 --- a/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java +++ b/core/src/main/java/org/elasticsearch/search/profile/query/InternalQueryProfileTree.java @@ -41,6 +41,11 @@ protected QueryProfileBreakdown createProfileBreakdown() { @Override protected String getTypeFromElement(Query query) { + // Anonymous classes won't have a name, + // we need to get the super class + if (query.getClass().getSimpleName().isEmpty() == true) { + return query.getClass().getSuperclass().getSimpleName(); + } return query.getClass().getSimpleName(); } diff --git a/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java index 9914938854d03..e0cc63beeab4c 100644 --- a/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java +++ b/core/src/test/java/org/elasticsearch/search/profile/aggregation/AggregationProfilerIT.java @@ -100,7 +100,7 @@ public void testSimpleProfile() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); 
assertThat(histoAggResult.getQueryName(), - equalTo("org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator")); + equalTo("HistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getProfiledChildren().size(), equalTo(0)); assertThat(histoAggResult.getTime(), greaterThan(0L)); @@ -137,7 +137,7 @@ public void testMultiLevelProfile() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); assertThat(histoAggResult.getQueryName(), - equalTo("org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator")); + equalTo("HistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getTime(), greaterThan(0L)); Map histoBreakdown = histoAggResult.getTimeBreakdown(); @@ -154,7 +154,7 @@ public void testMultiLevelProfile() { ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); assertThat(termsAggResult, notNullValue()); - assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getName())); + assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); assertThat(termsAggResult.getTime(), greaterThan(0L)); Map termsBreakdown = termsAggResult.getTimeBreakdown(); @@ -171,7 +171,7 @@ public void testMultiLevelProfile() { ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName())); assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); assertThat(avgAggResult.getTime(), greaterThan(0L)); Map avgBreakdown = termsAggResult.getTimeBreakdown(); @@ -207,7 +207,7 @@ public void testMultiLevelProfileBreadthFirst() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); assertThat(histoAggResult.getQueryName(), - equalTo("org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator")); + equalTo("HistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getTime(), greaterThan(0L)); Map histoBreakdown = histoAggResult.getTimeBreakdown(); @@ -224,7 +224,7 @@ public void testMultiLevelProfileBreadthFirst() { ProfileResult termsAggResult = histoAggResult.getProfiledChildren().get(0); assertThat(termsAggResult, notNullValue()); - assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getName())); + assertThat(termsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); assertThat(termsAggResult.getLuceneDescription(), equalTo("terms")); assertThat(termsAggResult.getTime(), greaterThan(0L)); Map termsBreakdown = termsAggResult.getTimeBreakdown(); @@ -241,7 +241,7 @@ public void testMultiLevelProfileBreadthFirst() { ProfileResult avgAggResult = termsAggResult.getProfiledChildren().get(0); assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName())); assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); assertThat(avgAggResult.getTime(), greaterThan(0L)); 
Map avgBreakdown = termsAggResult.getTimeBreakdown(); @@ -277,7 +277,7 @@ public void testDiversifiedAggProfile() { ProfileResult diversifyAggResult = aggProfileResultsList.get(0); assertThat(diversifyAggResult, notNullValue()); assertThat(diversifyAggResult.getQueryName(), - equalTo(DiversifiedOrdinalsSamplerAggregator.class.getName())); + equalTo(DiversifiedOrdinalsSamplerAggregator.class.getSimpleName())); assertThat(diversifyAggResult.getLuceneDescription(), equalTo("diversify")); assertThat(diversifyAggResult.getTime(), greaterThan(0L)); Map histoBreakdown = diversifyAggResult.getTimeBreakdown(); @@ -294,7 +294,7 @@ public void testDiversifiedAggProfile() { ProfileResult maxAggResult = diversifyAggResult.getProfiledChildren().get(0); assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName())); + assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName())); assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); assertThat(maxAggResult.getTime(), greaterThan(0L)); Map termsBreakdown = maxAggResult.getTimeBreakdown(); @@ -338,7 +338,7 @@ public void testComplexProfile() { ProfileResult histoAggResult = aggProfileResultsList.get(0); assertThat(histoAggResult, notNullValue()); assertThat(histoAggResult.getQueryName(), - equalTo("org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregator")); + equalTo("HistogramAggregator")); assertThat(histoAggResult.getLuceneDescription(), equalTo("histo")); assertThat(histoAggResult.getTime(), greaterThan(0L)); Map histoBreakdown = histoAggResult.getTimeBreakdown(); @@ -355,7 +355,7 @@ public void testComplexProfile() { ProfileResult tagsAggResult = histoAggResult.getProfiledChildren().get(0); assertThat(tagsAggResult, notNullValue()); - assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getName())); + assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); assertThat(tagsAggResult.getTime(), greaterThan(0L)); Map tagsBreakdown = tagsAggResult.getTimeBreakdown(); @@ -372,7 +372,7 @@ public void testComplexProfile() { ProfileResult avgAggResult = tagsAggResult.getProfiledChildren().get(0); assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName())); assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); assertThat(avgAggResult.getTime(), greaterThan(0L)); Map avgBreakdown = tagsAggResult.getTimeBreakdown(); @@ -389,7 +389,7 @@ public void testComplexProfile() { ProfileResult maxAggResult = tagsAggResult.getProfiledChildren().get(1); assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName())); + assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName())); assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); assertThat(maxAggResult.getTime(), greaterThan(0L)); Map maxBreakdown = tagsAggResult.getTimeBreakdown(); @@ -406,7 +406,7 @@ public void testComplexProfile() { ProfileResult stringsAggResult = histoAggResult.getProfiledChildren().get(1); assertThat(stringsAggResult, notNullValue()); - assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getName())); + 
assertThat(stringsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); assertThat(stringsAggResult.getLuceneDescription(), equalTo("strings")); assertThat(stringsAggResult.getTime(), greaterThan(0L)); Map stringsBreakdown = stringsAggResult.getTimeBreakdown(); @@ -423,7 +423,7 @@ public void testComplexProfile() { avgAggResult = stringsAggResult.getProfiledChildren().get(0); assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName())); assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); assertThat(avgAggResult.getTime(), greaterThan(0L)); avgBreakdown = stringsAggResult.getTimeBreakdown(); @@ -440,7 +440,7 @@ public void testComplexProfile() { maxAggResult = stringsAggResult.getProfiledChildren().get(1); assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName())); + assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName())); assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); assertThat(maxAggResult.getTime(), greaterThan(0L)); maxBreakdown = stringsAggResult.getTimeBreakdown(); @@ -457,7 +457,7 @@ public void testComplexProfile() { tagsAggResult = stringsAggResult.getProfiledChildren().get(2); assertThat(tagsAggResult, notNullValue()); - assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getName())); + assertThat(tagsAggResult.getQueryName(), equalTo(GlobalOrdinalsStringTermsAggregator.class.getSimpleName())); assertThat(tagsAggResult.getLuceneDescription(), equalTo("tags")); assertThat(tagsAggResult.getTime(), greaterThan(0L)); tagsBreakdown = tagsAggResult.getTimeBreakdown(); @@ -474,7 +474,7 @@ public void testComplexProfile() { avgAggResult = tagsAggResult.getProfiledChildren().get(0); assertThat(avgAggResult, notNullValue()); - assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getName())); + assertThat(avgAggResult.getQueryName(), equalTo(AvgAggregator.class.getSimpleName())); assertThat(avgAggResult.getLuceneDescription(), equalTo("avg")); assertThat(avgAggResult.getTime(), greaterThan(0L)); avgBreakdown = tagsAggResult.getTimeBreakdown(); @@ -491,7 +491,7 @@ public void testComplexProfile() { maxAggResult = tagsAggResult.getProfiledChildren().get(1); assertThat(maxAggResult, notNullValue()); - assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getName())); + assertThat(maxAggResult.getQueryName(), equalTo(MaxAggregator.class.getSimpleName())); assertThat(maxAggResult.getLuceneDescription(), equalTo("max")); assertThat(maxAggResult.getTime(), greaterThan(0L)); maxBreakdown = tagsAggResult.getTimeBreakdown(); diff --git a/docs/reference/search/profile.asciidoc b/docs/reference/search/profile.asciidoc index f18dab54ccefb..c864c643c8f6b 100644 --- a/docs/reference/search/profile.asciidoc +++ b/docs/reference/search/profile.asciidoc @@ -1,7 +1,7 @@ [[search-profile]] == Profile API -WARNING: The Profile API is a debugging tool and adds signficant overhead to search execution. +WARNING: The Profile API is a debugging tool and adds significant overhead to search execution. The Profile API provides detailed timing information about the execution of individual components in a search request. 
It gives the user insight into how search requests are executed at a low level so that @@ -17,11 +17,11 @@ Any `_search` request can be profiled by adding a top-level `profile` parameter: [source,js] -------------------------------------------------- -GET /_search +GET /twitter/_search { "profile": true,<1> "query" : { - "match" : { "message" : "message number" } + "match" : { "message" : "some number" } } } -------------------------------------------------- @@ -58,7 +58,7 @@ This will yield the following result: "query": [ { "type": "BooleanQuery", - "description": "message:message message:number", + "description": "message:some message:number", "time_in_nanos": "1873811", "breakdown": { "score": 51306, @@ -77,7 +77,7 @@ This will yield the following result: "children": [ { "type": "TermQuery", - "description": "message:message", + "description": "message:some", "time_in_nanos": "391943", "breakdown": { "score": 28776, @@ -230,13 +230,13 @@ The overall structure of this query tree will resemble your original Elasticsear "query": [ { "type": "BooleanQuery", - "description": "message:message message:number", + "description": "message:some message:number", "time_in_nanos": "1873811", "breakdown": {...}, <1> "children": [ { "type": "TermQuery", - "description": "message:message", + "description": "message:some", "time_in_nanos": "391943", "breakdown": {...} }, @@ -291,7 +291,7 @@ The `breakdown` component lists detailed timing statistics about low-level Lucen "advance_count": 0 } -------------------------------------------------- -// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:message message:number",\n"time_in_nanos": $body.$_path,/] +// TESTRESPONSE[s/^/{\n"took": $body.took,\n"timed_out": $body.timed_out,\n"_shards": $body._shards,\n"hits": $body.hits,\n"profile": {\n"shards": [ {\n"id": "$body.$_path",\n"searches": [{\n"query": [{\n"type": "BooleanQuery",\n"description": "message:some message:number",\n"time_in_nanos": $body.$_path,/] // TESTRESPONSE[s/}$/},\n"children": $body.$_path}],\n"rewrite_time": $body.$_path, "collector": $body.$_path}], "aggregations": []}]}}/] // TESTRESPONSE[s/(?<=[" ])\d+(\.\d+)?/$body.$_path/] @@ -469,35 +469,25 @@ value is cumulative and contains the total time for all queries being rewritten. 
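+For example, the `match` query shown at the top of this page is rewritten into
+the `BooleanQuery` over `message:some` and `message:number` that appears in its
+profile output, and the cost of that transformation is what this statistic
+records. It surfaces as a single field alongside the `query` array (the value
+below is only a representative sample; it will vary from run to run):
+
+[source,js]
+--------------------------------------------------
+"rewrite_time": 51443
+--------------------------------------------------
+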
==== A more complex example -////////////////////////// - -[source,js] --------------------------------------------------- -PUT test -{"settings": {"index.number_of_shards": 1, "number_of_replicas": 0}} -------------------------------------------------- -// CONSOLE - -////////////////////////// To demonstrate a slightly more complex query and the associated results, we can profile the following query: [source,js] -------------------------------------------------- -GET /test/_search +GET /twitter/_search { "profile": true, "query": { "term": { - "message": { - "value": "search" + "user": { + "value": "test" } } }, "aggs": { "my_scoped_agg": { "terms": { - "field": "level" + "field": "likes" } }, "my_global_agg": { @@ -505,21 +495,21 @@ GET /test/_search "aggs": { "my_level_agg": { "terms": { - "field": "level" + "field": "likes" } } } } }, "post_filter": { - "term": { - "tag": "elastic" + "match": { + "message": "some" } } } -------------------------------------------------- // CONSOLE -// TEST[s/GET \/test\/_search/GET \/test\/_search\?filter_path=profile.shards.id,profile.shards.searches/] +// TEST[s/_search/_search\?filter_path=profile.shards.id,profile.shards.searches,profile.shards.aggregations/] // TEST[continued] This example has: @@ -544,7 +534,7 @@ And the response: "query": [ { "type": "TermQuery", - "description": "my_field:foo", + "description": "message:some", "time_in_nanos": "409456", "breakdown": { "score": 0, @@ -563,7 +553,7 @@ And the response: }, { "type": "TermQuery", - "description": "message:search", + "description": "user:test", "time_in_nanos": "303702", "breakdown": { "score": 0, @@ -606,7 +596,7 @@ And the response: ] }, { - "name": "BucketCollector: [[org.elasticsearch.search.profile.aggregation.ProfilingAggregator@222b076, org.elasticsearch.search.profile.aggregation.ProfilingAggregator@3000ab31]]", + "name": "BucketCollector: [[my_scoped_agg, my_global_agg]]", "reason": "aggregation", "time_in_nanos": 8273 } @@ -616,21 +606,24 @@ And the response: } ] } - ] + ], + "aggregations": [...] <1> } ] } } -------------------------------------------------- +// TESTRESPONSE[s/"aggregations": \[\.\.\.\]/"aggregations": $body.$_path/] // TESTRESPONSE[s/\.\.\.//] -// TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] -// TESTRESPONSE[s/: "[^"]*"/: $body.$_path/] +// TESTRESPONSE[s/(?<=[" ])\d+(\.\d+)?/$body.$_path/] +// TESTRESPONSE[s/"id": "\[P6-vulHtQRWuD4YnubWb7A\]\[test\]\[0\]"/"id": $body.profile.shards.0.id/] +<1> The ``"aggregations"` portion has been omitted because it will be covered in the next section As you can see, the output is significantly verbose from before. All the major portions of the query are represented: -1. The first `TermQuery` (message:search) represents the main `term` query -2. The second `TermQuery` (my_field:foo) represents the `post_filter` query +1. The first `TermQuery` (user:test) represents the main `term` query +2. The second `TermQuery` (message:some) represents the `post_filter` query The Collector tree is fairly straightforward, showing how a single CancellableCollector wraps a MultiCollector which also wraps a FilteredCollector to execute the post_filter (and in turn wraps the normal scoring SimpleCollector), @@ -651,7 +644,7 @@ Due to this dynamic, per-segment rewriting, the clean tree structure becomes dis "lineage" showing how one query rewrites into the next. At present time, all we can do is apologize, and suggest you collapse the details for that query's children if it is too confusing. 
Luckily, all the timing statistics are correct, just not the physical layout in the response, so it is sufficient to just analyze the top-level MultiTermQuery and -ignore it's children if you find the details too tricky to interpret. +ignore its children if you find the details too tricky to interpret. Hopefully this will be fixed in future iterations, but it is a tricky problem to solve and still in-progress :) @@ -659,45 +652,49 @@ Hopefully this will be fixed in future iterations, but it is a tricky problem to ==== `aggregations` Section -////////////////////////// - -[source,js] --------------------------------------------------- -PUT house-prices -{"settings": {"index.number_of_shards": 1, "number_of_replicas": 0}} -------------------------------------------------- -// CONSOLE - -////////////////////////// The `aggregations` section contains detailed timing of the aggregation tree executed by a particular shard. -The overall structure of this aggregation tree will resemble your original Elasticsearch request. Let's consider -the following example aggregations request: +The overall structure of this aggregation tree will resemble your original Elasticsearch request. Let's +execute the previous query again and look at the aggregation profile this time: [source,js] -------------------------------------------------- -GET /house-prices/_search +GET /twitter/_search { "profile": true, - "size": 0, + "query": { + "term": { + "user": { + "value": "test" + } + } + }, "aggs": { - "property_type": { + "my_scoped_agg": { "terms": { - "field": "propertyType" - }, + "field": "likes" + } + }, + "my_global_agg": { + "global": {}, "aggs": { - "avg_price": { - "avg": { - "field": "price" + "my_level_agg": { + "terms": { + "field": "likes" } } } } + }, + "post_filter": { + "match": { + "message": "some" + } } } -------------------------------------------------- // CONSOLE -// TEST[s/GET \/house-prices\/_search/GET \/house-prices\/_search\?filter_path=profile.shards.aggregations/] +// TEST[s/_search/_search\?filter_path=profile.shards.aggregations/] // TEST[continued] Which yields the following aggregation profile output @@ -705,39 +702,53 @@ Which yields the following aggregation profile output [source,js] -------------------------------------------------- { - "profile": { - "shards": [ + "profile" : { + "shards" : [ { - ... 
- "aggregations": [ + "aggregations" : [ { - "type": "org.elasticsearch.search.aggregations.bucket.terms.TermsAggregatorFactory$1", - "description": "property_type", - "time_in_nanos": 26234, - "breakdown": { - "reduce": 0, - "build_aggregation": 817, - "build_aggregation_count": 1, - "initialize": 25415, - "initialize_count": 1, - "reduce_count": 0, - "collect": 0, - "collect_count": 0 + "type" : "LongTermsAggregator", + "description" : "my_scoped_agg", + "time_in_nanos" : 195386, + "breakdown" : { + "reduce" : 0, + "build_aggregation" : 81171, + "build_aggregation_count" : 1, + "initialize" : 22753, + "initialize_count" : 1, + "reduce_count" : 0, + "collect" : 91456, + "collect_count" : 4 + } + }, + { + "type" : "GlobalAggregator", + "description" : "my_global_agg", + "time_in_nanos" : 190430, + "breakdown" : { + "reduce" : 0, + "build_aggregation" : 59990, + "build_aggregation_count" : 1, + "initialize" : 29619, + "initialize_count" : 1, + "reduce_count" : 0, + "collect" : 100815, + "collect_count" : 4 }, - "children": [ + "children" : [ { - "type": "org.elasticsearch.search.aggregations.metrics.avg.AvgAggregator", - "description": "avg_price", - "time_in_nanos": 5610, - "breakdown": { - "reduce": 0, - "build_aggregation": 0, - "build_aggregation_count": 0, - "initialize": 5609, - "initialize_count": 1, - "reduce_count": 0, - "collect": 0, - "collect_count": 0 + "type" : "LongTermsAggregator", + "description" : "my_level_agg", + "time_in_nanos" : 160329, + "breakdown" : { + "reduce" : 0, + "build_aggregation" : 55712, + "build_aggregation_count" : 1, + "initialize" : 10559, + "initialize_count" : 1, + "reduce_count" : 0, + "collect" : 94052, + "collect_count" : 4 } } ] @@ -749,17 +760,15 @@ Which yields the following aggregation profile output } -------------------------------------------------- // TESTRESPONSE[s/\.\.\.//] -// TESTRESPONSE[s/: (\-)?[0-9]+/: $body.$_path/] - -From the profile structure we can see our `property_type` terms aggregation which is internally represented by the -`GlobalOrdinalsStringTermsAggregator` class and the sub aggregator `avg_price` which is internally represented by the `AvgAggregator` class. The `type` field displays the class used internally to represent the aggregation. The `description` field displays the name of the aggregation. +// TESTRESPONSE[s/(?<=[" ])\d+(\.\d+)?/$body.$_path/] +// TESTRESPONSE[s/"id": "\[P6-vulHtQRWuD4YnubWb7A\]\[test\]\[0\]"/"id": $body.profile.shards.0.id/] -The `time_in_nanos` field shows that it took ~4 seconds for the entire aggregation to execute. The recorded time is inclusive -of all children. +From the profile structure we can see that the `my_scoped_agg` is internally being run as a `LongTermsAggregator` (because the field it is +aggregating, `likes`, is a numeric field). At the same level, we see a `GlobalAggregator` which comes from `my_global_agg`. That +aggregation then has a child `LongTermsAggregator` which from the second terms aggregation on `likes`. -The `breakdown` field will give detailed stats about how the time was spent, we'll look at -that in a moment. Finally, the `children` array lists any sub-aggregations that may be present. Because we have an `avg_price` aggregation as a sub-aggregation to the `property_type` aggregation we see it listed as a child of the `property_type` aggregation. the two aggregation outputs have identical information (type, time, -breakdown, etc). Children are allowed to have their own children. 
+The `time_in_nanos` field shows the time executed by each aggregation, and is inclusive of all children. While the overall time is useful, +the `breakdown` field will give detailed stats about how the time was spent. ===== Timing Breakdown From d5451b20371b31c49cb1f93a9af17eede796fd16 Mon Sep 17 00:00:00 2001 From: Jason Tedor Date: Mon, 6 Nov 2017 17:55:11 -0500 Subject: [PATCH 23/25] Die with dignity while merging If an out of memory error is thrown while merging, today we quietly rewrap it into a merge exception and the out of memory error is lost. Instead, we need to rethrow out of memory errors, and in fact any fatal error here, and let those go uncaught so that the node is torn down. This commit causes this to be the case. Relates #27265 --- ...ElasticsearchUncaughtExceptionHandler.java | 7 +- .../index/engine/InternalEngine.java | 45 +- ...icsearchUncaughtExceptionHandlerTests.java | 2 - .../engine/CombinedDeletionPolicyTests.java | 2 +- .../index/engine/InternalEngineTests.java | 348 +------------- .../translog/TranslogDeletionPolicyTests.java | 12 - .../index/translog/TranslogTests.java | 2 +- .../indices/recovery/RecoveryTests.java | 2 +- .../index/engine/EvilInternalEngineTests.java | 107 +++++ .../index/engine/EngineTestCase.java | 441 ++++++++++++++++++ .../translog/TranslogDeletionPolicies.java | 39 ++ 11 files changed, 624 insertions(+), 383 deletions(-) create mode 100644 qa/evil-tests/src/test/java/org/elasticsearch/index/engine/EvilInternalEngineTests.java create mode 100644 test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java create mode 100644 test/framework/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicies.java diff --git a/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java index b1df4f5ccc0ea..c6692cec08b7a 100644 --- a/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java +++ b/core/src/main/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandler.java @@ -21,7 +21,6 @@ import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.message.ParameterizedMessage; -import org.apache.lucene.index.MergePolicy; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.logging.Loggers; @@ -68,11 +67,7 @@ public void uncaughtException(Thread t, Throwable e) { // visible for testing static boolean isFatalUncaught(Throwable e) { - return isFatalCause(e) || (e instanceof MergePolicy.MergeException && isFatalCause(e.getCause())); - } - - private static boolean isFatalCause(Throwable cause) { - return cause instanceof Error; + return e instanceof Error; } // visible for testing diff --git a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index ac02099987373..cb07cf5e6966a 100644 --- a/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/core/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -1579,23 +1579,15 @@ public IndexCommitRef acquireIndexCommit(final boolean flushFirst) throws Engine } } - @SuppressWarnings("finally") private boolean failOnTragicEvent(AlreadyClosedException ex) { final boolean engineFailed; // if we are already closed due to some tragic exception // we need to fail the engine. 
it might have already been failed before // but we are double-checking it's failed and closed if (indexWriter.isOpen() == false && indexWriter.getTragicException() != null) { - if (indexWriter.getTragicException() instanceof Error) { - try { - logger.error("tragic event in index writer", ex); - } finally { - throw (Error) indexWriter.getTragicException(); - } - } else { - failEngine("already closed by tragic event on the index writer", (Exception) indexWriter.getTragicException()); - engineFailed = true; - } + maybeDie("tragic event in index writer", indexWriter.getTragicException()); + failEngine("already closed by tragic event on the index writer", (Exception) indexWriter.getTragicException()); + engineFailed = true; } else if (translog.isOpen() == false && translog.getTragicException() != null) { failEngine("already closed by tragic event on the translog", translog.getTragicException()); engineFailed = true; @@ -1916,7 +1908,6 @@ protected void doRun() throws Exception { @Override protected void handleMergeException(final Directory dir, final Throwable exc) { - logger.error("failed to merge", exc); engineConfig.getThreadPool().generic().execute(new AbstractRunnable() { @Override public void onFailure(Exception e) { @@ -1925,13 +1916,39 @@ public void onFailure(Exception e) { @Override protected void doRun() throws Exception { - MergePolicy.MergeException e = new MergePolicy.MergeException(exc, dir); - failEngine("merge failed", e); + /* + * We do this on another thread rather than the merge thread that we are initially called on so that we have complete + * confidence that the call stack does not contain catch statements that would cause the error that might be thrown + * here from being caught and never reaching the uncaught exception handler. + */ + maybeDie("fatal error while merging", exc); + logger.error("failed to merge", exc); + failEngine("merge failed", new MergePolicy.MergeException(exc, dir)); } }); } } + /** + * If the specified throwable is a fatal error, this throwable will be thrown. Callers should ensure that there are no catch statements + * that would catch an error in the stack as the fatal error here should go uncaught and be handled by the uncaught exception handler + * that we install during bootstrap. If the specified throwable is indeed a fatal error, the specified message will attempt to be logged + * before throwing the fatal error. If the specified throwable is not a fatal error, this method is a no-op. + * + * @param maybeMessage the message to maybe log + * @param maybeFatal the throwable that is maybe fatal + */ + @SuppressWarnings("finally") + private void maybeDie(final String maybeMessage, final Throwable maybeFatal) { + if (maybeFatal instanceof Error) { + try { + logger.error(maybeMessage, maybeFatal); + } finally { + throw (Error) maybeFatal; + } + } + } + /** * Commits the specified index writer. 
* diff --git a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandlerTests.java b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandlerTests.java index 6e40153b467d9..e2bf07b7d0bb4 100644 --- a/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandlerTests.java +++ b/core/src/test/java/org/elasticsearch/bootstrap/ElasticsearchUncaughtExceptionHandlerTests.java @@ -19,7 +19,6 @@ package org.elasticsearch.bootstrap; -import org.apache.lucene.index.MergePolicy; import org.elasticsearch.test.ESTestCase; import org.junit.Before; @@ -131,7 +130,6 @@ void onNonFatalUncaught(String threadName, Throwable t) { } public void testIsFatalCause() { - assertFatal(new MergePolicy.MergeException(new OutOfMemoryError(), null)); assertFatal(new OutOfMemoryError()); assertFatal(new StackOverflowError()); assertFatal(new InternalError()); diff --git a/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java b/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java index d1eef05c2efa1..5d4385cbd384b 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/CombinedDeletionPolicyTests.java @@ -30,7 +30,7 @@ import java.util.Collections; import java.util.List; -import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; +import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java index f6d2d99658098..e196c6b4d0bbe 100644 --- a/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java +++ b/core/src/test/java/org/elasticsearch/index/engine/InternalEngineTests.java @@ -26,8 +26,6 @@ import org.apache.logging.log4j.core.LogEvent; import org.apache.logging.log4j.core.appender.AbstractAppender; import org.apache.logging.log4j.core.filter.RegexFilter; -import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.codecs.Codec; import org.apache.lucene.codecs.lucene50.Lucene50StoredFieldsFormat; import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; @@ -45,7 +43,6 @@ import org.apache.lucene.index.LiveIndexWriterConfig; import org.apache.lucene.index.LogByteSizeMergePolicy; import org.apache.lucene.index.LogDocMergePolicy; -import org.apache.lucene.index.MergePolicy; import org.apache.lucene.index.NoMergePolicy; import org.apache.lucene.index.NumericDocValues; import org.apache.lucene.index.PointValues; @@ -72,19 +69,16 @@ import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.TransportActions; import org.elasticsearch.cluster.metadata.IndexMetaData; -import org.elasticsearch.cluster.routing.AllocationId; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; -import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import 
org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader; import org.elasticsearch.common.lucene.uid.Versions; import org.elasticsearch.common.lucene.uid.VersionsAndSeqNoResolver; @@ -95,7 +89,6 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.codec.CodecService; @@ -119,20 +112,13 @@ import org.elasticsearch.index.shard.IndexSearcherWrapper; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardUtils; -import org.elasticsearch.index.store.DirectoryService; import org.elasticsearch.index.store.DirectoryUtils; import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; -import org.elasticsearch.test.DummyShardLock; -import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.threadpool.TestThreadPool; -import org.elasticsearch.threadpool.ThreadPool; import org.hamcrest.MatcherAssert; import org.hamcrest.Matchers; -import org.junit.After; -import org.junit.Before; import java.io.IOException; import java.io.UncheckedIOException; @@ -166,14 +152,13 @@ import java.util.stream.Collectors; import java.util.stream.LongStream; -import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; import static java.util.Collections.shuffle; import static org.elasticsearch.index.engine.Engine.Operation.Origin.LOCAL_TRANSLOG_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PEER_RECOVERY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.PRIMARY; import static org.elasticsearch.index.engine.Engine.Operation.Origin.REPLICA; -import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; +import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.everyItem; @@ -185,313 +170,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; -public class InternalEngineTests extends ESTestCase { - - protected final ShardId shardId = new ShardId(new Index("index", "_na_"), 0); - protected final AllocationId allocationId = AllocationId.newInitializing(); - private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); - - protected ThreadPool threadPool; - - private Store store; - private Store storeReplica; - - protected InternalEngine engine; - protected InternalEngine replicaEngine; - - private IndexSettings defaultSettings; - private String codecName; - private Path primaryTranslogDir; - private Path replicaTranslogDir; - - @Override - @Before - public void setUp() throws Exception { - super.setUp(); - - CodecService codecService = new CodecService(null, logger); - String name = Codec.getDefault().getName(); - if 
(Arrays.asList(codecService.availableCodecs()).contains(name)) { - // some codecs are read only so we only take the ones that we have in the service and randomly - // selected by lucene test case. - codecName = name; - } else { - codecName = "default"; - } - defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() - .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us - .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) - .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) - .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), - between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) - .build()); // TODO randomize more settings - threadPool = new TestThreadPool(getClass().getName()); - store = createStore(); - storeReplica = createStore(); - Lucene.cleanLuceneIndex(store.directory()); - Lucene.cleanLuceneIndex(storeReplica.directory()); - primaryTranslogDir = createTempDir("translog-primary"); - engine = createEngine(store, primaryTranslogDir); - LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); - - assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); - if (randomBoolean()) { - engine.config().setEnableGcDeletes(false); - } - replicaTranslogDir = createTempDir("translog-replica"); - replicaEngine = createEngine(storeReplica, replicaTranslogDir); - currentIndexWriterConfig = replicaEngine.getCurrentIndexWriterConfig(); - - assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); - assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); - if (randomBoolean()) { - engine.config().setEnableGcDeletes(false); - } - } - - public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode) { - return copy(config, openMode, config.getAnalyzer()); - } - - public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, Analyzer analyzer) { - return new EngineConfig(openMode, config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), - config.getWarmer(), config.getStore(), config.getMergePolicy(), analyzer, config.getSimilarity(), - new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), - config.getForceNewHistoryUUID(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), - config.getIndexSort(), config.getTranslogRecoveryRunner()); - } - - @Override - @After - public void tearDown() throws Exception { - super.tearDown(); - if (engine != null && engine.isClosed.get() == false) { - engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - } - if (replicaEngine != null && replicaEngine.isClosed.get() == false) { - replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); - } - IOUtils.close( - replicaEngine, storeReplica, - engine, store); - terminate(threadPool); - } - - - private static Document testDocumentWithTextField() { - return testDocumentWithTextField("test"); - } - - private static Document testDocumentWithTextField(String value) { - Document document = testDocument(); - document.add(new TextField("value", value, Field.Store.YES)); - return document; - } - - - private static Document testDocument() { - return new 
Document(); - } - - public static ParsedDocument createParsedDoc(String id, String routing) { - return testParsedDocument(id, routing, testDocumentWithTextField(), new BytesArray("{ \"value\" : \"test\" }"), null); - } - - private static ParsedDocument testParsedDocument(String id, String routing, Document document, BytesReference source, Mapping mappingUpdate) { - Field uidField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE); - Field versionField = new NumericDocValuesField("_version", 0); - SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); - document.add(uidField); - document.add(versionField); - document.add(seqID.seqNo); - document.add(seqID.seqNoDocValue); - document.add(seqID.primaryTerm); - BytesRef ref = source.toBytesRef(); - document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length)); - return new ParsedDocument(versionField, seqID, id, "test", routing, Arrays.asList(document), source, XContentType.JSON, - mappingUpdate); - } - - protected Store createStore() throws IOException { - return createStore(newDirectory()); - } - - protected Store createStore(final Directory directory) throws IOException { - return createStore(INDEX_SETTINGS, directory); - } - - protected Store createStore(final IndexSettings indexSettings, final Directory directory) throws IOException { - final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { - @Override - public Directory newDirectory() throws IOException { - return directory; - } - }; - return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); - } - - protected Translog createTranslog() throws IOException { - return createTranslog(primaryTranslogDir); - } - - protected Translog createTranslog(Path translogPath) throws IOException { - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); - return new Translog(translogConfig, null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); - } - - protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { - return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null); - } - - protected InternalEngine createEngine(Store store, - Path translogPath, - BiFunction sequenceNumbersServiceSupplier) throws IOException { - return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null, sequenceNumbersServiceSupplier); - } - - protected InternalEngine createEngine(Store store, - Path translogPath, - BiFunction sequenceNumbersServiceSupplier, - ToLongBiFunction seqNoForOperation) throws IOException { - return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null, sequenceNumbersServiceSupplier, seqNoForOperation, null); - } - - protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) throws IOException { - return createEngine(indexSettings, store, translogPath, mergePolicy, null); - - } - - protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, - @Nullable IndexWriterFactory indexWriterFactory) throws IOException { - return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, null); - } - - protected InternalEngine createEngine( - IndexSettings indexSettings, - Store store, - Path translogPath, - MergePolicy 
mergePolicy, - @Nullable IndexWriterFactory indexWriterFactory, - @Nullable BiFunction sequenceNumbersServiceSupplier) throws IOException { - return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, sequenceNumbersServiceSupplier, null, null); - } - - protected InternalEngine createEngine( - IndexSettings indexSettings, - Store store, - Path translogPath, - MergePolicy mergePolicy, - @Nullable IndexWriterFactory indexWriterFactory, - @Nullable BiFunction sequenceNumbersServiceSupplier, - @Nullable ToLongBiFunction seqNoForOperation) throws IOException { - return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, sequenceNumbersServiceSupplier, seqNoForOperation, null); - } - - protected InternalEngine createEngine( - IndexSettings indexSettings, - Store store, - Path translogPath, - MergePolicy mergePolicy, - @Nullable IndexWriterFactory indexWriterFactory, - @Nullable BiFunction sequenceNumbersServiceSupplier, - @Nullable ToLongBiFunction seqNoForOperation, - @Nullable Sort indexSort) throws IOException { - EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null, indexSort); - InternalEngine internalEngine = createInternalEngine(indexWriterFactory, sequenceNumbersServiceSupplier, seqNoForOperation, config); - if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { - internalEngine.recoverFromTranslog(); - } - return internalEngine; - } - - @FunctionalInterface - public interface IndexWriterFactory { - - IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException; - } - - public static InternalEngine createInternalEngine(@Nullable final IndexWriterFactory indexWriterFactory, - @Nullable final BiFunction sequenceNumbersServiceSupplier, - @Nullable final ToLongBiFunction seqNoForOperation, - final EngineConfig config) { - if (sequenceNumbersServiceSupplier == null) { - return new InternalEngine(config) { - @Override - IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { - return (indexWriterFactory != null) ? - indexWriterFactory.createWriter(directory, iwc) : - super.createWriter(directory, iwc); - } - - @Override - protected long doGenerateSeqNoForOperation(final Operation operation) { - return seqNoForOperation != null ? seqNoForOperation.applyAsLong(this, operation) : super.doGenerateSeqNoForOperation(operation); - } - }; - } else { - return new InternalEngine(config, sequenceNumbersServiceSupplier) { - @Override - IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { - return (indexWriterFactory != null) ? - indexWriterFactory.createWriter(directory, iwc) : - super.createWriter(directory, iwc); - } - - @Override - protected long doGenerateSeqNoForOperation(final Operation operation) { - return seqNoForOperation != null ? 
seqNoForOperation.applyAsLong(this, operation) : super.doGenerateSeqNoForOperation(operation); - } - }; - } - - } - - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, - ReferenceManager.RefreshListener refreshListener) { - return config(indexSettings, store, translogPath, mergePolicy, refreshListener, null); - } - - public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, - ReferenceManager.RefreshListener refreshListener, Sort indexSort) { - IndexWriterConfig iwc = newIndexWriterConfig(); - TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); - final EngineConfig.OpenMode openMode; - try { - if (Lucene.indexExists(store.directory()) == false) { - openMode = EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG; - } else { - openMode = EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG; - } - } catch (IOException e) { - throw new ElasticsearchException("can't find index?", e); - } - Engine.EventListener listener = new Engine.EventListener() { - @Override - public void onFailedEngine(String reason, @Nullable Exception e) { - // we don't need to notify anybody in this test - } - }; - final TranslogHandler handler = new TranslogHandler(xContentRegistry(), IndexSettingsModule.newIndexSettings(shardId.getIndexName(), - indexSettings.getSettings())); - final List refreshListenerList = - refreshListener == null ? emptyList() : Collections.singletonList(refreshListener); - EngineConfig config = new EngineConfig(openMode, shardId, allocationId.getId(), threadPool, indexSettings, null, store, - mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, - IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, - TimeValue.timeValueMinutes(5), refreshListenerList, indexSort, handler); - - return config; - } - - private static final BytesReference B_1 = new BytesArray(new byte[]{1}); - private static final BytesReference B_2 = new BytesArray(new byte[]{2}); - private static final BytesReference B_3 = new BytesArray(new byte[]{3}); - private static final BytesArray SOURCE = bytesArray("{}"); - - private static BytesArray bytesArray(String string) { - return new BytesArray(string.getBytes(Charset.defaultCharset())); - } +public class InternalEngineTests extends EngineTestCase { public void testSegments() throws Exception { try (Store store = createStore(); @@ -2487,29 +2166,6 @@ public void testEnableGcDeletes() throws Exception { } } - protected Term newUid(String id) { - return new Term("_id", Uid.encodeId(id)); - } - - protected Term newUid(ParsedDocument doc) { - return newUid(doc.id()); - } - - protected Engine.Get newGet(boolean realtime, ParsedDocument doc) { - return new Engine.Get(realtime, doc.type(), doc.id(), newUid(doc)); - } - - private Engine.Index indexForDoc(ParsedDocument doc) { - return new Engine.Index(newUid(doc), doc); - } - - private Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long seqNo, - boolean isRetry) { - return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL, - Engine.Operation.Origin.REPLICA, System.nanoTime(), - IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry); - } - public void testExtractShardId() { try (Engine.Searcher test = this.engine.acquireSearcher("test")) { ShardId shardId = ShardUtils.extractShardId(test.getDirectoryReader()); diff --git 
a/core/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java index 94a631906fcea..f62d292730e43 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogDeletionPolicyTests.java @@ -43,18 +43,6 @@ public class TranslogDeletionPolicyTests extends ESTestCase { - public static TranslogDeletionPolicy createTranslogDeletionPolicy() { - return new TranslogDeletionPolicy( - IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(), - IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getDefault(Settings.EMPTY).getMillis() - ); - } - - public static TranslogDeletionPolicy createTranslogDeletionPolicy(IndexSettings indexSettings) { - return new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(), - indexSettings.getTranslogRetentionAge().getMillis()); - } - public void testNoRetention() throws IOException { long now = System.currentTimeMillis(); Tuple, TranslogWriter> readersAndWriter = createReadersAndWriter(now); diff --git a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 771302a903f32..78ed6697b22b4 100644 --- a/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/core/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -107,7 +107,7 @@ import static com.carrotsearch.randomizedtesting.RandomizedTest.randomLongBetween; import static org.elasticsearch.common.util.BigArrays.NON_RECYCLING_INSTANCE; -import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; +import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; diff --git a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java index e2314cff014bc..55b7e22eb8a38 100644 --- a/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java +++ b/core/src/test/java/org/elasticsearch/indices/recovery/RecoveryTests.java @@ -44,7 +44,7 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; -import static org.elasticsearch.index.translog.TranslogDeletionPolicyTests.createTranslogDeletionPolicy; +import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/index/engine/EvilInternalEngineTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/index/engine/EvilInternalEngineTests.java new file mode 100644 index 0000000000000..c32b3ab202080 --- /dev/null +++ b/qa/evil-tests/src/test/java/org/elasticsearch/index/engine/EvilInternalEngineTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.SegmentCommitInfo; +import org.elasticsearch.index.mapper.ParsedDocument; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasToString; +import static org.hamcrest.Matchers.instanceOf; + +public class EvilInternalEngineTests extends EngineTestCase { + + public void testOutOfMemoryErrorWhileMergingIsRethrownAndIsUncaught() throws IOException, InterruptedException { + engine.close(); + final AtomicReference maybeFatal = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(1); + final Thread.UncaughtExceptionHandler uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler(); + try { + /* + * We want to test that the out of memory error thrown from the merge goes uncaught; this gives us confidence that an out of + * memory error thrown while merging will lead to the node being torn down. + */ + Thread.setDefaultUncaughtExceptionHandler((t, e) -> { + maybeFatal.set(e); + latch.countDown(); + }); + final AtomicReference> segmentsReference = new AtomicReference<>(); + + try (Engine e = createEngine( + defaultSettings, + store, + primaryTranslogDir, + newMergePolicy(), + (directory, iwc) -> new IndexWriter(directory, iwc) { + @Override + public void merge(final MergePolicy.OneMerge merge) throws IOException { + throw new OutOfMemoryError("640K ought to be enough for anybody"); + } + + @Override + public synchronized MergePolicy.OneMerge getNextMerge() { + /* + * This will be called when we flush when we will not be ready to return the segments. After the segments are on + * disk, we can only return them from here once or the merge scheduler will be stuck in a loop repeatedly + * peeling off the same segments to schedule for merging. 
+ */ + if (segmentsReference.get() == null) { + return super.getNextMerge(); + } else { + final List segments = segmentsReference.getAndSet(null); + return new MergePolicy.OneMerge(segments); + } + } + }, + null)) { + // force segments to exist on disk + final ParsedDocument doc1 = testParsedDocument("1", null, testDocumentWithTextField(), B_1, null); + e.index(indexForDoc(doc1)); + e.flush(); + final List segments = + StreamSupport.stream(e.getLastCommittedSegmentInfos().spliterator(), false).collect(Collectors.toList()); + segmentsReference.set(segments); + // trigger a background merge that will be managed by the concurrent merge scheduler + e.forceMerge(randomBoolean(), 0, false, false, false); + /* + * Merging happens in the background on a merge thread, and the maybeDie handler is invoked on yet another thread; we have + * to wait for these events to finish. + */ + latch.await(); + assertNotNull(maybeFatal.get()); + assertThat(maybeFatal.get(), instanceOf(OutOfMemoryError.class)); + assertThat(maybeFatal.get(), hasToString(containsString("640K ought to be enough for anybody"))); + } + } finally { + Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler); + } + } + + +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java new file mode 100644 index 0000000000000..5c2ef977b163e --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/index/engine/EngineTestCase.java @@ -0,0 +1,441 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.index.engine; + +import org.apache.lucene.analysis.Analyzer; +import org.apache.lucene.codecs.Codec; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.StoredField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LiveIndexWriterConfig; +import org.apache.lucene.index.MergePolicy; +import org.apache.lucene.index.Term; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ReferenceManager; +import org.apache.lucene.search.Sort; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.Version; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.cluster.metadata.IndexMetaData; +import org.elasticsearch.cluster.routing.AllocationId; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.bytes.BytesArray; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.xcontent.XContentType; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.VersionType; +import org.elasticsearch.index.codec.CodecService; +import org.elasticsearch.index.mapper.IdFieldMapper; +import org.elasticsearch.index.mapper.Mapping; +import org.elasticsearch.index.mapper.ParseContext; +import org.elasticsearch.index.mapper.ParsedDocument; +import org.elasticsearch.index.mapper.SeqNoFieldMapper; +import org.elasticsearch.index.mapper.SourceFieldMapper; +import org.elasticsearch.index.mapper.Uid; +import org.elasticsearch.index.seqno.SeqNoStats; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.seqno.SequenceNumbersService; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.store.DirectoryService; +import org.elasticsearch.index.store.Store; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.index.translog.TranslogConfig; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.Charset; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.ToLongBiFunction; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.index.translog.TranslogDeletionPolicies.createTranslogDeletionPolicy; + +public abstract class EngineTestCase extends ESTestCase { + + protected final ShardId shardId = new ShardId(new Index("index", "_na_"), 0); + protected final AllocationId allocationId = AllocationId.newInitializing(); + protected static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY); + + protected ThreadPool threadPool; + + protected 
Store store; + protected Store storeReplica; + + protected InternalEngine engine; + protected InternalEngine replicaEngine; + + protected IndexSettings defaultSettings; + protected String codecName; + protected Path primaryTranslogDir; + protected Path replicaTranslogDir; + + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + + CodecService codecService = new CodecService(null, logger); + String name = Codec.getDefault().getName(); + if (Arrays.asList(codecService.availableCodecs()).contains(name)) { + // some codecs are read only so we only take the ones that we have in the service and randomly + // selected by lucene test case. + codecName = name; + } else { + codecName = "default"; + } + defaultSettings = IndexSettingsModule.newIndexSettings("test", Settings.builder() + .put(IndexSettings.INDEX_GC_DELETES_SETTING.getKey(), "1h") // make sure this doesn't kick in on us + .put(EngineConfig.INDEX_CODEC_SETTING.getKey(), codecName) + .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT) + .put(IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.getKey(), + between(10, 10 * IndexSettings.MAX_REFRESH_LISTENERS_PER_SHARD.get(Settings.EMPTY))) + .build()); // TODO randomize more settings + threadPool = new TestThreadPool(getClass().getName()); + store = createStore(); + storeReplica = createStore(); + Lucene.cleanLuceneIndex(store.directory()); + Lucene.cleanLuceneIndex(storeReplica.directory()); + primaryTranslogDir = createTempDir("translog-primary"); + engine = createEngine(store, primaryTranslogDir); + LiveIndexWriterConfig currentIndexWriterConfig = engine.getCurrentIndexWriterConfig(); + + assertEquals(engine.config().getCodec().getName(), codecService.codec(codecName).getName()); + assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); + if (randomBoolean()) { + engine.config().setEnableGcDeletes(false); + } + replicaTranslogDir = createTempDir("translog-replica"); + replicaEngine = createEngine(storeReplica, replicaTranslogDir); + currentIndexWriterConfig = replicaEngine.getCurrentIndexWriterConfig(); + + assertEquals(replicaEngine.config().getCodec().getName(), codecService.codec(codecName).getName()); + assertEquals(currentIndexWriterConfig.getCodec().getName(), codecService.codec(codecName).getName()); + if (randomBoolean()) { + engine.config().setEnableGcDeletes(false); + } + } + + public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode) { + return copy(config, openMode, config.getAnalyzer()); + } + + public EngineConfig copy(EngineConfig config, EngineConfig.OpenMode openMode, Analyzer analyzer) { + return new EngineConfig(openMode, config.getShardId(), config.getAllocationId(), config.getThreadPool(), config.getIndexSettings(), + config.getWarmer(), config.getStore(), config.getMergePolicy(), analyzer, config.getSimilarity(), + new CodecService(null, logger), config.getEventListener(), config.getQueryCache(), config.getQueryCachingPolicy(), + config.getForceNewHistoryUUID(), config.getTranslogConfig(), config.getFlushMergesAfter(), config.getRefreshListeners(), + config.getIndexSort(), config.getTranslogRecoveryRunner()); + } + + @Override + @After + public void tearDown() throws Exception { + super.tearDown(); + if (engine != null && engine.isClosed.get() == false) { + engine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + } + if (replicaEngine != null && replicaEngine.isClosed.get() == false) { + 
replicaEngine.getTranslog().getDeletionPolicy().assertNoOpenTranslogRefs(); + } + IOUtils.close( + replicaEngine, storeReplica, + engine, store); + terminate(threadPool); + } + + + protected static ParseContext.Document testDocumentWithTextField() { + return testDocumentWithTextField("test"); + } + + protected static ParseContext.Document testDocumentWithTextField(String value) { + ParseContext.Document document = testDocument(); + document.add(new TextField("value", value, Field.Store.YES)); + return document; + } + + + protected static ParseContext.Document testDocument() { + return new ParseContext.Document(); + } + + public static ParsedDocument createParsedDoc(String id, String routing) { + return testParsedDocument(id, routing, testDocumentWithTextField(), new BytesArray("{ \"value\" : \"test\" }"), null); + } + + protected static ParsedDocument testParsedDocument( + String id, String routing, ParseContext.Document document, BytesReference source, Mapping mappingUpdate) { + Field uidField = new Field("_id", Uid.encodeId(id), IdFieldMapper.Defaults.FIELD_TYPE); + Field versionField = new NumericDocValuesField("_version", 0); + SeqNoFieldMapper.SequenceIDFields seqID = SeqNoFieldMapper.SequenceIDFields.emptySeqID(); + document.add(uidField); + document.add(versionField); + document.add(seqID.seqNo); + document.add(seqID.seqNoDocValue); + document.add(seqID.primaryTerm); + BytesRef ref = source.toBytesRef(); + document.add(new StoredField(SourceFieldMapper.NAME, ref.bytes, ref.offset, ref.length)); + return new ParsedDocument(versionField, seqID, id, "test", routing, Arrays.asList(document), source, XContentType.JSON, + mappingUpdate); + } + + protected Store createStore() throws IOException { + return createStore(newDirectory()); + } + + protected Store createStore(final Directory directory) throws IOException { + return createStore(INDEX_SETTINGS, directory); + } + + protected Store createStore(final IndexSettings indexSettings, final Directory directory) throws IOException { + final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) { + @Override + public Directory newDirectory() throws IOException { + return directory; + } + }; + return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId)); + } + + protected Translog createTranslog() throws IOException { + return createTranslog(primaryTranslogDir); + } + + protected Translog createTranslog(Path translogPath) throws IOException { + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, INDEX_SETTINGS, BigArrays.NON_RECYCLING_INSTANCE); + return new Translog(translogConfig, null, createTranslogDeletionPolicy(INDEX_SETTINGS), () -> SequenceNumbers.UNASSIGNED_SEQ_NO); + } + + protected InternalEngine createEngine(Store store, Path translogPath) throws IOException { + return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null); + } + + protected InternalEngine createEngine( + Store store, + Path translogPath, + BiFunction sequenceNumbersServiceSupplier) throws IOException { + return createEngine(defaultSettings, store, translogPath, newMergePolicy(), null, sequenceNumbersServiceSupplier); + } + + protected InternalEngine createEngine( + Store store, + Path translogPath, + BiFunction sequenceNumbersServiceSupplier, + ToLongBiFunction seqNoForOperation) throws IOException { + return createEngine( + defaultSettings, store, translogPath, newMergePolicy(), null, sequenceNumbersServiceSupplier, seqNoForOperation, null); + } + + protected 
InternalEngine createEngine( + IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy) throws IOException { + return createEngine(indexSettings, store, translogPath, mergePolicy, null); + + } + + protected InternalEngine createEngine(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, + @Nullable IndexWriterFactory indexWriterFactory) throws IOException { + return createEngine(indexSettings, store, translogPath, mergePolicy, indexWriterFactory, null); + } + + protected InternalEngine createEngine( + IndexSettings indexSettings, + Store store, + Path translogPath, + MergePolicy mergePolicy, + @Nullable IndexWriterFactory indexWriterFactory, + @Nullable BiFunction sequenceNumbersServiceSupplier) throws IOException { + return createEngine( + indexSettings, store, translogPath, mergePolicy, indexWriterFactory, sequenceNumbersServiceSupplier, null, null); + } + + protected InternalEngine createEngine( + IndexSettings indexSettings, + Store store, + Path translogPath, + MergePolicy mergePolicy, + @Nullable IndexWriterFactory indexWriterFactory, + @Nullable BiFunction sequenceNumbersServiceSupplier, + @Nullable ToLongBiFunction seqNoForOperation) throws IOException { + return createEngine( + indexSettings, + store, + translogPath, + mergePolicy, + indexWriterFactory, + sequenceNumbersServiceSupplier, + seqNoForOperation, + null); + } + + protected InternalEngine createEngine( + IndexSettings indexSettings, + Store store, + Path translogPath, + MergePolicy mergePolicy, + @Nullable IndexWriterFactory indexWriterFactory, + @Nullable BiFunction sequenceNumbersServiceSupplier, + @Nullable ToLongBiFunction seqNoForOperation, + @Nullable Sort indexSort) throws IOException { + EngineConfig config = config(indexSettings, store, translogPath, mergePolicy, null, indexSort); + InternalEngine internalEngine = createInternalEngine(indexWriterFactory, sequenceNumbersServiceSupplier, seqNoForOperation, config); + if (config.getOpenMode() == EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG) { + internalEngine.recoverFromTranslog(); + } + return internalEngine; + } + + @FunctionalInterface + public interface IndexWriterFactory { + + IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException; + } + + public static InternalEngine createInternalEngine( + @Nullable final IndexWriterFactory indexWriterFactory, + @Nullable final BiFunction sequenceNumbersServiceSupplier, + @Nullable final ToLongBiFunction seqNoForOperation, + final EngineConfig config) { + if (sequenceNumbersServiceSupplier == null) { + return new InternalEngine(config) { + @Override + IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { + return (indexWriterFactory != null) ? + indexWriterFactory.createWriter(directory, iwc) : + super.createWriter(directory, iwc); + } + + @Override + protected long doGenerateSeqNoForOperation(final Operation operation) { + return seqNoForOperation != null + ? seqNoForOperation.applyAsLong(this, operation) + : super.doGenerateSeqNoForOperation(operation); + } + }; + } else { + return new InternalEngine(config, sequenceNumbersServiceSupplier) { + @Override + IndexWriter createWriter(Directory directory, IndexWriterConfig iwc) throws IOException { + return (indexWriterFactory != null) ? 
+ indexWriterFactory.createWriter(directory, iwc) : + super.createWriter(directory, iwc); + } + + @Override + protected long doGenerateSeqNoForOperation(final Operation operation) { + return seqNoForOperation != null + ? seqNoForOperation.applyAsLong(this, operation) + : super.doGenerateSeqNoForOperation(operation); + } + }; + } + + } + + public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, + ReferenceManager.RefreshListener refreshListener) { + return config(indexSettings, store, translogPath, mergePolicy, refreshListener, null); + } + + public EngineConfig config(IndexSettings indexSettings, Store store, Path translogPath, MergePolicy mergePolicy, + ReferenceManager.RefreshListener refreshListener, Sort indexSort) { + IndexWriterConfig iwc = newIndexWriterConfig(); + TranslogConfig translogConfig = new TranslogConfig(shardId, translogPath, indexSettings, BigArrays.NON_RECYCLING_INSTANCE); + final EngineConfig.OpenMode openMode; + try { + if (Lucene.indexExists(store.directory()) == false) { + openMode = EngineConfig.OpenMode.CREATE_INDEX_AND_TRANSLOG; + } else { + openMode = EngineConfig.OpenMode.OPEN_INDEX_AND_TRANSLOG; + } + } catch (IOException e) { + throw new ElasticsearchException("can't find index?", e); + } + Engine.EventListener listener = new Engine.EventListener() { + @Override + public void onFailedEngine(String reason, @Nullable Exception e) { + // we don't need to notify anybody in this test + } + }; + final TranslogHandler handler = new TranslogHandler(xContentRegistry(), IndexSettingsModule.newIndexSettings(shardId.getIndexName(), + indexSettings.getSettings())); + final List refreshListenerList = + refreshListener == null ? emptyList() : Collections.singletonList(refreshListener); + EngineConfig config = new EngineConfig(openMode, shardId, allocationId.getId(), threadPool, indexSettings, null, store, + mergePolicy, iwc.getAnalyzer(), iwc.getSimilarity(), new CodecService(null, logger), listener, + IndexSearcher.getDefaultQueryCache(), IndexSearcher.getDefaultQueryCachingPolicy(), false, translogConfig, + TimeValue.timeValueMinutes(5), refreshListenerList, indexSort, handler); + + return config; + } + + protected static final BytesReference B_1 = new BytesArray(new byte[]{1}); + protected static final BytesReference B_2 = new BytesArray(new byte[]{2}); + protected static final BytesReference B_3 = new BytesArray(new byte[]{3}); + protected static final BytesArray SOURCE = bytesArray("{}"); + + protected static BytesArray bytesArray(String string) { + return new BytesArray(string.getBytes(Charset.defaultCharset())); + } + + protected Term newUid(String id) { + return new Term("_id", Uid.encodeId(id)); + } + + protected Term newUid(ParsedDocument doc) { + return newUid(doc.id()); + } + + protected Engine.Get newGet(boolean realtime, ParsedDocument doc) { + return new Engine.Get(realtime, doc.type(), doc.id(), newUid(doc)); + } + + protected Engine.Index indexForDoc(ParsedDocument doc) { + return new Engine.Index(newUid(doc), doc); + } + + protected Engine.Index replicaIndexForDoc(ParsedDocument doc, long version, long seqNo, + boolean isRetry) { + return new Engine.Index(newUid(doc), doc, seqNo, 1, version, VersionType.EXTERNAL, + Engine.Operation.Origin.REPLICA, System.nanoTime(), + IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, isRetry); + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicies.java 
diff --git a/test/framework/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicies.java b/test/framework/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicies.java
new file mode 100644
index 0000000000000..3ab55b687bd20
--- /dev/null
+++ b/test/framework/src/main/java/org/elasticsearch/index/translog/TranslogDeletionPolicies.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.translog;
+
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.index.IndexSettings;
+
+public class TranslogDeletionPolicies {
+
+    public static TranslogDeletionPolicy createTranslogDeletionPolicy() {
+        return new TranslogDeletionPolicy(
+            IndexSettings.INDEX_TRANSLOG_RETENTION_SIZE_SETTING.getDefault(Settings.EMPTY).getBytes(),
+            IndexSettings.INDEX_TRANSLOG_RETENTION_AGE_SETTING.getDefault(Settings.EMPTY).getMillis()
+        );
+    }
+
+    public static TranslogDeletionPolicy createTranslogDeletionPolicy(IndexSettings indexSettings) {
+        return new TranslogDeletionPolicy(indexSettings.getTranslogRetentionSize().getBytes(),
+            indexSettings.getTranslogRetentionAge().getMillis());
+    }
+
+}

From 95cf3df6ac6eb37be7f81462b5bd5b35e8a3c2bc Mon Sep 17 00:00:00 2001
From: Boaz Leskes
Date: Tue, 7 Nov 2017 08:35:00 +0100
Subject: [PATCH 24/25] TemplateUpgradeService should only run on the master (#27294)

The `TemplateUpgradeService` allows plugins to register a callback that
mutates index templates upon recovery. This is handy for upgrade logic
that needs to make sure that an existing index template is updated once
the cluster is upgraded to a new version of the plugin (and ES).

Currently, the service has complicated logic to decide which node
should perform the upgrade. It prefers the master node if the master is
on the highest version in the cluster, and otherwise falls back to one
of the non-coordinating nodes that are on the latest version. While
this attempts to make sure that new nodes can assume their template
version is in place (old nodes still need to be able to operate under
both old and new templates), it has an inherent problem: the master (on
an old version) may not be able to process the put-template request
with the new template - it may be missing certain features.

This PR changes the logic to be simpler and always rely on the current
master node. This comes at the price that new nodes need to operate
with both old and new templates. That price is small, as they need to
operate with old indices regardless of the template. On the flip side,
we remove a lot of complexity from what can happen in the cluster.
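
In other words, the cluster-state listener now reduces to a single
master check before doing any work. The fragment below sketches the
resulting shape; it is illustrative only, with error handling and the
actual template diffing elided:

    @Override
    public void clusterChanged(ClusterChangedEvent event) {
        ClusterState state = event.state();
        // Only the currently elected master applies template upgrades;
        // every other node returns immediately.
        if (state.nodes().isLocalNodeElectedMaster() == false) {
            return;
        }
        // ... compute template changes and deletions, then submit them
        // via updateTemplates(changes, deletions) from the master ...
    }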
---
 .../metadata/TemplateUpgradeService.java      | 39 +--------------
 .../metadata/TemplateUpgradeServiceTests.java | 45 -------------------
 2 files changed, 1 insertion(+), 83 deletions(-)

diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java b/core/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java
index 8e8f0c594bc71..c0d8d1ceab6d5 100644
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java
+++ b/core/src/main/java/org/elasticsearch/cluster/metadata/TemplateUpgradeService.java
@@ -116,7 +116,7 @@ public void clusterChanged(ClusterChangedEvent event) {
             return;
         }
 
-        if (shouldLocalNodeUpdateTemplates(state.nodes()) == false) {
+        if (state.nodes().isLocalNodeElectedMaster() == false) {
             return;
         }
 
@@ -133,43 +133,6 @@ public void clusterChanged(ClusterChangedEvent event) {
         }
     }
 
-    /**
-     * Checks if the current node should update the templates
-     *
-     * If the master has the newest verison in the cluster - it will be dedicated template updater.
-     * Otherwise the node with the highest id among nodes with the highest version should update the templates
-     */
-    boolean shouldLocalNodeUpdateTemplates(DiscoveryNodes nodes) {
-        DiscoveryNode localNode = nodes.getLocalNode();
-        // Only data and master nodes should update the template
-        if (localNode.isDataNode() || localNode.isMasterNode()) {
-            DiscoveryNode masterNode = nodes.getMasterNode();
-            if (masterNode == null) {
-                return false;
-            }
-            Version maxVersion = nodes.getLargestNonClientNodeVersion();
-            if (maxVersion.equals(masterNode.getVersion())) {
-                // If the master has the latest version - we will allow it to handle the update
-                return nodes.isLocalNodeElectedMaster();
-            } else {
-                if (maxVersion.equals(localNode.getVersion()) == false) {
-                    // The localhost node doesn't have the latest version - not going to update
-                    return false;
-                }
-                for (ObjectCursor<DiscoveryNode> node : nodes.getMasterAndDataNodes().values()) {
-                    if (node.value.getVersion().equals(maxVersion) && node.value.getId().compareTo(localNode.getId()) > 0) {
-                        // We have a node with higher id then mine - it should update
-                        return false;
-                    }
-                }
-                // We have the highest version and highest id - we should perform the update
-                return true;
-            }
-        } else {
-            return false;
-        }
-    }
-
     void updateTemplates(Map<String, BytesReference> changes, Set<String> deletions) {
         for (Map.Entry<String, BytesReference> change : changes.entrySet()) {
             PutIndexTemplateRequest request =
diff --git a/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java b/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java
index 0b32ae2eb99f2..e1763fa6a5d60 100644
--- a/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/metadata/TemplateUpgradeServiceTests.java
@@ -341,51 +341,6 @@ public void testClusterStateUpdate() {
 
     private static final int NODE_TEST_ITERS = 100;
 
-    public void testOnlyOneNodeRunsTemplateUpdates() {
-        TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null, Collections.emptyList());
-        for (int i = 0; i < NODE_TEST_ITERS; i++) {
-            int nodesCount = randomIntBetween(1, 10);
-            int clientNodesCount = randomIntBetween(0, 4);
-            DiscoveryNodes nodes = randomNodes(nodesCount, clientNodesCount);
-            int updaterNode = -1;
-            for (int j = 0; j < nodesCount; j++) {
-                DiscoveryNodes localNodes = DiscoveryNodes.builder(nodes).localNodeId(nodes.resolveNode("node_" + j).getId()).build();
-                if (service.shouldLocalNodeUpdateTemplates(localNodes)) {
-                    assertThat("Expected only one node to update template, found " + updaterNode + " and " + j, updaterNode, lessThan(0));
-                    updaterNode = j;
-                }
-            }
-            assertThat("Expected one node to update template", updaterNode, greaterThanOrEqualTo(0));
-        }
-    }
-
-    public void testIfMasterHasTheHighestVersionItShouldRunsTemplateUpdates() {
-        for (int i = 0; i < NODE_TEST_ITERS; i++) {
-            int nodesCount = randomIntBetween(1, 10);
-            int clientNodesCount = randomIntBetween(0, 4);
-            DiscoveryNodes nodes = randomNodes(nodesCount, clientNodesCount);
-            DiscoveryNodes.Builder builder = DiscoveryNodes.builder(nodes).localNodeId(nodes.resolveNode("_master").getId());
-            nodes = builder.build();
-            TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null,
-                Collections.emptyList());
-            assertThat(service.shouldLocalNodeUpdateTemplates(nodes),
-                equalTo(nodes.getLargestNonClientNodeVersion().equals(nodes.getMasterNode().getVersion())));
-        }
-    }
-
-    public void testClientNodeDontRunTemplateUpdates() {
-        for (int i = 0; i < NODE_TEST_ITERS; i++) {
-            int nodesCount = randomIntBetween(1, 10);
-            int clientNodesCount = randomIntBetween(1, 4);
-            DiscoveryNodes nodes = randomNodes(nodesCount, clientNodesCount);
-            int testClient = randomIntBetween(0, clientNodesCount - 1);
-            DiscoveryNodes.Builder builder = DiscoveryNodes.builder(nodes).localNodeId(nodes.resolveNode("client_" + testClient).getId());
-            TemplateUpgradeService service = new TemplateUpgradeService(Settings.EMPTY, null, clusterService, null,
-                Collections.emptyList());
-            assertThat(service.shouldLocalNodeUpdateTemplates(builder.build()), equalTo(false));
-        }
-    }
-
     private DiscoveryNodes randomNodes(int dataAndMasterNodes, int clientNodes) {
         DiscoveryNodes.Builder builder = DiscoveryNodes.builder();
         String masterNodeId = null;

From 2fc6c64c8232d7e178f0bae69a353b19627a724d Mon Sep 17 00:00:00 2001
From: Boaz Leskes
Date: Tue, 7 Nov 2017 10:58:45 +0100
Subject: [PATCH 25/25] Disable bwc tests in preparation of backporting #26931

---
 build.gradle | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/build.gradle b/build.gradle
index cfc8401a934e0..7b1e517a8586b 100644
--- a/build.gradle
+++ b/build.gradle
@@ -186,7 +186,7 @@ task verifyVersions {
  * after the backport of the backcompat code is complete.
  */
 allprojects {
-  ext.bwc_tests_enabled = true
+  ext.bwc_tests_enabled = false
 }
 
 task verifyBwcTestsEnabled {