From 53f704bb54929a3312176469aa6d5e7ac91dd706 Mon Sep 17 00:00:00 2001 From: DongDongLee Date: Tue, 13 Feb 2024 19:20:19 +0800 Subject: [PATCH] MR: Migrate parameterized tests to JUnit5 (#9711) --- .../iceberg/mr/TestIcebergInputFormats.java | 96 +++--- .../HiveIcebergStorageHandlerTestUtils.java | 17 +- ...estHiveIcebergStorageHandlerLocalScan.java | 220 ++++++------- .../TestHiveIcebergStorageHandlerNoScan.java | 293 +++++++++--------- ...TestHiveIcebergStorageHandlerTimezone.java | 73 +++-- ...stHiveIcebergStorageHandlerWithEngine.java | 282 +++++++++-------- ...ergStorageHandlerWithMultipleCatalogs.java | 71 +++-- .../apache/iceberg/mr/hive/TestTables.java | 67 ++-- 8 files changed, 549 insertions(+), 570 deletions(-) diff --git a/mr/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java b/mr/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java index 8d0aad44cfc0..c9d5d487de38 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java +++ b/mr/src/test/java/org/apache/iceberg/mr/TestIcebergInputFormats.java @@ -19,10 +19,14 @@ package org.apache.iceberg.mr; import static org.apache.iceberg.types.Types.NestedField.required; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; import java.io.File; import java.io.IOException; import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.List; import java.util.Map; import java.util.Set; @@ -40,6 +44,9 @@ import org.apache.iceberg.CatalogUtil; import org.apache.iceberg.DataFile; import org.apache.iceberg.FileFormat; +import org.apache.iceberg.Parameter; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.Table; @@ -60,16 +67,12 @@ import org.apache.iceberg.relocated.com.google.common.collect.Sets; import 
org.apache.iceberg.types.TypeUtil; import org.apache.iceberg.types.Types; -import org.assertj.core.api.Assertions; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; + +@ExtendWith(ParameterizedTestExtension.class) public class TestIcebergInputFormats { public static final List> TESTED_INPUT_FORMATS = @@ -90,7 +93,7 @@ public class TestIcebergInputFormats { private static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA).identity("date").bucket("id", 1).build(); - @Rule public TemporaryFolder temp = new TemporaryFolder(); + @TempDir private Path temp; // before variables private Configuration conf; @@ -98,23 +101,26 @@ public class TestIcebergInputFormats { private InputFormatConfig.ConfigBuilder builder; // parametrized variables - private final TestInputFormat.Factory testInputFormat; - private final FileFormat fileFormat; + @Parameter(index = 0) + private TestInputFormat.Factory testInputFormat; + + @Parameter(index = 1) + private FileFormat fileFormat; - @Before + @BeforeEach public void before() throws IOException { conf = new Configuration(); conf.set(CatalogUtil.ICEBERG_CATALOG_TYPE, Catalogs.LOCATION); HadoopTables tables = new HadoopTables(conf); - File location = temp.newFolder(testInputFormat.name(), fileFormat.name()); - Assert.assertTrue(location.delete()); + File location = temp.resolve(Paths.get(testInputFormat.name(), fileFormat.name())).toFile(); + assertThat(location).doesNotExist(); helper = new TestHelper(conf, tables, location.toString(), SCHEMA, SPEC, fileFormat, temp); builder = new InputFormatConfig.ConfigBuilder(conf).readFrom(location.toString()); } - 
@Parameterized.Parameters(name = "testInputFormat = {0}, fileFormat = {1}") + @Parameters(name = "testInputFormat = {0}, fileFormat = {1}") public static Object[][] parameters() { Object[][] parameters = new Object[TESTED_INPUT_FORMATS.size() * TESTED_FILE_FORMATS.size()][2]; @@ -122,20 +128,14 @@ public static Object[][] parameters() { for (TestInputFormat.Factory inputFormat : TESTED_INPUT_FORMATS) { for (String fileFormat : TESTED_FILE_FORMATS) { - parameters[idx++] = new Object[] {inputFormat, fileFormat}; + parameters[idx++] = new Object[] {inputFormat, FileFormat.fromString(fileFormat)}; } } return parameters; } - public TestIcebergInputFormats( - TestInputFormat.Factory testInputFormat, String fileFormat) { - this.testInputFormat = testInputFormat; - this.fileFormat = FileFormat.fromString(fileFormat); - } - - @Test + @TestTemplate public void testUnpartitionedTable() throws Exception { helper.createUnpartitionedTable(); List expectedRecords = helper.generateRandomRecords(1, 0L); @@ -144,7 +144,7 @@ public void testUnpartitionedTable() throws Exception { testInputFormat.create(builder.conf()).validate(expectedRecords); } - @Test + @TestTemplate public void testPartitionedTable() throws Exception { helper.createTable(); List expectedRecords = helper.generateRandomRecords(1, 0L); @@ -154,7 +154,7 @@ public void testPartitionedTable() throws Exception { testInputFormat.create(builder.conf()).validate(expectedRecords); } - @Test + @TestTemplate public void testFilterExp() throws Exception { helper.createTable(); @@ -171,7 +171,7 @@ public void testFilterExp() throws Exception { testInputFormat.create(builder.conf()).validate(expectedRecords); } - @Test + @TestTemplate public void testResiduals() throws Exception { helper.createTable(); @@ -198,7 +198,7 @@ public void testResiduals() throws Exception { testInputFormat.create(builder.conf()).validate(writeRecords); } - @Test + @TestTemplate public void testFailedResidualFiltering() throws Exception { 
helper.createTable(); @@ -213,20 +213,20 @@ public void testFailedResidualFiltering() throws Exception { .filter( Expressions.and(Expressions.equal("date", "2020-03-20"), Expressions.equal("id", 0))); - Assertions.assertThatThrownBy(() -> testInputFormat.create(builder.conf())) + assertThatThrownBy(() -> testInputFormat.create(builder.conf())) .isInstanceOf(UnsupportedOperationException.class) .hasMessage( "Filter expression ref(name=\"id\") == 0 is not completely satisfied. Additional rows can be returned not satisfied by the filter expression"); builder.usePigTuples(); - Assertions.assertThatThrownBy(() -> testInputFormat.create(builder.conf())) + assertThatThrownBy(() -> testInputFormat.create(builder.conf())) .isInstanceOf(UnsupportedOperationException.class) .hasMessage( "Filter expression ref(name=\"id\") == 0 is not completely satisfied. Additional rows can be returned not satisfied by the filter expression"); } - @Test + @TestTemplate public void testProjection() throws Exception { helper.createTable(); List inputRecords = helper.generateRandomRecords(1, 0L); @@ -237,8 +237,8 @@ public void testProjection() throws Exception { List outputRecords = testInputFormat.create(builder.conf()).getRecords(); - Assert.assertEquals(inputRecords.size(), outputRecords.size()); - Assert.assertEquals(projection.asStruct(), outputRecords.get(0).struct()); + assertThat(outputRecords).hasSameSizeAs(inputRecords); + assertThat(outputRecords.get(0).struct()).isEqualTo(projection.asStruct()); } private static final Schema LOG_SCHEMA = @@ -251,7 +251,7 @@ public void testProjection() throws Exception { private static final PartitionSpec IDENTITY_PARTITION_SPEC = PartitionSpec.builderFor(LOG_SCHEMA).identity("date").identity("level").build(); - @Test + @TestTemplate public void testIdentityPartitionProjections() throws Exception { helper.createTable(LOG_SCHEMA, IDENTITY_PARTITION_SPEC); List inputRecords = helper.generateRandomRecords(10, 0L); @@ -310,19 +310,19 @@ private void 
validateIdentityPartitionProjections( for (int pos = 0; pos < inputRecords.size(); pos++) { Record inputRecord = inputRecords.get(pos); Record actualRecord = actualRecords.get(pos); - Assert.assertEquals( - "Projected schema should match", projectedSchema.asStruct(), actualRecord.struct()); + assertThat(actualRecord.struct()) + .as("Projected schema should match") + .isEqualTo(projectedSchema.asStruct()); for (String name : fieldNames) { - Assert.assertEquals( - "Projected field " + name + " should match", - inputRecord.getField(name), - actualRecord.getField(name)); + assertThat(actualRecord.getField(name)) + .as("Projected field " + name + " should match") + .isEqualTo(inputRecord.getField(name)); } } } - @Test + @TestTemplate public void testSnapshotReads() throws Exception { helper.createUnpartitionedTable(); @@ -336,26 +336,26 @@ public void testSnapshotReads() throws Exception { testInputFormat.create(builder.conf()).validate(expectedRecords); } - @Test + @TestTemplate public void testLocality() throws Exception { helper.createUnpartitionedTable(); List expectedRecords = helper.generateRandomRecords(1, 0L); helper.appendToTable(null, expectedRecords); for (InputSplit split : testInputFormat.create(builder.conf()).getSplits()) { - Assert.assertArrayEquals(new String[] {"*"}, split.getLocations()); + assertThat(split.getLocations()).containsExactly("*"); } builder.preferLocality(); for (InputSplit split : testInputFormat.create(builder.conf()).getSplits()) { - Assert.assertArrayEquals(new String[] {"localhost"}, split.getLocations()); + assertThat(split.getLocations()).containsExactly("localhost"); } } - @Test + @TestTemplate public void testCustomCatalog() throws IOException { - String warehouseLocation = temp.newFolder("hadoop_catalog").getAbsolutePath(); + String warehouseLocation = temp.resolve("hadoop_catalog").toAbsolutePath().toString(); conf.set("warehouse.location", warehouseLocation); conf.set(InputFormatConfig.CATALOG_NAME, 
Catalogs.ICEBERG_DEFAULT_CATALOG_NAME); conf.set( @@ -402,7 +402,7 @@ public List getSplits() { } public void validate(List expected) { - Assert.assertEquals(expected, records); + assertThat(records).isEqualTo(expected); } public interface Factory { diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerTestUtils.java b/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerTestUtils.java index f20b28a35fd2..72b5034051da 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerTestUtils.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandlerTestUtils.java @@ -21,6 +21,7 @@ import static org.apache.iceberg.types.Types.NestedField.optional; import java.io.IOException; +import java.nio.file.Path; import java.util.Collections; import java.util.List; import java.util.Map; @@ -31,7 +32,6 @@ import org.apache.iceberg.mr.TestHelper; import org.apache.iceberg.types.Types; import org.apache.orc.OrcConf; -import org.junit.rules.TemporaryFolder; public class HiveIcebergStorageHandlerTestUtils { static final FileFormat[] FILE_FORMATS = @@ -78,22 +78,17 @@ static TestHiveShell shell(Map configs) { } static TestTables testTables( - TestHiveShell shell, TestTables.TestTableType testTableType, TemporaryFolder temp) - throws IOException { + TestHiveShell shell, TestTables.TestTableType testTableType, Path temp) throws IOException { return testTables(shell, testTableType, temp, Catalogs.ICEBERG_DEFAULT_CATALOG_NAME); } static TestTables testTables( - TestHiveShell shell, - TestTables.TestTableType testTableType, - TemporaryFolder temp, - String catalogName) + TestHiveShell shell, TestTables.TestTableType testTableType, Path temp, String catalogName) throws IOException { return testTableType.instance(shell.metastore().hiveConf(), temp, catalogName); } - static void init( - TestHiveShell shell, TestTables testTables, TemporaryFolder temp, String engine) { + static void init(TestHiveShell shell, 
TestTables testTables, Path temp, String engine) { shell.openSession(); for (Map.Entry property : testTables.properties().entrySet()) { @@ -101,8 +96,8 @@ static void init( } shell.setHiveSessionValue("hive.execution.engine", engine); - shell.setHiveSessionValue("hive.jar.directory", temp.getRoot().getAbsolutePath()); - shell.setHiveSessionValue("tez.staging-dir", temp.getRoot().getAbsolutePath()); + shell.setHiveSessionValue("hive.jar.directory", temp.toAbsolutePath().toString()); + shell.setHiveSessionValue("tez.staging-dir", temp.toAbsolutePath().toString()); } static void close(TestHiveShell shell) throws Exception { diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java index 5aeb825e7ba5..fd15a9a62621 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerLocalScan.java @@ -19,16 +19,19 @@ package org.apache.iceberg.mr.hive; import static org.apache.iceberg.types.Types.NestedField.required; -import static org.junit.runners.Parameterized.Parameter; -import static org.junit.runners.Parameterized.Parameters; +import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; import java.math.BigDecimal; +import java.nio.file.Path; import java.util.Collection; import java.util.Collections; import java.util.List; import java.util.Map; import org.apache.iceberg.FileFormat; +import org.apache.iceberg.Parameter; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.PartitionSpecParser; import org.apache.iceberg.Schema; @@ -45,18 +48,15 @@ import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.relocated.com.google.common.collect.Maps; import 
org.apache.iceberg.types.Types; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; + +@ExtendWith(ParameterizedTestExtension.class) public class TestHiveIcebergStorageHandlerLocalScan { @Parameters(name = "fileFormat={0}, catalog={1}") @@ -83,25 +83,25 @@ public static Collection parameters() { private TestTables testTables; - @Parameter(0) - public FileFormat fileFormat; + @Parameter(index = 0) + private FileFormat fileFormat; - @Parameter(1) - public TestTables.TestTableType testTableType; + @Parameter(index = 1) + private TestTables.TestTableType testTableType; - @Rule public TemporaryFolder temp = new TemporaryFolder(); + @TempDir private Path temp; - @BeforeClass + @BeforeAll public static void beforeClass() { shell = HiveIcebergStorageHandlerTestUtils.shell(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { shell.stop(); } - @Before + @BeforeEach public void before() throws IOException { testTables = HiveIcebergStorageHandlerTestUtils.testTables(shell, testTableType, temp); // Uses spark as an engine so we can detect if we unintentionally try to use any execution @@ -109,21 +109,21 @@ public void before() throws IOException { HiveIcebergStorageHandlerTestUtils.init(shell, testTables, temp, "spark"); } - @After + @AfterEach public void after() throws Exception { HiveIcebergStorageHandlerTestUtils.close(shell); } - @Test + @TestTemplate public void 
testScanEmptyTable() throws IOException { Schema emptySchema = new Schema(required(1, "empty", Types.StringType.get())); testTables.createTable(shell, "empty", emptySchema, fileFormat, ImmutableList.of()); List rows = shell.executeStatement("SELECT * FROM default.empty"); - Assert.assertEquals(0, rows.size()); + assertThat(rows).isEmpty(); } - @Test + @TestTemplate public void testScanTable() throws IOException { testTables.createTable( shell, @@ -135,13 +135,14 @@ public void testScanTable() throws IOException { // Single fetch task: no MR job. List rows = shell.executeStatement("SELECT * FROM default.customers"); - Assert.assertEquals(3, rows.size()); - Assert.assertArrayEquals(new Object[] {0L, "Alice", "Brown"}, rows.get(0)); - Assert.assertArrayEquals(new Object[] {1L, "Bob", "Green"}, rows.get(1)); - Assert.assertArrayEquals(new Object[] {2L, "Trudy", "Pink"}, rows.get(2)); + assertThat(rows) + .containsExactly( + new Object[] {0L, "Alice", "Brown"}, + new Object[] {1L, "Bob", "Green"}, + new Object[] {2L, "Trudy", "Pink"}); } - @Test + @TestTemplate public void testScanTableCaseInsensitive() throws IOException { testTables.createTable( shell, @@ -152,22 +153,22 @@ public void testScanTableCaseInsensitive() throws IOException { List rows = shell.executeStatement("SELECT * FROM default.customers"); - Assert.assertEquals(3, rows.size()); - Assert.assertArrayEquals(new Object[] {0L, "Alice", "Brown"}, rows.get(0)); - Assert.assertArrayEquals(new Object[] {1L, "Bob", "Green"}, rows.get(1)); - Assert.assertArrayEquals(new Object[] {2L, "Trudy", "Pink"}, rows.get(2)); + assertThat(rows) + .containsExactly( + new Object[] {0L, "Alice", "Brown"}, + new Object[] {1L, "Bob", "Green"}, + new Object[] {2L, "Trudy", "Pink"}); rows = shell.executeStatement( "SELECT * FROM default.customers where CustomER_Id < 2 " + "and first_name in ('Alice', 'Bob')"); - Assert.assertEquals(2, rows.size()); - Assert.assertArrayEquals(new Object[] {0L, "Alice", "Brown"}, rows.get(0)); - 
Assert.assertArrayEquals(new Object[] {1L, "Bob", "Green"}, rows.get(1)); + assertThat(rows) + .containsExactly(new Object[] {0L, "Alice", "Brown"}, new Object[] {1L, "Bob", "Green"}); } - @Test + @TestTemplate public void testDecimalTableWithPredicateLiterals() throws IOException { Schema schema = new Schema(required(1, "decimal_field", Types.DecimalType.of(7, 2))); List records = @@ -181,28 +182,23 @@ public void testDecimalTableWithPredicateLiterals() throws IOException { // Use integer literal in predicate List rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field >= 85"); - Assert.assertEquals(3, rows.size()); - Assert.assertArrayEquals(new Object[] {"85.00"}, rows.get(0)); - Assert.assertArrayEquals(new Object[] {"100.56"}, rows.get(1)); - Assert.assertArrayEquals(new Object[] {"100.57"}, rows.get(2)); + assertThat(rows) + .containsExactly(new Object[] {"85.00"}, new Object[] {"100.56"}, new Object[] {"100.57"}); // Use decimal literal in predicate with smaller scale than schema type definition rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field > 99.1"); - Assert.assertEquals(2, rows.size()); - Assert.assertArrayEquals(new Object[] {"100.56"}, rows.get(0)); - Assert.assertArrayEquals(new Object[] {"100.57"}, rows.get(1)); + assertThat(rows).containsExactly(new Object[] {"100.56"}, new Object[] {"100.57"}); // Use decimal literal in predicate with higher scale than schema type definition rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field > 100.565"); - Assert.assertEquals(1, rows.size()); - Assert.assertArrayEquals(new Object[] {"100.57"}, rows.get(0)); + assertThat(rows).containsExactly(new Object[] {"100.57"}); // Use decimal literal in predicate with the same scale as schema type definition rows = shell.executeStatement("SELECT * FROM default.dec_test where decimal_field > 640.34"); - Assert.assertEquals(0, rows.size()); + assertThat(rows).isEmpty(); } - @Test + 
@TestTemplate public void testColumnSelection() throws IOException { testTables.createTable( shell, @@ -214,37 +210,37 @@ public void testColumnSelection() throws IOException { List outOfOrderColumns = shell.executeStatement("SELECT first_name, customer_id, last_name FROM default.customers"); - Assert.assertEquals(3, outOfOrderColumns.size()); - Assert.assertArrayEquals(new Object[] {"Alice", 0L, "Brown"}, outOfOrderColumns.get(0)); - Assert.assertArrayEquals(new Object[] {"Bob", 1L, "Green"}, outOfOrderColumns.get(1)); - Assert.assertArrayEquals(new Object[] {"Trudy", 2L, "Pink"}, outOfOrderColumns.get(2)); + assertThat(outOfOrderColumns) + .containsExactly( + new Object[] {"Alice", 0L, "Brown"}, + new Object[] {"Bob", 1L, "Green"}, + new Object[] {"Trudy", 2L, "Pink"}); List allButFirstColumn = shell.executeStatement("SELECT first_name, last_name FROM default.customers"); - Assert.assertEquals(3, allButFirstColumn.size()); - Assert.assertArrayEquals(new Object[] {"Alice", "Brown"}, allButFirstColumn.get(0)); - Assert.assertArrayEquals(new Object[] {"Bob", "Green"}, allButFirstColumn.get(1)); - Assert.assertArrayEquals(new Object[] {"Trudy", "Pink"}, allButFirstColumn.get(2)); + assertThat(allButFirstColumn) + .containsExactly( + new Object[] {"Alice", "Brown"}, + new Object[] {"Bob", "Green"}, + new Object[] {"Trudy", "Pink"}); List allButMiddleColumn = shell.executeStatement("SELECT customer_id, last_name FROM default.customers"); - Assert.assertEquals(3, allButMiddleColumn.size()); - Assert.assertArrayEquals(new Object[] {0L, "Brown"}, allButMiddleColumn.get(0)); - Assert.assertArrayEquals(new Object[] {1L, "Green"}, allButMiddleColumn.get(1)); - Assert.assertArrayEquals(new Object[] {2L, "Pink"}, allButMiddleColumn.get(2)); + assertThat(allButMiddleColumn) + .containsExactly( + new Object[] {0L, "Brown"}, new Object[] {1L, "Green"}, new Object[] {2L, "Pink"}); List allButLastColumn = shell.executeStatement("SELECT customer_id, first_name FROM 
default.customers"); - Assert.assertEquals(3, allButLastColumn.size()); - Assert.assertArrayEquals(new Object[] {0L, "Alice"}, allButLastColumn.get(0)); - Assert.assertArrayEquals(new Object[] {1L, "Bob"}, allButLastColumn.get(1)); - Assert.assertArrayEquals(new Object[] {2L, "Trudy"}, allButLastColumn.get(2)); + assertThat(allButLastColumn) + .containsExactly( + new Object[] {0L, "Alice"}, new Object[] {1L, "Bob"}, new Object[] {2L, "Trudy"}); } - @Test + @TestTemplate public void selectSameColumnTwice() throws IOException { testTables.createTable( shell, @@ -256,13 +252,14 @@ public void selectSameColumnTwice() throws IOException { List columns = shell.executeStatement("SELECT first_name, first_name FROM default.customers"); - Assert.assertEquals(3, columns.size()); - Assert.assertArrayEquals(new Object[] {"Alice", "Alice"}, columns.get(0)); - Assert.assertArrayEquals(new Object[] {"Bob", "Bob"}, columns.get(1)); - Assert.assertArrayEquals(new Object[] {"Trudy", "Trudy"}, columns.get(2)); + assertThat(columns) + .containsExactly( + new Object[] {"Alice", "Alice"}, + new Object[] {"Bob", "Bob"}, + new Object[] {"Trudy", "Trudy"}); } - @Test + @TestTemplate public void testCreateTableWithColumnSpecification() throws IOException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); Map> data = Maps.newHashMapWithExpectedSize(1); @@ -283,7 +280,7 @@ public void testCreateTableWithColumnSpecification() throws IOException { data); } - @Test + @TestTemplate public void testCreateTableWithColumnSpecificationPartitioned() throws IOException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); PartitionSpec spec = @@ -313,7 +310,7 @@ public void testCreateTableWithColumnSpecificationPartitioned() throws IOExcepti identifier, createSql, HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec, data); } - @Test + @TestTemplate public void testCreatePartitionedTableByProperty() throws IOException { TableIdentifier identifier = 
TableIdentifier.of("default", "customers"); PartitionSpec spec = @@ -355,7 +352,7 @@ public void testCreatePartitionedTableByProperty() throws IOException { identifier, createSql, HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec, data); } - @Test + @TestTemplate public void testCreateTableWithColumnSpecificationMultilevelPartitioned() throws IOException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); PartitionSpec spec = @@ -387,7 +384,7 @@ public void testCreateTableWithColumnSpecificationMultilevelPartitioned() throws identifier, createSql, HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, spec, data); } - @Test + @TestTemplate public void testArrayOfPrimitivesInTable() throws IOException { Schema schema = new Schema( @@ -404,12 +401,12 @@ public void testArrayOfPrimitivesInTable() throws IOException { String.format( "SELECT arrayofprimitives[%d] FROM default.arraytable " + "LIMIT 1 OFFSET %d", j, i)); - Assert.assertEquals(expectedList.get(j), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(expectedList.get(j)); } } } - @Test + @TestTemplate public void testArrayOfArraysInTable() throws IOException { Schema schema = new Schema( @@ -430,13 +427,13 @@ public void testArrayOfArraysInTable() throws IOException { String.format( "SELECT arrayofarrays[%d][%d] FROM default.arraytable " + "LIMIT 1 OFFSET %d", j, k, i)); - Assert.assertEquals(expectedInnerList.get(k).toString(), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(expectedInnerList.get(k).toString()); } } } } - @Test + @TestTemplate public void testArrayOfMapsInTable() throws IOException { Schema schema = new Schema( @@ -460,13 +457,13 @@ public void testArrayOfMapsInTable() throws IOException { String.format( "SELECT arrayofmaps[%d][\"%s\"] FROM default.arraytable LIMIT 1 OFFSET %d", j, entry.getKey(), i)); - Assert.assertEquals(entry.getValue(), queryResult.get(0)[0]); + 
assertThat(queryResult.get(0)[0]).isEqualTo(entry.getValue()); } } } } - @Test + @TestTemplate public void testArrayOfStructsInTable() throws IOException { Schema schema = new Schema( @@ -493,14 +490,16 @@ public void testArrayOfStructsInTable() throws IOException { + "OFFSET %d", j, j, j, i)); GenericRecord genericRecord = (GenericRecord) expectedList.get(j); - Assert.assertEquals(genericRecord.getField("something"), queryResult.get(0)[0]); - Assert.assertEquals(genericRecord.getField("someone"), queryResult.get(0)[1]); - Assert.assertEquals(genericRecord.getField("somewhere"), queryResult.get(0)[2]); + assertThat(queryResult.get(0)) + .containsExactly( + genericRecord.getField("something"), + genericRecord.getField("someone"), + genericRecord.getField("somewhere")); } } } - @Test + @TestTemplate public void testMapOfPrimitivesInTable() throws IOException { Schema schema = new Schema( @@ -519,12 +518,12 @@ public void testMapOfPrimitivesInTable() throws IOException { String.format( "SELECT mapofprimitives[\"%s\"] " + "FROM default.maptable LIMIT 1 OFFSET %d", entry.getKey(), i)); - Assert.assertEquals(entry.getValue(), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(entry.getValue()); } } } - @Test + @TestTemplate public void testMapOfArraysInTable() throws IOException { Schema schema = new Schema( @@ -549,13 +548,13 @@ public void testMapOfArraysInTable() throws IOException { String.format( "SELECT mapofarrays[\"%s\"]" + "[%d] FROM maptable LIMIT 1 OFFSET %d", entry.getKey(), j, i)); - Assert.assertEquals(expectedList.get(j).toString(), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(expectedList.get(j).toString()); } } } } - @Test + @TestTemplate public void testMapOfMapsInTable() throws IOException { Schema schema = new Schema( @@ -581,13 +580,13 @@ public void testMapOfMapsInTable() throws IOException { String.format( "SELECT mapofmaps[\"%s\"]" + "[\"%s\"] FROM maptable LIMIT 1 OFFSET %d", entry.getKey(), 
innerEntry.getKey(), i)); - Assert.assertEquals(innerEntry.getValue(), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(innerEntry.getValue()); } } } } - @Test + @TestTemplate public void testMapOfStructsInTable() throws IOException { Schema schema = new Schema( @@ -616,14 +615,16 @@ public void testMapOfStructsInTable() throws IOException { + "OFFSET %d", entry.getKey(), entry.getKey(), entry.getKey(), i)); GenericRecord genericRecord = (GenericRecord) entry.getValue(); - Assert.assertEquals(genericRecord.getField("something"), queryResult.get(0)[0]); - Assert.assertEquals(genericRecord.getField("someone"), queryResult.get(0)[1]); - Assert.assertEquals(genericRecord.getField("somewhere"), queryResult.get(0)[2]); + assertThat(queryResult.get(0)) + .containsExactly( + genericRecord.getField("something"), + genericRecord.getField("someone"), + genericRecord.getField("somewhere")); } } } - @Test + @TestTemplate public void testStructOfPrimitivesInTable() throws IOException { Schema schema = new Schema( @@ -643,12 +644,12 @@ public void testStructOfPrimitivesInTable() throws IOException { String.format( "SELECT structofprimitives.key, structofprimitives.value FROM default.structtable LIMIT 1 OFFSET %d", i)); - Assert.assertEquals(expectedStruct.getField("key"), queryResult.get(0)[0]); - Assert.assertEquals(expectedStruct.getField("value"), queryResult.get(0)[1]); + assertThat(queryResult.get(0)) + .containsExactly(expectedStruct.getField("key"), expectedStruct.getField("value")); } } - @Test + @TestTemplate public void testStructOfArraysInTable() throws IOException { Schema schema = new Schema( @@ -670,7 +671,7 @@ public void testStructOfArraysInTable() throws IOException { String.format( "SELECT structofarrays.names[%d] FROM default.structtable LIMIT 1 OFFSET %d", j, i)); - Assert.assertEquals(expectedList.get(j), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(expectedList.get(j)); } expectedList = (List) 
expectedStruct.getField("birthdays"); for (int j = 0; j < expectedList.size(); j++) { @@ -679,12 +680,12 @@ public void testStructOfArraysInTable() throws IOException { String.format( "SELECT structofarrays.birthdays[%d] FROM default.structtable LIMIT 1 OFFSET %d", j, i)); - Assert.assertEquals(expectedList.get(j).toString(), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(expectedList.get(j).toString()); } } } - @Test + @TestTemplate public void testStructOfMapsInTable() throws IOException { Schema schema = new Schema( @@ -714,7 +715,7 @@ public void testStructOfMapsInTable() throws IOException { String.format( "SELECT structofmaps.map1[\"%s\"] from default.structtable LIMIT 1 OFFSET %d", entry.getKey(), i)); - Assert.assertEquals(entry.getValue(), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(entry.getValue()); } expectedMap = (Map) expectedStruct.getField("map2"); for (Map.Entry entry : expectedMap.entrySet()) { @@ -723,12 +724,12 @@ public void testStructOfMapsInTable() throws IOException { String.format( "SELECT structofmaps.map2[\"%s\"] from default.structtable LIMIT 1 OFFSET %d", entry.getKey(), i)); - Assert.assertEquals(entry.getValue(), queryResult.get(0)[0]); + assertThat(queryResult.get(0)[0]).isEqualTo(entry.getValue()); } } } - @Test + @TestTemplate public void testStructOfStructsInTable() throws IOException { Schema schema = new Schema( @@ -754,8 +755,9 @@ public void testStructOfStructsInTable() throws IOException { "SELECT structofstructs.struct1.key, structofstructs.struct1.value FROM default.structtable " + "LIMIT 1 OFFSET %d", i)); - Assert.assertEquals(expectedInnerStruct.getField("key"), queryResult.get(0)[0]); - Assert.assertEquals(expectedInnerStruct.getField("value"), queryResult.get(0)[1]); + assertThat(queryResult.get(0)) + .containsExactly( + expectedInnerStruct.getField("key"), expectedInnerStruct.getField("value")); } } @@ -769,8 +771,8 @@ private void runCreateAndReadTest( 
shell.executeStatement(createSQL); org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals(expectedSchema.asStruct(), icebergTable.schema().asStruct()); - Assert.assertEquals(expectedSpec, icebergTable.spec()); + assertThat(icebergTable.schema().asStruct()).isEqualTo(expectedSchema.asStruct()); + assertThat(icebergTable.spec()).isEqualTo(expectedSpec); List expected = Lists.newArrayList(); for (StructLike partition : data.keySet()) { diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java index 81e2ffcc84da..534cc7d7476c 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerNoScan.java @@ -20,8 +20,9 @@ import static org.apache.iceberg.TableProperties.GC_ENABLED; import static org.apache.iceberg.types.Types.NestedField.optional; -import static org.junit.runners.Parameterized.Parameter; -import static org.junit.runners.Parameterized.Parameters; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; import java.util.Collection; @@ -38,6 +39,9 @@ import org.apache.iceberg.BaseMetastoreTableOperations; import org.apache.iceberg.BaseTable; import org.apache.iceberg.FileFormat; +import org.apache.iceberg.Parameter; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.PartitionSpecParser; import org.apache.iceberg.Schema; @@ -61,20 +65,15 @@ import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; import org.apache.thrift.TException; -import org.assertj.core.api.Assertions; -import org.junit.After; 
-import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; + +@ExtendWith(ParameterizedTestExtension.class) public class TestHiveIcebergStorageHandlerNoScan { private static final PartitionSpec SPEC = PartitionSpec.unpartitioned(); @@ -140,22 +139,21 @@ public static Collection parameters() { private TestTables testTables; - @Parameter(0) - public TestTables.TestTableType testTableType; + @Parameter private TestTables.TestTableType testTableType; - @Rule public TemporaryFolder temp = new TemporaryFolder(); + @TempDir private java.nio.file.Path temp; - @BeforeClass + @BeforeAll public static void beforeClass() { shell = HiveIcebergStorageHandlerTestUtils.shell(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { shell.stop(); } - @Before + @BeforeEach public void before() throws IOException { testTables = HiveIcebergStorageHandlerTestUtils.testTables(shell, testTableType, temp); // Uses spark as an engine so we can detect if we unintentionally try to use any execution @@ -163,12 +161,12 @@ public void before() throws IOException { HiveIcebergStorageHandlerTestUtils.init(shell, testTables, temp, "spark"); } - @After + @AfterEach public void after() throws Exception { HiveIcebergStorageHandlerTestUtils.close(shell); } - @Test + @TestTemplate public void testCreateDropTable() throws TException, IOException, InterruptedException { TableIdentifier identifier = 
TableIdentifier.of("default", "customers"); @@ -195,10 +193,9 @@ public void testCreateDropTable() throws TException, IOException, InterruptedExc // Check the Iceberg table data org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals( - HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.asStruct(), - icebergTable.schema().asStruct()); - Assert.assertEquals(PartitionSpec.unpartitioned(), icebergTable.spec()); + assertThat(icebergTable.schema().asStruct()) + .isEqualTo(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.asStruct()); + assertThat(icebergTable.spec()).isEqualTo(PartitionSpec.unpartitioned()); org.apache.hadoop.hive.metastore.api.Table hmsTable = shell.metastore().getTable("default", "customers"); @@ -210,7 +207,7 @@ public void testCreateDropTable() throws TException, IOException, InterruptedExc shell.executeStatement("DROP TABLE customers"); // Check if the table was really dropped even from the Catalog - Assertions.assertThatThrownBy(() -> testTables.loadTable(identifier)) + assertThatThrownBy(() -> testTables.loadTable(identifier)) .isInstanceOf(NoSuchTableException.class) .hasMessageStartingWith("Table does not exist"); } else { @@ -220,7 +217,7 @@ public void testCreateDropTable() throws TException, IOException, InterruptedExc shell.executeStatement("DROP TABLE customers"); // Check if we drop an exception when trying to load the table - Assertions.assertThatThrownBy(() -> testTables.loadTable(identifier)) + assertThatThrownBy(() -> testTables.loadTable(identifier)) .isInstanceOf(NoSuchTableException.class) .hasMessage("Table does not exist: default.customers"); // Check if the files are removed @@ -229,13 +226,13 @@ public void testCreateDropTable() throws TException, IOException, InterruptedExc // if table directory has been deleted, we're good. This is the expected behavior in Hive4. // if table directory exists, its contents should have been cleaned up, save for an empty // metadata dir (Hive3). 
- Assert.assertEquals(1, fs.listStatus(hmsTableLocation).length); - Assert.assertEquals(0, fs.listStatus(new Path(hmsTableLocation, "metadata")).length); + assertThat(fs.listStatus(hmsTableLocation)).hasSize(1); + assertThat(fs.listStatus(new Path(hmsTableLocation, "metadata"))).isEmpty(); } } } - @Test + @TestTemplate public void testCreateDropTableNonDefaultCatalog() throws TException, InterruptedException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); String catalogName = "nondefaultcatalog"; @@ -257,18 +254,17 @@ public void testCreateDropTableNonDefaultCatalog() throws TException, Interrupte shell.executeStatement(createSql); Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals( - HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.asStruct(), - icebergTable.schema().asStruct()); + assertThat(icebergTable.schema().asStruct()) + .isEqualTo(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.asStruct()); shell.executeStatement("DROP TABLE default.customers"); // Check if the table was really dropped even from the Catalog - Assertions.assertThatThrownBy(() -> testTables.loadTable(identifier)) + assertThatThrownBy(() -> testTables.loadTable(identifier)) .isInstanceOf(NoSuchTableException.class) .hasMessageStartingWith("Table does not exist"); } - @Test + @TestTemplate public void testCreateTableWithoutSpec() { TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -288,10 +284,10 @@ public void testCreateTableWithoutSpec() { // Check the Iceberg table partition data org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals(PartitionSpec.unpartitioned(), icebergTable.spec()); + assertThat(icebergTable.spec()).isEqualTo(PartitionSpec.unpartitioned()); } - @Test + @TestTemplate public void testCreateTableWithUnpartitionedSpec() { TableIdentifier identifier = TableIdentifier.of("default", "customers"); // We need the location for HadoopTable based tests only @@ 
-317,10 +313,10 @@ public void testCreateTableWithUnpartitionedSpec() { // Check the Iceberg table partition data org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals(SPEC, icebergTable.spec()); + assertThat(icebergTable.spec()).isEqualTo(SPEC); } - @Test + @TestTemplate public void testCreateTableWithFormatV2ThroughTableProperty() { TableIdentifier identifier = TableIdentifier.of("default", "customers"); // We need the location for HadoopTable based tests only @@ -351,13 +347,12 @@ public void testCreateTableWithFormatV2ThroughTableProperty() { // Check the Iceberg table partition data org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals( - "should create table using format v2", - 2, - ((BaseTable) icebergTable).operations().current().formatVersion()); + assertThat(((BaseTable) icebergTable).operations().current().formatVersion()) + .as("should create table using format v2") + .isEqualTo(2); } - @Test + @TestTemplate public void testDeleteBackingTable() throws TException, IOException, InterruptedException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -398,23 +393,23 @@ public void testDeleteBackingTable() throws TException, IOException, Interrupted shell.executeStatement("DROP TABLE customers"); // Check if we drop an exception when trying to drop the table - Assertions.assertThatThrownBy(() -> testTables.loadTable(identifier)) + assertThatThrownBy(() -> testTables.loadTable(identifier)) .isInstanceOf(NoSuchTableException.class) .hasMessage("Table does not exist: default.customers"); // Check if the files are kept FileSystem fs = Util.getFs(hmsTableLocation, shell.getHiveConf()); - Assert.assertEquals(1, fs.listStatus(hmsTableLocation).length); - Assert.assertEquals(1, fs.listStatus(new Path(hmsTableLocation, "metadata")).length); + assertThat(fs.listStatus(hmsTableLocation)).hasSize(1); + assertThat(fs.listStatus(new Path(hmsTableLocation, 
"metadata"))).hasSize(1); } } - @Test + @TestTemplate public void testDropTableWithCorruptedMetadata() throws TException, IOException, InterruptedException { - Assume.assumeTrue( - "Only HiveCatalog attempts to load the Iceberg table prior to dropping it.", - testTableType == TestTables.TestTableType.HIVE_CATALOG); + assumeThat(testTableType) + .as("Only HiveCatalog attempts to load the Iceberg table prior to dropping it.") + .isEqualTo(TestTables.TestTableType.HIVE_CATALOG); // create test table TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -441,17 +436,17 @@ public void testDropTableWithCorruptedMetadata() // check if HMS table is nonetheless still droppable shell.executeStatement(String.format("DROP TABLE %s", identifier)); - Assertions.assertThatThrownBy(() -> testTables.loadTable(identifier)) + assertThatThrownBy(() -> testTables.loadTable(identifier)) .isInstanceOf(NoSuchTableException.class) .hasMessage("Table does not exist: default.customers"); } - @Test + @TestTemplate public void testCreateTableError() { TableIdentifier identifier = TableIdentifier.of("default", "withShell2"); // Wrong schema - Assertions.assertThatThrownBy( + assertThatThrownBy( () -> shell.executeStatement( "CREATE EXTERNAL TABLE withShell2 " @@ -470,7 +465,7 @@ public void testCreateTableError() { .hasMessageContaining("Unrecognized token 'WrongSchema'"); // Missing schema, we try to get the schema from the table and fail - Assertions.assertThatThrownBy( + assertThatThrownBy( () -> shell.executeStatement( "CREATE EXTERNAL TABLE withShell2 " @@ -483,7 +478,7 @@ public void testCreateTableError() { if (!testTables.locationForCreateTableSQL(identifier).isEmpty()) { // Only test this if the location is required - Assertions.assertThatThrownBy( + assertThatThrownBy( () -> shell.executeStatement( "CREATE EXTERNAL TABLE withShell2 " @@ -503,7 +498,7 @@ public void testCreateTableError() { } } - @Test + @TestTemplate public void 
testCreateTableAboveExistingTable() throws IOException { // Create the Iceberg table testTables.createIcebergTable( @@ -515,7 +510,7 @@ public void testCreateTableAboveExistingTable() throws IOException { if (testTableType == TestTables.TestTableType.HIVE_CATALOG) { // In HiveCatalog we just expect an exception since the table is already exists - Assertions.assertThatThrownBy( + assertThatThrownBy( () -> shell.executeStatement( "CREATE EXTERNAL TABLE customers " @@ -542,14 +537,14 @@ public void testCreateTableAboveExistingTable() throws IOException { } } - @Test + @TestTemplate public void testCreatePartitionedTableWithPropertiesAndWithColumnSpecification() { PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) .identity("last_name") .build(); - Assertions.assertThatThrownBy( + assertThatThrownBy( () -> shell.executeStatement( "CREATE EXTERNAL TABLE customers (customer_id BIGINT) " @@ -568,7 +563,7 @@ public void testCreatePartitionedTableWithPropertiesAndWithColumnSpecification() "Provide only one of the following: Hive partition specification, or the iceberg.mr.table.partition.spec property"); } - @Test + @TestTemplate public void testCreateTableWithColumnSpecificationHierarchy() { TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -586,10 +581,10 @@ public void testCreateTableWithColumnSpecificationHierarchy() { // Check the Iceberg table data org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals(COMPLEX_SCHEMA.asStruct(), icebergTable.schema().asStruct()); + assertThat(icebergTable.schema().asStruct()).isEqualTo(COMPLEX_SCHEMA.asStruct()); } - @Test + @TestTemplate public void testCreateTableWithAllSupportedTypes() { TableIdentifier identifier = TableIdentifier.of("default", "all_types"); Schema allSupportedSchema = @@ -616,10 +611,10 @@ public void testCreateTableWithAllSupportedTypes() { // Check the Iceberg table data org.apache.iceberg.Table 
icebergTable = testTables.loadTable(identifier); - Assert.assertEquals(allSupportedSchema.asStruct(), icebergTable.schema().asStruct()); + assertThat(icebergTable.schema().asStruct()).isEqualTo(allSupportedSchema.asStruct()); } - @Test + @TestTemplate public void testCreateTableWithNotSupportedTypes() { TableIdentifier identifier = TableIdentifier.of("default", "not_supported_types"); // Can not create INTERVAL types from normal create table, so leave them out from this test @@ -631,7 +626,7 @@ public void testCreateTableWithNotSupportedTypes() { "CHAR(1)", Types.StringType.get()); for (String notSupportedType : notSupportedTypes.keySet()) { - Assertions.assertThatThrownBy( + assertThatThrownBy( () -> shell.executeStatement( "CREATE EXTERNAL TABLE not_supported_types " @@ -647,7 +642,7 @@ public void testCreateTableWithNotSupportedTypes() { } } - @Test + @TestTemplate public void testCreateTableWithNotSupportedTypesWithAutoConversion() { TableIdentifier identifier = TableIdentifier.of("default", "not_supported_types"); // Can not create INTERVAL types from normal create table, so leave them out from this test @@ -674,13 +669,13 @@ public void testCreateTableWithNotSupportedTypesWithAutoConversion() { + testTables.propertiesForCreateTableSQL(ImmutableMap.of())); org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals( - notSupportedTypes.get(notSupportedType), icebergTable.schema().columns().get(0).type()); + assertThat(icebergTable.schema().columns().get(0).type()) + .isEqualTo(notSupportedTypes.get(notSupportedType)); shell.executeStatement("DROP TABLE not_supported_types"); } } - @Test + @TestTemplate public void testCreateTableWithColumnComments() { TableIdentifier identifier = TableIdentifier.of("default", "comment_table"); shell.executeStatement( @@ -694,20 +689,18 @@ public void testCreateTableWithColumnComments() { org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); List rows = 
shell.executeStatement("DESCRIBE default.comment_table"); - Assert.assertEquals(icebergTable.schema().columns().size(), rows.size()); + assertThat(rows).hasSameSizeAs(icebergTable.schema().columns()); for (int i = 0; i < icebergTable.schema().columns().size(); i++) { Types.NestedField field = icebergTable.schema().columns().get(i); - Assert.assertArrayEquals( - new Object[] { - field.name(), - HiveSchemaUtil.convert(field.type()).getTypeName(), - (field.doc() != null ? field.doc() : "from deserializer") - }, - rows.get(i)); + assertThat(rows.get(i)) + .containsExactly( + field.name(), + HiveSchemaUtil.convert(field.type()).getTypeName(), + (field.doc() != null ? field.doc() : "from deserializer")); } } - @Test + @TestTemplate public void testCreateTableWithoutColumnComments() { TableIdentifier identifier = TableIdentifier.of("default", "without_comment_table"); shell.executeStatement( @@ -720,19 +713,19 @@ public void testCreateTableWithoutColumnComments() { org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); List rows = shell.executeStatement("DESCRIBE default.without_comment_table"); - Assert.assertEquals(icebergTable.schema().columns().size(), rows.size()); + assertThat(rows).hasSameSizeAs(icebergTable.schema().columns()); for (int i = 0; i < icebergTable.schema().columns().size(); i++) { Types.NestedField field = icebergTable.schema().columns().get(i); - Assert.assertNull(field.doc()); - Assert.assertArrayEquals( - new Object[] { - field.name(), HiveSchemaUtil.convert(field.type()).getTypeName(), "from deserializer" - }, - rows.get(i)); + assertThat(field.doc()).isNull(); + assertThat(rows.get(i)) + .containsExactly( + field.name(), + HiveSchemaUtil.convert(field.type()).getTypeName(), + (field.doc() != null ? 
field.doc() : "from deserializer")); } } - @Test + @TestTemplate public void testIcebergAndHmsTableProperties() throws Exception { TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -779,38 +772,36 @@ public void testIcebergAndHmsTableProperties() throws Exception { if (HiveVersion.min(HiveVersion.HIVE_3)) { expectedIcebergProperties.put("bucketing_version", "2"); } - Assert.assertEquals(expectedIcebergProperties, icebergTable.properties()); + assertThat(icebergTable.properties()).isEqualTo((expectedIcebergProperties)); if (Catalogs.hiveCatalog(shell.getHiveConf(), tableProperties)) { - Assert.assertEquals(14, hmsParams.size()); - Assert.assertEquals("initial_val", hmsParams.get("custom_property")); - Assert.assertEquals("TRUE", hmsParams.get(InputFormatConfig.EXTERNAL_TABLE_PURGE)); - Assert.assertEquals("TRUE", hmsParams.get("EXTERNAL")); - Assert.assertEquals("true", hmsParams.get(TableProperties.ENGINE_HIVE_ENABLED)); - Assert.assertEquals( - HiveIcebergStorageHandler.class.getName(), - hmsParams.get(hive_metastoreConstants.META_TABLE_STORAGE)); - Assert.assertEquals( - BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.toUpperCase(), - hmsParams.get(BaseMetastoreTableOperations.TABLE_TYPE_PROP)); - Assert.assertEquals( - hmsParams.get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP), - getCurrentSnapshotForHiveCatalogTable(icebergTable)); - Assert.assertNull( - hmsParams.get(BaseMetastoreTableOperations.PREVIOUS_METADATA_LOCATION_PROP)); - Assert.assertNotNull(hmsParams.get(hive_metastoreConstants.DDL_TIME)); - Assert.assertNotNull(hmsParams.get(InputFormatConfig.PARTITION_SPEC)); + assertThat(hmsParams) + .hasSize(14) + .containsEntry("custom_property", "initial_val") + .containsEntry(InputFormatConfig.EXTERNAL_TABLE_PURGE, "TRUE") + .containsEntry("EXTERNAL", "TRUE") + .containsEntry(TableProperties.ENGINE_HIVE_ENABLED, "true") + .containsEntry( + hive_metastoreConstants.META_TABLE_STORAGE, 
HiveIcebergStorageHandler.class.getName()) + .containsEntry( + BaseMetastoreTableOperations.TABLE_TYPE_PROP, + BaseMetastoreTableOperations.ICEBERG_TABLE_TYPE_VALUE.toUpperCase()) + .containsEntry( + BaseMetastoreTableOperations.METADATA_LOCATION_PROP, + getCurrentSnapshotForHiveCatalogTable(icebergTable)) + .doesNotContainKey(BaseMetastoreTableOperations.PREVIOUS_METADATA_LOCATION_PROP) + .containsKey(hive_metastoreConstants.DDL_TIME) + .containsKey(InputFormatConfig.PARTITION_SPEC); } else { - Assert.assertEquals(8, hmsParams.size()); - Assert.assertNull(hmsParams.get(TableProperties.ENGINE_HIVE_ENABLED)); + assertThat(hmsParams).hasSize(8).doesNotContainKey(TableProperties.ENGINE_HIVE_ENABLED); } // Check HMS inputformat/outputformat/serde - Assert.assertEquals(HiveIcebergInputFormat.class.getName(), hmsTable.getSd().getInputFormat()); - Assert.assertEquals( - HiveIcebergOutputFormat.class.getName(), hmsTable.getSd().getOutputFormat()); - Assert.assertEquals( - HiveIcebergSerDe.class.getName(), hmsTable.getSd().getSerdeInfo().getSerializationLib()); + assertThat(hmsTable.getSd().getInputFormat()).isEqualTo(HiveIcebergInputFormat.class.getName()); + assertThat(hmsTable.getSd().getOutputFormat()) + .isEqualTo(HiveIcebergOutputFormat.class.getName()); + assertThat(hmsTable.getSd().getSerdeInfo().getSerializationLib()) + .isEqualTo(HiveIcebergSerDe.class.getName()); // Add two new properties to the Iceberg table and update an existing one icebergTable @@ -827,30 +818,29 @@ public void testIcebergAndHmsTableProperties() throws Exception { .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); if (Catalogs.hiveCatalog(shell.getHiveConf(), tableProperties)) { - // 2 newly-added properties + previous_metadata_location prop + explicit Parquet compression - Assert.assertEquals(17, hmsParams.size()); - Assert.assertEquals("true", hmsParams.get("new_prop_1")); - Assert.assertEquals("false", hmsParams.get("new_prop_2")); - Assert.assertEquals("new_val", 
hmsParams.get("custom_property")); + assertThat(hmsParams) + .hasSize(17) + .containsEntry("new_prop_1", "true") + .containsEntry("new_prop_2", "false") + .containsEntry("custom_property", "new_val"); String prevSnapshot = getCurrentSnapshotForHiveCatalogTable(icebergTable); icebergTable.refresh(); String newSnapshot = getCurrentSnapshotForHiveCatalogTable(icebergTable); - Assert.assertEquals( - hmsParams.get(BaseMetastoreTableOperations.PREVIOUS_METADATA_LOCATION_PROP), - prevSnapshot); - Assert.assertEquals( - hmsParams.get(BaseMetastoreTableOperations.METADATA_LOCATION_PROP), newSnapshot); + assertThat(hmsParams) + .containsEntry(BaseMetastoreTableOperations.PREVIOUS_METADATA_LOCATION_PROP, prevSnapshot) + .containsEntry(BaseMetastoreTableOperations.METADATA_LOCATION_PROP, newSnapshot); } else { - Assert.assertEquals(8, hmsParams.size()); + assertThat(hmsParams).hasSize(8); } // Remove some Iceberg props and see if they're removed from HMS table props as well if (Catalogs.hiveCatalog(shell.getHiveConf(), tableProperties)) { icebergTable.updateProperties().remove("custom_property").remove("new_prop_1").commit(); hmsParams = shell.metastore().getTable("default", "customers").getParameters(); - Assert.assertFalse(hmsParams.containsKey("custom_property")); - Assert.assertFalse(hmsParams.containsKey("new_prop_1")); - Assert.assertTrue(hmsParams.containsKey("new_prop_2")); + assertThat(hmsParams) + .doesNotContainKey("custom_property") + .doesNotContainKey("new_prop_1") + .containsKey("new_prop_2"); } // append some data and check whether HMS stats are aligned with snapshot summary @@ -860,23 +850,20 @@ public void testIcebergAndHmsTableProperties() throws Exception { shell.getHiveConf(), icebergTable, FileFormat.PARQUET, null, records); hmsParams = shell.metastore().getTable("default", "customers").getParameters(); Map summary = icebergTable.currentSnapshot().summary(); - Assert.assertEquals( - summary.get(SnapshotSummary.TOTAL_DATA_FILES_PROP), - 
hmsParams.get(StatsSetupConst.NUM_FILES)); - Assert.assertEquals( - summary.get(SnapshotSummary.TOTAL_RECORDS_PROP), - hmsParams.get(StatsSetupConst.ROW_COUNT)); - Assert.assertEquals( - summary.get(SnapshotSummary.TOTAL_FILE_SIZE_PROP), - hmsParams.get(StatsSetupConst.TOTAL_SIZE)); + assertThat(hmsParams) + .containsEntry( + StatsSetupConst.NUM_FILES, summary.get(SnapshotSummary.TOTAL_DATA_FILES_PROP)) + .containsEntry(StatsSetupConst.ROW_COUNT, summary.get(SnapshotSummary.TOTAL_RECORDS_PROP)) + .containsEntry( + StatsSetupConst.TOTAL_SIZE, summary.get(SnapshotSummary.TOTAL_FILE_SIZE_PROP)); } } - @Test + @TestTemplate public void testIcebergHMSPropertiesTranslation() throws Exception { - Assume.assumeTrue( - "Iceberg - HMS property translation is only relevant for HiveCatalog", - testTableType == TestTables.TestTableType.HIVE_CATALOG); + assumeThat(testTableType) + .as("Iceberg - HMS property translation is only relevant for HiveCatalog") + .isEqualTo(TestTables.TestTableType.HIVE_CATALOG); TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -895,8 +882,9 @@ public void testIcebergHMSPropertiesTranslation() throws Exception { // Check that HMS table prop was translated to equivalent Iceberg prop (purge -> gc.enabled) org.apache.iceberg.Table icebergTable = testTables.loadTable(identifier); - Assert.assertEquals("false", icebergTable.properties().get(GC_ENABLED)); - Assert.assertNull(icebergTable.properties().get(InputFormatConfig.EXTERNAL_TABLE_PURGE)); + assertThat(icebergTable.properties()) + .containsEntry(GC_ENABLED, "false") + .doesNotContainKey(InputFormatConfig.EXTERNAL_TABLE_PURGE); // Change Iceberg prop icebergTable.updateProperties().set(GC_ENABLED, "true").commit(); @@ -904,11 +892,12 @@ public void testIcebergHMSPropertiesTranslation() throws Exception { // Check that Iceberg prop was translated to equivalent HMS prop (gc.enabled -> purge) Map hmsParams = shell.metastore().getTable("default", "customers").getParameters(); - 
Assert.assertEquals("true", hmsParams.get(InputFormatConfig.EXTERNAL_TABLE_PURGE)); - Assert.assertNull(hmsParams.get(GC_ENABLED)); + assertThat(hmsParams) + .containsEntry(InputFormatConfig.EXTERNAL_TABLE_PURGE, "true") + .doesNotContainKey(GC_ENABLED); } - @Test + @TestTemplate public void testDropTableWithAppendedData() throws IOException { TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -931,11 +920,11 @@ public void testDropTableWithAppendedData() throws IOException { shell.executeStatement("DROP TABLE customers"); } - @Test + @TestTemplate public void testDropHiveTableWithoutUnderlyingTable() throws IOException { - Assume.assumeFalse( - "Not relevant for HiveCatalog", - testTableType.equals(TestTables.TestTableType.HIVE_CATALOG)); + assumeThat(testTableType) + .as("Not relevant for HiveCatalog") + .isNotEqualTo(TestTables.TestTableType.HIVE_CATALOG); TableIdentifier identifier = TableIdentifier.of("default", "customers"); // Create the Iceberg table in non-HiveCatalog diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerTimezone.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerTimezone.java index 86e3baf8e759..a7aa5126e2e2 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerTimezone.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerTimezone.java @@ -19,10 +19,10 @@ package org.apache.iceberg.mr.hive; import static org.apache.iceberg.types.Types.NestedField.optional; -import static org.junit.runners.Parameterized.Parameter; -import static org.junit.runners.Parameterized.Parameters; +import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; +import java.nio.file.Path; import java.text.DateFormat; import java.time.LocalDate; import java.time.LocalDateTime; @@ -33,24 +33,24 @@ import org.apache.hadoop.hive.serde2.io.DateWritable; import 
org.apache.hadoop.hive.serde2.io.TimestampWritable; import org.apache.iceberg.FileFormat; +import org.apache.iceberg.Parameter; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.Schema; import org.apache.iceberg.common.DynFields; import org.apache.iceberg.data.Record; import org.apache.iceberg.mr.TestHelper; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableList; import org.apache.iceberg.types.Types; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; + +@ExtendWith(ParameterizedTestExtension.class) public class TestHiveIcebergStorageHandlerTimezone { private static final Optional> dateFormat = Optional.ofNullable( @@ -82,22 +82,21 @@ public static Collection parameters() { private TestTables testTables; - @Parameter(0) - public String timezoneString; + @Parameter private String timezoneString; - @Rule public TemporaryFolder temp = new TemporaryFolder(); + @TempDir private Path temp; - @BeforeClass + @BeforeAll public static void beforeClass() { shell = HiveIcebergStorageHandlerTestUtils.shell(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { shell.stop(); } - @Before + @BeforeEach public void before() throws IOException { TimeZone.setDefault(TimeZone.getTimeZone(timezoneString)); @@ -115,12 +114,12 @@ public void before() throws IOException { 
HiveIcebergStorageHandlerTestUtils.init(shell, testTables, temp, "spark"); } - @After + @AfterEach public void after() throws Exception { HiveIcebergStorageHandlerTestUtils.close(shell); } - @Test + @TestTemplate public void testDateQuery() throws IOException { Schema dateSchema = new Schema(optional(1, "d_date", Types.DateType.get())); @@ -134,24 +133,24 @@ public void testDateQuery() throws IOException { List result = shell.executeStatement("SELECT * from date_test WHERE d_date='2020-01-21'"); - Assert.assertEquals(1, result.size()); - Assert.assertEquals("2020-01-21", result.get(0)[0]); + assertThat(result).hasSize(1); + assertThat(result.get(0)[0]).isEqualTo("2020-01-21"); result = shell.executeStatement( "SELECT * from date_test WHERE d_date in ('2020-01-21', '2020-01-22')"); - Assert.assertEquals(1, result.size()); - Assert.assertEquals("2020-01-21", result.get(0)[0]); + assertThat(result).hasSize(1); + assertThat(result.get(0)[0]).isEqualTo("2020-01-21"); result = shell.executeStatement("SELECT * from date_test WHERE d_date > '2020-01-21'"); - Assert.assertEquals(1, result.size()); - Assert.assertEquals("2020-01-24", result.get(0)[0]); + assertThat(result).hasSize(1); + assertThat(result.get(0)[0]).isEqualTo("2020-01-24"); result = shell.executeStatement("SELECT * from date_test WHERE d_date='2020-01-20'"); - Assert.assertEquals(0, result.size()); + assertThat(result).isEmpty(); } - @Test + @TestTemplate public void testTimestampQuery() throws IOException { Schema timestampSchema = new Schema(optional(1, "d_ts", Types.TimestampType.withoutZone())); @@ -165,21 +164,21 @@ public void testTimestampQuery() throws IOException { List result = shell.executeStatement("SELECT d_ts FROM ts_test WHERE d_ts='2019-02-22 09:44:54.2'"); - Assert.assertEquals(1, result.size()); - Assert.assertEquals("2019-02-22 09:44:54.2", result.get(0)[0]); + assertThat(result).hasSize(1); + assertThat(result.get(0)[0]).isEqualTo("2019-02-22 09:44:54.2"); result = shell.executeStatement( 
"SELECT * FROM ts_test WHERE d_ts in ('2017-01-01 22:30:57.1', '2019-02-22 09:44:54.2')"); - Assert.assertEquals(1, result.size()); - Assert.assertEquals("2019-02-22 09:44:54.2", result.get(0)[0]); + assertThat(result).hasSize(1); + assertThat(result.get(0)[0]).isEqualTo("2019-02-22 09:44:54.2"); result = shell.executeStatement("SELECT d_ts FROM ts_test WHERE d_ts < '2019-02-22 09:44:54.2'"); - Assert.assertEquals(1, result.size()); - Assert.assertEquals("2019-01-22 09:44:54.1", result.get(0)[0]); + assertThat(result).hasSize(1); + assertThat(result.get(0)[0]).isEqualTo("2019-01-22 09:44:54.1"); result = shell.executeStatement("SELECT * FROM ts_test WHERE d_ts='2017-01-01 22:30:57.3'"); - Assert.assertEquals(0, result.size()); + assertThat(result).isEmpty(); } } diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java index 09242ead0969..c8e91de9b859 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithEngine.java @@ -20,11 +20,11 @@ import static org.apache.iceberg.types.Types.NestedField.optional; import static org.apache.iceberg.types.Types.NestedField.required; -import static org.junit.Assume.assumeTrue; -import static org.junit.runners.Parameterized.Parameter; -import static org.junit.runners.Parameterized.Parameters; +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assumptions.assumeThat; import java.io.IOException; +import java.nio.file.Path; import java.util.Collection; import java.util.List; import java.util.Map; @@ -34,6 +34,9 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.mr.ExecMapper; import org.apache.iceberg.FileFormat; +import org.apache.iceberg.Parameter; +import 
org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.PartitionSpec; import org.apache.iceberg.Schema; import org.apache.iceberg.Table; @@ -50,20 +53,17 @@ import org.apache.iceberg.relocated.com.google.common.collect.Lists; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Assume; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.rules.Timeout; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; + +@ExtendWith(ParameterizedTestExtension.class) +@Timeout(value = 200_000, unit = TimeUnit.MILLISECONDS) public class TestHiveIcebergStorageHandlerWithEngine { private static final String[] EXECUTION_ENGINES = new String[] {"tez", "mr"}; @@ -150,33 +150,31 @@ public static Collection parameters() { private TestTables testTables; - @Parameter(0) - public FileFormat fileFormat; + @Parameter(index = 0) + private FileFormat fileFormat; - @Parameter(1) - public String executionEngine; + @Parameter(index = 1) + private String executionEngine; - @Parameter(2) - public TestTables.TestTableType testTableType; + @Parameter(index = 2) + private TestTables.TestTableType testTableType; - @Parameter(3) - public boolean isVectorized; + @Parameter(index = 3) + private boolean isVectorized; - @Rule public TemporaryFolder temp = new TemporaryFolder(); + @TempDir private Path temp; - @Rule public Timeout 
timeout = new Timeout(200_000, TimeUnit.MILLISECONDS); - - @BeforeClass + @BeforeAll public static void beforeClass() { shell = HiveIcebergStorageHandlerTestUtils.shell(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { shell.stop(); } - @Before + @BeforeEach public void before() throws IOException { testTables = HiveIcebergStorageHandlerTestUtils.testTables(shell, testTableType, temp); HiveIcebergStorageHandlerTestUtils.init(shell, testTables, temp, executionEngine); @@ -189,7 +187,7 @@ public void before() throws IOException { } } - @After + @AfterEach public void after() throws Exception { HiveIcebergStorageHandlerTestUtils.close(shell); // Mixing mr and tez jobs within the same JVM can cause problems. Mr jobs set the ExecMapper @@ -203,7 +201,7 @@ public void after() throws Exception { ExecMapper.setDone(false); } - @Test + @TestTemplate public void testScanTable() throws IOException { testTables.createTable( shell, @@ -217,13 +215,12 @@ public void testScanTable() throws IOException { shell.executeStatement( "SELECT first_name, customer_id FROM default.customers ORDER BY customer_id DESC"); - Assert.assertEquals(3, descRows.size()); - Assert.assertArrayEquals(new Object[] {"Trudy", 2L}, descRows.get(0)); - Assert.assertArrayEquals(new Object[] {"Bob", 1L}, descRows.get(1)); - Assert.assertArrayEquals(new Object[] {"Alice", 0L}, descRows.get(2)); + assertThat(descRows) + .containsExactly( + new Object[] {"Trudy", 2L}, new Object[] {"Bob", 1L}, new Object[] {"Alice", 0L}); } - @Test + @TestTemplate public void testCBOWithSelectedColumnsNonOverlapJoin() throws IOException { shell.setHiveSessionValue("hive.cbo.enable", true); @@ -235,13 +232,14 @@ public void testCBOWithSelectedColumnsNonOverlapJoin() throws IOException { "SELECT o.order_id, o.customer_id, o.total, p.name " + "FROM default.orders o JOIN default.products p ON o.product_id = p.id ORDER BY o.order_id"); - Assert.assertEquals(3, rows.size()); - 
Assert.assertArrayEquals(new Object[] {100L, 0L, 11.11d, "skirt"}, rows.get(0)); - Assert.assertArrayEquals(new Object[] {101L, 0L, 22.22d, "tee"}, rows.get(1)); - Assert.assertArrayEquals(new Object[] {102L, 1L, 33.33d, "watch"}, rows.get(2)); + assertThat(rows) + .containsExactly( + new Object[] {100L, 0L, 11.11d, "skirt"}, + new Object[] {101L, 0L, 22.22d, "tee"}, + new Object[] {102L, 1L, 33.33d, "watch"}); } - @Test + @TestTemplate public void testDescribeTable() throws IOException { testTables.createTable( shell, @@ -250,18 +248,17 @@ public void testDescribeTable() throws IOException { fileFormat, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS); List rows = shell.executeStatement("DESCRIBE default.customers"); - Assert.assertEquals( - HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.columns().size(), rows.size()); + assertThat(rows).hasSameSizeAs(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.columns()); for (int i = 0; i < HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.columns().size(); i++) { Types.NestedField field = HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA.columns().get(i); String comment = field.doc() == null ? 
"from deserializer" : field.doc(); - Assert.assertArrayEquals( - new Object[] {field.name(), HiveSchemaUtil.convert(field.type()).getTypeName(), comment}, - rows.get(i)); + assertThat(rows.get(i)) + .containsExactly( + field.name(), HiveSchemaUtil.convert(field.type()).getTypeName(), comment); } } - @Test + @TestTemplate public void testCBOWithSelectedColumnsOverlapJoin() throws IOException { shell.setHiveSessionValue("hive.cbo.enable", true); testTables.createTable( @@ -278,13 +275,12 @@ public void testCBOWithSelectedColumnsOverlapJoin() throws IOException { + "FROM default.orders o JOIN default.customers c ON o.customer_id = c.customer_id " + "ORDER BY o.order_id DESC"); - Assert.assertEquals(3, rows.size()); - Assert.assertArrayEquals(new Object[] {"Bob", 102L}, rows.get(0)); - Assert.assertArrayEquals(new Object[] {"Alice", 101L}, rows.get(1)); - Assert.assertArrayEquals(new Object[] {"Alice", 100L}, rows.get(2)); + assertThat(rows) + .containsExactly( + new Object[] {"Bob", 102L}, new Object[] {"Alice", 101L}, new Object[] {"Alice", 100L}); } - @Test + @TestTemplate public void testCBOWithSelfJoin() throws IOException { shell.setHiveSessionValue("hive.cbo.enable", true); @@ -295,13 +291,14 @@ public void testCBOWithSelfJoin() throws IOException { "SELECT o1.order_id, o1.customer_id, o1.total " + "FROM default.orders o1 JOIN default.orders o2 ON o1.order_id = o2.order_id ORDER BY o1.order_id"); - Assert.assertEquals(3, rows.size()); - Assert.assertArrayEquals(new Object[] {100L, 0L, 11.11d}, rows.get(0)); - Assert.assertArrayEquals(new Object[] {101L, 0L, 22.22d}, rows.get(1)); - Assert.assertArrayEquals(new Object[] {102L, 1L, 33.33d}, rows.get(2)); + assertThat(rows) + .containsExactly( + new Object[] {100L, 0L, 11.11d}, + new Object[] {101L, 0L, 22.22d}, + new Object[] {102L, 1L, 33.33d}); } - @Test + @TestTemplate public void testJoinTablesSupportedTypes() throws IOException { for (int i = 0; i < SUPPORTED_TYPES.size(); i++) { Type type = 
SUPPORTED_TYPES.get(i); @@ -333,14 +330,13 @@ public void testJoinTablesSupportedTypes() throws IOException { + columnName + "=s." + columnName); - Assert.assertEquals( - "Non matching record count for table " + tableName + " with type " + type, - 1, - queryResult.size()); + assertThat(queryResult) + .as("Non matching record count for table " + tableName + " with type " + type) + .hasSize(1); } } - @Test + @TestTemplate public void testSelectDistinctFromTable() throws IOException { for (int i = 0; i < SUPPORTED_TYPES.size(); i++) { Type type = SUPPORTED_TYPES.get(i); @@ -363,13 +359,13 @@ public void testSelectDistinctFromTable() throws IOException { shell.executeStatement( "select count(distinct(" + columnName + ")) from default." + tableName); int distinctIds = ((Long) queryResult.get(0)[0]).intValue(); - Assert.assertEquals(tableName, size, distinctIds); + assertThat(distinctIds).as(tableName).isEqualTo(size); } } - @Test + @TestTemplate public void testInsert() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Table table = testTables.createTable( @@ -400,9 +396,9 @@ record -> table, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 0); } - @Test + @TestTemplate public void testInsertSupportedTypes() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); for (int i = 0; i < SUPPORTED_TYPES.size(); i++) { Type type = SUPPORTED_TYPES.get(i); // TODO: remove this filter when issue #1881 is resolved @@ -437,9 +433,9 @@ public void testInsertSupportedTypes() throws IOException { * * @throws IOException If there is an underlying IOException */ - @Test + @TestTemplate public void testInsertFromSelect() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", 
executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Table table = testTables.createTable( @@ -462,9 +458,9 @@ public void testInsertFromSelect() throws IOException { * * @throws IOException If there is an underlying IOException */ - @Test + @TestTemplate public void testInsertFromSelectWithOrderBy() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Table table = testTables.createTable( @@ -483,9 +479,9 @@ public void testInsertFromSelectWithOrderBy() throws IOException { HiveIcebergTestUtils.validateData(table, records, 0); } - @Test + @TestTemplate public void testInsertFromSelectWithProjection() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Table table = testTables.createTable( @@ -508,9 +504,9 @@ public void testInsertFromSelectWithProjection() throws IOException { HiveIcebergTestUtils.validateData(table, expected, 0); } - @Test + @TestTemplate public void testInsertUsingSourceTableWithSharedColumnsNames() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); List records = HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS; PartitionSpec spec = @@ -550,9 +546,9 @@ public void testInsertUsingSourceTableWithSharedColumnsNames() throws IOExceptio HiveIcebergTestUtils.validateData(table, expected, 0); } - @Test + @TestTemplate public void testInsertFromJoiningTwoIcebergTables() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); 
PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) @@ -589,9 +585,9 @@ public void testInsertFromJoiningTwoIcebergTables() throws IOException { table, HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS, 0); } - @Test + @TestTemplate public void testWriteArrayOfPrimitivesInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -600,9 +596,9 @@ public void testWriteArrayOfPrimitivesInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteArrayOfArraysInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -615,9 +611,9 @@ public void testWriteArrayOfArraysInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteArrayOfMapsInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -632,9 +628,9 @@ public void testWriteArrayOfMapsInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteArrayOfStructsInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -651,9 +647,9 @@ public void 
testWriteArrayOfStructsInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteMapOfPrimitivesInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -665,9 +661,9 @@ public void testWriteMapOfPrimitivesInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteMapOfArraysInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -683,9 +679,9 @@ public void testWriteMapOfArraysInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteMapOfMapsInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -702,9 +698,9 @@ public void testWriteMapOfMapsInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteMapOfStructsInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -723,9 +719,9 @@ public void testWriteMapOfStructsInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteStructOfPrimitivesInTable() throws 
IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -739,9 +735,9 @@ public void testWriteStructOfPrimitivesInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteStructOfArraysInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -756,9 +752,9 @@ public void testWriteStructOfArraysInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteStructOfMapsInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -780,9 +776,9 @@ public void testWriteStructOfMapsInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testWriteStructOfStructsInTable() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -800,9 +796,9 @@ public void testWriteStructOfStructsInTable() throws IOException { testComplexTypeWrite(schema, records); } - @Test + @TestTemplate public void testPartitionedWrite() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented 
yet").isEqualTo("mr"); PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) @@ -824,9 +820,9 @@ public void testPartitionedWrite() throws IOException { HiveIcebergTestUtils.validateData(table, records, 0); } - @Test + @TestTemplate public void testIdentityPartitionedWrite() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) @@ -848,9 +844,9 @@ public void testIdentityPartitionedWrite() throws IOException { HiveIcebergTestUtils.validateData(table, records, 0); } - @Test + @TestTemplate public void testMultilevelIdentityPartitionedWrite() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); PartitionSpec spec = PartitionSpec.builderFor(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA) @@ -873,9 +869,9 @@ public void testMultilevelIdentityPartitionedWrite() throws IOException { HiveIcebergTestUtils.validateData(table, records, 0); } - @Test + @TestTemplate public void testMultiTableInsert() throws IOException { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); testTables.createTable( shell, @@ -941,11 +937,11 @@ public void testMultiTableInsert() throws IOException { /** * Fix vectorized parquet issue-4403. 
*/ - @Test + @TestTemplate public void testStructMapWithNull() throws IOException { - Assume.assumeTrue( - "Vectorized parquet throw class cast exception see : issue 4403", - !("PARQUET".equals(fileFormat.name()) && isVectorized)); + assumeThat(!("PARQUET".equals(fileFormat.name()) && isVectorized)) + .as("Vectorized parquet throw class cast exception see : issue 4403") + .isTrue(); Schema schema = new Schema( required(1, "id", Types.LongType.get()), @@ -968,17 +964,18 @@ public void testStructMapWithNull() throws IOException { List results = shell.executeStatement("select mapofstructs['context'].someone FROM mapwithnull"); - Assert.assertEquals(1, results.size()); - Assert.assertNull(results.get(0)[0]); + assertThat(results).hasSize(1); + assertThat(results.get(0)[0]).isNull(); } - @Test + @TestTemplate public void testWriteWithDefaultWriteFormat() { - Assume.assumeTrue( - "Testing the default file format is enough for a single scenario.", - executionEngine.equals("mr") - && testTableType == TestTables.TestTableType.HIVE_CATALOG - && fileFormat == FileFormat.ORC); + assumeThat( + executionEngine.equals("mr") + && testTableType == TestTables.TestTableType.HIVE_CATALOG + && fileFormat == FileFormat.ORC) + .as("Testing the default file format is enough for a single scenario.") + .isTrue(); TableIdentifier identifier = TableIdentifier.of("default", "customers"); @@ -994,12 +991,11 @@ public void testWriteWithDefaultWriteFormat() { shell.executeStatement(String.format("INSERT INTO %s VALUES (10, 'Linda')", identifier)); List results = shell.executeStatement(String.format("SELECT * FROM %s", identifier)); - Assert.assertEquals(1, results.size()); - Assert.assertEquals(10L, results.get(0)[0]); - Assert.assertEquals("Linda", results.get(0)[1]); + assertThat(results).hasSize(1); + assertThat(results.get(0)).containsExactly(10L, "Linda"); } - @Test + @TestTemplate public void testInsertEmptyResultSet() throws IOException { Table source = testTables.createTable( @@ -1029,12 
+1025,12 @@ public void testInsertEmptyResultSet() throws IOException { HiveIcebergTestUtils.validateData(target, ImmutableList.of(), 0); } - @Test + @TestTemplate public void testStatsPopulation() throws Exception { - Assume.assumeTrue("Tez write is not implemented yet", executionEngine.equals("mr")); - Assume.assumeTrue( - "Only HiveCatalog can remove stats which become obsolete", - testTableType == TestTables.TestTableType.HIVE_CATALOG); + assumeThat(executionEngine).as("Tez write is not implemented yet").isEqualTo("mr"); + assumeThat(testTableType) + .as("Only HiveCatalog can remove stats which become obsolete") + .isEqualTo(TestTables.TestTableType.HIVE_CATALOG); shell.setHiveSessionValue(HiveConf.ConfVars.HIVESTATSAUTOGATHER.varname, true); // create the table using a catalog which supports updating Hive stats (KEEP_HIVE_STATS is true) @@ -1059,8 +1055,8 @@ public void testStatsPopulation() throws Exception { .getTable(identifier) .getParameters() .get(StatsSetupConst.COLUMN_STATS_ACCURATE); - Assert.assertTrue( - stats.startsWith("{\"BASIC_STATS\":\"true\"")); // it's followed by column stats in Hive3 + assertThat(stats) + .startsWith("{\"BASIC_STATS\":\"true\""); // it's followed by column stats in Hive3 // Create a Catalog where the KEEP_HIVE_STATS is false shell.metastore().hiveConf().set(ConfigProperties.KEEP_HIVE_STATS, StatsSetupConst.FALSE); @@ -1082,7 +1078,7 @@ public void testStatsPopulation() throws Exception { .getTable(identifier) .getParameters() .get(StatsSetupConst.COLUMN_STATS_ACCURATE); - Assert.assertNull(stats); + assertThat(stats).isNull(); // insert some data again using Hive catalog, and check the stats are back shell.executeStatement(insert); @@ -1092,8 +1088,8 @@ public void testStatsPopulation() throws Exception { .getTable(identifier) .getParameters() .get(StatsSetupConst.COLUMN_STATS_ACCURATE); - Assert.assertTrue( - stats.startsWith("{\"BASIC_STATS\":\"true\"")); // it's followed by column stats in Hive3 + assertThat(stats) + 
.startsWith("{\"BASIC_STATS\":\"true\""); // it's followed by column stats in Hive3 } /** @@ -1105,9 +1101,9 @@ public void testStatsPopulation() throws Exception { * * @throws Exception - any test error */ - @Test + @TestTemplate public void testVectorizedOrcMultipleSplits() throws Exception { - assumeTrue(isVectorized && FileFormat.ORC.equals(fileFormat)); + assumeThat(isVectorized && FileFormat.ORC.equals(fileFormat)).isTrue(); // This data will be held by a ~870kB ORC file List records = @@ -1132,12 +1128,12 @@ public void testVectorizedOrcMultipleSplits() throws Exception { shell.setHiveSessionValue(InputFormatConfig.SPLIT_SIZE, "210000"); List result = shell.executeStatement("SELECT * FROM targettab ORDER BY last_name"); - Assert.assertEquals(20000, result.size()); + assertThat(result).hasSize(20000); } - @Test + @TestTemplate public void testRemoveAndAddBackColumnFromIcebergTable() throws IOException { - assumeTrue(isVectorized && FileFormat.PARQUET.equals(fileFormat)); + assumeThat(isVectorized && FileFormat.PARQUET.equals(fileFormat)).isTrue(); // Create an Iceberg table with the columns customer_id, first_name and last_name with some // initial data. 
Table icebergTable = @@ -1267,7 +1263,7 @@ private StringBuilder buildComplexTypeInnerQuery(Object field, Type type) { if (type instanceof Types.ListType) { query.append("array("); List elements = (List) field; - Assert.assertFalse("Hive can not handle empty array() inserts", elements.isEmpty()); + assertThat(elements).as("Hive can not handle empty array() inserts").isNotEmpty(); Type innerType = ((Types.ListType) type).fields().get(0).type(); if (!elements.isEmpty()) { elements.forEach(e -> query.append(buildComplexTypeInnerQuery(e, innerType))); diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithMultipleCatalogs.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithMultipleCatalogs.java index b24959bbe8e7..c2cf8f675007 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithMultipleCatalogs.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestHiveIcebergStorageHandlerWithMultipleCatalogs.java @@ -18,27 +18,30 @@ */ package org.apache.iceberg.mr.hive; +import static org.assertj.core.api.Assertions.assertThat; + import java.io.IOException; +import java.nio.file.Path; import java.util.Collection; import java.util.List; import org.apache.iceberg.FileFormat; +import org.apache.iceberg.Parameter; +import org.apache.iceberg.ParameterizedTestExtension; +import org.apache.iceberg.Parameters; import org.apache.iceberg.Table; import org.apache.iceberg.catalog.TableIdentifier; import org.apache.iceberg.data.Record; import org.apache.iceberg.mr.InputFormatConfig; import org.apache.iceberg.relocated.com.google.common.collect.Lists; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -@RunWith(Parameterized.class) +import 
org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; + +@ExtendWith(ParameterizedTestExtension.class) public class TestHiveIcebergStorageHandlerWithMultipleCatalogs { private static final String[] EXECUTION_ENGINES = new String[] {"tez", "mr"}; @@ -46,32 +49,32 @@ public class TestHiveIcebergStorageHandlerWithMultipleCatalogs { private static final String OTHERCATALOGNAME = "table2_catalog"; private static TestHiveShell shell; - @Parameterized.Parameter(0) - public FileFormat fileFormat1; + @Parameter(index = 0) + private FileFormat fileFormat1; - @Parameterized.Parameter(1) - public FileFormat fileFormat2; + @Parameter(index = 1) + private FileFormat fileFormat2; - @Parameterized.Parameter(2) - public String executionEngine; + @Parameter(index = 2) + private String executionEngine; - @Parameterized.Parameter(3) - public TestTables.TestTableType testTableType1; + @Parameter(index = 3) + private TestTables.TestTableType testTableType1; - @Parameterized.Parameter(4) - public String table1CatalogName; + @Parameter(index = 4) + private String table1CatalogName; - @Parameterized.Parameter(5) - public TestTables.TestTableType testTableType2; + @Parameter(index = 5) + private TestTables.TestTableType testTableType2; - @Parameterized.Parameter(6) - public String table2CatalogName; + @Parameter(index = 6) + private String table2CatalogName; - @Rule public TemporaryFolder temp = new TemporaryFolder(); + @TempDir private Path temp; private TestTables testTables1; private TestTables testTables2; - @Parameterized.Parameters( + @Parameters( name = "fileFormat1={0}, fileFormat2={1}, engine={2}, tableType1={3}, catalogName1={4}, " + "tableType2={5}, catalogName2={6}") @@ -102,17 +105,17 @@ public static Collection parameters() { return 
testParams; } - @BeforeClass + @BeforeAll public static void beforeClass() { shell = HiveIcebergStorageHandlerTestUtils.shell(); } - @AfterClass + @AfterAll public static void afterClass() throws Exception { shell.stop(); } - @Before + @BeforeEach public void before() throws IOException { testTables1 = HiveIcebergStorageHandlerTestUtils.testTables( @@ -132,12 +135,12 @@ public void before() throws IOException { .forEach(e -> shell.setHiveSessionValue(e.getKey(), e.getValue())); } - @After + @AfterEach public void after() throws Exception { HiveIcebergStorageHandlerTestUtils.close(shell); } - @Test + @TestTemplate public void testJoinTablesFromDifferentCatalogs() throws IOException { createAndAddRecords( testTables1, @@ -155,7 +158,7 @@ public void testJoinTablesFromDifferentCatalogs() throws IOException { "SELECT c2.customer_id, c2.first_name, c2.last_name " + "FROM default.customers2 c2 JOIN default.customers1 c1 ON c2.customer_id = c1.customer_id " + "ORDER BY c2.customer_id"); - Assert.assertEquals(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS.size(), rows.size()); + assertThat(rows).hasSameSizeAs(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS); HiveIcebergTestUtils.validateData( Lists.newArrayList(HiveIcebergStorageHandlerTestUtils.CUSTOMER_RECORDS), HiveIcebergTestUtils.valueForRow(HiveIcebergStorageHandlerTestUtils.CUSTOMER_SCHEMA, rows), diff --git a/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java b/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java index faeb7d0df75e..8c8cf894c9f1 100644 --- a/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java +++ b/mr/src/test/java/org/apache/iceberg/mr/hive/TestTables.java @@ -18,9 +18,12 @@ */ package org.apache.iceberg.mr.hive; +import static org.assertj.core.api.Assertions.assertThat; + import java.io.File; import java.io.IOException; -import java.io.UncheckedIOException; +import java.nio.file.Path; +import java.nio.file.Paths; import java.sql.Timestamp; import 
java.time.LocalDateTime; import java.time.OffsetDateTime; @@ -57,11 +60,8 @@ import org.apache.iceberg.relocated.com.google.common.base.Preconditions; import org.apache.iceberg.relocated.com.google.common.collect.ImmutableMap; import org.apache.iceberg.relocated.com.google.common.collect.Maps; -import org.apache.iceberg.relocated.com.google.common.collect.ObjectArrays; import org.apache.iceberg.types.Type; import org.apache.iceberg.types.Types; -import org.junit.Assert; -import org.junit.rules.TemporaryFolder; // Helper class for setting up and testing various catalog implementations abstract class TestTables { @@ -74,16 +74,16 @@ abstract class TestTables { }; private final Tables tables; - protected final TemporaryFolder temp; + protected final Path temp; protected final String catalog; - protected TestTables(Tables tables, TemporaryFolder temp, String catalogName) { + protected TestTables(Tables tables, Path temp, String catalogName) { this.tables = tables; this.temp = temp; this.catalog = catalogName; } - protected TestTables(Catalog catalog, TemporaryFolder temp, String catalogName) { + protected TestTables(Catalog catalog, Path temp, String catalogName) { this(new CatalogToTables(catalog), temp, catalogName); } @@ -417,18 +417,17 @@ static class CustomCatalogTestTables extends TestTables { private final String warehouseLocation; - CustomCatalogTestTables(Configuration conf, TemporaryFolder temp, String catalogName) - throws IOException { + CustomCatalogTestTables(Configuration conf, Path temp, String catalogName) throws IOException { this( conf, temp, (HiveVersion.min(HiveVersion.HIVE_3) ? 
"file:" : "") - + temp.newFolder("custom", "warehouse").toString(), + + temp.resolve(Paths.get("custom", "warehouse")), catalogName); } CustomCatalogTestTables( - Configuration conf, TemporaryFolder temp, String warehouseLocation, String catalogName) { + Configuration conf, Path temp, String warehouseLocation, String catalogName) { super(new TestCatalogs.CustomHadoopCatalog(conf, warehouseLocation), temp, catalogName); this.warehouseLocation = warehouseLocation; } @@ -452,18 +451,17 @@ static class HadoopCatalogTestTables extends TestTables { private final String warehouseLocation; - HadoopCatalogTestTables(Configuration conf, TemporaryFolder temp, String catalogName) - throws IOException { + HadoopCatalogTestTables(Configuration conf, Path temp, String catalogName) throws IOException { this( conf, temp, (HiveVersion.min(HiveVersion.HIVE_3) ? "file:" : "") - + temp.newFolder("hadoop", "warehouse").toString(), + + temp.resolve(Paths.get("hadoop", "warehouse")), catalogName); } HadoopCatalogTestTables( - Configuration conf, TemporaryFolder temp, String warehouseLocation, String catalogName) { + Configuration conf, Path temp, String warehouseLocation, String catalogName) { super(new HadoopCatalog(conf, warehouseLocation), temp, catalogName); this.warehouseLocation = warehouseLocation; } @@ -484,7 +482,8 @@ public String locationForCreateTableSQL(TableIdentifier identifier) { } static class HadoopTestTables extends TestTables { - HadoopTestTables(Configuration conf, TemporaryFolder temp) { + + HadoopTestTables(Configuration conf, Path temp) { super(new HadoopTables(conf), temp, Catalogs.ICEBERG_HADOOP_TABLE_NAME); } @@ -492,32 +491,32 @@ static class HadoopTestTables extends TestTables { public String identifier(String tableIdentifier) { final File location; - try { - TableIdentifier identifier = TableIdentifier.parse(tableIdentifier); - location = - temp.newFolder(ObjectArrays.concat(identifier.namespace().levels(), identifier.name())); - } catch (IOException ioe) { - 
throw new UncheckedIOException(ioe); - } + TableIdentifier identifier = TableIdentifier.parse(tableIdentifier); + location = + temp.resolve( + Joiner.on(File.separator).join(identifier.namespace().levels()) + + File.separator + + identifier.name()) + .toFile(); - Assert.assertTrue(location.delete()); + assertThat(location).doesNotExist(); return location.toString(); } @Override public String locationForCreateTableSQL(TableIdentifier identifier) { - return "LOCATION '" + temp.getRoot().getPath() + tablePath(identifier) + "' "; + return "LOCATION '" + temp + tablePath(identifier) + "' "; } @Override public Table loadTable(TableIdentifier identifier) { - return tables().load(temp.getRoot().getPath() + TestTables.tablePath(identifier)); + return tables().load(temp + TestTables.tablePath(identifier)); } } static class HiveTestTables extends TestTables { - HiveTestTables(Configuration conf, TemporaryFolder temp, String catalogName) { + HiveTestTables(Configuration conf, Path temp, String catalogName) { super( CatalogUtil.loadCatalog( HiveCatalog.class.getName(), @@ -569,36 +568,32 @@ private String getStringValueForInsert(Object value, Type type) { enum TestTableType { HADOOP_TABLE { @Override - public TestTables instance( - Configuration conf, TemporaryFolder temporaryFolder, String catalogName) { + public TestTables instance(Configuration conf, Path temporaryFolder, String catalogName) { return new HadoopTestTables(conf, temporaryFolder); } }, HADOOP_CATALOG { @Override - public TestTables instance( - Configuration conf, TemporaryFolder temporaryFolder, String catalogName) + public TestTables instance(Configuration conf, Path temporaryFolder, String catalogName) throws IOException { return new HadoopCatalogTestTables(conf, temporaryFolder, catalogName); } }, CUSTOM_CATALOG { @Override - public TestTables instance( - Configuration conf, TemporaryFolder temporaryFolder, String catalogName) + public TestTables instance(Configuration conf, Path temporaryFolder, String 
catalogName) throws IOException { return new CustomCatalogTestTables(conf, temporaryFolder, catalogName); } }, HIVE_CATALOG { @Override - public TestTables instance( - Configuration conf, TemporaryFolder temporaryFolder, String catalogName) { + public TestTables instance(Configuration conf, Path temporaryFolder, String catalogName) { return new HiveTestTables(conf, temporaryFolder, catalogName); } }; public abstract TestTables instance( - Configuration conf, TemporaryFolder temporaryFolder, String catalogName) throws IOException; + Configuration conf, Path temporaryFolder, String catalogName) throws IOException; } }