From d1e4b71de144bd506e946dc57865a7e4b2e2e682 Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 25 Sep 2021 19:26:44 -0700 Subject: [PATCH 01/18] Do not create eagerly metadata during session creation --- core/trino-main/src/main/java/io/trino/Session.java | 7 +++---- .../transaction/InMemoryTransactionManager.java | 12 +++++++++--- .../io/trino/transaction/NoOpTransactionManager.java | 6 ++++++ .../io/trino/transaction/TransactionManager.java | 2 ++ .../trino/transaction/TestingTransactionManager.java | 6 ++++++ 5 files changed, 26 insertions(+), 7 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/Session.java b/core/trino-main/src/main/java/io/trino/Session.java index f1ecab9ff677..8f6a3168afdc 100644 --- a/core/trino-main/src/main/java/io/trino/Session.java +++ b/core/trino-main/src/main/java/io/trino/Session.java @@ -300,9 +300,8 @@ public Session beginTransactionId(TransactionId transactionId, TransactionManage if (catalogProperties.isEmpty()) { continue; } - CatalogName catalog = transactionManager.getOptionalCatalogMetadata(transactionId, catalogName) - .orElseThrow(() -> new TrinoException(NOT_FOUND, "Session property catalog does not exist: " + catalogName)) - .getCatalogName(); + CatalogName catalog = transactionManager.getCatalogName(transactionId, catalogName) + .orElseThrow(() -> new TrinoException(NOT_FOUND, "Session property catalog does not exist: " + catalogName)); validateCatalogProperties(Optional.of(transactionId), accessControl, catalog, catalogProperties); connectorProperties.put(catalogName, catalogProperties); @@ -312,7 +311,7 @@ public Session beginTransactionId(TransactionId transactionId, TransactionManage for (Entry entry : identity.getCatalogRoles().entrySet()) { String catalogName = entry.getKey(); SelectedRole role = entry.getValue(); - if (transactionManager.getOptionalCatalogMetadata(transactionId, catalogName).isEmpty()) { + if (transactionManager.getCatalogName(transactionId, catalogName).isEmpty()) { throw new TrinoException(NOT_FOUND, "Catalog for role does not exist: " + catalogName); } if (role.getType() == SelectedRole.Type.ROLE) { diff --git a/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java b/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java index 47115b18e16d..53abb686637b 100644 --- a/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java +++ b/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java @@ -184,11 +184,17 @@ public Map getCatalogs(TransactionId transactionId) return getTransactionMetadata(transactionId).getCatalogs(); } + @Override + public Optional getCatalogName(TransactionId transactionId, String catalogName) + { + return getTransactionMetadata(transactionId).getCalogName(catalogName); + } + @Override public Optional getOptionalCatalogMetadata(TransactionId transactionId, String catalogName) { TransactionMetadata transactionMetadata = getTransactionMetadata(transactionId); - return transactionMetadata.getConnectorId(catalogName) + return transactionMetadata.getCalogName(catalogName) .map(transactionMetadata::getTransactionCatalogMetadata); } @@ -212,7 +218,7 @@ public CatalogMetadata getCatalogMetadataForWrite(TransactionId transactionId, S TransactionMetadata transactionMetadata = getTransactionMetadata(transactionId); // there is no need to ask for a connector specific id since the overlay connectors are read only - CatalogName catalog = transactionMetadata.getConnectorId(catalogName) + 
CatalogName catalog = transactionMetadata.getCalogName(catalogName) .orElseThrow(() -> new TrinoException(NOT_FOUND, "Catalog does not exist: " + catalogName)); return getCatalogMetadataForWrite(transactionId, catalog); @@ -381,7 +387,7 @@ private synchronized Map getCatalogs() return ImmutableMap.copyOf(catalogs); } - private synchronized Optional getConnectorId(String catalogName) + private synchronized Optional getCalogName(String catalogName) { Optional catalog = catalogByName.get(catalogName); if (catalog == null) { diff --git a/core/trino-main/src/main/java/io/trino/transaction/NoOpTransactionManager.java b/core/trino-main/src/main/java/io/trino/transaction/NoOpTransactionManager.java index b306bfaad126..3def64cb221e 100644 --- a/core/trino-main/src/main/java/io/trino/transaction/NoOpTransactionManager.java +++ b/core/trino-main/src/main/java/io/trino/transaction/NoOpTransactionManager.java @@ -66,6 +66,12 @@ public Map getCatalogs(TransactionId transactionId) throw new UnsupportedOperationException(); } + @Override + public Optional getCatalogName(TransactionId transactionId, String catalogName) + { + throw new UnsupportedOperationException(); + } + @Override public Optional getOptionalCatalogMetadata(TransactionId transactionId, String catalogName) { diff --git a/core/trino-main/src/main/java/io/trino/transaction/TransactionManager.java b/core/trino-main/src/main/java/io/trino/transaction/TransactionManager.java index c30cb09f894d..24ea3b10b79e 100644 --- a/core/trino-main/src/main/java/io/trino/transaction/TransactionManager.java +++ b/core/trino-main/src/main/java/io/trino/transaction/TransactionManager.java @@ -46,6 +46,8 @@ default boolean isAutoCommit(TransactionId transactionId) Map getCatalogs(TransactionId transactionId); + Optional getCatalogName(TransactionId transactionId, String catalogName); + Optional getOptionalCatalogMetadata(TransactionId transactionId, String catalogName); CatalogMetadata getCatalogMetadata(TransactionId transactionId, CatalogName catalogName); diff --git a/core/trino-main/src/test/java/io/trino/transaction/TestingTransactionManager.java b/core/trino-main/src/test/java/io/trino/transaction/TestingTransactionManager.java index 76d175efc9be..b8194825d3c1 100644 --- a/core/trino-main/src/test/java/io/trino/transaction/TestingTransactionManager.java +++ b/core/trino-main/src/test/java/io/trino/transaction/TestingTransactionManager.java @@ -89,6 +89,12 @@ public Map getCatalogs(TransactionId transactionId) return ImmutableMap.of(); } + @Override + public Optional getCatalogName(TransactionId transactionId, String catalogName) + { + return Optional.empty(); + } + @Override public Optional getOptionalCatalogMetadata(TransactionId transactionId, String catalogName) { From 4f73ec77cdb77b96d40a6875bd8825612543e62d Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 25 Sep 2021 20:43:13 -0700 Subject: [PATCH 02/18] Provide session during metadata creation --- .../io/trino/metadata/CatalogMetadata.java | 27 ++-- .../io/trino/metadata/MetadataManager.java | 148 +++++++++--------- .../trino/security/AccessControlManager.java | 10 +- .../InMemoryTransactionManager.java | 16 +- .../transaction/TestTransactionManager.java | 6 +- 5 files changed, 103 insertions(+), 104 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/metadata/CatalogMetadata.java b/core/trino-main/src/main/java/io/trino/metadata/CatalogMetadata.java index afa162d5ce94..6dab2eb96cb3 100644 --- a/core/trino-main/src/main/java/io/trino/metadata/CatalogMetadata.java +++ 
b/core/trino-main/src/main/java/io/trino/metadata/CatalogMetadata.java @@ -24,6 +24,7 @@ import java.util.List; import java.util.Set; +import java.util.function.Function; import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.collect.Sets.immutableEnumSet; @@ -34,28 +35,28 @@ public class CatalogMetadata private static final String INFORMATION_SCHEMA_NAME = "information_schema"; private final CatalogName catalogName; - private final ConnectorMetadata metadata; + private final Function metadata; private final ConnectorTransactionHandle transactionHandle; private final CatalogName informationSchemaId; - private final ConnectorMetadata informationSchema; + private final Function informationSchema; private final ConnectorTransactionHandle informationSchemaTransactionHandle; private final CatalogName systemTablesId; - private final ConnectorMetadata systemTables; + private final Function systemTables; private final ConnectorTransactionHandle systemTablesTransactionHandle; private final SecurityManagement securityManagement; private final Set connectorCapabilities; public CatalogMetadata( CatalogName catalogName, - ConnectorMetadata metadata, + Function metadata, ConnectorTransactionHandle transactionHandle, CatalogName informationSchemaId, - ConnectorMetadata informationSchema, + Function informationSchema, ConnectorTransactionHandle informationSchemaTransactionHandle, CatalogName systemTablesId, - ConnectorMetadata systemTables, + Function systemTables, ConnectorTransactionHandle systemTablesTransactionHandle, SecurityManagement securityManagement, Set connectorCapabilities) @@ -78,21 +79,21 @@ public CatalogName getCatalogName() return catalogName; } - public ConnectorMetadata getMetadata() + public ConnectorMetadata getMetadata(Session session) { - return metadata; + return metadata.apply(session); } - public ConnectorMetadata getMetadataFor(CatalogName catalogName) + public ConnectorMetadata getMetadataFor(Session session, CatalogName catalogName) { if (catalogName.equals(this.catalogName)) { - return metadata; + return metadata.apply(session); } if (catalogName.equals(informationSchemaId)) { - return informationSchema; + return informationSchema.apply(session); } if (catalogName.equals(systemTablesId)) { - return systemTables; + return systemTables.apply(session); } throw new IllegalArgumentException("Unknown connector id: " + catalogName); } @@ -125,7 +126,7 @@ public CatalogName getConnectorId(Session session, QualifiedObjectName table) return informationSchemaId; } - if (systemTables.getTableHandle(session.toConnectorSession(systemTablesId), table.asSchemaTableName()) != null) { + if (systemTables.apply(session).getTableHandle(session.toConnectorSession(systemTablesId), table.asSchemaTableName()) != null) { return systemTablesId; } diff --git a/core/trino-main/src/main/java/io/trino/metadata/MetadataManager.java b/core/trino-main/src/main/java/io/trino/metadata/MetadataManager.java index eef4e19cb4ff..021dd798eff9 100644 --- a/core/trino-main/src/main/java/io/trino/metadata/MetadataManager.java +++ b/core/trino-main/src/main/java/io/trino/metadata/MetadataManager.java @@ -267,7 +267,7 @@ public boolean schemaExists(Session session, CatalogSchemaName schema) CatalogMetadata catalogMetadata = catalog.get(); ConnectorSession connectorSession = session.toConnectorSession(catalogMetadata.getCatalogName()); return catalogMetadata.listConnectorIds().stream() - .map(catalogMetadata::getMetadataFor) + .map(catalogName -> 
catalogMetadata.getMetadataFor(session, catalogName)) .anyMatch(metadata -> metadata.schemaExists(connectorSession, schema.getSchemaName())); } @@ -281,7 +281,7 @@ public List listSchemaNames(Session session, String catalogName) CatalogMetadata catalogMetadata = catalog.get(); ConnectorSession connectorSession = session.toConnectorSession(catalogMetadata.getCatalogName()); for (CatalogName connectorId : catalogMetadata.listConnectorIds()) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(connectorId); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, connectorId); metadata.listSchemaNames(connectorSession).stream() .map(schema -> schema.toLowerCase(Locale.ENGLISH)) .forEach(schemaNames::add); @@ -308,7 +308,7 @@ public Optional getTableHandle(Session session, QualifiedObjectName return getOptionalCatalogMetadata(session, table.getCatalogName()).flatMap(catalogMetadata -> { CatalogName catalogName = catalogMetadata.getConnectorId(session, table); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); @@ -343,7 +343,7 @@ public Optional getTableHandleForStatisticsCollection(Session sessi if (catalog.isPresent()) { CatalogMetadata catalogMetadata = catalog.get(); CatalogName catalogName = catalogMetadata.getConnectorId(session, table); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorTableHandle tableHandle = metadata.getTableHandleForStatisticsCollection(session.toConnectorSession(catalogName), table.asSchemaTableName(), analyzeProperties); if (tableHandle != null) { @@ -366,7 +366,7 @@ public Optional getTableHandleForExecute(Session session, Ta CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); Optional executeHandle = metadata.getTableHandleForExecute( session.toConnectorSession(catalogName), @@ -386,7 +386,7 @@ public Optional getLayoutForTableExecute(Session session, TableExec { CatalogName catalogName = tableExecuteHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); return metadata.getLayoutForTableExecute(session.toConnectorSession(catalogName), tableExecuteHandle.getConnectorHandle()) .map(layout -> new TableLayout(catalogName, catalogMetadata.getTransactionHandleFor(catalogName), layout)); @@ -397,7 +397,7 @@ public BeginTableExecuteResult beginTableExecut { CatalogName catalogName = tableExecuteHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); BeginTableExecuteResult connectorBeginResult = metadata.beginTableExecute(session.toConnectorSession(), tableExecuteHandle.getConnectorHandle(), sourceHandle.getConnectorHandle()); return new BeginTableExecuteResult<>( @@ -425,7 +425,7 @@ public Optional getSystemTable(Session session, 
QualifiedObjectName // we query only main connector for runtime system tables CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); return metadata.getSystemTable(session.toConnectorSession(catalogName), tableName.asSchemaTableName()); } @@ -437,7 +437,7 @@ public TableProperties getTableProperties(Session session, TableHandle handle) { CatalogName catalogName = handle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); return new TableProperties(catalogName, handle.getTransaction(), metadata.getTableProperties(connectorSession, handle.getConnectorHandle())); @@ -450,7 +450,7 @@ public TableHandle makeCompatiblePartitioning(Session session, TableHandle table CatalogName catalogName = partitioningHandle.getConnectorId().get(); checkArgument(catalogName.equals(tableHandle.getCatalogName()), "ConnectorId of tableHandle and partitioningHandle does not match"); CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorTransactionHandle transaction = catalogMetadata.getTransactionHandleFor(catalogName); ConnectorTableHandle newTableHandle = metadata.makeCompatiblePartitioning( @@ -473,7 +473,7 @@ public Optional getCommonPartitioning(Session session, Parti } CatalogName catalogName = leftConnectorId.get(); CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); Optional commonHandle = metadata.getCommonPartitioningHandle(session.toConnectorSession(catalogName), left.getConnectorHandle(), right.getConnectorHandle()); return commonHandle.map(handle -> new PartitioningHandle(Optional.of(catalogName), left.getTransactionHandle(), handle)); } @@ -561,7 +561,7 @@ public List listTables(Session session, QualifiedTablePrefi CatalogMetadata catalogMetadata = catalog.get(); for (CatalogName catalogName : catalogMetadata.listConnectorIds()) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); metadata.listTables(connectorSession, prefix.getSchemaName()).stream() .map(convertFromSchemaTableName(prefix.getCatalogName())) @@ -602,7 +602,7 @@ public List listTableColumns(Session session, QualifiedTab SchemaTablePrefix tablePrefix = prefix.asSchemaTablePrefix(); for (CatalogName catalogName : catalogMetadata.listConnectorIds()) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); @@ -649,7 +649,7 @@ public void createSchema(Session session, CatalogSchemaName schema, Map sourceTableName = getTableNameIfSystemSecurity(session, catalogMetadata, 
tableHandle); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.renameTable(session.toConnectorSession(catalog), tableHandle.getConnectorHandle(), newTableName.asSchemaTableName()); sourceTableName.ifPresent(name -> systemSecurityMetadata.tableRenamed(session, name, newTableName.asCatalogSchemaTableName())); } @@ -775,7 +775,7 @@ public void setTableAuthorization(Session session, CatalogSchemaTableName table, { CatalogName catalogName = new CatalogName(table.getCatalogName()); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) { systemSecurityMetadata.setTableOwner(session, table, principal); } @@ -789,7 +789,7 @@ public void dropTable(Session session, TableHandle tableHandle) { CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); Optional tableName = getTableNameIfSystemSecurity(session, catalogMetadata, tableHandle); metadata.dropTable(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle()); tableName.ifPresent(name -> systemSecurityMetadata.tableDropped(session, name)); @@ -808,7 +808,7 @@ public Optional getInsertLayout(Session session, TableHandle table) { CatalogName catalogName = table.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); return metadata.getInsertLayout(session.toConnectorSession(catalogName), table.getConnectorHandle()) .map(layout -> new TableLayout(catalogName, catalogMetadata.getTransactionHandleFor(catalogName), layout)); @@ -818,7 +818,7 @@ public Optional getInsertLayout(Session session, TableHandle table) public TableStatisticsMetadata getStatisticsCollectionMetadataForWrite(Session session, String catalogName, ConnectorTableMetadata tableMetadata) { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); CatalogName catalog = catalogMetadata.getCatalogName(); return metadata.getStatisticsCollectionMetadataForWrite(session.toConnectorSession(catalog), tableMetadata); } @@ -827,7 +827,7 @@ public TableStatisticsMetadata getStatisticsCollectionMetadataForWrite(Session s public TableStatisticsMetadata getStatisticsCollectionMetadata(Session session, String catalogName, ConnectorTableMetadata tableMetadata) { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); CatalogName catalog = catalogMetadata.getCatalogName(); return metadata.getStatisticsCollectionMetadata(session.toConnectorSession(catalog), tableMetadata); } @@ -837,7 +837,7 @@ public AnalyzeTableHandle beginStatisticsCollection(Session session, TableHandle { CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = 
getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalogName); ConnectorTableHandle connectorTableHandle = metadata.beginStatisticsCollection(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle()); @@ -849,7 +849,7 @@ public void finishStatisticsCollection(Session session, AnalyzeTableHandle table { CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - catalogMetadata.getMetadata().finishStatisticsCollection(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle(), computedStatistics); + catalogMetadata.getMetadata(session).finishStatisticsCollection(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle(), computedStatistics); } @Override @@ -857,7 +857,7 @@ public Optional getNewTableLayout(Session session, String catalogNa { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); CatalogName catalog = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalog); ConnectorSession connectorSession = session.toConnectorSession(catalog); @@ -879,7 +879,7 @@ public OutputTableHandle beginCreateTable(Session session, String catalogName, C { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); CatalogName catalog = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalog); ConnectorSession connectorSession = session.toConnectorSession(catalog); @@ -904,7 +904,7 @@ public InsertTableHandle beginInsert(Session session, TableHandle tableHandle, L { CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalogName); ConnectorInsertTableHandle handle = metadata.beginInsert(session.toConnectorSession(catalogName), tableHandle.getConnectorHandle(), columns, getRetryPolicy(session).getRetryMode()); return new InsertTableHandle(tableHandle.getCatalogName(), transactionHandle, handle); @@ -915,7 +915,7 @@ public boolean supportsMissingColumnsOnInsert(Session session, TableHandle table { CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName); - return catalogMetadata.getMetadata().supportsMissingColumnsOnInsert(); + return catalogMetadata.getMetadata(session).supportsMissingColumnsOnInsert(); } @Override @@ -952,7 +952,7 @@ public InsertTableHandle beginRefreshMaterializedView(Session session, TableHand { CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata 
metadata = catalogMetadata.getMetadata(session); ConnectorTransactionHandle transactionHandle = catalogMetadata.getTransactionHandleFor(catalogName); List sourceConnectorHandles = sourceTableHandles.stream() @@ -1093,7 +1093,7 @@ public List listViews(Session session, QualifiedTablePrefix CatalogMetadata catalogMetadata = catalog.get(); for (CatalogName catalogName : catalogMetadata.listConnectorIds()) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); metadata.listViews(connectorSession, prefix.getSchemaName()).stream() .map(convertFromSchemaTableName(prefix.getCatalogName())) @@ -1117,7 +1117,7 @@ public Map getViews(Session session, QualifiedTab SchemaTablePrefix tablePrefix = prefix.asSchemaTablePrefix(); for (CatalogName catalogName : catalogMetadata.listConnectorIds()) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); Map viewMap; @@ -1150,7 +1150,7 @@ public Map getSchemaProperties(Session session, CatalogSchemaNam } CatalogMetadata catalogMetadata = getCatalogMetadata(session, new CatalogName(schemaName.getCatalogName())); CatalogName catalogName = catalogMetadata.getConnectorIdForSchema(schemaName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); return metadata.getSchemaProperties(connectorSession, schemaName); @@ -1167,7 +1167,7 @@ public Optional getSchemaOwner(Session session, CatalogSchemaNam return systemSecurityMetadata.getSchemaOwner(session, schemaName); } CatalogName catalogName = catalogMetadata.getConnectorIdForSchema(schemaName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); return metadata.getSchemaOwner(connectorSession, schemaName); } @@ -1203,7 +1203,7 @@ private Optional getViewInternal(Session session, Quali if (catalog.isPresent()) { CatalogMetadata catalogMetadata = catalog.get(); CatalogName catalogName = catalogMetadata.getConnectorId(session, viewName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); return metadata.getView(connectorSession, viewName.asSchemaTableName()); @@ -1216,7 +1216,7 @@ public void createView(Session session, QualifiedObjectName viewName, ViewDefini { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, viewName.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.createView(session.toConnectorSession(catalogName), viewName.asSchemaTableName(), definition.toConnectorViewDefinition(), replace); if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) { @@ -1229,7 +1229,7 @@ public void renameView(Session 
session, QualifiedObjectName source, QualifiedObj { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, target.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); if (!source.getCatalogName().equals(catalogName.getCatalogName())) { throw new TrinoException(SYNTAX_ERROR, "Cannot rename views across catalogs"); } @@ -1245,7 +1245,7 @@ public void setViewAuthorization(Session session, CatalogSchemaTableName view, T { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, view.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) { systemSecurityMetadata.setViewOwner(session, view, principal); @@ -1260,7 +1260,7 @@ public void dropView(Session session, QualifiedObjectName viewName) { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, viewName.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.dropView(session.toConnectorSession(catalogName), viewName.asSchemaTableName()); if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) { @@ -1273,7 +1273,7 @@ public void createMaterializedView(Session session, QualifiedObjectName viewName { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, viewName.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.createMaterializedView( session.toConnectorSession(catalogName), @@ -1291,7 +1291,7 @@ public void dropMaterializedView(Session session, QualifiedObjectName viewName) { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, viewName.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.dropMaterializedView(session.toConnectorSession(catalogName), viewName.asSchemaTableName()); if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) { @@ -1316,7 +1316,7 @@ public List listMaterializedViews(Session session, Qualifie CatalogMetadata catalogMetadata = catalog.get(); for (CatalogName catalogName : catalogMetadata.listConnectorIds()) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); metadata.listMaterializedViews(connectorSession, prefix.getSchemaName()).stream() .map(convertFromSchemaTableName(prefix.getCatalogName())) @@ -1340,7 +1340,7 @@ public Map getMaterializedViews(Session session, SchemaTablePrefix tablePrefix = prefix.asSchemaTablePrefix(); for (CatalogName catalogName : catalogMetadata.listConnectorIds()) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); 
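
The change repeated throughout this file is mechanical: every ConnectorMetadata lookup now takes the Session, because the patched CatalogMetadata is constructed with Function<Session, ConnectorMetadata> suppliers instead of eagerly created ConnectorMetadata instances. A minimal sketch of that shape, separate from the patch itself and assuming only the io.trino.Session and io.trino.spi.connector.ConnectorMetadata types that appear in this diff (the LazilyResolvedCatalog name is hypothetical):

import java.util.function.Function;

import io.trino.Session;
import io.trino.spi.connector.ConnectorMetadata;

// Hypothetical illustration of the pattern the patch introduces in CatalogMetadata:
// hold a Session -> ConnectorMetadata function and resolve it per call, so a
// connector's metadata is only instantiated once a session actually touches it.
final class LazilyResolvedCatalog
{
    private final Function<Session, ConnectorMetadata> metadataSupplier;

    LazilyResolvedCatalog(Function<Session, ConnectorMetadata> metadataSupplier)
    {
        this.metadataSupplier = metadataSupplier;
    }

    ConnectorMetadata getMetadata(Session session)
    {
        // Callers such as MetadataManager pass the session through, mirroring
        // catalogMetadata.getMetadata(session) in the hunks above and below.
        return metadataSupplier.apply(session);
    }
}

Callers use the returned ConnectorMetadata exactly as before; only the lookup gained a Session parameter.
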
ConnectorSession connectorSession = session.toConnectorSession(catalogName); Map materializedViewMap; @@ -1399,7 +1399,7 @@ private Optional getMaterializedViewInterna if (catalog.isPresent()) { CatalogMetadata catalogMetadata = catalog.get(); CatalogName catalogName = catalogMetadata.getConnectorId(session, viewName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); return metadata.getMaterializedView(connectorSession, viewName.asSchemaTableName()); @@ -1414,7 +1414,7 @@ public MaterializedViewFreshness getMaterializedViewFreshness(Session session, Q if (catalog.isPresent()) { CatalogMetadata catalogMetadata = catalog.get(); CatalogName catalogName = catalogMetadata.getConnectorId(session, viewName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); return metadata.getMaterializedViewFreshness(connectorSession, viewName.asSchemaTableName()); @@ -1427,7 +1427,7 @@ public void renameMaterializedView(Session session, QualifiedObjectName source, { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, target.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); if (!source.getCatalogName().equals(catalogName.getCatalogName())) { throw new TrinoException(SYNTAX_ERROR, "Cannot rename materialized views across catalogs"); } @@ -1443,7 +1443,7 @@ public void setMaterializedViewProperties(Session session, QualifiedObjectName v { CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, viewName.getCatalogName()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.setMaterializedViewProperties(session.toConnectorSession(catalogName), viewName.asSchemaTableName(), properties); } @@ -1453,7 +1453,7 @@ public Optional applyTableScanRedirect(Sessi { CatalogName catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); return metadata.applyTableScanRedirect(connectorSession, tableHandle.getConnectorHandle()); } @@ -1482,7 +1482,7 @@ private QualifiedObjectName getRedirectedTableName(Session session, QualifiedObj CatalogMetadata catalogMetadata = catalog.get(); CatalogName catalogName = catalogMetadata.getConnectorId(session, tableName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); Optional redirectedTableName = metadata.redirectTable(session.toConnectorSession(catalogName), tableName.asSchemaTableName()) .map(name -> convertFromSchemaTableName(name.getCatalogName()).apply(name.getSchemaTableName())); @@ -1539,7 +1539,7 @@ public Optional resolveIndex(Session session, TableHandle tableHa { CatalogName 
catalogName = tableHandle.getCatalogName(); CatalogMetadata catalogMetadata = getCatalogMetadata(session, catalogName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); ConnectorTransactionHandle transaction = catalogMetadata.getTransactionHandleFor(catalogName); ConnectorSession connectorSession = session.toConnectorSession(catalogName); Optional resolvedIndex = metadata.resolveIndex(connectorSession, tableHandle.getConnectorHandle(), indexableColumns, outputColumns, tupleDomain); @@ -1767,7 +1767,7 @@ public boolean roleExists(Session session, String role, Optional catalog CatalogMetadata catalogMetadata = getCatalogMetadata(session, new CatalogName(catalog.get())); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); return metadata.roleExists(session.toConnectorSession(catalogName), role); } @@ -1781,7 +1781,7 @@ public void createRole(Session session, String role, Optional gr CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalog.get()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.createRole(session.toConnectorSession(catalogName), role, grantor); } @@ -1795,7 +1795,7 @@ public void dropRole(Session session, String role, Optional catalog) CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalog.get()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.dropRole(session.toConnectorSession(catalogName), role); } @@ -1812,7 +1812,7 @@ public Set listRoles(Session session, Optional catalog) if (catalogMetadata.get().getSecurityManagement() == SecurityManagement.CONNECTOR) { CatalogName catalogName = catalogMetadata.get().getCatalogName(); ConnectorSession connectorSession = session.toConnectorSession(catalogName); - ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(session, catalogName); return metadata.listRoles(connectorSession).stream() .map(role -> role.toLowerCase(ENGLISH)) .collect(toImmutableSet()); @@ -1835,7 +1835,7 @@ public Set listAllRoleGrants(Session session, Optional catalo if (catalogMetadata.get().getSecurityManagement() == SecurityManagement.CONNECTOR) { CatalogName catalogName = catalogMetadata.get().getCatalogName(); ConnectorSession connectorSession = session.toConnectorSession(catalogName); - ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(session, catalogName); return metadata.listAllRoleGrants(connectorSession, roles, grantees, limit); } } @@ -1856,7 +1856,7 @@ public Set listRoleGrants(Session session, Optional catalog, if (catalogMetadata.get().getSecurityManagement() == SecurityManagement.CONNECTOR) { CatalogName catalogName = catalogMetadata.get().getCatalogName(); ConnectorSession connectorSession = session.toConnectorSession(catalogName); - ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(catalogName); + ConnectorMetadata metadata = 
catalogMetadata.get().getMetadataFor(session, catalogName); return metadata.listRoleGrants(connectorSession, principal); } } @@ -1874,7 +1874,7 @@ public void grantRoles(Session session, Set roles, Set g CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalog.get()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.grantRoles(session.toConnectorSession(catalogName), roles, grantees, adminOption, grantor); } @@ -1888,7 +1888,7 @@ public void revokeRoles(Session session, Set roles, Set CatalogMetadata catalogMetadata = getCatalogMetadataForWrite(session, catalog.get()); CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.revokeRoles(session.toConnectorSession(catalogName), roles, grantees, adminOption, grantor); } @@ -1905,7 +1905,7 @@ public Set listApplicableRoles(Session session, TrinoPrincipal princi if (catalogMetadata.get().getSecurityManagement() == SecurityManagement.CONNECTOR) { CatalogName catalogName = catalogMetadata.get().getCatalogName(); ConnectorSession connectorSession = session.toConnectorSession(catalogName); - ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(session, catalogName); return ImmutableSet.copyOf(metadata.listApplicableRoles(connectorSession, principal)); } } @@ -1934,7 +1934,7 @@ public Set listEnabledRoles(Session session, String catalog) CatalogName catalogName = catalogMetadata.get().getCatalogName(); ConnectorSession connectorSession = session.toConnectorSession(catalogName); - ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.get().getMetadataFor(session, catalogName); return ImmutableSet.copyOf(metadata.listEnabledRoles(connectorSession)); } @@ -1947,7 +1947,7 @@ public void grantTablePrivileges(Session session, QualifiedObjectName tableName, return; } CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.grantTablePrivileges(session.toConnectorSession(catalogName), tableName.asSchemaTableName(), privileges, grantee, grantOption); } @@ -1961,7 +1961,7 @@ public void denyTablePrivileges(Session session, QualifiedObjectName tableName, return; } CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.denyTablePrivileges(session.toConnectorSession(catalogName), tableName.asSchemaTableName(), privileges, grantee); } @@ -1975,7 +1975,7 @@ public void revokeTablePrivileges(Session session, QualifiedObjectName tableName return; } CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.revokeTablePrivileges(session.toConnectorSession(catalogName), tableName.asSchemaTableName(), privileges, grantee, grantOption); } @@ -1989,7 +1989,7 @@ public void grantSchemaPrivileges(Session session, CatalogSchemaName schemaName, return; } CatalogName catalogName = 
catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.grantSchemaPrivileges(session.toConnectorSession(catalogName), schemaName.getSchemaName(), privileges, grantee, grantOption); } @@ -2003,7 +2003,7 @@ public void denySchemaPrivileges(Session session, CatalogSchemaName schemaName, return; } CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.denySchemaPrivileges(session.toConnectorSession(catalogName), schemaName.getSchemaName(), privileges, grantee); } @@ -2017,7 +2017,7 @@ public void revokeSchemaPrivileges(Session session, CatalogSchemaName schemaName return; } CatalogName catalogName = catalogMetadata.getCatalogName(); - ConnectorMetadata metadata = catalogMetadata.getMetadata(); + ConnectorMetadata metadata = catalogMetadata.getMetadata(session); metadata.revokeSchemaPrivileges(session.toConnectorSession(catalogName), schemaName.getSchemaName(), privileges, grantee, grantOption); } @@ -2039,7 +2039,7 @@ public List listTablePrivileges(Session session, QualifiedTablePrefix .map(qualifiedTableName -> singletonList(catalogMetadata.getConnectorId(session, qualifiedTableName))) .orElseGet(catalogMetadata::listConnectorIds); for (CatalogName catalogName : connectorIds) { - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(catalogName); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, catalogName); if (catalogMetadata.getSecurityManagement() == SecurityManagement.SYSTEM) { grantInfos.addAll(systemSecurityMetadata.listTablePrivileges(session, prefix)); } @@ -2420,7 +2420,7 @@ private static Optional getTableNameIfSystemSecurity(Ses if (catalogMetadata.getSecurityManagement() == SecurityManagement.CONNECTOR) { return Optional.empty(); } - ConnectorTableSchema tableSchema = catalogMetadata.getMetadata().getTableSchema(session.toConnectorSession(tableHandle.getCatalogName()), tableHandle.getConnectorHandle()); + ConnectorTableSchema tableSchema = catalogMetadata.getMetadata(session).getTableSchema(session.toConnectorSession(tableHandle.getCatalogName()), tableHandle.getConnectorHandle()); return Optional.of(new CatalogSchemaTableName(tableHandle.getCatalogName().getCatalogName(), tableSchema.getTable())); } @@ -2454,12 +2454,12 @@ private CatalogMetadata getCatalogMetadataForWrite(Session session, CatalogName private ConnectorMetadata getMetadata(Session session, CatalogName catalogName) { - return getCatalogMetadata(session, catalogName).getMetadataFor(catalogName); + return getCatalogMetadata(session, catalogName).getMetadataFor(session, catalogName); } private ConnectorMetadata getMetadataForWrite(Session session, CatalogName catalogName) { - return getCatalogMetadataForWrite(session, catalogName).getMetadata(); + return getCatalogMetadataForWrite(session, catalogName).getMetadata(session); } private void registerCatalogForQuery(Session session, CatalogMetadata catalogMetadata) @@ -2492,7 +2492,7 @@ private synchronized void registerCatalog(CatalogMetadata catalogMetadata) checkState(!finished, "Query is already finished"); if (catalogs.putIfAbsent(catalogMetadata.getCatalogName(), catalogMetadata) == null) { ConnectorSession connectorSession = session.toConnectorSession(catalogMetadata.getCatalogName()); - catalogMetadata.getMetadata().beginQuery(connectorSession); + 
catalogMetadata.getMetadata(session).beginQuery(connectorSession); } } @@ -2507,7 +2507,7 @@ private synchronized void finish() for (CatalogMetadata catalogMetadata : catalogs) { ConnectorSession connectorSession = session.toConnectorSession(catalogMetadata.getCatalogName()); - catalogMetadata.getMetadata().cleanupQuery(connectorSession); + catalogMetadata.getMetadata(session).cleanupQuery(connectorSession); } } } @@ -2524,7 +2524,7 @@ public boolean isValidTableVersion(Session session, QualifiedObjectName tableNam CatalogMetadata catalogMetadata = catalog.get(); CatalogName connectorId = catalogMetadata.getConnectorId(session, tableName); - ConnectorMetadata metadata = catalogMetadata.getMetadataFor(connectorId); + ConnectorMetadata metadata = catalogMetadata.getMetadataFor(session, connectorId); return metadata.isSupportedVersionType(session.toConnectorSession(), tableName.asSchemaTableName(), version.getPointerType(), version.getObjectType()); } diff --git a/core/trino-main/src/main/java/io/trino/security/AccessControlManager.java b/core/trino-main/src/main/java/io/trino/security/AccessControlManager.java index 9e14f0f8d3b6..ff3154307b85 100644 --- a/core/trino-main/src/main/java/io/trino/security/AccessControlManager.java +++ b/core/trino-main/src/main/java/io/trino/security/AccessControlManager.java @@ -34,7 +34,6 @@ import io.trino.spi.connector.CatalogSchemaTableName; import io.trino.spi.connector.ConnectorAccessControl; import io.trino.spi.connector.ConnectorSecurityContext; -import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SchemaTableName; import io.trino.spi.security.Identity; import io.trino.spi.security.PrincipalType; @@ -1200,8 +1199,8 @@ public List getColumnMasks(SecurityContext context, QualifiedObj private CatalogAccessControlEntry getConnectorAccessControl(TransactionId transactionId, String catalogName) { - return transactionManager.getOptionalCatalogMetadata(transactionId, catalogName) - .map(metadata -> connectorAccessControl.get(metadata.getCatalogName())) + return transactionManager.getCatalogName(transactionId, catalogName) + .map(connectorAccessControl::get) .orElse(null); } @@ -1299,11 +1298,6 @@ public ConnectorAccessControl getAccessControl() return accessControl; } - public ConnectorTransactionHandle getTransactionHandle(TransactionId transactionId) - { - return transactionManager.getConnectorTransaction(transactionId, catalogName); - } - public ConnectorSecurityContext toConnectorSecurityContext(SecurityContext securityContext) { return toConnectorSecurityContext(securityContext.getTransactionId(), securityContext.getIdentity(), securityContext.getQueryId()); diff --git a/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java b/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java index 53abb686637b..adb92c89ac3f 100644 --- a/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java +++ b/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java @@ -21,6 +21,7 @@ import io.airlift.log.Logger; import io.airlift.units.Duration; import io.trino.NotInTransactionException; +import io.trino.Session; import io.trino.connector.CatalogName; import io.trino.metadata.Catalog; import io.trino.metadata.Catalog.SecurityManagement; @@ -423,13 +424,13 @@ private synchronized CatalogMetadata getTransactionCatalogMetadata(CatalogName c catalogMetadata = new CatalogMetadata( metadata.getCatalogName(), - metadata.getConnectorMetadata(), 
+ metadata::getConnectorMetadata, metadata.getTransactionHandle(), informationSchema.getCatalogName(), - informationSchema.getConnectorMetadata(), + informationSchema::getConnectorMetadata, informationSchema.getTransactionHandle(), systemTables.getCatalogName(), - systemTables.getConnectorMetadata(), + systemTables::getConnectorMetadata, systemTables.getTransactionHandle(), metadata.getSecurityManagement(), connector.getCapabilities()); @@ -568,7 +569,8 @@ private static class ConnectorTransactionMetadata private final Connector connector; private final ConnectorTransactionHandle transactionHandle; private final SecurityManagement securityManagement; - private final ConnectorMetadata connectorMetadata; + @GuardedBy("this") + private ConnectorMetadata connectorMetadata; private final AtomicBoolean finished = new AtomicBoolean(); public ConnectorTransactionMetadata( @@ -581,7 +583,6 @@ public ConnectorTransactionMetadata( this.connector = requireNonNull(connector, "connector is null"); this.transactionHandle = requireNonNull(transactionHandle, "transactionHandle is null"); this.securityManagement = requireNonNull(securityManagement, "securityManagement is null"); - this.connectorMetadata = connector.getMetadata(transactionHandle); } public CatalogName getCatalogName() @@ -599,9 +600,12 @@ public SecurityManagement getSecurityManagement() return securityManagement; } - public synchronized ConnectorMetadata getConnectorMetadata() + public synchronized ConnectorMetadata getConnectorMetadata(Session session) { checkState(!finished.get(), "Already finished"); + if (connectorMetadata == null) { + connectorMetadata = connector.getMetadata(transactionHandle); + } return connectorMetadata; } diff --git a/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java b/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java index ae2421821a81..1e6298980d63 100644 --- a/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java +++ b/core/trino-main/src/test/java/io/trino/transaction/TestTransactionManager.java @@ -74,7 +74,7 @@ public void testTransactionWorkflow() assertTrue(transactionInfo.getCatalogNames().isEmpty()); assertFalse(transactionInfo.getWrittenConnectorId().isPresent()); - ConnectorMetadata metadata = transactionManager.getOptionalCatalogMetadata(transactionId, CATALOG).get().getMetadata(); + ConnectorMetadata metadata = transactionManager.getOptionalCatalogMetadata(transactionId, CATALOG).get().getMetadata(TEST_SESSION); metadata.listSchemaNames(TEST_SESSION.toConnectorSession(CATALOG_NAME)); transactionInfo = transactionManager.getTransactionInfo(transactionId); assertEquals(transactionInfo.getCatalogNames(), ImmutableList.of(CATALOG_NAME, INFORMATION_SCHEMA_ID, SYSTEM_TABLES_ID)); @@ -102,7 +102,7 @@ public void testAbortedTransactionWorkflow() assertTrue(transactionInfo.getCatalogNames().isEmpty()); assertFalse(transactionInfo.getWrittenConnectorId().isPresent()); - ConnectorMetadata metadata = transactionManager.getOptionalCatalogMetadata(transactionId, CATALOG).get().getMetadata(); + ConnectorMetadata metadata = transactionManager.getOptionalCatalogMetadata(transactionId, CATALOG).get().getMetadata(TEST_SESSION); metadata.listSchemaNames(TEST_SESSION.toConnectorSession(CATALOG_NAME)); transactionInfo = transactionManager.getTransactionInfo(transactionId); assertEquals(transactionInfo.getCatalogNames(), ImmutableList.of(CATALOG_NAME, INFORMATION_SCHEMA_ID, SYSTEM_TABLES_ID)); @@ -130,7 +130,7 @@ public void 
testFailedTransactionWorkflow() assertTrue(transactionInfo.getCatalogNames().isEmpty()); assertFalse(transactionInfo.getWrittenConnectorId().isPresent()); - ConnectorMetadata metadata = transactionManager.getOptionalCatalogMetadata(transactionId, CATALOG).get().getMetadata(); + ConnectorMetadata metadata = transactionManager.getOptionalCatalogMetadata(transactionId, CATALOG).get().getMetadata(TEST_SESSION); metadata.listSchemaNames(TEST_SESSION.toConnectorSession(CATALOG_NAME)); transactionInfo = transactionManager.getTransactionInfo(transactionId); assertEquals(transactionInfo.getCatalogNames(), ImmutableList.of(CATALOG_NAME, INFORMATION_SCHEMA_ID, SYSTEM_TABLES_ID)); From 7ba5fe29c63b7e28310b98f94ab070e0e10468ab Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 25 Sep 2021 21:02:34 -0700 Subject: [PATCH 03/18] Provide connector session during connector metadata creation --- .../InformationSchemaConnector.java | 3 ++- .../system/GlobalSystemConnector.java | 3 ++- .../connector/system/SystemConnector.java | 3 ++- .../InMemoryTransactionManager.java | 4 +++- .../io/trino/connector/MockConnector.java | 2 +- .../io/trino/sql/analyzer/TestAnalyzer.java | 3 ++- .../sql/planner/TestMaterializedViews.java | 3 ++- .../io/trino/spi/connector/Connector.java | 16 ++++++++++++- .../plugin/accumulo/AccumuloConnector.java | 3 ++- .../io/trino/plugin/atop/AtopConnector.java | 3 ++- .../io/trino/plugin/jdbc/JdbcConnector.java | 3 ++- .../plugin/bigquery/BigQueryConnector.java | 3 ++- .../plugin/blackhole/BlackHoleConnector.java | 3 ++- .../plugin/cassandra/CassandraConnector.java | 3 ++- .../cassandra/TestCassandraConnector.java | 2 +- .../elasticsearch/ElasticsearchConnector.java | 3 ++- .../plugin/example/ExampleConnector.java | 3 ++- .../plugin/google/sheets/SheetsConnector.java | 3 ++- .../io/trino/plugin/hive/HiveConnector.java | 3 ++- .../plugin/hive/TestHiveConnectorFactory.java | 3 ++- .../plugin/iceberg/IcebergConnector.java | 3 ++- .../io/trino/plugin/jmx/JmxConnector.java | 3 ++- .../trino/plugin/jmx/TestJmxSplitManager.java | 2 +- .../io/trino/plugin/kafka/KafkaConnector.java | 3 ++- .../plugin/kinesis/KinesisConnector.java | 3 ++- .../plugin/kinesis/TestKinesisPlugin.java | 3 ++- .../TestKinesisTableDescriptionSupplier.java | 4 ++-- .../s3config/TestS3TableConfigClient.java | 2 +- .../io/trino/plugin/kudu/KuduConnector.java | 3 ++- .../plugin/localfile/LocalFileConnector.java | 3 ++- .../trino/plugin/memory/MemoryConnector.java | 3 ++- .../trino/plugin/mongodb/MongoConnector.java | 3 ++- .../plugin/phoenix/PhoenixConnector.java | 3 ++- .../plugin/phoenix5/PhoenixConnector.java | 3 ++- .../io/trino/plugin/pinot/PinotConnector.java | 3 ++- .../prometheus/PrometheusConnector.java | 3 ++- .../plugin/raptor/legacy/RaptorConnector.java | 3 ++- .../raptor/legacy/TestRaptorConnector.java | 24 +++++++++---------- .../io/trino/plugin/redis/RedisConnector.java | 3 ++- .../trino/plugin/thrift/ThriftConnector.java | 3 ++- .../plugin/tpcds/TpcdsConnectorFactory.java | 3 ++- .../plugin/tpch/TpchConnectorFactory.java | 3 ++- .../tpch/IndexedTpchConnectorFactory.java | 3 ++- .../io/trino/execution/TestBeginQuery.java | 2 +- .../TestCoordinatorDynamicFiltering.java | 2 +- 45 files changed, 108 insertions(+), 57 deletions(-) diff --git a/core/trino-main/src/main/java/io/trino/connector/informationschema/InformationSchemaConnector.java b/core/trino-main/src/main/java/io/trino/connector/informationschema/InformationSchemaConnector.java index 333cc376a350..f1be534b57dc 100644 --- 
a/core/trino-main/src/main/java/io/trino/connector/informationschema/InformationSchemaConnector.java +++ b/core/trino-main/src/main/java/io/trino/connector/informationschema/InformationSchemaConnector.java @@ -18,6 +18,7 @@ import io.trino.security.AccessControl; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -50,7 +51,7 @@ public ConnectorTransactionHandle beginTransaction(TransactionId transactionId, } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/core/trino-main/src/main/java/io/trino/connector/system/GlobalSystemConnector.java b/core/trino-main/src/main/java/io/trino/connector/system/GlobalSystemConnector.java index 7bb3bf38dd09..58626ff90e81 100644 --- a/core/trino-main/src/main/java/io/trino/connector/system/GlobalSystemConnector.java +++ b/core/trino-main/src/main/java/io/trino/connector/system/GlobalSystemConnector.java @@ -15,6 +15,7 @@ import com.google.common.collect.ImmutableSet; import io.trino.spi.connector.ConnectorMetadata; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SystemTable; import io.trino.spi.procedure.Procedure; @@ -47,7 +48,7 @@ public ConnectorTransactionHandle beginTransaction(TransactionId transactionId, } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return new ConnectorMetadata() {}; } diff --git a/core/trino-main/src/main/java/io/trino/connector/system/SystemConnector.java b/core/trino-main/src/main/java/io/trino/connector/system/SystemConnector.java index 58c20eb1242a..41018abbbbe2 100644 --- a/core/trino-main/src/main/java/io/trino/connector/system/SystemConnector.java +++ b/core/trino-main/src/main/java/io/trino/connector/system/SystemConnector.java @@ -16,6 +16,7 @@ import io.trino.metadata.InternalNodeManager; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SystemTable; @@ -66,7 +67,7 @@ public ConnectorTransactionHandle beginTransaction(TransactionId transactionId, } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java b/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java index adb92c89ac3f..78af70f0a3eb 100644 --- a/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java +++ b/core/trino-main/src/main/java/io/trino/transaction/InMemoryTransactionManager.java @@ -30,6 +30,7 @@ import io.trino.spi.TrinoException; import io.trino.spi.connector.Connector; import 
io.trino.spi.connector.ConnectorMetadata; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; import org.joda.time.DateTime; @@ -604,7 +605,8 @@ public synchronized ConnectorMetadata getConnectorMetadata(Session session) { checkState(!finished.get(), "Already finished"); if (connectorMetadata == null) { - connectorMetadata = connector.getMetadata(transactionHandle); + ConnectorSession connectorSession = session.toConnectorSession(catalogName); + connectorMetadata = connector.getMetadata(connectorSession, transactionHandle); } return connectorMetadata; } diff --git a/core/trino-main/src/test/java/io/trino/connector/MockConnector.java b/core/trino-main/src/test/java/io/trino/connector/MockConnector.java index 52bb02a4822f..bb66064a68b8 100644 --- a/core/trino-main/src/test/java/io/trino/connector/MockConnector.java +++ b/core/trino-main/src/test/java/io/trino/connector/MockConnector.java @@ -216,7 +216,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { return new MockConnectorMetadata(); } diff --git a/core/trino-main/src/test/java/io/trino/sql/analyzer/TestAnalyzer.java b/core/trino-main/src/test/java/io/trino/sql/analyzer/TestAnalyzer.java index 07006a1cebc7..e87bd6084925 100644 --- a/core/trino-main/src/test/java/io/trino/sql/analyzer/TestAnalyzer.java +++ b/core/trino-main/src/test/java/io/trino/sql/analyzer/TestAnalyzer.java @@ -52,6 +52,7 @@ import io.trino.spi.connector.ColumnMetadata; import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorTableMetadata; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SchemaTableName; @@ -5715,7 +5716,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { return metadata; } diff --git a/core/trino-main/src/test/java/io/trino/sql/planner/TestMaterializedViews.java b/core/trino-main/src/test/java/io/trino/sql/planner/TestMaterializedViews.java index 6805e71317ce..aa5becab8d38 100644 --- a/core/trino-main/src/test/java/io/trino/sql/planner/TestMaterializedViews.java +++ b/core/trino-main/src/test/java/io/trino/sql/planner/TestMaterializedViews.java @@ -25,6 +25,7 @@ import io.trino.spi.connector.ColumnMetadata; import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorTableMetadata; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SchemaTableName; @@ -217,7 +218,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { return metadata; } diff --git a/core/trino-spi/src/main/java/io/trino/spi/connector/Connector.java 
b/core/trino-spi/src/main/java/io/trino/spi/connector/Connector.java index 65e194211105..b7463fc4fc40 100644 --- a/core/trino-spi/src/main/java/io/trino/spi/connector/Connector.java +++ b/core/trino-spi/src/main/java/io/trino/spi/connector/Connector.java @@ -59,7 +59,21 @@ default ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLeve * Guaranteed to be called at most once per transaction. The returned metadata will only be accessed * in a single threaded context. */ - ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle); + default ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) + { + return getMetadata(transactionHandle); + } + + /** + * Guaranteed to be called at most once per transaction. The returned metadata will only be accessed + * in a single threaded context. + * @deprecated use {@link #getMetadata(ConnectorSession, ConnectorTransactionHandle)} + */ + @Deprecated + default ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + { + throw new UnsupportedOperationException(); + } /** * @throws UnsupportedOperationException if this connector does not support tables with splits diff --git a/plugin/trino-accumulo/src/main/java/io/trino/plugin/accumulo/AccumuloConnector.java b/plugin/trino-accumulo/src/main/java/io/trino/plugin/accumulo/AccumuloConnector.java index bb76c8dd2b17..824e3eebe49f 100644 --- a/plugin/trino-accumulo/src/main/java/io/trino/plugin/accumulo/AccumuloConnector.java +++ b/plugin/trino-accumulo/src/main/java/io/trino/plugin/accumulo/AccumuloConnector.java @@ -22,6 +22,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -74,7 +75,7 @@ public AccumuloConnector( } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { ConnectorMetadata metadata = transactions.get(transactionHandle); checkArgument(metadata != null, "no such transaction: %s", transactionHandle); diff --git a/plugin/trino-atop/src/main/java/io/trino/plugin/atop/AtopConnector.java b/plugin/trino-atop/src/main/java/io/trino/plugin/atop/AtopConnector.java index 9ae775abd2d8..cf39e88c9884 100644 --- a/plugin/trino-atop/src/main/java/io/trino/plugin/atop/AtopConnector.java +++ b/plugin/trino-atop/src/main/java/io/trino/plugin/atop/AtopConnector.java @@ -18,6 +18,7 @@ import io.trino.spi.connector.ConnectorAccessControl; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -59,7 +60,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git 
a/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/JdbcConnector.java b/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/JdbcConnector.java index 77b61e64d62d..f803c8042df0 100644 --- a/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/JdbcConnector.java +++ b/plugin/trino-base-jdbc/src/main/java/io/trino/plugin/jdbc/JdbcConnector.java @@ -23,6 +23,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.procedure.Procedure; @@ -97,7 +98,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { JdbcMetadata metadata = transactions.get(transaction); checkArgument(metadata != null, "no such transaction: %s", transaction); diff --git a/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnector.java b/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnector.java index 170ab1aa5443..c35f948e2fd0 100644 --- a/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnector.java +++ b/plugin/trino-bigquery/src/main/java/io/trino/plugin/bigquery/BigQueryConnector.java @@ -17,6 +17,7 @@ import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -56,7 +57,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-blackhole/src/main/java/io/trino/plugin/blackhole/BlackHoleConnector.java b/plugin/trino-blackhole/src/main/java/io/trino/plugin/blackhole/BlackHoleConnector.java index e3e8325416a2..3c9bb4d4e996 100644 --- a/plugin/trino-blackhole/src/main/java/io/trino/plugin/blackhole/BlackHoleConnector.java +++ b/plugin/trino-blackhole/src/main/java/io/trino/plugin/blackhole/BlackHoleConnector.java @@ -20,6 +20,7 @@ import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -81,7 +82,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraConnector.java 
b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraConnector.java index 3cbb9b190fbc..c7ab92cccde5 100644 --- a/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraConnector.java +++ b/plugin/trino-cassandra/src/main/java/io/trino/plugin/cassandra/CassandraConnector.java @@ -18,6 +18,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -66,7 +67,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java index 7a407ee7472b..aa9d9cd2e8f0 100644 --- a/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java +++ b/plugin/trino-cassandra/src/test/java/io/trino/plugin/cassandra/TestCassandraConnector.java @@ -123,7 +123,7 @@ public void setup() "cassandra.native-protocol-port", Integer.toString(server.getPort())), new TestingConnectorContext()); - metadata = connector.getMetadata(CassandraTransactionHandle.INSTANCE); + metadata = connector.getMetadata(SESSION, CassandraTransactionHandle.INSTANCE); assertInstanceOf(metadata, CassandraMetadata.class); splitManager = connector.getSplitManager(); diff --git a/plugin/trino-elasticsearch/src/main/java/io/trino/plugin/elasticsearch/ElasticsearchConnector.java b/plugin/trino-elasticsearch/src/main/java/io/trino/plugin/elasticsearch/ElasticsearchConnector.java index 102c9f5a4694..98cf2a640f19 100644 --- a/plugin/trino-elasticsearch/src/main/java/io/trino/plugin/elasticsearch/ElasticsearchConnector.java +++ b/plugin/trino-elasticsearch/src/main/java/io/trino/plugin/elasticsearch/ElasticsearchConnector.java @@ -18,6 +18,7 @@ import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SystemTable; @@ -63,7 +64,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-example-http/src/main/java/io/trino/plugin/example/ExampleConnector.java b/plugin/trino-example-http/src/main/java/io/trino/plugin/example/ExampleConnector.java index 112b99345af4..ae72aea98f71 100644 --- a/plugin/trino-example-http/src/main/java/io/trino/plugin/example/ExampleConnector.java +++ b/plugin/trino-example-http/src/main/java/io/trino/plugin/example/ExampleConnector.java @@ -17,6 +17,7 @@ import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; import 
io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -54,7 +55,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-google-sheets/src/main/java/io/trino/plugin/google/sheets/SheetsConnector.java b/plugin/trino-google-sheets/src/main/java/io/trino/plugin/google/sheets/SheetsConnector.java index 9bf5356ce6dc..d3d0c456a750 100644 --- a/plugin/trino-google-sheets/src/main/java/io/trino/plugin/google/sheets/SheetsConnector.java +++ b/plugin/trino-google-sheets/src/main/java/io/trino/plugin/google/sheets/SheetsConnector.java @@ -17,6 +17,7 @@ import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -54,7 +55,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java index a95623570e63..1a361049edee 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java @@ -25,6 +25,7 @@ import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.TableProcedureMetadata; @@ -110,7 +111,7 @@ public HiveConnector( } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { ConnectorMetadata metadata = transactionManager.get(transaction); checkArgument(metadata != null, "no such transaction: %s", transaction); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorFactory.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorFactory.java index b9b0a3c2de13..6316df12d346 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorFactory.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveConnectorFactory.java @@ -26,6 +26,7 @@ import static io.airlift.testing.Assertions.assertInstanceOf; import static io.trino.spi.transaction.IsolationLevel.READ_UNCOMMITTED; +import static io.trino.testing.TestingConnectorSession.SESSION; import static org.assertj.core.api.Assertions.assertThatThrownBy; 
public class TestHiveConnectorFactory @@ -52,7 +53,7 @@ private static void assertCreateConnector(String metastoreUri) Connector connector = new HiveConnectorFactory("hive").create("hive-test", config, new TestingConnectorContext()); ConnectorTransactionHandle transaction = connector.beginTransaction(READ_UNCOMMITTED, true, true); - assertInstanceOf(connector.getMetadata(transaction), ClassLoaderSafeConnectorMetadata.class); + assertInstanceOf(connector.getMetadata(SESSION, transaction), ClassLoaderSafeConnectorMetadata.class); assertInstanceOf(connector.getSplitManager(), ClassLoaderSafeConnectorSplitManager.class); assertInstanceOf(connector.getPageSourceProvider(), ConnectorPageSourceProvider.class); connector.commit(transaction); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java index 3df797622e3d..a3d0deb6a2e6 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java @@ -27,6 +27,7 @@ import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SystemTable; @@ -105,7 +106,7 @@ public Set getCapabilities() } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { ConnectorMetadata metadata = transactionManager.get(transaction); return new ClassLoaderSafeConnectorMetadata(metadata, getClass().getClassLoader()); diff --git a/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxConnector.java b/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxConnector.java index 17bf6d4f2cb1..73617e2e8679 100644 --- a/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxConnector.java +++ b/plugin/trino-jmx/src/main/java/io/trino/plugin/jmx/JmxConnector.java @@ -15,6 +15,7 @@ import io.airlift.log.Logger; import io.trino.spi.connector.Connector; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -48,7 +49,7 @@ public JmxConnector( } @Override - public JmxMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public JmxMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return jmxMetadata; } diff --git a/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java b/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java index 0243a8538b2e..ff4337111a7c 100644 --- a/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java +++ b/plugin/trino-jmx/src/test/java/io/trino/plugin/jmx/TestJmxSplitManager.java @@ -87,7 +87,7 @@ public NodeManager getNodeManager() private final JmxColumnHandle columnHandle = new JmxColumnHandle("node", createUnboundedVarcharType()); private final JmxSplitManager splitManager = jmxConnector.getSplitManager(); - private final JmxMetadata metadata = jmxConnector.getMetadata(new ConnectorTransactionHandle() {}); + private final JmxMetadata metadata = 
jmxConnector.getMetadata(SESSION, new ConnectorTransactionHandle() {}); private final JmxRecordSetProvider recordSetProvider = jmxConnector.getRecordSetProvider(); @AfterClass(alwaysRun = true) diff --git a/plugin/trino-kafka/src/main/java/io/trino/plugin/kafka/KafkaConnector.java b/plugin/trino-kafka/src/main/java/io/trino/plugin/kafka/KafkaConnector.java index 8e0292ee3cfd..46e642499bf2 100644 --- a/plugin/trino-kafka/src/main/java/io/trino/plugin/kafka/KafkaConnector.java +++ b/plugin/trino-kafka/src/main/java/io/trino/plugin/kafka/KafkaConnector.java @@ -19,6 +19,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -71,7 +72,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-kinesis/src/main/java/io/trino/plugin/kinesis/KinesisConnector.java b/plugin/trino-kinesis/src/main/java/io/trino/plugin/kinesis/KinesisConnector.java index e3d10236a76e..77d49e7105a2 100644 --- a/plugin/trino-kinesis/src/main/java/io/trino/plugin/kinesis/KinesisConnector.java +++ b/plugin/trino-kinesis/src/main/java/io/trino/plugin/kinesis/KinesisConnector.java @@ -18,6 +18,7 @@ import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -52,7 +53,7 @@ public KinesisConnector( } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisPlugin.java b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisPlugin.java index 9ce9235f6371..9dd87f5792c6 100644 --- a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisPlugin.java +++ b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisPlugin.java @@ -24,6 +24,7 @@ import static com.google.common.collect.Iterables.getOnlyElement; import static io.trino.spi.transaction.IsolationLevel.READ_COMMITTED; +import static io.trino.testing.TestingConnectorSession.SESSION; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertTrue; @@ -54,7 +55,7 @@ public void testCreateConnector() // Verify that the key objects have been created on the connector assertNotNull(c.getRecordSetProvider()); assertNotNull(c.getSplitManager()); - ConnectorMetadata md = c.getMetadata(KinesisTransactionHandle.INSTANCE); + ConnectorMetadata md = c.getMetadata(SESSION, KinesisTransactionHandle.INSTANCE); assertNotNull(md); ConnectorTransactionHandle handle = c.beginTransaction(READ_COMMITTED, true, true); diff --git 
a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java index 525c0761a1f1..a4d0707b5e26 100644 --- a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java +++ b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/TestKinesisTableDescriptionSupplier.java @@ -60,7 +60,7 @@ public void start() @Test public void testTableDefinition() { - KinesisMetadata metadata = (KinesisMetadata) connector.getMetadata(new ConnectorTransactionHandle() {}); + KinesisMetadata metadata = (KinesisMetadata) connector.getMetadata(SESSION, new ConnectorTransactionHandle() {}); SchemaTableName tblName = new SchemaTableName("prod", "test_table"); KinesisTableHandle tableHandle = metadata.getTableHandle(SESSION, tblName); assertNotNull(metadata); @@ -77,7 +77,7 @@ public void testTableDefinition() @Test public void testRelatedObjects() { - KinesisMetadata metadata = (KinesisMetadata) connector.getMetadata(new ConnectorTransactionHandle() {}); + KinesisMetadata metadata = (KinesisMetadata) connector.getMetadata(SESSION, new ConnectorTransactionHandle() {}); assertNotNull(metadata); SchemaTableName tblName = new SchemaTableName("prod", "test_table"); diff --git a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java index b235e710c1ff..0d0484162f96 100644 --- a/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java +++ b/plugin/trino-kinesis/src/test/java/io/trino/plugin/kinesis/s3config/TestS3TableConfigClient.java @@ -108,7 +108,7 @@ public void testTableReading(String tableDescriptionS3, String accessKey, String log.error("interrupted ..."); } - KinesisMetadata metadata = (KinesisMetadata) kinesisConnector.getMetadata(new ConnectorTransactionHandle() {}); + KinesisMetadata metadata = (KinesisMetadata) kinesisConnector.getMetadata(SESSION, new ConnectorTransactionHandle() {}); SchemaTableName tblName = new SchemaTableName("default", "test123"); KinesisTableHandle tableHandle = metadata.getTableHandle(SESSION, tblName); assertNotNull(metadata); diff --git a/plugin/trino-kudu/src/main/java/io/trino/plugin/kudu/KuduConnector.java b/plugin/trino-kudu/src/main/java/io/trino/plugin/kudu/KuduConnector.java index bb2236a9bfc2..86ea3d315a10 100755 --- a/plugin/trino-kudu/src/main/java/io/trino/plugin/kudu/KuduConnector.java +++ b/plugin/trino-kudu/src/main/java/io/trino/plugin/kudu/KuduConnector.java @@ -21,6 +21,7 @@ import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.procedure.Procedure; @@ -80,7 +81,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-local-file/src/main/java/io/trino/plugin/localfile/LocalFileConnector.java 
b/plugin/trino-local-file/src/main/java/io/trino/plugin/localfile/LocalFileConnector.java index 6ba792fe1162..15aa05b2d121 100644 --- a/plugin/trino-local-file/src/main/java/io/trino/plugin/localfile/LocalFileConnector.java +++ b/plugin/trino-local-file/src/main/java/io/trino/plugin/localfile/LocalFileConnector.java @@ -17,6 +17,7 @@ import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -56,7 +57,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-memory/src/main/java/io/trino/plugin/memory/MemoryConnector.java b/plugin/trino-memory/src/main/java/io/trino/plugin/memory/MemoryConnector.java index 0d1f0477eae3..9d16a455bdb3 100644 --- a/plugin/trino-memory/src/main/java/io/trino/plugin/memory/MemoryConnector.java +++ b/plugin/trino-memory/src/main/java/io/trino/plugin/memory/MemoryConnector.java @@ -17,6 +17,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -51,7 +52,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-mongodb/src/main/java/io/trino/plugin/mongodb/MongoConnector.java b/plugin/trino-mongodb/src/main/java/io/trino/plugin/mongodb/MongoConnector.java index 3e0462a5bcd3..8c7afa9e002c 100644 --- a/plugin/trino-mongodb/src/main/java/io/trino/plugin/mongodb/MongoConnector.java +++ b/plugin/trino-mongodb/src/main/java/io/trino/plugin/mongodb/MongoConnector.java @@ -17,6 +17,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -64,7 +65,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { MongoMetadata metadata = transactions.get(transaction); checkArgument(metadata != null, "no such transaction: %s", transaction); diff --git a/plugin/trino-phoenix/src/main/java/io/trino/plugin/phoenix/PhoenixConnector.java b/plugin/trino-phoenix/src/main/java/io/trino/plugin/phoenix/PhoenixConnector.java index b1181a80fcb1..ed7aff3fe4b9 100644 --- 
a/plugin/trino-phoenix/src/main/java/io/trino/plugin/phoenix/PhoenixConnector.java +++ b/plugin/trino-phoenix/src/main/java/io/trino/plugin/phoenix/PhoenixConnector.java @@ -22,6 +22,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -77,7 +78,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { return metadata; } diff --git a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java index e6858cabe684..3f3b6fa2e3ed 100644 --- a/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java +++ b/plugin/trino-phoenix5/src/main/java/io/trino/plugin/phoenix5/PhoenixConnector.java @@ -22,6 +22,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -77,7 +78,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { return metadata; } diff --git a/plugin/trino-pinot/src/main/java/io/trino/plugin/pinot/PinotConnector.java b/plugin/trino-pinot/src/main/java/io/trino/plugin/pinot/PinotConnector.java index 3908bb94392b..a6ed6baba7b4 100755 --- a/plugin/trino-pinot/src/main/java/io/trino/plugin/pinot/PinotConnector.java +++ b/plugin/trino-pinot/src/main/java/io/trino/plugin/pinot/PinotConnector.java @@ -19,6 +19,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -64,7 +65,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-prometheus/src/main/java/io/trino/plugin/prometheus/PrometheusConnector.java b/plugin/trino-prometheus/src/main/java/io/trino/plugin/prometheus/PrometheusConnector.java index 180093a01737..4478cb9f2c34 100644 --- a/plugin/trino-prometheus/src/main/java/io/trino/plugin/prometheus/PrometheusConnector.java +++ b/plugin/trino-prometheus/src/main/java/io/trino/plugin/prometheus/PrometheusConnector.java @@ -18,6 +18,7 @@ import io.trino.spi.connector.Connector; 
import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -57,7 +58,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/RaptorConnector.java b/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/RaptorConnector.java index 6e0a740395f6..7d40caf1b430 100644 --- a/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/RaptorConnector.java +++ b/plugin/trino-raptor-legacy/src/main/java/io/trino/plugin/raptor/legacy/RaptorConnector.java @@ -26,6 +26,7 @@ import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorPageSinkProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SystemTable; @@ -155,7 +156,7 @@ public ConnectorPageSinkProvider getPageSinkProvider() } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { RaptorMetadata metadata = transactions.get(transaction); checkArgument(metadata != null, "no such transaction: %s", transaction); diff --git a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorConnector.java b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorConnector.java index 15aeba8fe62e..1039141bcee3 100644 --- a/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorConnector.java +++ b/plugin/trino-raptor-legacy/src/test/java/io/trino/plugin/raptor/legacy/TestRaptorConnector.java @@ -131,24 +131,24 @@ public void testMaintenanceBlocked() // begin delete for table1 ConnectorTransactionHandle txn1 = beginTransaction(); - ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(txn1), "test1"); - connector.getMetadata(txn1).beginDelete(SESSION, handle1); + ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(SESSION, txn1), "test1"); + connector.getMetadata(SESSION, txn1).beginDelete(SESSION, handle1); assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId1)); assertFalse(metadataDao.isMaintenanceBlockedLocked(tableId2)); // begin delete for table2 ConnectorTransactionHandle txn2 = beginTransaction(); - ConnectorTableHandle handle2 = getTableHandle(connector.getMetadata(txn2), "test2"); - connector.getMetadata(txn2).beginDelete(SESSION, handle2); + ConnectorTableHandle handle2 = getTableHandle(connector.getMetadata(SESSION, txn2), "test2"); + connector.getMetadata(SESSION, txn2).beginDelete(SESSION, handle2); assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId1)); assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId2)); // begin another delete for table1 ConnectorTransactionHandle txn3 = beginTransaction(); - ConnectorTableHandle handle3 = 
getTableHandle(connector.getMetadata(txn3), "test1"); - connector.getMetadata(txn3).beginDelete(SESSION, handle3); + ConnectorTableHandle handle3 = getTableHandle(connector.getMetadata(SESSION, txn3), "test1"); + connector.getMetadata(SESSION, txn3).beginDelete(SESSION, handle3); assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId1)); assertTrue(metadataDao.isMaintenanceBlockedLocked(tableId2)); @@ -211,7 +211,7 @@ private void assertSplitShard(Type temporalType, String min, String max, int exp .build(); ConnectorTransactionHandle transaction = beginTransaction(); - connector.getMetadata(transaction).createTable( + connector.getMetadata(SESSION, transaction).createTable( SESSION, new ConnectorTableMetadata( new SchemaTableName("test", "test"), @@ -221,8 +221,8 @@ private void assertSplitShard(Type temporalType, String min, String max, int exp connector.commit(transaction); ConnectorTransactionHandle txn1 = beginTransaction(); - ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(txn1), "test"); - ConnectorInsertTableHandle insertTableHandle = connector.getMetadata(txn1).beginInsert(session, handle1); + ConnectorTableHandle handle1 = getTableHandle(connector.getMetadata(SESSION, txn1), "test"); + ConnectorInsertTableHandle insertTableHandle = connector.getMetadata(SESSION, txn1).beginInsert(session, handle1); ConnectorPageSink raptorPageSink = connector.getPageSinkProvider().createPageSink(txn1, session, insertTableHandle); Object timestamp1 = null; @@ -246,14 +246,14 @@ else if (temporalType.equals(DATE)) { Collection shards = raptorPageSink.finish().get(); assertEquals(shards.size(), expectedSplits); - connector.getMetadata(txn1).dropTable(session, handle1); + connector.getMetadata(session, txn1).dropTable(session, handle1); connector.commit(txn1); } private long createTable(String name) { ConnectorTransactionHandle transaction = beginTransaction(); - connector.getMetadata(transaction).createTable( + connector.getMetadata(SESSION, transaction).createTable( SESSION, new ConnectorTableMetadata( new SchemaTableName("test", name), @@ -262,7 +262,7 @@ private long createTable(String name) connector.commit(transaction); transaction = beginTransaction(); - ConnectorTableHandle tableHandle = getTableHandle(connector.getMetadata(transaction), name); + ConnectorTableHandle tableHandle = getTableHandle(connector.getMetadata(SESSION, transaction), name); connector.commit(transaction); return ((RaptorTableHandle) tableHandle).getTableId(); } diff --git a/plugin/trino-redis/src/main/java/io/trino/plugin/redis/RedisConnector.java b/plugin/trino-redis/src/main/java/io/trino/plugin/redis/RedisConnector.java index dfd2d25a1e6a..d9a1ec63d2eb 100644 --- a/plugin/trino-redis/src/main/java/io/trino/plugin/redis/RedisConnector.java +++ b/plugin/trino-redis/src/main/java/io/trino/plugin/redis/RedisConnector.java @@ -16,6 +16,7 @@ import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -56,7 +57,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { return metadata; } 
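Note for connector authors tracking this SPI change: override the new two-argument getMetadata(ConnectorSession, ConnectorTransactionHandle); the old single-argument overload is now a deprecated default that throws UnsupportedOperationException, and the engine supplies the session via session.toConnectorSession(catalogName) when it creates the connector metadata (see the InMemoryTransactionManager change above). The sketch below is illustrative only and is not part of this patch; the class name and the per-transaction map are hypothetical, modeled on the JDBC and Hive connectors touched here.

    import io.trino.spi.connector.Connector;
    import io.trino.spi.connector.ConnectorMetadata;
    import io.trino.spi.connector.ConnectorSession;
    import io.trino.spi.connector.ConnectorTransactionHandle;
    import io.trino.spi.transaction.IsolationLevel;

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import static com.google.common.base.Preconditions.checkArgument;

    // Hypothetical connector showing the migration to the session-aware method
    public class ExampleSessionAwareConnector
            implements Connector
    {
        private final Map<ConnectorTransactionHandle, ConnectorMetadata> transactions = new ConcurrentHashMap<>();

        @Override
        public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit)
        {
            ConnectorTransactionHandle transaction = new ConnectorTransactionHandle() {};
            transactions.put(transaction, new ConnectorMetadata() {});
            return transaction;
        }

        @Override
        public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction)
        {
            // the caller's session (and therefore its identity) is available here,
            // so metadata no longer has to be created eagerly at transaction start
            ConnectorMetadata metadata = transactions.get(transaction);
            checkArgument(metadata != null, "no such transaction: %s", transaction);
            return metadata;
        }
    }

Connectors that keep only a single shared metadata instance (as in most of the plugins below) can simply change the method signature and ignore the session argument, since the default one-argument bridge keeps older connector binaries working against the new SPI.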
diff --git a/plugin/trino-thrift/src/main/java/io/trino/plugin/thrift/ThriftConnector.java b/plugin/trino-thrift/src/main/java/io/trino/plugin/thrift/ThriftConnector.java index b46150ec3e3d..dbcc43061c02 100644 --- a/plugin/trino-thrift/src/main/java/io/trino/plugin/thrift/ThriftConnector.java +++ b/plugin/trino-thrift/src/main/java/io/trino/plugin/thrift/ThriftConnector.java @@ -18,6 +18,7 @@ import io.trino.spi.connector.ConnectorIndexProvider; import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorPageSourceProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.session.PropertyMetadata; @@ -63,7 +64,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/plugin/trino-tpcds/src/main/java/io/trino/plugin/tpcds/TpcdsConnectorFactory.java b/plugin/trino-tpcds/src/main/java/io/trino/plugin/tpcds/TpcdsConnectorFactory.java index a259bbf170f6..0d7e1fa7e3af 100644 --- a/plugin/trino-tpcds/src/main/java/io/trino/plugin/tpcds/TpcdsConnectorFactory.java +++ b/plugin/trino-tpcds/src/main/java/io/trino/plugin/tpcds/TpcdsConnectorFactory.java @@ -20,6 +20,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -67,7 +68,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return new TpcdsMetadata(); } diff --git a/plugin/trino-tpch/src/main/java/io/trino/plugin/tpch/TpchConnectorFactory.java b/plugin/trino-tpch/src/main/java/io/trino/plugin/tpch/TpchConnectorFactory.java index c24c192c5a8c..826cc8efec0c 100644 --- a/plugin/trino-tpch/src/main/java/io/trino/plugin/tpch/TpchConnectorFactory.java +++ b/plugin/trino-tpch/src/main/java/io/trino/plugin/tpch/TpchConnectorFactory.java @@ -21,6 +21,7 @@ import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorPageSourceProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.transaction.IsolationLevel; @@ -89,7 +90,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transaction) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { return new TpchMetadata( columnNaming, diff --git a/testing/trino-testing/src/main/java/io/trino/testing/tpch/IndexedTpchConnectorFactory.java b/testing/trino-testing/src/main/java/io/trino/testing/tpch/IndexedTpchConnectorFactory.java index 
9f56b7165c04..a584ff57e9a0 100644 --- a/testing/trino-testing/src/main/java/io/trino/testing/tpch/IndexedTpchConnectorFactory.java +++ b/testing/trino-testing/src/main/java/io/trino/testing/tpch/IndexedTpchConnectorFactory.java @@ -27,6 +27,7 @@ import io.trino.spi.connector.ConnectorMetadata; import io.trino.spi.connector.ConnectorNodePartitioningProvider; import io.trino.spi.connector.ConnectorRecordSetProvider; +import io.trino.spi.connector.ConnectorSession; import io.trino.spi.connector.ConnectorSplitManager; import io.trino.spi.connector.ConnectorTransactionHandle; import io.trino.spi.connector.SystemTable; @@ -72,7 +73,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return new TpchIndexMetadata(indexedData); } diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestBeginQuery.java b/testing/trino-tests/src/test/java/io/trino/execution/TestBeginQuery.java index 6d59e6f76baf..7b657e5fdadb 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestBeginQuery.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestBeginQuery.java @@ -189,7 +189,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } diff --git a/testing/trino-tests/src/test/java/io/trino/execution/TestCoordinatorDynamicFiltering.java b/testing/trino-tests/src/test/java/io/trino/execution/TestCoordinatorDynamicFiltering.java index 559dc7c8549a..0e183e1a5d4d 100644 --- a/testing/trino-tests/src/test/java/io/trino/execution/TestCoordinatorDynamicFiltering.java +++ b/testing/trino-tests/src/test/java/io/trino/execution/TestCoordinatorDynamicFiltering.java @@ -446,7 +446,7 @@ public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel } @Override - public ConnectorMetadata getMetadata(ConnectorTransactionHandle transactionHandle) + public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transactionHandle) { return metadata; } From def50d5e15e5e65447562c7c2bf9b972c1e91aac Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 25 Sep 2021 22:09:31 -0700 Subject: [PATCH 04/18] Delay Hive metastore creation until identity is available --- .../io/trino/plugin/hive/HiveConnector.java | 22 +----- .../java/io/trino/plugin/hive/HiveModule.java | 9 --- .../trino/plugin/hive/HiveSplitManager.java | 13 ++-- .../plugin/hive/HiveTransactionHandle.java | 26 ++++--- .../plugin/hive/HiveTransactionManager.java | 69 ++++++++++++++--- .../hive/InternalHiveConnectorFactory.java | 2 - ...sactionalLegacyAccessControlMetastore.java | 12 +-- ...onalSqlStandardAccessControlMetastore.java | 15 ++-- .../trino/plugin/hive/AbstractTestHive.java | 20 ++--- .../hive/AbstractTestHiveFileSystem.java | 6 +- .../trino/plugin/hive/TestHivePageSink.java | 2 +- .../TestNodeLocalDynamicSplitPruning.java | 4 +- .../TestConnectorPushdownRulesWithHive.java | 10 +-- .../plugin/iceberg/IcebergConnector.java | 17 +---- .../iceberg/IcebergTransactionManager.java | 76 +++++++++++++++---- .../InternalIcebergConnectorFactory.java | 2 - 16 files changed, 185 insertions(+), 120 
deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java index 1a361049edee..a20f11d2f05f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java @@ -18,7 +18,6 @@ import io.airlift.bootstrap.LifeCycleManager; import io.trino.plugin.base.classloader.ClassLoaderSafeConnectorMetadata; import io.trino.plugin.base.session.SessionPropertiesProvider; -import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorAccessControl; import io.trino.spi.connector.ConnectorMetadata; @@ -48,7 +47,6 @@ public class HiveConnector implements Connector { private final LifeCycleManager lifeCycleManager; - private final TransactionalMetadataFactory metadataFactory; private final ConnectorSplitManager splitManager; private final ConnectorPageSourceProvider pageSourceProvider; private final ConnectorPageSinkProvider pageSinkProvider; @@ -70,7 +68,6 @@ public class HiveConnector public HiveConnector( LifeCycleManager lifeCycleManager, - TransactionalMetadataFactory metadataFactory, HiveTransactionManager transactionManager, ConnectorSplitManager splitManager, ConnectorPageSourceProvider pageSourceProvider, @@ -89,7 +86,6 @@ public HiveConnector( ClassLoader classLoader) { this.lifeCycleManager = requireNonNull(lifeCycleManager, "lifeCycleManager is null"); - this.metadataFactory = requireNonNull(metadataFactory, "metadataFactory is null"); this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); this.splitManager = requireNonNull(splitManager, "splitManager is null"); this.pageSourceProvider = requireNonNull(pageSourceProvider, "pageSourceProvider is null"); @@ -200,31 +196,21 @@ public boolean isSingleStatementWritesOnly() public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit) { checkConnectorSupports(READ_UNCOMMITTED, isolationLevel); - ConnectorTransactionHandle transaction = new HiveTransactionHandle(); - try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { - transactionManager.put(transaction, metadataFactory.create(autoCommit)); - } + ConnectorTransactionHandle transaction = new HiveTransactionHandle(autoCommit); + transactionManager.begin(transaction); return transaction; } @Override public void commit(ConnectorTransactionHandle transaction) { - TransactionalMetadata metadata = transactionManager.remove(transaction); - checkArgument(metadata != null, "no such transaction: %s", transaction); - try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { - metadata.commit(); - } + transactionManager.commit(transaction); } @Override public void rollback(ConnectorTransactionHandle transaction) { - TransactionalMetadata metadata = transactionManager.remove(transaction); - checkArgument(metadata != null, "no such transaction: %s", transaction); - try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { - metadata.rollback(); - } + transactionManager.rollback(transaction); } @Override diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveModule.java index b95f39292cb7..35a7fab6e688 100644 --- 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveModule.java @@ -22,7 +22,6 @@ import io.airlift.event.client.EventClient; import io.trino.plugin.base.CatalogName; import io.trino.plugin.hive.metastore.MetastoreConfig; -import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.orc.OrcFileWriterFactory; import io.trino.plugin.hive.orc.OrcPageSourceFactory; import io.trino.plugin.hive.orc.OrcReaderConfig; @@ -43,7 +42,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; -import java.util.function.Function; import static com.google.inject.multibindings.Multibinder.newSetBinder; import static com.google.inject.multibindings.OptionalBinder.newOptionalBinder; @@ -143,11 +141,4 @@ public ScheduledExecutorService createHiveTransactionHeartbeatExecutor(CatalogNa hiveConfig.getHiveTransactionHeartbeatThreads(), daemonThreadsNamed("hive-heartbeat-" + catalogName + "-%s")); } - - @Singleton - @Provides - public Function createMetastoreGetter(HiveTransactionManager transactionManager) - { - return transactionHandle -> transactionManager.get(transactionHandle).getMetastore(); - } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java index 865b2a8de3d7..73c6d6f947ce 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java @@ -54,7 +54,6 @@ import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; -import java.util.function.Function; import static com.google.common.base.MoreObjects.firstNonNull; import static com.google.common.base.Preconditions.checkArgument; @@ -95,7 +94,7 @@ public class HiveSplitManager public static final String PRESTO_OFFLINE = "presto_offline"; public static final String OBJECT_NOT_READABLE = "object_not_readable"; - private final Function metastoreProvider; + private final HiveTransactionManager transactionManager; private final HivePartitionManager partitionManager; private final NamenodeStats namenodeStats; private final HdfsEnvironment hdfsEnvironment; @@ -115,7 +114,7 @@ public class HiveSplitManager @Inject public HiveSplitManager( HiveConfig hiveConfig, - Function metastoreProvider, + HiveTransactionManager transactionManager, HivePartitionManager partitionManager, NamenodeStats namenodeStats, HdfsEnvironment hdfsEnvironment, @@ -125,7 +124,7 @@ public HiveSplitManager( TypeManager typeManager) { this( - metastoreProvider, + transactionManager, partitionManager, namenodeStats, hdfsEnvironment, @@ -144,7 +143,7 @@ public HiveSplitManager( } public HiveSplitManager( - Function metastoreProvider, + HiveTransactionManager transactionManager, HivePartitionManager partitionManager, NamenodeStats namenodeStats, HdfsEnvironment hdfsEnvironment, @@ -161,7 +160,7 @@ public HiveSplitManager( boolean recursiveDfsWalkerEnabled, TypeManager typeManager) { - this.metastoreProvider = requireNonNull(metastoreProvider, "metastoreProvider is null"); + this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); this.partitionManager = requireNonNull(partitionManager, "partitionManager is null"); this.namenodeStats = requireNonNull(namenodeStats, "namenodeStats is null"); 
this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); @@ -192,7 +191,7 @@ public ConnectorSplitSource getSplits( SchemaTableName tableName = hiveTable.getSchemaTableName(); // get table metadata - SemiTransactionalHiveMetastore metastore = metastoreProvider.apply((HiveTransactionHandle) transaction); + SemiTransactionalHiveMetastore metastore = transactionManager.get(transaction).getMetastore(); Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionHandle.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionHandle.java index 29cbc5b635ec..c1c4f2d4616d 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionHandle.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionHandle.java @@ -25,19 +25,27 @@ public class HiveTransactionHandle implements ConnectorTransactionHandle { + private final boolean autoCommit; private final UUID uuid; - public HiveTransactionHandle() + public HiveTransactionHandle(boolean autoCommit) { - this(UUID.randomUUID()); + this(autoCommit, UUID.randomUUID()); } @JsonCreator - public HiveTransactionHandle(@JsonProperty("uuid") UUID uuid) + public HiveTransactionHandle(@JsonProperty("autoCommit") boolean autoCommit, @JsonProperty("uuid") UUID uuid) { + this.autoCommit = autoCommit; this.uuid = requireNonNull(uuid, "uuid is null"); } + @JsonProperty + public boolean isAutoCommit() + { + return autoCommit; + } + @JsonProperty public UUID getUuid() { @@ -45,22 +53,22 @@ public UUID getUuid() } @Override - public boolean equals(Object obj) + public boolean equals(Object o) { - if (this == obj) { + if (this == o) { return true; } - if ((obj == null) || (getClass() != obj.getClass())) { + if (o == null || getClass() != o.getClass()) { return false; } - HiveTransactionHandle other = (HiveTransactionHandle) obj; - return Objects.equals(uuid, other.uuid); + HiveTransactionHandle that = (HiveTransactionHandle) o; + return autoCommit == that.autoCommit && Objects.equals(uuid, that.uuid); } @Override public int hashCode() { - return Objects.hash(uuid); + return Objects.hash(autoCommit, uuid); } @Override diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java index a27ea17f47c2..0dd7028e8827 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java @@ -13,31 +13,82 @@ */ package io.trino.plugin.hive; -import io.trino.spi.connector.ConnectorMetadata; +import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ConnectorTransactionHandle; +import javax.annotation.concurrent.GuardedBy; +import javax.inject.Inject; + +import java.util.Map; +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ConcurrentMap; +import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; public class HiveTransactionManager { - private final ConcurrentMap transactions = new ConcurrentHashMap<>(); + private final TransactionalMetadataFactory metadataFactory; + private final Map 
transactions = new ConcurrentHashMap<>(); + + @Inject + public HiveTransactionManager(TransactionalMetadataFactory metadataFactory) + { + this.metadataFactory = requireNonNull(metadataFactory, "metadataFactory is null"); + } + + public void begin(ConnectorTransactionHandle transactionHandle) + { + MemoizedMetadata previousValue = transactions.putIfAbsent(transactionHandle, new MemoizedMetadata()); + checkState(previousValue == null); + } public TransactionalMetadata get(ConnectorTransactionHandle transactionHandle) { - return transactions.get(transactionHandle); + return transactions.get(transactionHandle).get(((HiveTransactionHandle) transactionHandle).isAutoCommit()); } - public TransactionalMetadata remove(ConnectorTransactionHandle transactionHandle) + public void commit(ConnectorTransactionHandle transaction) { - return transactions.remove(transactionHandle); + MemoizedMetadata transactionalMetadata = transactions.remove(transaction); + checkArgument(transactionalMetadata != null, "no such transaction: %s", transaction); + transactionalMetadata.optionalGet().ifPresent(metadata -> { + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(getClass().getClassLoader())) { + metadata.commit(); + } + }); } - public void put(ConnectorTransactionHandle transactionHandle, TransactionalMetadata metadata) + public void rollback(ConnectorTransactionHandle transaction) { - ConnectorMetadata previousValue = transactions.putIfAbsent(transactionHandle, metadata); - checkState(previousValue == null); + MemoizedMetadata transactionalMetadata = transactions.remove(transaction); + checkArgument(transactionalMetadata != null, "no such transaction: %s", transaction); + transactionalMetadata.optionalGet().ifPresent(metadata -> { + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(getClass().getClassLoader())) { + metadata.rollback(); + } + }); + } + + private class MemoizedMetadata + { + @GuardedBy("this") + private TransactionalMetadata metadata; + + public synchronized Optional optionalGet() + { + return Optional.ofNullable(metadata); + } + + public synchronized TransactionalMetadata get(boolean autoCommit) + { + if (metadata == null) { + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(getClass().getClassLoader())) { + metadata = metadataFactory.create(autoCommit); + } + } + return metadata; + } } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/InternalHiveConnectorFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/InternalHiveConnectorFactory.java index 18398eecb6c6..6ac6dfa7dc66 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/InternalHiveConnectorFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/InternalHiveConnectorFactory.java @@ -123,7 +123,6 @@ public static Connector createConnector(String catalogName, Map .initialize(); LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class); - TransactionalMetadataFactory metadataFactory = injector.getInstance(TransactionalMetadataFactory.class); HiveTransactionManager transactionManager = injector.getInstance(HiveTransactionManager.class); ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class); ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class); @@ -146,7 +145,6 @@ public static Connector createConnector(String catalogName, Map return new HiveConnector( lifeCycleManager, - metadataFactory, transactionManager, new 
ClassLoaderSafeConnectorSplitManager(splitManager, classLoader), new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader), diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java index 58eed587cb81..0c94c4bcd03a 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java @@ -13,7 +13,7 @@ */ package io.trino.plugin.hive.security; -import io.trino.plugin.hive.HiveTransactionHandle; +import io.trino.plugin.hive.HiveTransactionManager; import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.metastore.Table; @@ -22,24 +22,24 @@ import javax.inject.Inject; import java.util.Optional; -import java.util.function.Function; import static java.util.Objects.requireNonNull; public class SemiTransactionalLegacyAccessControlMetastore implements LegacyAccessControlMetastore { - private final Function metastoreProvider; + private final HiveTransactionManager transactionManager; @Inject - public SemiTransactionalLegacyAccessControlMetastore(Function metastoreProvider) + public SemiTransactionalLegacyAccessControlMetastore(HiveTransactionManager transactionManager) { - this.metastoreProvider = requireNonNull(metastoreProvider, "metastoreProvider is null"); + this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); } @Override public Optional getTable(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName) { - return metastoreProvider.apply(((HiveTransactionHandle) context.getTransactionHandle())).getTable(new HiveIdentity(context.getIdentity()), databaseName, tableName); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); + return metastore.getTable(new HiveIdentity(context.getIdentity()), databaseName, tableName); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java index 45c27b6a8660..6ffe79c05c72 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java @@ -13,7 +13,7 @@ */ package io.trino.plugin.hive.security; -import io.trino.plugin.hive.HiveTransactionHandle; +import io.trino.plugin.hive.HiveTransactionManager; import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HivePrincipal; @@ -26,39 +26,38 @@ import java.util.Optional; import java.util.Set; -import java.util.function.Function; import static java.util.Objects.requireNonNull; public class SemiTransactionalSqlStandardAccessControlMetastore implements SqlStandardAccessControlMetastore { - private final Function metastoreProvider; + private final HiveTransactionManager transactionManager; @Inject - public 
SemiTransactionalSqlStandardAccessControlMetastore(Function metastoreProvider) + public SemiTransactionalSqlStandardAccessControlMetastore(HiveTransactionManager transactionManager) { - this.metastoreProvider = requireNonNull(metastoreProvider, "metastoreProvider is null"); + this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); } @Override public Set listRoleGrants(ConnectorSecurityContext context, HivePrincipal principal) { - SemiTransactionalHiveMetastore metastore = metastoreProvider.apply(((HiveTransactionHandle) context.getTransactionHandle())); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); return metastore.listRoleGrants(principal); } @Override public Set listTablePrivileges(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName, Optional principal) { - SemiTransactionalHiveMetastore metastore = metastoreProvider.apply(((HiveTransactionHandle) context.getTransactionHandle())); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); return metastore.listTablePrivileges(identity, databaseName, tableName, principal); } @Override public Optional getDatabase(ConnectorSecurityContext context, String databaseName) { - SemiTransactionalHiveMetastore metastore = metastoreProvider.apply(((HiveTransactionHandle) context.getTransactionHandle())); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); return metastore.getDatabase(databaseName); } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java index 529549a46006..e46613a872b8 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java @@ -864,9 +864,9 @@ public Optional getMaterializedView(Connect }, SqlStandardAccessControlMetadata::new, NO_REDIRECTIONS); - transactionManager = new HiveTransactionManager(); + transactionManager = new HiveTransactionManager(metadataFactory); splitManager = new HiveSplitManager( - transactionHandle -> transactionManager.get(transactionHandle).getMetastore(), + transactionManager, partitionManager, new NamenodeStats(), hdfsEnvironment, @@ -950,7 +950,7 @@ protected ConnectorSession newSession(Map propertyValues) protected Transaction newTransaction() { - return new HiveTransaction(transactionManager, (HiveMetadata) metadataFactory.create(false)); + return new HiveTransaction(transactionManager); } protected interface Transaction @@ -977,11 +977,11 @@ static class HiveTransaction private final ConnectorTransactionHandle transactionHandle; private boolean closed; - public HiveTransaction(HiveTransactionManager transactionManager, HiveMetadata hiveMetadata) + public HiveTransaction(HiveTransactionManager transactionManager) { this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); - this.transactionHandle = new HiveTransactionHandle(); - transactionManager.put(transactionHandle, hiveMetadata); + this.transactionHandle = new HiveTransactionHandle(false); + transactionManager.begin(transactionHandle); getMetastore().testOnlyThrowOnCleanupFailures(); } @@ -1008,9 +1008,7 @@ public void commit() { checkState(!closed); closed = true; - HiveMetadata metadata = (HiveMetadata) 
transactionManager.remove(transactionHandle); - checkArgument(metadata != null, "no such transaction: %s", transactionHandle); - metadata.commit(); + transactionManager.commit(transactionHandle); } @Override @@ -1018,9 +1016,7 @@ public void rollback() { checkState(!closed); closed = true; - HiveMetadata metadata = (HiveMetadata) transactionManager.remove(transactionHandle); - checkArgument(metadata != null, "no such transaction: %s", transactionHandle); - metadata.rollback(); + transactionManager.rollback(transactionHandle); } @Override diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java index ef3e47e5f4ab..7b70de11e180 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java @@ -220,9 +220,9 @@ protected void setup(String host, int port, String databaseName, boolean s3Selec new DefaultHiveMaterializedViewMetadataFactory(), SqlStandardAccessControlMetadata::new, NO_REDIRECTIONS); - transactionManager = new HiveTransactionManager(); + transactionManager = new HiveTransactionManager(metadataFactory); splitManager = new HiveSplitManager( - transactionHandle -> transactionManager.get(transactionHandle).getMetastore(), + transactionManager, hivePartitionManager, new NamenodeStats(), hdfsEnvironment, @@ -273,7 +273,7 @@ protected ConnectorSession newSession() protected Transaction newTransaction() { - return new HiveTransaction(transactionManager, (HiveMetadata) metadataFactory.create(false)); + return new HiveTransaction(transactionManager); } @Test diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java index d97297350d82..cb5c7f189866 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java @@ -139,7 +139,7 @@ private static String makeFileName(File tempDir, HiveConfig config) private static long writeTestFile(HiveConfig config, HiveMetastore metastore, String outputPath) { - HiveTransactionHandle transaction = new HiveTransactionHandle(); + HiveTransactionHandle transaction = new HiveTransactionHandle(false); HiveWriterStats stats = new HiveWriterStats(); ConnectorPageSink pageSink = createPageSink(transaction, config, metastore, new Path("file:///" + outputPath), stats); List columns = getTestColumns(); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java index a5abee6c539e..16d14c62592e 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestNodeLocalDynamicSplitPruning.java @@ -86,7 +86,7 @@ public void testDynamicBucketPruning() throws IOException { HiveConfig config = new HiveConfig(); - HiveTransactionHandle transaction = new HiveTransactionHandle(); + HiveTransactionHandle transaction = new HiveTransactionHandle(false); try (TempFile tempFile = new TempFile()) { ConnectorPageSource emptyPageSource = createTestingPageSource(transaction, config, tempFile.file(), getDynamicFilter(getTupleDomainForBucketSplitPruning())); assertEquals(emptyPageSource.getClass(), 
EmptyPageSource.class); @@ -101,7 +101,7 @@ public void testDynamicPartitionPruning() throws IOException { HiveConfig config = new HiveConfig(); - HiveTransactionHandle transaction = new HiveTransactionHandle(); + HiveTransactionHandle transaction = new HiveTransactionHandle(false); try (TempFile tempFile = new TempFile()) { ConnectorPageSource emptyPageSource = createTestingPageSource(transaction, config, tempFile.file(), getDynamicFilter(getTupleDomainForPartitionSplitPruning())); assertEquals(emptyPageSource.getClass(), EmptyPageSource.class); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java index 1d16bdf8dbf8..dc479185dc41 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java @@ -161,7 +161,7 @@ public void testProjectionPushdown() Optional.empty()); HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); - TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle()); + TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle fullColumn = partialColumn.getBaseColumn(); @@ -191,7 +191,7 @@ public void testProjectionPushdown() new TableHandle( new CatalogName(HIVE_CATALOG_NAME), hiveTable.withProjectedColumns(ImmutableSet.of(fullColumn)), - new HiveTransactionHandle()), + new HiveTransactionHandle(false)), ImmutableList.of(p.symbol("struct_of_int", baseType)), ImmutableMap.of(p.symbol("struct_of_int", baseType), fullColumn)))) .doesNotFire(); @@ -225,7 +225,7 @@ public void testPredicatePushdown() PushPredicateIntoTableScan pushPredicateIntoTableScan = new PushPredicateIntoTableScan(tester().getPlannerContext(), tester().getTypeAnalyzer()); HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); - TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle()); + TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle column = createBaseColumn("a", 0, HIVE_INT, INTEGER, REGULAR, Optional.empty()); @@ -257,7 +257,7 @@ public void testColumnPruningProjectionPushdown() PruneTableScanColumns pruneTableScanColumns = new PruneTableScanColumns(tester().getMetadata()); HiveTableHandle hiveTable = new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); - TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle()); + TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle columnA = createBaseColumn("a", 0, HIVE_INT, INTEGER, REGULAR, Optional.empty()); HiveColumnHandle columnB = createBaseColumn("b", 1, HIVE_INT, INTEGER, REGULAR, Optional.empty()); @@ -300,7 +300,7 @@ public void testPushdownWithDuplicateExpressions() new ScalarStatsCalculator(tester().getPlannerContext(), tester().getTypeAnalyzer())); HiveTableHandle hiveTable = 
new HiveTableHandle(SCHEMA_NAME, tableName, ImmutableMap.of(), ImmutableList.of(), ImmutableList.of(), Optional.empty()); - TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle()); + TableHandle table = new TableHandle(new CatalogName(HIVE_CATALOG_NAME), hiveTable, new HiveTransactionHandle(false)); HiveColumnHandle bigintColumn = createBaseColumn("just_bigint", 1, toHiveType(BIGINT), BIGINT, REGULAR, Optional.empty()); HiveColumnHandle partialColumn = new HiveColumnHandle( diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java index a3d0deb6a2e6..73576e19f76e 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java @@ -19,7 +19,6 @@ import io.trino.plugin.base.classloader.ClassLoaderSafeConnectorMetadata; import io.trino.plugin.base.session.SessionPropertiesProvider; import io.trino.plugin.hive.HiveTransactionHandle; -import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.Connector; import io.trino.spi.connector.ConnectorAccessControl; import io.trino.spi.connector.ConnectorCapabilities; @@ -52,7 +51,6 @@ public class IcebergConnector { private final LifeCycleManager lifeCycleManager; private final IcebergTransactionManager transactionManager; - private final IcebergMetadataFactory metadataFactory; private final ConnectorSplitManager splitManager; private final ConnectorPageSourceProvider pageSourceProvider; private final ConnectorPageSinkProvider pageSinkProvider; @@ -68,7 +66,6 @@ public class IcebergConnector public IcebergConnector( LifeCycleManager lifeCycleManager, IcebergTransactionManager transactionManager, - IcebergMetadataFactory metadataFactory, ConnectorSplitManager splitManager, ConnectorPageSourceProvider pageSourceProvider, ConnectorPageSinkProvider pageSinkProvider, @@ -83,7 +80,6 @@ public IcebergConnector( { this.lifeCycleManager = requireNonNull(lifeCycleManager, "lifeCycleManager is null"); this.transactionManager = requireNonNull(transactionManager, "transactionManager is null"); - this.metadataFactory = requireNonNull(metadataFactory, "metadataFactory is null"); this.splitManager = requireNonNull(splitManager, "splitManager is null"); this.pageSourceProvider = requireNonNull(pageSourceProvider, "pageSourceProvider is null"); this.pageSinkProvider = requireNonNull(pageSinkProvider, "pageSinkProvider is null"); @@ -188,26 +184,21 @@ public ConnectorAccessControl getAccessControl() public ConnectorTransactionHandle beginTransaction(IsolationLevel isolationLevel, boolean readOnly, boolean autoCommit) { checkConnectorSupports(SERIALIZABLE, isolationLevel); - ConnectorTransactionHandle transaction = new HiveTransactionHandle(); - try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(getClass().getClassLoader())) { - transactionManager.put(transaction, metadataFactory.create()); - } + ConnectorTransactionHandle transaction = new HiveTransactionHandle(autoCommit); + transactionManager.begin(transaction); return transaction; } @Override public void commit(ConnectorTransactionHandle transaction) { - transactionManager.remove(transaction); + transactionManager.commit(transaction); } @Override public void rollback(ConnectorTransactionHandle transaction) { - IcebergMetadata metadata = transactionManager.remove(transaction); - try 
(ThreadContextClassLoader ignored = new ThreadContextClassLoader(getClass().getClassLoader())) { - metadata.rollback(); - } + transactionManager.rollback(transaction); } @Override diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java index d7333b3935b8..e063713ea952 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java @@ -13,36 +13,84 @@ */ package io.trino.plugin.iceberg; -import io.trino.spi.connector.ConnectorMetadata; +import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ConnectorTransactionHandle; -import java.util.Map; +import javax.annotation.concurrent.GuardedBy; +import javax.inject.Inject; + +import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; public class IcebergTransactionManager { - private final Map transactions = new ConcurrentHashMap<>(); + private final IcebergMetadataFactory metadataFactory; + private final ClassLoader classLoader; + private final ConcurrentMap transactions = new ConcurrentHashMap<>(); + + @Inject + public IcebergTransactionManager(IcebergMetadataFactory metadataFactory) + { + this(metadataFactory, Thread.currentThread().getContextClassLoader()); + } - public IcebergMetadata get(ConnectorTransactionHandle transaction) + public IcebergTransactionManager(IcebergMetadataFactory metadataFactory, ClassLoader classLoader) { - IcebergMetadata metadata = transactions.get(transaction); - checkArgument(metadata != null, "no such transaction: %s", transaction); - return metadata; + this.metadataFactory = requireNonNull(metadataFactory, "metadataFactory is null"); + this.classLoader = requireNonNull(classLoader, "classLoader is null"); } - public IcebergMetadata remove(ConnectorTransactionHandle transaction) + public void begin(ConnectorTransactionHandle transactionHandle) { - IcebergMetadata metadata = transactions.remove(transaction); - checkArgument(metadata != null, "no such transaction: %s", transaction); - return metadata; + MemoizedMetadata previousValue = transactions.putIfAbsent(transactionHandle, new MemoizedMetadata()); + checkState(previousValue == null); } - public void put(ConnectorTransactionHandle transaction, IcebergMetadata metadata) + public IcebergMetadata get(ConnectorTransactionHandle transactionHandle) { - ConnectorMetadata existing = transactions.putIfAbsent(transaction, metadata); - checkState(existing == null, "transaction already exists: %s", existing); + return transactions.get(transactionHandle).get(); + } + + public void commit(ConnectorTransactionHandle transaction) + { + MemoizedMetadata transactionalMetadata = transactions.remove(transaction); + checkArgument(transactionalMetadata != null, "no such transaction: %s", transaction); + } + + public void rollback(ConnectorTransactionHandle transaction) + { + MemoizedMetadata transactionalMetadata = transactions.remove(transaction); + checkArgument(transactionalMetadata != null, "no such transaction: %s", transaction); + transactionalMetadata.optionalGet().ifPresent(metadata -> { + try (ThreadContextClassLoader ignored = new 
ThreadContextClassLoader(classLoader)) { + metadata.rollback(); + } + }); + } + + private class MemoizedMetadata + { + @GuardedBy("this") + private IcebergMetadata metadata; + + public synchronized Optional optionalGet() + { + return Optional.ofNullable(metadata); + } + + public synchronized IcebergMetadata get() + { + if (metadata == null) { + try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { + metadata = metadataFactory.create(); + } + } + return metadata; + } } } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/InternalIcebergConnectorFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/InternalIcebergConnectorFactory.java index 586069972d49..b2de990966ad 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/InternalIcebergConnectorFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/InternalIcebergConnectorFactory.java @@ -105,7 +105,6 @@ public static Connector createConnector( LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class); IcebergTransactionManager transactionManager = injector.getInstance(IcebergTransactionManager.class); - IcebergMetadataFactory metadataFactory = injector.getInstance(IcebergMetadataFactory.class); ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class); ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class); ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class); @@ -119,7 +118,6 @@ public static Connector createConnector( return new IcebergConnector( lifeCycleManager, transactionManager, - metadataFactory, new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader), new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader), new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader), From 9f62d371b80ddeea58ef773f3892e1adae83e4be Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 25 Sep 2021 22:46:08 -0700 Subject: [PATCH 05/18] Provide connector identity during Hive metastore create --- .../main/java/io/trino/plugin/hive/HiveConnector.java | 2 +- .../java/io/trino/plugin/hive/HiveMetadataFactory.java | 3 ++- .../main/java/io/trino/plugin/hive/HiveSplitManager.java | 2 +- .../io/trino/plugin/hive/HiveTransactionManager.java | 9 +++++---- .../trino/plugin/hive/TransactionalMetadataFactory.java | 4 +++- .../hive/procedure/CreateEmptyPartitionProcedure.java | 2 +- .../trino/plugin/hive/procedure/DropStatsProcedure.java | 2 +- .../hive/procedure/RegisterPartitionProcedure.java | 2 +- .../hive/procedure/SyncPartitionMetadataProcedure.java | 2 +- .../hive/procedure/UnregisterPartitionProcedure.java | 2 +- .../SemiTransactionalLegacyAccessControlMetastore.java | 2 +- ...miTransactionalSqlStandardAccessControlMetastore.java | 6 +++--- .../test/java/io/trino/plugin/hive/AbstractTestHive.java | 4 ++-- 13 files changed, 23 insertions(+), 19 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java index a20f11d2f05f..d44cd4140132 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveConnector.java @@ -109,7 +109,7 @@ public HiveConnector( @Override public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle 
transaction) { - ConnectorMetadata metadata = transactionManager.get(transaction); + ConnectorMetadata metadata = transactionManager.get(transaction, session.getIdentity()); checkArgument(metadata != null, "no such transaction: %s", transaction); return new ClassLoaderSafeConnectorMetadata(metadata, classLoader); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java index 81d2d9805eee..8b317d1ef2d8 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java @@ -23,6 +23,7 @@ import io.trino.plugin.hive.security.AccessControlMetadataFactory; import io.trino.plugin.hive.statistics.MetastoreHiveStatisticsProvider; import io.trino.spi.connector.MetadataProvider; +import io.trino.spi.security.ConnectorIdentity; import io.trino.spi.type.TypeManager; import javax.inject.Inject; @@ -188,7 +189,7 @@ public HiveMetadataFactory( } @Override - public TransactionalMetadata create(boolean autoCommit) + public TransactionalMetadata create(ConnectorIdentity identity, boolean autoCommit) { HiveMetastoreClosure hiveMetastoreClosure = new HiveMetastoreClosure( memoizeMetastore(metastore, perTransactionCacheMaximumSize)); // per-transaction cache diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java index 73c6d6f947ce..64ca8c902afd 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java @@ -191,7 +191,7 @@ public ConnectorSplitSource getSplits( SchemaTableName tableName = hiveTable.getSchemaTableName(); // get table metadata - SemiTransactionalHiveMetastore metastore = transactionManager.get(transaction).getMetastore(); + SemiTransactionalHiveMetastore metastore = transactionManager.get(transaction, session.getIdentity()).getMetastore(); Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java index 0dd7028e8827..d0293b78e2d2 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveTransactionManager.java @@ -15,6 +15,7 @@ import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ConnectorTransactionHandle; +import io.trino.spi.security.ConnectorIdentity; import javax.annotation.concurrent.GuardedBy; import javax.inject.Inject; @@ -44,9 +45,9 @@ public void begin(ConnectorTransactionHandle transactionHandle) checkState(previousValue == null); } - public TransactionalMetadata get(ConnectorTransactionHandle transactionHandle) + public TransactionalMetadata get(ConnectorTransactionHandle transactionHandle, ConnectorIdentity identity) { - return transactions.get(transactionHandle).get(((HiveTransactionHandle) transactionHandle).isAutoCommit()); + return transactions.get(transactionHandle).get(identity, ((HiveTransactionHandle) transactionHandle).isAutoCommit()); } public void commit(ConnectorTransactionHandle transaction) @@ -81,11 +82,11 @@ public 
synchronized Optional optionalGet() return Optional.ofNullable(metadata); } - public synchronized TransactionalMetadata get(boolean autoCommit) + public synchronized TransactionalMetadata get(ConnectorIdentity identity, boolean autoCommit) { if (metadata == null) { try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(getClass().getClassLoader())) { - metadata = metadataFactory.create(autoCommit); + metadata = metadataFactory.create(identity, autoCommit); } } return metadata; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/TransactionalMetadataFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/TransactionalMetadataFactory.java index 259745c81943..11d9d26f8c13 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/TransactionalMetadataFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/TransactionalMetadataFactory.java @@ -13,7 +13,9 @@ */ package io.trino.plugin.hive; +import io.trino.spi.security.ConnectorIdentity; + public interface TransactionalMetadataFactory { - TransactionalMetadata create(boolean autoCommit); + TransactionalMetadata create(ConnectorIdentity identity, boolean autoCommit); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java index 0385e3abdb67..bac50214a23b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java @@ -106,7 +106,7 @@ public void createEmptyPartition(ConnectorSession session, ConnectorAccessContro private void doCreateEmptyPartition(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, List partitionColumnNames, List partitionValues) { - TransactionalMetadata hiveMetadata = hiveMetadataFactory.create(true); + TransactionalMetadata hiveMetadata = hiveMetadataFactory.create(session.getIdentity(), true); HiveTableHandle tableHandle = (HiveTableHandle) hiveMetadata.getTableHandle(session, new SchemaTableName(schemaName, tableName)); if (tableHandle == null) { throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, format("Table '%s' does not exist", new SchemaTableName(schemaName, tableName))); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java index 2252a0958916..c7cb0b7d006e 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java @@ -98,7 +98,7 @@ public void dropStats(ConnectorSession session, ConnectorAccessControl accessCon private void doDropStats(ConnectorSession session, ConnectorAccessControl accessControl, String schema, String table, List partitionValues) { - TransactionalMetadata hiveMetadata = hiveMetadataFactory.create(true); + TransactionalMetadata hiveMetadata = hiveMetadataFactory.create(session.getIdentity(), true); HiveTableHandle handle = (HiveTableHandle) hiveMetadata.getTableHandle(session, new SchemaTableName(schema, table)); if (handle == null) { throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, format("Table '%s' does not exist", new SchemaTableName(schema, table))); diff --git 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java index 638cc965e4c9..1acac48e2d1e 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java @@ -143,7 +143,7 @@ private void doRegisterPartition(ConnectorSession session, ConnectorAccessContro throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Partition location does not exist: " + partitionLocation); } - SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(true).getMetastore(); + SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); metastore.addPartition( session, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java index 1d783960180e..189ec187853a 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java @@ -116,7 +116,7 @@ private void doSyncPartitionMetadata(ConnectorSession session, ConnectorAccessCo SyncMode syncMode = toSyncMode(mode); HdfsContext hdfsContext = new HdfsContext(session); HiveIdentity identity = new HiveIdentity(session); - SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(true).getMetastore(); + SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName); Table table = metastore.getTable(identity, schemaName, tableName) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java index 9134449203b5..8eaef78539e6 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java @@ -107,7 +107,7 @@ private void doUnregisterPartition(ConnectorSession session, ConnectorAccessCont Partition partition = metastore.getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues) .orElseThrow(() -> new TrinoException(NOT_FOUND, format("Partition '%s' does not exist", partitionName))); - SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(true).getMetastore(); + SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); metastore.dropPartition( session, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java index 0c94c4bcd03a..5503ff55c649 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java @@ -39,7 +39,7 @@ public 
SemiTransactionalLegacyAccessControlMetastore(HiveTransactionManager tran @Override public Optional
getTable(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName) { - SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle(), context.getIdentity()).getMetastore(); return metastore.getTable(new HiveIdentity(context.getIdentity()), databaseName, tableName); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java index 6ffe79c05c72..4fd8ace784b6 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java @@ -43,21 +43,21 @@ public SemiTransactionalSqlStandardAccessControlMetastore(HiveTransactionManager @Override public Set listRoleGrants(ConnectorSecurityContext context, HivePrincipal principal) { - SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle(), context.getIdentity()).getMetastore(); return metastore.listRoleGrants(principal); } @Override public Set listTablePrivileges(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName, Optional principal) { - SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle(), context.getIdentity()).getMetastore(); return metastore.listTablePrivileges(identity, databaseName, tableName, principal); } @Override public Optional getDatabase(ConnectorSecurityContext context, String databaseName) { - SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle()).getMetastore(); + SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle(), context.getIdentity()).getMetastore(); return metastore.getDatabase(databaseName); } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java index e46613a872b8..e07bc057b533 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java @@ -988,13 +988,13 @@ public HiveTransaction(HiveTransactionManager transactionManager) @Override public ConnectorMetadata getMetadata() { - return transactionManager.get(transactionHandle); + return transactionManager.get(transactionHandle, SESSION.getIdentity()); } @Override public SemiTransactionalHiveMetastore getMetastore() { - return transactionManager.get(transactionHandle).getMetastore(); + return transactionManager.get(transactionHandle, SESSION.getIdentity()).getMetastore(); } @Override From 2dfc12d1f199ae94ab25568f6ad2d71e4008ec3e Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 20:53:23 -0700 Subject: [PATCH 06/18] Provide connector identity during Iceberg metastore create --- .../java/io/trino/plugin/iceberg/IcebergConnector.java | 2 +- 
.../io/trino/plugin/iceberg/IcebergMetadataFactory.java | 5 +++-- .../io/trino/plugin/iceberg/IcebergSplitManager.java | 2 +- .../trino/plugin/iceberg/IcebergTransactionManager.java | 9 +++++---- .../plugin/iceberg/RollbackToSnapshotProcedure.java | 2 +- .../io/trino/plugin/iceberg/TrinoCatalogFactory.java | 3 ++- 6 files changed, 13 insertions(+), 10 deletions(-) diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java index 73576e19f76e..7b142c6e0e0e 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergConnector.java @@ -104,7 +104,7 @@ public Set getCapabilities() @Override public ConnectorMetadata getMetadata(ConnectorSession session, ConnectorTransactionHandle transaction) { - ConnectorMetadata metadata = transactionManager.get(transaction); + ConnectorMetadata metadata = transactionManager.get(transaction, session.getIdentity()); return new ClassLoaderSafeConnectorMetadata(metadata, getClass().getClassLoader()); } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadataFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadataFactory.java index 47d99fc2ace5..37302ddcdfc3 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadataFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergMetadataFactory.java @@ -14,6 +14,7 @@ package io.trino.plugin.iceberg; import io.airlift.json.JsonCodec; +import io.trino.spi.security.ConnectorIdentity; import io.trino.spi.type.TypeManager; import javax.inject.Inject; @@ -37,8 +38,8 @@ public IcebergMetadataFactory( this.catalogFactory = requireNonNull(catalogFactory, "catalogFactory is null"); } - public IcebergMetadata create() + public IcebergMetadata create(ConnectorIdentity identity) { - return new IcebergMetadata(typeManager, commitTaskCodec, catalogFactory.create()); + return new IcebergMetadata(typeManager, commitTaskCodec, catalogFactory.create(identity)); } } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergSplitManager.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergSplitManager.java index d667b7eccfb2..be8e4aab6a6d 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergSplitManager.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergSplitManager.java @@ -66,7 +66,7 @@ public ConnectorSplitSource getSplits( return new FixedSplitSource(ImmutableList.of()); } - Table icebergTable = transactionManager.get(transaction).getIcebergTable(session, table.getSchemaTableName()); + Table icebergTable = transactionManager.get(transaction, session.getIdentity()).getIcebergTable(session, table.getSchemaTableName()); Duration dynamicFilteringWaitTimeout = getDynamicFilteringWaitTimeout(session); TableScan tableScan = icebergTable.newScan() diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java index e063713ea952..4b04993c9491 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergTransactionManager.java @@ -15,6 +15,7 @@ import 
io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ConnectorTransactionHandle; +import io.trino.spi.security.ConnectorIdentity; import javax.annotation.concurrent.GuardedBy; import javax.inject.Inject; @@ -51,9 +52,9 @@ public void begin(ConnectorTransactionHandle transactionHandle) checkState(previousValue == null); } - public IcebergMetadata get(ConnectorTransactionHandle transactionHandle) + public IcebergMetadata get(ConnectorTransactionHandle transactionHandle, ConnectorIdentity identity) { - return transactions.get(transactionHandle).get(); + return transactions.get(transactionHandle).get(identity); } public void commit(ConnectorTransactionHandle transaction) @@ -83,11 +84,11 @@ public synchronized Optional optionalGet() return Optional.ofNullable(metadata); } - public synchronized IcebergMetadata get() + public synchronized IcebergMetadata get(ConnectorIdentity identity) { if (metadata == null) { try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) { - metadata = metadataFactory.create(); + metadata = metadataFactory.create(identity); } } return metadata; diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/RollbackToSnapshotProcedure.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/RollbackToSnapshotProcedure.java index d649362b7196..2f44064d84a3 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/RollbackToSnapshotProcedure.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/RollbackToSnapshotProcedure.java @@ -64,7 +64,7 @@ public Procedure get() public void rollbackToSnapshot(ConnectorSession clientSession, String schema, String table, Long snapshotId) { SchemaTableName schemaTableName = new SchemaTableName(schema, table); - Table icebergTable = catalogFactory.create().loadTable(clientSession, schemaTableName); + Table icebergTable = catalogFactory.create(clientSession.getIdentity()).loadTable(clientSession, schemaTableName); icebergTable.rollback().toSnapshotId(snapshotId).commit(); } } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java index 02f8805bc203..bbff596c212f 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java @@ -20,6 +20,7 @@ import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.iceberg.catalog.IcebergTableOperationsProvider; import io.trino.spi.TrinoException; +import io.trino.spi.security.ConnectorIdentity; import io.trino.spi.type.TypeManager; import javax.inject.Inject; @@ -67,7 +68,7 @@ public TrinoCatalogFactory( this.deleteSchemaLocationsFallback = requireNonNull(hiveConfig).isDeleteSchemaLocationsFallback(); } - public TrinoCatalog create() + public TrinoCatalog create(ConnectorIdentity identity) { switch (catalogType) { case TESTING_FILE_METASTORE: From 97b277d86bb67aff0c0156bb3f1608e1bcc86cd4 Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 14:07:04 -0700 Subject: [PATCH 07/18] Remove use of injected HiveMetastore in procedures Instead, the HiveMetastoreClosure is fetched from the current SemiTransactionalHiveMetastore. This ensures that both are using the same underlying metastore objects. 
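
For illustration, a minimal sketch of the pattern this patch establishes in the procedures: only the TransactionalMetadataFactory is injected, the transactional metadata is created for the calling identity, and the raw HiveMetastoreClosure is reached through the SemiTransactionalHiveMetastore it owns, so both share the same underlying metastore objects. The class and method names below are hypothetical; the factory and metastore calls follow the diffs in this patch.

import java.util.List;
import java.util.Optional;

import io.trino.plugin.hive.HiveMetastoreClosure;
import io.trino.plugin.hive.TransactionalMetadataFactory;
import io.trino.plugin.hive.authentication.HiveIdentity;
import io.trino.plugin.hive.metastore.Partition;
import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore;
import io.trino.spi.connector.ConnectorSession;

import static java.util.Objects.requireNonNull;

// Hypothetical procedure-style helper; it receives only the factory, never a raw HiveMetastore
public class ExamplePartitionLookup
{
    private final TransactionalMetadataFactory hiveMetadataFactory;

    public ExamplePartitionLookup(TransactionalMetadataFactory hiveMetadataFactory)
    {
        this.hiveMetadataFactory = requireNonNull(hiveMetadataFactory, "hiveMetadataFactory is null");
    }

    public Optional<Partition> findPartition(ConnectorSession session, String schemaName, String tableName, List<String> partitionValues)
    {
        // create auto-commit transactional metadata for the calling identity, as the procedures now do
        SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore();
        // the closure comes from the same transactional metastore rather than from a separately injected HiveMetastore
        HiveMetastoreClosure closure = metastore.unsafeGetRawHiveMetastoreClosure();
        return closure.getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues);
    }
}
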
--- .../io/trino/plugin/hive/HiveMetastoreClosure.java | 4 ++++ .../metastore/SemiTransactionalHiveMetastore.java | 9 +++++++++ .../procedure/CreateEmptyPartitionProcedure.java | 6 ++---- .../plugin/hive/procedure/DropStatsProcedure.java | 6 ++---- .../hive/procedure/RegisterPartitionProcedure.java | 12 ++++-------- .../hive/procedure/UnregisterPartitionProcedure.java | 12 ++++-------- 6 files changed, 25 insertions(+), 24 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java index 848ee3da08fa..433061926ba2 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java @@ -51,6 +51,10 @@ public class HiveMetastoreClosure { private final HiveMetastore delegate; + /** + * Do not use this directly. Instead, the closure should be fetched from the current SemiTransactionalHiveMetastore, + * which can be fetched from the current HiveMetadata. + */ public HiveMetastoreClosure(HiveMetastore delegate) { this.delegate = requireNonNull(delegate, "delegate is null"); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java index 6cd4e3dc9443..64fd9607f949 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java @@ -197,6 +197,15 @@ public synchronized List getAllDatabases() return delegate.getAllDatabases(); } + /** + * Get the underlying metastore closure. Use this method with caution as it bypasses the current transactional state, + * so modifications made in the transaction are visible. 
+ */ + public HiveMetastoreClosure unsafeGetRawHiveMetastoreClosure() + { + return delegate; + } + public synchronized Optional getDatabase(String databaseName) { checkReadable(); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java index bac50214a23b..87a7ef0232d9 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java @@ -28,7 +28,6 @@ import io.trino.plugin.hive.TransactionalMetadata; import io.trino.plugin.hive.TransactionalMetadataFactory; import io.trino.plugin.hive.authentication.HiveIdentity; -import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.spi.TrinoException; import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ConnectorAccessControl; @@ -70,15 +69,13 @@ public class CreateEmptyPartitionProcedure List.class); private final TransactionalMetadataFactory hiveMetadataFactory; - private final HiveMetastoreClosure metastore; private final LocationService locationService; private final JsonCodec partitionUpdateJsonCodec; @Inject - public CreateEmptyPartitionProcedure(TransactionalMetadataFactory hiveMetadataFactory, HiveMetastore metastore, LocationService locationService, JsonCodec partitionUpdateCodec) + public CreateEmptyPartitionProcedure(TransactionalMetadataFactory hiveMetadataFactory, LocationService locationService, JsonCodec partitionUpdateCodec) { this.hiveMetadataFactory = requireNonNull(hiveMetadataFactory, "hiveMetadataFactory is null"); - this.metastore = new HiveMetastoreClosure(requireNonNull(metastore, "metastore is null")); this.locationService = requireNonNull(locationService, "locationService is null"); this.partitionUpdateJsonCodec = requireNonNull(partitionUpdateCodec, "partitionUpdateCodec is null"); } @@ -122,6 +119,7 @@ private void doCreateEmptyPartition(ConnectorSession session, ConnectorAccessCon throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Provided partition column names do not match actual partition column names: " + actualPartitionColumnNames); } + HiveMetastoreClosure metastore = hiveMetadata.getMetastore().unsafeGetRawHiveMetastoreClosure(); if (metastore.getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues).isPresent()) { throw new TrinoException(ALREADY_EXISTS, "Partition already exists"); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java index c7cb0b7d006e..994cb883cd2b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java @@ -21,7 +21,6 @@ import io.trino.plugin.hive.TransactionalMetadata; import io.trino.plugin.hive.TransactionalMetadataFactory; import io.trino.plugin.hive.authentication.HiveIdentity; -import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.spi.TrinoException; import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ColumnHandle; @@ -67,13 +66,11 @@ public class DropStatsProcedure List.class); private final TransactionalMetadataFactory hiveMetadataFactory; - private final HiveMetastoreClosure metastore; @Inject - 
public DropStatsProcedure(TransactionalMetadataFactory hiveMetadataFactory, HiveMetastore metastore) + public DropStatsProcedure(TransactionalMetadataFactory hiveMetadataFactory) { this.hiveMetadataFactory = requireNonNull(hiveMetadataFactory, "hiveMetadataFactory is null"); - this.metastore = new HiveMetastoreClosure(requireNonNull(metastore, "metastore is null")); } @Override @@ -113,6 +110,7 @@ private void doDropStats(ConnectorSession session, ConnectorAccessControl access .map(HiveColumnHandle::getName) .collect(toImmutableList()); + HiveMetastoreClosure metastore = hiveMetadata.getMetastore().unsafeGetRawHiveMetastoreClosure(); if (partitionValues != null) { // drop stats for specified partitions List> partitionStringValues = partitionValues.stream() diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java index 1acac48e2d1e..b205515d3540 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java @@ -18,11 +18,9 @@ import io.trino.plugin.hive.HdfsEnvironment; import io.trino.plugin.hive.HdfsEnvironment.HdfsContext; import io.trino.plugin.hive.HiveConfig; -import io.trino.plugin.hive.HiveMetastoreClosure; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.TransactionalMetadataFactory; import io.trino.plugin.hive.authentication.HiveIdentity; -import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.Partition; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.metastore.Table; @@ -73,15 +71,13 @@ public class RegisterPartitionProcedure private final boolean allowRegisterPartition; private final TransactionalMetadataFactory hiveMetadataFactory; private final HdfsEnvironment hdfsEnvironment; - private final HiveMetastoreClosure metastore; @Inject - public RegisterPartitionProcedure(HiveConfig hiveConfig, TransactionalMetadataFactory hiveMetadataFactory, HiveMetastore metastore, HdfsEnvironment hdfsEnvironment) + public RegisterPartitionProcedure(HiveConfig hiveConfig, TransactionalMetadataFactory hiveMetadataFactory, HdfsEnvironment hdfsEnvironment) { this.allowRegisterPartition = requireNonNull(hiveConfig, "hiveConfig is null").isAllowRegisterPartition(); this.hiveMetadataFactory = requireNonNull(hiveMetadataFactory, "hiveMetadataFactory is null"); this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); - this.metastore = new HiveMetastoreClosure(requireNonNull(metastore, "metastore is null")); } @Override @@ -112,6 +108,8 @@ private void doRegisterPartition(ConnectorSession session, ConnectorAccessContro throw new TrinoException(PERMISSION_DENIED, "register_partition procedure is disabled"); } + SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); + HiveIdentity identity = new HiveIdentity(session); HdfsContext hdfsContext = new HdfsContext(session); SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName); @@ -124,7 +122,7 @@ private void doRegisterPartition(ConnectorSession session, ConnectorAccessContro checkIsPartitionedTable(table); checkPartitionColumns(table, partitionColumn); - Optional partition = metastore.getPartition(new HiveIdentity(session), schemaName, tableName, 
partitionValues); + Optional partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues); if (partition.isPresent()) { String partitionName = FileUtils.makePartName(partitionColumn, partitionValues); throw new TrinoException(ALREADY_EXISTS, format("Partition [%s] is already registered with location %s", partitionName, partition.get().getStorage().getLocation())); @@ -143,8 +141,6 @@ private void doRegisterPartition(ConnectorSession session, ConnectorAccessContro throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Partition location does not exist: " + partitionLocation); } - SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); - metastore.addPartition( session, table.getDatabaseName(), diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java index 8eaef78539e6..3115de32a0c3 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java @@ -14,10 +14,8 @@ package io.trino.plugin.hive.procedure; import com.google.common.collect.ImmutableList; -import io.trino.plugin.hive.HiveMetastoreClosure; import io.trino.plugin.hive.TransactionalMetadataFactory; import io.trino.plugin.hive.authentication.HiveIdentity; -import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.Partition; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.metastore.Table; @@ -59,13 +57,11 @@ public class UnregisterPartitionProcedure List.class); private final TransactionalMetadataFactory hiveMetadataFactory; - private final HiveMetastoreClosure metastore; @Inject - public UnregisterPartitionProcedure(TransactionalMetadataFactory hiveMetadataFactory, HiveMetastore metastore) + public UnregisterPartitionProcedure(TransactionalMetadataFactory hiveMetadataFactory) { this.hiveMetadataFactory = requireNonNull(hiveMetadataFactory, "hiveMetadataFactory is null"); - this.metastore = new HiveMetastoreClosure(requireNonNull(metastore, "metastore is null")); } @Override @@ -94,6 +90,8 @@ private void doUnregisterPartition(ConnectorSession session, ConnectorAccessCont HiveIdentity identity = new HiveIdentity(session); SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName); + SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); + Table table = metastore.getTable(identity, schemaName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); @@ -104,11 +102,9 @@ private void doUnregisterPartition(ConnectorSession session, ConnectorAccessCont String partitionName = FileUtils.makePartName(partitionColumn, partitionValues); - Partition partition = metastore.getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues) + Partition partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues) .orElseThrow(() -> new TrinoException(NOT_FOUND, format("Partition '%s' does not exist", partitionName))); - SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); - metastore.dropPartition( 
session, table.getDatabaseName(), From c68b71d135185f187cdce2b473b761ddc40ff72f Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 15:32:04 -0700 Subject: [PATCH 08/18] Move RecordingHiveMetastore to recording package --- .../hive/metastore/glue/GlueMetastoreModule.java | 2 +- .../{ => recording}/RecordingHiveMetastore.java | 15 ++++++++++++++- .../RecordingHiveMetastoreModule.java | 4 ++-- .../WriteHiveMetastoreRecordingProcedure.java | 3 +-- .../metastore/thrift/ThriftMetastoreModule.java | 2 +- .../TestRecordingHiveMetastore.java | 15 ++++++++++++++- 6 files changed, 33 insertions(+), 8 deletions(-) rename plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/{ => recording}/RecordingHiveMetastore.java (97%) rename plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/{ => recording}/RecordingHiveMetastoreModule.java (96%) rename plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/{procedure => recording}/WriteHiveMetastoreRecordingProcedure.java (95%) rename plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/{ => recording}/TestRecordingHiveMetastore.java (95%) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java index 8e2baa5df115..864ef7e69a8f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java @@ -28,8 +28,8 @@ import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.HiveConfig; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.RecordingHiveMetastoreModule; import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreModule; +import io.trino.plugin.hive.metastore.recording.RecordingHiveMetastoreModule; import java.util.concurrent.Executor; import java.util.function.Predicate; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RecordingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java similarity index 97% rename from plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RecordingHiveMetastore.java rename to plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java index 6a08ccf87317..f8fdd5938106 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RecordingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package io.trino.plugin.hive.metastore; +package io.trino.plugin.hive.metastore.recording; import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; @@ -27,7 +27,20 @@ import io.trino.plugin.hive.RecordingMetastoreConfig; import io.trino.plugin.hive.acid.AcidTransaction; import io.trino.plugin.hive.authentication.HiveIdentity; +import io.trino.plugin.hive.metastore.Database; +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HivePartitionName; +import io.trino.plugin.hive.metastore.HivePrincipal; +import io.trino.plugin.hive.metastore.HivePrivilegeInfo; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; +import io.trino.plugin.hive.metastore.HiveTableName; +import io.trino.plugin.hive.metastore.Partition; +import io.trino.plugin.hive.metastore.PartitionFilter; +import io.trino.plugin.hive.metastore.PartitionWithStatistics; +import io.trino.plugin.hive.metastore.PrincipalPrivileges; +import io.trino.plugin.hive.metastore.Table; +import io.trino.plugin.hive.metastore.TablesWithParameterCacheKey; +import io.trino.plugin.hive.metastore.UserTableKey; import io.trino.spi.TrinoException; import io.trino.spi.predicate.TupleDomain; import io.trino.spi.security.RoleGrant; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RecordingHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java similarity index 96% rename from plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RecordingHiveMetastoreModule.java rename to plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java index 49f952ce6ef9..21e4864e6c9d 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RecordingHiveMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package io.trino.plugin.hive.metastore; +package io.trino.plugin.hive.metastore.recording; import com.google.inject.Binder; import com.google.inject.Key; @@ -20,8 +20,8 @@ import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.RecordingMetastoreConfig; +import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.cache.ForCachingHiveMetastore; -import io.trino.plugin.hive.metastore.procedure.WriteHiveMetastoreRecordingProcedure; import io.trino.plugin.hive.util.BlockJsonSerde; import io.trino.plugin.hive.util.HiveBlockEncodingSerde; import io.trino.spi.block.Block; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/procedure/WriteHiveMetastoreRecordingProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/WriteHiveMetastoreRecordingProcedure.java similarity index 95% rename from plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/procedure/WriteHiveMetastoreRecordingProcedure.java rename to plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/WriteHiveMetastoreRecordingProcedure.java index e2b487a55bef..f645006639bf 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/procedure/WriteHiveMetastoreRecordingProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/WriteHiveMetastoreRecordingProcedure.java @@ -11,11 +11,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package io.trino.plugin.hive.metastore.procedure; +package io.trino.plugin.hive.metastore.recording; import com.google.common.collect.ImmutableList; import com.google.common.util.concurrent.RateLimiter; -import io.trino.plugin.hive.metastore.RecordingHiveMetastore; import io.trino.spi.procedure.Procedure; import javax.inject.Inject; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java index e1f8b1870320..765c8026d9c5 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java @@ -19,8 +19,8 @@ import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.RecordingHiveMetastoreModule; import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreModule; +import io.trino.plugin.hive.metastore.recording.RecordingHiveMetastoreModule; import static io.airlift.configuration.ConfigBinder.configBinder; import static org.weakref.jmx.guice.ExportBinder.newExporter; diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/TestRecordingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java similarity index 95% rename from plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/TestRecordingHiveMetastore.java rename to plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java index df1947db894d..f2037f944610 100644 --- 
a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/TestRecordingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package io.trino.plugin.hive.metastore; +package io.trino.plugin.hive.metastore.recording; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; @@ -28,8 +28,21 @@ import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.RecordingMetastoreConfig; import io.trino.plugin.hive.authentication.HiveIdentity; +import io.trino.plugin.hive.metastore.Column; +import io.trino.plugin.hive.metastore.Database; +import io.trino.plugin.hive.metastore.HiveColumnStatistics; +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HivePrincipal; +import io.trino.plugin.hive.metastore.HivePrivilegeInfo; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; +import io.trino.plugin.hive.metastore.IntegerStatistics; +import io.trino.plugin.hive.metastore.Partition; +import io.trino.plugin.hive.metastore.SortingColumn; import io.trino.plugin.hive.metastore.SortingColumn.Order; +import io.trino.plugin.hive.metastore.Storage; +import io.trino.plugin.hive.metastore.StorageFormat; +import io.trino.plugin.hive.metastore.Table; +import io.trino.plugin.hive.metastore.UnimplementedHiveMetastore; import io.trino.plugin.hive.util.HiveBlockEncodingSerde; import io.trino.spi.block.Block; import io.trino.spi.block.TestingBlockJsonSerde; From 161144916821602e2063a35b9c9c7a8731a46bfb Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 16:03:57 -0700 Subject: [PATCH 09/18] Separate out recording from RecordingHiveMetastore Split out the recording state from RecordingHiveMetastore, so recording can be shared by multiple HiveMetastore instances. --- .../recording/HiveMetastoreRecording.java | 492 ++++++++++++++++++ .../recording/RecordingHiveMetastore.java | 411 +-------------- .../RecordingHiveMetastoreModule.java | 6 +- .../WriteHiveMetastoreRecordingProcedure.java | 8 +- .../recording/TestRecordingHiveMetastore.java | 16 +- 5 files changed, 529 insertions(+), 404 deletions(-) create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/HiveMetastoreRecording.java diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/HiveMetastoreRecording.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/HiveMetastoreRecording.java new file mode 100644 index 000000000000..507e6437f705 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/HiveMetastoreRecording.java @@ -0,0 +1,492 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore.recording; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import io.airlift.json.JsonCodec; +import io.airlift.units.Duration; +import io.trino.collect.cache.NonEvictableCache; +import io.trino.plugin.hive.PartitionStatistics; +import io.trino.plugin.hive.RecordingMetastoreConfig; +import io.trino.plugin.hive.metastore.Database; +import io.trino.plugin.hive.metastore.HivePartitionName; +import io.trino.plugin.hive.metastore.HivePrincipal; +import io.trino.plugin.hive.metastore.HivePrivilegeInfo; +import io.trino.plugin.hive.metastore.HiveTableName; +import io.trino.plugin.hive.metastore.Partition; +import io.trino.plugin.hive.metastore.PartitionFilter; +import io.trino.plugin.hive.metastore.Table; +import io.trino.plugin.hive.metastore.TablesWithParameterCacheKey; +import io.trino.plugin.hive.metastore.UserTableKey; +import io.trino.spi.TrinoException; +import io.trino.spi.security.RoleGrant; +import io.trino.spi.statistics.ColumnStatisticType; +import org.weakref.jmx.Managed; + +import javax.annotation.concurrent.Immutable; +import javax.inject.Inject; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.function.Supplier; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.common.collect.ImmutableMap.toImmutableMap; +import static io.trino.collect.cache.SafeCaches.buildNonEvictableCache; +import static io.trino.spi.StandardErrorCode.NOT_FOUND; +import static java.nio.file.Files.readAllBytes; +import static java.util.Objects.requireNonNull; +import static java.util.concurrent.TimeUnit.MILLISECONDS; + +public class HiveMetastoreRecording +{ + private final JsonCodec recordingCodec; + private final Path recordingPath; + private final boolean replay; + + private volatile Optional> allDatabases = Optional.empty(); + private volatile Optional> allRoles = Optional.empty(); + private final NonEvictableCache> databaseCache; + private final NonEvictableCache> tableCache; + private final NonEvictableCache> supportedColumnStatisticsCache; + private final NonEvictableCache tableStatisticsCache; + private final NonEvictableCache, Map> partitionStatisticsCache; + private final NonEvictableCache> allTablesCache; + private final NonEvictableCache> tablesWithParameterCache; + private final NonEvictableCache> allViewsCache; + private final NonEvictableCache> partitionCache; + private final NonEvictableCache>> partitionNamesCache; + private final NonEvictableCache>> partitionNamesByPartsCache; + private final NonEvictableCache, Map>> partitionsByNamesCache; + private final NonEvictableCache> tablePrivilegesCache; + private final NonEvictableCache> roleGrantsCache; + private final NonEvictableCache> grantedPrincipalsCache; + + @Inject + public HiveMetastoreRecording(RecordingMetastoreConfig config, JsonCodec recordingCodec) + throws IOException + { + this.recordingCodec = recordingCodec; + requireNonNull(config, "config is null"); + this.recordingPath = Paths.get(requireNonNull(config.getRecordingPath(), "recordingPath is null")); + this.replay = config.isReplay(); + + Duration recordingDuration = config.getRecordingDuration(); + 
databaseCache = createCache(replay, recordingDuration); + tableCache = createCache(replay, recordingDuration); + supportedColumnStatisticsCache = createCache(replay, recordingDuration); + tableStatisticsCache = createCache(replay, recordingDuration); + partitionStatisticsCache = createCache(replay, recordingDuration); + allTablesCache = createCache(replay, recordingDuration); + tablesWithParameterCache = createCache(replay, recordingDuration); + allViewsCache = createCache(replay, recordingDuration); + partitionCache = createCache(replay, recordingDuration); + partitionNamesCache = createCache(replay, recordingDuration); + partitionNamesByPartsCache = createCache(replay, recordingDuration); + partitionsByNamesCache = createCache(replay, recordingDuration); + tablePrivilegesCache = createCache(replay, recordingDuration); + roleGrantsCache = createCache(replay, recordingDuration); + grantedPrincipalsCache = createCache(replay, recordingDuration); + + if (replay) { + loadRecording(); + } + } + + @VisibleForTesting + void loadRecording() + throws IOException + { + Recording recording = recordingCodec.fromJson(readAllBytes(recordingPath)); + + allDatabases = recording.getAllDatabases(); + allRoles = recording.getAllRoles(); + databaseCache.putAll(toMap(recording.getDatabases())); + tableCache.putAll(toMap(recording.getTables())); + supportedColumnStatisticsCache.putAll(toMap(recording.getSupportedColumnStatistics())); + tableStatisticsCache.putAll(toMap(recording.getTableStatistics())); + partitionStatisticsCache.putAll(toMap(recording.getPartitionStatistics())); + allTablesCache.putAll(toMap(recording.getAllTables())); + tablesWithParameterCache.putAll(toMap(recording.getTablesWithParameter())); + allViewsCache.putAll(toMap(recording.getAllViews())); + partitionCache.putAll(toMap(recording.getPartitions())); + partitionNamesCache.putAll(toMap(recording.getPartitionNames())); + partitionNamesByPartsCache.putAll(toMap(recording.getPartitionNamesByParts())); + partitionsByNamesCache.putAll(toMap(recording.getPartitionsByNames())); + tablePrivilegesCache.putAll(toMap(recording.getTablePrivileges())); + roleGrantsCache.putAll(toMap(recording.getRoleGrants())); + grantedPrincipalsCache.putAll(toMap(recording.getGrantedPrincipals())); + } + + public boolean isReplay() + { + return replay; + } + + public Optional getDatabase(String databaseName, Supplier> valueSupplier) + { + return loadValue(databaseCache, databaseName, valueSupplier); + } + + public List getAllDatabases(Supplier> valueSupplier) + { + if (replay) { + return allDatabases.orElseThrow(() -> new TrinoException(NOT_FOUND, "Missing entry for all databases")); + } + + List result = valueSupplier.get(); + allDatabases = Optional.of(result); + return result; + } + + public Optional
getTable(HiveTableName hiveTableName, Supplier> valueSupplier) + { + return loadValue(tableCache, hiveTableName, valueSupplier); + } + + public Set getSupportedColumnStatistics(String type, Supplier> valueSupplier) + { + return loadValue(supportedColumnStatisticsCache, type, valueSupplier); + } + + public PartitionStatistics getTableStatistics(HiveTableName hiveTableName, Supplier valueSupplier) + { + return loadValue(tableStatisticsCache, hiveTableName, valueSupplier); + } + + public Map getPartitionStatistics(Set partitionNames, Supplier> valueSupplier) + { + return loadValue(partitionStatisticsCache, partitionNames, valueSupplier); + } + + public List getAllTables(String databaseName, Supplier> valueSupplier) + { + return loadValue(allTablesCache, databaseName, valueSupplier); + } + + public List getTablesWithParameter(TablesWithParameterCacheKey tablesWithParameterCacheKey, Supplier> valueSupplier) + { + return loadValue(tablesWithParameterCache, tablesWithParameterCacheKey, valueSupplier); + } + + public List getAllViews(String databaseName, Supplier> valueSupplier) + { + return loadValue(allViewsCache, databaseName, valueSupplier); + } + + public Optional getPartition(HivePartitionName hivePartitionName, Supplier> valueSupplier) + { + return loadValue(partitionCache, hivePartitionName, valueSupplier); + } + + public Optional> getPartitionNamesByFilter(PartitionFilter partitionFilter, Supplier>> valueSupplier) + { + return loadValue(partitionNamesByPartsCache, partitionFilter, valueSupplier); + } + + public Map> getPartitionsByNames(Set partitionNames, Supplier>> valueSupplier) + { + return loadValue(partitionsByNamesCache, partitionNames, valueSupplier); + } + + public Set listTablePrivileges(UserTableKey userTableKey, Supplier> valueSupplier) + { + return loadValue(tablePrivilegesCache, userTableKey, valueSupplier); + } + + public Set listRoles(Supplier> valueSupplier) + { + if (replay) { + return allRoles.orElseThrow(() -> new TrinoException(NOT_FOUND, "Missing entry for roles")); + } + + Set result = valueSupplier.get(); + allRoles = Optional.of(result); + return result; + } + + public Set listGrantedPrincipals(String role, Supplier> valueSupplier) + { + return loadValue(grantedPrincipalsCache, role, valueSupplier); + } + + public Set listRoleGrants(HivePrincipal principal, Supplier> valueSupplier) + { + return loadValue(roleGrantsCache, principal, valueSupplier); + } + + private static NonEvictableCache createCache(boolean reply, Duration recordingDuration) + { + if (reply) { + return buildNonEvictableCache(CacheBuilder.newBuilder()); + } + + return buildNonEvictableCache(CacheBuilder.newBuilder() + .expireAfterWrite(recordingDuration.toMillis(), MILLISECONDS)); + } + + @Managed + public void writeRecording() + throws IOException + { + if (replay) { + throw new IllegalStateException("Cannot write recording in replay mode"); + } + + Recording recording = new Recording( + allDatabases, + allRoles, + toPairs(databaseCache), + toPairs(tableCache), + toPairs(supportedColumnStatisticsCache), + toPairs(tableStatisticsCache), + toPairs(partitionStatisticsCache), + toPairs(allTablesCache), + toPairs(tablesWithParameterCache), + toPairs(allViewsCache), + toPairs(partitionCache), + toPairs(partitionNamesCache), + toPairs(partitionNamesByPartsCache), + toPairs(partitionsByNamesCache), + toPairs(tablePrivilegesCache), + toPairs(roleGrantsCache), + toPairs(grantedPrincipalsCache)); + + Files.write(recordingPath, recordingCodec.toJsonBytes(recording)); + } + + private static Map toMap(List> 
pairs) + { + return pairs.stream() + .collect(toImmutableMap(Pair::getKey, Pair::getValue)); + } + + private static List> toPairs(Cache cache) + { + return cache.asMap().entrySet().stream() + .map(entry -> new Pair<>(entry.getKey(), entry.getValue())) + .collect(toImmutableList()); + } + + private V loadValue(Cache cache, K key, Supplier valueSupplier) + { + if (replay) { + return Optional.ofNullable(cache.getIfPresent(key)) + .orElseThrow(() -> new TrinoException(NOT_FOUND, "Missing entry found for key: " + key)); + } + + V value = valueSupplier.get(); + cache.put(key, value); + return value; + } + + @Immutable + public static class Recording + { + private final Optional> allDatabases; + private final Optional> allRoles; + private final List>> databases; + private final List>> tables; + private final List>> supportedColumnStatistics; + private final List> tableStatistics; + private final List, Map>> partitionStatistics; + private final List>> allTables; + private final List>> tablesWithParameter; + private final List>> allViews; + private final List>> partitions; + private final List>>> partitionNames; + private final List>>> partitionNamesByParts; + private final List, Map>>> partitionsByNames; + private final List>> tablePrivileges; + private final List>> roleGrants; + private final List>> grantedPrincipals; + + @JsonCreator + public Recording( + @JsonProperty("allDatabases") Optional> allDatabases, + @JsonProperty("allRoles") Optional> allRoles, + @JsonProperty("databases") List>> databases, + @JsonProperty("tables") List>> tables, + @JsonProperty("supportedColumnStatistics") List>> supportedColumnStatistics, + @JsonProperty("tableStatistics") List> tableStatistics, + @JsonProperty("partitionStatistics") List, Map>> partitionStatistics, + @JsonProperty("allTables") List>> allTables, + @JsonProperty("tablesWithParameter") List>> tablesWithParameter, + @JsonProperty("allViews") List>> allViews, + @JsonProperty("partitions") List>> partitions, + @JsonProperty("partitionNames") List>>> partitionNames, + @JsonProperty("partitionNamesByParts") List>>> partitionNamesByParts, + @JsonProperty("partitionsByNames") List, Map>>> partitionsByNames, + @JsonProperty("tablePrivileges") List>> tablePrivileges, + @JsonProperty("roleGrants") List>> roleGrants, + @JsonProperty("grantedPrincipals") List>> grantedPrincipals) + { + this.allDatabases = allDatabases; + this.allRoles = allRoles; + this.databases = databases; + this.tables = tables; + this.supportedColumnStatistics = supportedColumnStatistics; + this.tableStatistics = tableStatistics; + this.partitionStatistics = partitionStatistics; + this.allTables = allTables; + this.tablesWithParameter = tablesWithParameter; + this.allViews = allViews; + this.partitions = partitions; + this.partitionNames = partitionNames; + this.partitionNamesByParts = partitionNamesByParts; + this.partitionsByNames = partitionsByNames; + this.tablePrivileges = tablePrivileges; + this.roleGrants = roleGrants; + this.grantedPrincipals = grantedPrincipals; + } + + @JsonProperty + public Optional> getAllDatabases() + { + return allDatabases; + } + + @JsonProperty + public Optional> getAllRoles() + { + return allRoles; + } + + @JsonProperty + public List>> getDatabases() + { + return databases; + } + + @JsonProperty + public List>> getTables() + { + return tables; + } + + @JsonProperty + public List>> getTablesWithParameter() + { + return tablesWithParameter; + } + + @JsonProperty + public List>> getSupportedColumnStatistics() + { + return supportedColumnStatistics; + } + + 
@JsonProperty + public List> getTableStatistics() + { + return tableStatistics; + } + + @JsonProperty + public List, Map>> getPartitionStatistics() + { + return partitionStatistics; + } + + @JsonProperty + public List>> getAllTables() + { + return allTables; + } + + @JsonProperty + public List>> getAllViews() + { + return allViews; + } + + @JsonProperty + public List>> getPartitions() + { + return partitions; + } + + @JsonProperty + public List>>> getPartitionNames() + { + return partitionNames; + } + + @JsonProperty + public List>>> getPartitionNamesByParts() + { + return partitionNamesByParts; + } + + @JsonProperty + public List, Map>>> getPartitionsByNames() + { + return partitionsByNames; + } + + @JsonProperty + public List>> getTablePrivileges() + { + return tablePrivileges; + } + + @JsonProperty + public List>> getGrantedPrincipals() + { + return grantedPrincipals; + } + + @JsonProperty + public List>> getRoleGrants() + { + return roleGrants; + } + } + + @Immutable + public static class Pair + { + private final K key; + private final V value; + + @JsonCreator + public Pair(@JsonProperty("key") K key, @JsonProperty("value") V value) + { + this.key = requireNonNull(key, "key is null"); + this.value = requireNonNull(value, "value is null"); + } + + @JsonProperty + public K getKey() + { + return key; + } + + @JsonProperty + public V getValue() + { + return value; + } + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java index f8fdd5938106..43e53bba23ed 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java @@ -13,239 +13,82 @@ */ package io.trino.plugin.hive.metastore.recording; -import com.fasterxml.jackson.annotation.JsonCreator; -import com.fasterxml.jackson.annotation.JsonProperty; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import io.airlift.json.JsonCodec; -import io.airlift.units.Duration; -import io.trino.collect.cache.NonEvictableCache; import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; -import io.trino.plugin.hive.RecordingMetastoreConfig; import io.trino.plugin.hive.acid.AcidTransaction; import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.HivePartitionName; import io.trino.plugin.hive.metastore.HivePrincipal; import io.trino.plugin.hive.metastore.HivePrivilegeInfo; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; -import io.trino.plugin.hive.metastore.HiveTableName; import io.trino.plugin.hive.metastore.Partition; -import io.trino.plugin.hive.metastore.PartitionFilter; import io.trino.plugin.hive.metastore.PartitionWithStatistics; import io.trino.plugin.hive.metastore.PrincipalPrivileges; import io.trino.plugin.hive.metastore.Table; import io.trino.plugin.hive.metastore.TablesWithParameterCacheKey; import io.trino.plugin.hive.metastore.UserTableKey; -import io.trino.spi.TrinoException; import io.trino.spi.predicate.TupleDomain; import io.trino.spi.security.RoleGrant; import 
io.trino.spi.statistics.ColumnStatisticType; import io.trino.spi.type.Type; -import org.weakref.jmx.Managed; -import javax.annotation.concurrent.Immutable; import javax.inject.Inject; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.function.Function; -import java.util.function.Supplier; -import static com.google.common.collect.ImmutableList.toImmutableList; -import static com.google.common.collect.ImmutableMap.toImmutableMap; import static com.google.common.collect.ImmutableSet.toImmutableSet; -import static io.trino.collect.cache.SafeCaches.buildNonEvictableCache; import static io.trino.plugin.hive.metastore.HivePartitionName.hivePartitionName; import static io.trino.plugin.hive.metastore.HiveTableName.hiveTableName; import static io.trino.plugin.hive.metastore.PartitionFilter.partitionFilter; -import static io.trino.spi.StandardErrorCode.NOT_FOUND; -import static java.nio.file.Files.readAllBytes; import static java.util.Objects.requireNonNull; -import static java.util.concurrent.TimeUnit.MILLISECONDS; public class RecordingHiveMetastore implements HiveMetastore { private final HiveMetastore delegate; - private final JsonCodec recordingCodec; - private final Path recordingPath; - private final boolean replay; - - private volatile Optional> allDatabases = Optional.empty(); - private volatile Optional> allRoles = Optional.empty(); - - private final NonEvictableCache> databaseCache; - private final NonEvictableCache> tableCache; - private final NonEvictableCache> supportedColumnStatisticsCache; - private final NonEvictableCache tableStatisticsCache; - private final NonEvictableCache, Map> partitionStatisticsCache; - private final NonEvictableCache> allTablesCache; - private final NonEvictableCache> tablesWithParameterCache; - private final NonEvictableCache> allViewsCache; - private final NonEvictableCache> partitionCache; - private final NonEvictableCache>> partitionNamesCache; - private final NonEvictableCache>> partitionNamesByPartsCache; - private final NonEvictableCache, Map>> partitionsByNamesCache; - private final NonEvictableCache> tablePrivilegesCache; - private final NonEvictableCache> roleGrantsCache; - private final NonEvictableCache> grantedPrincipalsCache; + private final HiveMetastoreRecording recording; @Inject - public RecordingHiveMetastore(@ForRecordingHiveMetastore HiveMetastore delegate, RecordingMetastoreConfig config, JsonCodec recordingCodec) - throws IOException + public RecordingHiveMetastore(@ForRecordingHiveMetastore HiveMetastore delegate, HiveMetastoreRecording recording) { this.delegate = requireNonNull(delegate, "delegate is null"); - this.recordingCodec = recordingCodec; - requireNonNull(config, "config is null"); - this.recordingPath = Paths.get(requireNonNull(config.getRecordingPath(), "recordingPath is null")); - this.replay = config.isReplay(); - - Duration recordingDuration = config.getRecordingDuration(); - databaseCache = createCache(replay, recordingDuration); - tableCache = createCache(replay, recordingDuration); - supportedColumnStatisticsCache = createCache(replay, recordingDuration); - tableStatisticsCache = createCache(replay, recordingDuration); - partitionStatisticsCache = createCache(replay, recordingDuration); - allTablesCache = createCache(replay, recordingDuration); - tablesWithParameterCache = createCache(replay, recordingDuration); - allViewsCache = 
createCache(replay, recordingDuration); - partitionCache = createCache(replay, recordingDuration); - partitionNamesCache = createCache(replay, recordingDuration); - partitionNamesByPartsCache = createCache(replay, recordingDuration); - partitionsByNamesCache = createCache(replay, recordingDuration); - tablePrivilegesCache = createCache(replay, recordingDuration); - roleGrantsCache = createCache(replay, recordingDuration); - grantedPrincipalsCache = createCache(replay, recordingDuration); - - if (replay) { - loadRecording(); - } - } - - @VisibleForTesting - void loadRecording() - throws IOException - { - Recording recording = recordingCodec.fromJson(readAllBytes(recordingPath)); - - allDatabases = recording.getAllDatabases(); - allRoles = recording.getAllRoles(); - databaseCache.putAll(toMap(recording.getDatabases())); - tableCache.putAll(toMap(recording.getTables())); - supportedColumnStatisticsCache.putAll(toMap(recording.getSupportedColumnStatistics())); - tableStatisticsCache.putAll(toMap(recording.getTableStatistics())); - partitionStatisticsCache.putAll(toMap(recording.getPartitionStatistics())); - allTablesCache.putAll(toMap(recording.getAllTables())); - tablesWithParameterCache.putAll(toMap(recording.getTablesWithParameter())); - allViewsCache.putAll(toMap(recording.getAllViews())); - partitionCache.putAll(toMap(recording.getPartitions())); - partitionNamesCache.putAll(toMap(recording.getPartitionNames())); - partitionNamesByPartsCache.putAll(toMap(recording.getPartitionNamesByParts())); - partitionsByNamesCache.putAll(toMap(recording.getPartitionsByNames())); - tablePrivilegesCache.putAll(toMap(recording.getTablePrivileges())); - roleGrantsCache.putAll(toMap(recording.getRoleGrants())); - grantedPrincipalsCache.putAll(toMap(recording.getGrantedPrincipals())); - } - - private static NonEvictableCache createCache(boolean reply, Duration recordingDuration) - { - if (reply) { - return buildNonEvictableCache(CacheBuilder.newBuilder()); - } - - return buildNonEvictableCache(CacheBuilder.newBuilder() - .expireAfterWrite(recordingDuration.toMillis(), MILLISECONDS)); - } - - @Managed - public void writeRecording() - throws IOException - { - if (replay) { - throw new IllegalStateException("Cannot write recording in replay mode"); - } - - Recording recording = new Recording( - allDatabases, - allRoles, - toPairs(databaseCache), - toPairs(tableCache), - toPairs(supportedColumnStatisticsCache), - toPairs(tableStatisticsCache), - toPairs(partitionStatisticsCache), - toPairs(allTablesCache), - toPairs(tablesWithParameterCache), - toPairs(allViewsCache), - toPairs(partitionCache), - toPairs(partitionNamesCache), - toPairs(partitionNamesByPartsCache), - toPairs(partitionsByNamesCache), - toPairs(tablePrivilegesCache), - toPairs(roleGrantsCache), - toPairs(grantedPrincipalsCache)); - - Files.write(recordingPath, recordingCodec.toJsonBytes(recording)); - } - - private static Map toMap(List> pairs) - { - return pairs.stream() - .collect(toImmutableMap(Pair::getKey, Pair::getValue)); - } - - private static List> toPairs(Cache cache) - { - return cache.asMap().entrySet().stream() - .map(entry -> new Pair<>(entry.getKey(), entry.getValue())) - .collect(toImmutableList()); + this.recording = requireNonNull(recording, "recording is null"); } @Override public Optional getDatabase(String databaseName) { - return loadValue(databaseCache, databaseName, () -> delegate.getDatabase(databaseName)); + return recording.getDatabase(databaseName, () -> delegate.getDatabase(databaseName)); } @Override public List 
getAllDatabases() { - if (replay) { - return allDatabases.orElseThrow(() -> new TrinoException(NOT_FOUND, "Missing entry for all databases")); - } - - List result = delegate.getAllDatabases(); - allDatabases = Optional.of(result); - return result; + return recording.getAllDatabases(delegate::getAllDatabases); } @Override public Optional
getTable(HiveIdentity identity, String databaseName, String tableName) { - return loadValue(tableCache, hiveTableName(databaseName, tableName), () -> delegate.getTable(identity, databaseName, tableName)); + return recording.getTable(hiveTableName(databaseName, tableName), () -> delegate.getTable(identity, databaseName, tableName)); } @Override public Set getSupportedColumnStatistics(Type type) { - return loadValue(supportedColumnStatisticsCache, type.getTypeSignature().toString(), () -> delegate.getSupportedColumnStatistics(type)); + return recording.getSupportedColumnStatistics(type.getTypeSignature().toString(), () -> delegate.getSupportedColumnStatistics(type)); } @Override public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) { - return loadValue( - tableStatisticsCache, + return recording.getTableStatistics( hiveTableName(table.getDatabaseName(), table.getTableName()), () -> delegate.getTableStatistics(identity, table)); } @@ -253,8 +96,7 @@ public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table @Override public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) { - return loadValue( - partitionStatisticsCache, + return recording.getPartitionStatistics( partitions.stream() .map(partition -> hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partition.getValues())) .collect(toImmutableSet()), @@ -285,20 +127,20 @@ public void updatePartitionStatistics(HiveIdentity identity, Table table, Map getAllTables(String databaseName) { - return loadValue(allTablesCache, databaseName, () -> delegate.getAllTables(databaseName)); + return recording.getAllTables(databaseName, () -> delegate.getAllTables(databaseName)); } @Override public List getTablesWithParameter(String databaseName, String parameterKey, String parameterValue) { TablesWithParameterCacheKey key = new TablesWithParameterCacheKey(databaseName, parameterKey, parameterValue); - return loadValue(tablesWithParameterCache, key, () -> delegate.getTablesWithParameter(databaseName, parameterKey, parameterValue)); + return recording.getTablesWithParameter(key, () -> delegate.getTablesWithParameter(databaseName, parameterKey, parameterValue)); } @Override public List getAllViews(String databaseName) { - return loadValue(allViewsCache, databaseName, () -> delegate.getAllViews(databaseName)); + return recording.getAllViews(databaseName, () -> delegate.getAllViews(databaseName)); } @Override @@ -402,8 +244,7 @@ public void dropColumn(HiveIdentity identity, String databaseName, String tableN @Override public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) { - return loadValue( - partitionCache, + return recording.getPartition( hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionValues), () -> delegate.getPartition(identity, table, partitionValues)); } @@ -411,8 +252,7 @@ public Optional getPartition(HiveIdentity identity, Table table, List @Override public Optional> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) { - return loadValue( - partitionNamesByPartsCache, + return recording.getPartitionNamesByFilter( partitionFilter(databaseName, tableName, columnNames, partitionKeysFilter), () -> delegate.getPartitionNamesByFilter(identity, databaseName, tableName, columnNames, partitionKeysFilter)); } @@ -420,8 +260,7 @@ public Optional> getPartitionNamesByFilter(HiveIdentity identity, 
S @Override public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) { - return loadValue( - partitionsByNamesCache, + return recording.getPartitionsByNames( partitionNames.stream() .map(partitionName -> hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionName)) .collect(toImmutableSet()), @@ -452,8 +291,7 @@ public void alterPartition(HiveIdentity identity, String databaseName, String ta @Override public Set listTablePrivileges(String databaseName, String tableName, Optional tableOwner, Optional principal) { - return loadValue( - tablePrivilegesCache, + return recording.listTablePrivileges( new UserTableKey(principal, databaseName, tableName, tableOwner), () -> delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal)); } @@ -489,13 +327,7 @@ public void dropRole(String role) @Override public Set listRoles() { - if (replay) { - return allRoles.orElseThrow(() -> new TrinoException(NOT_FOUND, "Missing entry for roles")); - } - - Set result = delegate.listRoles(); - allRoles = Optional.of(result); - return result; + return recording.listRoles(delegate::listRoles); } @Override @@ -515,8 +347,7 @@ public void revokeRoles(Set roles, Set grantees, boolean @Override public Set listGrantedPrincipals(String role) { - return loadValue( - grantedPrincipalsCache, + return recording.listGrantedPrincipals( role, () -> delegate.listGrantedPrincipals(role)); } @@ -524,8 +355,7 @@ public Set listGrantedPrincipals(String role) @Override public Set listRoleGrants(HivePrincipal principal) { - return loadValue( - roleGrantsCache, + return recording.listRoleGrants( principal, () -> delegate.listRoleGrants(principal)); } @@ -536,211 +366,10 @@ public boolean isImpersonationEnabled() return delegate.isImpersonationEnabled(); } - private V loadValue(Cache cache, K key, Supplier valueSupplier) - { - if (replay) { - return Optional.ofNullable(cache.getIfPresent(key)) - .orElseThrow(() -> new TrinoException(NOT_FOUND, "Missing entry found for key: " + key)); - } - - V value = valueSupplier.get(); - cache.put(key, value); - return value; - } - private void verifyRecordingMode() { - if (replay) { + if (recording.isReplay()) { throw new IllegalStateException("Cannot perform Metastore updates in replay mode"); } } - - @Immutable - public static class Recording - { - private final Optional> allDatabases; - private final Optional> allRoles; - private final List>> databases; - private final List>> tables; - private final List>> supportedColumnStatistics; - private final List> tableStatistics; - private final List, Map>> partitionStatistics; - private final List>> allTables; - private final List>> tablesWithParameter; - private final List>> allViews; - private final List>> partitions; - private final List>>> partitionNames; - private final List>>> partitionNamesByParts; - private final List, Map>>> partitionsByNames; - private final List>> tablePrivileges; - private final List>> roleGrants; - private final List>> grantedPrincipals; - - @JsonCreator - public Recording( - @JsonProperty("allDatabases") Optional> allDatabases, - @JsonProperty("allRoles") Optional> allRoles, - @JsonProperty("databases") List>> databases, - @JsonProperty("tables") List>> tables, - @JsonProperty("supportedColumnStatistics") List>> supportedColumnStatistics, - @JsonProperty("tableStatistics") List> tableStatistics, - @JsonProperty("partitionStatistics") List, Map>> partitionStatistics, - @JsonProperty("allTables") List>> allTables, - 
@JsonProperty("tablesWithParameter") List>> tablesWithParameter, - @JsonProperty("allViews") List>> allViews, - @JsonProperty("partitions") List>> partitions, - @JsonProperty("partitionNames") List>>> partitionNames, - @JsonProperty("partitionNamesByParts") List>>> partitionNamesByParts, - @JsonProperty("partitionsByNames") List, Map>>> partitionsByNames, - @JsonProperty("tablePrivileges") List>> tablePrivileges, - @JsonProperty("roleGrants") List>> roleGrants, - @JsonProperty("grantedPrincipals") List>> grantedPrincipals) - { - this.allDatabases = allDatabases; - this.allRoles = allRoles; - this.databases = databases; - this.tables = tables; - this.supportedColumnStatistics = supportedColumnStatistics; - this.tableStatistics = tableStatistics; - this.partitionStatistics = partitionStatistics; - this.allTables = allTables; - this.tablesWithParameter = tablesWithParameter; - this.allViews = allViews; - this.partitions = partitions; - this.partitionNames = partitionNames; - this.partitionNamesByParts = partitionNamesByParts; - this.partitionsByNames = partitionsByNames; - this.tablePrivileges = tablePrivileges; - this.roleGrants = roleGrants; - this.grantedPrincipals = grantedPrincipals; - } - - @JsonProperty - public Optional> getAllDatabases() - { - return allDatabases; - } - - @JsonProperty - public Optional> getAllRoles() - { - return allRoles; - } - - @JsonProperty - public List>> getDatabases() - { - return databases; - } - - @JsonProperty - public List>> getTables() - { - return tables; - } - - @JsonProperty - public List>> getTablesWithParameter() - { - return tablesWithParameter; - } - - @JsonProperty - public List>> getSupportedColumnStatistics() - { - return supportedColumnStatistics; - } - - @JsonProperty - public List> getTableStatistics() - { - return tableStatistics; - } - - @JsonProperty - public List, Map>> getPartitionStatistics() - { - return partitionStatistics; - } - - @JsonProperty - public List>> getAllTables() - { - return allTables; - } - - @JsonProperty - public List>> getAllViews() - { - return allViews; - } - - @JsonProperty - public List>> getPartitions() - { - return partitions; - } - - @JsonProperty - public List>>> getPartitionNames() - { - return partitionNames; - } - - @JsonProperty - public List>>> getPartitionNamesByParts() - { - return partitionNamesByParts; - } - - @JsonProperty - public List, Map>>> getPartitionsByNames() - { - return partitionsByNames; - } - - @JsonProperty - public List>> getTablePrivileges() - { - return tablePrivileges; - } - - @JsonProperty - public List>> getGrantedPrincipals() - { - return grantedPrincipals; - } - - @JsonProperty - public List>> getRoleGrants() - { - return roleGrants; - } - } - - @Immutable - public static class Pair - { - private final K key; - private final V value; - - @JsonCreator - public Pair(@JsonProperty("key") K key, @JsonProperty("value") V value) - { - this.key = requireNonNull(key, "key is null"); - this.value = requireNonNull(value, "value is null"); - } - - @JsonProperty - public K getKey() - { - return key; - } - - @JsonProperty - public V getValue() - { - return value; - } - } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java index 21e4864e6c9d..8b918623dcd5 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java +++ 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java @@ -59,11 +59,13 @@ public void configure(Binder binder) binder.bind(RecordingHiveMetastore.class).in(Scopes.SINGLETON); binder.bind(HiveBlockEncodingSerde.class).in(Scopes.SINGLETON); - jsonCodecBinder(binder).bindJsonCodec(RecordingHiveMetastore.Recording.class); + binder.bind(HiveMetastoreRecording.class).in(Scopes.SINGLETON); + jsonCodecBinder(binder).bindJsonCodec(HiveMetastoreRecording.Recording.class); jsonBinder(binder).addSerializerBinding(Block.class).to(BlockJsonSerde.Serializer.class); jsonBinder(binder).addDeserializerBinding(Block.class).to(BlockJsonSerde.Deserializer.class); - newExporter(binder).export(RecordingHiveMetastore.class).withGeneratedName(); + // export under the old name, for backwards compatibility + newExporter(binder).export(HiveMetastoreRecording.class).as(generator -> generator.generatedNameOf(RecordingHiveMetastore.class)); newSetBinder(binder, Procedure.class).addBinding().toProvider(WriteHiveMetastoreRecordingProcedure.class).in(Scopes.SINGLETON); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/WriteHiveMetastoreRecordingProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/WriteHiveMetastoreRecordingProcedure.java index f645006639bf..3417d8fd2ad9 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/WriteHiveMetastoreRecordingProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/WriteHiveMetastoreRecordingProcedure.java @@ -34,12 +34,12 @@ public class WriteHiveMetastoreRecordingProcedure "writeHiveMetastoreRecording"); private final RateLimiter rateLimiter = RateLimiter.create(0.2); - private final RecordingHiveMetastore recordingHiveMetastore; + private final HiveMetastoreRecording hiveMetastoreRecording; @Inject - public WriteHiveMetastoreRecordingProcedure(RecordingHiveMetastore recordingHiveMetastore) + public WriteHiveMetastoreRecordingProcedure(HiveMetastoreRecording hiveMetastoreRecording) { - this.recordingHiveMetastore = requireNonNull(recordingHiveMetastore, "recordingHiveMetastore is null"); + this.hiveMetastoreRecording = requireNonNull(hiveMetastoreRecording, "hiveMetastoreRecording is null"); } @Override @@ -57,7 +57,7 @@ public void writeHiveMetastoreRecording() try { // limit rate of recording dumps to prevent IO and Trino saturation rateLimiter.acquire(); - recordingHiveMetastore.writeRecording(); + hiveMetastoreRecording.writeRecording(); } catch (IOException ex) { throw new RuntimeException(ex); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java index f2037f944610..a7d2773a5906 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java @@ -142,21 +142,23 @@ public void testRecordingHiveMetastore() RecordingMetastoreConfig recordingConfig = new RecordingMetastoreConfig() .setRecordingPath(File.createTempFile("recording_test", "json").getAbsolutePath()) .setRecordingDuration(new Duration(10, TimeUnit.MINUTES)); - JsonCodec jsonCodec = createJsonCodec(); - RecordingHiveMetastore recordingHiveMetastore = new RecordingHiveMetastore(new 
TestingHiveMetastore(), recordingConfig, jsonCodec); + JsonCodec jsonCodec = createJsonCodec(); + HiveMetastoreRecording recording = new HiveMetastoreRecording(recordingConfig, jsonCodec); + RecordingHiveMetastore recordingHiveMetastore = new RecordingHiveMetastore(new TestingHiveMetastore(), recording); validateMetadata(recordingHiveMetastore); recordingHiveMetastore.dropDatabase(HIVE_CONTEXT, "other_database", true); - recordingHiveMetastore.writeRecording(); + recording.writeRecording(); RecordingMetastoreConfig replayingConfig = recordingConfig .setReplay(true); - recordingHiveMetastore = new RecordingHiveMetastore(new UnimplementedHiveMetastore(), replayingConfig, createJsonCodec()); - recordingHiveMetastore.loadRecording(); + recording = new HiveMetastoreRecording(replayingConfig, jsonCodec); + recordingHiveMetastore = new RecordingHiveMetastore(new UnimplementedHiveMetastore(), recording); + recording.loadRecording(); validateMetadata(recordingHiveMetastore); } - private JsonCodec createJsonCodec() + private JsonCodec createJsonCodec() { ObjectMapperProvider objectMapperProvider = new ObjectMapperProvider(); TypeDeserializer typeDeserializer = new TypeDeserializer(new TestingTypeManager()); @@ -165,7 +167,7 @@ private JsonCodec createJsonCodec() Block.class, new TestingBlockJsonSerde.Deserializer(new HiveBlockEncodingSerde()), Type.class, typeDeserializer)); objectMapperProvider.setJsonSerializers(ImmutableMap.of(Block.class, new TestingBlockJsonSerde.Serializer(new HiveBlockEncodingSerde()))); - JsonCodec jsonCodec = new JsonCodecFactory(objectMapperProvider).jsonCodec(RecordingHiveMetastore.Recording.class); + JsonCodec jsonCodec = new JsonCodecFactory(objectMapperProvider).jsonCodec(HiveMetastoreRecording.Recording.class); return jsonCodec; } From 7af65217578da4ad77c015bb8b444f2dc8eaf269 Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 16:34:11 -0700 Subject: [PATCH 10/18] Simplify HiveMetastore construction and bindings Replace chained Guice binding annotations ForRecordingHiveMetastore and ForCachingHiveMetastore with a generic module to apply decorators and shared caching. Convert RecordingHiveMetastore to a decorator. Add SharedHiveMetastoreCache and manually apply during Metastore creation. 
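For illustration, the new wiring applies the registered HiveMetastoreDecorator instances in ascending priority order (the recording decorator registers with PRIORITY_RECORDING) and then hands the result to the shared metastore cache, which may wrap it or return it unchanged. The sketch below is a simplified, hypothetical stand-in for the Guice-driven module in this change: the class name, the wire method, and the UnaryOperator parameter standing in for SharedHiveMetastoreCache are illustrative only.

// Illustrative sketch only -- simplified stand-ins for the real Guice-provided types.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.function.UnaryOperator;

final class MetastoreWiringSketch
{
    interface HiveMetastore {}

    interface HiveMetastoreDecorator
    {
        int getPriority();

        HiveMetastore decorate(HiveMetastore hiveMetastore);
    }

    private MetastoreWiringSketch() {}

    // Apply decorators (for example the recording decorator) in ascending priority order,
    // then pass the result to the shared-cache wrapper, which may return it unchanged.
    static HiveMetastore wire(HiveMetastore raw, List<HiveMetastoreDecorator> decorators, UnaryOperator<HiveMetastore> sharedCacheWrapper)
    {
        List<HiveMetastoreDecorator> sorted = new ArrayList<>(decorators);
        sorted.sort(Comparator.comparingInt(HiveMetastoreDecorator::getPriority));

        HiveMetastore metastore = raw;
        for (HiveMetastoreDecorator decorator : sorted) {
            metastore = decorator.decorate(metastore);
        }
        return sharedCacheWrapper.apply(metastore);
    }
}

Because decorators are applied before the cache wrapper, anything recorded or replayed sees the raw metastore, while callers of the final HiveMetastore see the cached view when caching is enabled.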
--- .../DecoratedHiveMetastoreModule.java | 83 +++++++++++++ .../metastore/HiveMetastoreDecorator.java | 5 + .../hive/metastore/HiveMetastoreModule.java | 7 +- .../RawHiveMetastore.java} | 4 +- .../alluxio/AlluxioMetastoreModule.java | 5 +- .../metastore/cache/CachingHiveMetastore.java | 16 --- .../cache/CachingHiveMetastoreConfig.java | 12 -- .../cache/CachingHiveMetastoreModule.java | 112 ------------------ .../cache/ForCachingHiveMetastore.java | 31 ----- .../cache/SharedHiveMetastoreCache.java | 101 ++++++++++++++++ .../metastore/file/FileMetastoreModule.java | 6 +- .../metastore/glue/GlueMetastoreModule.java | 9 +- .../recording/RecordingHiveMetastore.java | 6 +- .../RecordingHiveMetastoreDecorator.java | 45 +++++++ ...ecordingHiveMetastoreDecoratorModule.java} | 40 +------ .../thrift/ThriftMetastoreModule.java | 9 +- .../cache/TestCachingHiveMetastore.java | 4 +- .../plugin/iceberg/IcebergCatalogModule.java | 8 +- .../plugin/iceberg/TestIcebergPlugin.java | 11 -- 19 files changed, 258 insertions(+), 256 deletions(-) create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java rename plugin/trino-hive/src/main/java/io/trino/plugin/hive/{ForRecordingHiveMetastore.java => metastore/RawHiveMetastore.java} (92%) delete mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreModule.java delete mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ForCachingHiveMetastore.java create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreDecorator.java rename plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/{RecordingHiveMetastoreModule.java => RecordingHiveMetastoreDecoratorModule.java} (64%) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java new file mode 100644 index 000000000000..1ee2ac1284b6 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java @@ -0,0 +1,83 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore; + +import com.google.inject.Binder; +import com.google.inject.Provides; +import com.google.inject.Scopes; +import com.google.inject.Singleton; +import io.airlift.configuration.AbstractConfigurationAwareModule; +import io.trino.plugin.hive.metastore.cache.CachingHiveMetastore; +import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreConfig; +import io.trino.plugin.hive.metastore.cache.SharedHiveMetastoreCache; +import io.trino.plugin.hive.metastore.procedure.FlushHiveMetastoreCacheProcedure; +import io.trino.plugin.hive.metastore.recording.RecordingHiveMetastoreDecoratorModule; +import io.trino.spi.procedure.Procedure; + +import java.util.Comparator; +import java.util.List; +import java.util.Optional; +import java.util.Set; + +import static com.google.common.collect.ImmutableList.toImmutableList; +import static com.google.inject.multibindings.Multibinder.newSetBinder; +import static io.airlift.configuration.ConfigBinder.configBinder; +import static org.weakref.jmx.guice.ExportBinder.newExporter; + +public class DecoratedHiveMetastoreModule + extends AbstractConfigurationAwareModule +{ + @Override + protected void setup(Binder binder) + { + newSetBinder(binder, HiveMetastoreDecorator.class); + install(new RecordingHiveMetastoreDecoratorModule()); + + configBinder(binder).bindConfig(CachingHiveMetastoreConfig.class); + binder.bind(SharedHiveMetastoreCache.class).in(Scopes.SINGLETON); + newExporter(binder).export(HiveMetastore.class) + .as(generator -> generator.generatedNameOf(CachingHiveMetastore.class)); + + newSetBinder(binder, Procedure.class).addBinding().toProvider(FlushHiveMetastoreCacheProcedure.class).in(Scopes.SINGLETON); + } + + @Provides + @Singleton + public static HiveMetastore createHiveMetastore( + @RawHiveMetastore HiveMetastore metastore, + Set decorators, + SharedHiveMetastoreCache sharedHiveMetastoreCache) + { + // wrap the raw metastore with decorators like the RecordingHiveMetastore + List sortedDecorators = decorators.stream() + .sorted(Comparator.comparing(HiveMetastoreDecorator::getPriority)) + .collect(toImmutableList()); + for (HiveMetastoreDecorator decorator : sortedDecorators) { + metastore = decorator.decorate(metastore); + } + + // finally, if the shared metastore cache is enabled wrapper with a global cache + return sharedHiveMetastoreCache.createSharedHiveMetastoreCache(metastore); + } + + @Provides + @Singleton + public static Optional createHiveMetastore(HiveMetastore metastore) + { + if (metastore instanceof CachingHiveMetastore) { + return Optional.of((CachingHiveMetastore) metastore); + } + return Optional.empty(); + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreDecorator.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreDecorator.java index 21b90d05bceb..f6a6c41381f1 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreDecorator.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreDecorator.java @@ -16,5 +16,10 @@ public interface HiveMetastoreDecorator { + int PRIORITY_INTIAL = 0; + int PRIORITY_RECORDING = 100; + + int getPriority(); + HiveMetastore decorate(HiveMetastore hiveMetastore); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java index c37d7af45f6e..6583f03fd23d 100644 --- 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java @@ -17,8 +17,6 @@ import com.google.inject.Module; import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.hive.metastore.alluxio.AlluxioMetastoreModule; -import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreModule; -import io.trino.plugin.hive.metastore.cache.ForCachingHiveMetastore; import io.trino.plugin.hive.metastore.file.FileMetastoreModule; import io.trino.plugin.hive.metastore.glue.GlueMetastoreModule; import io.trino.plugin.hive.metastore.thrift.ThriftMetastoreModule; @@ -41,8 +39,7 @@ public HiveMetastoreModule(Optional metastore) protected void setup(Binder binder) { if (metastore.isPresent()) { - binder.bind(HiveMetastore.class).annotatedWith(ForCachingHiveMetastore.class).toInstance(metastore.get()); - install(new CachingHiveMetastoreModule()); + binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).toInstance(metastore.get()); } else { bindMetastoreModule("thrift", new ThriftMetastoreModule()); @@ -50,6 +47,8 @@ protected void setup(Binder binder) bindMetastoreModule("glue", new GlueMetastoreModule()); bindMetastoreModule("alluxio", new AlluxioMetastoreModule()); } + + install(new DecoratedHiveMetastoreModule()); } private void bindMetastoreModule(String name, Module module) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/ForRecordingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastore.java similarity index 92% rename from plugin/trino-hive/src/main/java/io/trino/plugin/hive/ForRecordingHiveMetastore.java rename to plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastore.java index 304c26470623..6ba123e442b7 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/ForRecordingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastore.java @@ -11,7 +11,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package io.trino.plugin.hive; +package io.trino.plugin.hive.metastore; import javax.inject.Qualifier; @@ -26,6 +26,6 @@ @Retention(RUNTIME) @Target({FIELD, PARAMETER, METHOD}) @Qualifier -public @interface ForRecordingHiveMetastore +public @interface RawHiveMetastore { } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java index c4ea691b82f0..a9506548cd8f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java @@ -25,9 +25,9 @@ import com.google.inject.Scopes; import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.RawHiveMetastore; import static io.airlift.configuration.ConfigBinder.configBinder; -import static org.weakref.jmx.guice.ExportBinder.newExporter; /** * Module for an Alluxio metastore implementation of the {@link HiveMetastore} interface. 
@@ -40,8 +40,7 @@ protected void setup(Binder binder) { configBinder(binder).bindConfig(AlluxioHiveMetastoreConfig.class); - binder.bind(HiveMetastore.class).to(AlluxioHiveMetastore.class).in(Scopes.SINGLETON); - newExporter(binder).export(HiveMetastore.class).as(generator -> generator.generatedNameOf(AlluxioHiveMetastore.class)); + binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).to(AlluxioHiveMetastore.class).in(Scopes.SINGLETON); } @Provides diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java index 515454701c26..50313456c12f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java @@ -73,7 +73,6 @@ import static com.google.common.base.Functions.identity; import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.base.Preconditions.checkArgument; -import static com.google.common.base.Preconditions.checkState; import static com.google.common.base.Throwables.throwIfInstanceOf; import static com.google.common.base.Throwables.throwIfUnchecked; import static com.google.common.cache.CacheLoader.asyncReloading; @@ -88,8 +87,6 @@ import static io.trino.plugin.hive.metastore.HiveTableName.hiveTableName; import static io.trino.plugin.hive.metastore.MetastoreUtil.makePartitionName; import static io.trino.plugin.hive.metastore.PartitionFilter.partitionFilter; -import static io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreConfig.isCacheEnabled; -import static java.lang.String.format; import static java.util.Objects.requireNonNull; import static org.apache.hadoop.hive.common.FileUtils.makePartName; @@ -123,21 +120,8 @@ public enum StatsRecording private final LoadingCache> grantedPrincipalsCache; private final LoadingCache> configValuesCache; - public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, Executor executor, CachingHiveMetastoreConfig config) - { - return cachingHiveMetastore( - delegate, - executor, - config.getMetastoreCacheTtl(), - config.getMetastoreRefreshInterval(), - config.getMetastoreCacheMaximumSize()); - } - public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, Executor executor, Duration cacheTtl, Optional refreshInterval, long maximumSize) { - checkState( - isCacheEnabled(cacheTtl, maximumSize), - format("Invalid cache parameters (cacheTtl: %s, maxSize: %s)", cacheTtl, maximumSize)); return new CachingHiveMetastore( delegate, OptionalLong.of(cacheTtl.toMillis()), diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreConfig.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreConfig.java index b1e860eb93d6..7e4f98d55fb6 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreConfig.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreConfig.java @@ -81,16 +81,4 @@ public CachingHiveMetastoreConfig setMaxMetastoreRefreshThreads(int maxMetastore this.maxMetastoreRefreshThreads = maxMetastoreRefreshThreads; return this; } - - public boolean isCacheEnabled() - { - return isCacheEnabled( - getMetastoreCacheTtl(), - getMetastoreCacheMaximumSize()); - } - - public static boolean 
isCacheEnabled(Duration metastoreCacheTtl, long metastoreCacheMaximumSize) - { - return metastoreCacheTtl.toMillis() != 0 && metastoreCacheMaximumSize != 0; - } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreModule.java deleted file mode 100644 index f4f82cfc7cb0..000000000000 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastoreModule.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.hive.metastore.cache; - -import com.google.inject.Binder; -import com.google.inject.Module; -import com.google.inject.Provides; -import com.google.inject.Scopes; -import io.trino.plugin.base.CatalogName; -import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.HiveMetastoreDecorator; -import io.trino.plugin.hive.metastore.procedure.FlushHiveMetastoreCacheProcedure; -import io.trino.spi.NodeManager; -import io.trino.spi.procedure.Procedure; - -import javax.inject.Qualifier; -import javax.inject.Singleton; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; -import java.util.Optional; - -import static com.google.inject.multibindings.Multibinder.newSetBinder; -import static com.google.inject.multibindings.OptionalBinder.newOptionalBinder; -import static io.airlift.concurrent.Threads.daemonThreadsNamed; -import static io.airlift.configuration.ConfigBinder.configBinder; -import static io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.cachingHiveMetastore; -import static java.lang.annotation.ElementType.FIELD; -import static java.lang.annotation.ElementType.METHOD; -import static java.lang.annotation.ElementType.PARAMETER; -import static java.lang.annotation.RetentionPolicy.RUNTIME; -import static java.util.concurrent.Executors.newCachedThreadPool; -import static org.weakref.jmx.guice.ExportBinder.newExporter; - -public class CachingHiveMetastoreModule - implements Module -{ - @Override - public void configure(Binder binder) - { - configBinder(binder).bindConfig(CachingHiveMetastoreConfig.class); - newOptionalBinder(binder, HiveMetastoreDecorator.class); - newExporter(binder).export(HiveMetastore.class) - .as(generator -> generator.generatedNameOf(CachingHiveMetastore.class)); - newSetBinder(binder, Procedure.class).addBinding().toProvider(FlushHiveMetastoreCacheProcedure.class).in(Scopes.SINGLETON); - } - - @Provides - @Singleton - @DecoratedForCachingHiveMetastore - public HiveMetastore createDecoratedHiveMetastore( - @ForCachingHiveMetastore HiveMetastore delegate, - Optional hiveMetastoreDecorator) - { - return hiveMetastoreDecorator - .map(decorator -> decorator.decorate(delegate)) - .orElse(delegate); - } - - @Provides - @Singleton - public Optional createCachingHiveMetastore( - NodeManager nodeManager, - @DecoratedForCachingHiveMetastore HiveMetastore delegate, - 
CachingHiveMetastoreConfig config, - CatalogName catalogName) - { - if (!nodeManager.getCurrentNode().isCoordinator() || !config.isCacheEnabled()) { - // Disable caching on workers, because there currently is no way to invalidate such a cache. - // Note: while we could skip CachingHiveMetastoreModule altogether on workers, we retain it so that catalog - // configuration can remain identical for all nodes, making cluster configuration easier. - return Optional.empty(); - } - - return Optional.of(cachingHiveMetastore( - delegate, - // Loading of cache entry in CachingHiveMetastore might trigger loading of another cache entry for different object type - // In case there are no empty executor slots, such operation would deadlock. Therefore, a reentrant executor needs to be - // used. - new ReentrantBoundedExecutor( - newCachedThreadPool(daemonThreadsNamed("hive-metastore-" + catalogName + "-%s")), - config.getMaxMetastoreRefreshThreads()), - config)); - } - - @Provides - @Singleton - public HiveMetastore createHiveMetastore( - @DecoratedForCachingHiveMetastore HiveMetastore delegate, - Optional cachingMetastore) - { - return cachingMetastore.map(metastore -> (HiveMetastore) metastore).orElse(delegate); - } - - @Retention(RUNTIME) - @Target({FIELD, PARAMETER, METHOD}) - @Qualifier - public @interface DecoratedForCachingHiveMetastore - { - } -} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ForCachingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ForCachingHiveMetastore.java deleted file mode 100644 index 25af522907d7..000000000000 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ForCachingHiveMetastore.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package io.trino.plugin.hive.metastore.cache; - -import javax.inject.Qualifier; - -import java.lang.annotation.Retention; -import java.lang.annotation.Target; - -import static java.lang.annotation.ElementType.FIELD; -import static java.lang.annotation.ElementType.METHOD; -import static java.lang.annotation.ElementType.PARAMETER; -import static java.lang.annotation.RetentionPolicy.RUNTIME; - -@Retention(RUNTIME) -@Target({FIELD, PARAMETER, METHOD}) -@Qualifier -public @interface ForCachingHiveMetastore -{ -} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java new file mode 100644 index 000000000000..5bb5520e78e0 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java @@ -0,0 +1,101 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.hive.metastore.cache; + +import io.airlift.units.Duration; +import io.trino.plugin.base.CatalogName; +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.spi.NodeManager; + +import javax.annotation.PostConstruct; +import javax.annotation.PreDestroy; +import javax.inject.Inject; + +import java.util.Optional; +import java.util.concurrent.ExecutorService; + +import static io.airlift.concurrent.Threads.daemonThreadsNamed; +import static io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.cachingHiveMetastore; +import static java.util.Objects.requireNonNull; +import static java.util.concurrent.Executors.newCachedThreadPool; + +public class SharedHiveMetastoreCache +{ + private final boolean enabled; + private final CatalogName catalogName; + private final Duration metastoreCacheTtl; + private final Optional metastoreRefreshInterval; + private final long metastoreCacheMaximumSize; + private final int maxMetastoreRefreshThreads; + + private ExecutorService executorService; + + @Inject + public SharedHiveMetastoreCache( + CatalogName catalogName, + NodeManager nodeManager, + CachingHiveMetastoreConfig config) + { + requireNonNull(nodeManager, "nodeManager is null"); + requireNonNull(config, "config is null"); + requireNonNull(catalogName, "catalogName is null"); + + this.catalogName = catalogName; + maxMetastoreRefreshThreads = config.getMaxMetastoreRefreshThreads(); + metastoreCacheTtl = config.getMetastoreCacheTtl(); + metastoreRefreshInterval = config.getMetastoreRefreshInterval(); + metastoreCacheMaximumSize = config.getMetastoreCacheMaximumSize(); + + // Disable caching on workers, because there currently is no way to invalidate such a cache. + // Note: while we could skip CachingHiveMetastoreModule altogether on workers, we retain it so that catalog + // configuration can remain identical for all nodes, making cluster configuration easier. + enabled = nodeManager.getCurrentNode().isCoordinator() && + metastoreCacheTtl.toMillis() > 0 && + metastoreCacheMaximumSize > 0; + } + + @PostConstruct + public void start() + { + if (enabled) { + executorService = newCachedThreadPool(daemonThreadsNamed("hive-metastore-" + catalogName + "-%s")); + } + } + + @PreDestroy + public void stop() + { + if (executorService != null) { + executorService.shutdownNow(); + executorService = null; + } + } + + public HiveMetastore createSharedHiveMetastoreCache(HiveMetastore hiveMetastore) + { + if (!enabled) { + return hiveMetastore; + } + + return cachingHiveMetastore( + hiveMetastore, + // Loading of cache entry in CachingHiveMetastore might trigger loading of another cache entry for different object type + // In case there are no empty executor slots, such operation would deadlock. Therefore, a reentrant executor needs to be + // used. 
+ new ReentrantBoundedExecutor(executorService, maxMetastoreRefreshThreads), + metastoreCacheTtl, + metastoreRefreshInterval, + metastoreCacheMaximumSize); + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java index c9370d66e810..65ed881d435d 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java @@ -17,8 +17,7 @@ import com.google.inject.Module; import com.google.inject.Scopes; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreModule; -import io.trino.plugin.hive.metastore.cache.ForCachingHiveMetastore; +import io.trino.plugin.hive.metastore.RawHiveMetastore; import static io.airlift.configuration.ConfigBinder.configBinder; @@ -29,7 +28,6 @@ public class FileMetastoreModule public void configure(Binder binder) { configBinder(binder).bindConfig(FileHiveMetastoreConfig.class); - binder.bind(HiveMetastore.class).annotatedWith(ForCachingHiveMetastore.class).to(FileHiveMetastore.class).in(Scopes.SINGLETON); - binder.install(new CachingHiveMetastoreModule()); + binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).to(FileHiveMetastore.class).in(Scopes.SINGLETON); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java index 864ef7e69a8f..1f3c4ef1a9fb 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java @@ -25,11 +25,9 @@ import io.airlift.concurrent.BoundedExecutor; import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.base.CatalogName; -import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.HiveConfig; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreModule; -import io.trino.plugin.hive.metastore.recording.RecordingHiveMetastoreModule; +import io.trino.plugin.hive.metastore.RawHiveMetastore; import java.util.concurrent.Executor; import java.util.function.Predicate; @@ -56,7 +54,7 @@ protected void setup(Binder binder) .setDefault().toProvider(DefaultGlueMetastoreTableFilterProvider.class).in(Scopes.SINGLETON); binder.bind(HiveMetastore.class) - .annotatedWith(ForRecordingHiveMetastore.class) + .annotatedWith(RawHiveMetastore.class) .to(GlueHiveMetastore.class) .in(Scopes.SINGLETON); @@ -68,9 +66,6 @@ protected void setup(Binder binder) HiveConfig::isTableStatisticsEnabled, getGlueStatisticsModule(DefaultGlueColumnStatisticsProviderFactory.class), getGlueStatisticsModule(DisabledGlueColumnStatisticsProviderFactory.class))); - - install(new RecordingHiveMetastoreModule()); - install(new CachingHiveMetastoreModule()); } private Module getGlueStatisticsModule(Class statisticsPrividerFactoryClass) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java index 43e53bba23ed..1a1915e45bed 100644 --- 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java @@ -13,7 +13,6 @@ */ package io.trino.plugin.hive.metastore.recording; -import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidTransaction; @@ -34,8 +33,6 @@ import io.trino.spi.statistics.ColumnStatisticType; import io.trino.spi.type.Type; -import javax.inject.Inject; - import java.util.List; import java.util.Map; import java.util.Optional; @@ -54,8 +51,7 @@ public class RecordingHiveMetastore private final HiveMetastore delegate; private final HiveMetastoreRecording recording; - @Inject - public RecordingHiveMetastore(@ForRecordingHiveMetastore HiveMetastore delegate, HiveMetastoreRecording recording) + public RecordingHiveMetastore(HiveMetastore delegate, HiveMetastoreRecording recording) { this.delegate = requireNonNull(delegate, "delegate is null"); this.recording = requireNonNull(recording, "recording is null"); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreDecorator.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreDecorator.java new file mode 100644 index 000000000000..1a123d4ffeff --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreDecorator.java @@ -0,0 +1,45 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore.recording; + +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreDecorator; + +import javax.inject.Inject; + +import static java.util.Objects.requireNonNull; + +public class RecordingHiveMetastoreDecorator + implements HiveMetastoreDecorator +{ + private final HiveMetastoreRecording recording; + + @Inject + public RecordingHiveMetastoreDecorator(HiveMetastoreRecording recording) + { + this.recording = requireNonNull(recording, "recording is null"); + } + + @Override + public int getPriority() + { + return PRIORITY_RECORDING; + } + + @Override + public HiveMetastore decorate(HiveMetastore hiveMetastore) + { + return new RecordingHiveMetastore(hiveMetastore, recording); + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreDecoratorModule.java similarity index 64% rename from plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java rename to plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreDecoratorModule.java index 8b918623dcd5..e140e836393d 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastoreDecoratorModule.java @@ -14,14 +14,10 @@ package io.trino.plugin.hive.metastore.recording; import com.google.inject.Binder; -import com.google.inject.Key; -import com.google.inject.Module; import com.google.inject.Scopes; import io.airlift.configuration.AbstractConfigurationAwareModule; -import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.RecordingMetastoreConfig; -import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.cache.ForCachingHiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreDecorator; import io.trino.plugin.hive.util.BlockJsonSerde; import io.trino.plugin.hive.util.HiveBlockEncodingSerde; import io.trino.spi.block.Block; @@ -32,31 +28,14 @@ import static io.airlift.json.JsonCodecBinder.jsonCodecBinder; import static org.weakref.jmx.guice.ExportBinder.newExporter; -public class RecordingHiveMetastoreModule +public class RecordingHiveMetastoreDecoratorModule extends AbstractConfigurationAwareModule { @Override protected void setup(Binder binder) { if (buildConfigObject(RecordingMetastoreConfig.class).getRecordingPath() != null) { - install(new RecordingModule()); - } - else { - install(new NoRecordingModule()); - } - } - - public static class RecordingModule - implements Module - { - @Override - public void configure(Binder binder) - { - binder.bind(HiveMetastore.class) - .annotatedWith(ForCachingHiveMetastore.class) - .to(RecordingHiveMetastore.class) - .in(Scopes.SINGLETON); - binder.bind(RecordingHiveMetastore.class).in(Scopes.SINGLETON); + newSetBinder(binder, HiveMetastoreDecorator.class).addBinding().to(RecordingHiveMetastoreDecorator.class).in(Scopes.SINGLETON); binder.bind(HiveBlockEncodingSerde.class).in(Scopes.SINGLETON); binder.bind(HiveMetastoreRecording.class).in(Scopes.SINGLETON); @@ -70,17 +49,4 @@ public void configure(Binder binder) newSetBinder(binder, Procedure.class).addBinding().toProvider(WriteHiveMetastoreRecordingProcedure.class).in(Scopes.SINGLETON); } } - - 
public static class NoRecordingModule - implements Module - { - @Override - public void configure(Binder binder) - { - binder.bind(HiveMetastore.class) - .annotatedWith(ForCachingHiveMetastore.class) - .to(Key.get(HiveMetastore.class, ForRecordingHiveMetastore.class)) - .in(Scopes.SINGLETON); - } - } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java index 765c8026d9c5..c6a625599c0e 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java @@ -17,10 +17,8 @@ import com.google.inject.Scopes; import com.google.inject.multibindings.OptionalBinder; import io.airlift.configuration.AbstractConfigurationAwareModule; -import io.trino.plugin.hive.ForRecordingHiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreModule; -import io.trino.plugin.hive.metastore.recording.RecordingHiveMetastoreModule; +import io.trino.plugin.hive.metastore.RawHiveMetastore; import static io.airlift.configuration.ConfigBinder.configBinder; import static org.weakref.jmx.guice.ExportBinder.newExporter; @@ -42,13 +40,10 @@ protected void setup(Binder binder) .as(generator -> generator.generatedNameOf(ThriftHiveMetastore.class)); binder.bind(HiveMetastore.class) - .annotatedWith(ForRecordingHiveMetastore.class) + .annotatedWith(RawHiveMetastore.class) .to(BridgingHiveMetastore.class) .in(Scopes.SINGLETON); - install(new RecordingHiveMetastoreModule()); - install(new CachingHiveMetastoreModule()); - install(new ThriftMetastoreAuthenticationModule()); } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java index 6a62c3d1d08b..9ba23646e0b4 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java @@ -671,7 +671,9 @@ private CachingHiveMetastore createMetastoreWithDirectExecutor(CachingHiveMetast return (CachingHiveMetastore) cachingHiveMetastore( new BridgingHiveMetastore(createThriftHiveMetastore()), directExecutor(), - config); + config.getMetastoreCacheTtl(), + config.getMetastoreRefreshInterval(), + config.getMetastoreCacheMaximumSize()); } private static class MockMetastoreLocator diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java index 85c7f093a8a6..bedf9530e6af 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java @@ -17,10 +17,10 @@ import com.google.inject.Module; import com.google.inject.Scopes; import io.airlift.configuration.AbstractConfigurationAwareModule; +import io.trino.plugin.hive.metastore.DecoratedHiveMetastoreModule; import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.RawHiveMetastore; import io.trino.plugin.hive.metastore.cache.CachingHiveMetastore; -import 
io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreModule; -import io.trino.plugin.hive.metastore.cache.ForCachingHiveMetastore; import io.trino.plugin.iceberg.catalog.IcebergTableOperationsProvider; import io.trino.plugin.iceberg.catalog.file.FileMetastoreTableOperationsProvider; import io.trino.plugin.iceberg.catalog.file.IcebergFileMetastoreCatalogModule; @@ -49,8 +49,7 @@ public IcebergCatalogModule(Optional metastore) protected void setup(Binder binder) { if (metastore.isPresent()) { - binder.bind(HiveMetastore.class).annotatedWith(ForCachingHiveMetastore.class).toInstance(metastore.get()); - install(new CachingHiveMetastoreModule()); + binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).toInstance(metastore.get()); binder.bind(IcebergTableOperationsProvider.class).to(FileMetastoreTableOperationsProvider.class).in(Scopes.SINGLETON); } else { @@ -60,6 +59,7 @@ protected void setup(Binder binder) } binder.bind(MetastoreValidator.class).asEagerSingleton(); + install(new DecoratedHiveMetastoreModule()); } public static class MetastoreValidator diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java index 860f72712838..edd50f9b010d 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java @@ -99,17 +99,6 @@ public void testRecordingMetastore() "hive.metastore-recording-path", "/tmp"), new TestingConnectorContext()) .shutdown(); - - // recording with glue - assertThatThrownBy(() -> factory.create( - "test", - Map.of( - "iceberg.catalog.type", "glue", - "hive.metastore.glue.region", "us-east-2", - "hive.metastore-recording-path", "/tmp"), - new TestingConnectorContext())) - .hasMessageContaining("Configuration property 'hive.metastore-recording-path' was not used") - .hasMessageContaining("Configuration property 'hive.metastore.glue.region' was not used"); } @Test From 1b95e7ae4d9b441db5bde77d4c13b22aa682d523 Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 18:25:07 -0700 Subject: [PATCH 11/18] Add ForwardingHiveMetastore and use in AbstractTestHiveFileSystem --- .../metastore/ForwardingHiveMetastore.java | 471 ++++++++++++++++++ .../hive/AbstractTestHiveFileSystem.java | 13 +- 2 files changed, 475 insertions(+), 9 deletions(-) create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java new file mode 100644 index 000000000000..e955f1b76022 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java @@ -0,0 +1,471 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore; + +import io.trino.plugin.hive.HivePartition; +import io.trino.plugin.hive.HiveType; +import io.trino.plugin.hive.PartitionStatistics; +import io.trino.plugin.hive.acid.AcidOperation; +import io.trino.plugin.hive.acid.AcidTransaction; +import io.trino.plugin.hive.authentication.HiveIdentity; +import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; +import io.trino.spi.connector.SchemaTableName; +import io.trino.spi.predicate.TupleDomain; +import io.trino.spi.security.RoleGrant; +import io.trino.spi.statistics.ColumnStatisticType; +import io.trino.spi.type.Type; +import org.apache.hadoop.hive.metastore.api.DataOperationType; + +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.OptionalLong; +import java.util.Set; +import java.util.function.Function; + +import static java.util.Objects.requireNonNull; + +public abstract class ForwardingHiveMetastore + implements HiveMetastore +{ + private final HiveMetastore delegate; + + protected ForwardingHiveMetastore(HiveMetastore delegate) + { + this.delegate = requireNonNull(delegate, "delegate is null"); + } + + @Override + public Optional getDatabase(String databaseName) + { + return delegate.getDatabase(databaseName); + } + + @Override + public List getAllDatabases() + { + return delegate.getAllDatabases(); + } + + @Override + public Optional
getTable(HiveIdentity identity, String databaseName, String tableName) + { + return delegate.getTable(identity, databaseName, tableName); + } + + @Override + public Set getSupportedColumnStatistics(Type type) + { + return delegate.getSupportedColumnStatistics(type); + } + + @Override + public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + { + return delegate.getTableStatistics(identity, table); + } + + @Override + public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + { + return delegate.getPartitionStatistics(identity, table, partitions); + } + + @Override + public void updateTableStatistics( + HiveIdentity identity, + String databaseName, + String tableName, + AcidTransaction transaction, + Function update) + { + delegate.updateTableStatistics(identity, databaseName, tableName, transaction, update); + } + + @Override + public void updatePartitionStatistics( + HiveIdentity identity, + Table table, + String partitionName, + Function update) + { + delegate.updatePartitionStatistics(identity, table, partitionName, update); + } + + @Override + public void updatePartitionStatistics( + HiveIdentity identity, + Table table, + Map> updates) + { + delegate.updatePartitionStatistics(identity, table, updates); + } + + @Override + public List getAllTables(String databaseName) + { + return delegate.getAllTables(databaseName); + } + + @Override + public List getTablesWithParameter(String databaseName, String parameterKey, String parameterValue) + { + return delegate.getTablesWithParameter(databaseName, parameterKey, parameterValue); + } + + @Override + public List getAllViews(String databaseName) + { + return delegate.getAllViews(databaseName); + } + + @Override + public void createDatabase(HiveIdentity identity, Database database) + { + delegate.createDatabase(identity, database); + } + + @Override + public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + { + delegate.dropDatabase(identity, databaseName, deleteData); + } + + @Override + public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + { + delegate.renameDatabase(identity, databaseName, newDatabaseName); + } + + @Override + public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + { + delegate.setDatabaseOwner(identity, databaseName, principal); + } + + @Override + public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + { + delegate.createTable(identity, table, principalPrivileges); + } + + @Override + public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + { + delegate.dropTable(identity, databaseName, tableName, deleteData); + } + + @Override + public void replaceTable( + HiveIdentity identity, + String databaseName, + String tableName, + Table newTable, + PrincipalPrivileges principalPrivileges) + { + delegate.replaceTable(identity, databaseName, tableName, newTable, principalPrivileges); + } + + @Override + public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + { + delegate.renameTable(identity, databaseName, tableName, newDatabaseName, newTableName); + } + + @Override + public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + { + delegate.commentTable(identity, databaseName, tableName, comment); + } + + @Override + public void 
setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + { + delegate.setTableOwner(identity, databaseName, tableName, principal); + } + + @Override + public void commentColumn( + HiveIdentity identity, + String databaseName, + String tableName, + String columnName, + Optional comment) + { + delegate.commentColumn(identity, databaseName, tableName, columnName, comment); + } + + @Override + public void addColumn( + HiveIdentity identity, + String databaseName, + String tableName, + String columnName, + HiveType columnType, + String columnComment) + { + delegate.addColumn(identity, databaseName, tableName, columnName, columnType, columnComment); + } + + @Override + public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + { + delegate.renameColumn(identity, databaseName, tableName, oldColumnName, newColumnName); + } + + @Override + public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + { + delegate.dropColumn(identity, databaseName, tableName, columnName); + } + + @Override + public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + { + return delegate.getPartition(identity, table, partitionValues); + } + + @Override + public Optional> getPartitionNamesByFilter( + HiveIdentity identity, + String databaseName, + String tableName, + List columnNames, + TupleDomain partitionKeysFilter) + { + return delegate.getPartitionNamesByFilter(identity, databaseName, tableName, columnNames, partitionKeysFilter); + } + + @Override + public Map> getPartitionsByNames( + HiveIdentity identity, + Table table, + List partitionNames) + { + return delegate.getPartitionsByNames(identity, table, partitionNames); + } + + @Override + public void addPartitions( + HiveIdentity identity, + String databaseName, + String tableName, + List partitions) + { + delegate.addPartitions(identity, databaseName, tableName, partitions); + } + + @Override + public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + { + delegate.dropPartition(identity, databaseName, tableName, parts, deleteData); + } + + @Override + public void alterPartition( + HiveIdentity identity, + String databaseName, + String tableName, + PartitionWithStatistics partition) + { + delegate.alterPartition(identity, databaseName, tableName, partition); + } + + @Override + public void createRole(String role, String grantor) + { + delegate.createRole(role, grantor); + } + + @Override + public void dropRole(String role) + { + delegate.dropRole(role); + } + + @Override + public Set listRoles() + { + return delegate.listRoles(); + } + + @Override + public void grantRoles(Set roles, Set grantees, boolean adminOption, HivePrincipal grantor) + { + delegate.grantRoles(roles, grantees, adminOption, grantor); + } + + @Override + public void revokeRoles(Set roles, Set grantees, boolean adminOption, HivePrincipal grantor) + { + delegate.revokeRoles(roles, grantees, adminOption, grantor); + } + + @Override + public Set listGrantedPrincipals(String role) + { + return delegate.listGrantedPrincipals(role); + } + + @Override + public Set listRoleGrants(HivePrincipal principal) + { + return delegate.listRoleGrants(principal); + } + + @Override + public void grantTablePrivileges(String databaseName, + String tableName, + String tableOwner, + HivePrincipal grantee, + HivePrincipal grantor, + Set privileges, + boolean 
grantOption) + { + delegate.grantTablePrivileges(databaseName, tableName, tableOwner, grantee, grantor, privileges, grantOption); + } + + @Override + public void revokeTablePrivileges(String databaseName, + String tableName, + String tableOwner, + HivePrincipal grantee, + HivePrincipal grantor, + Set privileges, boolean grantOption) + { + delegate.revokeTablePrivileges(databaseName, tableName, tableOwner, grantee, grantor, privileges, grantOption); + } + + @Override + public Set listTablePrivileges(String databaseName, + String tableName, + Optional tableOwner, + Optional principal) + { + return delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal); + } + + @Override + public boolean isImpersonationEnabled() + { + return delegate.isImpersonationEnabled(); + } + + @Override + public long openTransaction(HiveIdentity identity) + { + return delegate.openTransaction(identity); + } + + @Override + public void commitTransaction(HiveIdentity identity, long transactionId) + { + delegate.commitTransaction(identity, transactionId); + } + + @Override + public void sendTransactionHeartbeat(HiveIdentity identity, long transactionId) + { + delegate.sendTransactionHeartbeat(identity, transactionId); + } + + @Override + public void acquireSharedReadLock( + HiveIdentity identity, + String queryId, + long transactionId, + List fullTables, + List partitions) + { + delegate.acquireSharedReadLock(identity, queryId, transactionId, fullTables, partitions); + } + + @Override + public String getValidWriteIds(HiveIdentity identity, List tables, long currentTransactionId) + { + return delegate.getValidWriteIds(identity, tables, currentTransactionId); + } + + @Override + public Optional getConfigValue(String name) + { + return delegate.getConfigValue(name); + } + + @Override + public long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId) + { + return delegate.allocateWriteId(identity, dbName, tableName, transactionId); + } + + @Override + public void acquireTableWriteLock( + HiveIdentity identity, + String queryId, + long transactionId, + String dbName, + String tableName, + DataOperationType operation, + boolean isDynamicPartitionWrite) + { + delegate.acquireTableWriteLock(identity, queryId, transactionId, dbName, tableName, operation, isDynamicPartitionWrite); + } + + @Override + public void updateTableWriteId( + HiveIdentity identity, + String dbName, + String tableName, + long transactionId, + long writeId, + OptionalLong rowCountChange) + { + delegate.updateTableWriteId(identity, dbName, tableName, transactionId, writeId, rowCountChange); + } + + @Override + public void alterPartitions( + HiveIdentity identity, + String dbName, + String tableName, + List partitions, + long writeId) + { + delegate.alterPartitions(identity, dbName, tableName, partitions, writeId); + } + + @Override + public void addDynamicPartitions( + HiveIdentity identity, + String dbName, + String tableName, + List partitionNames, + long transactionId, + long writeId, + AcidOperation operation) + { + delegate.addDynamicPartitions(identity, dbName, tableName, partitionNames, transactionId, writeId, operation); + } + + @Override + public void alterTransactionalTable( + HiveIdentity identity, + Table table, + long transactionId, + long writeId, + PrincipalPrivileges principalPrivileges) + { + delegate.alterTransactionalTable(identity, table, transactionId, writeId, principalPrivileges); + } +} diff --git 
a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java index 7b70de11e180..75c2c58ebd07 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java @@ -29,11 +29,11 @@ import io.trino.plugin.hive.authentication.NoHdfsAuthentication; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Database; +import io.trino.plugin.hive.metastore.ForwardingHiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.MetastoreConfig; import io.trino.plugin.hive.metastore.PrincipalPrivileges; import io.trino.plugin.hive.metastore.Table; -import io.trino.plugin.hive.metastore.cache.CachingHiveMetastore; import io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore; import io.trino.plugin.hive.metastore.thrift.MetastoreLocator; import io.trino.plugin.hive.metastore.thrift.TestingMetastoreLocator; @@ -78,7 +78,6 @@ import java.util.Collection; import java.util.List; import java.util.Optional; -import java.util.OptionalLong; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledExecutorService; import java.util.stream.IntStream; @@ -182,7 +181,6 @@ protected void setup(String host, int port, String databaseName, boolean s3Selec MetastoreLocator metastoreLocator = new TestingMetastoreLocator(proxy, HostAndPort.fromParts(host, port)); - ExecutorService executor = newCachedThreadPool(daemonThreadsNamed("AbstractTestHiveFileSystem-%s")); HivePartitionManager hivePartitionManager = new HivePartitionManager(config); hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication()); @@ -536,14 +534,14 @@ private ConnectorTableHandle getTableHandle(ConnectorMetadata metadata, SchemaTa } protected static class TestingHiveMetastore - extends CachingHiveMetastore + extends ForwardingHiveMetastore { private final Path basePath; private final HdfsEnvironment hdfsEnvironment; public TestingHiveMetastore(HiveMetastore delegate, Path basePath, HdfsEnvironment hdfsEnvironment) { - super(delegate, OptionalLong.empty(), OptionalLong.empty(), Optional.empty(), 0, StatsRecording.ENABLED); + super(delegate); this.basePath = basePath; this.hdfsEnvironment = hdfsEnvironment; } @@ -583,7 +581,7 @@ public void dropTable(HiveIdentity identity, String databaseName, String tableNa // drop table replaceTable(identity, databaseName, tableName, tableBuilder.build(), NO_PRIVILEGES); - delegate.dropTable(identity, databaseName, tableName, false); + super.dropTable(identity, databaseName, tableName, false); // drop data if (deleteData) { @@ -596,9 +594,6 @@ public void dropTable(HiveIdentity identity, String databaseName, String tableNa catch (IOException e) { throw new UncheckedIOException(e); } - finally { - invalidateTable(databaseName, tableName); - } } public void updateTableLocation(String databaseName, String tableName, String location) From af0491cff10b4c849150543cacd55b4eb7f38d3f Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 20:29:43 -0700 Subject: [PATCH 12/18] Add HiveMetastoreFactory --- .../plugin/hive/HiveMetadataFactory.java | 14 +++--- .../plugin/hive/HivePageSinkProvider.java | 10 ++-- .../DecoratedHiveMetastoreModule.java | 50 ++++++++++++++----- .../hive/metastore/HiveMetastoreFactory.java | 43 ++++++++++++++++ 
.../hive/metastore/HiveMetastoreModule.java | 2 +- ...tore.java => RawHiveMetastoreFactory.java} | 2 +- .../alluxio/AlluxioHiveMetastore.java | 2 - .../alluxio/AlluxioHiveMetastoreFactory.java | 40 +++++++++++++++ .../alluxio/AlluxioMetastoreModule.java | 5 +- .../cache/SharedHiveMetastoreCache.java | 41 +++++++++++++-- .../metastore/file/FileHiveMetastore.java | 2 - .../file/FileHiveMetastoreFactory.java | 41 +++++++++++++++ .../metastore/file/FileMetastoreModule.java | 6 +-- .../glue/GlueHiveMetastoreFactory.java | 49 ++++++++++++++++++ .../metastore/glue/GlueMetastoreModule.java | 16 +++--- .../thrift/BridgingHiveMetastore.java | 3 -- .../thrift/BridgingHiveMetastoreFactory.java | 39 +++++++++++++++ .../thrift/ThriftMetastoreModule.java | 10 ++-- .../trino/plugin/hive/AbstractTestHive.java | 5 +- .../hive/AbstractTestHiveFileSystem.java | 5 +- .../trino/plugin/hive/TestHivePageSink.java | 3 +- .../plugin/iceberg/IcebergCatalogModule.java | 11 ++-- .../plugin/iceberg/TrinoCatalogFactory.java | 10 ++-- .../plugin/iceberg/TrinoHiveCatalog.java | 9 +++- .../plugin/iceberg/TestIcebergPlugin.java | 2 +- 25 files changed, 347 insertions(+), 73 deletions(-) create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java rename plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/{RawHiveMetastore.java => RawHiveMetastoreFactory.java} (96%) create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java index 8b317d1ef2d8..2648acd9646e 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java @@ -17,7 +17,7 @@ import io.airlift.json.JsonCodec; import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; -import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.MetastoreConfig; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.security.AccessControlMetadataFactory; @@ -50,7 +50,7 @@ public class HiveMetadataFactory private final boolean translateHiveViews; private final boolean hideDeltaLakeTables; private final long perTransactionCacheMaximumSize; - private final HiveMetastore metastore; + private final HiveMetastoreFactory metastoreFactory; private final HdfsEnvironment hdfsEnvironment; private final HivePartitionManager partitionManager; private final TypeManager typeManager; @@ -74,7 +74,7 @@ public HiveMetadataFactory( CatalogName catalogName, HiveConfig hiveConfig, MetastoreConfig metastoreConfig, - HiveMetastore metastore, + HiveMetastoreFactory metastoreFactory, HdfsEnvironment hdfsEnvironment, HivePartitionManager partitionManager, ExecutorService executorService, @@ -92,7 +92,7 @@ public HiveMetadataFactory( { this( catalogName, - metastore, + metastoreFactory, hdfsEnvironment, 
partitionManager, hiveConfig.getMaxConcurrentFileRenames(), @@ -123,7 +123,7 @@ public HiveMetadataFactory( public HiveMetadataFactory( CatalogName catalogName, - HiveMetastore metastore, + HiveMetastoreFactory metastoreFactory, HdfsEnvironment hdfsEnvironment, HivePartitionManager partitionManager, int maxConcurrentFileRenames, @@ -161,7 +161,7 @@ public HiveMetadataFactory( this.hideDeltaLakeTables = hideDeltaLakeTables; this.perTransactionCacheMaximumSize = perTransactionCacheMaximumSize; - this.metastore = requireNonNull(metastore, "metastore is null"); + this.metastoreFactory = requireNonNull(metastoreFactory, "metastoreFactory is null"); this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); this.partitionManager = requireNonNull(partitionManager, "partitionManager is null"); this.typeManager = requireNonNull(typeManager, "typeManager is null"); @@ -192,7 +192,7 @@ public HiveMetadataFactory( public TransactionalMetadata create(ConnectorIdentity identity, boolean autoCommit) { HiveMetastoreClosure hiveMetastoreClosure = new HiveMetastoreClosure( - memoizeMetastore(metastore, perTransactionCacheMaximumSize)); // per-transaction cache + memoizeMetastore(metastoreFactory.createMetastore(), perTransactionCacheMaximumSize)); // per-transaction cache SemiTransactionalHiveMetastore metastore = new SemiTransactionalHiveMetastore( hdfsEnvironment, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java index 29b9736b6251..cf689e6386f3 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java @@ -21,7 +21,7 @@ import io.airlift.json.JsonCodec; import io.airlift.units.DataSize; import io.trino.plugin.hive.authentication.HiveIdentity; -import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.HivePageSinkMetadataProvider; import io.trino.plugin.hive.metastore.SortingColumn; import io.trino.spi.NodeManager; @@ -56,7 +56,7 @@ public class HivePageSinkProvider private final Set fileWriterFactories; private final HdfsEnvironment hdfsEnvironment; private final PageSorter pageSorter; - private final HiveMetastore metastore; + private final HiveMetastoreFactory metastoreFactory; private final PageIndexerFactory pageIndexerFactory; private final TypeManager typeManager; private final int maxOpenPartitions; @@ -77,7 +77,7 @@ public HivePageSinkProvider( Set fileWriterFactories, HdfsEnvironment hdfsEnvironment, PageSorter pageSorter, - HiveMetastore metastore, + HiveMetastoreFactory metastoreFactory, PageIndexerFactory pageIndexerFactory, TypeManager typeManager, HiveConfig config, @@ -91,7 +91,7 @@ public HivePageSinkProvider( this.fileWriterFactories = ImmutableSet.copyOf(requireNonNull(fileWriterFactories, "fileWriterFactories is null")); this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); this.pageSorter = requireNonNull(pageSorter, "pageSorter is null"); - this.metastore = requireNonNull(metastore, "metastore is null"); + this.metastoreFactory = requireNonNull(metastoreFactory, "metastoreFactory is null"); this.pageIndexerFactory = requireNonNull(pageIndexerFactory, "pageIndexerFactory is null"); this.typeManager = requireNonNull(typeManager, "typeManager is null"); this.maxOpenPartitions = 
config.getMaxPartitionsPerWriter(); @@ -156,7 +156,7 @@ private ConnectorPageSink createPageSink(HiveWritableTableHandle handle, boolean session.getQueryId(), new HivePageSinkMetadataProvider( handle.getPageSinkMetadata(), - new HiveMetastoreClosure(memoizeMetastore(metastore, perTransactionMetastoreCacheMaximumSize)), + new HiveMetastoreClosure(memoizeMetastore(metastoreFactory.createMetastore(), perTransactionMetastoreCacheMaximumSize)), new HiveIdentity(session)), typeManager, hdfsEnvironment, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java index 1ee2ac1284b6..9d2da3600904 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java @@ -21,6 +21,7 @@ import io.trino.plugin.hive.metastore.cache.CachingHiveMetastore; import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreConfig; import io.trino.plugin.hive.metastore.cache.SharedHiveMetastoreCache; +import io.trino.plugin.hive.metastore.cache.SharedHiveMetastoreCache.CachingHiveMetastoreFactory; import io.trino.plugin.hive.metastore.procedure.FlushHiveMetastoreCacheProcedure; import io.trino.plugin.hive.metastore.recording.RecordingHiveMetastoreDecoratorModule; import io.trino.spi.procedure.Procedure; @@ -33,6 +34,7 @@ import static com.google.common.collect.ImmutableList.toImmutableList; import static com.google.inject.multibindings.Multibinder.newSetBinder; import static io.airlift.configuration.ConfigBinder.configBinder; +import static java.util.Objects.requireNonNull; import static org.weakref.jmx.guice.ExportBinder.newExporter; public class DecoratedHiveMetastoreModule @@ -46,7 +48,8 @@ protected void setup(Binder binder) configBinder(binder).bindConfig(CachingHiveMetastoreConfig.class); binder.bind(SharedHiveMetastoreCache.class).in(Scopes.SINGLETON); - newExporter(binder).export(HiveMetastore.class) + // export under the old name, for backwards compatibility + newExporter(binder).export(HiveMetastoreFactory.class) .as(generator -> generator.generatedNameOf(CachingHiveMetastore.class)); newSetBinder(binder, Procedure.class).addBinding().toProvider(FlushHiveMetastoreCacheProcedure.class).in(Scopes.SINGLETON); @@ -54,29 +57,50 @@ protected void setup(Binder binder) @Provides @Singleton - public static HiveMetastore createHiveMetastore( - @RawHiveMetastore HiveMetastore metastore, + public static HiveMetastoreFactory createHiveMetastore( + @RawHiveMetastoreFactory HiveMetastoreFactory metastoreFactory, Set decorators, SharedHiveMetastoreCache sharedHiveMetastoreCache) { // wrap the raw metastore with decorators like the RecordingHiveMetastore - List sortedDecorators = decorators.stream() - .sorted(Comparator.comparing(HiveMetastoreDecorator::getPriority)) - .collect(toImmutableList()); - for (HiveMetastoreDecorator decorator : sortedDecorators) { - metastore = decorator.decorate(metastore); + metastoreFactory = new DecoratingHiveMetastoreFactory(metastoreFactory, decorators); + + // cross TX metastore cache is enabled wrapper with caching metastore + return sharedHiveMetastoreCache.createCachingHiveMetastoreFactory(metastoreFactory); + } + + private static class DecoratingHiveMetastoreFactory + implements HiveMetastoreFactory + { + private final HiveMetastoreFactory delegate; + private final List sortedDecorators; + + 
public DecoratingHiveMetastoreFactory(HiveMetastoreFactory delegate, Set decorators) + { + this.delegate = requireNonNull(delegate, "delegate is null"); + + this.sortedDecorators = requireNonNull(decorators, "decorators is null").stream() + .sorted(Comparator.comparing(HiveMetastoreDecorator::getPriority)) + .collect(toImmutableList()); } - // finally, if the shared metastore cache is enabled wrapper with a global cache - return sharedHiveMetastoreCache.createSharedHiveMetastoreCache(metastore); + @Override + public HiveMetastore createMetastore() + { + HiveMetastore metastore = delegate.createMetastore(); + for (HiveMetastoreDecorator decorator : sortedDecorators) { + metastore = decorator.decorate(metastore); + } + return metastore; + } } @Provides @Singleton - public static Optional createHiveMetastore(HiveMetastore metastore) + public static Optional createHiveMetastore(HiveMetastoreFactory metastoreFactory) { - if (metastore instanceof CachingHiveMetastore) { - return Optional.of((CachingHiveMetastore) metastore); + if (metastoreFactory instanceof CachingHiveMetastoreFactory) { + return Optional.of((((CachingHiveMetastoreFactory) metastoreFactory).getMetastore())); } return Optional.empty(); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java new file mode 100644 index 000000000000..7a3ab437d543 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java @@ -0,0 +1,43 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore; + +import static java.util.Objects.requireNonNull; + +public interface HiveMetastoreFactory +{ + HiveMetastore createMetastore(); + + static HiveMetastoreFactory ofInstance(HiveMetastore metastore) + { + return new StaticHiveMetastoreFactory(metastore); + } + + class StaticHiveMetastoreFactory + implements HiveMetastoreFactory + { + private final HiveMetastore metastore; + + private StaticHiveMetastoreFactory(HiveMetastore metastore) + { + this.metastore = requireNonNull(metastore, "metastore is null"); + } + + @Override + public HiveMetastore createMetastore() + { + return metastore; + } + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java index 6583f03fd23d..f835e3166d1e 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreModule.java @@ -39,7 +39,7 @@ public HiveMetastoreModule(Optional metastore) protected void setup(Binder binder) { if (metastore.isPresent()) { - binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).toInstance(metastore.get()); + binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).toInstance(HiveMetastoreFactory.ofInstance(metastore.get())); } else { bindMetastoreModule("thrift", new ThriftMetastoreModule()); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastoreFactory.java similarity index 96% rename from plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastore.java rename to plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastoreFactory.java index 6ba123e442b7..9931e8db1910 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/RawHiveMetastoreFactory.java @@ -26,6 +26,6 @@ @Retention(RUNTIME) @Target({FIELD, PARAMETER, METHOD}) @Qualifier -public @interface RawHiveMetastore +public @interface RawHiveMetastoreFactory { } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java index b9dfcfbdb48f..b324ebf031f4 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java @@ -21,7 +21,6 @@ import alluxio.grpc.table.TableInfo; import alluxio.grpc.table.layout.hive.PartitionInfo; import com.google.common.collect.ImmutableMap; -import com.google.inject.Inject; import io.trino.plugin.hive.HiveBasicStatistics; import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; @@ -73,7 +72,6 @@ public class AlluxioHiveMetastore { private final TableMasterClient client; - @Inject public AlluxioHiveMetastore(TableMasterClient client, MetastoreConfig metastoreConfig) { this.client = requireNonNull(client, "client is null"); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java new file mode 100644 index 000000000000..60afe0c22652 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java @@ -0,0 +1,40 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.hive.metastore.alluxio; + +import alluxio.client.table.TableMasterClient; +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.plugin.hive.metastore.MetastoreConfig; + +import javax.inject.Inject; + +public class AlluxioHiveMetastoreFactory + implements HiveMetastoreFactory +{ + private final AlluxioHiveMetastore metastore; + + @Inject + public AlluxioHiveMetastoreFactory(TableMasterClient client, MetastoreConfig metastoreConfig) + { + // Alluxio metastore does not support impersonation, so just create a single shared instance + metastore = new AlluxioHiveMetastore(client, metastoreConfig); + } + + @Override + public HiveMetastore createMetastore() + { + return metastore; + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java index a9506548cd8f..f7886f03abb1 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioMetastoreModule.java @@ -25,7 +25,8 @@ import com.google.inject.Scopes; import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.RawHiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.plugin.hive.metastore.RawHiveMetastoreFactory; import static io.airlift.configuration.ConfigBinder.configBinder; @@ -40,7 +41,7 @@ protected void setup(Binder binder) { configBinder(binder).bindConfig(AlluxioHiveMetastoreConfig.class); - binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).to(AlluxioHiveMetastore.class).in(Scopes.SINGLETON); + binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).to(AlluxioHiveMetastoreFactory.class).in(Scopes.SINGLETON); } @Provides diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java index 5bb5520e78e0..c27e2e9e9d66 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java @@ -16,7 +16,10 @@ import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; import 
io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.spi.NodeManager; +import org.weakref.jmx.Flatten; +import org.weakref.jmx.Nested; import javax.annotation.PostConstruct; import javax.annotation.PreDestroy; @@ -82,14 +85,19 @@ public void stop() } } - public HiveMetastore createSharedHiveMetastoreCache(HiveMetastore hiveMetastore) + public boolean isEnabled() + { + return enabled; + } + + public HiveMetastoreFactory createCachingHiveMetastoreFactory(HiveMetastoreFactory metastoreFactory) { if (!enabled) { - return hiveMetastore; + return metastoreFactory; } - return cachingHiveMetastore( - hiveMetastore, + CachingHiveMetastore cachingHiveMetastore = cachingHiveMetastore( + metastoreFactory.createMetastore(), // Loading of cache entry in CachingHiveMetastore might trigger loading of another cache entry for different object type // In case there are no empty executor slots, such operation would deadlock. Therefore, a reentrant executor needs to be // used. @@ -97,5 +105,30 @@ public HiveMetastore createSharedHiveMetastoreCache(HiveMetastore hiveMetastore) metastoreCacheTtl, metastoreRefreshInterval, metastoreCacheMaximumSize); + return new CachingHiveMetastoreFactory(cachingHiveMetastore); + } + + public static class CachingHiveMetastoreFactory + implements HiveMetastoreFactory + { + private final CachingHiveMetastore metastore; + + private CachingHiveMetastoreFactory(CachingHiveMetastore metastore) + { + this.metastore = requireNonNull(metastore, "metastore is null"); + } + + @Override + public HiveMetastore createMetastore() + { + return metastore; + } + + @Nested + @Flatten + public CachingHiveMetastore getMetastore() + { + return metastore; + } } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java index 6230cb56066d..01b659da7e60 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java @@ -69,7 +69,6 @@ import javax.annotation.concurrent.GuardedBy; import javax.annotation.concurrent.ThreadSafe; -import javax.inject.Inject; import java.io.File; import java.io.IOException; @@ -173,7 +172,6 @@ public static FileHiveMetastore createTestingFileHiveMetastore(File catalogDirec .setMetastoreUser("test")); } - @Inject public FileHiveMetastore(NodeVersion nodeVersion, HdfsEnvironment hdfsEnvironment, MetastoreConfig metastoreConfig, FileHiveMetastoreConfig config) { this.currentVersion = requireNonNull(nodeVersion, "nodeVersion is null").toString(); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java new file mode 100644 index 000000000000..506138cd355a --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java @@ -0,0 +1,41 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.hive.metastore.file; + +import io.trino.plugin.hive.HdfsEnvironment; +import io.trino.plugin.hive.NodeVersion; +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.plugin.hive.metastore.MetastoreConfig; + +import javax.inject.Inject; + +public class FileHiveMetastoreFactory + implements HiveMetastoreFactory +{ + private final FileHiveMetastore metastore; + + @Inject + public FileHiveMetastoreFactory(NodeVersion nodeVersion, HdfsEnvironment hdfsEnvironment, MetastoreConfig metastoreConfig, FileHiveMetastoreConfig config) + { + // file metastore does not support impersonation, so just create a single shared instance + metastore = new FileHiveMetastore(nodeVersion, hdfsEnvironment, metastoreConfig, config); + } + + @Override + public HiveMetastore createMetastore() + { + return metastore; + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java index 65ed881d435d..a54e296a03a9 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileMetastoreModule.java @@ -16,8 +16,8 @@ import com.google.inject.Binder; import com.google.inject.Module; import com.google.inject.Scopes; -import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.RawHiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.plugin.hive.metastore.RawHiveMetastoreFactory; import static io.airlift.configuration.ConfigBinder.configBinder; @@ -28,6 +28,6 @@ public class FileMetastoreModule public void configure(Binder binder) { configBinder(binder).bindConfig(FileHiveMetastoreConfig.class); - binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).to(FileHiveMetastore.class).in(Scopes.SINGLETON); + binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).to(FileHiveMetastoreFactory.class).in(Scopes.SINGLETON); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java new file mode 100644 index 000000000000..d9923f255610 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java @@ -0,0 +1,49 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.hive.metastore.glue; + +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import org.weakref.jmx.Flatten; +import org.weakref.jmx.Managed; + +import javax.inject.Inject; + +import static java.util.Objects.requireNonNull; + +public class GlueHiveMetastoreFactory + implements HiveMetastoreFactory +{ + private final GlueHiveMetastore metastore; + + // Glue metastore does not support impersonation, so just use single shared instance + @Inject + public GlueHiveMetastoreFactory(GlueHiveMetastore metastore) + { + this.metastore = requireNonNull(metastore, "metastore is null"); + } + + @Flatten + @Managed + public GlueHiveMetastore getMetastore() + { + return metastore; + } + + @Override + public HiveMetastore createMetastore() + { + return metastore; + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java index 1f3c4ef1a9fb..3ac9d2eba86b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueMetastoreModule.java @@ -26,8 +26,8 @@ import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.base.CatalogName; import io.trino.plugin.hive.HiveConfig; -import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.RawHiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.plugin.hive.metastore.RawHiveMetastoreFactory; import java.util.concurrent.Executor; import java.util.function.Predicate; @@ -53,13 +53,15 @@ protected void setup(Binder binder) newOptionalBinder(binder, Key.get(new TypeLiteral>() {}, ForGlueHiveMetastore.class)) .setDefault().toProvider(DefaultGlueMetastoreTableFilterProvider.class).in(Scopes.SINGLETON); - binder.bind(HiveMetastore.class) - .annotatedWith(RawHiveMetastore.class) - .to(GlueHiveMetastore.class) + binder.bind(GlueHiveMetastore.class).in(Scopes.SINGLETON); + binder.bind(HiveMetastoreFactory.class) + .annotatedWith(RawHiveMetastoreFactory.class) + .to(GlueHiveMetastoreFactory.class) .in(Scopes.SINGLETON); - binder.bind(GlueHiveMetastore.class).in(Scopes.SINGLETON); - newExporter(binder).export(GlueHiveMetastore.class).withGeneratedName(); + // export under the old name, for backwards compatibility + binder.bind(GlueHiveMetastoreFactory.class).in(Scopes.SINGLETON); + newExporter(binder).export(GlueHiveMetastoreFactory.class).as(generator -> generator.generatedNameOf(GlueHiveMetastore.class)); install(conditionalModule( HiveConfig.class, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java index f340ac60ae59..901a4f4ed3ae 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java @@ -41,8 +41,6 @@ import org.apache.hadoop.hive.metastore.api.DataOperationType; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import javax.inject.Inject; - import java.util.List; import java.util.Map; import 
java.util.Optional; @@ -72,7 +70,6 @@ public class BridgingHiveMetastore { private final ThriftMetastore delegate; - @Inject public BridgingHiveMetastore(ThriftMetastore delegate) { this.delegate = delegate; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java new file mode 100644 index 000000000000..b93bec956b25 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java @@ -0,0 +1,39 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.hive.metastore.thrift; + +import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; + +import javax.inject.Inject; + +import static java.util.Objects.requireNonNull; + +public class BridgingHiveMetastoreFactory + implements HiveMetastoreFactory +{ + private final ThriftMetastore thriftMetastore; + + @Inject + public BridgingHiveMetastoreFactory(ThriftMetastore thriftMetastore) + { + this.thriftMetastore = requireNonNull(thriftMetastore, "thriftMetastore is null"); + } + + @Override + public HiveMetastore createMetastore() + { + return new BridgingHiveMetastore(thriftMetastore); + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java index c6a625599c0e..54b039bb5b53 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/ThriftMetastoreModule.java @@ -17,8 +17,8 @@ import com.google.inject.Scopes; import com.google.inject.multibindings.OptionalBinder; import io.airlift.configuration.AbstractConfigurationAwareModule; -import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.RawHiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.plugin.hive.metastore.RawHiveMetastoreFactory; import static io.airlift.configuration.ConfigBinder.configBinder; import static org.weakref.jmx.guice.ExportBinder.newExporter; @@ -39,9 +39,9 @@ protected void setup(Binder binder) newExporter(binder).export(ThriftMetastore.class) .as(generator -> generator.generatedNameOf(ThriftHiveMetastore.class)); - binder.bind(HiveMetastore.class) - .annotatedWith(RawHiveMetastore.class) - .to(BridgingHiveMetastore.class) + binder.bind(HiveMetastoreFactory.class) + .annotatedWith(RawHiveMetastoreFactory.class) + .to(BridgingHiveMetastoreFactory.class) .in(Scopes.SINGLETON); install(new ThriftMetastoreAuthenticationModule()); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java index e07bc057b533..e58b74a6f030 
100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java @@ -37,6 +37,7 @@ import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.HiveColumnStatistics; import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.HivePrincipal; import io.trino.plugin.hive.metastore.HivePrivilegeInfo; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; @@ -809,7 +810,7 @@ protected final void setup(String databaseName, HiveConfig hiveConfig, HiveMetas JsonCodec partitionUpdateCodec = JsonCodec.jsonCodec(PartitionUpdate.class); metadataFactory = new HiveMetadataFactory( new CatalogName("hive"), - metastoreClient, + HiveMetastoreFactory.ofInstance(metastoreClient), hdfsEnvironment, partitionManager, 10, @@ -886,7 +887,7 @@ public Optional getMaterializedView(Connect getDefaultHiveFileWriterFactories(hiveConfig, hdfsEnvironment), hdfsEnvironment, PAGE_SORTER, - metastoreClient, + HiveMetastoreFactory.ofInstance(metastoreClient), new GroupByHashPageIndexerFactory(JOIN_COMPILER, BLOCK_TYPE_OPERATORS), TESTING_TYPE_MANAGER, getHiveConfig(), diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java index 75c2c58ebd07..abd31a31ea62 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java @@ -31,6 +31,7 @@ import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.ForwardingHiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.MetastoreConfig; import io.trino.plugin.hive.metastore.PrincipalPrivileges; import io.trino.plugin.hive.metastore.Table; @@ -201,7 +202,7 @@ protected void setup(String host, int port, String databaseName, boolean s3Selec new CatalogName("hive"), config, metastoreConfig, - metastoreClient, + HiveMetastoreFactory.ofInstance(metastoreClient), hdfsEnvironment, hivePartitionManager, newDirectExecutorService(), @@ -242,7 +243,7 @@ protected void setup(String host, int port, String databaseName, boolean s3Selec getDefaultHiveFileWriterFactories(config, hdfsEnvironment), hdfsEnvironment, PAGE_SORTER, - metastoreClient, + HiveMetastoreFactory.ofInstance(metastoreClient), new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators), TESTING_TYPE_MANAGER, config, diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java index cb5c7f189866..1efe77001823 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java @@ -22,6 +22,7 @@ import io.trino.operator.GroupByHashPageIndexerFactory; import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.HivePageSinkMetadata; import io.trino.spi.Page; import io.trino.spi.PageBuilder; @@ -284,7 +285,7 @@ private static ConnectorPageSink 
createPageSink(HiveTransactionHandle transactio getDefaultHiveFileWriterFactories(config, HDFS_ENVIRONMENT), HDFS_ENVIRONMENT, PAGE_SORTER, - metastore, + HiveMetastoreFactory.ofInstance(metastore), new GroupByHashPageIndexerFactory(new JoinCompiler(typeOperators), blockTypeOperators), TESTING_TYPE_MANAGER, config, diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java index bedf9530e6af..e7023fd39f3e 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/IcebergCatalogModule.java @@ -19,8 +19,9 @@ import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.hive.metastore.DecoratedHiveMetastoreModule; import io.trino.plugin.hive.metastore.HiveMetastore; -import io.trino.plugin.hive.metastore.RawHiveMetastore; -import io.trino.plugin.hive.metastore.cache.CachingHiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.plugin.hive.metastore.RawHiveMetastoreFactory; +import io.trino.plugin.hive.metastore.cache.SharedHiveMetastoreCache; import io.trino.plugin.iceberg.catalog.IcebergTableOperationsProvider; import io.trino.plugin.iceberg.catalog.file.FileMetastoreTableOperationsProvider; import io.trino.plugin.iceberg.catalog.file.IcebergFileMetastoreCatalogModule; @@ -49,7 +50,7 @@ public IcebergCatalogModule(Optional metastore) protected void setup(Binder binder) { if (metastore.isPresent()) { - binder.bind(HiveMetastore.class).annotatedWith(RawHiveMetastore.class).toInstance(metastore.get()); + binder.bind(HiveMetastoreFactory.class).annotatedWith(RawHiveMetastoreFactory.class).toInstance(HiveMetastoreFactory.ofInstance(metastore.get())); binder.bind(IcebergTableOperationsProvider.class).to(FileMetastoreTableOperationsProvider.class).in(Scopes.SINGLETON); } else { @@ -65,9 +66,9 @@ protected void setup(Binder binder) public static class MetastoreValidator { @Inject - public MetastoreValidator(HiveMetastore metastore) + public MetastoreValidator(SharedHiveMetastoreCache metastoreCache) { - if (metastore instanceof CachingHiveMetastore) { + if (metastoreCache.isEnabled()) { throw new RuntimeException("Hive metastore caching must not be enabled for Iceberg"); } } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java index bbff596c212f..668406f9ec28 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java @@ -17,7 +17,7 @@ import io.trino.plugin.hive.HdfsEnvironment; import io.trino.plugin.hive.HiveConfig; import io.trino.plugin.hive.NodeVersion; -import io.trino.plugin.hive.metastore.HiveMetastore; +import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.iceberg.catalog.IcebergTableOperationsProvider; import io.trino.spi.TrinoException; import io.trino.spi.security.ConnectorIdentity; @@ -33,7 +33,7 @@ public class TrinoCatalogFactory { private final CatalogName catalogName; - private final HiveMetastore metastore; + private final HiveMetastoreFactory metastoreFactory; private final HdfsEnvironment hdfsEnvironment; private final TypeManager typeManager; private final IcebergTableOperationsProvider 
tableOperationsProvider; @@ -47,7 +47,7 @@ public class TrinoCatalogFactory public TrinoCatalogFactory( IcebergConfig config, CatalogName catalogName, - HiveMetastore metastore, + HiveMetastoreFactory metastoreFactory, HdfsEnvironment hdfsEnvironment, TypeManager typeManager, IcebergTableOperationsProvider tableOperationsProvider, @@ -56,7 +56,7 @@ public TrinoCatalogFactory( HiveConfig hiveConfig) { this.catalogName = requireNonNull(catalogName, "catalogName is null"); - this.metastore = requireNonNull(metastore, "metastore is null"); + this.metastoreFactory = requireNonNull(metastoreFactory, "metastoreFactory is null"); this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null"); this.typeManager = requireNonNull(typeManager, "typeManager is null"); this.tableOperationsProvider = requireNonNull(tableOperationsProvider, "tableOperationProvider is null"); @@ -75,7 +75,7 @@ public TrinoCatalog create(ConnectorIdentity identity) case HIVE_METASTORE: return new TrinoHiveCatalog( catalogName, - memoizeMetastore(metastore, 1000), + memoizeMetastore(metastoreFactory.createMetastore(), 1000), hdfsEnvironment, typeManager, tableOperationsProvider, diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java index 2cd715b0f55a..f9dd5a0b7bf0 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java @@ -256,8 +256,13 @@ public void setNamespacePrincipal(ConnectorSession session, String namespace, Tr } @Override - public Transaction newCreateTableTransaction(ConnectorSession session, SchemaTableName schemaTableName, - Schema schema, PartitionSpec partitionSpec, String location, Map properties) + public Transaction newCreateTableTransaction( + ConnectorSession session, + SchemaTableName schemaTableName, + Schema schema, + PartitionSpec partitionSpec, + String location, + Map properties) { TableMetadata metadata = newTableMetadata(schema, partitionSpec, location, properties); TableOperations ops = tableOperationsProvider.createTableOperations( diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java index edd50f9b010d..f6953ab1a417 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergPlugin.java @@ -74,7 +74,7 @@ public void testGlueMetastore() "test", Map.of("iceberg.catalog.type", "glue"), new TestingConnectorContext())) - .hasMessageContaining("Explicit bindings are required and HiveMetastore is not explicitly bound"); + .hasMessageMatching("(?s).*Explicit bindings are required and HiveMetastoreFactory .* is not explicitly bound.*"); assertThatThrownBy(() -> factory.create( "test", From 3f906962c16e77a126b430bba03ae4264756279f Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 23:16:43 -0700 Subject: [PATCH 13/18] Add impersonation to HiveMetadataFactory --- .../plugin/hive/HiveMetadataFactory.java | 2 +- .../plugin/hive/HivePageSinkProvider.java | 3 ++- .../DecoratedHiveMetastoreModule.java | 11 ++++++++-- .../hive/metastore/HiveMetastoreFactory.java | 21 +++++++++++++++++-- .../alluxio/AlluxioHiveMetastoreFactory.java | 11 +++++++++- .../cache/SharedHiveMetastoreCache.java | 12 +++++++++-- 
.../file/FileHiveMetastoreFactory.java | 11 +++++++++- .../metastore/glue/GlueHiveMetastore.java | 4 ---- .../glue/GlueHiveMetastoreFactory.java | 11 +++++++++- .../thrift/BridgingHiveMetastoreFactory.java | 11 +++++++++- .../plugin/iceberg/TrinoCatalogFactory.java | 4 +++- 11 files changed, 84 insertions(+), 17 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java index 2648acd9646e..9419a743d084 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java @@ -192,7 +192,7 @@ public HiveMetadataFactory( public TransactionalMetadata create(ConnectorIdentity identity, boolean autoCommit) { HiveMetastoreClosure hiveMetastoreClosure = new HiveMetastoreClosure( - memoizeMetastore(metastoreFactory.createMetastore(), perTransactionCacheMaximumSize)); // per-transaction cache + memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), perTransactionCacheMaximumSize)); // per-transaction cache SemiTransactionalHiveMetastore metastore = new SemiTransactionalHiveMetastore( hdfsEnvironment, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java index cf689e6386f3..6a4f8a6a80a9 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java @@ -41,6 +41,7 @@ import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.OptionalInt; import java.util.Set; @@ -156,7 +157,7 @@ private ConnectorPageSink createPageSink(HiveWritableTableHandle handle, boolean session.getQueryId(), new HivePageSinkMetadataProvider( handle.getPageSinkMetadata(), - new HiveMetastoreClosure(memoizeMetastore(metastoreFactory.createMetastore(), perTransactionMetastoreCacheMaximumSize)), + new HiveMetastoreClosure(memoizeMetastore(metastoreFactory.createMetastore(Optional.of(session.getIdentity())), perTransactionMetastoreCacheMaximumSize)), new HiveIdentity(session)), typeManager, hdfsEnvironment, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java index 9d2da3600904..216ed94906eb 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java @@ -25,6 +25,7 @@ import io.trino.plugin.hive.metastore.procedure.FlushHiveMetastoreCacheProcedure; import io.trino.plugin.hive.metastore.recording.RecordingHiveMetastoreDecoratorModule; import io.trino.spi.procedure.Procedure; +import io.trino.spi.security.ConnectorIdentity; import java.util.Comparator; import java.util.List; @@ -85,9 +86,15 @@ public DecoratingHiveMetastoreFactory(HiveMetastoreFactory delegate, Set identity) + { + HiveMetastore metastore = delegate.createMetastore(identity); for (HiveMetastoreDecorator decorator : sortedDecorators) { metastore = decorator.decorate(metastore); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java index 7a3ab437d543..070e30367e77 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastoreFactory.java @@ -13,11 +13,22 @@ */ package io.trino.plugin.hive.metastore; +import io.trino.spi.security.ConnectorIdentity; + +import java.util.Optional; + import static java.util.Objects.requireNonNull; public interface HiveMetastoreFactory { - HiveMetastore createMetastore(); + boolean isImpersonationEnabled(); + + /** + * Create a metastore instance for the identity. An empty identity will + * only be provided when impersonation is disabled, and global caching is + * enabled. + */ + HiveMetastore createMetastore(Optional identity); static HiveMetastoreFactory ofInstance(HiveMetastore metastore) { @@ -35,7 +46,13 @@ private StaticHiveMetastoreFactory(HiveMetastore metastore) } @Override - public HiveMetastore createMetastore() + public boolean isImpersonationEnabled() + { + return false; + } + + @Override + public HiveMetastore createMetastore(Optional identity) { return metastore; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java index 60afe0c22652..fdb315d7b2aa 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastoreFactory.java @@ -17,9 +17,12 @@ import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.MetastoreConfig; +import io.trino.spi.security.ConnectorIdentity; import javax.inject.Inject; +import java.util.Optional; + public class AlluxioHiveMetastoreFactory implements HiveMetastoreFactory { @@ -33,7 +36,13 @@ public AlluxioHiveMetastoreFactory(TableMasterClient client, MetastoreConfig met } @Override - public HiveMetastore createMetastore() + public boolean isImpersonationEnabled() + { + return false; + } + + @Override + public HiveMetastore createMetastore(Optional identity) { return metastore; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java index c27e2e9e9d66..12614970433e 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java @@ -18,6 +18,7 @@ import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.spi.NodeManager; +import io.trino.spi.security.ConnectorIdentity; import org.weakref.jmx.Flatten; import org.weakref.jmx.Nested; @@ -96,11 +97,12 @@ public HiveMetastoreFactory createCachingHiveMetastoreFactory(HiveMetastoreFacto return metastoreFactory; } + // caching hive metastore currently handles caching for multiple users internally CachingHiveMetastore cachingHiveMetastore = cachingHiveMetastore( - metastoreFactory.createMetastore(), // Loading of cache entry in CachingHiveMetastore might trigger loading of another cache entry for different object type // In 
case there are no empty executor slots, such operation would deadlock. Therefore, a reentrant executor needs to be // used. + metastoreFactory.createMetastore(Optional.empty()), new ReentrantBoundedExecutor(executorService, maxMetastoreRefreshThreads), metastoreCacheTtl, metastoreRefreshInterval, @@ -119,7 +121,13 @@ private CachingHiveMetastoreFactory(CachingHiveMetastore metastore) } @Override - public HiveMetastore createMetastore() + public boolean isImpersonationEnabled() + { + return metastore.isImpersonationEnabled(); + } + + @Override + public HiveMetastore createMetastore(Optional identity) { return metastore; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java index 506138cd355a..235d2e84d482 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastoreFactory.java @@ -18,9 +18,12 @@ import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.MetastoreConfig; +import io.trino.spi.security.ConnectorIdentity; import javax.inject.Inject; +import java.util.Optional; + public class FileHiveMetastoreFactory implements HiveMetastoreFactory { @@ -34,7 +37,13 @@ public FileHiveMetastoreFactory(NodeVersion nodeVersion, HdfsEnvironment hdfsEnv } @Override - public HiveMetastore createMetastore() + public boolean isImpersonationEnabled() + { + return false; + } + + @Override + public HiveMetastore createMetastore(Optional identity) { return metastore; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java index 7476f3771a75..e5975af58264 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java @@ -104,8 +104,6 @@ import io.trino.spi.statistics.ColumnStatisticType; import io.trino.spi.type.Type; import org.apache.hadoop.fs.Path; -import org.weakref.jmx.Flatten; -import org.weakref.jmx.Managed; import javax.annotation.Nullable; import javax.inject.Inject; @@ -269,8 +267,6 @@ private static AWSCredentialsProvider getCustomAWSCredentialsProvider(String pro } } - @Managed - @Flatten public GlueMetastoreStats getStats() { return stats; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java index d9923f255610..1b8e75565e24 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastoreFactory.java @@ -15,11 +15,14 @@ import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.spi.security.ConnectorIdentity; import org.weakref.jmx.Flatten; import org.weakref.jmx.Managed; import javax.inject.Inject; +import java.util.Optional; + import static java.util.Objects.requireNonNull; public class GlueHiveMetastoreFactory @@ -42,7 +45,13 @@ public GlueHiveMetastore getMetastore() } 
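The factories above follow a common pattern for stores that cannot impersonate (Alluxio, file, Glue): isImpersonationEnabled() returns false, a single HiveMetastore is built up front, and createMetastore hands back that shared instance regardless of the identity argument. As a rough, standalone illustration of the same contract — the class name and directory below are arbitrary and not part of this patch series — the static factory returned by HiveMetastoreFactory.ofInstance can be exercised together with the existing createTestingFileHiveMetastore helper:

import io.trino.plugin.hive.metastore.HiveMetastore;
import io.trino.plugin.hive.metastore.HiveMetastoreFactory;
import io.trino.plugin.hive.metastore.file.FileHiveMetastore;

import java.io.File;
import java.util.Optional;

public final class MetastoreFactoryUsageSketch
{
    private MetastoreFactoryUsageSketch() {}

    public static void main(String[] args)
    {
        // existing test helper; the catalog directory is an arbitrary local path
        HiveMetastore metastore = FileHiveMetastore.createTestingFileHiveMetastore(new File("/tmp/hive-catalog"));

        // wrap the fixed instance in the factory abstraction, as the updated tests do
        HiveMetastoreFactory factory = HiveMetastoreFactory.ofInstance(metastore);

        // the static factory never impersonates: callers without an identity pass
        // Optional.empty() and always receive the same shared instance back
        System.out.println(factory.isImpersonationEnabled());                        // false
        System.out.println(factory.createMetastore(Optional.empty()) == metastore);  // true
    }
}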
@Override - public HiveMetastore createMetastore() + public boolean isImpersonationEnabled() + { + return false; + } + + @Override + public HiveMetastore createMetastore(Optional identity) { return metastore; } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java index b93bec956b25..d3791a065aa1 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java @@ -15,9 +15,12 @@ import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; +import io.trino.spi.security.ConnectorIdentity; import javax.inject.Inject; +import java.util.Optional; + import static java.util.Objects.requireNonNull; public class BridgingHiveMetastoreFactory @@ -32,7 +35,13 @@ public BridgingHiveMetastoreFactory(ThriftMetastore thriftMetastore) } @Override - public HiveMetastore createMetastore() + public boolean isImpersonationEnabled() + { + return thriftMetastore.isImpersonationEnabled(); + } + + @Override + public HiveMetastore createMetastore(Optional identity) { return new BridgingHiveMetastore(thriftMetastore); } diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java index 668406f9ec28..a189c7d68d23 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java @@ -25,6 +25,8 @@ import javax.inject.Inject; +import java.util.Optional; + import static io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.memoizeMetastore; import static io.trino.plugin.iceberg.IcebergSecurityConfig.IcebergSecurity.SYSTEM; import static io.trino.spi.StandardErrorCode.NOT_SUPPORTED; @@ -75,7 +77,7 @@ public TrinoCatalog create(ConnectorIdentity identity) case HIVE_METASTORE: return new TrinoHiveCatalog( catalogName, - memoizeMetastore(metastoreFactory.createMetastore(), 1000), + memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), 1000), hdfsEnvironment, typeManager, tableOperationsProvider, From 16540b5c691e7bf486e745035e7b9a19543bdd71 Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 20:17:15 -0700 Subject: [PATCH 14/18] Add impersonation to SharedHiveMetastoreCache --- .../DecoratedHiveMetastoreModule.java | 3 + .../cache/ImpersonationCachingConfig.java | 54 +++++++++++++ .../cache/SharedHiveMetastoreCache.java | 77 ++++++++++++++++++- .../cache/TestImpersonationCachingConfig.java | 51 ++++++++++++ 4 files changed, 182 insertions(+), 3 deletions(-) create mode 100644 plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ImpersonationCachingConfig.java create mode 100644 plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestImpersonationCachingConfig.java diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java index 216ed94906eb..71204f654810 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java +++ 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/DecoratedHiveMetastoreModule.java @@ -20,6 +20,7 @@ import io.airlift.configuration.AbstractConfigurationAwareModule; import io.trino.plugin.hive.metastore.cache.CachingHiveMetastore; import io.trino.plugin.hive.metastore.cache.CachingHiveMetastoreConfig; +import io.trino.plugin.hive.metastore.cache.ImpersonationCachingConfig; import io.trino.plugin.hive.metastore.cache.SharedHiveMetastoreCache; import io.trino.plugin.hive.metastore.cache.SharedHiveMetastoreCache.CachingHiveMetastoreFactory; import io.trino.plugin.hive.metastore.procedure.FlushHiveMetastoreCacheProcedure; @@ -48,6 +49,8 @@ protected void setup(Binder binder) install(new RecordingHiveMetastoreDecoratorModule()); configBinder(binder).bindConfig(CachingHiveMetastoreConfig.class); + // TODO this should only be bound when impersonation is actually enabled + configBinder(binder).bindConfig(ImpersonationCachingConfig.class); binder.bind(SharedHiveMetastoreCache.class).in(Scopes.SINGLETON); // export under the old name, for backwards compatibility newExporter(binder).export(HiveMetastoreFactory.class) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ImpersonationCachingConfig.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ImpersonationCachingConfig.java new file mode 100644 index 000000000000..7a375f12bee2 --- /dev/null +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/ImpersonationCachingConfig.java @@ -0,0 +1,54 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package io.trino.plugin.hive.metastore.cache; + +import io.airlift.configuration.Config; +import io.airlift.units.Duration; + +import javax.validation.constraints.Min; +import javax.validation.constraints.NotNull; + +import java.util.concurrent.TimeUnit; + +public class ImpersonationCachingConfig +{ + private Duration userMetastoreCacheTtl = new Duration(0, TimeUnit.SECONDS); + private long userMetastoreCacheMaximumSize = 1000; + + @NotNull + public Duration getUserMetastoreCacheTtl() + { + return userMetastoreCacheTtl; + } + + @Config("hive.user-metastore-cache-ttl") + public ImpersonationCachingConfig setUserMetastoreCacheTtl(Duration userMetastoreCacheTtl) + { + this.userMetastoreCacheTtl = userMetastoreCacheTtl; + return this; + } + + @Min(0) + public long getUserMetastoreCacheMaximumSize() + { + return userMetastoreCacheMaximumSize; + } + + @Config("hive.user-metastore-cache-maximum-size") + public ImpersonationCachingConfig setUserMetastoreCacheMaximumSize(long userMetastoreCacheMaximumSize) + { + this.userMetastoreCacheMaximumSize = userMetastoreCacheMaximumSize; + return this; + } +} diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java index 12614970433e..f1f723156fb3 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java @@ -13,11 +13,16 @@ */ package io.trino.plugin.hive.metastore.cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.CacheLoader; +import com.google.common.cache.LoadingCache; +import com.google.common.util.concurrent.UncheckedExecutionException; import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.spi.NodeManager; +import io.trino.spi.TrinoException; import io.trino.spi.security.ConnectorIdentity; import org.weakref.jmx.Flatten; import org.weakref.jmx.Nested; @@ -29,10 +34,14 @@ import java.util.Optional; import java.util.concurrent.ExecutorService; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Throwables.throwIfInstanceOf; import static io.airlift.concurrent.Threads.daemonThreadsNamed; +import static io.trino.collect.cache.SafeCaches.buildNonEvictableCache; import static io.trino.plugin.hive.metastore.cache.CachingHiveMetastore.cachingHiveMetastore; import static java.util.Objects.requireNonNull; import static java.util.concurrent.Executors.newCachedThreadPool; +import static java.util.concurrent.TimeUnit.MILLISECONDS; public class SharedHiveMetastoreCache { @@ -43,13 +52,17 @@ public class SharedHiveMetastoreCache private final long metastoreCacheMaximumSize; private final int maxMetastoreRefreshThreads; + private final Duration userMetastoreCacheTtl; + private final long userMetastoreCacheMaximumSize; + private ExecutorService executorService; @Inject public SharedHiveMetastoreCache( CatalogName catalogName, NodeManager nodeManager, - CachingHiveMetastoreConfig config) + CachingHiveMetastoreConfig config, + ImpersonationCachingConfig impersonationCachingConfig) { requireNonNull(nodeManager, "nodeManager is null"); requireNonNull(config, "config is null"); @@ -61,6 +74,9 @@ public 
SharedHiveMetastoreCache(
         metastoreRefreshInterval = config.getMetastoreRefreshInterval();
         metastoreCacheMaximumSize = config.getMetastoreCacheMaximumSize();
 
+        userMetastoreCacheTtl = impersonationCachingConfig.getUserMetastoreCacheTtl();
+        userMetastoreCacheMaximumSize = impersonationCachingConfig.getUserMetastoreCacheMaximumSize();
+
         // Disable caching on workers, because there currently is no way to invalidate such a cache.
         // Note: while we could skip CachingHiveMetastoreModule altogether on workers, we retain it so that catalog
         // configuration can remain identical for all nodes, making cluster configuration easier.
@@ -97,7 +113,14 @@ public HiveMetastoreFactory createCachingHiveMetastoreFactory(HiveMetastoreFacto
             return metastoreFactory;
         }
 
-        // caching hive metastore currently handles caching for multiple users internally
+        if (metastoreFactory.isImpersonationEnabled()) {
+            // the per-user cache can also be disabled
+            if (userMetastoreCacheMaximumSize == 0 || userMetastoreCacheTtl.toMillis() == 0) {
+                return metastoreFactory;
+            }
+            return new ImpersonationCachingHiveMetastoreFactory(metastoreFactory);
+        }
+
         CachingHiveMetastore cachingHiveMetastore = cachingHiveMetastore(
                 // Loading of cache entry in CachingHiveMetastore might trigger loading of another cache entry for different object type
                 // In case there are no empty executor slots, such operation would deadlock. Therefore, a reentrant executor needs to be
@@ -123,7 +146,7 @@ private CachingHiveMetastoreFactory(CachingHiveMetastore metastore)
         @Override
         public boolean isImpersonationEnabled()
         {
-            return metastore.isImpersonationEnabled();
+            return false;
         }
 
         @Override
@@ -139,4 +162,52 @@ public CachingHiveMetastore getMetastore()
             return metastore;
         }
     }
+
+    public class ImpersonationCachingHiveMetastoreFactory
+            implements HiveMetastoreFactory
+    {
+        private final HiveMetastoreFactory metastoreFactory;
+        private final LoadingCache<String, HiveMetastore> cache;
+
+        public ImpersonationCachingHiveMetastoreFactory(HiveMetastoreFactory metastoreFactory)
+        {
+            this.metastoreFactory = metastoreFactory;
+            cache = buildNonEvictableCache(
+                    CacheBuilder.newBuilder()
+                            .expireAfterWrite(userMetastoreCacheTtl.toMillis(), MILLISECONDS)
+                            .maximumSize(userMetastoreCacheMaximumSize),
+                    CacheLoader.from(this::createUserCachingMetastore));
+        }
+
+        @Override
+        public boolean isImpersonationEnabled()
+        {
+            return true;
+        }
+
+        @Override
+        public HiveMetastore createMetastore(Optional<ConnectorIdentity> identity)
+        {
+            checkArgument(identity.isPresent(), "Identity must be present for impersonation cache");
+            try {
+                return cache.getUnchecked(identity.get().getUser());
+            }
+            catch (UncheckedExecutionException e) {
+                throwIfInstanceOf(e.getCause(), TrinoException.class);
+                throw e;
+            }
+        }
+
+        private HiveMetastore createUserCachingMetastore(String user)
+        {
+            return cachingHiveMetastore(
+                    metastoreFactory.createMetastore(Optional.of(ConnectorIdentity.ofUser(user))),
+                    new ReentrantBoundedExecutor(executorService, maxMetastoreRefreshThreads),
+                    metastoreCacheTtl,
+                    metastoreRefreshInterval,
+                    metastoreCacheMaximumSize);
+        }
+
+        // TODO aggregate and export stats
+    }
 }
diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestImpersonationCachingConfig.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestImpersonationCachingConfig.java
new file mode 100644
index 000000000000..487cd3d83a20
--- /dev/null
+++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestImpersonationCachingConfig.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package io.trino.plugin.hive.metastore.cache; + +import com.google.common.collect.ImmutableMap; +import io.airlift.units.Duration; +import org.testng.annotations.Test; + +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import static io.airlift.configuration.testing.ConfigAssertions.assertFullMapping; +import static io.airlift.configuration.testing.ConfigAssertions.assertRecordedDefaults; +import static io.airlift.configuration.testing.ConfigAssertions.recordDefaults; + +public class TestImpersonationCachingConfig +{ + @Test + public void testDefaults() + { + assertRecordedDefaults(recordDefaults(ImpersonationCachingConfig.class) + .setUserMetastoreCacheTtl(new Duration(0, TimeUnit.SECONDS)) + .setUserMetastoreCacheMaximumSize(1000)); + } + + @Test + public void testExplicitPropertyMappings() + { + Map properties = ImmutableMap.builder() + .put("hive.user-metastore-cache-ttl", "2h") + .put("hive.user-metastore-cache-maximum-size", "5") + .build(); + + ImpersonationCachingConfig expected = new ImpersonationCachingConfig() + .setUserMetastoreCacheTtl(new Duration(2, TimeUnit.HOURS)) + .setUserMetastoreCacheMaximumSize(5); + + assertFullMapping(properties, expected); + } +} From d22a660dc760d46d20c3cf09aba631f70136f5cf Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 23:40:19 -0700 Subject: [PATCH 15/18] Remove per user metadata caching from CachingHiveMetastore --- .../plugin/hive/HiveMetadataFactory.java | 3 +- .../plugin/hive/HiveMetastoreClosure.java | 5 - .../plugin/hive/HivePageSinkProvider.java | 2 +- .../metastore/ForwardingHiveMetastore.java | 6 - .../plugin/hive/metastore/HiveMetastore.java | 2 - .../alluxio/AlluxioHiveMetastore.java | 6 - .../metastore/cache/CachingHiveMetastore.java | 298 +++++++----------- .../cache/SharedHiveMetastoreCache.java | 6 +- .../metastore/file/FileHiveMetastore.java | 6 - .../metastore/glue/GlueHiveMetastore.java | 6 - .../recording/RecordingHiveMetastore.java | 6 - .../thrift/BridgingHiveMetastore.java | 6 - .../trino/plugin/hive/AbstractTestHive.java | 1 + .../metastore/UnimplementedHiveMetastore.java | 6 - .../cache/TestCachingHiveMetastore.java | 10 +- .../plugin/iceberg/TrinoCatalogFactory.java | 3 +- .../CountingAccessFileHiveMetastore.java | 7 - 17 files changed, 122 insertions(+), 257 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java index 9419a743d084..a66404d04a3b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java @@ -17,6 +17,7 @@ import io.airlift.json.JsonCodec; import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; +import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.MetastoreConfig; 
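// Sketch (assumed generic types, for illustration only -- these are not lines from the patch):
// this commit binds a single HiveIdentity to each CachingHiveMetastore instance instead of
// carrying the identity inside every cache key, so the cache declarations change roughly like this:
//   before: LoadingCache<WithIdentity<HiveTableName>, Optional<Table>> tableCache;
//   after:  LoadingCache<HiveTableName, Optional<Table>> tableCache;  // identity is now a field of the metastore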
import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; @@ -192,7 +193,7 @@ public HiveMetadataFactory( public TransactionalMetadata create(ConnectorIdentity identity, boolean autoCommit) { HiveMetastoreClosure hiveMetastoreClosure = new HiveMetastoreClosure( - memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), perTransactionCacheMaximumSize)); // per-transaction cache + memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), new HiveIdentity(identity), perTransactionCacheMaximumSize)); // per-transaction cache SemiTransactionalHiveMetastore metastore = new SemiTransactionalHiveMetastore( hdfsEnvironment, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java index 433061926ba2..1091873b7c67 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java @@ -301,11 +301,6 @@ public Set listTablePrivileges(String databaseName, String ta return delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal); } - public boolean isImpersonationEnabled() - { - return delegate.isImpersonationEnabled(); - } - public long openTransaction(HiveIdentity identity) { return delegate.openTransaction(identity); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java index 6a4f8a6a80a9..624b0bf035ec 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java @@ -157,7 +157,7 @@ private ConnectorPageSink createPageSink(HiveWritableTableHandle handle, boolean session.getQueryId(), new HivePageSinkMetadataProvider( handle.getPageSinkMetadata(), - new HiveMetastoreClosure(memoizeMetastore(metastoreFactory.createMetastore(Optional.of(session.getIdentity())), perTransactionMetastoreCacheMaximumSize)), + new HiveMetastoreClosure(memoizeMetastore(metastoreFactory.createMetastore(Optional.of(session.getIdentity())), new HiveIdentity(session), perTransactionMetastoreCacheMaximumSize)), new HiveIdentity(session)), typeManager, hdfsEnvironment, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java index e955f1b76022..3024335bc075 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java @@ -356,12 +356,6 @@ public Set listTablePrivileges(String databaseName, return delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal); } - @Override - public boolean isImpersonationEnabled() - { - return delegate.isImpersonationEnabled(); - } - @Override public long openTransaction(HiveIdentity identity) { diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java index 4b87b0ca0ddb..b05415c1ed48 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java @@ -143,8 
+143,6 @@ default void updatePartitionStatistics(HiveIdentity identity, Table table, Strin */ Set listTablePrivileges(String databaseName, String tableName, Optional tableOwner, Optional principal); - boolean isImpersonationEnabled(); - default long openTransaction(HiveIdentity identity) { throw new UnsupportedOperationException(); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java index b324ebf031f4..952d56402ce5 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java @@ -479,10 +479,4 @@ public Set listTablePrivileges(String databaseName, String ta { throw new TrinoException(NOT_SUPPORTED, "listTablePrivileges"); } - - @Override - public boolean isImpersonationEnabled() - { - return false; - } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java index 50313456c12f..3a44bd9849e4 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java @@ -13,6 +13,7 @@ */ package io.trino.plugin.hive.metastore.cache; +import com.google.common.annotations.VisibleForTesting; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import com.google.common.collect.ImmutableList; @@ -61,7 +62,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; -import java.util.Objects; import java.util.Optional; import java.util.OptionalLong; import java.util.Set; @@ -71,7 +71,6 @@ import java.util.function.Predicate; import static com.google.common.base.Functions.identity; -import static com.google.common.base.MoreObjects.toStringHelper; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Throwables.throwIfInstanceOf; import static com.google.common.base.Throwables.throwIfUnchecked; @@ -104,26 +103,28 @@ public enum StatsRecording } protected final HiveMetastore delegate; + private final HiveIdentity identity; private final LoadingCache> databaseCache; private final LoadingCache> databaseNamesCache; - private final LoadingCache, Optional
> tableCache; + private final LoadingCache> tableCache; private final LoadingCache> tableNamesCache; private final LoadingCache> tablesWithParameterCache; - private final LoadingCache, PartitionStatistics> tableStatisticsCache; - private final LoadingCache, PartitionStatistics> partitionStatisticsCache; + private final LoadingCache tableStatisticsCache; + private final LoadingCache partitionStatisticsCache; private final LoadingCache> viewNamesCache; - private final LoadingCache, Optional> partitionCache; - private final LoadingCache, Optional>> partitionFilterCache; + private final LoadingCache> partitionCache; + private final LoadingCache>> partitionFilterCache; private final LoadingCache> tablePrivilegesCache; private final LoadingCache> rolesCache; private final LoadingCache> roleGrantsCache; private final LoadingCache> grantedPrincipalsCache; private final LoadingCache> configValuesCache; - public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, Executor executor, Duration cacheTtl, Optional refreshInterval, long maximumSize) + public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, HiveIdentity identity, Executor executor, Duration cacheTtl, Optional refreshInterval, long maximumSize) { return new CachingHiveMetastore( delegate, + identity, OptionalLong.of(cacheTtl.toMillis()), refreshInterval .map(Duration::toMillis) @@ -134,10 +135,11 @@ public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, StatsRecording.ENABLED); } - public static CachingHiveMetastore memoizeMetastore(HiveMetastore delegate, long maximumSize) + public static CachingHiveMetastore memoizeMetastore(HiveMetastore delegate, HiveIdentity identity, long maximumSize) { return new CachingHiveMetastore( delegate, + identity, OptionalLong.empty(), OptionalLong.empty(), Optional.empty(), @@ -145,9 +147,10 @@ public static CachingHiveMetastore memoizeMetastore(HiveMetastore delegate, long StatsRecording.DISABLED); } - protected CachingHiveMetastore(HiveMetastore delegate, OptionalLong expiresAfterWriteMillis, OptionalLong refreshMills, Optional executor, long maximumSize, StatsRecording statsRecording) + protected CachingHiveMetastore(HiveMetastore delegate, HiveIdentity identity, OptionalLong expiresAfterWriteMillis, OptionalLong refreshMills, Optional executor, long maximumSize, StatsRecording statsRecording) { this.delegate = requireNonNull(delegate, "delegate is null"); + this.identity = requireNonNull(identity, "identity is null"); requireNonNull(executor, "executor is null"); databaseNamesCache = buildCache(expiresAfterWriteMillis, refreshMills, executor, maximumSize, statsRecording, ignored -> loadAllDatabases()); @@ -256,17 +259,16 @@ private List loadAllDatabases() return delegate.getAllDatabases(); } - private Table getExistingTable(HiveIdentity identity, String databaseName, String tableName) + private Table getExistingTable(String databaseName, String tableName) { return getTable(identity, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); } @Override - public Optional
getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional
getTable(HiveIdentity ignored, String databaseName, String tableName) { - identity = updateIdentity(identity); - return get(tableCache, new WithIdentity<>(identity, hiveTableName(databaseName, tableName))); + return get(tableCache, hiveTableName(databaseName, tableName)); } @Override @@ -275,64 +277,63 @@ public Set getSupportedColumnStatistics(Type type) return delegate.getSupportedColumnStatistics(type); } - private Optional
loadTable(WithIdentity hiveTableName) + private Optional
loadTable(HiveTableName hiveTableName) { - return delegate.getTable(hiveTableName.getIdentity(), hiveTableName.key.getDatabaseName(), hiveTableName.key.getTableName()); + return delegate.getTable(identity, hiveTableName.getDatabaseName(), hiveTableName.getTableName()); } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(HiveIdentity ignored, Table table) { - return get(tableStatisticsCache, new WithIdentity<>(updateIdentity(identity), hiveTableName(table.getDatabaseName(), table.getTableName()))); + return get(tableStatisticsCache, hiveTableName(table.getDatabaseName(), table.getTableName())); } - private PartitionStatistics loadTableColumnStatistics(WithIdentity hiveTableName) + private PartitionStatistics loadTableColumnStatistics(HiveTableName tableName) { - HiveTableName tableName = hiveTableName.getKey(); - Table table = getExistingTable(hiveTableName.getIdentity(), tableName.getDatabaseName(), tableName.getTableName()); - return delegate.getTableStatistics(hiveTableName.getIdentity(), table); + Table table = getExistingTable(tableName.getDatabaseName(), tableName.getTableName()); + return delegate.getTableStatistics(identity, table); } @Override - public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(HiveIdentity ignored, Table table, List partitions) { HiveTableName hiveTableName = hiveTableName(table.getDatabaseName(), table.getTableName()); - List> partitionNames = partitions.stream() - .map(partition -> new WithIdentity<>(updateIdentity(identity), hivePartitionName(hiveTableName, makePartitionName(table, partition)))) + List partitionNames = partitions.stream() + .map(partition -> hivePartitionName(hiveTableName, makePartitionName(table, partition))) .collect(toImmutableList()); - Map, PartitionStatistics> statistics = getAll(partitionStatisticsCache, partitionNames); + Map statistics = getAll(partitionStatisticsCache, partitionNames); return statistics.entrySet() .stream() - .collect(toImmutableMap(entry -> entry.getKey().getKey().getPartitionName().get(), Entry::getValue)); + .collect(toImmutableMap(entry -> entry.getKey().getPartitionName().orElseThrow(), Entry::getValue)); } - private PartitionStatistics loadPartitionColumnStatistics(WithIdentity partition) + private PartitionStatistics loadPartitionColumnStatistics(HivePartitionName partition) { - HiveTableName tableName = partition.getKey().getHiveTableName(); - String partitionName = partition.getKey().getPartitionName().get(); - Table table = getExistingTable(partition.getIdentity(), tableName.getDatabaseName(), tableName.getTableName()); + HiveTableName tableName = partition.getHiveTableName(); + String partitionName = partition.getPartitionName().orElseThrow(); + Table table = getExistingTable(tableName.getDatabaseName(), tableName.getTableName()); Map partitionStatistics = delegate.getPartitionStatistics( - partition.getIdentity(), + identity, table, - ImmutableList.of(getExistingPartition(partition.getIdentity(), table, partition.getKey().getPartitionValues()))); + ImmutableList.of(getExistingPartition(table, partition.getPartitionValues()))); return partitionStatistics.get(partitionName); } - private Map, PartitionStatistics> loadPartitionsColumnStatistics(Iterable> keys) + private Map loadPartitionsColumnStatistics(Iterable keys) { - SetMultimap, WithIdentity> tablePartitions = stream(keys) - .collect(toImmutableSetMultimap(value -> new 
WithIdentity<>(value.getIdentity(), value.getKey().getHiveTableName()), Function.identity())); - ImmutableMap.Builder, PartitionStatistics> result = ImmutableMap.builder(); + SetMultimap tablePartitions = stream(keys) + .collect(toImmutableSetMultimap(HivePartitionName::getHiveTableName, Function.identity())); + ImmutableMap.Builder result = ImmutableMap.builder(); tablePartitions.keySet().forEach(tableName -> { - Set> partitionNames = tablePartitions.get(tableName); + Set partitionNames = tablePartitions.get(tableName); Set partitionNameStrings = partitionNames.stream() - .map(partitionName -> partitionName.getKey().getPartitionName().get()) + .map(partitionName -> partitionName.getPartitionName().orElseThrow()) .collect(toImmutableSet()); - Table table = getExistingTable(tableName.getIdentity(), tableName.getKey().getDatabaseName(), tableName.getKey().getTableName()); - List partitions = getExistingPartitionsByNames(tableName.getIdentity(), table, ImmutableList.copyOf(partitionNameStrings)); - Map statisticsByPartitionName = delegate.getPartitionStatistics(tableName.getIdentity(), table, partitions); - for (WithIdentity partitionName : partitionNames) { - String stringNameForPartition = partitionName.getKey().getPartitionName().get(); + Table table = getExistingTable(tableName.getDatabaseName(), tableName.getTableName()); + List partitions = getExistingPartitionsByNames(table, ImmutableList.copyOf(partitionNameStrings)); + Map statisticsByPartitionName = delegate.getPartitionStatistics(identity, table, partitions); + for (HivePartitionName partitionName : partitionNames) { + String stringNameForPartition = partitionName.getPartitionName().orElseThrow(); result.put(partitionName, statisticsByPartitionName.get(stringNameForPartition)); } }); @@ -340,48 +341,50 @@ private Map, PartitionStatistics> loadPartitions } @Override - public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public void updateTableStatistics( + HiveIdentity ignored, + String databaseName, + String tableName, + AcidTransaction transaction, + Function update) { - identity = updateIdentity(identity); try { delegate.updateTableStatistics(identity, databaseName, tableName, transaction, update); } finally { HiveTableName hiveTableName = hiveTableName(databaseName, tableName); - tableStatisticsCache.invalidate(new WithIdentity<>(identity, hiveTableName)); + tableStatisticsCache.invalidate(hiveTableName); // basic stats are stored as table properties - tableCache.invalidate(new WithIdentity<>(identity, hiveTableName)); + tableCache.invalidate(hiveTableName); } } @Override - public void updatePartitionStatistics(HiveIdentity identity, Table table, String partitionName, Function update) + public void updatePartitionStatistics(HiveIdentity ignored, Table table, String partitionName, Function update) { - identity = updateIdentity(identity); try { delegate.updatePartitionStatistics(identity, table, partitionName, update); } finally { HivePartitionName hivePartitionName = hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionName); - partitionStatisticsCache.invalidate(new WithIdentity<>(identity, hivePartitionName)); + partitionStatisticsCache.invalidate(hivePartitionName); // basic stats are stored as partition properties - partitionCache.invalidate(new WithIdentity<>(identity, hivePartitionName)); + partitionCache.invalidate(hivePartitionName); } } @Override - public void 
updatePartitionStatistics(HiveIdentity identity, Table table, Map> updates) + public void updatePartitionStatistics(HiveIdentity ignored, Table table, Map> updates) { try { - delegate.updatePartitionStatistics(updateIdentity(identity), table, updates); + delegate.updatePartitionStatistics(identity, table, updates); } finally { - HiveIdentity hiveIdentity = updateIdentity(identity); updates.forEach((partitionName, update) -> { HivePartitionName hivePartitionName = hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionName); - partitionStatisticsCache.invalidate(new WithIdentity<>(hiveIdentity, hivePartitionName)); + partitionStatisticsCache.invalidate(hivePartitionName); // basic stats are stored as partition properties - partitionCache.invalidate(new WithIdentity<>(hiveIdentity, hivePartitionName)); + partitionCache.invalidate(hivePartitionName); }); } } @@ -421,9 +424,8 @@ private List loadAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(HiveIdentity ignored, Database database) { - identity = updateIdentity(identity); try { delegate.createDatabase(identity, database); } @@ -433,9 +435,8 @@ public void createDatabase(HiveIdentity identity, Database database) } @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(HiveIdentity ignored, String databaseName, boolean deleteData) { - identity = updateIdentity(identity); try { delegate.dropDatabase(identity, databaseName, deleteData); } @@ -445,9 +446,8 @@ public void dropDatabase(HiveIdentity identity, String databaseName, boolean del } @Override - public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public void renameDatabase(HiveIdentity ignored, String databaseName, String newDatabaseName) { - identity = updateIdentity(identity); try { delegate.renameDatabase(identity, databaseName, newDatabaseName); } @@ -458,9 +458,8 @@ public void renameDatabase(HiveIdentity identity, String databaseName, String ne } @Override - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(HiveIdentity ignored, String databaseName, HivePrincipal principal) { - identity = updateIdentity(identity); try { delegate.setDatabaseOwner(identity, databaseName, principal); } @@ -476,9 +475,8 @@ protected void invalidateDatabase(String databaseName) } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(HiveIdentity ignored, Table table, PrincipalPrivileges principalPrivileges) { - identity = updateIdentity(identity); try { delegate.createTable(identity, table, principalPrivileges); } @@ -488,9 +486,8 @@ public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(HiveIdentity ignored, String databaseName, String tableName, boolean deleteData) { - identity = updateIdentity(identity); try { delegate.dropTable(identity, databaseName, tableName, deleteData); } @@ -500,9 +497,8 @@ public void dropTable(HiveIdentity identity, String databaseName, String tableNa } @Override - public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + 
public void replaceTable(HiveIdentity ignored, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { - identity = updateIdentity(identity); try { delegate.replaceTable(identity, databaseName, tableName, newTable, principalPrivileges); } @@ -513,9 +509,8 @@ public void replaceTable(HiveIdentity identity, String databaseName, String tabl } @Override - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(HiveIdentity ignored, String databaseName, String tableName, String newDatabaseName, String newTableName) { - identity = updateIdentity(identity); try { delegate.renameTable(identity, databaseName, tableName, newDatabaseName, newTableName); } @@ -526,9 +521,8 @@ public void renameTable(HiveIdentity identity, String databaseName, String table } @Override - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void commentTable(HiveIdentity ignored, String databaseName, String tableName, Optional comment) { - identity = updateIdentity(identity); try { delegate.commentTable(identity, databaseName, tableName, comment); } @@ -540,7 +534,6 @@ public void commentTable(HiveIdentity identity, String databaseName, String tabl @Override public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) { - identity = updateIdentity(identity); try { delegate.setTableOwner(identity, databaseName, tableName, principal); } @@ -552,7 +545,6 @@ public void setTableOwner(HiveIdentity identity, String databaseName, String tab @Override public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) { - identity = updateIdentity(identity); try { delegate.commentColumn(identity, databaseName, tableName, columnName, comment); } @@ -564,7 +556,6 @@ public void commentColumn(HiveIdentity identity, String databaseName, String tab @Override public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - identity = updateIdentity(identity); try { delegate.addColumn(identity, databaseName, tableName, columnName, columnType, columnComment); } @@ -576,7 +567,6 @@ public void addColumn(HiveIdentity identity, String databaseName, String tableNa @Override public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) { - identity = updateIdentity(identity); try { delegate.renameColumn(identity, databaseName, tableName, oldColumnName, newColumnName); } @@ -588,7 +578,6 @@ public void renameColumn(HiveIdentity identity, String databaseName, String tabl @Override public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) { - identity = updateIdentity(identity); try { delegate.dropColumn(identity, databaseName, tableName, columnName); } @@ -597,7 +586,8 @@ public void dropColumn(HiveIdentity identity, String databaseName, String tableN } } - protected void invalidateTable(String databaseName, String tableName) + @VisibleForTesting + void invalidateTable(String databaseName, String tableName) { invalidateTableCache(databaseName, tableName); tableNamesCache.invalidate(databaseName); @@ -612,24 +602,24 @@ protected void invalidateTable(String databaseName, String tableName) private void invalidateTableCache(String databaseName, 
String tableName) { tableCache.asMap().keySet().stream() - .filter(table -> table.getKey().getDatabaseName().equals(databaseName) && table.getKey().getTableName().equals(tableName)) + .filter(table -> table.getDatabaseName().equals(databaseName) && table.getTableName().equals(tableName)) .forEach(tableCache::invalidate); } private void invalidateTableStatisticsCache(String databaseName, String tableName) { tableStatisticsCache.asMap().keySet().stream() - .filter(table -> table.getKey().getDatabaseName().equals(databaseName) && table.getKey().getTableName().equals(tableName)) + .filter(table -> table.getDatabaseName().equals(databaseName) && table.getTableName().equals(tableName)) .forEach(tableCache::invalidate); } - private Partition getExistingPartition(HiveIdentity identity, Table table, List partitionValues) + private Partition getExistingPartition(Table table, List partitionValues) { return getPartition(identity, table, partitionValues) .orElseThrow(() -> new PartitionNotFoundException(table.getSchemaTableName(), partitionValues)); } - private List getExistingPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + private List getExistingPartitionsByNames(Table table, List partitionNames) { Map partitions = getPartitionsByNames(identity, table, partitionNames).entrySet().stream() .map(entry -> immutableEntry(entry.getKey(), entry.getValue().orElseThrow(() -> @@ -644,56 +634,55 @@ private List getExistingPartitionsByNames(HiveIdentity identity, Tabl @Override public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) { - return get(partitionCache, new WithIdentity<>(updateIdentity(identity), hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionValues))); + return get(partitionCache, hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionValues)); } @Override public Optional> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) { - return get(partitionFilterCache, new WithIdentity<>(updateIdentity(identity), partitionFilter(databaseName, tableName, columnNames, partitionKeysFilter))); + return get(partitionFilterCache, partitionFilter(databaseName, tableName, columnNames, partitionKeysFilter)); } - private Optional> loadPartitionNamesByFilter(WithIdentity partitionFilter) + private Optional> loadPartitionNamesByFilter(PartitionFilter partitionFilter) { return delegate.getPartitionNamesByFilter( - partitionFilter.getIdentity(), - partitionFilter.getKey().getHiveTableName().getDatabaseName(), - partitionFilter.getKey().getHiveTableName().getTableName(), - partitionFilter.getKey().getPartitionColumnNames(), - partitionFilter.getKey().getPartitionKeysFilter()); + identity, + partitionFilter.getHiveTableName().getDatabaseName(), + partitionFilter.getHiveTableName().getTableName(), + partitionFilter.getPartitionColumnNames(), + partitionFilter.getPartitionKeysFilter()); } @Override public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) { - List> names = partitionNames.stream() - .map(name -> new WithIdentity<>(updateIdentity(identity), hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), name))) + List names = partitionNames.stream() + .map(name -> hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), name)) .collect(toImmutableList()); - Map, Optional> all = getAll(partitionCache, names); + Map> all = 
getAll(partitionCache, names); ImmutableMap.Builder> partitionsByName = ImmutableMap.builder(); - for (Entry, Optional> entry : all.entrySet()) { - partitionsByName.put(entry.getKey().getKey().getPartitionName().get(), entry.getValue()); + for (Entry> entry : all.entrySet()) { + partitionsByName.put(entry.getKey().getPartitionName().orElseThrow(), entry.getValue()); } return partitionsByName.buildOrThrow(); } - private Optional loadPartitionByName(WithIdentity partitionName) + private Optional loadPartitionByName(HivePartitionName partitionName) { - HiveTableName hiveTableName = partitionName.getKey().getHiveTableName(); - return getTable(partitionName.getIdentity(), hiveTableName.getDatabaseName(), hiveTableName.getTableName()) - .flatMap(table -> delegate.getPartition(partitionName.getIdentity(), table, partitionName.getKey().getPartitionValues())); + HiveTableName hiveTableName = partitionName.getHiveTableName(); + return getTable(identity, hiveTableName.getDatabaseName(), hiveTableName.getTableName()) + .flatMap(table -> delegate.getPartition(identity, table, partitionName.getPartitionValues())); } - private Map, Optional> loadPartitionsByNames(Iterable> partitionNames) + private Map> loadPartitionsByNames(Iterable partitionNames) { requireNonNull(partitionNames, "partitionNames is null"); checkArgument(!Iterables.isEmpty(partitionNames), "partitionNames is empty"); - WithIdentity firstPartition = Iterables.get(partitionNames, 0); + HivePartitionName firstPartition = Iterables.get(partitionNames, 0); - HiveTableName hiveTableName = firstPartition.getKey().getHiveTableName(); - HiveIdentity identity = updateIdentity(firstPartition.getIdentity()); + HiveTableName hiveTableName = firstPartition.getHiveTableName(); Optional
table = getTable(identity, hiveTableName.getDatabaseName(), hiveTableName.getTableName()); if (table.isEmpty()) { return stream(partitionNames) @@ -701,16 +690,15 @@ private Map, Optional> loadPartitions } List partitionsToFetch = new ArrayList<>(); - for (WithIdentity partitionName : partitionNames) { - checkArgument(partitionName.getKey().getHiveTableName().equals(hiveTableName), "Expected table name %s but got %s", hiveTableName, partitionName.getKey().getHiveTableName()); - checkArgument(identity.equals(partitionName.getIdentity()), "Expected identity %s but got %s", identity, partitionName.getIdentity()); - partitionsToFetch.add(partitionName.getKey().getPartitionName().get()); + for (HivePartitionName partitionName : partitionNames) { + checkArgument(partitionName.getHiveTableName().equals(hiveTableName), "Expected table name %s but got %s", hiveTableName, partitionName.getHiveTableName()); + partitionsToFetch.add(partitionName.getPartitionName().orElseThrow()); } - ImmutableMap.Builder, Optional> partitions = ImmutableMap.builder(); + ImmutableMap.Builder> partitions = ImmutableMap.builder(); Map> partitionsByNames = delegate.getPartitionsByNames(identity, table.get(), partitionsToFetch); - for (WithIdentity partitionName : partitionNames) { - partitions.put(partitionName, partitionsByNames.getOrDefault(partitionName.getKey().getPartitionName().get(), Optional.empty())); + for (HivePartitionName partitionName : partitionNames) { + partitions.put(partitionName, partitionsByNames.getOrDefault(partitionName.getPartitionName().orElseThrow(), Optional.empty())); } return partitions.buildOrThrow(); } @@ -718,7 +706,6 @@ private Map, Optional> loadPartitions @Override public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions) { - identity = updateIdentity(identity); try { delegate.addPartitions(identity, databaseName, tableName, partitions); } @@ -731,7 +718,6 @@ public void addPartitions(HiveIdentity identity, String databaseName, String tab @Override public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) { - identity = updateIdentity(identity); try { delegate.dropPartition(identity, databaseName, tableName, parts, deleteData); } @@ -743,7 +729,6 @@ public void dropPartition(HiveIdentity identity, String databaseName, String tab @Override public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) { - identity = updateIdentity(identity); try { delegate.alterPartition(identity, databaseName, tableName, partition); } @@ -839,15 +824,14 @@ private void invalidatePartitionCache(String databaseName, String tableName, Pre { HiveTableName hiveTableName = hiveTableName(databaseName, tableName); - Predicate> hivePartitionPredicate = - partitionName -> partitionName.getKey().getHiveTableName().equals(hiveTableName) && - partitionPredicate.test(partitionName.getKey().getPartitionName()); + Predicate hivePartitionPredicate = partitionName -> partitionName.getHiveTableName().equals(hiveTableName) && + partitionPredicate.test(partitionName.getPartitionName()); partitionCache.asMap().keySet().stream() .filter(hivePartitionPredicate) .forEach(partitionCache::invalidate); partitionFilterCache.asMap().keySet().stream() - .filter(partitionFilter -> partitionFilter.getKey().getHiveTableName().equals(hiveTableName)) + .filter(partitionFilter -> partitionFilter.getHiveTableName().equals(hiveTableName)) 
.forEach(partitionFilterCache::invalidate); partitionStatisticsCache.asMap().keySet().stream() .filter(hivePartitionPredicate) @@ -878,7 +862,7 @@ public void revokeTablePrivileges(String databaseName, String tableName, String private void invalidateTablePrivilegeCacheEntries(String databaseName, String tableName, String tableOwner, HivePrincipal grantee) { - // some callers of xxxxTablePrivileges use Optional.of(grantee), some Optional.empty() (to get all privileges), so have to invalidate them both + // some callers of table privilege methods use Optional.of(grantee), some Optional.empty() (to get all privileges), so have to invalidate them both tablePrivilegesCache.invalidate(new UserTableKey(Optional.of(grantee), databaseName, tableName, Optional.of(tableOwner))); tablePrivilegesCache.invalidate(new UserTableKey(Optional.empty(), databaseName, tableName, Optional.of(tableOwner))); } @@ -936,12 +920,6 @@ public String getValidWriteIds(HiveIdentity identity, List tabl return delegate.getValidWriteIds(identity, tables, currentTransactionId); } - @Override - public boolean isImpersonationEnabled() - { - return delegate.isImpersonationEnabled(); - } - private Set loadTablePrivileges(String databaseName, String tableName, Optional tableOwner, Optional principal) { return delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal); @@ -973,7 +951,6 @@ public void updateTableWriteId(HiveIdentity identity, String dbName, String tabl @Override public void alterPartitions(HiveIdentity identity, String dbName, String tableName, List partitions, long writeId) { - identity = updateIdentity(identity); try { delegate.alterPartitions(identity, dbName, tableName, partitions, writeId); } @@ -985,7 +962,6 @@ public void alterPartitions(HiveIdentity identity, String dbName, String tableNa @Override public void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) { - identity = updateIdentity(identity); try { delegate.addDynamicPartitions(identity, dbName, tableName, partitionNames, transactionId, writeId, operation); } @@ -1011,9 +987,9 @@ private static LoadingCache buildCache( Optional refreshExecutor, long maximumSize, StatsRecording statsRecording, - com.google.common.base.Function loader) + Function loader) { - CacheLoader cacheLoader = CacheLoader.from(loader); + CacheLoader cacheLoader = CacheLoader.from(loader::apply); checkArgument(refreshMillis.isEmpty() || refreshExecutor.isPresent(), "refreshMillis is provided but refreshExecutor is not"); if (refreshMillis.isPresent() && (expiresAfterWriteMillis.isEmpty() || expiresAfterWriteMillis.getAsLong() > refreshMillis.getAsLong())) { @@ -1064,66 +1040,6 @@ public Map loadAll(Iterable keys) cacheLoader); } - private static class WithIdentity - { - private final HiveIdentity identity; - private final T key; - private final int hashCode; - - public WithIdentity(HiveIdentity identity, T key) - { - this.identity = requireNonNull(identity, "identity is null"); - this.key = requireNonNull(key, "key is null"); - this.hashCode = Objects.hash(identity, key); - } - - public HiveIdentity getIdentity() - { - return identity; - } - - public T getKey() - { - return key; - } - - @Override - public boolean equals(Object o) - { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - WithIdentity other = (WithIdentity) o; - return hashCode == other.hashCode && - Objects.equals(identity, 
other.identity) && - Objects.equals(key, other.key); - } - - @Override - public int hashCode() - { - return hashCode; - } - - @Override - public String toString() - { - return toStringHelper(this) - .add("identity", identity) - .add("key", key) - .toString(); - } - } - - private HiveIdentity updateIdentity(HiveIdentity identity) - { - // remove identity if not doing impersonation - return delegate.isImpersonationEnabled() ? identity : HiveIdentity.none(); - } - @Managed @Nested public CacheStatsMBean getDatabaseStats() diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java index f1f723156fb3..773a7e15535a 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java @@ -19,6 +19,7 @@ import com.google.common.util.concurrent.UncheckedExecutionException; import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; +import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.spi.NodeManager; @@ -126,6 +127,7 @@ public HiveMetastoreFactory createCachingHiveMetastoreFactory(HiveMetastoreFacto // In case there are no empty executor slots, such operation would deadlock. Therefore, a reentrant executor needs to be // used. metastoreFactory.createMetastore(Optional.empty()), + HiveIdentity.none(), new ReentrantBoundedExecutor(executorService, maxMetastoreRefreshThreads), metastoreCacheTtl, metastoreRefreshInterval, @@ -200,8 +202,10 @@ public HiveMetastore createMetastore(Optional identity) private HiveMetastore createUserCachingMetastore(String user) { + ConnectorIdentity identity = ConnectorIdentity.ofUser(user); return cachingHiveMetastore( - metastoreFactory.createMetastore(Optional.of(ConnectorIdentity.ofUser(user))), + metastoreFactory.createMetastore(Optional.of(identity)), + new HiveIdentity(identity), new ReentrantBoundedExecutor(executorService, maxMetastoreRefreshThreads), metastoreCacheTtl, metastoreRefreshInterval, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java index 01b659da7e60..ff6c413e031e 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java @@ -1146,12 +1146,6 @@ public synchronized void revokeTablePrivileges(String databaseName, String table setTablePrivileges(grantee, databaseName, tableName, Sets.difference(currentPrivileges, privilegesToRemove)); } - @Override - public boolean isImpersonationEnabled() - { - return false; - } - private synchronized void setTablePrivileges( HivePrincipal grantee, String databaseName, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java index e5975af58264..64e12a83be5c 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java @@ 
-1128,10 +1128,4 @@ public Set listTablePrivileges(String databaseName, String ta { return ImmutableSet.of(); } - - @Override - public boolean isImpersonationEnabled() - { - return false; - } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java index 1a1915e45bed..9270c89300dc 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java @@ -356,12 +356,6 @@ public Set listRoleGrants(HivePrincipal principal) () -> delegate.listRoleGrants(principal)); } - @Override - public boolean isImpersonationEnabled() - { - return delegate.isImpersonationEnabled(); - } - private void verifyRecordingMode() { if (recording.isReplay()) { diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java index 901a4f4ed3ae..9fb86d7d0e7b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java @@ -463,12 +463,6 @@ public Set listTablePrivileges(String databaseName, String ta return delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal); } - @Override - public boolean isImpersonationEnabled() - { - return delegate.isImpersonationEnabled(); - } - @Override public Optional getConfigValue(String name) { diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java index e58b74a6f030..c327e4022c2e 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java @@ -791,6 +791,7 @@ protected final void setup(String host, int port, String databaseName, String ti new ThriftMetastoreConfig(), hdfsEnvironment, false)), + HiveIdentity.none(), executor, new Duration(1, MINUTES), Optional.of(new Duration(15, SECONDS)), diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java index d2e17253559c..09f998c86a42 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java @@ -277,10 +277,4 @@ public Set listRoleGrants(HivePrincipal principal) { throw new UnsupportedOperationException(); } - - @Override - public boolean isImpersonationEnabled() - { - throw new UnsupportedOperationException(); - } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java index 9ba23646e0b4..e6f870140be2 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java @@ -125,6 +125,7 @@ public void setUp() executor 
= listeningDecorator(newCachedThreadPool(daemonThreadsNamed(getClass().getSimpleName() + "-%s"))); metastore = cachingHiveMetastore( new BridgingHiveMetastore(thriftHiveMetastore), + IDENTITY, executor, new Duration(5, TimeUnit.MINUTES), Optional.of(new Duration(1, TimeUnit.MINUTES)), @@ -480,6 +481,7 @@ public void testCachingHiveMetastoreCreationViaMemoize() ThriftHiveMetastore thriftHiveMetastore = createThriftHiveMetastore(); metastore = (CachingHiveMetastore) memoizeMetastore( new BridgingHiveMetastore(thriftHiveMetastore), + IDENTITY, 1000); assertEquals(mockClient.getAccessCount(), 0); @@ -566,17 +568,12 @@ public Map> getPartitionsByNames(HiveIdentity identi return result; } - - @Override - public boolean isImpersonationEnabled() - { - return false; - } }; // Caching metastore metastore = cachingHiveMetastore( mockMetastore, + IDENTITY, executor, new Duration(5, TimeUnit.MINUTES), Optional.of(new Duration(1, TimeUnit.MINUTES)), @@ -670,6 +667,7 @@ private CachingHiveMetastore createMetastoreWithDirectExecutor(CachingHiveMetast { return (CachingHiveMetastore) cachingHiveMetastore( new BridgingHiveMetastore(createThriftHiveMetastore()), + IDENTITY, directExecutor(), config.getMetastoreCacheTtl(), config.getMetastoreRefreshInterval(), diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java index a189c7d68d23..4a9f40d1a1c4 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java @@ -17,6 +17,7 @@ import io.trino.plugin.hive.HdfsEnvironment; import io.trino.plugin.hive.HiveConfig; import io.trino.plugin.hive.NodeVersion; +import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.iceberg.catalog.IcebergTableOperationsProvider; import io.trino.spi.TrinoException; @@ -77,7 +78,7 @@ public TrinoCatalog create(ConnectorIdentity identity) case HIVE_METASTORE: return new TrinoHiveCatalog( catalogName, - memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), 1000), + memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), new HiveIdentity(identity), 1000), hdfsEnvironment, typeManager, tableOperationsProvider, diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java index be279f7c388c..c144f28ef3c5 100644 --- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java +++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java @@ -299,13 +299,6 @@ public Set listTablePrivileges(String databaseName, String ta throw new UnsupportedOperationException(); } - @Override - public boolean isImpersonationEnabled() - { - // Local operation, doesn't need to be included in methodInvocations - return delegate.isImpersonationEnabled(); - } - @Override public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) { From 86596861f7e7154d4fc0db08e45c2e335157a17a Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 21:36:02 -0700 Subject: [PATCH 16/18] Add stats to ImpersonationCachingHiveMetastoreFactory --- .../metastore/cache/CachingHiveMetastore.java | 82 +++++++ 
.../cache/SharedHiveMetastoreCache.java | 205 +++++++++++++++++- .../cache/TestCachingHiveMetastore.java | 2 +- 3 files changed, 285 insertions(+), 4 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java index 3a44bd9849e4..53169f3a8952 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java @@ -1040,6 +1040,10 @@ public Map loadAll(Iterable keys) cacheLoader); } + // + // Stats used for non-impersonation shared caching + // + @Managed @Nested public CacheStatsMBean getDatabaseStats() @@ -1144,4 +1148,82 @@ public CacheStatsMBean getConfigValuesStats() { return new CacheStatsMBean(configValuesCache); } + + // + // Expose caches with ImpersonationCachingHiveMetastoreFactory so they can be aggregated + // + LoadingCache> getDatabaseCache() + { + return databaseCache; + } + + LoadingCache> getDatabaseNamesCache() + { + return databaseNamesCache; + } + + LoadingCache> getTableCache() + { + return tableCache; + } + + LoadingCache> getTableNamesCache() + { + return tableNamesCache; + } + + LoadingCache> getTablesWithParameterCache() + { + return tablesWithParameterCache; + } + + LoadingCache getTableStatisticsCache() + { + return tableStatisticsCache; + } + + LoadingCache getPartitionStatisticsCache() + { + return partitionStatisticsCache; + } + + LoadingCache> getViewNamesCache() + { + return viewNamesCache; + } + + LoadingCache> getPartitionCache() + { + return partitionCache; + } + + LoadingCache>> getPartitionFilterCache() + { + return partitionFilterCache; + } + + LoadingCache> getTablePrivilegesCache() + { + return tablePrivilegesCache; + } + + LoadingCache> getRolesCache() + { + return rolesCache; + } + + LoadingCache> getRoleGrantsCache() + { + return roleGrantsCache; + } + + LoadingCache> getGrantedPrincipalsCache() + { + return grantedPrincipalsCache; + } + + LoadingCache> getConfigValuesCache() + { + return configValuesCache; + } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java index 773a7e15535a..af04db65c7e8 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java @@ -13,9 +13,12 @@ */ package io.trino.plugin.hive.metastore.cache; +import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; +import com.google.common.cache.CacheStats; import com.google.common.cache.LoadingCache; +import com.google.common.math.LongMath; import com.google.common.util.concurrent.UncheckedExecutionException; import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; @@ -26,6 +29,7 @@ import io.trino.spi.TrinoException; import io.trino.spi.security.ConnectorIdentity; import org.weakref.jmx.Flatten; +import org.weakref.jmx.Managed; import org.weakref.jmx.Nested; import javax.annotation.PostConstruct; @@ -34,6 +38,7 @@ import java.util.Optional; import java.util.concurrent.ExecutorService; +import java.util.function.Function; import static com.google.common.base.Preconditions.checkArgument; 
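// Sketch (illustration only, not lines from the patch): the aggregated JMX stats added below sum the
// Guava CacheStats counters across all per-user CachingHiveMetastore instances and derive rates from
// the sums, so busier user caches carry proportionally more weight than averaging per-user rates would:
//   hitRate  = totalHitCount / totalRequestCount    (defined as 1.0 when totalRequestCount == 0)
//   missRate = totalMissCount / totalRequestCount   (defined as 0.0 when totalRequestCount == 0)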
import static com.google.common.base.Throwables.throwIfInstanceOf; @@ -169,7 +174,7 @@ public class ImpersonationCachingHiveMetastoreFactory implements HiveMetastoreFactory { private final HiveMetastoreFactory metastoreFactory; - private final LoadingCache cache; + private final LoadingCache cache; public ImpersonationCachingHiveMetastoreFactory(HiveMetastoreFactory metastoreFactory) { @@ -200,7 +205,7 @@ public HiveMetastore createMetastore(Optional identity) } } - private HiveMetastore createUserCachingMetastore(String user) + private CachingHiveMetastore createUserCachingMetastore(String user) { ConnectorIdentity identity = ConnectorIdentity.ofUser(user); return cachingHiveMetastore( @@ -212,6 +217,200 @@ private HiveMetastore createUserCachingMetastore(String user) metastoreCacheMaximumSize); } - // todo aggregate and export stats + @Managed + public void flushCache() + { + cache.asMap().values().forEach(CachingHiveMetastore::flushCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getDatabaseStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getDatabaseCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getDatabaseNamesStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getDatabaseNamesCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getTableStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getTableCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getTableNamesStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getTableNamesCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getTableWithParameterStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getTablesWithParameterCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getTableStatisticsStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getTableStatisticsCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getPartitionStatisticsStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getPartitionStatisticsCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getViewNamesStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getViewNamesCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getPartitionStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getPartitionCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getPartitionFilterStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getPartitionFilterCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getTablePrivilegesStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getTablePrivilegesCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getRolesStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getRolesCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getRoleGrantsStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getRoleGrantsCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getGrantedPrincipalsStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getGrantedPrincipalsCache); + } + + @Managed + @Nested + public AggregateCacheStatsMBean getConfigValuesStats() + { + return new AggregateCacheStatsMBean(CachingHiveMetastore::getConfigValuesCache); + } + + public class 
AggregateCacheStatsMBean + { + private final Function> cacheExtractor; + + public AggregateCacheStatsMBean(Function> cacheExtractor) + { + this.cacheExtractor = requireNonNull(cacheExtractor, "cacheExtractor is null"); + } + + @Managed + public long size() + { + return cache.asMap().values().stream() + .map(cacheExtractor) + .mapToLong(Cache::size) + .reduce(0, LongMath::saturatedAdd); + } + + @Managed + public Double getHitRate() + { + return aggregateStats().getHitRate(); + } + + @Managed + public Double getMissRate() + { + return aggregateStats().getMissRate(); + } + + @Managed + public long getRequestCount() + { + return aggregateStats().getRequestCount(); + } + + private CacheStatsAggregator aggregateStats() + { + CacheStatsAggregator aggregator = new CacheStatsAggregator(); + for (CachingHiveMetastore metastore : cache.asMap().values()) { + aggregator.add(cacheExtractor.apply(metastore).stats()); + } + return aggregator; + } + } + } + + private static final class CacheStatsAggregator + { + private long requestCount; + private long hitCount; + private long missCount; + + void add(CacheStats stats) + { + requestCount += stats.requestCount(); + hitCount += stats.hitCount(); + missCount += stats.missCount(); + } + + public long getRequestCount() + { + return requestCount; + } + + public long getHitCount() + { + return hitCount; + } + + public long getMissCount() + { + return missCount; + } + + public double getHitRate() + { + return (requestCount == 0) ? 1.0 : (double) hitCount / requestCount; + } + + public double getMissRate() + { + return (requestCount == 0) ? 0.0 : (double) missCount / requestCount; + } } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java index e6f870140be2..4133f1b6348e 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java @@ -665,7 +665,7 @@ private static void await(CountDownLatch latch, long timeout, TimeUnit unit) private CachingHiveMetastore createMetastoreWithDirectExecutor(CachingHiveMetastoreConfig config) { - return (CachingHiveMetastore) cachingHiveMetastore( + return cachingHiveMetastore( new BridgingHiveMetastore(createThriftHiveMetastore()), IDENTITY, directExecutor(), From ef7a130e4471c4af796833dea2f8831344fd716b Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 21:45:09 -0700 Subject: [PATCH 17/18] Pass HiveIdentity to BridgingHiveMetastore --- .../metastore/thrift/BridgingHiveMetastore.java | 6 ++++-- .../thrift/BridgingHiveMetastoreFactory.java | 3 ++- .../io/trino/plugin/hive/AbstractTestHive.java | 5 +++-- .../plugin/hive/AbstractTestHiveFileSystem.java | 16 +++++++++------- .../trino/plugin/hive/AbstractTestHiveLocal.java | 4 ++-- .../plugin/hive/BaseTestHiveOnDataLake.java | 3 ++- .../trino/plugin/hive/TestHiveFileMetastore.java | 3 ++- .../plugin/hive/TestHiveInMemoryMetastore.java | 5 +++-- .../cache/TestCachingHiveMetastore.java | 8 ++++---- .../metastore/glue/TestHiveGlueMetastore.java | 2 +- .../trino/plugin/hive/s3/S3HiveQueryRunner.java | 4 +++- 11 files changed, 35 insertions(+), 24 deletions(-) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java index 9fb86d7d0e7b..b9bd09c81ad4 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java @@ -69,10 +69,12 @@ public class BridgingHiveMetastore implements HiveMetastore { private final ThriftMetastore delegate; + private final HiveIdentity identity; - public BridgingHiveMetastore(ThriftMetastore delegate) + public BridgingHiveMetastore(ThriftMetastore delegate, HiveIdentity identity) { - this.delegate = delegate; + this.delegate = requireNonNull(delegate, "delegate is null"); + this.identity = requireNonNull(identity, "identity is null"); } @Override diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java index d3791a065aa1..f075d54205ea 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastoreFactory.java @@ -13,6 +13,7 @@ */ package io.trino.plugin.hive.metastore.thrift; +import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.spi.security.ConnectorIdentity; @@ -43,6 +44,6 @@ public boolean isImpersonationEnabled() @Override public HiveMetastore createMetastore(Optional identity) { - return new BridgingHiveMetastore(thriftMetastore); + return new BridgingHiveMetastore(thriftMetastore, identity.map(HiveIdentity::new).orElse(HiveIdentity.none())); } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java index c327e4022c2e..f7a321c3a2e2 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java @@ -790,8 +790,9 @@ protected final void setup(String host, int port, String databaseName, String ti new MetastoreConfig(), new ThriftMetastoreConfig(), hdfsEnvironment, - false)), - HiveIdentity.none(), + false), + new HiveIdentity(SESSION.getIdentity())), + new HiveIdentity(SESSION.getIdentity()), executor, new Duration(1, MINUTES), Optional.of(new Duration(15, SECONDS)), diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java index abd31a31ea62..aceafab0e8a5 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java @@ -187,13 +187,15 @@ protected void setup(String host, int port, String databaseName, boolean s3Selec hdfsEnvironment = new HdfsEnvironment(hdfsConfiguration, new HdfsConfig(), new NoHdfsAuthentication()); MetastoreConfig metastoreConfig = new MetastoreConfig(); metastoreClient = new TestingHiveMetastore( - new BridgingHiveMetastore(new ThriftHiveMetastore( - metastoreLocator, - new HiveConfig(), - metastoreConfig, - new ThriftMetastoreConfig(), - hdfsEnvironment, - false)), + new BridgingHiveMetastore( + new 
ThriftHiveMetastore( + metastoreLocator, + new HiveConfig(), + metastoreConfig, + new ThriftMetastoreConfig(), + hdfsEnvironment, + false), + new HiveIdentity(getHiveSession(config).getIdentity())), getBasePath(), hdfsEnvironment); locationService = new HiveLocationService(hdfsEnvironment); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java index 8614f36b2078..2203b600c525 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java @@ -86,14 +86,14 @@ protected AbstractTestHiveLocal(String testDbName) this.testDbName = requireNonNull(testDbName, "testDbName is null"); } - protected abstract HiveMetastore createMetastore(File tempDir); + protected abstract HiveMetastore createMetastore(File tempDir, HiveIdentity identity); @BeforeClass(alwaysRun = true) public void initialize() { tempDir = Files.createTempDir(); - HiveMetastore metastore = createMetastore(tempDir); + HiveMetastore metastore = createMetastore(tempDir, HIVE_IDENTITY); metastore.createDatabase(HIVE_IDENTITY, Database.builder() diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java index 5ea5adbbed06..31bf7ddd227d 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java @@ -85,7 +85,8 @@ protected QueryRunner createQueryRunner() ImmutableSet.of()), new HdfsConfig(), new NoHdfsAuthentication()), - false)); + false), + HiveIdentity.none()); return S3HiveQueryRunner.create( dockerizedS3DataLake, ImmutableMap.builder() diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileMetastore.java index 01d8a1f671db..bc265a5c1b58 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveFileMetastore.java @@ -13,6 +13,7 @@ */ package io.trino.plugin.hive; +import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.MetastoreConfig; import io.trino.plugin.hive.metastore.file.FileHiveMetastore; @@ -30,7 +31,7 @@ public class TestHiveFileMetastore extends AbstractTestHiveLocal { @Override - protected HiveMetastore createMetastore(File tempDir) + protected HiveMetastore createMetastore(File tempDir, HiveIdentity identity) { File baseDir = new File(tempDir, "metastore"); return new FileHiveMetastore( diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveInMemoryMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveInMemoryMetastore.java index 44667f59d5c4..c839869600ca 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveInMemoryMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHiveInMemoryMetastore.java @@ -13,6 +13,7 @@ */ package io.trino.plugin.hive; +import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.thrift.BridgingHiveMetastore; import 
io.trino.plugin.hive.metastore.thrift.InMemoryThriftMetastore; @@ -28,12 +29,12 @@ public class TestHiveInMemoryMetastore extends AbstractTestHiveLocal { @Override - protected HiveMetastore createMetastore(File tempDir) + protected HiveMetastore createMetastore(File tempDir, HiveIdentity identity) { File baseDir = new File(tempDir, "metastore"); ThriftMetastoreConfig metastoreConfig = new ThriftMetastoreConfig(); InMemoryThriftMetastore hiveMetastore = new InMemoryThriftMetastore(baseDir, metastoreConfig); - return new BridgingHiveMetastore(hiveMetastore); + return new BridgingHiveMetastore(hiveMetastore, identity); } @Test diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java index 4133f1b6348e..971eccdb603c 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java @@ -124,7 +124,7 @@ public void setUp() ThriftHiveMetastore thriftHiveMetastore = createThriftHiveMetastore(); executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed(getClass().getSimpleName() + "-%s"))); metastore = cachingHiveMetastore( - new BridgingHiveMetastore(thriftHiveMetastore), + new BridgingHiveMetastore(thriftHiveMetastore, IDENTITY), IDENTITY, executor, new Duration(5, TimeUnit.MINUTES), @@ -479,8 +479,8 @@ public void testCachingHiveMetastoreCreationWithTtlOnly() public void testCachingHiveMetastoreCreationViaMemoize() { ThriftHiveMetastore thriftHiveMetastore = createThriftHiveMetastore(); - metastore = (CachingHiveMetastore) memoizeMetastore( - new BridgingHiveMetastore(thriftHiveMetastore), + metastore = memoizeMetastore( + new BridgingHiveMetastore(thriftHiveMetastore, IDENTITY), IDENTITY, 1000); @@ -666,7 +666,7 @@ private static void await(CountDownLatch latch, long timeout, TimeUnit unit) private CachingHiveMetastore createMetastoreWithDirectExecutor(CachingHiveMetastoreConfig config) { return cachingHiveMetastore( - new BridgingHiveMetastore(createThriftHiveMetastore()), + new BridgingHiveMetastore(createThriftHiveMetastore(), IDENTITY), IDENTITY, directExecutor(), config.getMetastoreCacheTtl(), diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java index 50c9276b30e1..9526fe518352 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java @@ -201,7 +201,7 @@ public void setup() } @Override - protected HiveMetastore createMetastore(File tempDir) + protected HiveMetastore createMetastore(File tempDir, HiveIdentity identity) { GlueHiveMetastoreConfig glueConfig = new GlueHiveMetastoreConfig(); glueConfig.setDefaultWarehouseDir(tempDir.toURI().toString()); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/s3/S3HiveQueryRunner.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/s3/S3HiveQueryRunner.java index acc17de53fc7..aa06e7bec21b 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/s3/S3HiveQueryRunner.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/s3/S3HiveQueryRunner.java @@ -21,6 +21,7 @@ import io.trino.plugin.hive.HiveConfig; 
import io.trino.plugin.hive.HiveHdfsConfiguration; import io.trino.plugin.hive.HiveQueryRunner; +import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.authentication.NoHdfsAuthentication; import io.trino.plugin.hive.containers.HiveMinioDataLake; import io.trino.plugin.hive.metastore.MetastoreConfig; @@ -155,7 +156,8 @@ public DistributedQueryRunner build() ImmutableSet.of()), new HdfsConfig(), new NoHdfsAuthentication()), - false))); + false), + new HiveIdentity(distributedQueryRunner.getDefaultSession().getIdentity().toConnectorIdentity()))); setInitialSchemasLocationBase("s3a://" + bucketName); // cannot use s3:// as Hive metastore is not configured to accept it return super.build(); } From aab50e2dfb3cd7c63ca385293b628fe503cd39a9 Mon Sep 17 00:00:00 2001 From: Dain Sundstrom Date: Sat, 2 Oct 2021 23:05:16 -0700 Subject: [PATCH 18/18] Replace HiveIdentity in metadata APIs HiveIdentity is provided to metadata objects during construction. The underlying ThriftHiveMetastore uses identity on a per-call basis. --- .../plugin/geospatial/TestSpatialJoins.java | 3 - .../io/trino/plugin/hive/HiveMetadata.java | 105 +++-- .../plugin/hive/HiveMetadataFactory.java | 3 +- .../plugin/hive/HiveMetastoreClosure.java | 178 ++++---- .../plugin/hive/HivePageSinkProvider.java | 4 +- .../plugin/hive/HivePartitionManager.java | 13 +- .../trino/plugin/hive/HiveSplitManager.java | 6 +- .../hive/PartitionsSystemTableProvider.java | 5 +- .../hive/PropertiesSystemTableProvider.java | 3 +- .../io/trino/plugin/hive/ViewReaderUtil.java | 3 +- .../hive/authentication/HiveIdentity.java | 6 - .../CoralSemiTransactionalHiveMSCAdapter.java | 6 +- .../metastore/ForwardingHiveMetastore.java | 133 +++--- .../plugin/hive/metastore/HiveMetastore.java | 79 ++-- .../HivePageSinkMetadataProvider.java | 7 +- .../hive/metastore/HiveTransaction.java | 7 +- .../plugin/hive/metastore/MetastoreUtil.java | 5 +- .../SemiTransactionalHiveMetastore.java | 398 +++++++----------- .../alluxio/AlluxioHiveMetastore.java | 48 +-- .../metastore/cache/CachingHiveMetastore.java | 183 ++----- .../cache/SharedHiveMetastoreCache.java | 3 - .../metastore/file/FileHiveMetastore.java | 74 ++-- .../metastore/glue/GlueHiveMetastore.java | 86 ++-- .../recording/RecordingHiveMetastore.java | 108 ++--- .../thrift/BridgingHiveMetastore.java | 91 ++-- .../CreateEmptyPartitionProcedure.java | 3 +- .../hive/procedure/DropStatsProcedure.java | 6 +- .../procedure/RegisterPartitionProcedure.java | 6 +- .../SyncPartitionMetadataProcedure.java | 6 +- .../UnregisterPartitionProcedure.java | 6 +- .../hive/security/LegacyAccessControl.java | 3 +- .../LegacyAccessControlMetastore.java | 3 +- ...sactionalLegacyAccessControlMetastore.java | 5 +- ...onalSqlStandardAccessControlMetastore.java | 5 +- .../security/SqlStandardAccessControl.java | 14 +- .../SqlStandardAccessControlMetadata.java | 15 +- ...tandardAccessControlMetadataMetastore.java | 7 +- .../SqlStandardAccessControlMetastore.java | 3 +- .../MetastoreHiveStatisticsProvider.java | 9 +- .../trino/plugin/hive/AbstractTestHive.java | 193 ++++----- .../hive/AbstractTestHiveFileSystem.java | 27 +- .../plugin/hive/AbstractTestHiveLocal.java | 10 +- .../plugin/hive/BaseTestHiveOnDataLake.java | 11 +- .../plugin/hive/HiveBenchmarkQueryRunner.java | 5 +- .../io/trino/plugin/hive/HiveQueryRunner.java | 7 +- .../trino/plugin/hive/TestHivePageSink.java | 5 +- .../TestSemiTransactionalHiveMetastore.java | 12 +- .../metastore/UnimplementedHiveMetastore.java | 57 +---
.../cache/TestCachingHiveMetastore.java | 86 ++-- ...stCachingHiveMetastoreWithQueryRunner.java | 3 +- .../metastore/glue/TestHiveGlueMetastore.java | 56 ++- .../recording/TestRecordingHiveMetastore.java | 36 +- .../TestConnectorPushdownRulesWithHive.java | 12 +- .../plugin/hive/optimizer/TestHivePlans.java | 3 +- ...stHiveProjectionPushdownIntoTableScan.java | 3 +- .../plugin/iceberg/TrinoCatalogFactory.java | 3 +- .../plugin/iceberg/TrinoHiveCatalog.java | 48 +-- .../AbstractMetastoreTableOperations.java | 6 +- .../file/FileMetastoreTableOperations.java | 4 +- .../hms/HiveMetastoreTableOperations.java | 4 +- .../CountingAccessFileHiveMetastore.java | 67 +-- .../TestIcebergProjectionPushdownPlans.java | 3 +- .../TestIcebergTableWithCustomLocation.java | 20 +- .../TestMetadataQueryOptimization.java | 3 +- 64 files changed, 1062 insertions(+), 1270 deletions(-) diff --git a/plugin/trino-geospatial/src/test/java/io/trino/plugin/geospatial/TestSpatialJoins.java b/plugin/trino-geospatial/src/test/java/io/trino/plugin/geospatial/TestSpatialJoins.java index 4c71be6dd635..96b615288528 100644 --- a/plugin/trino-geospatial/src/test/java/io/trino/plugin/geospatial/TestSpatialJoins.java +++ b/plugin/trino-geospatial/src/test/java/io/trino/plugin/geospatial/TestSpatialJoins.java @@ -15,7 +15,6 @@ import io.trino.Session; import io.trino.plugin.hive.TestingHivePlugin; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.spi.security.PrincipalType; @@ -28,7 +27,6 @@ import static io.trino.SystemSessionProperties.SPATIAL_PARTITIONING_TABLE_NAME; import static io.trino.plugin.hive.metastore.file.FileHiveMetastore.createTestingFileHiveMetastore; -import static io.trino.testing.TestingConnectorSession.SESSION; import static io.trino.testing.TestingSession.testSessionBuilder; import static java.lang.String.format; @@ -79,7 +77,6 @@ protected DistributedQueryRunner createQueryRunner() HiveMetastore metastore = createTestingFileHiveMetastore(baseDir); metastore.createDatabase( - new HiveIdentity(SESSION), Database.builder() .setDatabaseName("default") .setOwnerName(Optional.of("public")) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java index 162cd6cca2f8..d315f4200521 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadata.java @@ -36,7 +36,6 @@ import io.trino.plugin.hive.acid.AcidOperation; import io.trino.plugin.hive.acid.AcidSchema; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveColumnStatistics; @@ -428,7 +427,7 @@ public HiveTableHandle getTableHandle(ConnectorSession session, SchemaTableName return null; } Table table = metastore - .getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + .getTable(tableName.getSchemaName(), tableName.getTableName()) .orElse(null); if (table == null) { @@ -554,7 +553,7 @@ private ConnectorTableMetadata getTableMetadata(ConnectorSession session, Schema private ConnectorTableMetadata doGetTableMetadata(ConnectorSession session, SchemaTableName tableName) { - Table table = metastore.getTable(new HiveIdentity(session), 
tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); if (!translateHiveViews && isHiveOrPrestoView(table)) { @@ -712,7 +711,7 @@ private List listSchemas(ConnectorSession session, Optional sche public Map getColumnHandles(ConnectorSession session, ConnectorTableHandle tableHandle) { SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName(); - Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); return hiveColumnHandles(table, typeManager, getTimestampPrecision(session)).stream() .collect(toImmutableMap(HiveColumnHandle::getName, identity())); @@ -760,7 +759,7 @@ public TableStatistics getTableStatistics(ConnectorSession session, ConnectorTab .collect(toImmutableMap(Map.Entry::getKey, Map.Entry::getValue)); Map columnTypes = columns.entrySet().stream() .collect(toImmutableMap(Map.Entry::getKey, entry -> getColumnMetadata(session, tableHandle, entry.getValue()).getType())); - HivePartitionResult partitionResult = partitionManager.getPartitions(metastore, new HiveIdentity(session), tableHandle, constraint); + HivePartitionResult partitionResult = partitionManager.getPartitions(metastore, tableHandle, constraint); List partitions = partitionManager.getPartitionsAsList(partitionResult); return hiveStatisticsProvider.getTableStatistics(session, ((HiveTableHandle) tableHandle).getSchemaTableName(), columns, columnTypes, partitions); } @@ -777,7 +776,7 @@ private List listTables(ConnectorSession session, SchemaTablePr Optional
optionalTable; try { - optionalTable = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()); + optionalTable = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()); } catch (HiveViewNotSupportedException e) { // exists, would be returned by listTables from schema @@ -818,7 +817,7 @@ public void createSchema(ConnectorSession session, String schemaName, Map new TableNotFoundException(handle.getSchemaTableName())); if (table.getParameters().containsKey(AVRO_SCHEMA_URL_KEY) || table.getStorage().getSerdeParameters().containsKey(AVRO_SCHEMA_URL_KEY)) { throw new TrinoException(NOT_SUPPORTED, "ALTER TABLE not supported when Avro schema url is set"); @@ -1207,14 +1206,14 @@ private void failIfAvroSchemaIsSet(ConnectorSession session, HiveTableHandle han public void renameTable(ConnectorSession session, ConnectorTableHandle tableHandle, SchemaTableName newTableName) { HiveTableHandle handle = (HiveTableHandle) tableHandle; - metastore.renameTable(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName(), newTableName.getSchemaName(), newTableName.getTableName()); + metastore.renameTable(handle.getSchemaName(), handle.getTableName(), newTableName.getSchemaName(), newTableName.getTableName()); } @Override public void setTableComment(ConnectorSession session, ConnectorTableHandle tableHandle, Optional comment) { HiveTableHandle handle = (HiveTableHandle) tableHandle; - metastore.commentTable(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName(), comment); + metastore.commentTable(handle.getSchemaName(), handle.getTableName(), comment); } @Override @@ -1222,14 +1221,14 @@ public void setColumnComment(ConnectorSession session, ConnectorTableHandle tabl { HiveTableHandle handle = (HiveTableHandle) tableHandle; HiveColumnHandle columnHandle = (HiveColumnHandle) column; - metastore.commentColumn(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName(), columnHandle.getName(), comment); + metastore.commentColumn(handle.getSchemaName(), handle.getTableName(), columnHandle.getName(), comment); } @Override public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle) { HiveTableHandle handle = (HiveTableHandle) tableHandle; - if (metastore.getTable(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName()).isEmpty()) { + if (metastore.getTable(handle.getSchemaName(), handle.getTableName()).isEmpty()) { throw new TableNotFoundException(handle.getSchemaTableName()); } metastore.dropTable(session, handle.getSchemaName(), handle.getTableName()); @@ -1239,7 +1238,7 @@ public void dropTable(ConnectorSession session, ConnectorTableHandle tableHandle public ConnectorTableHandle beginStatisticsCollection(ConnectorSession session, ConnectorTableHandle tableHandle) { HiveTableHandle handle = (HiveTableHandle) tableHandle; - if (metastore.getTable(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName()).isEmpty()) { + if (metastore.getTable(handle.getSchemaName(), handle.getTableName()).isEmpty()) { throw new TableNotFoundException(handle.getSchemaTableName()); } return tableHandle; @@ -1248,10 +1247,9 @@ public ConnectorTableHandle beginStatisticsCollection(ConnectorSession session, @Override public void finishStatisticsCollection(ConnectorSession session, ConnectorTableHandle tableHandle, Collection computedStatistics) { - HiveIdentity identity = new HiveIdentity(session); HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = 
handle.getSchemaTableName(); - Table table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName())); List partitionColumns = table.getPartitionColumns(); @@ -1268,7 +1266,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH if (partitionColumns.isEmpty()) { // commit analyze to unpartitioned table - metastore.setTableStatistics(identity, table, createPartitionStatistics(columnTypes, computedStatisticsMap.get(ImmutableList.of()))); + metastore.setTableStatistics(table, createPartitionStatistics(columnTypes, computedStatisticsMap.get(ImmutableList.of()))); } else { List> partitionValuesList; @@ -1276,7 +1274,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH partitionValuesList = handle.getAnalyzePartitionValues().get(); } else { - partitionValuesList = metastore.getPartitionNames(identity, handle.getSchemaName(), handle.getTableName()) + partitionValuesList = metastore.getPartitionNames(handle.getSchemaName(), handle.getTableName()) .orElseThrow(() -> new TableNotFoundException(((HiveTableHandle) tableHandle).getSchemaTableName())) .stream() .map(HiveUtil::toPartitionValues) @@ -1302,7 +1300,7 @@ public void finishStatisticsCollection(ConnectorSession session, ConnectorTableH } } verify(usedComputedStatistics == computedStatistics.size(), "All computed statistics must be used"); - metastore.setPartitionStatistics(identity, table, partitionStatistics.buildOrThrow()); + metastore.setPartitionStatistics(table, partitionStatistics.buildOrThrow()); } } @@ -1376,7 +1374,7 @@ public HiveOutputTableHandle beginCreateTable(ConnectorSession session, Connecto schemaName, tableName, columnHandles, - metastore.generatePageSinkMetadata(new HiveIdentity(session), schemaTableName), + metastore.generatePageSinkMetadata(schemaTableName), locationHandle, tableStorageFormat, partitionStorageFormat, @@ -1435,7 +1433,6 @@ public Optional finishCreateTable(ConnectorSession sess AcidTransaction transaction = handle.getTransaction(); List partitionNames = partitionUpdates.stream().map(PartitionUpdate::getName).collect(toImmutableList()); metastore.addDynamicPartitions( - new HiveIdentity(session), handle.getSchemaName(), handle.getTableName(), partitionNames, @@ -1616,10 +1613,9 @@ private static void writeEmptyFile(ConnectorSession session, Path target, JobCon @Override public ConnectorTableHandle beginUpdate(ConnectorSession session, ConnectorTableHandle tableHandle, List updatedColumns, RetryMode retryMode) { - HiveIdentity identity = new HiveIdentity(session); HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); if (!isFullAcidTable(table.getParameters())) { @@ -1688,8 +1684,7 @@ public void finishUpdate(ConnectorSession session, ConnectorTableHandle tableHan requireNonNull(fragments, "fragments is null"); SchemaTableName tableName = handle.getSchemaTableName(); - HiveIdentity identity = new HiveIdentity(session); - Table table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = 
metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); List partitionAndStatementIds = fragments.stream() @@ -1710,9 +1705,8 @@ public void finishUpdate(ConnectorSession session, ConnectorTableHandle tableHan @Override public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTableHandle tableHandle, List columns, RetryMode retryMode) { - HiveIdentity identity = new HiveIdentity(session); SchemaTableName tableName = ((HiveTableHandle) tableHandle).getSchemaTableName(); - Table table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); checkTableIsWritable(table, writesToNonManagedTablesEnabled); @@ -1751,7 +1745,7 @@ public HiveInsertTableHandle beginInsert(ConnectorSession session, ConnectorTabl tableName.getSchemaName(), tableName.getTableName(), handles, - metastore.generatePageSinkMetadata(identity, tableName), + metastore.generatePageSinkMetadata(tableName), locationHandle, table.getStorage().getBucketProperty(), tableStorageFormat, @@ -1788,7 +1782,7 @@ public Optional finishInsert(ConnectorSession session, HiveStorageFormat tableStorageFormat = handle.getTableStorageFormat(); partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates); - Table table = metastore.getTable(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName()) + Table table = metastore.getTable(handle.getSchemaName(), handle.getTableName()) .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName())); if (!table.getStorage().getStorageFormat().getInputFormat().equals(tableStorageFormat.getInputFormat()) && isRespectTableFormat(session)) { throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during insert"); @@ -1837,7 +1831,7 @@ public Optional finishInsert(ConnectorSession session, if (partitionUpdate.getUpdateMode() == OVERWRITE) { // get privileges from existing table - PrincipalPrivileges principalPrivileges = fromHivePrivilegeInfos(metastore.listTablePrivileges(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName(), Optional.empty())); + PrincipalPrivileges principalPrivileges = fromHivePrivilegeInfos(metastore.listTablePrivileges(handle.getSchemaName(), handle.getTableName(), Optional.empty())); // first drop it metastore.dropTable(session, handle.getSchemaName(), handle.getTableName()); @@ -2030,11 +2024,10 @@ private Optional getTableHandleForOptimize(Connecto throw new TrinoException(NOT_SUPPORTED, "OPTIMIZE procedure is not supported with query retries enabled"); } - HiveIdentity identity = new HiveIdentity(session); HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); checkTableIsWritable(table, writesToNonManagedTablesEnabled); @@ -2081,7 +2074,7 @@ private Optional getTableHandleForOptimize(Connecto tableName.getSchemaName(), tableName.getTableName(), columns, - metastore.generatePageSinkMetadata(identity, tableName), + metastore.generatePageSinkMetadata(tableName), locationHandle, table.getStorage().getBucketProperty(), 
tableStorageFormat, @@ -2145,7 +2138,7 @@ private void finishOptimize(ConnectorSession session, ConnectorTableExecuteHandl HiveStorageFormat tableStorageFormat = handle.getTableStorageFormat(); partitionUpdates = PartitionUpdate.mergePartitionUpdates(partitionUpdates); - Table table = metastore.getTable(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName()) + Table table = metastore.getTable(handle.getSchemaName(), handle.getTableName()) .orElseThrow(() -> new TableNotFoundException(handle.getSchemaTableName())); if (!table.getStorage().getStorageFormat().getInputFormat().equals(tableStorageFormat.getInputFormat()) && isRespectTableFormat(session)) { throw new TrinoException(HIVE_CONCURRENT_MODIFICATION_DETECTED, "Table format changed during optimize"); @@ -2252,7 +2245,6 @@ public void createView(ConnectorSession session, SchemaTableName viewName, Conne definition = definition.withoutOwner(); } - HiveIdentity identity = new HiveIdentity(session); Map properties = ImmutableMap.builder() .put(TABLE_COMMENT, PRESTO_VIEW_COMMENT) .put(PRESTO_VIEW_FLAG, "true") @@ -2280,13 +2272,13 @@ public void createView(ConnectorSession session, SchemaTableName viewName, Conne Table table = tableBuilder.build(); PrincipalPrivileges principalPrivileges = accessControlMetadata.isUsingSystemSecurity() ? NO_PRIVILEGES : buildInitialPrivilegeSet(session.getUser()); - Optional
existing = metastore.getTable(identity, viewName.getSchemaName(), viewName.getTableName()); + Optional
existing = metastore.getTable(viewName.getSchemaName(), viewName.getTableName()); if (existing.isPresent()) { if (!replace || !isPrestoView(existing.get())) { throw new ViewAlreadyExistsException(viewName); } - metastore.replaceTable(identity, viewName.getSchemaName(), viewName.getTableName(), table, principalPrivileges); + metastore.replaceTable(viewName.getSchemaName(), viewName.getTableName(), table, principalPrivileges); return; } @@ -2302,7 +2294,7 @@ public void createView(ConnectorSession session, SchemaTableName viewName, Conne public void renameView(ConnectorSession session, SchemaTableName source, SchemaTableName target) { // Not checking if source view exists as this is already done in RenameViewTask - metastore.renameTable(new HiveIdentity(session), source.getSchemaName(), source.getTableName(), target.getSchemaName(), target.getTableName()); + metastore.renameTable(source.getSchemaName(), source.getTableName(), target.getSchemaName(), target.getTableName()); } @Override @@ -2397,7 +2389,7 @@ public Optional getView(ConnectorSession session, Schem if (isHiveSystemSchema(viewName.getSchemaName())) { return Optional.empty(); } - return metastore.getTable(new HiveIdentity(session), viewName.getSchemaName(), viewName.getTableName()) + return metastore.getTable(viewName.getSchemaName(), viewName.getTableName()) .filter(ViewReaderUtil::canDecodeView) .map(view -> { if (!translateHiveViews && !isPrestoView(view)) { @@ -2427,7 +2419,7 @@ public ConnectorTableHandle beginDelete(ConnectorSession session, ConnectorTable HiveTableHandle handle = (HiveTableHandle) tableHandle; SchemaTableName tableName = handle.getSchemaTableName(); - Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); ensureTableSupportsDelete(table); @@ -2457,8 +2449,7 @@ public void finishDelete(ConnectorSession session, ConnectorTableHandle tableHan requireNonNull(fragments, "fragments is null"); SchemaTableName tableName = handle.getSchemaTableName(); - HiveIdentity identity = new HiveIdentity(session); - Table table = metastore.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); ensureTableSupportsDelete(table); @@ -2511,7 +2502,7 @@ public OptionalLong executeDelete(ConnectorSession session, ConnectorTableHandle { HiveTableHandle handle = (HiveTableHandle) deleteHandle; - Optional
table = metastore.getTable(new HiveIdentity(session), handle.getSchemaName(), handle.getTableName()); + Optional
table = metastore.getTable(handle.getSchemaName(), handle.getTableName()); if (table.isEmpty()) { throw new TableNotFoundException(handle.getSchemaTableName()); } @@ -2520,7 +2511,7 @@ public OptionalLong executeDelete(ConnectorSession session, ConnectorTableHandle metastore.truncateUnpartitionedTable(session, handle.getSchemaName(), handle.getTableName()); } else { - for (HivePartition hivePartition : partitionManager.getOrLoadPartitions(metastore, new HiveIdentity(session), handle)) { + for (HivePartition hivePartition : partitionManager.getOrLoadPartitions(metastore, handle)) { metastore.dropPartition(session, handle.getSchemaName(), handle.getTableName(), toPartitionValues(hivePartition.getPartitionId()), true); } } @@ -2534,7 +2525,7 @@ public ConnectorTableProperties getTableProperties(ConnectorSession session, Con HiveTableHandle hiveTable = (HiveTableHandle) table; List partitionColumns = ImmutableList.copyOf(hiveTable.getPartitionColumns()); - List partitions = partitionManager.getOrLoadPartitions(metastore, new HiveIdentity(session), hiveTable); + List partitions = partitionManager.getOrLoadPartitions(metastore, hiveTable); TupleDomain predicate = createPredicate(partitionColumns, partitions); @@ -2592,7 +2583,7 @@ public Optional> applyFilter(C HiveTableHandle handle = (HiveTableHandle) tableHandle; checkArgument(handle.getAnalyzePartitionValues().isEmpty() || constraint.getSummary().isAll(), "Analyze should not have a constraint"); - HivePartitionResult partitionResult = partitionManager.getPartitions(metastore, new HiveIdentity(session), handle, constraint); + HivePartitionResult partitionResult = partitionManager.getPartitions(metastore, handle, constraint); HiveTableHandle newHandle = partitionManager.applyPartitionResult(handle, partitionResult, constraint.getPredicateColumns()); if (handle.getPartitions().equals(newHandle.getPartitions()) && @@ -2916,7 +2907,7 @@ public Optional getInsertLayout(ConnectorSession session, { HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; SchemaTableName tableName = hiveTableHandle.getSchemaTableName(); - Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); if (table.getStorage().getBucketProperty().isPresent()) { @@ -3463,7 +3454,7 @@ public Optional redirectTable(ConnectorSession session, } // we need to chop off any "$partitions" and similar suffixes from table name while querying the metastore for the Table object TableNameSplitResult tableNameSplit = splitTableName(tableName.getTableName()); - Optional
table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableNameSplit.getBaseTableName()); + Optional
table = metastore.getTable(tableName.getSchemaName(), tableNameSplit.getBaseTableName()); if (table.isEmpty() || VIRTUAL_VIEW.name().equals(table.get().getTableType())) { return Optional.empty(); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java index a66404d04a3b..9419a743d084 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetadataFactory.java @@ -17,7 +17,6 @@ import io.airlift.json.JsonCodec; import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.MetastoreConfig; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; @@ -193,7 +192,7 @@ public HiveMetadataFactory( public TransactionalMetadata create(ConnectorIdentity identity, boolean autoCommit) { HiveMetastoreClosure hiveMetastoreClosure = new HiveMetastoreClosure( - memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), new HiveIdentity(identity), perTransactionCacheMaximumSize)); // per-transaction cache + memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), perTransactionCacheMaximumSize)); // per-transaction cache SemiTransactionalHiveMetastore metastore = new SemiTransactionalHiveMetastore( hdfsEnvironment, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java index 1091873b7c67..850fc07f33e7 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveMetastoreClosure.java @@ -16,7 +16,6 @@ import com.google.common.collect.ImmutableList; import io.trino.plugin.hive.acid.AcidOperation; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HivePrincipal; @@ -70,15 +69,15 @@ public List getAllDatabases() return delegate.getAllDatabases(); } - private Table getExistingTable(HiveIdentity identity, String databaseName, String tableName) + private Table getExistingTable(String databaseName, String tableName) { - return delegate.getTable(identity, databaseName, tableName) + return delegate.getTable(databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); } - public Optional
getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional
getTable(String databaseName, String tableName) { - return delegate.getTable(identity, databaseName, tableName); + return delegate.getTable(databaseName, tableName); } public Set getSupportedColumnStatistics(Type type) @@ -86,33 +85,39 @@ public Set getSupportedColumnStatistics(Type type) return delegate.getSupportedColumnStatistics(type); } - public PartitionStatistics getTableStatistics(HiveIdentity identity, String databaseName, String tableName) + public PartitionStatistics getTableStatistics(String databaseName, String tableName) { - return delegate.getTableStatistics(identity, getExistingTable(identity, databaseName, tableName)); + return delegate.getTableStatistics(getExistingTable(databaseName, tableName)); } - public Map getPartitionStatistics(HiveIdentity identity, String databaseName, String tableName, Set partitionNames) + public Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) { - Table table = getExistingTable(identity, databaseName, tableName); - List partitions = getExistingPartitionsByNames(identity, table, ImmutableList.copyOf(partitionNames)); - return delegate.getPartitionStatistics(identity, table, partitions); + Table table = getExistingTable(databaseName, tableName); + List partitions = getExistingPartitionsByNames(table, ImmutableList.copyOf(partitionNames)); + return delegate.getPartitionStatistics(table, partitions); } - public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public void updateTableStatistics(String databaseName, + String tableName, + AcidTransaction transaction, + Function update) { - delegate.updateTableStatistics(identity, databaseName, tableName, transaction, update); + delegate.updateTableStatistics(databaseName, tableName, transaction, update); } - public void updatePartitionStatistics(HiveIdentity identity, String databaseName, String tableName, String partitionName, Function update) + public void updatePartitionStatistics(String databaseName, + String tableName, + String partitionName, + Function update) { - Table table = getExistingTable(identity, databaseName, tableName); - delegate.updatePartitionStatistics(identity, table, partitionName, update); + Table table = getExistingTable(databaseName, tableName); + delegate.updatePartitionStatistics(table, partitionName, update); } - public void updatePartitionStatistics(HiveIdentity identity, String databaseName, String tableName, Map> updates) + public void updatePartitionStatistics(String databaseName, String tableName, Map> updates) { - Table table = getExistingTable(identity, databaseName, tableName); - delegate.updatePartitionStatistics(identity, table, updates); + Table table = getExistingTable(databaseName, tableName); + delegate.updatePartitionStatistics(table, updates); } public List getAllTables(String databaseName) @@ -130,95 +135,94 @@ public List getAllViews(String databaseName) return delegate.getAllViews(databaseName); } - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(Database database) { - delegate.createDatabase(identity, database); + delegate.createDatabase(database); } - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { - delegate.dropDatabase(identity, databaseName, deleteData); + delegate.dropDatabase(databaseName, deleteData); } - public void renameDatabase(HiveIdentity identity, String 
databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { - delegate.renameDatabase(identity, databaseName, newDatabaseName); + delegate.renameDatabase(databaseName, newDatabaseName); } - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String databaseName, HivePrincipal principal) { - delegate.setDatabaseOwner(identity, databaseName, principal); + delegate.setDatabaseOwner(databaseName, principal); } - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { - delegate.setTableOwner(identity, databaseName, tableName, principal); + delegate.setTableOwner(databaseName, tableName, principal); } - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { - delegate.createTable(identity, table, principalPrivileges); + delegate.createTable(table, principalPrivileges); } - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { - delegate.dropTable(identity, databaseName, tableName, deleteData); + delegate.dropTable(databaseName, tableName, deleteData); } - public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { - delegate.replaceTable(identity, databaseName, tableName, newTable, principalPrivileges); + delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges); } - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { - delegate.renameTable(identity, databaseName, tableName, newDatabaseName, newTableName); + delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName); } - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void commentTable(String databaseName, String tableName, Optional comment) { - delegate.commentTable(identity, databaseName, tableName, comment); + delegate.commentTable(databaseName, tableName, comment); } - public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { - delegate.commentColumn(identity, databaseName, tableName, columnName, comment); + delegate.commentColumn(databaseName, tableName, columnName, comment); } - public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - delegate.addColumn(identity, databaseName, tableName, columnName, columnType, columnComment); + delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment); } - public void 
renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { - delegate.renameColumn(identity, databaseName, tableName, oldColumnName, newColumnName); + delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName); } - public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { - delegate.dropColumn(identity, databaseName, tableName, columnName); + delegate.dropColumn(databaseName, tableName, columnName); } - public Optional getPartition(HiveIdentity identity, String databaseName, String tableName, List partitionValues) + public Optional getPartition(String databaseName, String tableName, List partitionValues) { - return delegate.getTable(identity, databaseName, tableName) - .flatMap(table -> delegate.getPartition(identity, table, partitionValues)); + return delegate.getTable(databaseName, tableName) + .flatMap(table -> delegate.getPartition(table, partitionValues)); } public Optional> getPartitionNamesByFilter( - HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) { - return delegate.getPartitionNamesByFilter(identity, databaseName, tableName, columnNames, partitionKeysFilter); + return delegate.getPartitionNamesByFilter(databaseName, tableName, columnNames, partitionKeysFilter); } - private List getExistingPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + private List getExistingPartitionsByNames(Table table, List partitionNames) { - Map partitions = delegate.getPartitionsByNames(identity, table, partitionNames).entrySet().stream() + Map partitions = delegate.getPartitionsByNames(table, partitionNames).entrySet().stream() .map(entry -> immutableEntry(entry.getKey(), entry.getValue().orElseThrow(() -> new PartitionNotFoundException(table.getSchemaTableName(), extractPartitionValues(entry.getKey()))))) .collect(toImmutableMap(Map.Entry::getKey, Map.Entry::getValue)); @@ -228,27 +232,27 @@ private List getExistingPartitionsByNames(HiveIdentity identity, Tabl .collect(toImmutableList()); } - public Map> getPartitionsByNames(HiveIdentity identity, String databaseName, String tableName, List partitionNames) + public Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) { - return delegate.getTable(identity, databaseName, tableName) - .map(table -> delegate.getPartitionsByNames(identity, table, partitionNames)) + return delegate.getTable(databaseName, tableName) + .map(table -> delegate.getPartitionsByNames(table, partitionNames)) .orElseGet(() -> partitionNames.stream() .collect(toImmutableMap(name -> name, name -> Optional.empty()))); } - public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions) + public void addPartitions(String databaseName, String tableName, List partitions) { - delegate.addPartitions(identity, databaseName, tableName, partitions); + delegate.addPartitions(databaseName, tableName, partitions); } - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { - delegate.dropPartition(identity, databaseName, tableName, parts, deleteData); + 
delegate.dropPartition(databaseName, tableName, parts, deleteData); } - public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) { - delegate.alterPartition(identity, databaseName, tableName, partition); + delegate.alterPartition(databaseName, tableName, partition); } public void createRole(String role, String grantor) @@ -301,34 +305,34 @@ public Set listTablePrivileges(String databaseName, String ta return delegate.listTablePrivileges(databaseName, tableName, tableOwner, principal); } - public long openTransaction(HiveIdentity identity) + public long openTransaction() { - return delegate.openTransaction(identity); + return delegate.openTransaction(); } - public void commitTransaction(HiveIdentity identity, long transactionId) + public void commitTransaction(long transactionId) { - delegate.commitTransaction(identity, transactionId); + delegate.commitTransaction(transactionId); } - public void abortTransaction(HiveIdentity identity, long transactionId) + public void abortTransaction(long transactionId) { - delegate.abortTransaction(identity, transactionId); + delegate.abortTransaction(transactionId); } - public void sendTransactionHeartbeat(HiveIdentity identity, long transactionId) + public void sendTransactionHeartbeat(long transactionId) { - delegate.sendTransactionHeartbeat(identity, transactionId); + delegate.sendTransactionHeartbeat(transactionId); } - public void acquireSharedReadLock(HiveIdentity identity, String queryId, long transactionId, List fullTables, List partitions) + public void acquireSharedReadLock(String queryId, long transactionId, List fullTables, List partitions) { - delegate.acquireSharedReadLock(identity, queryId, transactionId, fullTables, partitions); + delegate.acquireSharedReadLock(queryId, transactionId, fullTables, partitions); } - public String getValidWriteIds(HiveIdentity identity, List tables, long currentTransactionId) + public String getValidWriteIds(List tables, long currentTransactionId) { - return delegate.getValidWriteIds(identity, tables, currentTransactionId); + return delegate.getValidWriteIds(tables, currentTransactionId); } public Optional getConfigValue(String name) @@ -336,33 +340,33 @@ public Optional getConfigValue(String name) return delegate.getConfigValue(name); } - public long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId) + public long allocateWriteId(String dbName, String tableName, long transactionId) { - return delegate.allocateWriteId(identity, dbName, tableName, transactionId); + return delegate.allocateWriteId(dbName, tableName, transactionId); } - public void acquireTableWriteLock(HiveIdentity identity, String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isPartitioned) + public void acquireTableWriteLock(String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isPartitioned) { - delegate.acquireTableWriteLock(identity, queryId, transactionId, dbName, tableName, operation, isPartitioned); + delegate.acquireTableWriteLock(queryId, transactionId, dbName, tableName, operation, isPartitioned); } - public void updateTableWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) + public void updateTableWriteId(String dbName, String tableName, long 
transactionId, long writeId, OptionalLong rowCountChange) { - delegate.updateTableWriteId(identity, dbName, tableName, transactionId, writeId, rowCountChange); + delegate.updateTableWriteId(dbName, tableName, transactionId, writeId, rowCountChange); } - public void alterPartitions(HiveIdentity identity, String dbName, String tableName, List partitions, long writeId) + public void alterPartitions(String dbName, String tableName, List partitions, long writeId) { - delegate.alterPartitions(identity, dbName, tableName, partitions, writeId); + delegate.alterPartitions(dbName, tableName, partitions, writeId); } - public void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) + public void addDynamicPartitions(String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) { - delegate.addDynamicPartitions(identity, dbName, tableName, partitionNames, transactionId, writeId, operation); + delegate.addDynamicPartitions(dbName, tableName, partitionNames, transactionId, writeId, operation); } - public void alterTransactionalTable(HiveIdentity identity, Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) + public void alterTransactionalTable(Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) { - delegate.alterTransactionalTable(identity, table, transactionId, writeId, principalPrivileges); + delegate.alterTransactionalTable(table, transactionId, writeId, principalPrivileges); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java index 624b0bf035ec..33d6b9f6e21f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePageSinkProvider.java @@ -20,7 +20,6 @@ import io.airlift.event.client.EventClient; import io.airlift.json.JsonCodec; import io.airlift.units.DataSize; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.HivePageSinkMetadataProvider; import io.trino.plugin.hive.metastore.SortingColumn; @@ -157,8 +156,7 @@ private ConnectorPageSink createPageSink(HiveWritableTableHandle handle, boolean session.getQueryId(), new HivePageSinkMetadataProvider( handle.getPageSinkMetadata(), - new HiveMetastoreClosure(memoizeMetastore(metastoreFactory.createMetastore(Optional.of(session.getIdentity())), new HiveIdentity(session), perTransactionMetastoreCacheMaximumSize)), - new HiveIdentity(session)), + new HiveMetastoreClosure(memoizeMetastore(metastoreFactory.createMetastore(Optional.of(session.getIdentity())), perTransactionMetastoreCacheMaximumSize))), typeManager, hdfsEnvironment, pageSorter, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java index 5ffbb46ed0f0..f96a9ae3c508 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HivePartitionManager.java @@ -18,7 +18,6 @@ import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import com.google.common.collect.Sets; -import 
io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.util.HiveBucketing.HiveBucketFilter; import io.trino.spi.TrinoException; @@ -76,7 +75,7 @@ public HivePartitionManager( this.domainCompactionThreshold = domainCompactionThreshold; } - public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore, HiveIdentity identity, ConnectorTableHandle tableHandle, Constraint constraint) + public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastore, ConnectorTableHandle tableHandle, Constraint constraint) { HiveTableHandle hiveTableHandle = (HiveTableHandle) tableHandle; TupleDomain effectivePredicate = constraint.getSummary() @@ -118,7 +117,7 @@ public HivePartitionResult getPartitions(SemiTransactionalHiveMetastore metastor .collect(toImmutableList()); } else { - List partitionNames = getFilteredPartitionNames(metastore, identity, tableName, partitionColumns, compactEffectivePredicate); + List partitionNames = getFilteredPartitionNames(metastore, tableName, partitionColumns, compactEffectivePredicate); partitionsIterable = () -> partitionNames.stream() // Apply extra filters which could not be done by getFilteredPartitionNames .map(partitionName -> parseValuesAndFilterPartition(tableName, partitionName, partitionColumns, partitionTypes, effectivePredicate, predicate)) @@ -198,10 +197,10 @@ public HiveTableHandle applyPartitionResult(HiveTableHandle handle, HivePartitio handle.getMaxScannedFileSize()); } - public List getOrLoadPartitions(SemiTransactionalHiveMetastore metastore, HiveIdentity identity, HiveTableHandle table) + public List getOrLoadPartitions(SemiTransactionalHiveMetastore metastore, HiveTableHandle table) { return table.getPartitions().orElseGet(() -> - getPartitionsAsList(getPartitions(metastore, identity, table, new Constraint(table.getEnforcedConstraint())))); + getPartitionsAsList(getPartitions(metastore, table, new Constraint(table.getEnforcedConstraint())))); } private Optional parseValuesAndFilterPartition( @@ -241,14 +240,14 @@ public static boolean partitionMatches(List partitionColumns, return true; } - private List getFilteredPartitionNames(SemiTransactionalHiveMetastore metastore, HiveIdentity identity, SchemaTableName tableName, List partitionKeys, TupleDomain effectivePredicate) + private List getFilteredPartitionNames(SemiTransactionalHiveMetastore metastore, SchemaTableName tableName, List partitionKeys, TupleDomain effectivePredicate) { List columnNames = partitionKeys.stream() .map(HiveColumnHandle::getName) .collect(toImmutableList()); TupleDomain partitionKeysFilter = computePartitionKeyFilter(partitionKeys, effectivePredicate); // fetch the partition names - return metastore.getPartitionNamesByFilter(identity, tableName.getSchemaName(), tableName.getTableName(), columnNames, partitionKeysFilter) + return metastore.getPartitionNamesByFilter(tableName.getSchemaName(), tableName.getTableName(), columnNames, partitionKeysFilter) .orElseThrow(() -> new TableNotFoundException(tableName)); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java index 64ca8c902afd..d3dca08a59a1 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/HiveSplitManager.java @@ -21,7 +21,6 @@ import io.airlift.concurrent.BoundedExecutor; import 
io.airlift.stats.CounterStat; import io.airlift.units.DataSize; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Partition; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; @@ -192,7 +191,7 @@ public ConnectorSplitSource getSplits( // get table metadata SemiTransactionalHiveMetastore metastore = transactionManager.get(transaction, session.getIdentity()).getMetastore(); - Table table = metastore.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); // verify table is not marked as non-readable @@ -202,7 +201,7 @@ public ConnectorSplitSource getSplits( } // get partitions - List partitions = partitionManager.getOrLoadPartitions(metastore, new HiveIdentity(session), hiveTable); + List partitions = partitionManager.getOrLoadPartitions(metastore, hiveTable); // short circuit if we don't have any partitions if (partitions.isEmpty()) { @@ -313,7 +312,6 @@ private Iterable getPartitionMetadata(ConnectorSession se Iterable> partitionNameBatches = partitionExponentially(hivePartitions, minPartitionBatchSize, maxPartitionBatchSize); Iterable> partitionBatches = transform(partitionNameBatches, partitionBatch -> { Map> batch = metastore.getPartitionsByNames( - new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName(), Lists.transform(partitionBatch, HivePartition::getPartitionId)); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java index 7dcead9401b3..24c8a45d06ec 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PartitionsSystemTableProvider.java @@ -13,7 +13,6 @@ */ package io.trino.plugin.hive; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Table; import io.trino.spi.connector.ColumnMetadata; import io.trino.spi.connector.ConnectorSession; @@ -81,7 +80,7 @@ public Optional getSystemTable(HiveMetadata metadata, ConnectorSess SchemaTableName sourceTableName = PARTITIONS.getSourceTableName(tableName); Table sourceTable = metadata.getMetastore() - .getTable(new HiveIdentity(session), sourceTableName.getSchemaName(), sourceTableName.getTableName()) + .getTable(sourceTableName.getSchemaName(), sourceTableName.getTableName()) .orElse(null); if (sourceTable == null || isDeltaLakeTable(sourceTable) || isIcebergTable(sourceTable)) { return Optional.empty(); @@ -123,7 +122,7 @@ public Optional getSystemTable(HiveMetadata metadata, ConnectorSess constraint -> { Constraint targetConstraint = new Constraint(constraint.transformKeys(fieldIdToColumnHandle::get)); Iterable> records = () -> - stream(partitionManager.getPartitions(metadata.getMetastore(), new HiveIdentity(session), sourceTableHandle, targetConstraint).getPartitions()) + stream(partitionManager.getPartitions(metadata.getMetastore(), sourceTableHandle, targetConstraint).getPartitions()) .map(hivePartition -> IntStream.range(0, partitionColumns.size()) .mapToObj(fieldIdToColumnHandle::get) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PropertiesSystemTableProvider.java 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PropertiesSystemTableProvider.java index af4fee2f1bd2..4f2ec699865c 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PropertiesSystemTableProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/PropertiesSystemTableProvider.java @@ -15,7 +15,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSortedMap; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Table; import io.trino.spi.connector.ColumnMetadata; import io.trino.spi.connector.ConnectorSession; @@ -59,7 +58,7 @@ public Optional getSystemTable(HiveMetadata metadata, ConnectorSess SchemaTableName sourceTableName = PROPERTIES.getSourceTableName(tableName); Table table = metadata.getMetastore() - .getTable(new HiveIdentity(session), sourceTableName.getSchemaName(), sourceTableName.getTableName()) + .getTable(sourceTableName.getSchemaName(), sourceTableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); if (isDeltaLakeTable(table) || isIcebergTable(table)) { diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/ViewReaderUtil.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/ViewReaderUtil.java index 1f63284d6317..6aea3d73daa1 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/ViewReaderUtil.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/ViewReaderUtil.java @@ -20,7 +20,6 @@ import io.airlift.json.JsonCodecFactory; import io.airlift.json.ObjectMapperProvider; import io.trino.plugin.base.CatalogName; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.CoralSemiTransactionalHiveMSCAdapter; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; @@ -88,7 +87,7 @@ public static ViewReader createViewReader( } return new HiveViewReader( - new CoralSemiTransactionalHiveMSCAdapter(metastore, new HiveIdentity(session), coralTableRedirectionResolver(session, tableRedirectionResolver, metadataProvider)), + new CoralSemiTransactionalHiveMSCAdapter(metastore, coralTableRedirectionResolver(session, tableRedirectionResolver, metadataProvider)), typeManager); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/authentication/HiveIdentity.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/authentication/HiveIdentity.java index 5cc0c78a4c58..0c44779b674a 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/authentication/HiveIdentity.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/authentication/HiveIdentity.java @@ -13,7 +13,6 @@ */ package io.trino.plugin.hive.authentication; -import io.trino.spi.connector.ConnectorSession; import io.trino.spi.security.ConnectorIdentity; import java.util.Objects; @@ -33,11 +32,6 @@ private HiveIdentity() this.username = Optional.empty(); } - public HiveIdentity(ConnectorSession session) - { - this(requireNonNull(session, "session is null").getIdentity()); - } - public HiveIdentity(ConnectorIdentity identity) { requireNonNull(identity, "identity is null"); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/CoralSemiTransactionalHiveMSCAdapter.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/CoralSemiTransactionalHiveMSCAdapter.java index 1d50b40bccc5..5fd5402daae5 100644 --- 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/CoralSemiTransactionalHiveMSCAdapter.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/CoralSemiTransactionalHiveMSCAdapter.java @@ -15,7 +15,6 @@ import com.linkedin.coral.hive.hive2rel.HiveMetastoreClient; import io.trino.plugin.hive.CoralTableRedirectionResolver; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil; import io.trino.spi.connector.SchemaTableName; import org.apache.hadoop.hive.metastore.api.Database; @@ -36,16 +35,13 @@ public class CoralSemiTransactionalHiveMSCAdapter implements HiveMetastoreClient { private final SemiTransactionalHiveMetastore delegate; - private final HiveIdentity identity; private final CoralTableRedirectionResolver tableRedirection; public CoralSemiTransactionalHiveMSCAdapter( SemiTransactionalHiveMetastore coralHiveMetastoreClient, - HiveIdentity identity, CoralTableRedirectionResolver tableRedirection) { this.delegate = requireNonNull(coralHiveMetastoreClient, "coralHiveMetastoreClient is null"); - this.identity = requireNonNull(identity, "identity is null"); this.tableRedirection = requireNonNull(tableRedirection, "tableRedirection is null"); } @@ -78,7 +74,7 @@ public org.apache.hadoop.hive.metastore.api.Table getTable(String dbName, String } } - return delegate.getTable(identity, dbName, tableName) + return delegate.getTable(dbName, tableName) .map(value -> ThriftMetastoreUtil.toMetastoreApiTable(value, NO_PRIVILEGES)) .orElse(null); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java index 3024335bc075..b8e1b915b8c1 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/ForwardingHiveMetastore.java @@ -18,7 +18,6 @@ import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidOperation; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; import io.trino.spi.connector.SchemaTableName; import io.trino.spi.predicate.TupleDomain; @@ -59,9 +58,9 @@ public List getAllDatabases() } @Override - public Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName)
+    public Optional<Table>
getTable(String databaseName, String tableName) { - return delegate.getTable(identity, databaseName, tableName); + return delegate.getTable(databaseName, tableName); } @Override @@ -71,45 +70,42 @@ public Set getSupportedColumnStatistics(Type type) } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(Table table) { - return delegate.getTableStatistics(identity, table); + return delegate.getTableStatistics(table); } @Override - public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { - return delegate.getPartitionStatistics(identity, table, partitions); + return delegate.getPartitionStatistics(table, partitions); } @Override public void updateTableStatistics( - HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) { - delegate.updateTableStatistics(identity, databaseName, tableName, transaction, update); + delegate.updateTableStatistics(databaseName, tableName, transaction, update); } @Override public void updatePartitionStatistics( - HiveIdentity identity, Table table, String partitionName, Function update) { - delegate.updatePartitionStatistics(identity, table, partitionName, update); + delegate.updatePartitionStatistics(table, partitionName, update); } @Override public void updatePartitionStatistics( - HiveIdentity identity, Table table, Map> updates) { - delegate.updatePartitionStatistics(identity, table, updates); + delegate.updatePartitionStatistics(table, updates); } @Override @@ -131,155 +127,148 @@ public List getAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(Database database) { - delegate.createDatabase(identity, database); + delegate.createDatabase(database); } @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { - delegate.dropDatabase(identity, databaseName, deleteData); + delegate.dropDatabase(databaseName, deleteData); } @Override - public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { - delegate.renameDatabase(identity, databaseName, newDatabaseName); + delegate.renameDatabase(databaseName, newDatabaseName); } @Override - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String databaseName, HivePrincipal principal) { - delegate.setDatabaseOwner(identity, databaseName, principal); + delegate.setDatabaseOwner(databaseName, principal); } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { - delegate.createTable(identity, table, principalPrivileges); + delegate.createTable(table, principalPrivileges); } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { - delegate.dropTable(identity, databaseName, tableName, deleteData); + delegate.dropTable(databaseName, tableName, deleteData); } @Override public void replaceTable( - HiveIdentity identity, String 
databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { - delegate.replaceTable(identity, databaseName, tableName, newTable, principalPrivileges); + delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges); } @Override - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { - delegate.renameTable(identity, databaseName, tableName, newDatabaseName, newTableName); + delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName); } @Override - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void commentTable(String databaseName, String tableName, Optional comment) { - delegate.commentTable(identity, databaseName, tableName, comment); + delegate.commentTable(databaseName, tableName, comment); } @Override - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { - delegate.setTableOwner(identity, databaseName, tableName, principal); + delegate.setTableOwner(databaseName, tableName, principal); } @Override public void commentColumn( - HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) { - delegate.commentColumn(identity, databaseName, tableName, columnName, comment); + delegate.commentColumn(databaseName, tableName, columnName, comment); } @Override public void addColumn( - HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - delegate.addColumn(identity, databaseName, tableName, columnName, columnType, columnComment); + delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment); } @Override - public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { - delegate.renameColumn(identity, databaseName, tableName, oldColumnName, newColumnName); + delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName); } @Override - public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { - delegate.dropColumn(identity, databaseName, tableName, columnName); + delegate.dropColumn(databaseName, tableName, columnName); } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { - return delegate.getPartition(identity, table, partitionValues); + return delegate.getPartition(table, partitionValues); } @Override public Optional> getPartitionNamesByFilter( - HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) { - return delegate.getPartitionNamesByFilter(identity, databaseName, tableName, columnNames, partitionKeysFilter); + return delegate.getPartitionNamesByFilter(databaseName, tableName, columnNames, partitionKeysFilter); } @Override public Map> getPartitionsByNames( - HiveIdentity identity, Table table, List 
partitionNames) { - return delegate.getPartitionsByNames(identity, table, partitionNames); + return delegate.getPartitionsByNames(table, partitionNames); } @Override public void addPartitions( - HiveIdentity identity, String databaseName, String tableName, List partitions) { - delegate.addPartitions(identity, databaseName, tableName, partitions); + delegate.addPartitions(databaseName, tableName, partitions); } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { - delegate.dropPartition(identity, databaseName, tableName, parts, deleteData); + delegate.dropPartition(databaseName, tableName, parts, deleteData); } @Override public void alterPartition( - HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) { - delegate.alterPartition(identity, databaseName, tableName, partition); + delegate.alterPartition(databaseName, tableName, partition); } @Override @@ -357,38 +346,37 @@ public Set listTablePrivileges(String databaseName, } @Override - public long openTransaction(HiveIdentity identity) + public long openTransaction() { - return delegate.openTransaction(identity); + return delegate.openTransaction(); } @Override - public void commitTransaction(HiveIdentity identity, long transactionId) + public void commitTransaction(long transactionId) { - delegate.commitTransaction(identity, transactionId); + delegate.commitTransaction(transactionId); } @Override - public void sendTransactionHeartbeat(HiveIdentity identity, long transactionId) + public void sendTransactionHeartbeat(long transactionId) { - delegate.sendTransactionHeartbeat(identity, transactionId); + delegate.sendTransactionHeartbeat(transactionId); } @Override public void acquireSharedReadLock( - HiveIdentity identity, String queryId, long transactionId, List fullTables, List partitions) { - delegate.acquireSharedReadLock(identity, queryId, transactionId, fullTables, partitions); + delegate.acquireSharedReadLock(queryId, transactionId, fullTables, partitions); } @Override - public String getValidWriteIds(HiveIdentity identity, List tables, long currentTransactionId) + public String getValidWriteIds(List tables, long currentTransactionId) { - return delegate.getValidWriteIds(identity, tables, currentTransactionId); + return delegate.getValidWriteIds(tables, currentTransactionId); } @Override @@ -398,14 +386,13 @@ public Optional getConfigValue(String name) } @Override - public long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId) + public long allocateWriteId(String dbName, String tableName, long transactionId) { - return delegate.allocateWriteId(identity, dbName, tableName, transactionId); + return delegate.allocateWriteId(dbName, tableName, transactionId); } @Override public void acquireTableWriteLock( - HiveIdentity identity, String queryId, long transactionId, String dbName, @@ -413,35 +400,32 @@ public void acquireTableWriteLock( DataOperationType operation, boolean isDynamicPartitionWrite) { - delegate.acquireTableWriteLock(identity, queryId, transactionId, dbName, tableName, operation, isDynamicPartitionWrite); + delegate.acquireTableWriteLock(queryId, transactionId, dbName, tableName, operation, isDynamicPartitionWrite); } @Override public void updateTableWriteId( - HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, OptionalLong 
rowCountChange) { - delegate.updateTableWriteId(identity, dbName, tableName, transactionId, writeId, rowCountChange); + delegate.updateTableWriteId(dbName, tableName, transactionId, writeId, rowCountChange); } @Override public void alterPartitions( - HiveIdentity identity, String dbName, String tableName, List partitions, long writeId) { - delegate.alterPartitions(identity, dbName, tableName, partitions, writeId); + delegate.alterPartitions(dbName, tableName, partitions, writeId); } @Override public void addDynamicPartitions( - HiveIdentity identity, String dbName, String tableName, List partitionNames, @@ -449,17 +433,16 @@ public void addDynamicPartitions( long writeId, AcidOperation operation) { - delegate.addDynamicPartitions(identity, dbName, tableName, partitionNames, transactionId, writeId, operation); + delegate.addDynamicPartitions(dbName, tableName, partitionNames, transactionId, writeId, operation); } @Override public void alterTransactionalTable( - HiveIdentity identity, Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) { - delegate.alterTransactionalTable(identity, table, transactionId, writeId, principalPrivileges); + delegate.alterTransactionalTable(table, transactionId, writeId, principalPrivileges); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java index b05415c1ed48..c2aefc3201cc 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveMetastore.java @@ -19,7 +19,6 @@ import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidOperation; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; import io.trino.spi.connector.SchemaTableName; import io.trino.spi.predicate.TupleDomain; @@ -41,22 +40,22 @@ public interface HiveMetastore List getAllDatabases(); - Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName);
+    Optional<Table> getTable(String databaseName, String tableName);

     Set<ColumnStatisticType> getSupportedColumnStatistics(Type type);

-    PartitionStatistics getTableStatistics(HiveIdentity identity, Table table);
+    PartitionStatistics getTableStatistics(Table table);

-    Map<String, PartitionStatistics> getPartitionStatistics(HiveIdentity identity, Table table, List<Partition> partitions);
+    Map<String, PartitionStatistics> getPartitionStatistics(Table table, List<Partition> partitions);

-    void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function<PartitionStatistics, PartitionStatistics> update);
+    void updateTableStatistics(String databaseName, String tableName, AcidTransaction transaction, Function<PartitionStatistics, PartitionStatistics> update);

-    default void updatePartitionStatistics(HiveIdentity identity, Table table, String partitionName, Function<PartitionStatistics, PartitionStatistics> update)
+    default void updatePartitionStatistics(Table table, String partitionName, Function<PartitionStatistics, PartitionStatistics> update)
     {
-        updatePartitionStatistics(identity, table, ImmutableMap.of(partitionName, update));
+        updatePartitionStatistics(table, ImmutableMap.of(partitionName, update));
     }

-    void updatePartitionStatistics(HiveIdentity identity, Table table, Map<String, Function<PartitionStatistics, PartitionStatistics>> updates);
+    void updatePartitionStatistics(Table table, Map<String, Function<PartitionStatistics, PartitionStatistics>> updates);

     List<String> getAllTables(String databaseName);

@@ -64,40 +63,40 @@ default void updatePartitionStatistics(HiveIdentity identity, Table table, Strin
     List<String> getAllViews(String databaseName);

-    void createDatabase(HiveIdentity identity, Database database);
+    void createDatabase(Database database);

-    void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData);
+    void dropDatabase(String databaseName, boolean deleteData);

-    void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName);
+    void renameDatabase(String databaseName, String newDatabaseName);

-    void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal);
+    void setDatabaseOwner(String databaseName, HivePrincipal principal);

-    void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges);
+    void createTable(Table table, PrincipalPrivileges principalPrivileges);

-    void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData);
+    void dropTable(String databaseName, String tableName, boolean deleteData);

     /**
      * This should only be used if the semantic here is drop and add. Trying to
      * alter one field of a table object previously acquired from getTable is
      * probably not what you want.
      */
-    void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges);
+    void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges);

-    void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName);
+    void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName);

-    void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional<String> comment);
+    void commentTable(String databaseName, String tableName, Optional<String> comment);

-    void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal);
+    void setTableOwner(String databaseName, String tableName, HivePrincipal principal);

-    void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional<String> comment);
+    void commentColumn(String databaseName, String tableName, String columnName, Optional<String> comment);

-    void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment);
+    void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment);

-    void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName);
+    void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName);

-    void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName);
+    void dropColumn(String databaseName, String tableName, String columnName);

-    Optional<Partition> getPartition(HiveIdentity identity, Table table, List<String> partitionValues);
+    Optional<Partition> getPartition(Table table, List<String> partitionValues);

     /**
      * Return a list of partition names, with optional filtering (hint to improve performance if possible).
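A minimal caller-side sketch of the identity-free lookups defined above, for orientation while reading the remaining hunks. The HiveMetastore methods and Trino types are the ones named in this patch; the helper class, the generic parameters, and the use of TupleDomain.all() are illustrative assumptions, not code from this change.

import io.trino.plugin.hive.metastore.HiveMetastore;
import io.trino.plugin.hive.metastore.Table;
import io.trino.spi.connector.SchemaTableName;
import io.trino.spi.connector.TableNotFoundException;
import io.trino.spi.predicate.TupleDomain;

import java.util.List;

final class PartitionNameLookup
{
    private PartitionNameLookup() {}

    // List a table's partition names without passing a HiveIdentity:
    // the metastore instance already carries whatever identity it needs.
    static List<String> listPartitionNames(HiveMetastore metastore, SchemaTableName name, List<String> partitionColumnNames)
    {
        Table table = metastore.getTable(name.getSchemaName(), name.getTableName())
                .orElseThrow(() -> new TableNotFoundException(name));
        // TupleDomain.all() means "no filter"; a real caller would pass the
        // compacted partition-key predicate instead.
        return metastore.getPartitionNamesByFilter(
                        table.getDatabaseName(),
                        table.getTableName(),
                        partitionColumnNames,
                        TupleDomain.all())
                .orElseThrow(() -> new TableNotFoundException(name));
    }
}

The same pattern runs through the series: callers pass only schema, table, and filter arguments, and the identity is supplied once when the metastore is created (see the HivePageSinkProvider hunk above) instead of being threaded through every call.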
@@ -109,15 +108,15 @@ default void updatePartitionStatistics(HiveIdentity identity, Table table, Strin * @return a list of partition names as created by {@link MetastoreUtil#toPartitionName} * @see TupleDomain */ - Optional> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter); + Optional> getPartitionNamesByFilter(String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter); - Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames); + Map> getPartitionsByNames(Table table, List partitionNames); - void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions); + void addPartitions(String databaseName, String tableName, List partitions); - void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData); + void dropPartition(String databaseName, String tableName, List parts, boolean deleteData); - void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition); + void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition); void createRole(String role, String grantor); @@ -143,32 +142,32 @@ default void updatePartitionStatistics(HiveIdentity identity, Table table, Strin */ Set listTablePrivileges(String databaseName, String tableName, Optional tableOwner, Optional principal); - default long openTransaction(HiveIdentity identity) + default long openTransaction() { throw new UnsupportedOperationException(); } - default void commitTransaction(HiveIdentity identity, long transactionId) + default void commitTransaction(long transactionId) { throw new UnsupportedOperationException(); } - default void abortTransaction(HiveIdentity identity, long transactionId) + default void abortTransaction(long transactionId) { throw new UnsupportedOperationException(); } - default void sendTransactionHeartbeat(HiveIdentity identity, long transactionId) + default void sendTransactionHeartbeat(long transactionId) { throw new UnsupportedOperationException(); } - default void acquireSharedReadLock(HiveIdentity identity, String queryId, long transactionId, List fullTables, List partitions) + default void acquireSharedReadLock(String queryId, long transactionId, List fullTables, List partitions) { throw new UnsupportedOperationException(); } - default String getValidWriteIds(HiveIdentity identity, List tables, long currentTransactionId) + default String getValidWriteIds(List tables, long currentTransactionId) { throw new UnsupportedOperationException(); } @@ -178,32 +177,32 @@ default Optional getConfigValue(String name) return Optional.empty(); } - default long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId) + default long allocateWriteId(String dbName, String tableName, long transactionId) { throw new UnsupportedOperationException(); } - default void acquireTableWriteLock(HiveIdentity identity, String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isDynamicPartitionWrite) + default void acquireTableWriteLock(String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isDynamicPartitionWrite) { throw new UnsupportedOperationException(); } - default void updateTableWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, 
OptionalLong rowCountChange) + default void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) { throw new UnsupportedOperationException(); } - default void alterPartitions(HiveIdentity identity, String dbName, String tableName, List partitions, long writeId) + default void alterPartitions(String dbName, String tableName, List partitions, long writeId) { throw new UnsupportedOperationException(); } - default void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) + default void addDynamicPartitions(String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) { throw new UnsupportedOperationException(); } - default void alterTransactionalTable(HiveIdentity identity, Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) + default void alterTransactionalTable(Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException(); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HivePageSinkMetadataProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HivePageSinkMetadataProvider.java index 951a95a7c774..750f774509f1 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HivePageSinkMetadataProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HivePageSinkMetadataProvider.java @@ -14,7 +14,6 @@ package io.trino.plugin.hive.metastore; import io.trino.plugin.hive.HiveMetastoreClosure; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.spi.connector.SchemaTableName; import java.util.List; @@ -26,17 +25,15 @@ public class HivePageSinkMetadataProvider { - private final HiveIdentity identity; private final HiveMetastoreClosure delegate; private final SchemaTableName schemaTableName; private final Optional
table; private final Map, Optional> modifiedPartitions; - public HivePageSinkMetadataProvider(HivePageSinkMetadata pageSinkMetadata, HiveMetastoreClosure delegate, HiveIdentity identity) + public HivePageSinkMetadataProvider(HivePageSinkMetadata pageSinkMetadata, HiveMetastoreClosure delegate) { requireNonNull(pageSinkMetadata, "pageSinkMetadata is null"); this.delegate = delegate; - this.identity = requireNonNull(identity, "identity is null"); this.schemaTableName = pageSinkMetadata.getSchemaTableName(); this.table = pageSinkMetadata.getTable(); this.modifiedPartitions = pageSinkMetadata.getModifiedPartitions(); @@ -55,7 +52,7 @@ public Optional getPartition(List partitionValues) } Optional modifiedPartition = modifiedPartitions.get(partitionValues); if (modifiedPartition == null) { - return delegate.getPartition(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues); + return delegate.getPartition(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues); } else { return modifiedPartition; diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveTransaction.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveTransaction.java index a4fa47d68768..e560c977b63a 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveTransaction.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/HiveTransaction.java @@ -18,7 +18,6 @@ import io.trino.plugin.hive.HivePartition; import io.trino.plugin.hive.HiveTableHandle; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.spi.connector.SchemaTableName; import org.apache.hadoop.hive.common.ValidTxnWriteIdList; @@ -31,7 +30,6 @@ public class HiveTransaction { - private final HiveIdentity identity; private final String queryId; private final long transactionId; private final ScheduledFuture heartbeatTask; @@ -39,9 +37,8 @@ public class HiveTransaction private final Map validHiveTransactionsForTable = new HashMap<>(); - public HiveTransaction(HiveIdentity identity, String queryId, long transactionId, ScheduledFuture heartbeatTask, AcidTransaction transaction) + public HiveTransaction(String queryId, long transactionId, ScheduledFuture heartbeatTask, AcidTransaction transaction) { - this.identity = requireNonNull(identity, "identity is null"); this.queryId = requireNonNull(queryId, "queryId is null"); this.transactionId = transactionId; this.heartbeatTask = requireNonNull(heartbeatTask, "heartbeatTask is null"); @@ -79,7 +76,6 @@ public ValidTxnWriteIdList getValidWriteIds(HiveMetastoreClosure metastore, Hive // Different calls for same table might need to lock different partitions so acquire locks every time metastore.acquireSharedReadLock( - identity, queryId, transactionId, lockedTables, @@ -88,7 +84,6 @@ public ValidTxnWriteIdList getValidWriteIds(HiveMetastoreClosure metastore, Hive // For repeatable reads within a query, use the same list of valid transactions for a table which have once been used return validHiveTransactionsForTable.computeIfAbsent(tableHandle.getSchemaTableName(), schemaTableName -> new ValidTxnWriteIdList( metastore.getValidWriteIds( - identity, ImmutableList.of(schemaTableName), transactionId))); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/MetastoreUtil.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/MetastoreUtil.java index ccf62e70375d..9e5c16be3808 100644 --- 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/MetastoreUtil.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/MetastoreUtil.java @@ -22,7 +22,6 @@ import io.trino.plugin.hive.HiveColumnHandle; import io.trino.plugin.hive.PartitionOfflineException; import io.trino.plugin.hive.TableOfflineException; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.spi.TrinoException; import io.trino.spi.connector.SchemaTableName; import io.trino.spi.connector.TableNotFoundException; @@ -300,9 +299,9 @@ public static void verifyOnline(SchemaTableName tableName, Optional part } } - public static void verifyCanDropColumn(HiveMetastore metastore, HiveIdentity identity, String databaseName, String tableName, String columnName) + public static void verifyCanDropColumn(HiveMetastore metastore, String databaseName, String tableName, String columnName) { - Table table = metastore.getTable(identity, databaseName, tableName) + Table table = metastore.getTable(databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); if (table.getPartitionColumns().stream().anyMatch(column -> column.getName().equals(columnName))) { diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java index 64fd9607f949..40cabb1067a3 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/SemiTransactionalHiveMetastore.java @@ -37,7 +37,6 @@ import io.trino.plugin.hive.TableAlreadyExistsException; import io.trino.plugin.hive.acid.AcidOperation; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; import io.trino.plugin.hive.security.SqlStandardAccessControlMetadataMetastore; import io.trino.spi.TrinoException; @@ -221,12 +220,12 @@ public synchronized List getAllTables(String databaseName) return delegate.getAllTables(databaseName); } - public synchronized Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName)
+    public synchronized Optional<Table>
getTable(String databaseName, String tableName) { checkReadable(); Action tableAction = tableActions.get(new SchemaTableName(databaseName, tableName)); if (tableAction == null) { - return delegate.getTable(identity, databaseName, tableName); + return delegate.getTable(databaseName, tableName); } switch (tableAction.getType()) { case ADD: @@ -249,12 +248,12 @@ public synchronized Set getSupportedColumnStatistics(Type t return delegate.getSupportedColumnStatistics(type); } - public synchronized PartitionStatistics getTableStatistics(HiveIdentity identity, String databaseName, String tableName) + public synchronized PartitionStatistics getTableStatistics(String databaseName, String tableName) { checkReadable(); Action tableAction = tableActions.get(new SchemaTableName(databaseName, tableName)); if (tableAction == null) { - return delegate.getTableStatistics(identity, databaseName, tableName); + return delegate.getTableStatistics(databaseName, tableName); } switch (tableAction.getType()) { case ADD: @@ -272,10 +271,10 @@ public synchronized PartitionStatistics getTableStatistics(HiveIdentity identity throw new IllegalStateException("Unknown action type"); } - public synchronized Map getPartitionStatistics(HiveIdentity identity, String databaseName, String tableName, Set partitionNames) + public synchronized Map getPartitionStatistics(String databaseName, String tableName, Set partitionNames) { checkReadable(); - Optional
<Table> table = getTable(identity, databaseName, tableName);
+        Optional<Table>
table = getTable(databaseName, tableName); if (table.isEmpty()) { return ImmutableMap.of(); } @@ -303,7 +302,7 @@ public synchronized Map getPartitionStatistics(Hive } } - Map delegateResult = delegate.getPartitionStatistics(identity, databaseName, tableName, partitionNamesToQuery.build()); + Map delegateResult = delegate.getPartitionStatistics(databaseName, tableName, partitionNamesToQuery.build()); if (!delegateResult.isEmpty()) { resultBuilder.putAll(delegateResult); } @@ -343,10 +342,10 @@ private TableSource getTableSource(String databaseName, String tableName) throw new IllegalStateException("Unknown action type"); } - public synchronized HivePageSinkMetadata generatePageSinkMetadata(HiveIdentity identity, SchemaTableName schemaTableName) + public synchronized HivePageSinkMetadata generatePageSinkMetadata(SchemaTableName schemaTableName) { checkReadable(); - Optional
<Table> table = getTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName());
+        Optional<Table>
table = getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (table.isEmpty()) { return new HivePageSinkMetadata(schemaTableName, Optional.empty(), ImmutableMap.of()); } @@ -377,9 +376,9 @@ public synchronized List getAllViews(String databaseName) return delegate.getAllViews(databaseName); } - public synchronized void createDatabase(HiveIdentity identity, Database database) + public synchronized void createDatabase(Database database) { - setExclusive((delegate, hdfsEnvironment) -> delegate.createDatabase(identity, database)); + setExclusive((delegate, hdfsEnvironment) -> delegate.createDatabase(database)); } public synchronized void dropDatabase(ConnectorSession session, String schemaName) @@ -390,8 +389,6 @@ public synchronized void dropDatabase(ConnectorSession session, String schemaNam .map(Path::new); setExclusive((delegate, hdfsEnvironment) -> { - HiveIdentity identity = new HiveIdentity(session); - // If we see files in the schema location, don't delete it. // If we see no files, request deletion. // If we fail to check the schema location, behave according to fallback. @@ -406,30 +403,30 @@ public synchronized void dropDatabase(ConnectorSession session, String schemaNam } }).orElse(deleteSchemaLocationsFallback); - delegate.dropDatabase(identity, schemaName, deleteData); + delegate.dropDatabase(schemaName, deleteData); }); } - public synchronized void renameDatabase(HiveIdentity identity, String source, String target) + public synchronized void renameDatabase(String source, String target) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameDatabase(identity, source, target)); + setExclusive((delegate, hdfsEnvironment) -> delegate.renameDatabase(source, target)); } - public synchronized void setDatabaseOwner(HiveIdentity identity, String source, HivePrincipal principal) + public synchronized void setDatabaseOwner(String source, HivePrincipal principal) { - setExclusive((delegate, hdfsEnvironment) -> delegate.setDatabaseOwner(identity, source, principal)); + setExclusive((delegate, hdfsEnvironment) -> delegate.setDatabaseOwner(source, principal)); } // TODO: Allow updating statistics for 2 tables in the same transaction - public synchronized void setTableStatistics(HiveIdentity identity, Table table, PartitionStatistics tableStatistics) + public synchronized void setTableStatistics(Table table, PartitionStatistics tableStatistics) { AcidTransaction transaction = currentHiveTransaction.isPresent() ? 
currentHiveTransaction.get().getTransaction() : NO_ACID_TRANSACTION; setExclusive((delegate, hdfsEnvironment) -> - delegate.updateTableStatistics(identity, table.getDatabaseName(), table.getTableName(), transaction, statistics -> updatePartitionStatistics(statistics, tableStatistics))); + delegate.updateTableStatistics(table.getDatabaseName(), table.getTableName(), transaction, statistics -> updatePartitionStatistics(statistics, tableStatistics))); } // TODO: Allow updating statistics for 2 tables in the same transaction - public synchronized void setPartitionStatistics(HiveIdentity identity, Table table, Map, PartitionStatistics> partitionStatisticsMap) + public synchronized void setPartitionStatistics(Table table, Map, PartitionStatistics> partitionStatisticsMap) { Map> updates = partitionStatisticsMap.entrySet().stream().collect( toImmutableMap( @@ -437,7 +434,6 @@ public synchronized void setPartitionStatistics(HiveIdentity identity, Table tab entry -> oldPartitionStats -> updatePartitionStatistics(oldPartitionStats, entry.getValue()))); setExclusive((delegate, hdfsEnvironment) -> delegate.updatePartitionStatistics( - identity, table.getDatabaseName(), table.getTableName(), updates)); @@ -488,11 +484,10 @@ public synchronized void createTable( // When creating a table, it should never have partition actions. This is just a sanity check. checkNoPartitionAction(table.getDatabaseName(), table.getTableName()); Action oldTableAction = tableActions.get(table.getSchemaTableName()); - HiveIdentity identity = new HiveIdentity(session); - TableAndMore tableAndMore = new TableAndMore(table, identity, Optional.of(principalPrivileges), currentPath, files, ignoreExisting, statistics, statistics, cleanExtraOutputFilesOnCommit); + TableAndMore tableAndMore = new TableAndMore(table, Optional.of(principalPrivileges), currentPath, files, ignoreExisting, statistics, statistics, cleanExtraOutputFilesOnCommit); if (oldTableAction == null) { HdfsContext hdfsContext = new HdfsContext(session); - tableActions.put(table.getSchemaTableName(), new Action<>(ActionType.ADD, tableAndMore, hdfsContext, identity, session.getQueryId())); + tableActions.put(table.getSchemaTableName(), new Action<>(ActionType.ADD, tableAndMore, hdfsContext, session.getQueryId())); return; } switch (oldTableAction.getType()) { @@ -501,7 +496,7 @@ public synchronized void createTable( throw new TrinoException(TRANSACTION_CONFLICT, "Operation on the same table with different user in the same transaction is not supported"); } HdfsContext hdfsContext = new HdfsContext(session); - tableActions.put(table.getSchemaTableName(), new Action<>(ActionType.ALTER, tableAndMore, hdfsContext, identity, session.getQueryId())); + tableActions.put(table.getSchemaTableName(), new Action<>(ActionType.ALTER, tableAndMore, hdfsContext, session.getQueryId())); return; case ADD: @@ -526,8 +521,7 @@ public synchronized void dropTable(ConnectorSession session, String databaseName Action oldTableAction = tableActions.get(schemaTableName); if (oldTableAction == null || oldTableAction.getType() == ActionType.ALTER) { HdfsContext hdfsContext = new HdfsContext(session); - HiveIdentity identity = new HiveIdentity(session); - tableActions.put(schemaTableName, new Action<>(ActionType.DROP, null, hdfsContext, identity, session.getQueryId())); + tableActions.put(schemaTableName, new Action<>(ActionType.DROP, null, hdfsContext, session.getQueryId())); return; } switch (oldTableAction.getType()) { @@ -546,44 +540,44 @@ public synchronized void dropTable(ConnectorSession 
session, String databaseName throw new IllegalStateException("Unknown action type"); } - public synchronized void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) + public synchronized void replaceTable(String databaseName, String tableName, Table table, PrincipalPrivileges principalPrivileges) { - setExclusive((delegate, hdfsEnvironment) -> delegate.replaceTable(identity, databaseName, tableName, table, principalPrivileges)); + setExclusive((delegate, hdfsEnvironment) -> delegate.replaceTable(databaseName, tableName, table, principalPrivileges)); } - public synchronized void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public synchronized void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameTable(identity, databaseName, tableName, newDatabaseName, newTableName)); + setExclusive((delegate, hdfsEnvironment) -> delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName)); } - public synchronized void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public synchronized void commentTable(String databaseName, String tableName, Optional comment) { - setExclusive((delegate, hdfsEnvironment) -> delegate.commentTable(identity, databaseName, tableName, comment)); + setExclusive((delegate, hdfsEnvironment) -> delegate.commentTable(databaseName, tableName, comment)); } - public synchronized void setTableOwner(HiveIdentity identity, String schema, String table, HivePrincipal principal) + public synchronized void setTableOwner(String schema, String table, HivePrincipal principal) { - setExclusive((delegate, hdfsEnvironment) -> delegate.setTableOwner(identity, schema, table, principal)); + setExclusive((delegate, hdfsEnvironment) -> delegate.setTableOwner(schema, table, principal)); } - public synchronized void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public synchronized void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { - setExclusive((delegate, hdfsEnvironment) -> delegate.commentColumn(identity, databaseName, tableName, columnName, comment)); + setExclusive((delegate, hdfsEnvironment) -> delegate.commentColumn(databaseName, tableName, columnName, comment)); } - public synchronized void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public synchronized void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - setExclusive((delegate, hdfsEnvironment) -> delegate.addColumn(identity, databaseName, tableName, columnName, columnType, columnComment)); + setExclusive((delegate, hdfsEnvironment) -> delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment)); } - public synchronized void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + public synchronized void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.renameColumn(identity, databaseName, tableName, oldColumnName, newColumnName)); + setExclusive((delegate, 
hdfsEnvironment) -> delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName)); } - public synchronized void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public synchronized void dropColumn(String databaseName, String tableName, String columnName) { - setExclusive((delegate, hdfsEnvironment) -> delegate.dropColumn(identity, databaseName, tableName, columnName)); + setExclusive((delegate, hdfsEnvironment) -> delegate.dropColumn(databaseName, tableName, columnName)); } public synchronized void finishInsertIntoExistingTable( @@ -598,15 +592,14 @@ public synchronized void finishInsertIntoExistingTable( // Data can only be inserted into partitions and unpartitioned tables. They can never be inserted into a partitioned table. // Therefore, this method assumes that the table is unpartitioned. setShared(); - HiveIdentity identity = new HiveIdentity(session); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action oldTableAction = tableActions.get(schemaTableName); if (oldTableAction == null) { - Table table = getExistingTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Table table = getExistingTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (isAcidTransactionRunning()) { table = Table.builder(table).setWriteId(OptionalLong.of(currentHiveTransaction.get().getTransaction().getWriteId())).build(); } - PartitionStatistics currentStatistics = getTableStatistics(identity, databaseName, tableName); + PartitionStatistics currentStatistics = getTableStatistics(databaseName, tableName); HdfsContext hdfsContext = new HdfsContext(session); tableActions.put( schemaTableName, @@ -614,7 +607,6 @@ public synchronized void finishInsertIntoExistingTable( ActionType.INSERT_EXISTING, new TableAndMore( table, - identity, Optional.empty(), Optional.of(currentLocation), Optional.of(fileNames), @@ -623,7 +615,6 @@ public synchronized void finishInsertIntoExistingTable( statisticsUpdate, cleanExtraOutputFilesOnCommit), hdfsContext, - identity, session.getQueryId())); return; } @@ -652,7 +643,7 @@ private boolean isAcidTransactionRunning() public synchronized void truncateUnpartitionedTable(ConnectorSession session, String databaseName, String tableName) { checkReadable(); - Optional
table = getTable(new HiveIdentity(session), databaseName, tableName); + Optional<Table>
table = getTable(databaseName, tableName); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); if (table.isEmpty()) { throw new TableNotFoundException(schemaTableName); @@ -688,11 +679,10 @@ public synchronized void finishRowLevelDelete( return; } setShared(); - HiveIdentity identity = new HiveIdentity(session); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action oldTableAction = tableActions.get(schemaTableName); if (oldTableAction == null) { - Table table = getExistingTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Table table = getExistingTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); HdfsContext hdfsContext = new HdfsContext(session); PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(table.getOwner().orElseThrow()); tableActions.put( @@ -701,12 +691,10 @@ public synchronized void finishRowLevelDelete( ActionType.DELETE_ROWS, new TableAndAcidDirectories( table, - identity, Optional.of(principalPrivileges), Optional.of(currentLocation), partitionAndStatementIds), hdfsContext, - identity, session.getQueryId())); return; } @@ -738,11 +726,10 @@ public synchronized void finishUpdate( return; } setShared(); - HiveIdentity identity = new HiveIdentity(session); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action oldTableAction = tableActions.get(schemaTableName); if (oldTableAction == null) { - Table table = getExistingTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Table table = getExistingTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); HdfsContext hdfsContext = new HdfsContext(session); PrincipalPrivileges principalPrivileges = buildInitialPrivilegeSet(table.getOwner().orElseThrow()); tableActions.put( @@ -751,12 +738,10 @@ public synchronized void finishUpdate( ActionType.UPDATE, new TableAndAcidDirectories( table, - identity, Optional.of(principalPrivileges), Optional.of(currentLocation), partitionAndStatementIds), hdfsContext, - identity, session.getQueryId())); return; } @@ -777,9 +762,9 @@ public synchronized void finishUpdate( throw new IllegalStateException("Unknown action type"); } - public synchronized Optional> getPartitionNames(HiveIdentity identity, String databaseName, String tableName) + public synchronized Optional> getPartitionNames(String databaseName, String tableName) { - Optional
table = getTable(identity, databaseName, tableName); + Optional<Table>
table = getTable(databaseName, tableName); if (table.isEmpty()) { return Optional.empty(); @@ -789,22 +774,20 @@ public synchronized Optional> getPartitionNames(HiveIdentity identi .map(Column::getName) .collect(toImmutableList()); - return doGetPartitionNames(identity, databaseName, tableName, columnNames, TupleDomain.all()); + return doGetPartitionNames(databaseName, tableName, columnNames, TupleDomain.all()); } public synchronized Optional> getPartitionNamesByFilter( - HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) { - return doGetPartitionNames(identity, databaseName, tableName, columnNames, partitionKeysFilter); + return doGetPartitionNames(databaseName, tableName, columnNames, partitionKeysFilter); } @GuardedBy("this") private Optional> doGetPartitionNames( - HiveIdentity identity, String databaseName, String tableName, List columnNames, @@ -818,7 +801,7 @@ private Optional> doGetPartitionNames( return Optional.of(ImmutableList.of()); } - Optional
table = getTable(identity, databaseName, tableName); + Optional<Table>
table = getTable(databaseName, tableName); if (table.isEmpty()) { return Optional.empty(); } @@ -829,7 +812,7 @@ private Optional> doGetPartitionNames( partitionNames = ImmutableList.of(); break; case PRE_EXISTING_TABLE: - Optional> partitionNameResult = delegate.getPartitionNamesByFilter(identity, databaseName, tableName, columnNames, partitionKeysFilter); + Optional> partitionNameResult = delegate.getPartitionNamesByFilter(databaseName, tableName, columnNames, partitionKeysFilter); if (partitionNameResult.isEmpty()) { throw new TrinoException(TRANSACTION_CONFLICT, format("Table '%s.%s' was dropped by another transaction", databaseName, tableName)); } @@ -877,7 +860,7 @@ private Optional> doGetPartitionNames( return Optional.of(resultBuilder.build()); } - public synchronized Map> getPartitionsByNames(HiveIdentity identity, String databaseName, String tableName, List partitionNames) + public synchronized Map> getPartitionsByNames(String databaseName, String tableName, List partitionNames) { checkReadable(); TableSource tableSource = getTableSource(databaseName, tableName); @@ -907,7 +890,6 @@ public synchronized Map> getPartitionsByNames(HiveId List partitionNamesToQuery = partitionNamesToQueryBuilder.build(); if (!partitionNamesToQuery.isEmpty()) { Map> delegateResult = delegate.getPartitionsByNames( - identity, databaseName, tableName, partitionNamesToQuery); @@ -948,11 +930,10 @@ public synchronized void addPartition( Map, Action> partitionActionsOfTable = partitionActions.computeIfAbsent(new SchemaTableName(databaseName, tableName), k -> new HashMap<>()); Action oldPartitionAction = partitionActionsOfTable.get(partition.getValues()); HdfsContext hdfsContext = new HdfsContext(session); - HiveIdentity identity = new HiveIdentity(session); if (oldPartitionAction == null) { partitionActionsOfTable.put( partition.getValues(), - new Action<>(ActionType.ADD, new PartitionAndMore(identity, partition, currentLocation, files, statistics, statistics, cleanExtraOutputFilesOnCommit), hdfsContext, identity, session.getQueryId())); + new Action<>(ActionType.ADD, new PartitionAndMore(partition, currentLocation, files, statistics, statistics, cleanExtraOutputFilesOnCommit), hdfsContext, session.getQueryId())); return; } switch (oldPartitionAction.getType()) { @@ -963,7 +944,7 @@ public synchronized void addPartition( } partitionActionsOfTable.put( partition.getValues(), - new Action<>(ActionType.ALTER, new PartitionAndMore(identity, partition, currentLocation, files, statistics, statistics, cleanExtraOutputFilesOnCommit), hdfsContext, identity, session.getQueryId())); + new Action<>(ActionType.ALTER, new PartitionAndMore(partition, currentLocation, files, statistics, statistics, cleanExtraOutputFilesOnCommit), hdfsContext, session.getQueryId())); return; case ADD: case ALTER: @@ -982,12 +963,11 @@ public synchronized void dropPartition(ConnectorSession session, String database Action oldPartitionAction = partitionActionsOfTable.get(partitionValues); if (oldPartitionAction == null) { HdfsContext hdfsContext = new HdfsContext(session); - HiveIdentity identity = new HiveIdentity(session); if (deleteData) { - partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP, null, hdfsContext, identity, session.getQueryId())); + partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP, null, hdfsContext, session.getQueryId())); } else { - partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP_PRESERVE_DATA, null, hdfsContext, identity, session.getQueryId())); + 
partitionActionsOfTable.put(partitionValues, new Action<>(ActionType.DROP_PRESERVE_DATA, null, hdfsContext, session.getQueryId())); } return; } @@ -1018,15 +998,14 @@ public synchronized void finishInsertIntoExistingPartition( boolean cleanExtraOutputFilesOnCommit) { setShared(); - HiveIdentity identity = new HiveIdentity(session); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Map, Action> partitionActionsOfTable = partitionActions.computeIfAbsent(schemaTableName, k -> new HashMap<>()); Action oldPartitionAction = partitionActionsOfTable.get(partitionValues); if (oldPartitionAction == null) { - Partition partition = delegate.getPartition(identity, databaseName, tableName, partitionValues) + Partition partition = delegate.getPartition(databaseName, tableName, partitionValues) .orElseThrow(() -> new PartitionNotFoundException(schemaTableName, partitionValues)); - String partitionName = getPartitionName(identity, databaseName, tableName, partitionValues); - PartitionStatistics currentStatistics = delegate.getPartitionStatistics(identity, databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); + String partitionName = getPartitionName(databaseName, tableName, partitionValues); + PartitionStatistics currentStatistics = delegate.getPartitionStatistics(databaseName, tableName, ImmutableSet.of(partitionName)).get(partitionName); if (currentStatistics == null) { throw new TrinoException(HIVE_METASTORE_ERROR, "currentStatistics is null"); } @@ -1036,7 +1015,6 @@ public synchronized void finishInsertIntoExistingPartition( new Action<>( ActionType.INSERT_EXISTING, new PartitionAndMore( - identity, partition, currentLocation, Optional.of(fileNames), @@ -1044,7 +1022,6 @@ public synchronized void finishInsertIntoExistingPartition( statisticsUpdate, cleanExtraOutputFilesOnCommit), context, - identity, session.getQueryId())); return; } @@ -1069,9 +1046,9 @@ private synchronized AcidTransaction getCurrentAcidTransaction() .orElseThrow(() -> new IllegalStateException("currentHiveTransaction not present")); } - private String getPartitionName(HiveIdentity identity, String databaseName, String tableName, List partitionValues) + private String getPartitionName(String databaseName, String tableName, List partitionValues) { - Table table = getTable(identity, databaseName, tableName) + Table table = getTable(databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); return getPartitionName(table, partitionValues); } @@ -1130,13 +1107,13 @@ public synchronized Set listRoleGrants(HivePrincipal principal) } @Override - public synchronized Set listTablePrivileges(HiveIdentity identity, String databaseName, String tableName, Optional principal) + public synchronized Set listTablePrivileges(String databaseName, String tableName, Optional principal) { checkReadable(); SchemaTableName schemaTableName = new SchemaTableName(databaseName, tableName); Action tableAction = tableActions.get(schemaTableName); if (tableAction == null) { - return delegate.listTablePrivileges(databaseName, tableName, getExistingTable(identity, databaseName, tableName).getOwner(), principal); + return delegate.listTablePrivileges(databaseName, tableName, getExistingTable(databaseName, tableName).getOwner(), principal); } switch (tableAction.getType()) { case ADD: @@ -1161,7 +1138,7 @@ public synchronized Set listTablePrivileges(HiveIdentity iden case INSERT_EXISTING: case DELETE_ROWS: case UPDATE: - return 
delegate.listTablePrivileges(databaseName, tableName, getExistingTable(identity, databaseName, tableName).getOwner(), principal); + return delegate.listTablePrivileges(databaseName, tableName, getExistingTable(databaseName, tableName).getOwner(), principal); case DROP: throw new TableNotFoundException(schemaTableName); case DROP_PRESERVE_DATA: @@ -1171,27 +1148,27 @@ public synchronized Set listTablePrivileges(HiveIdentity iden throw new IllegalStateException("Unknown action type"); } - private synchronized String getRequiredTableOwner(HiveIdentity identity, String databaseName, String tableName) + private synchronized String getRequiredTableOwner(String databaseName, String tableName) { - return getExistingTable(identity, databaseName, tableName).getOwner().orElseThrow(); + return getExistingTable(databaseName, tableName).getOwner().orElseThrow(); } - private Table getExistingTable(HiveIdentity identity, String databaseName, String tableName) + private Table getExistingTable(String databaseName, String tableName) { - return delegate.getTable(identity, databaseName, tableName) + return delegate.getTable(databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); } @Override - public synchronized void grantTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption) + public synchronized void grantTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption) { - setExclusive((delegate, hdfsEnvironment) -> delegate.grantTablePrivileges(databaseName, tableName, getRequiredTableOwner(identity, databaseName, tableName), grantee, grantor, privileges, grantOption)); + setExclusive((delegate, hdfsEnvironment) -> delegate.grantTablePrivileges(databaseName, tableName, getRequiredTableOwner(databaseName, tableName), grantee, grantor, privileges, grantOption)); } @Override - public synchronized void revokeTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption) + public synchronized void revokeTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption) { - setExclusive((delegate, hdfsEnvironment) -> delegate.revokeTablePrivileges(databaseName, tableName, getRequiredTableOwner(identity, databaseName, tableName), grantee, grantor, privileges, grantOption)); + setExclusive((delegate, hdfsEnvironment) -> delegate.revokeTablePrivileges(databaseName, tableName, getRequiredTableOwner(databaseName, tableName), grantee, grantor, privileges, grantOption)); } public synchronized String declareIntentionToWrite(ConnectorSession session, WriteMode writeMode, Path stagingPathRoot, SchemaTableName schemaTableName) @@ -1204,11 +1181,10 @@ public synchronized String declareIntentionToWrite(ConnectorSession session, Wri } } HdfsContext hdfsContext = new HdfsContext(session); - HiveIdentity identity = new HiveIdentity(session); String queryId = session.getQueryId(); String declarationId = queryId + "_" + declaredIntentionsToWriteCounter; declaredIntentionsToWriteCounter++; - declaredIntentionsToWrite.add(new DeclaredIntentionToWrite(declarationId, writeMode, hdfsContext, identity, queryId, stagingPathRoot, schemaTableName)); + declaredIntentionsToWrite.add(new DeclaredIntentionToWrite(declarationId, writeMode, 
hdfsContext, queryId, stagingPathRoot, schemaTableName)); return declarationId; } @@ -1310,14 +1286,13 @@ private AcidTransaction beginOperation(ConnectorSession session, Table table, Ac // because we need the writeId in order to write the delta files. HiveTransaction hiveTransaction = makeHiveTransaction(session, transactionId -> { acquireTableWriteLock( - new HiveIdentity(session), queryId, transactionId, table.getDatabaseName(), table.getTableName(), hiveOperation, !table.getPartitionColumns().isEmpty()); - long writeId = allocateWriteId(new HiveIdentity(session), table.getDatabaseName(), table.getTableName(), transactionId); + long writeId = allocateWriteId(table.getDatabaseName(), table.getTableName(), transactionId); return new AcidTransaction(operation, transactionId, writeId, updateProcessor); }); hiveTransactionSupplier = Optional.of(() -> hiveTransaction); @@ -1329,22 +1304,21 @@ private AcidTransaction beginOperation(ConnectorSession session, Table table, Ac private HiveTransaction makeHiveTransaction(ConnectorSession session, Function transactionMaker) { String queryId = session.getQueryId(); - HiveIdentity identity = new HiveIdentity(session); long heartbeatInterval = configuredTransactionHeartbeatInterval .map(Duration::toMillis) .orElseGet(this::getServerExpectedHeartbeatIntervalMillis); - long transactionId = delegate.openTransaction(identity); + long transactionId = delegate.openTransaction(); log.debug("Using hive transaction %s for %s", transactionId, queryId); ScheduledFuture heartbeatTask = heartbeatExecutor.scheduleAtFixedRate( - () -> delegate.sendTransactionHeartbeat(identity, transactionId), + () -> delegate.sendTransactionHeartbeat(transactionId), 0, heartbeatInterval, MILLISECONDS); AcidTransaction transaction = transactionMaker.apply(transactionId); - return new HiveTransaction(identity, queryId, transactionId, heartbeatTask, transaction); + return new HiveTransaction(queryId, transactionId, heartbeatTask, transaction); } private long getServerExpectedHeartbeatIntervalMillis() @@ -1375,7 +1349,6 @@ public synchronized Optional getValidWriteIds(ConnectorSess public synchronized void cleanupQuery(ConnectorSession session) { String queryId = session.getQueryId(); - HiveIdentity identity = new HiveIdentity(session); checkState(currentQueryId.equals(Optional.of(queryId)), "Invalid query id %s while current query is", queryId, currentQueryId); Optional transaction = currentHiveTransaction; @@ -1389,7 +1362,7 @@ public synchronized void cleanupQuery(ConnectorSession session) } catch (Throwable commitFailure) { try { - postCommitCleanup(identity, transaction, false); + postCommitCleanup(transaction, false); } catch (Throwable cleanupFailure) { if (cleanupFailure != commitFailure) { @@ -1398,10 +1371,10 @@ public synchronized void cleanupQuery(ConnectorSession session) } throw commitFailure; } - postCommitCleanup(identity, transaction, true); + postCommitCleanup(transaction, true); } - private void postCommitCleanup(HiveIdentity identity, Optional transaction, boolean commit) + private void postCommitCleanup(Optional transaction, boolean commit) { clearCurrentTransaction(); long transactionId = transaction.get().getTransactionId(); @@ -1410,10 +1383,10 @@ private void postCommitCleanup(HiveIdentity identity, Optional if (commit) { // Any failure around aborted transactions, etc would be handled by Hive Metastore commit and TrinoException will be thrown - delegate.commitTransaction(identity, transactionId); + delegate.commitTransaction(transactionId); } else { - 
delegate.abortTransaction(identity, transactionId); + delegate.abortTransaction(transactionId); } } @@ -1439,10 +1412,10 @@ private void commitShared() Action action = entry.getValue(); switch (action.getType()) { case DROP: - committer.prepareDropTable(action.getIdentity(), schemaTableName); + committer.prepareDropTable(schemaTableName); break; case ALTER: - committer.prepareAlterTable(action.getHdfsContext(), action.getIdentity(), action.getQueryId(), action.getData()); + committer.prepareAlterTable(action.getHdfsContext(), action.getQueryId(), action.getData()); break; case ADD: committer.prepareAddTable(action.getHdfsContext(), action.getQueryId(), action.getData()); @@ -1467,19 +1440,19 @@ private void commitShared() Action action = partitionEntry.getValue(); switch (action.getType()) { case DROP: - committer.prepareDropPartition(action.getIdentity(), schemaTableName, partitionValues, true); + committer.prepareDropPartition(schemaTableName, partitionValues, true); break; case DROP_PRESERVE_DATA: - committer.prepareDropPartition(action.getIdentity(), schemaTableName, partitionValues, false); + committer.prepareDropPartition(schemaTableName, partitionValues, false); break; case ALTER: - committer.prepareAlterPartition(action.getHdfsContext(), action.getIdentity(), action.getQueryId(), action.getData()); + committer.prepareAlterPartition(action.getHdfsContext(), action.getQueryId(), action.getData()); break; case ADD: - committer.prepareAddPartition(action.getHdfsContext(), action.getIdentity(), action.getQueryId(), action.getData()); + committer.prepareAddPartition(action.getHdfsContext(), action.getQueryId(), action.getData()); break; case INSERT_EXISTING: - committer.prepareInsertExistingPartition(action.getHdfsContext(), action.getIdentity(), action.getQueryId(), action.getData()); + committer.prepareInsertExistingPartition(action.getHdfsContext(), action.getQueryId(), action.getData()); break; case UPDATE: case DELETE_ROWS: @@ -1589,20 +1562,20 @@ private class Committer this.transaction = transaction; } - private void prepareDropTable(HiveIdentity identity, SchemaTableName schemaTableName) + private void prepareDropTable(SchemaTableName schemaTableName) { metastoreDeleteOperations.add(new IrreversibleMetastoreOperation( format("drop table %s", schemaTableName), - () -> delegate.dropTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName(), true))); + () -> delegate.dropTable(schemaTableName.getSchemaName(), schemaTableName.getTableName(), true))); } - private void prepareAlterTable(HdfsContext hdfsContext, HiveIdentity identity, String queryId, TableAndMore tableAndMore) + private void prepareAlterTable(HdfsContext hdfsContext, String queryId, TableAndMore tableAndMore) { deleteOnly = false; Table table = tableAndMore.getTable(); String targetLocation = table.getStorage().getLocation(); - Table oldTable = delegate.getTable(identity, table.getDatabaseName(), table.getTableName()) + Table oldTable = delegate.getTable(table.getDatabaseName(), table.getTableName()) .orElseThrow(() -> new TrinoException(TRANSACTION_CONFLICT, "The table that this transaction modified was deleted in another transaction. 
" + table.getSchemaTableName())); String oldTableLocation = oldTable.getStorage().getLocation(); Path oldTablePath = new Path(oldTableLocation); @@ -1647,10 +1620,9 @@ private void prepareAlterTable(HdfsContext hdfsContext, HiveIdentity identity, S } // Partition alter must happen regardless of whether original and current location is the same // because metadata might change: e.g. storage format, column types, etc - alterTableOperations.add(new AlterTableOperation(tableAndMore.getIdentity(), tableAndMore.getTable(), oldTable, tableAndMore.getPrincipalPrivileges())); + alterTableOperations.add(new AlterTableOperation(tableAndMore.getTable(), oldTable, tableAndMore.getPrincipalPrivileges())); updateStatisticsOperations.add(new UpdateStatisticsOperation( - tableAndMore.getIdentity(), table.getSchemaTableName(), Optional.empty(), tableAndMore.getStatisticsUpdate(), @@ -1708,10 +1680,9 @@ private void prepareAddTable(HdfsContext context, String queryId, TableAndMore t } // if targetLocation is not set in table we assume table directory is created by HMS } - addTableOperations.add(new CreateTableOperation(tableAndMore.getIdentity(), table, tableAndMore.getPrincipalPrivileges(), tableAndMore.isIgnoreExisting())); + addTableOperations.add(new CreateTableOperation(table, tableAndMore.getPrincipalPrivileges(), tableAndMore.isIgnoreExisting())); if (!isPrestoView(table)) { updateStatisticsOperations.add(new UpdateStatisticsOperation( - tableAndMore.getIdentity(), table.getSchemaTableName(), Optional.empty(), tableAndMore.getStatisticsUpdate(), @@ -1736,7 +1707,6 @@ private void prepareInsertExistingTable(HdfsContext context, String queryId, Tab cleanExtraOutputFiles(context, queryId, tableAndMore); } updateStatisticsOperations.add(new UpdateStatisticsOperation( - tableAndMore.getIdentity(), table.getSchemaTableName(), Optional.empty(), tableAndMore.getStatisticsUpdate(), @@ -1744,7 +1714,7 @@ private void prepareInsertExistingTable(HdfsContext context, String queryId, Tab if (isAcidTransactionRunning()) { AcidTransaction transaction = getCurrentAcidTransaction(); - updateTableWriteId(tableAndMore.getIdentity(), table.getDatabaseName(), table.getTableName(), transaction.getAcidTransactionId(), transaction.getWriteId(), OptionalLong.empty()); + updateTableWriteId(table.getDatabaseName(), table.getTableName(), transaction.getAcidTransactionId(), transaction.getWriteId(), OptionalLong.empty()); } } @@ -1771,16 +1741,15 @@ private void prepareDeleteRowsFromExistingTable(HdfsContext context, TableAndMor partitionRowCounts.compute(ps.getPartitionName(), (k, count) -> count != null ? 
count + rowCount : rowCount); } - HiveIdentity identity = deletionState.getIdentity(); String databaseName = table.getDatabaseName(); String tableName = table.getTableName(); // Update the table statistics - PartitionStatistics tableStatistics = getTableStatistics(identity, databaseName, tableName); + PartitionStatistics tableStatistics = getTableStatistics(databaseName, tableName); HiveBasicStatistics basicStatistics = tableStatistics.getBasicStatistics(); if (basicStatistics.getRowCount().isPresent()) { tableStatistics = tableStatistics.withAdjustedRowCount(-totalRowsDeleted); - updateStatisticsOperations.add(new UpdateStatisticsOperation(identity, table.getSchemaTableName(), Optional.empty(), tableStatistics, true)); + updateStatisticsOperations.add(new UpdateStatisticsOperation(table.getSchemaTableName(), Optional.empty(), tableStatistics, true)); } // Decrement the numRows of the table itself, if it has the numRows parameter if (table.getParameters() != null && table.getParameters().get(NUM_ROWS) != null) { @@ -1788,10 +1757,10 @@ private void prepareDeleteRowsFromExistingTable(HdfsContext context, TableAndMor .setParameters(MetastoreUtil.adjustRowCount(table.getParameters(), "decrement table rows", -totalRowsDeleted)) .setWriteId(OptionalLong.of(transaction.getWriteId())) .build(); - alterTableOperations.add(new AlterTableOperation(identity, updatedTable, table, tableAndMore.getPrincipalPrivileges())); + alterTableOperations.add(new AlterTableOperation(updatedTable, table, tableAndMore.getPrincipalPrivileges())); } - Map> partitionsOptionalMap = getPartitionsByNames(identity, databaseName, tableName, new ArrayList<>(partitionRowCounts.keySet())); + Map> partitionsOptionalMap = getPartitionsByNames(databaseName, tableName, new ArrayList<>(partitionRowCounts.keySet())); Map updatedPartitions = new HashMap<>(partitionsOptionalMap.size()); // Collect the partitions that have deletions and also have the numRows parameter @@ -1808,7 +1777,7 @@ private void prepareDeleteRowsFromExistingTable(HdfsContext context, TableAndMor } // Decrement the row counts in the Partitions and PartitionStatistics - Map allPartitionStatistics = getPartitionStatistics(identity, databaseName, tableName, updatedPartitions.keySet()); + Map allPartitionStatistics = getPartitionStatistics(databaseName, tableName, updatedPartitions.keySet()); allPartitionStatistics.forEach((partitionId, statistics) -> { Long rowCount = partitionRowCounts.get(partitionId); requireNonNull(rowCount, "rowCount is null"); @@ -1818,20 +1787,19 @@ private void prepareDeleteRowsFromExistingTable(HdfsContext context, TableAndMor requireNonNull(updatedPartition, "updatedPartition is null"); Partition originalPartition = partitionsOptionalMap.get(partitionId).get(); alterPartitionOperations.add(new AlterPartitionOperation( - identity, new PartitionWithStatistics(updatedPartition, partitionId, updatedStatistics), new PartitionWithStatistics(originalPartition, partitionId, statistics))); - updateStatisticsOperations.add(new UpdateStatisticsOperation(identity, table.getSchemaTableName(), Optional.of(partitionId), updatedStatistics, false)); + updateStatisticsOperations.add(new UpdateStatisticsOperation(table.getSchemaTableName(), Optional.of(partitionId), updatedStatistics, false)); }); // Finally, tell the metastore what has changed long writeId = transaction.getWriteId(); long transactionId = transaction.getAcidTransactionId(); if (!updatedPartitions.isEmpty()) { - alterPartitions(identity, databaseName, tableName, 
ImmutableList.copyOf(updatedPartitions.values()), writeId); - addDynamicPartitions(identity, databaseName, tableName, ImmutableList.copyOf(updatedPartitions.keySet()), transactionId, writeId, AcidOperation.DELETE); + alterPartitions(databaseName, tableName, ImmutableList.copyOf(updatedPartitions.values()), writeId); + addDynamicPartitions(databaseName, tableName, ImmutableList.copyOf(updatedPartitions.keySet()), transactionId, writeId, AcidOperation.DELETE); } - updateTableWriteId(identity, databaseName, tableName, transactionId, writeId, OptionalLong.of(-totalRowsDeleted)); + updateTableWriteId(databaseName, tableName, transactionId, writeId, OptionalLong.of(-totalRowsDeleted)); } private void prepareUpdateExistingTable(HdfsContext context, TableAndMore tableAndMore) @@ -1850,37 +1818,36 @@ private void prepareUpdateExistingTable(HdfsContext context, TableAndMore tableA .forEach(directory -> cleanUpTasksForAbort.add(new DirectoryCleanUpTask(context, new Path(directory), true))); - HiveIdentity identity = updateState.getIdentity(); String databaseName = table.getDatabaseName(); String tableName = table.getTableName(); // Finally, tell the metastore what has changed long writeId = transaction.getWriteId(); long transactionId = transaction.getAcidTransactionId(); - updateTableWriteId(identity, databaseName, tableName, transactionId, writeId, OptionalLong.empty()); + updateTableWriteId(databaseName, tableName, transactionId, writeId, OptionalLong.empty()); } - private void prepareDropPartition(HiveIdentity identity, SchemaTableName schemaTableName, List partitionValues, boolean deleteData) + private void prepareDropPartition(SchemaTableName schemaTableName, List partitionValues, boolean deleteData) { metastoreDeleteOperations.add(new IrreversibleMetastoreOperation( format("drop partition %s.%s %s", schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues), - () -> delegate.dropPartition(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues, deleteData))); + () -> delegate.dropPartition(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionValues, deleteData))); } - private void prepareAlterPartition(HdfsContext hdfsContext, HiveIdentity identity, String queryId, PartitionAndMore partitionAndMore) + private void prepareAlterPartition(HdfsContext hdfsContext, String queryId, PartitionAndMore partitionAndMore) { deleteOnly = false; Partition partition = partitionAndMore.getPartition(); String targetLocation = partition.getStorage().getLocation(); - Optional oldPartition = delegate.getPartition(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + Optional oldPartition = delegate.getPartition(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); if (oldPartition.isEmpty()) { throw new TrinoException( TRANSACTION_CONFLICT, format("The partition that this transaction modified was deleted in another transaction. 
%s %s", partition.getTableName(), partition.getValues())); } - String partitionName = getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); - PartitionStatistics oldPartitionStatistics = getExistingPartitionStatistics(identity, partition, partitionName); + String partitionName = getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + PartitionStatistics oldPartitionStatistics = getExistingPartitionStatistics(partition, partitionName); String oldPartitionLocation = oldPartition.get().getStorage().getLocation(); Path oldPartitionPath = new Path(oldPartitionLocation); @@ -1924,7 +1891,6 @@ private void prepareAlterPartition(HdfsContext hdfsContext, HiveIdentity identit // Partition alter must happen regardless of whether original and current location is the same // because metadata might change: e.g. storage format, column types, etc alterPartitionOperations.add(new AlterPartitionOperation( - partitionAndMore.getIdentity(), new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate()), new PartitionWithStatistics(oldPartition.get(), partitionName, oldPartitionStatistics))); } @@ -2001,10 +1967,10 @@ private void cleanExtraOutputFiles(HdfsContext hdfsContext, String queryId, Path } } - private PartitionStatistics getExistingPartitionStatistics(HiveIdentity identity, Partition partition, String partitionName) + private PartitionStatistics getExistingPartitionStatistics(Partition partition, String partitionName) { try { - PartitionStatistics statistics = delegate.getPartitionStatistics(identity, partition.getDatabaseName(), partition.getTableName(), ImmutableSet.of(partitionName)) + PartitionStatistics statistics = delegate.getPartitionStatistics(partition.getDatabaseName(), partition.getTableName(), ImmutableSet.of(partitionName)) .get(partitionName); if (statistics == null) { throw new TrinoException( @@ -2027,7 +1993,7 @@ private PartitionStatistics getExistingPartitionStatistics(HiveIdentity identity } } - private void prepareAddPartition(HdfsContext hdfsContext, HiveIdentity identity, String queryId, PartitionAndMore partitionAndMore) + private void prepareAddPartition(HdfsContext hdfsContext, String queryId, PartitionAndMore partitionAndMore) { deleteOnly = false; @@ -2040,7 +2006,7 @@ private void prepareAddPartition(HdfsContext hdfsContext, HiveIdentity identity, PartitionAdder partitionAdder = partitionAdders.computeIfAbsent( partition.getSchemaTableName(), - ignored -> new PartitionAdder(partitionAndMore.getIdentity(), partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE)); + ignored -> new PartitionAdder(partition.getDatabaseName(), partition.getTableName(), delegate, PARTITION_COMMIT_BATCH_SIZE)); if (pathExists(hdfsContext, hdfsEnvironment, currentPath)) { if (!targetPath.equals(currentPath)) { @@ -2056,11 +2022,11 @@ private void prepareAddPartition(HdfsContext hdfsContext, HiveIdentity identity, cleanUpTasksForAbort.add(new DirectoryCleanUpTask(hdfsContext, targetPath, true)); createDirectory(hdfsContext, hdfsEnvironment, targetPath); } - String partitionName = getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues()); + String partitionName = getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues()); partitionAdder.addPartition(new PartitionWithStatistics(partition, partitionName, partitionAndMore.getStatisticsUpdate())); } - private 
void prepareInsertExistingPartition(HdfsContext hdfsContext, HiveIdentity identity, String queryId, PartitionAndMore partitionAndMore) + private void prepareInsertExistingPartition(HdfsContext hdfsContext, String queryId, PartitionAndMore partitionAndMore) { deleteOnly = false; @@ -2079,9 +2045,8 @@ private void prepareInsertExistingPartition(HdfsContext hdfsContext, HiveIdentit } updateStatisticsOperations.add(new UpdateStatisticsOperation( - partitionAndMore.getIdentity(), partition.getSchemaTableName(), - Optional.of(getPartitionName(identity, partition.getDatabaseName(), partition.getTableName(), partition.getValues())), + Optional.of(getPartitionName(partition.getDatabaseName(), partition.getTableName(), partition.getValues())), partitionAndMore.getStatisticsUpdate(), true)); } @@ -2370,9 +2335,8 @@ private void rollbackShared() Path baseDirectory = declaredIntentionToWrite.getRootPath(); pathsToClean.add(baseDirectory); - HiveIdentity identity = declaredIntentionToWrite.getIdentity(); SchemaTableName schemaTableName = declaredIntentionToWrite.getSchemaTableName(); - Optional
table = delegate.getTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName()); + Optional
table = delegate.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()); if (table.isPresent()) { // check every existing partition that is outside for the base directory List partitionColumns = table.get().getPartitionColumns(); @@ -2381,10 +2345,13 @@ private void rollbackShared() .map(Column::getName) .collect(toImmutableList()); List partitionNames = delegate.getPartitionNamesByFilter( - identity, schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionColumnNames, TupleDomain.all()) + schemaTableName.getSchemaName(), + schemaTableName.getTableName(), + partitionColumnNames, + TupleDomain.all()) .orElse(ImmutableList.of()); for (List partitionNameBatch : Iterables.partition(partitionNames, 10)) { - Collection> partitions = delegate.getPartitionsByNames(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionNameBatch).values(); + Collection> partitions = delegate.getPartitionsByNames(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionNameBatch).values(); partitions.stream() .filter(Optional::isPresent) .map(Optional::get) @@ -2804,10 +2771,9 @@ public static class Action private final ActionType type; private final T data; private final HdfsContext hdfsContext; - private final HiveIdentity identity; private final String queryId; - public Action(ActionType type, T data, HdfsContext hdfsContext, HiveIdentity identity, String queryId) + public Action(ActionType type, T data, HdfsContext hdfsContext, String queryId) { this.type = requireNonNull(type, "type is null"); if (type == ActionType.DROP || type == ActionType.DROP_PRESERVE_DATA) { @@ -2818,7 +2784,6 @@ public Action(ActionType type, T data, HdfsContext hdfsContext, HiveIdentity ide } this.data = data; this.hdfsContext = requireNonNull(hdfsContext, "hdfsContext is null"); - this.identity = requireNonNull(identity, "identity is null"); this.queryId = requireNonNull(queryId, "queryId is null"); } @@ -2843,11 +2808,6 @@ public String getQueryId() return queryId; } - public HiveIdentity getIdentity() - { - return identity; - } - @Override public String toString() { @@ -2862,7 +2822,6 @@ public String toString() private static class TableAndMore { private final Table table; - private final HiveIdentity identity; private final Optional principalPrivileges; private final Optional currentLocation; // unpartitioned table only private final Optional> fileNames; @@ -2873,7 +2832,6 @@ private static class TableAndMore public TableAndMore( Table table, - HiveIdentity identity, Optional principalPrivileges, Optional currentLocation, Optional> fileNames, @@ -2883,7 +2841,6 @@ public TableAndMore( boolean cleanExtraOutputFilesOnCommit) { this.table = requireNonNull(table, "table is null"); - this.identity = requireNonNull(identity, "identity is null"); this.principalPrivileges = requireNonNull(principalPrivileges, "principalPrivileges is null"); this.currentLocation = requireNonNull(currentLocation, "currentLocation is null"); this.fileNames = requireNonNull(fileNames, "fileNames is null"); @@ -2906,11 +2863,6 @@ public Table getTable() return table; } - public HiveIdentity getIdentity() - { - return identity; - } - public PrincipalPrivileges getPrincipalPrivileges() { checkState(principalPrivileges.isPresent()); @@ -2973,11 +2925,10 @@ private static class TableAndAcidDirectories { private final List partitionAndStatementIds; - public TableAndAcidDirectories(Table table, HiveIdentity identity, Optional principalPrivileges, Optional currentLocation, List 
partitionAndStatementIds) + public TableAndAcidDirectories(Table table, Optional principalPrivileges, Optional currentLocation, List partitionAndStatementIds) { super( table, - identity, principalPrivileges, currentLocation, Optional.empty(), @@ -3007,7 +2958,6 @@ public String toString() private static class PartitionAndMore { - private final HiveIdentity identity; private final Partition partition; private final Path currentLocation; private final Optional> fileNames; @@ -3015,9 +2965,8 @@ private static class PartitionAndMore private final PartitionStatistics statisticsUpdate; private final boolean cleanExtraOutputFilesOnCommit; - public PartitionAndMore(HiveIdentity identity, Partition partition, Path currentLocation, Optional> fileNames, PartitionStatistics statistics, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit) + public PartitionAndMore(Partition partition, Path currentLocation, Optional> fileNames, PartitionStatistics statistics, PartitionStatistics statisticsUpdate, boolean cleanExtraOutputFilesOnCommit) { - this.identity = requireNonNull(identity, "identity is null"); this.partition = requireNonNull(partition, "partition is null"); this.currentLocation = requireNonNull(currentLocation, "currentLocation is null"); this.fileNames = requireNonNull(fileNames, "fileNames is null"); @@ -3026,11 +2975,6 @@ public PartitionAndMore(HiveIdentity identity, Partition partition, Path current this.cleanExtraOutputFilesOnCommit = cleanExtraOutputFilesOnCommit; } - public HiveIdentity getIdentity() - { - return identity; - } - public Partition getPartition() { return partition; @@ -3099,17 +3043,15 @@ private static class DeclaredIntentionToWrite private final String declarationId; private final WriteMode mode; private final HdfsContext hdfsContext; - private final HiveIdentity identity; private final String queryId; private final Path rootPath; private final SchemaTableName schemaTableName; - public DeclaredIntentionToWrite(String declarationId, WriteMode mode, HdfsContext hdfsContext, HiveIdentity identity, String queryId, Path stagingPathRoot, SchemaTableName schemaTableName) + public DeclaredIntentionToWrite(String declarationId, WriteMode mode, HdfsContext hdfsContext, String queryId, Path stagingPathRoot, SchemaTableName schemaTableName) { this.declarationId = requireNonNull(declarationId, "declarationId is null"); this.mode = requireNonNull(mode, "mode is null"); this.hdfsContext = requireNonNull(hdfsContext, "hdfsContext is null"); - this.identity = requireNonNull(identity, "identity is null"); this.queryId = requireNonNull(queryId, "queryId is null"); this.rootPath = requireNonNull(stagingPathRoot, "stagingPathRoot is null"); this.schemaTableName = requireNonNull(schemaTableName, "schemaTableName is null"); @@ -3130,11 +3072,6 @@ public HdfsContext getHdfsContext() return hdfsContext; } - public HiveIdentity getIdentity() - { - return identity; - } - public String getQueryId() { return queryId; @@ -3156,7 +3093,6 @@ public String toString() return toStringHelper(this) .add("mode", mode) .add("hdfsContext", hdfsContext) - .add("identity", identity) .add("queryId", queryId) .add("rootPath", rootPath) .add("schemaTableName", schemaTableName) @@ -3297,16 +3233,14 @@ public void run() private static class CreateTableOperation { - private final HiveIdentity identity; private final Table newTable; private final PrincipalPrivileges privileges; private boolean tableCreated; private final boolean ignoreExisting; private final String queryId; - public 
CreateTableOperation(HiveIdentity identity, Table newTable, PrincipalPrivileges privileges, boolean ignoreExisting) + public CreateTableOperation(Table newTable, PrincipalPrivileges privileges, boolean ignoreExisting) { - this.identity = requireNonNull(identity, "identity is null"); requireNonNull(newTable, "newTable is null"); this.newTable = newTable; this.privileges = requireNonNull(privileges, "privileges is null"); @@ -3322,12 +3256,12 @@ public String getDescription() public void run(HiveMetastoreClosure metastore) { try { - metastore.createTable(identity, newTable, privileges); + metastore.createTable(newTable, privileges); } catch (RuntimeException e) { boolean done = false; try { - Optional
existingTable = metastore.getTable(identity, newTable.getDatabaseName(), newTable.getTableName()); + Optional
existingTable = metastore.getTable(newTable.getDatabaseName(), newTable.getTableName()); if (existingTable.isPresent()) { Table table = existingTable.get(); Optional existingTableQueryId = getPrestoQueryId(table); @@ -3387,21 +3321,19 @@ public void undo(HiveMetastoreClosure metastore) if (!tableCreated) { return; } - metastore.dropTable(identity, newTable.getDatabaseName(), newTable.getTableName(), false); + metastore.dropTable(newTable.getDatabaseName(), newTable.getTableName(), false); } } private static class AlterTableOperation { - private final HiveIdentity identity; private final Table newTable; private final Table oldTable; private final PrincipalPrivileges principalPrivileges; private boolean undo; - public AlterTableOperation(HiveIdentity identity, Table newTable, Table oldTable, PrincipalPrivileges principalPrivileges) + public AlterTableOperation(Table newTable, Table oldTable, PrincipalPrivileges principalPrivileges) { - this.identity = requireNonNull(identity, "identity is null"); this.newTable = requireNonNull(newTable, "newTable is null"); this.oldTable = requireNonNull(oldTable, "oldTable is null"); this.principalPrivileges = requireNonNull(principalPrivileges, "principalPrivileges is null"); @@ -3421,10 +3353,10 @@ public void run(HiveMetastoreClosure metastore, AcidTransaction transaction) { undo = true; if (transaction.isTransactional()) { - metastore.alterTransactionalTable(identity, newTable, transaction.getAcidTransactionId(), transaction.getWriteId(), principalPrivileges); + metastore.alterTransactionalTable(newTable, transaction.getAcidTransactionId(), transaction.getWriteId(), principalPrivileges); } else { - metastore.replaceTable(identity, newTable.getDatabaseName(), newTable.getTableName(), newTable, principalPrivileges); + metastore.replaceTable(newTable.getDatabaseName(), newTable.getTableName(), newTable, principalPrivileges); } } @@ -3435,24 +3367,22 @@ public void undo(HiveMetastoreClosure metastore, AcidTransaction transaction) } if (transaction.isTransactional()) { - metastore.alterTransactionalTable(identity, oldTable, transaction.getAcidTransactionId(), transaction.getWriteId(), principalPrivileges); + metastore.alterTransactionalTable(oldTable, transaction.getAcidTransactionId(), transaction.getWriteId(), principalPrivileges); } else { - metastore.replaceTable(identity, oldTable.getDatabaseName(), oldTable.getTableName(), oldTable, principalPrivileges); + metastore.replaceTable(oldTable.getDatabaseName(), oldTable.getTableName(), oldTable, principalPrivileges); } } } private static class AlterPartitionOperation { - private final HiveIdentity identity; private final PartitionWithStatistics newPartition; private final PartitionWithStatistics oldPartition; private boolean undo; - public AlterPartitionOperation(HiveIdentity identity, PartitionWithStatistics newPartition, PartitionWithStatistics oldPartition) + public AlterPartitionOperation(PartitionWithStatistics newPartition, PartitionWithStatistics oldPartition) { - this.identity = requireNonNull(identity, "identity is null"); this.newPartition = requireNonNull(newPartition, "newPartition is null"); this.oldPartition = requireNonNull(oldPartition, "oldPartition is null"); checkArgument(newPartition.getPartition().getDatabaseName().equals(oldPartition.getPartition().getDatabaseName())); @@ -3472,7 +3402,7 @@ public String getDescription() public void run(HiveMetastoreClosure metastore) { undo = true; - metastore.alterPartition(identity, newPartition.getPartition().getDatabaseName(), 
newPartition.getPartition().getTableName(), newPartition); + metastore.alterPartition(newPartition.getPartition().getDatabaseName(), newPartition.getPartition().getTableName(), newPartition); } public void undo(HiveMetastoreClosure metastore) @@ -3480,13 +3410,12 @@ public void undo(HiveMetastoreClosure metastore) if (!undo) { return; } - metastore.alterPartition(identity, oldPartition.getPartition().getDatabaseName(), oldPartition.getPartition().getTableName(), oldPartition); + metastore.alterPartition(oldPartition.getPartition().getDatabaseName(), oldPartition.getPartition().getTableName(), oldPartition); } } private static class UpdateStatisticsOperation { - private final HiveIdentity identity; private final SchemaTableName tableName; private final Optional partitionName; private final PartitionStatistics statistics; @@ -3494,9 +3423,8 @@ private static class UpdateStatisticsOperation private boolean done; - public UpdateStatisticsOperation(HiveIdentity identity, SchemaTableName tableName, Optional partitionName, PartitionStatistics statistics, boolean merge) + public UpdateStatisticsOperation(SchemaTableName tableName, Optional partitionName, PartitionStatistics statistics, boolean merge) { - this.identity = requireNonNull(identity, "identity is null"); this.tableName = requireNonNull(tableName, "tableName is null"); this.partitionName = requireNonNull(partitionName, "partitionName is null"); this.statistics = requireNonNull(statistics, "statistics is null"); @@ -3506,10 +3434,10 @@ public UpdateStatisticsOperation(HiveIdentity identity, SchemaTableName tableNam public void run(HiveMetastoreClosure metastore, AcidTransaction transaction) { if (partitionName.isPresent()) { - metastore.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::updateStatistics); + metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::updateStatistics); } else { - metastore.updateTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), transaction, this::updateStatistics); + metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), transaction, this::updateStatistics); } done = true; } @@ -3520,10 +3448,10 @@ public void undo(HiveMetastoreClosure metastore, AcidTransaction transaction) return; } if (partitionName.isPresent()) { - metastore.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::resetStatistics); + metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), partitionName.get(), this::resetStatistics); } else { - metastore.updateTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), transaction, this::resetStatistics); + metastore.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), transaction, this::resetStatistics); } } @@ -3548,7 +3476,6 @@ private PartitionStatistics resetStatistics(PartitionStatistics currentStatistic private static class PartitionAdder { - private final HiveIdentity identity; private final String schemaName; private final String tableName; private final HiveMetastoreClosure metastore; @@ -3556,9 +3483,8 @@ private static class PartitionAdder private final List partitions; private List> createdPartitionValues = new ArrayList<>(); - public PartitionAdder(HiveIdentity identity, String schemaName, String tableName, HiveMetastoreClosure metastore, int batchSize) + public 
PartitionAdder(String schemaName, String tableName, HiveMetastoreClosure metastore, int batchSize) { - this.identity = identity; this.schemaName = schemaName; this.tableName = tableName; this.metastore = metastore; @@ -3587,7 +3513,7 @@ public void execute(AcidTransaction transaction) List> batchedPartitions = Lists.partition(partitions, batchSize); for (List batch : batchedPartitions) { try { - metastore.addPartitions(identity, schemaName, tableName, batch); + metastore.addPartitions(schemaName, tableName, batch); for (PartitionWithStatistics partition : batch) { createdPartitionValues.add(partition.getPartition().getValues()); } @@ -3598,7 +3524,7 @@ public void execute(AcidTransaction transaction) boolean batchCompletelyAdded = true; for (PartitionWithStatistics partition : batch) { try { - Optional remotePartition = metastore.getPartition(identity, schemaName, tableName, partition.getPartition().getValues()); + Optional remotePartition = metastore.getPartition(schemaName, tableName, partition.getPartition().getValues()); // getPrestoQueryId(partition) is guaranteed to be non-empty. It is asserted in PartitionAdder.addPartition. if (remotePartition.isPresent() && getPrestoQueryId(remotePartition.get()).equals(getPrestoQueryId(partition.getPartition()))) { createdPartitionValues.add(partition.getPartition().getValues()); @@ -3628,7 +3554,7 @@ public void execute(AcidTransaction transaction) } if (transaction.isAcidTransactionRunning()) { List partitionNames = partitions.stream().map(PartitionWithStatistics::getPartitionName).collect(Collectors.toUnmodifiableList()); - metastore.addDynamicPartitions(identity, schemaName, tableName, partitionNames, transaction.getAcidTransactionId(), transaction.getWriteId(), transaction.getOperation()); + metastore.addDynamicPartitions(schemaName, tableName, partitionNames, transaction.getAcidTransactionId(), transaction.getWriteId(), transaction.getOperation()); } partitions.clear(); } @@ -3639,7 +3565,7 @@ public List> rollback() List> partitionsFailedToRollback = new ArrayList<>(); for (List createdPartitionValue : createdPartitionValues) { try { - metastore.dropPartition(identity, schemaName, tableName, createdPartitionValue, false); + metastore.dropPartition(schemaName, tableName, createdPartitionValue, false); } catch (PartitionNotFoundException e) { // Maybe some one deleted the partition we added. 
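
To illustrate the shape of this change for callers, here is a hedged sketch, not code from the patch: the method, its name, and its arguments are hypothetical, and the types from this module (HiveMetastoreClosure, PartitionWithStatistics) and their imports are assumed.

    // Hypothetical caller of the metastore closure; module types and imports assumed.
    void addPartitionBatch(HiveMetastoreClosure metastore, String schemaName, String tableName,
            List<PartitionWithStatistics> batch)
    {
        // Before this series, callers threaded an explicit HiveIdentity (built from the
        // ConnectorSession) through every metastore call, e.g.
        //   metastore.addPartitions(identity, schemaName, tableName, batch);
        // After it, the identity argument is simply removed from the API:
        metastore.addPartitions(schemaName, tableName, batch);
    }
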
@@ -3681,33 +3607,33 @@ private interface ExclusiveOperation void execute(HiveMetastoreClosure delegate, HdfsEnvironment hdfsEnvironment); } - public long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId) + public long allocateWriteId(String dbName, String tableName, long transactionId) { - return delegate.allocateWriteId(identity, dbName, tableName, transactionId); + return delegate.allocateWriteId(dbName, tableName, transactionId); } - public void acquireTableWriteLock(HiveIdentity identity, String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isPartitioned) + public void acquireTableWriteLock(String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isPartitioned) { - delegate.acquireTableWriteLock(identity, queryId, transactionId, dbName, tableName, operation, isPartitioned); + delegate.acquireTableWriteLock(queryId, transactionId, dbName, tableName, operation, isPartitioned); } - public void updateTableWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) + public void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) { - delegate.updateTableWriteId(identity, dbName, tableName, transactionId, writeId, rowCountChange); + delegate.updateTableWriteId(dbName, tableName, transactionId, writeId, rowCountChange); } - public void alterPartitions(HiveIdentity identity, String dbName, String tableName, List partitions, long writeId) + public void alterPartitions(String dbName, String tableName, List partitions, long writeId) { - delegate.alterPartitions(identity, dbName, tableName, partitions, writeId); + delegate.alterPartitions(dbName, tableName, partitions, writeId); } - public void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) + public void addDynamicPartitions(String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) { - delegate.addDynamicPartitions(identity, dbName, tableName, partitionNames, transactionId, writeId, operation); + delegate.addDynamicPartitions(dbName, tableName, partitionNames, transactionId, writeId, operation); } - public void commitTransaction(HiveIdentity identity, long transactionId) + public void commitTransaction(long transactionId) { - delegate.commitTransaction(identity, transactionId); + delegate.commitTransaction(transactionId); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java index 952d56402ce5..625f6aa3b61b 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/alluxio/AlluxioHiveMetastore.java @@ -25,7 +25,6 @@ import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveColumnStatistics; @@ -102,7 +101,7 @@ public List getAllDatabases() } @Override - public Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional<Table>
getTable(String databaseName, String tableName) { try { return Optional.of(ProtoUtils.fromProto(client.getTable(databaseName, tableName))); @@ -128,7 +127,7 @@ private Map groupStatisticsByColumn(List getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { try { List dataColumns = table.getDataColumns().stream() @@ -183,7 +182,6 @@ public Map getPartitionStatistics(HiveIdentity iden @Override public void updateTableStatistics( - HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, @@ -194,7 +192,6 @@ public void updateTableStatistics( @Override public void updatePartitionStatistics( - HiveIdentity identity, Table table, Map> updates) { @@ -252,102 +249,101 @@ public List getAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(Database database) { throw new TrinoException(NOT_SUPPORTED, "createDatabase"); } @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { throw new TrinoException(NOT_SUPPORTED, "dropDatabase"); } @Override - public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { throw new TrinoException(NOT_SUPPORTED, "renameDatabase"); } @Override - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String databaseName, HivePrincipal principal) { throw new TrinoException(NOT_SUPPORTED, "setDatabaseOwner"); } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { throw new TrinoException(NOT_SUPPORTED, "createTable"); } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { throw new TrinoException(NOT_SUPPORTED, "dropTable"); } @Override - public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, + public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { throw new TrinoException(NOT_SUPPORTED, "replaceTable"); } @Override - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new TrinoException(NOT_SUPPORTED, "renameTable"); } @Override - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void commentTable(String databaseName, String tableName, Optional comment) { throw new TrinoException(NOT_SUPPORTED, "commentTable"); } @Override - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { throw new TrinoException(NOT_SUPPORTED, "setTableOwner"); } @Override - public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public void 
commentColumn(String databaseName, String tableName, String columnName, Optional comment) { throw new TrinoException(NOT_SUPPORTED, "commentColumn"); } @Override - public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, + public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { throw new TrinoException(NOT_SUPPORTED, "addColumn"); } @Override - public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { throw new TrinoException(NOT_SUPPORTED, "renameColumn"); } @Override - public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { throw new TrinoException(NOT_SUPPORTED, "dropColumn"); } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { throw new TrinoException(NOT_SUPPORTED, "getPartition"); } @Override public Optional> getPartitionNamesByFilter( - HiveIdentity identity, String databaseName, String tableName, List columnNames, @@ -367,7 +363,7 @@ public Optional> getPartitionNamesByFilter( } @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { if (partitionNames.isEmpty()) { return Collections.emptyMap(); @@ -398,21 +394,21 @@ public Map> getPartitionsByNames(HiveIdentity identi } @Override - public void addPartitions(HiveIdentity identity, String databaseName, String tableName, + public void addPartitions(String databaseName, String tableName, List partitions) { throw new TrinoException(NOT_SUPPORTED, "addPartitions"); } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { throw new TrinoException(NOT_SUPPORTED, "dropPartition"); } @Override - public void alterPartition(HiveIdentity identity, String databaseName, String tableName, + public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) { throw new TrinoException(NOT_SUPPORTED, "alterPartition"); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java index 53169f3a8952..62ee49abe612 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/CachingHiveMetastore.java @@ -30,7 +30,6 @@ import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidOperation; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HivePartitionName; @@ -103,7 +102,6 @@ public enum StatsRecording } protected final HiveMetastore delegate; - private final HiveIdentity identity; private final LoadingCache> databaseCache; private final LoadingCache> 
databaseNamesCache; private final LoadingCache> tableCache; @@ -120,11 +118,10 @@ public enum StatsRecording private final LoadingCache> grantedPrincipalsCache; private final LoadingCache> configValuesCache; - public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, HiveIdentity identity, Executor executor, Duration cacheTtl, Optional refreshInterval, long maximumSize) + public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, Executor executor, Duration cacheTtl, Optional refreshInterval, long maximumSize) { return new CachingHiveMetastore( delegate, - identity, OptionalLong.of(cacheTtl.toMillis()), refreshInterval .map(Duration::toMillis) @@ -135,11 +132,10 @@ public static CachingHiveMetastore cachingHiveMetastore(HiveMetastore delegate, StatsRecording.ENABLED); } - public static CachingHiveMetastore memoizeMetastore(HiveMetastore delegate, HiveIdentity identity, long maximumSize) + public static CachingHiveMetastore memoizeMetastore(HiveMetastore delegate, long maximumSize) { return new CachingHiveMetastore( delegate, - identity, OptionalLong.empty(), OptionalLong.empty(), Optional.empty(), @@ -147,10 +143,9 @@ public static CachingHiveMetastore memoizeMetastore(HiveMetastore delegate, Hive StatsRecording.DISABLED); } - protected CachingHiveMetastore(HiveMetastore delegate, HiveIdentity identity, OptionalLong expiresAfterWriteMillis, OptionalLong refreshMills, Optional executor, long maximumSize, StatsRecording statsRecording) + protected CachingHiveMetastore(HiveMetastore delegate, OptionalLong expiresAfterWriteMillis, OptionalLong refreshMills, Optional executor, long maximumSize, StatsRecording statsRecording) { this.delegate = requireNonNull(delegate, "delegate is null"); - this.identity = requireNonNull(identity, "identity is null"); requireNonNull(executor, "executor is null"); databaseNamesCache = buildCache(expiresAfterWriteMillis, refreshMills, executor, maximumSize, statsRecording, ignored -> loadAllDatabases()); @@ -261,12 +256,12 @@ private List loadAllDatabases() private Table getExistingTable(String databaseName, String tableName) { - return getTable(identity, databaseName, tableName) + return getTable(databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); } @Override - public Optional
<Table> getTable(HiveIdentity ignored, String databaseName, String tableName) + public Optional<Table>
getTable(String databaseName, String tableName) { return get(tableCache, hiveTableName(databaseName, tableName)); } @@ -279,11 +274,11 @@ public Set getSupportedColumnStatistics(Type type) private Optional<Table>
loadTable(HiveTableName hiveTableName) { - return delegate.getTable(identity, hiveTableName.getDatabaseName(), hiveTableName.getTableName()); + return delegate.getTable(hiveTableName.getDatabaseName(), hiveTableName.getTableName()); } @Override - public PartitionStatistics getTableStatistics(HiveIdentity ignored, Table table) + public PartitionStatistics getTableStatistics(Table table) { return get(tableStatisticsCache, hiveTableName(table.getDatabaseName(), table.getTableName())); } @@ -291,11 +286,11 @@ public PartitionStatistics getTableStatistics(HiveIdentity ignored, Table table) private PartitionStatistics loadTableColumnStatistics(HiveTableName tableName) { Table table = getExistingTable(tableName.getDatabaseName(), tableName.getTableName()); - return delegate.getTableStatistics(identity, table); + return delegate.getTableStatistics(table); } @Override - public Map getPartitionStatistics(HiveIdentity ignored, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { HiveTableName hiveTableName = hiveTableName(table.getDatabaseName(), table.getTableName()); List partitionNames = partitions.stream() @@ -313,7 +308,6 @@ private PartitionStatistics loadPartitionColumnStatistics(HivePartitionName part String partitionName = partition.getPartitionName().orElseThrow(); Table table = getExistingTable(tableName.getDatabaseName(), tableName.getTableName()); Map partitionStatistics = delegate.getPartitionStatistics( - identity, table, ImmutableList.of(getExistingPartition(table, partition.getPartitionValues()))); return partitionStatistics.get(partitionName); @@ -331,7 +325,7 @@ private Map loadPartitionsColumnStatisti .collect(toImmutableSet()); Table table = getExistingTable(tableName.getDatabaseName(), tableName.getTableName()); List partitions = getExistingPartitionsByNames(table, ImmutableList.copyOf(partitionNameStrings)); - Map statisticsByPartitionName = delegate.getPartitionStatistics(identity, table, partitions); + Map statisticsByPartitionName = delegate.getPartitionStatistics(table, partitions); for (HivePartitionName partitionName : partitionNames) { String stringNameForPartition = partitionName.getPartitionName().orElseThrow(); result.put(partitionName, statisticsByPartitionName.get(stringNameForPartition)); @@ -341,15 +335,13 @@ private Map loadPartitionsColumnStatisti } @Override - public void updateTableStatistics( - HiveIdentity ignored, - String databaseName, + public void updateTableStatistics(String databaseName, String tableName, AcidTransaction transaction, Function update) { try { - delegate.updateTableStatistics(identity, databaseName, tableName, transaction, update); + delegate.updateTableStatistics(databaseName, tableName, transaction, update); } finally { HiveTableName hiveTableName = hiveTableName(databaseName, tableName); @@ -360,10 +352,10 @@ public void updateTableStatistics( } @Override - public void updatePartitionStatistics(HiveIdentity ignored, Table table, String partitionName, Function update) + public void updatePartitionStatistics(Table table, String partitionName, Function update) { try { - delegate.updatePartitionStatistics(identity, table, partitionName, update); + delegate.updatePartitionStatistics(table, partitionName, update); } finally { HivePartitionName hivePartitionName = hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionName); @@ -374,10 +366,10 @@ public void updatePartitionStatistics(HiveIdentity ignored, Table table, String } @Override - public void 
updatePartitionStatistics(HiveIdentity ignored, Table table, Map> updates) + public void updatePartitionStatistics(Table table, Map> updates) { try { - delegate.updatePartitionStatistics(identity, table, updates); + delegate.updatePartitionStatistics(table, updates); } finally { updates.forEach((partitionName, update) -> { @@ -424,10 +416,10 @@ private List loadAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity ignored, Database database) + public void createDatabase(Database database) { try { - delegate.createDatabase(identity, database); + delegate.createDatabase(database); } finally { invalidateDatabase(database.getDatabaseName()); @@ -435,10 +427,10 @@ public void createDatabase(HiveIdentity ignored, Database database) } @Override - public void dropDatabase(HiveIdentity ignored, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { try { - delegate.dropDatabase(identity, databaseName, deleteData); + delegate.dropDatabase(databaseName, deleteData); } finally { invalidateDatabase(databaseName); @@ -446,10 +438,10 @@ public void dropDatabase(HiveIdentity ignored, String databaseName, boolean dele } @Override - public void renameDatabase(HiveIdentity ignored, String databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { try { - delegate.renameDatabase(identity, databaseName, newDatabaseName); + delegate.renameDatabase(databaseName, newDatabaseName); } finally { invalidateDatabase(databaseName); @@ -458,10 +450,10 @@ public void renameDatabase(HiveIdentity ignored, String databaseName, String new } @Override - public void setDatabaseOwner(HiveIdentity ignored, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String databaseName, HivePrincipal principal) { try { - delegate.setDatabaseOwner(identity, databaseName, principal); + delegate.setDatabaseOwner(databaseName, principal); } finally { invalidateDatabase(databaseName); @@ -475,10 +467,10 @@ protected void invalidateDatabase(String databaseName) } @Override - public void createTable(HiveIdentity ignored, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { try { - delegate.createTable(identity, table, principalPrivileges); + delegate.createTable(table, principalPrivileges); } finally { invalidateTable(table.getDatabaseName(), table.getTableName()); @@ -486,10 +478,10 @@ public void createTable(HiveIdentity ignored, Table table, PrincipalPrivileges p } @Override - public void dropTable(HiveIdentity ignored, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { try { - delegate.dropTable(identity, databaseName, tableName, deleteData); + delegate.dropTable(databaseName, tableName, deleteData); } finally { invalidateTable(databaseName, tableName); @@ -497,10 +489,10 @@ public void dropTable(HiveIdentity ignored, String databaseName, String tableNam } @Override - public void replaceTable(HiveIdentity ignored, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { try { - delegate.replaceTable(identity, databaseName, tableName, newTable, principalPrivileges); + delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges); 
} finally { invalidateTable(databaseName, tableName); @@ -509,10 +501,10 @@ public void replaceTable(HiveIdentity ignored, String databaseName, String table } @Override - public void renameTable(HiveIdentity ignored, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { try { - delegate.renameTable(identity, databaseName, tableName, newDatabaseName, newTableName); + delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName); } finally { invalidateTable(databaseName, tableName); @@ -521,10 +513,10 @@ public void renameTable(HiveIdentity ignored, String databaseName, String tableN } @Override - public void commentTable(HiveIdentity ignored, String databaseName, String tableName, Optional comment) + public void commentTable(String databaseName, String tableName, Optional comment) { try { - delegate.commentTable(identity, databaseName, tableName, comment); + delegate.commentTable(databaseName, tableName, comment); } finally { invalidateTable(databaseName, tableName); @@ -532,10 +524,10 @@ public void commentTable(HiveIdentity ignored, String databaseName, String table } @Override - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { try { - delegate.setTableOwner(identity, databaseName, tableName, principal); + delegate.setTableOwner(databaseName, tableName, principal); } finally { invalidateTable(databaseName, tableName); @@ -543,10 +535,10 @@ public void setTableOwner(HiveIdentity identity, String databaseName, String tab } @Override - public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { try { - delegate.commentColumn(identity, databaseName, tableName, columnName, comment); + delegate.commentColumn(databaseName, tableName, columnName, comment); } finally { invalidateTable(databaseName, tableName); @@ -554,10 +546,10 @@ public void commentColumn(HiveIdentity identity, String databaseName, String tab } @Override - public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { try { - delegate.addColumn(identity, databaseName, tableName, columnName, columnType, columnComment); + delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment); } finally { invalidateTable(databaseName, tableName); @@ -565,10 +557,10 @@ public void addColumn(HiveIdentity identity, String databaseName, String tableNa } @Override - public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { try { - delegate.renameColumn(identity, databaseName, tableName, oldColumnName, newColumnName); + delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName); } finally { invalidateTable(databaseName, tableName); @@ -576,10 +568,10 @@ public void renameColumn(HiveIdentity identity, String databaseName, String tabl } @Override 
- public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { try { - delegate.dropColumn(identity, databaseName, tableName, columnName); + delegate.dropColumn(databaseName, tableName, columnName); } finally { invalidateTable(databaseName, tableName); @@ -615,13 +607,13 @@ private void invalidateTableStatisticsCache(String databaseName, String tableNam private Partition getExistingPartition(Table table, List partitionValues) { - return getPartition(identity, table, partitionValues) + return getPartition(table, partitionValues) .orElseThrow(() -> new PartitionNotFoundException(table.getSchemaTableName(), partitionValues)); } private List getExistingPartitionsByNames(Table table, List partitionNames) { - Map partitions = getPartitionsByNames(identity, table, partitionNames).entrySet().stream() + Map partitions = getPartitionsByNames(table, partitionNames).entrySet().stream() .map(entry -> immutableEntry(entry.getKey(), entry.getValue().orElseThrow(() -> new PartitionNotFoundException(table.getSchemaTableName(), extractPartitionValues(entry.getKey()))))) .collect(toImmutableMap(Map.Entry::getKey, Map.Entry::getValue)); @@ -632,13 +624,16 @@ private List getExistingPartitionsByNames(Table table, List p } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { return get(partitionCache, hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionValues)); } @Override - public Optional> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) + public Optional> getPartitionNamesByFilter(String databaseName, + String tableName, + List columnNames, + TupleDomain partitionKeysFilter) { return get(partitionFilterCache, partitionFilter(databaseName, tableName, columnNames, partitionKeysFilter)); } @@ -646,7 +641,6 @@ public Optional> getPartitionNamesByFilter(HiveIdentity identity, S private Optional> loadPartitionNamesByFilter(PartitionFilter partitionFilter) { return delegate.getPartitionNamesByFilter( - identity, partitionFilter.getHiveTableName().getDatabaseName(), partitionFilter.getHiveTableName().getTableName(), partitionFilter.getPartitionColumnNames(), @@ -654,7 +648,7 @@ private Optional> loadPartitionNamesByFilter(PartitionFilter partit } @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { List names = partitionNames.stream() .map(name -> hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), name)) @@ -671,8 +665,8 @@ public Map> getPartitionsByNames(HiveIdentity identi private Optional loadPartitionByName(HivePartitionName partitionName) { HiveTableName hiveTableName = partitionName.getHiveTableName(); - return getTable(identity, hiveTableName.getDatabaseName(), hiveTableName.getTableName()) - .flatMap(table -> delegate.getPartition(identity, table, partitionName.getPartitionValues())); + return getTable(hiveTableName.getDatabaseName(), hiveTableName.getTableName()) + .flatMap(table -> delegate.getPartition(table, partitionName.getPartitionValues())); } private Map> loadPartitionsByNames(Iterable partitionNames) @@ -683,7 +677,7 @@ private Map> loadPartitionsByNames(Iterab HivePartitionName 
firstPartition = Iterables.get(partitionNames, 0); HiveTableName hiveTableName = firstPartition.getHiveTableName(); - Optional
<Table> table = getTable(identity, hiveTableName.getDatabaseName(), hiveTableName.getTableName()); + Optional<Table>
table = getTable(hiveTableName.getDatabaseName(), hiveTableName.getTableName()); if (table.isEmpty()) { return stream(partitionNames) .collect(toImmutableMap(name -> name, name -> Optional.empty())); @@ -696,7 +690,7 @@ private Map> loadPartitionsByNames(Iterab } ImmutableMap.Builder> partitions = ImmutableMap.builder(); - Map> partitionsByNames = delegate.getPartitionsByNames(identity, table.get(), partitionsToFetch); + Map> partitionsByNames = delegate.getPartitionsByNames(table.get(), partitionsToFetch); for (HivePartitionName partitionName : partitionNames) { partitions.put(partitionName, partitionsByNames.getOrDefault(partitionName.getPartitionName().orElseThrow(), Optional.empty())); } @@ -704,10 +698,10 @@ private Map> loadPartitionsByNames(Iterab } @Override - public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions) + public void addPartitions(String databaseName, String tableName, List partitions) { try { - delegate.addPartitions(identity, databaseName, tableName, partitions); + delegate.addPartitions(databaseName, tableName, partitions); } finally { // todo do we need to invalidate all partitions? @@ -716,10 +710,10 @@ public void addPartitions(HiveIdentity identity, String databaseName, String tab } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { try { - delegate.dropPartition(identity, databaseName, tableName, parts, deleteData); + delegate.dropPartition(databaseName, tableName, parts, deleteData); } finally { invalidatePartitionCache(databaseName, tableName); @@ -727,10 +721,10 @@ public void dropPartition(HiveIdentity identity, String databaseName, String tab } @Override - public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) { try { - delegate.alterPartition(identity, databaseName, tableName, partition); + delegate.alterPartition(databaseName, tableName, partition); } finally { invalidatePartitionCache(databaseName, tableName); @@ -885,39 +879,39 @@ private Optional loadConfigValue(String name) } @Override - public long openTransaction(HiveIdentity identity) + public long openTransaction() { - return delegate.openTransaction(identity); + return delegate.openTransaction(); } @Override - public void commitTransaction(HiveIdentity identity, long transactionId) + public void commitTransaction(long transactionId) { - delegate.commitTransaction(identity, transactionId); + delegate.commitTransaction(transactionId); } @Override - public void abortTransaction(HiveIdentity identity, long transactionId) + public void abortTransaction(long transactionId) { - delegate.abortTransaction(identity, transactionId); + delegate.abortTransaction(transactionId); } @Override - public void sendTransactionHeartbeat(HiveIdentity identity, long transactionId) + public void sendTransactionHeartbeat(long transactionId) { - delegate.sendTransactionHeartbeat(identity, transactionId); + delegate.sendTransactionHeartbeat(transactionId); } @Override - public void acquireSharedReadLock(HiveIdentity identity, String queryId, long transactionId, List fullTables, List partitions) + public void acquireSharedReadLock(String queryId, long transactionId, List fullTables, List partitions) { - 
delegate.acquireSharedReadLock(identity, queryId, transactionId, fullTables, partitions); + delegate.acquireSharedReadLock(queryId, transactionId, fullTables, partitions); } @Override - public String getValidWriteIds(HiveIdentity identity, List tables, long currentTransactionId) + public String getValidWriteIds(List tables, long currentTransactionId) { - return delegate.getValidWriteIds(identity, tables, currentTransactionId); + return delegate.getValidWriteIds(tables, currentTransactionId); } private Set loadTablePrivileges(String databaseName, String tableName, Optional tableOwner, Optional principal) @@ -926,22 +920,27 @@ private Set loadTablePrivileges(String databaseName, String t } @Override - public long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId) + public long allocateWriteId(String dbName, String tableName, long transactionId) { - return delegate.allocateWriteId(identity, dbName, tableName, transactionId); + return delegate.allocateWriteId(dbName, tableName, transactionId); } @Override - public void acquireTableWriteLock(HiveIdentity identity, String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isDynamicPartitionWrite) + public void acquireTableWriteLock(String queryId, + long transactionId, + String dbName, + String tableName, + DataOperationType operation, + boolean isDynamicPartitionWrite) { - delegate.acquireTableWriteLock(identity, queryId, transactionId, dbName, tableName, operation, isDynamicPartitionWrite); + delegate.acquireTableWriteLock(queryId, transactionId, dbName, tableName, operation, isDynamicPartitionWrite); } @Override - public void updateTableWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) + public void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) { try { - delegate.updateTableWriteId(identity, dbName, tableName, transactionId, writeId, rowCountChange); + delegate.updateTableWriteId(dbName, tableName, transactionId, writeId, rowCountChange); } finally { invalidateTable(dbName, tableName); @@ -949,10 +948,10 @@ public void updateTableWriteId(HiveIdentity identity, String dbName, String tabl } @Override - public void alterPartitions(HiveIdentity identity, String dbName, String tableName, List partitions, long writeId) + public void alterPartitions(String dbName, String tableName, List partitions, long writeId) { try { - delegate.alterPartitions(identity, dbName, tableName, partitions, writeId); + delegate.alterPartitions(dbName, tableName, partitions, writeId); } finally { invalidatePartitionCache(dbName, tableName); @@ -960,10 +959,10 @@ public void alterPartitions(HiveIdentity identity, String dbName, String tableNa } @Override - public void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) + public void addDynamicPartitions(String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) { try { - delegate.addDynamicPartitions(identity, dbName, tableName, partitionNames, transactionId, writeId, operation); + delegate.addDynamicPartitions(dbName, tableName, partitionNames, transactionId, writeId, operation); } finally { invalidatePartitionCache(dbName, tableName); @@ -971,10 +970,10 @@ public void addDynamicPartitions(HiveIdentity identity, String dbName, String 
ta } @Override - public void alterTransactionalTable(HiveIdentity identity, Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) + public void alterTransactionalTable(Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) { try { - delegate.alterTransactionalTable(identity, table, transactionId, writeId, principalPrivileges); + delegate.alterTransactionalTable(table, transactionId, writeId, principalPrivileges); } finally { invalidateTable(table.getDatabaseName(), table.getTableName()); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java index af04db65c7e8..9d259019f128 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/cache/SharedHiveMetastoreCache.java @@ -22,7 +22,6 @@ import com.google.common.util.concurrent.UncheckedExecutionException; import io.airlift.units.Duration; import io.trino.plugin.base.CatalogName; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.spi.NodeManager; @@ -132,7 +131,6 @@ public HiveMetastoreFactory createCachingHiveMetastoreFactory(HiveMetastoreFacto // In case there are no empty executor slots, such operation would deadlock. Therefore, a reentrant executor needs to be // used. metastoreFactory.createMetastore(Optional.empty()), - HiveIdentity.none(), new ReentrantBoundedExecutor(executorService, maxMetastoreRefreshThreads), metastoreCacheTtl, metastoreRefreshInterval, @@ -210,7 +208,6 @@ private CachingHiveMetastore createUserCachingMetastore(String user) ConnectorIdentity identity = ConnectorIdentity.ofUser(user); return cachingHiveMetastore( metastoreFactory.createMetastore(Optional.of(identity)), - new HiveIdentity(identity), new ReentrantBoundedExecutor(executorService, maxMetastoreRefreshThreads), metastoreCacheTtl, metastoreRefreshInterval, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java index ff6c413e031e..c01787c8ae5a 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/file/FileHiveMetastore.java @@ -35,7 +35,6 @@ import io.trino.plugin.hive.SchemaAlreadyExistsException; import io.trino.plugin.hive.TableAlreadyExistsException; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.authentication.NoHdfsAuthentication; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Database; @@ -190,7 +189,7 @@ public FileHiveMetastore(NodeVersion nodeVersion, HdfsEnvironment hdfsEnvironmen } @Override - public synchronized void createDatabase(HiveIdentity identity, Database database) + public synchronized void createDatabase(Database database) { requireNonNull(database, "database is null"); @@ -211,7 +210,7 @@ public synchronized void createDatabase(HiveIdentity identity, Database database } @Override - public synchronized void dropDatabase(HiveIdentity identity, String databaseName, boolean 
deleteData) + public synchronized void dropDatabase(String databaseName, boolean deleteData) { requireNonNull(databaseName, "databaseName is null"); @@ -230,7 +229,7 @@ public synchronized void dropDatabase(HiveIdentity identity, String databaseName } @Override - public synchronized void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public synchronized void renameDatabase(String databaseName, String newDatabaseName) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(newDatabaseName, "newDatabaseName is null"); @@ -249,7 +248,7 @@ public synchronized void renameDatabase(HiveIdentity identity, String databaseNa } @Override - public synchronized void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public synchronized void setDatabaseOwner(String databaseName, HivePrincipal principal) { Database database = getRequiredDatabase(databaseName); Path databaseMetadataDirectory = getDatabaseMetadataDirectory(database.getDatabaseName()); @@ -297,7 +296,7 @@ public synchronized List getAllDatabases() } @Override - public synchronized void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public synchronized void createTable(Table table, PrincipalPrivileges principalPrivileges) { verifyTableNotExists(table.getDatabaseName(), table.getTableName()); @@ -339,12 +338,7 @@ else if (!table.getTableType().equals(MATERIALIZED_VIEW.name())) { } @Override - public synchronized Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName) - { - return getTable(databaseName, tableName); - } - - private Optional<Table>
getTable(String databaseName, String tableName) + public synchronized Optional<Table>
getTable(String databaseName, String tableName) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); @@ -358,7 +352,7 @@ private Optional
getTable(String databaseName, String tableName) } @Override - public synchronized void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public synchronized void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { // TODO Add role support https://github.com/trinodb/trino/issues/5706 if (principal.getType() != USER) { @@ -381,7 +375,7 @@ public Set getSupportedColumnStatistics(Type type) } @Override - public synchronized PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public synchronized PartitionStatistics getTableStatistics(Table table) { return getTableStatistics(table.getDatabaseName(), table.getTableName()); } @@ -398,13 +392,13 @@ private synchronized PartitionStatistics getTableStatistics(String databaseName, } @Override - public synchronized Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public synchronized Map getPartitionStatistics(Table table, List partitions) { return partitions.stream() - .collect(toImmutableMap(partition -> makePartitionName(table, partition), partition -> getPartitionStatistics(table, partition.getValues()))); + .collect(toImmutableMap(partition -> makePartitionName(table, partition), partition -> getPartitionStatisticsInternal(table, partition.getValues()))); } - private synchronized PartitionStatistics getPartitionStatistics(Table table, List partitionValues) + private synchronized PartitionStatistics getPartitionStatisticsInternal(Table table, List partitionValues) { Path partitionDirectory = getPartitionMetadataDirectory(table, ImmutableList.copyOf(partitionValues)); PartitionMetadata partitionMetadata = readSchemaFile(PARTITION, partitionDirectory, partitionCodec) @@ -427,7 +421,7 @@ private void verifyTableNotExists(String newDatabaseName, String newTableName) } @Override - public synchronized void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public synchronized void updateTableStatistics(String databaseName, String tableName, AcidTransaction transaction, Function update) { PartitionStatistics originalStatistics = getTableStatistics(databaseName, tableName); PartitionStatistics updatedStatistics = update.apply(originalStatistics); @@ -445,10 +439,10 @@ public synchronized void updateTableStatistics(HiveIdentity identity, String dat } @Override - public synchronized void updatePartitionStatistics(HiveIdentity identity, Table table, Map> updates) + public synchronized void updatePartitionStatistics(Table table, Map> updates) { updates.forEach((partitionName, update) -> { - PartitionStatistics originalStatistics = getPartitionStatistics(table, extractPartitionValues(partitionName)); + PartitionStatistics originalStatistics = getPartitionStatisticsInternal(table, extractPartitionValues(partitionName)); PartitionStatistics updatedStatistics = update.apply(originalStatistics); List partitionValues = extractPartitionValues(partitionName); @@ -521,7 +515,7 @@ public synchronized List getAllViews(String databaseName) } @Override - public synchronized void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public synchronized void dropTable(String databaseName, String tableName, boolean deleteData) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); @@ -542,7 +536,7 @@ public synchronized void dropTable(HiveIdentity identity, String 
databaseName, S } @Override - public synchronized void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public synchronized void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { Table table = getRequiredTable(databaseName, tableName); if (!table.getDatabaseName().equals(databaseName) || !table.getTableName().equals(tableName)) { @@ -564,7 +558,7 @@ public synchronized void replaceTable(HiveIdentity identity, String databaseName } @Override - public synchronized void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public synchronized void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); @@ -603,7 +597,7 @@ public synchronized void renameTable(HiveIdentity identity, String databaseName, } @Override - public synchronized void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public synchronized void commentTable(String databaseName, String tableName, Optional comment) { alterTable(databaseName, tableName, oldTable -> { Map parameters = oldTable.getParameters().entrySet().stream() @@ -616,7 +610,7 @@ public synchronized void commentTable(HiveIdentity identity, String databaseName } @Override - public synchronized void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public synchronized void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { alterTable(databaseName, tableName, oldTable -> { if (oldTable.getColumn(columnName).isEmpty()) { @@ -639,7 +633,7 @@ public synchronized void commentColumn(HiveIdentity identity, String databaseNam } @Override - public synchronized void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public synchronized void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { alterTable(databaseName, tableName, oldTable -> { if (oldTable.getColumn(columnName).isPresent()) { @@ -656,7 +650,7 @@ public synchronized void addColumn(HiveIdentity identity, String databaseName, S } @Override - public synchronized void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + public synchronized void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { alterTable(databaseName, tableName, oldTable -> { if (oldTable.getColumn(newColumnName).isPresent()) { @@ -687,10 +681,10 @@ public synchronized void renameColumn(HiveIdentity identity, String databaseName } @Override - public synchronized void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public synchronized void dropColumn(String databaseName, String tableName, String columnName) { alterTable(databaseName, tableName, oldTable -> { - verifyCanDropColumn(this, identity, databaseName, tableName, columnName); + verifyCanDropColumn(this, databaseName, tableName, columnName); if (oldTable.getColumn(columnName).isEmpty()) { SchemaTableName name = new SchemaTableName(databaseName, tableName); throw new 
ColumnNotFoundException(name, columnName); @@ -727,7 +721,7 @@ private void alterTable(String databaseName, String tableName, Function partitions) + public synchronized void addPartitions(String databaseName, String tableName, List partitions) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); @@ -810,13 +804,13 @@ else if (table.getTableType().equals(EXTERNAL_TABLE.name())) { } @Override - public synchronized void dropPartition(HiveIdentity identity, String databaseName, String tableName, List partitionValues, boolean deleteData) + public synchronized void dropPartition(String databaseName, String tableName, List partitionValues, boolean deleteData) { requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); requireNonNull(partitionValues, "partitionValues is null"); - Optional
<Table> tableReference = getTable(identity, databaseName, tableName); + Optional<Table>
tableReference = getTable(databaseName, tableName); if (tableReference.isEmpty()) { return; } @@ -832,7 +826,7 @@ public synchronized void dropPartition(HiveIdentity identity, String databaseNam } @Override - public synchronized void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) + public synchronized void alterPartition(String databaseName, String tableName, PartitionWithStatistics partitionWithStatistics) { Table table = getRequiredTable(databaseName, tableName); @@ -998,13 +992,12 @@ private void writeRoleGrantsFile(Set roleGrants) writeFile("roleGrants", getRoleGrantsFile(), roleGrantsCodec, ImmutableList.copyOf(roleGrants), true); } - private synchronized Optional> getAllPartitionNames(HiveIdentity identity, String databaseName, String tableName) + private synchronized Optional> getAllPartitionNames(String databaseName, String tableName) { - requireNonNull(identity, "identity is null"); requireNonNull(databaseName, "databaseName is null"); requireNonNull(tableName, "tableName is null"); - Optional
<Table> tableReference = getTable(identity, databaseName, tableName); + Optional<Table>
tableReference = getTable(databaseName, tableName); if (tableReference.isEmpty()) { return Optional.empty(); } @@ -1072,7 +1065,7 @@ private List> listPartitions(Path director, List part } @Override - public synchronized Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public synchronized Optional getPartition(Table table, List partitionValues) { requireNonNull(table, "table is null"); requireNonNull(partitionValues, "partitionValues is null"); @@ -1084,22 +1077,21 @@ public synchronized Optional getPartition(HiveIdentity identity, Tabl @Override public Optional> getPartitionNamesByFilter( - HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) { - return getAllPartitionNames(identity, databaseName, tableName); + return getAllPartitionNames(databaseName, tableName); } @Override - public synchronized Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public synchronized Map> getPartitionsByNames(Table table, List partitionNames) { ImmutableMap.Builder> builder = ImmutableMap.builder(); for (String partitionName : partitionNames) { List partitionValues = toPartitionValues(partitionName); - builder.put(partitionName, getPartition(identity, table, partitionValues)); + builder.put(partitionName, getPartition(table, partitionValues)); } return builder.buildOrThrow(); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java index 64e12a83be5c..fa145f2a4d20 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/glue/GlueHiveMetastore.java @@ -76,7 +76,6 @@ import io.trino.plugin.hive.SchemaAlreadyExistsException; import io.trino.plugin.hive.TableAlreadyExistsException; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveColumnStatistics; @@ -311,7 +310,7 @@ public List getAllDatabases() } @Override - public Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional<Table>
getTable(String databaseName, String tableName) { try { GetTableResult result = stats.getGetTable().call(() -> @@ -335,20 +334,20 @@ public Set getSupportedColumnStatistics(Type type) return columnStatisticsProvider.getSupportedColumnStatistics(type); } - private Table getExistingTable(HiveIdentity identity, String databaseName, String tableName) + private Table getExistingTable(String databaseName, String tableName) { - return getTable(identity, databaseName, tableName) + return getTable(databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(Table table) { return new PartitionStatistics(getHiveBasicStatistics(table.getParameters()), columnStatisticsProvider.getTableColumnStatistics(table)); } @Override - public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { return columnStatisticsProvider.getPartitionColumnStatistics(partitions).entrySet().stream() .collect(toImmutableMap( @@ -357,13 +356,13 @@ public Map getPartitionStatistics(HiveIdentity iden } @Override - public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public void updateTableStatistics(String databaseName, String tableName, AcidTransaction transaction, Function update) { - Table table = getExistingTable(identity, databaseName, tableName); + Table table = getExistingTable(databaseName, tableName); if (transaction.isAcidTransactionRunning()) { table = Table.builder(table).setWriteId(OptionalLong.of(transaction.getWriteId())).build(); } - PartitionStatistics currentStatistics = getTableStatistics(identity, table); + PartitionStatistics currentStatistics = getTableStatistics(table); PartitionStatistics updatedStatistics = update.apply(currentStatistics); try { @@ -386,7 +385,7 @@ public void updateTableStatistics(HiveIdentity identity, String databaseName, St } @Override - public void updatePartitionStatistics(HiveIdentity identity, Table table, Map> updates) + public void updatePartitionStatistics(Table table, Map> updates) { Iterables.partition(updates.entrySet(), BATCH_CREATE_PARTITION_MAX_PAGE_SIZE).forEach(partitionUpdates -> updatePartitionStatisticsBatch(table, partitionUpdates.stream().collect(toImmutableMap(Entry::getKey, Entry::getValue)))); @@ -511,7 +510,7 @@ public List getAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(Database database) { if (database.getLocation().isEmpty() && defaultDir.isPresent()) { String databaseLocation = new Path(defaultDir.get(), database.getDatabaseName()).toString(); @@ -539,7 +538,7 @@ public void createDatabase(HiveIdentity identity, Database database) // TODO: respect deleteData @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { Optional location = Optional.empty(); if (deleteData) { @@ -565,7 +564,7 @@ public void dropDatabase(HiveIdentity identity, String databaseName, boolean del } @Override - public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { try { Database 
database = getDatabase(databaseName).orElseThrow(() -> new SchemaNotFoundException(databaseName)); @@ -582,13 +581,13 @@ public void renameDatabase(HiveIdentity identity, String databaseName, String ne } @Override - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String databaseName, HivePrincipal principal) { throw new TrinoException(NOT_SUPPORTED, "setting the database owner is not supported by Glue"); } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { try { TableInput input = GlueInputConverter.convertTable(table); @@ -610,9 +609,9 @@ public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { - Table table = getExistingTable(identity, databaseName, tableName); + Table table = getExistingTable(databaseName, tableName); try { stats.getDropTable().call(() -> @@ -648,7 +647,7 @@ private static void deleteDir(HdfsContext context, HdfsEnvironment hdfsEnvironme } @Override - public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { try { TableInput newTableInput = GlueInputConverter.convertTable(newTable); @@ -667,19 +666,19 @@ public void replaceTable(HiveIdentity identity, String databaseName, String tabl } @Override - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new TrinoException(NOT_SUPPORTED, "Table rename is not yet supported by Glue service"); } @Override - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void commentTable(String databaseName, String tableName, Optional comment) { throw new TrinoException(NOT_SUPPORTED, "Table comment is not yet supported by Glue service"); } @Override - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { // TODO Add role support https://github.com/trinodb/trino/issues/5706 if (principal.getType() != USER) { @@ -687,7 +686,7 @@ public void setTableOwner(HiveIdentity identity, String databaseName, String tab } try { - Table table = getExistingTable(identity, databaseName, tableName); + Table table = getExistingTable(databaseName, tableName); TableInput newTableInput = GlueInputConverter.convertTable(table); newTableInput.setOwner(principal.getName()); @@ -706,25 +705,25 @@ public void setTableOwner(HiveIdentity identity, String databaseName, String tab } @Override - public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { throw new TrinoException(NOT_SUPPORTED, "Column comment is not yet supported by Glue 
service"); } @Override - public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { - Table oldTable = getExistingTable(identity, databaseName, tableName); + Table oldTable = getExistingTable(databaseName, tableName); Table newTable = Table.builder(oldTable) .addDataColumn(new Column(columnName, columnType, Optional.ofNullable(columnComment))) .build(); - replaceTable(identity, databaseName, tableName, newTable, null); + replaceTable(databaseName, tableName, newTable, null); } @Override - public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { - Table oldTable = getExistingTable(identity, databaseName, tableName); + Table oldTable = getExistingTable(databaseName, tableName); if (oldTable.getPartitionColumns().stream().anyMatch(c -> c.getName().equals(oldColumnName))) { throw new TrinoException(NOT_SUPPORTED, "Renaming partition columns is not supported"); } @@ -742,14 +741,14 @@ public void renameColumn(HiveIdentity identity, String databaseName, String tabl Table newTable = Table.builder(oldTable) .setDataColumns(newDataColumns.build()) .build(); - replaceTable(identity, databaseName, tableName, newTable, null); + replaceTable(databaseName, tableName, newTable, null); } @Override - public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { - verifyCanDropColumn(this, identity, databaseName, tableName, columnName); - Table oldTable = getExistingTable(identity, databaseName, tableName); + verifyCanDropColumn(this, databaseName, tableName, columnName); + Table oldTable = getExistingTable(databaseName, tableName); if (oldTable.getColumn(columnName).isEmpty()) { SchemaTableName name = new SchemaTableName(databaseName, tableName); @@ -764,11 +763,11 @@ public void dropColumn(HiveIdentity identity, String databaseName, String tableN Table newTable = Table.builder(oldTable) .setDataColumns(newDataColumns.build()) .build(); - replaceTable(identity, databaseName, tableName, newTable, null); + replaceTable(databaseName, tableName, newTable, null); } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { try { GetPartitionResult result = stats.getGetPartition().call(() -> @@ -789,7 +788,6 @@ public Optional getPartition(HiveIdentity identity, Table table, List @Override public Optional> getPartitionNamesByFilter( - HiveIdentity identity, String databaseName, String tableName, List columnNames, @@ -798,7 +796,7 @@ public Optional> getPartitionNamesByFilter( if (partitionKeysFilter.isNone()) { return Optional.of(ImmutableList.of()); } - Table table = getExistingTable(identity, databaseName, tableName); + Table table = getExistingTable(databaseName, tableName); String expression = GlueExpressionUtil.buildGlueExpression(columnNames, partitionKeysFilter, assumeCanonicalPartitionKeys); List partitions = getPartitions(table, expression); return Optional.of(buildPartitionNames(table.getPartitionColumns(), partitions)); @@ -880,12 +878,12 @@ private static List 
buildPartitionNames(List partitionColumns, L * @return Mapping of partition name to partition object */ @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { - return stats.getGetPartitionByName().call(() -> getPartitionsByNames(table, partitionNames)); + return stats.getGetPartitionByName().call(() -> getPartitionsByNamesInternal(table, partitionNames)); } - private Map> getPartitionsByNames(Table table, List partitionNames) + private Map> getPartitionsByNamesInternal(Table table, List partitionNames) { requireNonNull(partitionNames, "partitionNames is null"); if (partitionNames.isEmpty()) { @@ -959,7 +957,7 @@ private List batchGetPartition(Table table, List partitionNam } @Override - public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions) + public void addPartitions(String databaseName, String tableName, List partitions) { try { stats.getAddPartitions().call(() -> { @@ -1018,10 +1016,10 @@ private static void propagatePartitionErrorToTrinoException(String databaseName, } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { - Table table = getExistingTable(identity, databaseName, tableName); - Partition partition = getPartition(identity, table, parts) + Table table = getExistingTable(databaseName, tableName); + Partition partition = getPartition(table, parts) .orElseThrow(() -> new PartitionNotFoundException(new SchemaTableName(databaseName, tableName), parts)); try { @@ -1043,7 +1041,7 @@ public void dropPartition(HiveIdentity identity, String databaseName, String tab } @Override - public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) { try { PartitionInput newPartition = convertPartition(partition); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java index 9270c89300dc..3b08a4a7a51c 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/recording/RecordingHiveMetastore.java @@ -16,7 +16,6 @@ import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HivePrincipal; @@ -70,9 +69,9 @@ public List getAllDatabases() } @Override - public Optional
getTable(HiveIdentity identity, String databaseName, String tableName)
+    public Optional<Table>
getTable(String databaseName, String tableName) { - return recording.getTable(hiveTableName(databaseName, tableName), () -> delegate.getTable(identity, databaseName, tableName)); + return recording.getTable(hiveTableName(databaseName, tableName), () -> delegate.getTable(databaseName, tableName)); } @Override @@ -82,42 +81,45 @@ public Set getSupportedColumnStatistics(Type type) } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(Table table) { return recording.getTableStatistics( hiveTableName(table.getDatabaseName(), table.getTableName()), - () -> delegate.getTableStatistics(identity, table)); + () -> delegate.getTableStatistics(table)); } @Override - public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { return recording.getPartitionStatistics( partitions.stream() .map(partition -> hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partition.getValues())) .collect(toImmutableSet()), - () -> delegate.getPartitionStatistics(identity, table, partitions)); + () -> delegate.getPartitionStatistics(table, partitions)); } @Override - public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public void updateTableStatistics(String databaseName, + String tableName, + AcidTransaction transaction, + Function update) { verifyRecordingMode(); - delegate.updateTableStatistics(identity, databaseName, tableName, transaction, update); + delegate.updateTableStatistics(databaseName, tableName, transaction, update); } @Override - public void updatePartitionStatistics(HiveIdentity identity, Table table, String partitionName, Function update) + public void updatePartitionStatistics(Table table, String partitionName, Function update) { verifyRecordingMode(); - delegate.updatePartitionStatistics(identity, table, partitionName, update); + delegate.updatePartitionStatistics(table, partitionName, update); } @Override - public void updatePartitionStatistics(HiveIdentity identity, Table table, Map> updates) + public void updatePartitionStatistics(Table table, Map> updates) { verifyRecordingMode(); - delegate.updatePartitionStatistics(identity, table, updates); + delegate.updatePartitionStatistics(table, updates); } @Override @@ -140,148 +142,148 @@ public List getAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(Database database) { verifyRecordingMode(); - delegate.createDatabase(identity, database); + delegate.createDatabase(database); } @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { verifyRecordingMode(); - delegate.dropDatabase(identity, databaseName, deleteData); + delegate.dropDatabase(databaseName, deleteData); } @Override - public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { verifyRecordingMode(); - delegate.renameDatabase(identity, databaseName, newDatabaseName); + delegate.renameDatabase(databaseName, newDatabaseName); } @Override - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String 
databaseName, HivePrincipal principal) { verifyRecordingMode(); - delegate.setDatabaseOwner(identity, databaseName, principal); + delegate.setDatabaseOwner(databaseName, principal); } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { verifyRecordingMode(); - delegate.createTable(identity, table, principalPrivileges); + delegate.createTable(table, principalPrivileges); } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { verifyRecordingMode(); - delegate.dropTable(identity, databaseName, tableName, deleteData); + delegate.dropTable(databaseName, tableName, deleteData); } @Override - public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { verifyRecordingMode(); - delegate.replaceTable(identity, databaseName, tableName, newTable, principalPrivileges); + delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges); } @Override - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { verifyRecordingMode(); - delegate.renameTable(identity, databaseName, tableName, newDatabaseName, newTableName); + delegate.renameTable(databaseName, tableName, newDatabaseName, newTableName); } @Override - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void commentTable(String databaseName, String tableName, Optional comment) { verifyRecordingMode(); - delegate.commentTable(identity, databaseName, tableName, comment); + delegate.commentTable(databaseName, tableName, comment); } @Override - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { verifyRecordingMode(); - delegate.setTableOwner(identity, databaseName, tableName, principal); + delegate.setTableOwner(databaseName, tableName, principal); } @Override - public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { verifyRecordingMode(); - delegate.commentColumn(identity, databaseName, tableName, columnName, comment); + delegate.commentColumn(databaseName, tableName, columnName, comment); } @Override - public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { verifyRecordingMode(); - delegate.addColumn(identity, databaseName, tableName, columnName, columnType, columnComment); + delegate.addColumn(databaseName, tableName, columnName, columnType, columnComment); } @Override - public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String 
oldColumnName, String newColumnName) + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { verifyRecordingMode(); - delegate.renameColumn(identity, databaseName, tableName, oldColumnName, newColumnName); + delegate.renameColumn(databaseName, tableName, oldColumnName, newColumnName); } @Override - public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { verifyRecordingMode(); - delegate.dropColumn(identity, databaseName, tableName, columnName); + delegate.dropColumn(databaseName, tableName, columnName); } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { return recording.getPartition( hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionValues), - () -> delegate.getPartition(identity, table, partitionValues)); + () -> delegate.getPartition(table, partitionValues)); } @Override - public Optional> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) + public Optional> getPartitionNamesByFilter(String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) { return recording.getPartitionNamesByFilter( partitionFilter(databaseName, tableName, columnNames, partitionKeysFilter), - () -> delegate.getPartitionNamesByFilter(identity, databaseName, tableName, columnNames, partitionKeysFilter)); + () -> delegate.getPartitionNamesByFilter(databaseName, tableName, columnNames, partitionKeysFilter)); } @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { return recording.getPartitionsByNames( partitionNames.stream() .map(partitionName -> hivePartitionName(hiveTableName(table.getDatabaseName(), table.getTableName()), partitionName)) .collect(toImmutableSet()), - () -> delegate.getPartitionsByNames(identity, table, partitionNames)); + () -> delegate.getPartitionsByNames(table, partitionNames)); } @Override - public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions) + public void addPartitions(String databaseName, String tableName, List partitions) { verifyRecordingMode(); - delegate.addPartitions(identity, databaseName, tableName, partitions); + delegate.addPartitions(databaseName, tableName, partitions); } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { verifyRecordingMode(); - delegate.dropPartition(identity, databaseName, tableName, parts, deleteData); + delegate.dropPartition(databaseName, tableName, parts, deleteData); } @Override - public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) { verifyRecordingMode(); - delegate.alterPartition(identity, databaseName, tableName, partition); + delegate.alterPartition(databaseName, tableName, partition); } @Override diff --git 
a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java index b9bd09c81ad4..37f7f1dcf3a6 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/metastore/thrift/BridgingHiveMetastore.java @@ -90,7 +90,7 @@ public List getAllDatabases() } @Override - public Optional
getTable(HiveIdentity identity, String databaseName, String tableName)
+    public Optional<Table>
getTable(String databaseName, String tableName) { return delegate.getTable(identity, databaseName, tableName).map(table -> { if (isAvroTableWithSchemaSet(table)) { @@ -110,13 +110,13 @@ public Set getSupportedColumnStatistics(Type type) } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(Table table) { return delegate.getTableStatistics(identity, toMetastoreApiTable(table)); } @Override - public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { return delegate.getPartitionStatistics( identity, @@ -127,13 +127,13 @@ public Map getPartitionStatistics(HiveIdentity iden } @Override - public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public void updateTableStatistics(String databaseName, String tableName, AcidTransaction transaction, Function update) { delegate.updateTableStatistics(identity, databaseName, tableName, transaction, update); } @Override - public void updatePartitionStatistics(HiveIdentity identity, Table table, Map> updates) + public void updatePartitionStatistics(Table table, Map> updates) { org.apache.hadoop.hive.metastore.api.Table metastoreTable = toMetastoreApiTable(table); updates.forEach((partitionName, update) -> delegate.updatePartitionStatistics(identity, metastoreTable, partitionName, update)); @@ -158,19 +158,19 @@ public List getAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(Database database) { delegate.createDatabase(identity, toMetastoreApiDatabase(database)); } @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { delegate.dropDatabase(identity, databaseName, deleteData); } @Override - public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { org.apache.hadoop.hive.metastore.api.Database database = delegate.getDatabase(databaseName) .orElseThrow(() -> new SchemaNotFoundException(databaseName)); @@ -185,7 +185,7 @@ public void renameDatabase(HiveIdentity identity, String databaseName, String ne } @Override - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String databaseName, HivePrincipal principal) { Database database = fromMetastoreApiDatabase(delegate.getDatabase(databaseName) .orElseThrow(() -> new SchemaNotFoundException(databaseName))); @@ -199,25 +199,25 @@ public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePri } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { delegate.createTable(identity, toMetastoreApiTable(table, principalPrivileges)); } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { delegate.dropTable(identity, databaseName, tableName, deleteData); } @Override - public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table 
newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { - alterTable(identity, databaseName, tableName, toMetastoreApiTable(newTable, principalPrivileges)); + alterTable(databaseName, tableName, toMetastoreApiTable(newTable, principalPrivileges)); } @Override - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { Optional source = delegate.getTable(identity, databaseName, tableName); if (source.isEmpty()) { @@ -226,11 +226,11 @@ public void renameTable(HiveIdentity identity, String databaseName, String table org.apache.hadoop.hive.metastore.api.Table table = source.get(); table.setDbName(newDatabaseName); table.setTableName(newTableName); - alterTable(identity, databaseName, tableName, table); + alterTable(databaseName, tableName, table); } @Override - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void commentTable(String databaseName, String tableName, Optional comment) { Optional source = delegate.getTable(identity, databaseName, tableName); if (source.isEmpty()) { @@ -244,11 +244,11 @@ public void commentTable(HiveIdentity identity, String databaseName, String tabl comment.ifPresent(value -> parameters.put(TABLE_COMMENT, value)); table.setParameters(parameters); - alterTable(identity, databaseName, tableName, table); + alterTable(databaseName, tableName, table); } @Override - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { // TODO Add role support https://github.com/trinodb/trino/issues/5706 if (principal.getType() != USER) { @@ -266,7 +266,7 @@ public void setTableOwner(HiveIdentity identity, String databaseName, String tab } @Override - public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { Optional source = delegate.getTable(identity, databaseName, tableName); if (source.isEmpty()) { @@ -285,11 +285,11 @@ public void commentColumn(HiveIdentity identity, String databaseName, String tab } } - alterTable(identity, databaseName, tableName, table); + alterTable(databaseName, tableName, table); } @Override - public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { Optional source = delegate.getTable(identity, databaseName, tableName); if (source.isEmpty()) { @@ -298,11 +298,11 @@ public void addColumn(HiveIdentity identity, String databaseName, String tableNa org.apache.hadoop.hive.metastore.api.Table table = source.get(); table.getSd().getCols().add( new FieldSchema(columnName, columnType.getHiveTypeName().toString(), columnComment)); - alterTable(identity, databaseName, tableName, table); + alterTable(databaseName, tableName, table); } @Override - public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String 
newColumnName) + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { Optional source = delegate.getTable(identity, databaseName, tableName); if (source.isEmpty()) { @@ -319,33 +319,32 @@ public void renameColumn(HiveIdentity identity, String databaseName, String tabl fieldSchema.setName(newColumnName); } } - alterTable(identity, databaseName, tableName, table); + alterTable(databaseName, tableName, table); } @Override - public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { - verifyCanDropColumn(this, identity, databaseName, tableName, columnName); + verifyCanDropColumn(this, databaseName, tableName, columnName); org.apache.hadoop.hive.metastore.api.Table table = delegate.getTable(identity, databaseName, tableName) .orElseThrow(() -> new TableNotFoundException(new SchemaTableName(databaseName, tableName))); table.getSd().getCols().removeIf(fieldSchema -> fieldSchema.getName().equals(columnName)); - alterTable(identity, databaseName, tableName, table); + alterTable(databaseName, tableName, table); } - private void alterTable(HiveIdentity identity, String databaseName, String tableName, org.apache.hadoop.hive.metastore.api.Table table) + private void alterTable(String databaseName, String tableName, org.apache.hadoop.hive.metastore.api.Table table) { delegate.alterTable(identity, databaseName, tableName, table); } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { return delegate.getPartition(identity, table.getDatabaseName(), table.getTableName(), partitionValues).map(partition -> fromMetastoreApiPartition(table, partition)); } @Override public Optional> getPartitionNamesByFilter( - HiveIdentity identity, String databaseName, String tableName, List columnNames, @@ -355,7 +354,7 @@ public Optional> getPartitionNamesByFilter( } @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { requireNonNull(partitionNames, "partitionNames is null"); if (partitionNames.isEmpty()) { @@ -388,19 +387,19 @@ private Partition fromMetastoreApiPartition(Table table, org.apache.hadoop.hive. 
} @Override - public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions) + public void addPartitions(String databaseName, String tableName, List partitions) { delegate.addPartitions(identity, databaseName, tableName, partitions); } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { delegate.dropPartition(identity, databaseName, tableName, parts, deleteData); } @Override - public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) { delegate.alterPartition(identity, databaseName, tableName, partition); } @@ -472,61 +471,61 @@ public Optional getConfigValue(String name) } @Override - public long openTransaction(HiveIdentity identity) + public long openTransaction() { return delegate.openTransaction(identity); } @Override - public void commitTransaction(HiveIdentity identity, long transactionId) + public void commitTransaction(long transactionId) { delegate.commitTransaction(identity, transactionId); } @Override - public void abortTransaction(HiveIdentity identity, long transactionId) + public void abortTransaction(long transactionId) { delegate.abortTransaction(identity, transactionId); } @Override - public void sendTransactionHeartbeat(HiveIdentity identity, long transactionId) + public void sendTransactionHeartbeat(long transactionId) { delegate.sendTransactionHeartbeat(identity, transactionId); } @Override - public void acquireSharedReadLock(HiveIdentity identity, String queryId, long transactionId, List fullTables, List partitions) + public void acquireSharedReadLock(String queryId, long transactionId, List fullTables, List partitions) { delegate.acquireSharedReadLock(identity, queryId, transactionId, fullTables, partitions); } @Override - public String getValidWriteIds(HiveIdentity identity, List tables, long currentTransactionId) + public String getValidWriteIds(List tables, long currentTransactionId) { return delegate.getValidWriteIds(identity, tables, currentTransactionId); } @Override - public long allocateWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId) + public long allocateWriteId(String dbName, String tableName, long transactionId) { return delegate.allocateWriteId(identity, dbName, tableName, transactionId); } @Override - public void acquireTableWriteLock(HiveIdentity identity, String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isDynamicPartitionWrite) + public void acquireTableWriteLock(String queryId, long transactionId, String dbName, String tableName, DataOperationType operation, boolean isDynamicPartitionWrite) { delegate.acquireTableWriteLock(identity, queryId, transactionId, dbName, tableName, operation, isDynamicPartitionWrite); } @Override - public void updateTableWriteId(HiveIdentity identity, String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) + public void updateTableWriteId(String dbName, String tableName, long transactionId, long writeId, OptionalLong rowCountChange) { delegate.updateTableWriteId(identity, dbName, tableName, transactionId, writeId, rowCountChange); } @Override - public void alterPartitions(HiveIdentity identity, String dbName, 
String tableName, List partitions, long writeId) + public void alterPartitions(String dbName, String tableName, List partitions, long writeId) { List hadoopPartitions = partitions.stream() .map(ThriftMetastoreUtil::toMetastoreApiPartition) @@ -536,13 +535,13 @@ public void alterPartitions(HiveIdentity identity, String dbName, String tableNa } @Override - public void addDynamicPartitions(HiveIdentity identity, String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) + public void addDynamicPartitions(String dbName, String tableName, List partitionNames, long transactionId, long writeId, AcidOperation operation) { delegate.addDynamicPartitions(identity, dbName, tableName, partitionNames, transactionId, writeId, operation); } @Override - public void alterTransactionalTable(HiveIdentity identity, Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) + public void alterTransactionalTable(Table table, long transactionId, long writeId, PrincipalPrivileges principalPrivileges) { delegate.alterTransactionalTable(identity, toMetastoreApiTable(table, principalPrivileges), transactionId, writeId); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java index 87a7ef0232d9..044945a359c9 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/CreateEmptyPartitionProcedure.java @@ -27,7 +27,6 @@ import io.trino.plugin.hive.PartitionUpdate.UpdateMode; import io.trino.plugin.hive.TransactionalMetadata; import io.trino.plugin.hive.TransactionalMetadataFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.spi.TrinoException; import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ConnectorAccessControl; @@ -120,7 +119,7 @@ private void doCreateEmptyPartition(ConnectorSession session, ConnectorAccessCon } HiveMetastoreClosure metastore = hiveMetadata.getMetastore().unsafeGetRawHiveMetastoreClosure(); - if (metastore.getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues).isPresent()) { + if (metastore.getPartition(schemaName, tableName, partitionValues).isPresent()) { throw new TrinoException(ALREADY_EXISTS, "Partition already exists"); } HiveInsertTableHandle hiveInsertTableHandle = (HiveInsertTableHandle) hiveMetadata.beginInsert(session, tableHandle, ImmutableList.of(), NO_RETRIES); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java index 994cb883cd2b..6196686d0c6a 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/DropStatsProcedure.java @@ -20,7 +20,6 @@ import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.TransactionalMetadata; import io.trino.plugin.hive.TransactionalMetadataFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.spi.TrinoException; import io.trino.spi.classloader.ThreadContextClassLoader; import io.trino.spi.connector.ColumnHandle; @@ -119,7 +118,6 @@ private void doDropStats(ConnectorSession session, ConnectorAccessControl access 
validatePartitions(partitionStringValues, partitionColumns); partitionStringValues.forEach(values -> metastore.updatePartitionStatistics( - new HiveIdentity(session.getIdentity()), schema, table, makePartName(partitionColumns, values), @@ -130,7 +128,6 @@ private void doDropStats(ConnectorSession session, ConnectorAccessControl access if (partitionColumns.isEmpty()) { // for non-partitioned tables, just wipe table stats metastore.updateTableStatistics( - new HiveIdentity(session.getIdentity()), schema, table, NO_ACID_TRANSACTION, @@ -138,9 +135,8 @@ private void doDropStats(ConnectorSession session, ConnectorAccessControl access } else { // the table is partitioned; remove stats for every partition - metastore.getPartitionNamesByFilter(new HiveIdentity(session.getIdentity()), handle.getSchemaName(), handle.getTableName(), partitionColumns, TupleDomain.all()) + metastore.getPartitionNamesByFilter(handle.getSchemaName(), handle.getTableName(), partitionColumns, TupleDomain.all()) .ifPresent(partitions -> partitions.forEach(partitionName -> metastore.updatePartitionStatistics( - new HiveIdentity(session.getIdentity()), schema, table, partitionName, diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java index b205515d3540..a081742ec548 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/RegisterPartitionProcedure.java @@ -20,7 +20,6 @@ import io.trino.plugin.hive.HiveConfig; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.TransactionalMetadataFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Partition; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.metastore.Table; @@ -110,11 +109,10 @@ private void doRegisterPartition(ConnectorSession session, ConnectorAccessContro SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); - HiveIdentity identity = new HiveIdentity(session); HdfsContext hdfsContext = new HdfsContext(session); SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName); - Table table = metastore.getTable(identity, schemaName, tableName) + Table table = metastore.getTable(schemaName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); accessControl.checkCanInsertIntoTable(null, schemaTableName); @@ -122,7 +120,7 @@ private void doRegisterPartition(ConnectorSession session, ConnectorAccessContro checkIsPartitionedTable(table); checkPartitionColumns(table, partitionColumn); - Optional partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues); + Optional partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(schemaName, tableName, partitionValues); if (partition.isPresent()) { String partitionName = FileUtils.makePartName(partitionColumn, partitionValues); throw new TrinoException(ALREADY_EXISTS, format("Partition [%s] is already registered with location %s", partitionName, partition.get().getStorage().getLocation())); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java 
b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java index 189ec187853a..a6fc658cf9ff 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/SyncPartitionMetadataProcedure.java @@ -19,7 +19,6 @@ import io.trino.plugin.hive.HdfsEnvironment; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.TransactionalMetadataFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Partition; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; @@ -115,11 +114,10 @@ private void doSyncPartitionMetadata(ConnectorSession session, ConnectorAccessCo { SyncMode syncMode = toSyncMode(mode); HdfsContext hdfsContext = new HdfsContext(session); - HiveIdentity identity = new HiveIdentity(session); SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName); - Table table = metastore.getTable(identity, schemaName, tableName) + Table table = metastore.getTable(schemaName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); if (table.getPartitionColumns().isEmpty()) { throw new TrinoException(INVALID_PROCEDURE_ARGUMENT, "Table is not partitioned: " + schemaTableName); @@ -139,7 +137,7 @@ private void doSyncPartitionMetadata(ConnectorSession session, ConnectorAccessCo try { FileSystem fileSystem = hdfsEnvironment.getFileSystem(hdfsContext, tableLocation); - List partitionsInMetastore = metastore.getPartitionNames(identity, schemaName, tableName) + List partitionsInMetastore = metastore.getPartitionNames(schemaName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); List partitionsInFileSystem = listDirectory(fileSystem, fileSystem.getFileStatus(tableLocation), table.getPartitionColumns(), table.getPartitionColumns().size(), caseSensitive).stream() .map(fileStatus -> fileStatus.getPath().toUri()) diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java index 3115de32a0c3..5f9db471c1a8 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/procedure/UnregisterPartitionProcedure.java @@ -15,7 +15,6 @@ import com.google.common.collect.ImmutableList; import io.trino.plugin.hive.TransactionalMetadataFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Partition; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.metastore.Table; @@ -87,12 +86,11 @@ public void unregisterPartition(ConnectorSession session, ConnectorAccessControl private void doUnregisterPartition(ConnectorSession session, ConnectorAccessControl accessControl, String schemaName, String tableName, List partitionColumn, List partitionValues) { - HiveIdentity identity = new HiveIdentity(session); SchemaTableName schemaTableName = new SchemaTableName(schemaName, tableName); SemiTransactionalHiveMetastore metastore = hiveMetadataFactory.create(session.getIdentity(), true).getMetastore(); - Table table = 
metastore.getTable(identity, schemaName, tableName) + Table table = metastore.getTable(schemaName, tableName) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); accessControl.checkCanDeleteFromTable(null, schemaTableName); @@ -102,7 +100,7 @@ private void doUnregisterPartition(ConnectorSession session, ConnectorAccessCont String partitionName = FileUtils.makePartName(partitionColumn, partitionValues); - Partition partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(new HiveIdentity(session), schemaName, tableName, partitionValues) + Partition partition = metastore.unsafeGetRawHiveMetastoreClosure().getPartition(schemaName, tableName, partitionValues) .orElseThrow(() -> new TrinoException(NOT_FOUND, format("Partition '%s' does not exist", partitionName))); metastore.dropPartition( diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControl.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControl.java index d1ac8e85385b..054430bffeca 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControl.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControl.java @@ -13,7 +13,6 @@ */ package io.trino.plugin.hive.security; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Table; import io.trino.spi.connector.ConnectorAccessControl; import io.trino.spi.connector.ConnectorSecurityContext; @@ -122,7 +121,7 @@ public void checkCanDropTable(ConnectorSecurityContext context, SchemaTableName denyDropTable(tableName.toString()); } - Optional
target = accessControlMetastore.getTable(context, new HiveIdentity(context.getIdentity()), tableName.getSchemaName(), tableName.getTableName());
+        Optional<Table>
target = accessControlMetastore.getTable(context, tableName.getSchemaName(), tableName.getTableName()); if (target.isEmpty()) { denyDropTable(tableName.toString(), "Table not found"); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControlMetastore.java index 20877fc0ea8b..098cca02a01f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/LegacyAccessControlMetastore.java @@ -13,7 +13,6 @@ */ package io.trino.plugin.hive.security; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Table; import io.trino.spi.connector.ConnectorSecurityContext; @@ -25,5 +24,5 @@ */ public interface LegacyAccessControlMetastore { - Optional
getTable(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName);
+    Optional<Table>
getTable(ConnectorSecurityContext context, String databaseName, String tableName); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java index 5503ff55c649..e6c8a20cf85f 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalLegacyAccessControlMetastore.java @@ -14,7 +14,6 @@ package io.trino.plugin.hive.security; import io.trino.plugin.hive.HiveTransactionManager; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.SemiTransactionalHiveMetastore; import io.trino.plugin.hive.metastore.Table; import io.trino.spi.connector.ConnectorSecurityContext; @@ -37,9 +36,9 @@ public SemiTransactionalLegacyAccessControlMetastore(HiveTransactionManager tran } @Override - public Optional
getTable(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName)
+    public Optional<Table>
getTable(ConnectorSecurityContext context, String databaseName, String tableName) { SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle(), context.getIdentity()).getMetastore(); - return metastore.getTable(new HiveIdentity(context.getIdentity()), databaseName, tableName); + return metastore.getTable(databaseName, tableName); } } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java index 4fd8ace784b6..4b8c9a9ab1fc 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SemiTransactionalSqlStandardAccessControlMetastore.java @@ -14,7 +14,6 @@ package io.trino.plugin.hive.security; import io.trino.plugin.hive.HiveTransactionManager; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HivePrincipal; import io.trino.plugin.hive.metastore.HivePrivilegeInfo; @@ -48,10 +47,10 @@ public Set listRoleGrants(ConnectorSecurityContext context, HivePrinc } @Override - public Set listTablePrivileges(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName, Optional principal) + public Set listTablePrivileges(ConnectorSecurityContext context, String databaseName, String tableName, Optional principal) { SemiTransactionalHiveMetastore metastore = transactionManager.get(context.getTransactionHandle(), context.getIdentity()).getMetastore(); - return metastore.listTablePrivileges(identity, databaseName, tableName, principal); + return metastore.listTablePrivileges(databaseName, tableName, principal); } @Override diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControl.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControl.java index 43e977d0ac12..d6b49367e4c7 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControl.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControl.java @@ -15,7 +15,6 @@ import com.google.common.collect.ImmutableSet; import io.trino.plugin.base.CatalogName; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HivePrincipal; import io.trino.plugin.hive.metastore.HivePrivilegeInfo; @@ -639,7 +638,7 @@ private boolean checkTablePermission( return true; } - Set allowedPrincipals = metastore.listTablePrivileges(context, new HiveIdentity(context.getIdentity()), tableName.getSchemaName(), tableName.getTableName(), Optional.empty()).stream() + Set allowedPrincipals = metastore.listTablePrivileges(context, tableName.getSchemaName(), tableName.getTableName(), Optional.empty()).stream() .filter(privilegeInfo -> privilegeInfo.getHivePrivilege() == requiredPrivilege) .filter(privilegeInfo -> !grantOptionRequired || privilegeInfo.isGrantOption()) .map(HivePrivilegeInfo::getGrantee) @@ -676,12 +675,15 @@ private Stream listApplicableTablePrivileges(ConnectorSecurit Stream.of(userPrincipal), listApplicableRoles(userPrincipal, hivePrincipal -> metastore.listRoleGrants(context, hivePrincipal)) .map(role -> new HivePrincipal(ROLE, 
role.getRoleName()))); - return listTablePrivileges(context, new HiveIdentity(identity), databaseName, tableName, principals); + return listTablePrivileges(context, databaseName, tableName, principals); } - private Stream listTablePrivileges(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName, Stream principals) + private Stream listTablePrivileges(ConnectorSecurityContext context, + String databaseName, + String tableName, + Stream principals) { - return principals.flatMap(principal -> metastore.listTablePrivileges(context, identity, databaseName, tableName, Optional.of(principal)).stream()); + return principals.flatMap(principal -> metastore.listTablePrivileges(context, databaseName, tableName, Optional.of(principal)).stream()); } private boolean hasAdminOptionForRoles(ConnectorSecurityContext context, Set roles) @@ -711,7 +713,7 @@ private boolean hasAnyTablePermission(ConnectorSecurityContext context, SchemaTa return true; } - Set allowedPrincipals = metastore.listTablePrivileges(context, new HiveIdentity(context.getIdentity()), tableName.getSchemaName(), tableName.getTableName(), Optional.empty()).stream() + Set allowedPrincipals = metastore.listTablePrivileges(context, tableName.getSchemaName(), tableName.getTableName(), Optional.empty()).stream() .map(HivePrivilegeInfo::getGrantee) .collect(toImmutableSet()); diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadata.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadata.java index c0f10f692db5..2be8b05cc2da 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadata.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadata.java @@ -16,7 +16,6 @@ import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableSet; import io.trino.plugin.hive.HiveViewNotSupportedException; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HivePrincipal; import io.trino.plugin.hive.metastore.HivePrivilegeInfo; import io.trino.plugin.hive.metastore.thrift.ThriftMetastoreUtil; @@ -183,7 +182,6 @@ public void grantTablePrivileges(ConnectorSession session, SchemaTableName schem .collect(toImmutableSet()); metastore.grantTablePrivileges( - new HiveIdentity(session), schemaName, tableName, grantee, @@ -208,7 +206,6 @@ public void revokeTablePrivileges(ConnectorSession session, SchemaTableName sche .collect(toImmutableSet()); metastore.revokeTablePrivileges( - new HiveIdentity(session), schemaName, tableName, grantee, @@ -228,7 +225,7 @@ public List listTablePrivileges(ConnectorSession session, List result = ImmutableList.builder(); for (SchemaTableName tableName : tableNames) { try { - result.addAll(buildGrants(session, principals, isAdminRoleSet, tableName)); + result.addAll(buildGrants(principals, isAdminRoleSet, tableName)); } catch (TableNotFoundException e) { // table disappeared during listing operation @@ -240,22 +237,22 @@ public List listTablePrivileges(ConnectorSession session, List buildGrants(ConnectorSession session, Set principals, boolean isAdminRoleSet, SchemaTableName tableName) + private List buildGrants(Set principals, boolean isAdminRoleSet, SchemaTableName tableName) { if (isAdminRoleSet) { - return buildGrants(session, tableName, Optional.empty()); + return buildGrants(tableName, Optional.empty()); } ImmutableList.Builder result = 
ImmutableList.builder(); for (HivePrincipal grantee : principals) { - result.addAll(buildGrants(session, tableName, Optional.of(grantee))); + result.addAll(buildGrants(tableName, Optional.of(grantee))); } return result.build(); } - private List buildGrants(ConnectorSession session, SchemaTableName tableName, Optional principal) + private List buildGrants(SchemaTableName tableName, Optional principal) { ImmutableList.Builder result = ImmutableList.builder(); - Set hivePrivileges = metastore.listTablePrivileges(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName(), principal); + Set hivePrivileges = metastore.listTablePrivileges(tableName.getSchemaName(), tableName.getTableName(), principal); for (HivePrivilegeInfo hivePrivilege : hivePrivileges) { Set prestoPrivileges = hivePrivilege.toPrivilegeInfo(); for (PrivilegeInfo prestoPrivilege : prestoPrivileges) { diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadataMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadataMetastore.java index dc025bfe6142..023ebfb7a00d 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadataMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetadataMetastore.java @@ -13,7 +13,6 @@ */ package io.trino.plugin.hive.security; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HivePrincipal; import io.trino.plugin.hive.metastore.HivePrivilegeInfo; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; @@ -42,9 +41,9 @@ public interface SqlStandardAccessControlMetadataMetastore Set listGrantedPrincipals(String role); - void revokeTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption); + void revokeTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption); - void grantTablePrivileges(HiveIdentity identity, String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption); + void grantTablePrivileges(String databaseName, String tableName, HivePrincipal grantee, HivePrincipal grantor, Set privileges, boolean grantOption); - Set listTablePrivileges(HiveIdentity identity, String databaseName, String tableName, Optional principal); + Set listTablePrivileges(String databaseName, String tableName, Optional principal); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetastore.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetastore.java index 9a80977e0560..100bbdcc394c 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetastore.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/security/SqlStandardAccessControlMetastore.java @@ -13,7 +13,6 @@ */ package io.trino.plugin.hive.security; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HivePrincipal; import io.trino.plugin.hive.metastore.HivePrivilegeInfo; @@ -31,7 +30,7 @@ public interface SqlStandardAccessControlMetastore { Set listRoleGrants(ConnectorSecurityContext context, HivePrincipal 
principal); - Set listTablePrivileges(ConnectorSecurityContext context, HiveIdentity identity, String databaseName, String tableName, Optional principal); + Set listTablePrivileges(ConnectorSecurityContext context, String databaseName, String tableName, Optional principal); Optional getDatabase(ConnectorSecurityContext context, String databaseName); } diff --git a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/statistics/MetastoreHiveStatisticsProvider.java b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/statistics/MetastoreHiveStatisticsProvider.java index f5a81441b5c6..ce7fa2d79caa 100644 --- a/plugin/trino-hive/src/main/java/io/trino/plugin/hive/statistics/MetastoreHiveStatisticsProvider.java +++ b/plugin/trino-hive/src/main/java/io/trino/plugin/hive/statistics/MetastoreHiveStatisticsProvider.java @@ -27,7 +27,6 @@ import io.trino.plugin.hive.HiveColumnHandle; import io.trino.plugin.hive.HivePartition; import io.trino.plugin.hive.PartitionStatistics; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.DateStatistics; import io.trino.plugin.hive.metastore.DecimalStatistics; import io.trino.plugin.hive.metastore.DoubleStatistics; @@ -99,7 +98,7 @@ public class MetastoreHiveStatisticsProvider public MetastoreHiveStatisticsProvider(SemiTransactionalHiveMetastore metastore) { requireNonNull(metastore, "metastore is null"); - this.statisticsProvider = (session, table, hivePartitions) -> getPartitionsStatistics(session, metastore, table, hivePartitions); + this.statisticsProvider = (session, table, hivePartitions) -> getPartitionsStatistics(metastore, table, hivePartitions); } @VisibleForTesting @@ -108,7 +107,7 @@ public MetastoreHiveStatisticsProvider(SemiTransactionalHiveMetastore metastore) this.statisticsProvider = requireNonNull(statisticsProvider, "statisticsProvider is null"); } - private static Map getPartitionsStatistics(ConnectorSession session, SemiTransactionalHiveMetastore metastore, SchemaTableName table, List hivePartitions) + private static Map getPartitionsStatistics(SemiTransactionalHiveMetastore metastore, SchemaTableName table, List hivePartitions) { if (hivePartitions.isEmpty()) { return ImmutableMap.of(); @@ -116,12 +115,12 @@ private static Map getPartitionsStatistics(Connecto boolean unpartitioned = hivePartitions.stream().anyMatch(partition -> partition.getPartitionId().equals(UNPARTITIONED_ID)); if (unpartitioned) { checkArgument(hivePartitions.size() == 1, "expected only one hive partition"); - return ImmutableMap.of(UNPARTITIONED_ID, metastore.getTableStatistics(new HiveIdentity(session), table.getSchemaName(), table.getTableName())); + return ImmutableMap.of(UNPARTITIONED_ID, metastore.getTableStatistics(table.getSchemaName(), table.getTableName())); } Set partitionNames = hivePartitions.stream() .map(HivePartition::getPartitionId) .collect(toImmutableSet()); - return metastore.getPartitionStatistics(new HiveIdentity(session), table.getSchemaName(), table.getTableName(), partitionNames); + return metastore.getPartitionStatistics(table.getSchemaName(), table.getTableName(), partitionNames); } @Override diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java index f7a321c3a2e2..1bbae553875f 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHive.java @@ -792,7 +792,6 @@ protected final void 
setup(String host, int port, String databaseName, String ti hdfsEnvironment, false), new HiveIdentity(SESSION.getIdentity())), - new HiveIdentity(SESSION.getIdentity()), executor, new Duration(1, MINUTES), Optional.of(new Duration(15, SECONDS)), @@ -1208,7 +1207,7 @@ protected void doTestMismatchSchemaTable( try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(session); - Table oldTable = transaction.getMetastore().getTable(new HiveIdentity(session), schemaName, tableName).get(); + Table oldTable = transaction.getMetastore().getTable(schemaName, tableName).get(); List dataColumns = tableAfter.stream() .filter(columnMetadata -> !columnMetadata.getName().equals("ds")) .map(columnMetadata -> new Column(columnMetadata.getName(), toHiveType(columnMetadata.getType()), Optional.empty())) @@ -1216,7 +1215,7 @@ protected void doTestMismatchSchemaTable( Table.Builder newTable = Table.builder(oldTable) .setDataColumns(dataColumns); - transaction.getMetastore().replaceTable(new HiveIdentity(session), schemaName, tableName, newTable.build(), principalPrivileges); + transaction.getMetastore().replaceTable(schemaName, tableName, newTable.build(), principalPrivileges); transaction.commit(); } @@ -2346,7 +2345,7 @@ private void assertEmptyFile(HiveStorageFormat format) List columnHandles = filterNonHiddenColumnHandles(metadata.getColumnHandles(session, tableHandle).values()); Table table = transaction.getMetastore() - .getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + .getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(AssertionError::new); // verify directory is empty @@ -2490,7 +2489,7 @@ public void testTableCreationIgnoreExisting() Table table = createSimpleTable(schemaTableName, columns, session, targetPath, "q1"); transaction.getMetastore() .createTable(session, table, privileges, Optional.empty(), Optional.empty(), false, EMPTY_TABLE_STATISTICS, false); - Optional
<Table> tableHandle = transaction.getMetastore().getTable(new HiveIdentity(session), schemaName, tableName); + Optional<Table>
tableHandle = transaction.getMetastore().getTable(schemaName, tableName); assertTrue(tableHandle.isPresent()); transaction.commit(); } @@ -2870,7 +2869,6 @@ public void testCreateTableUnsupportedType() public void testHideDeltaLakeTables() { ConnectorSession session = newSession(); - HiveIdentity identity = new HiveIdentity(session); SchemaTableName tableName = temporaryTable("trino_delta_lake_table"); Table.Builder table = Table.builder() @@ -2889,7 +2887,7 @@ public void testHideDeltaLakeTables() hdfsEnvironment, tableName.getSchemaName(), tableName.getTableName()).toString()); - metastoreClient.createTable(identity, table.build(), NO_PRIVILEGES); + metastoreClient.createTable(table.build(), NO_PRIVILEGES); try { // Verify the table was created as a Delta Lake table @@ -2940,7 +2938,7 @@ public void testHideDeltaLakeTables() } finally { // Clean up - metastoreClient.dropTable(identity, tableName.getSchemaName(), tableName.getTableName(), true); + metastoreClient.dropTable(tableName.getSchemaName(), tableName.getTableName(), true); } } @@ -2948,7 +2946,6 @@ public void testHideDeltaLakeTables() public void testDisallowQueryingOfIcebergTables() { ConnectorSession session = newSession(); - HiveIdentity identity = new HiveIdentity(session); SchemaTableName tableName = temporaryTable("trino_iceberg_table"); Table.Builder table = Table.builder() @@ -2967,7 +2964,7 @@ public void testDisallowQueryingOfIcebergTables() hdfsEnvironment, tableName.getSchemaName(), tableName.getTableName()).toString()); - metastoreClient.createTable(identity, table.build(), NO_PRIVILEGES); + metastoreClient.createTable(table.build(), NO_PRIVILEGES); try { // Verify that the table was created as a Iceberg table can't be queried in hive @@ -2990,7 +2987,7 @@ public void testDisallowQueryingOfIcebergTables() } finally { // Clean up - metastoreClient.dropTable(identity, tableName.getSchemaName(), tableName.getTableName(), true); + metastoreClient.dropTable(tableName.getSchemaName(), tableName.getTableName(), true); } } @@ -3039,30 +3036,29 @@ public void testUpdateTableColumnStatisticsEmptyOptionalFields() protected void testUpdateTableStatistics(SchemaTableName tableName, PartitionStatistics initialStatistics, PartitionStatistics... 
statistics) { HiveMetastoreClosure metastoreClient = new HiveMetastoreClosure(getMetastoreClient()); - HiveIdentity identity = new HiveIdentity(SESSION); - assertThat(metastoreClient.getTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(initialStatistics); AtomicReference expectedStatistics = new AtomicReference<>(initialStatistics); for (PartitionStatistics partitionStatistics : statistics) { - metastoreClient.updateTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), NO_ACID_TRANSACTION, actualStatistics -> { + metastoreClient.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), NO_ACID_TRANSACTION, actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatistics.get()); return partitionStatistics; }); - assertThat(metastoreClient.getTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(partitionStatistics); expectedStatistics.set(partitionStatistics); } - assertThat(metastoreClient.getTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(expectedStatistics.get()); - metastoreClient.updateTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), NO_ACID_TRANSACTION, actualStatistics -> { + metastoreClient.updateTableStatistics(tableName.getSchemaName(), tableName.getTableName(), NO_ACID_TRANSACTION, actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatistics.get()); return initialStatistics; }); - assertThat(metastoreClient.getTableStatistics(identity, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastoreClient.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(initialStatistics); } @@ -3195,8 +3191,7 @@ protected void createDummyPartitionedTable(SchemaTableName tableName, List new TableNotFoundException(tableName)); List firstPartitionValues = ImmutableList.of("2016-01-01"); @@ -3209,9 +3204,9 @@ protected void createDummyPartitionedTable(SchemaTableName tableName, List new PartitionWithStatistics(createDummyPartition(table, partitionName), partitionName, PartitionStatistics.empty())) .collect(toImmutableList()); - metastoreClient.addPartitions(identity, tableName.getSchemaName(), tableName.getTableName(), partitions); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); + metastoreClient.addPartitions(tableName.getSchemaName(), tableName.getTableName(), partitions); + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> EMPTY_TABLE_STATISTICS); } protected void testUpdatePartitionStatistics( @@ -3226,8 +3221,7 @@ protected void testUpdatePartitionStatistics( String secondPartitionName = 
"ds=2016-01-02"; HiveMetastoreClosure metastoreClient = new HiveMetastoreClosure(getMetastoreClient()); - HiveIdentity identity = new HiveIdentity(SESSION); - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, initialStatistics, secondPartitionName, initialStatistics)); AtomicReference expectedStatisticsPartition1 = new AtomicReference<>(initialStatistics); @@ -3236,31 +3230,31 @@ protected void testUpdatePartitionStatistics( for (int i = 0; i < firstPartitionStatistics.size(); i++) { PartitionStatistics statisticsPartition1 = firstPartitionStatistics.get(i); PartitionStatistics statisticsPartition2 = secondPartitionStatistics.get(i); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, actualStatistics -> { + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatisticsPartition1.get()); return statisticsPartition1; }); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, actualStatistics -> { + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, actualStatistics -> { assertThat(actualStatistics).isEqualTo(expectedStatisticsPartition2.get()); return statisticsPartition2; }); - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, statisticsPartition1, secondPartitionName, statisticsPartition2)); expectedStatisticsPartition1.set(statisticsPartition1); expectedStatisticsPartition2.set(statisticsPartition2); } - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, expectedStatisticsPartition1.get(), secondPartitionName, expectedStatisticsPartition2.get())); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> { + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), firstPartitionName, currentStatistics -> { assertThat(currentStatistics).isEqualTo(expectedStatisticsPartition1.get()); return initialStatistics; }); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> { + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), secondPartitionName, currentStatistics -> { 
assertThat(currentStatistics).isEqualTo(expectedStatisticsPartition2.get()); return initialStatistics; }); - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(firstPartitionName, secondPartitionName))) .isEqualTo(ImmutableMap.of(firstPartitionName, initialStatistics, secondPartitionName, initialStatistics)); } @@ -3284,8 +3278,7 @@ protected void testStorePartitionWithStatistics( doCreateEmptyTable(tableName, ORC, columns); HiveMetastoreClosure metastoreClient = new HiveMetastoreClosure(getMetastoreClient()); - HiveIdentity identity = new HiveIdentity(SESSION); - Table table = metastoreClient.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); List partitionValues = ImmutableList.of("2016-01-01"); @@ -3294,11 +3287,11 @@ protected void testStorePartitionWithStatistics( Partition partition = createDummyPartition(table, partitionName); // create partition with stats for all columns - metastoreClient.addPartitions(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of(new PartitionWithStatistics(partition, partitionName, statsForAllColumns1))); + metastoreClient.addPartitions(tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of(new PartitionWithStatistics(partition, partitionName, statsForAllColumns1))); assertEquals( - metastoreClient.getPartition(identity, tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), + metastoreClient.getPartition(tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), fromHiveStorageFormat(ORC)); - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, statsForAllColumns1)); // alter the partition into one with other stats @@ -3307,11 +3300,11 @@ protected void testStorePartitionWithStatistics( .setStorageFormat(fromHiveStorageFormat(RCBINARY)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); - metastoreClient.alterPartition(identity, tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForAllColumns2)); + metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForAllColumns2)); assertEquals( - metastoreClient.getPartition(identity, tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), + metastoreClient.getPartition(tableName.getSchemaName(), tableName.getTableName(), partitionValues).get().getStorage().getStorageFormat(), fromHiveStorageFormat(RCBINARY)); - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), 
ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, statsForAllColumns2)); // alter the partition into one with stats for only subset of columns @@ -3320,8 +3313,8 @@ protected void testStorePartitionWithStatistics( .setStorageFormat(fromHiveStorageFormat(TEXTFILE)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); - metastoreClient.alterPartition(identity, tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForSubsetOfColumns)); - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, statsForSubsetOfColumns)); + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, statsForSubsetOfColumns)); // alter the partition into one without stats @@ -3330,8 +3323,8 @@ protected void testStorePartitionWithStatistics( .setStorageFormat(fromHiveStorageFormat(TEXTFILE)) .setLocation(partitionTargetPath(tableName, partitionName))) .build(); - metastoreClient.alterPartition(identity, tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, emptyStatistics)); - assertThat(metastoreClient.getPartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) + metastoreClient.alterPartition(tableName.getSchemaName(), tableName.getTableName(), new PartitionWithStatistics(modifiedPartition, partitionName, emptyStatistics)); + assertThat(metastoreClient.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), ImmutableSet.of(partitionName))) .isEqualTo(ImmutableMap.of(partitionName, emptyStatistics)); } finally { @@ -3361,7 +3354,7 @@ protected String partitionTargetPath(SchemaTableName schemaTableName, String par ConnectorSession session = newSession(); SemiTransactionalHiveMetastore metastore = transaction.getMetastore(); LocationService locationService = getLocationService(); - Table table = metastore.getTable(new HiveIdentity(session), schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); + Table table = metastore.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()).get(); LocationHandle handle = locationService.forExistingTable(metastore, session, table); return locationService.getPartitionWriteInfo(handle, Optional.empty(), partitionName).getTargetPath().toString(); } @@ -3386,9 +3379,8 @@ protected void testPartitionStatisticsSampling(List columns, Par try { createDummyPartitionedTable(tableName, columns); HiveMetastoreClosure metastoreClient = new HiveMetastoreClosure(getMetastoreClient()); - HiveIdentity identity = new HiveIdentity(SESSION); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-01", actualStatistics -> statistics); - metastoreClient.updatePartitionStatistics(identity, tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-02", actualStatistics -> statistics); + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-01", actualStatistics -> statistics); + metastoreClient.updatePartitionStatistics(tableName.getSchemaName(), 
tableName.getTableName(), "ds=2016-01-02", actualStatistics -> statistics); try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); @@ -3743,12 +3735,12 @@ protected void doCreateTable(SchemaTableName tableName, HiveStorageFormat storag assertEqualsIgnoreOrder(result.getMaterializedRows(), CREATE_TABLE_DATA.getMaterializedRows()); // verify the node version and query ID in table - Table table = getMetastoreClient().getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()).get(); + Table table = getMetastoreClient().getTable(tableName.getSchemaName(), tableName.getTableName()).get(); assertEquals(table.getParameters().get(PRESTO_VERSION_NAME), TEST_SERVER_VERSION); assertEquals(table.getParameters().get(PRESTO_QUERY_ID_NAME), queryId); // verify basic statistics - HiveBasicStatistics statistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount()); assertEquals(statistics.getFileCount().getAsLong(), 1L); assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -3804,7 +3796,7 @@ protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat s assertEquals(filterNonHiddenColumnMetadata(tableMetadata.getColumns()), expectedColumns); // verify table format - Table table = transaction.getMetastore().getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()).get(); + Table table = transaction.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()).get(); assertEquals(table.getStorage().getStorageFormat().getInputFormat(), storageFormat.getInputFormat()); // verify the node version and query ID @@ -3818,7 +3810,7 @@ protected void doCreateEmptyTable(SchemaTableName tableName, HiveStorageFormat s // verify basic statistics if (partitionedBy.isEmpty()) { - HiveBasicStatistics statistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), 0L); assertEquals(statistics.getFileCount().getAsLong(), 0L); assertEquals(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -3856,7 +3848,7 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName assertEqualsIgnoreOrder(result.getMaterializedRows(), resultBuilder.build().getMaterializedRows()); // statistics - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * (i + 1)); assertEquals(tableStatistics.getFileCount().getAsLong(), i + 1L); assertGreaterThan(tableStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -3887,12 +3879,12 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of()); // statistics, visible from within transaction - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 
5L); try (Transaction otherTransaction = newTransaction()) { // statistics, not visible from outside transaction - HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(session, otherTransaction, tableName); + HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(otherTransaction, tableName); assertEquals(otherTableStatistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L); } @@ -3933,8 +3925,7 @@ private void doInsert(HiveStorageFormat storageFormat, SchemaTableName tableName // verify statistics unchanged try (Transaction transaction = newTransaction()) { - ConnectorSession session = newSession(); - HiveBasicStatistics statistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), CREATE_TABLE_DATA.getRowCount() * 3L); assertEquals(statistics.getFileCount().getAsLong(), 3L); } @@ -3978,7 +3969,7 @@ private void doInsertOverwriteUnpartitioned(SchemaTableName tableName) assertEqualsIgnoreOrder(result.getMaterializedRows(), overwriteData.getMaterializedRows()); // statistics - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), overwriteData.getRowCount()); assertEquals(tableStatistics.getFileCount().getAsLong(), 1L); assertGreaterThan(tableStatistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4010,12 +4001,12 @@ private void doInsertOverwriteUnpartitioned(SchemaTableName tableName) metadata.finishInsert(session, insertTableHandle, fragments, ImmutableList.of()); // statistics, visible from within transaction - HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics tableStatistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(tableStatistics.getRowCount().getAsLong(), overwriteData.getRowCount() * 4L); try (Transaction otherTransaction = newTransaction()) { // statistics, not visible from outside transaction - HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(session, otherTransaction, tableName); + HiveBasicStatistics otherTableStatistics = getBasicStatisticsForTable(otherTransaction, tableName); assertEquals(otherTableStatistics.getRowCount().getAsLong(), overwriteData.getRowCount()); } @@ -4056,8 +4047,7 @@ private void doInsertOverwriteUnpartitioned(SchemaTableName tableName) // verify statistics unchanged try (Transaction transaction = newTransaction()) { - ConnectorSession session = newSession(); - HiveBasicStatistics statistics = getBasicStatisticsForTable(session, transaction, tableName); + HiveBasicStatistics statistics = getBasicStatisticsForTable(transaction, tableName); assertEquals(statistics.getRowCount().getAsLong(), overwriteData.getRowCount()); assertEquals(statistics.getFileCount().getAsLong(), 1L); } @@ -4095,18 +4085,17 @@ protected Set listAllDataFiles(Transaction transaction, String schemaNam throws IOException { HdfsContext hdfsContext = new HdfsContext(newSession()); - HiveIdentity identity = new HiveIdentity(newSession()); Set existingFiles = new HashSet<>(); - for (String location : listAllDataPaths(identity, transaction.getMetastore(), schemaName, tableName)) { + for (String location : listAllDataPaths(transaction.getMetastore(), schemaName, tableName)) { 
existingFiles.addAll(listAllDataFiles(hdfsContext, new Path(location))); } return existingFiles; } - public static List listAllDataPaths(HiveIdentity identity, SemiTransactionalHiveMetastore metastore, String schemaName, String tableName) + public static List listAllDataPaths(SemiTransactionalHiveMetastore metastore, String schemaName, String tableName) { ImmutableList.Builder locations = ImmutableList.builder(); - Table table = metastore.getTable(identity, schemaName, tableName).get(); + Table table = metastore.getTable(schemaName, tableName).get(); if (table.getStorage().getLocation() != null) { // For partitioned table, there should be nothing directly under this directory. // But including this location in the set makes the directory content assert more @@ -4114,9 +4103,9 @@ public static List listAllDataPaths(HiveIdentity identity, SemiTransacti locations.add(table.getStorage().getLocation()); } - Optional> partitionNames = metastore.getPartitionNames(identity, schemaName, tableName); + Optional> partitionNames = metastore.getPartitionNames(schemaName, tableName); if (partitionNames.isPresent()) { - metastore.getPartitionsByNames(identity, schemaName, tableName, partitionNames.get()).values().stream() + metastore.getPartitionsByNames(schemaName, tableName, partitionNames.get()).values().stream() .map(Optional::get) .map(partition -> partition.getStorage().getLocation()) .filter(location -> !location.startsWith(table.getStorage().getLocation())) @@ -4159,17 +4148,16 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab Set existingFiles; try (Transaction transaction = newTransaction()) { // verify partitions were created - HiveIdentity identity = new HiveIdentity(newSession()); - Table table = metastoreClient.getTable(identity, tableName.getSchemaName(), tableName.getTableName()) + Table table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); - List partitionNames = transaction.getMetastore().getPartitionNames(identity, tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) .collect(toImmutableList())); // verify the node versions in partitions - Map> partitions = getMetastoreClient().getPartitionsByNames(identity, table, partitionNames); + Map> partitions = getMetastoreClient().getPartitionsByNames(table, partitionNames); assertEquals(partitions.size(), partitionNames.size()); for (String partitionName : partitionNames) { Partition partition = partitions.get(partitionName).get(); @@ -4194,7 +4182,7 @@ private void doInsertIntoNewPartition(HiveStorageFormat storageFormat, SchemaTab // test statistics for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session, transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 1L); assertEquals(partitionStatistics.getFileCount().getAsLong(), 1L); assertGreaterThan(partitionStatistics.getInMemoryDataSizeInBytes().getAsLong(), 
0L); @@ -4287,7 +4275,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche ConnectorTableHandle tableHandle = getTableHandle(metadata, tableName); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) @@ -4303,7 +4291,7 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche // test statistics for (String partitionName : partitionNames) { - HiveBasicStatistics statistics = getBasicStatisticsForPartition(session, transaction, tableName, partitionName); + HiveBasicStatistics statistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); assertEquals(statistics.getRowCount().getAsLong(), i + 1L); assertEquals(statistics.getFileCount().getAsLong(), i + 1L); assertGreaterThan(statistics.getInMemoryDataSizeInBytes().getAsLong(), 0L); @@ -4342,10 +4330,10 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche } // verify statistics are visible from within of the current transaction - List partitionNames = transaction.getMetastore().getPartitionNames(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session, transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 5L); } @@ -4372,11 +4360,10 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche assertTrue(listAllDataFiles(hdfsContext, stagingPathRoot).isEmpty()); // verify statistics have been rolled back - HiveIdentity identity = new HiveIdentity(session); - List partitionNames = transaction.getMetastore().getPartitionNames(identity, tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(session, transaction, tableName, partitionName); + HiveBasicStatistics partitionStatistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); assertEquals(partitionStatistics.getRowCount().getAsLong(), 3L); } } @@ -4385,7 +4372,6 @@ private void doInsertIntoExistingPartition(HiveStorageFormat storageFormat, Sche private void doInsertIntoExistingPartitionEmptyStatistics(HiveStorageFormat storageFormat, SchemaTableName tableName) throws Exception { - ConnectorSession session = newSession(); doCreateEmptyTable(tableName, storageFormat, CREATE_TABLE_COLUMNS_PARTITIONED); 
insertData(tableName, CREATE_TABLE_PARTITIONED_DATA); @@ -4394,11 +4380,11 @@ private void doInsertIntoExistingPartitionEmptyStatistics(HiveStorageFormat stor insertData(tableName, CREATE_TABLE_PARTITIONED_DATA); try (Transaction transaction = newTransaction()) { - List partitionNames = transaction.getMetastore().getPartitionNames(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); for (String partitionName : partitionNames) { - HiveBasicStatistics statistics = getBasicStatisticsForPartition(session, transaction, tableName, partitionName); + HiveBasicStatistics statistics = getBasicStatisticsForPartition(transaction, tableName, partitionName); assertThat(statistics.getRowCount()).isNotPresent(); assertThat(statistics.getInMemoryDataSizeInBytes()).isNotPresent(); // fileCount and rawSize statistics are computed on the fly by the metastore, thus cannot be erased @@ -4406,19 +4392,19 @@ private void doInsertIntoExistingPartitionEmptyStatistics(HiveStorageFormat stor } } - private static HiveBasicStatistics getBasicStatisticsForTable(ConnectorSession session, Transaction transaction, SchemaTableName table) + private static HiveBasicStatistics getBasicStatisticsForTable(Transaction transaction, SchemaTableName table) { return transaction .getMetastore() - .getTableStatistics(new HiveIdentity(session), table.getSchemaName(), table.getTableName()) + .getTableStatistics(table.getSchemaName(), table.getTableName()) .getBasicStatistics(); } - private static HiveBasicStatistics getBasicStatisticsForPartition(ConnectorSession session, Transaction transaction, SchemaTableName table, String partitionName) + private static HiveBasicStatistics getBasicStatisticsForPartition(Transaction transaction, SchemaTableName table, String partitionName) { return transaction .getMetastore() - .getPartitionStatistics(new HiveIdentity(session), table.getSchemaName(), table.getTableName(), ImmutableSet.of(partitionName)) + .getPartitionStatistics(table.getSchemaName(), table.getTableName(), ImmutableSet.of(partitionName)) .get(partitionName) .getBasicStatistics(); } @@ -4426,18 +4412,17 @@ private static HiveBasicStatistics getBasicStatisticsForPartition(ConnectorSessi private void eraseStatistics(SchemaTableName schemaTableName) { HiveMetastore metastoreClient = getMetastoreClient(); - HiveIdentity identity = new HiveIdentity(SESSION); - metastoreClient.updateTableStatistics(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName(), NO_ACID_TRANSACTION, statistics -> new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of())); - Table table = metastoreClient.getTable(identity, schemaTableName.getSchemaName(), schemaTableName.getTableName()) + metastoreClient.updateTableStatistics(schemaTableName.getSchemaName(), schemaTableName.getTableName(), NO_ACID_TRANSACTION, statistics -> new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of())); + Table table = metastoreClient.getTable(schemaTableName.getSchemaName(), schemaTableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(schemaTableName)); List partitionColumns = table.getPartitionColumns().stream() .map(Column::getName) .collect(toImmutableList()); if (!table.getPartitionColumns().isEmpty()) { - List partitionNames = metastoreClient.getPartitionNamesByFilter(identity, 
schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionColumns, TupleDomain.all()) + List partitionNames = metastoreClient.getPartitionNamesByFilter(schemaTableName.getSchemaName(), schemaTableName.getTableName(), partitionColumns, TupleDomain.all()) .orElse(ImmutableList.of()); List partitions = metastoreClient - .getPartitionsByNames(identity, table, partitionNames) + .getPartitionsByNames(table, partitionNames) .entrySet() .stream() .map(Map.Entry::getValue) @@ -4446,7 +4431,6 @@ private void eraseStatistics(SchemaTableName schemaTableName) .collect(toImmutableList()); for (Partition partition : partitions) { metastoreClient.updatePartitionStatistics( - identity, table, makePartName(partitionColumns, partition.getValues()), statistics -> new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of())); @@ -4516,7 +4500,7 @@ private void doTestMetadataDelete(HiveStorageFormat storageFormat, SchemaTableNa metadata.beginQuery(session); // verify partitions were created - List partitionNames = transaction.getMetastore().getPartitionNames(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + List partitionNames = transaction.getMetastore().getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder(partitionNames, CREATE_TABLE_PARTITIONED_DATA.getMaterializedRows().stream() .map(row -> "ds=" + row.getField(CREATE_TABLE_PARTITIONED_DATA.getTypes().size() - 1)) @@ -5215,11 +5199,11 @@ private void alterBucketProperty(SchemaTableName schemaTableName, Optional table = transaction.getMetastore().getTable(new HiveIdentity(session), schemaName, tableName); + Optional
table = transaction.getMetastore().getTable(schemaName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setBucketProperty(bucketProperty); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(tableOwner, session.getUser()); - transaction.getMetastore().replaceTable(new HiveIdentity(session), schemaName, tableName, tableBuilder.build(), principalPrivileges); + transaction.getMetastore().replaceTable(schemaName, tableName, tableBuilder.build(), principalPrivileges); transaction.commit(); } @@ -5508,7 +5492,7 @@ public void testNewDirectoryPermissions() metadata.beginQuery(session); Table table = transaction.getMetastore() - .getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + .getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(); // create new directory and set directory permission after creation @@ -5699,7 +5683,7 @@ private void doTestTransactionDeleteInsert( try (Transaction transaction = newTransaction()) { // verify partitions List partitionNames = transaction.getMetastore() - .getPartitionNames(new HiveIdentity(newSession()), tableName.getSchemaName(), tableName.getTableName()) + .getPartitionNames(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new AssertionError("Table does not exist: " + tableName)); assertEqualsIgnoreOrder( partitionNames, @@ -5813,14 +5797,13 @@ public void triggerConflict(ConnectorSession session, SchemaTableName tableName, // This method bypasses transaction interface because this method is inherently hacky and doesn't work well with the transaction abstraction. // Additionally, this method is not part of a test. Its purpose is to set up an environment for another test. HiveMetastore metastoreClient = getMetastoreClient(); - Table table = metastoreClient.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + Table table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); - Optional partition = metastoreClient.getPartition(new HiveIdentity(session), table, copyPartitionFrom); + Optional partition = metastoreClient.getPartition(table, copyPartitionFrom); conflictPartition = Partition.builder(partition.get()) .setValues(toPartitionValues(partitionNameToConflict)) .build(); metastoreClient.addPartitions( - new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of(new PartitionWithStatistics(conflictPartition, partitionNameToConflict, PartitionStatistics.empty()))); @@ -5832,13 +5815,13 @@ public void verifyAndCleanup(ConnectorSession session, SchemaTableName tableName // This method bypasses transaction interface because this method is inherently hacky and doesn't work well with the transaction abstraction. // Additionally, this method is not part of a test. Its purpose is to set up an environment for another test. 
HiveMetastore metastoreClient = getMetastoreClient(); - Table table = metastoreClient.getTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName()) + Table table = metastoreClient.getTable(tableName.getSchemaName(), tableName.getTableName()) .orElseThrow(() -> new TableNotFoundException(tableName)); - Optional actualPartition = metastoreClient.getPartition(new HiveIdentity(session), table, toPartitionValues(partitionNameToConflict)); + Optional actualPartition = metastoreClient.getPartition(table, toPartitionValues(partitionNameToConflict)); // Make sure the partition inserted to trigger conflict was not overwritten // Checking storage location is sufficient because implement never uses .../pk1=a/pk2=a2 as the directory for partition [b, b2]. assertEquals(actualPartition.get().getStorage().getLocation(), conflictPartition.getStorage().getLocation()); - metastoreClient.dropPartition(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName(), conflictPartition.getValues(), false); + metastoreClient.dropPartition(tableName.getSchemaName(), tableName.getTableName(), conflictPartition.getValues(), false); } } @@ -5853,7 +5836,7 @@ public void triggerConflict(ConnectorSession session, SchemaTableName tableName, // This method bypasses transaction interface because this method is inherently hacky and doesn't work well with the transaction abstraction. // Additionally, this method is not part of a test. Its purpose is to set up an environment for another test. HiveMetastore metastoreClient = getMetastoreClient(); - metastoreClient.dropPartition(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName(), partitionValueToConflict, false); + metastoreClient.dropPartition(tableName.getSchemaName(), tableName.getTableName(), partitionValueToConflict, false); } @Override diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java index aceafab0e8a5..c5f549248ec5 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveFileSystem.java @@ -559,32 +559,32 @@ public Optional getDatabase(String databaseName) } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges privileges) + public void createTable(Table table, PrincipalPrivileges privileges) { // hack to work around the metastore not being configured for S3 or other FS Table.Builder tableBuilder = Table.builder(table); tableBuilder.getStorageBuilder().setLocation("/"); - super.createTable(identity, tableBuilder.build(), privileges); + super.createTable(tableBuilder.build(), privileges); } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { try { - Optional
<Table> table = getTable(identity, databaseName, tableName); + Optional<Table>
table = getTable(databaseName, tableName); if (table.isEmpty()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } // hack to work around the metastore not being configured for S3 or other FS - List locations = listAllDataPaths(identity, databaseName, tableName); + List locations = listAllDataPaths(databaseName, tableName); Table.Builder tableBuilder = Table.builder(table.get()); tableBuilder.getStorageBuilder().setLocation("/"); // drop table - replaceTable(identity, databaseName, tableName, tableBuilder.build(), NO_PRIVILEGES); - super.dropTable(identity, databaseName, tableName, false); + replaceTable(databaseName, tableName, tableBuilder.build(), NO_PRIVILEGES); + super.dropTable(databaseName, tableName, false); // drop data if (deleteData) { @@ -601,8 +601,7 @@ public void dropTable(HiveIdentity identity, String databaseName, String tableNa public void updateTableLocation(String databaseName, String tableName, String location) { - HiveIdentity identity = new HiveIdentity(TESTING_CONTEXT.getIdentity()); - Optional
<Table> table = getTable(identity, databaseName, tableName); + Optional<Table>
table = getTable(databaseName, tableName); if (table.isEmpty()) { throw new TableNotFoundException(new SchemaTableName(databaseName, tableName)); } @@ -611,13 +610,13 @@ public void updateTableLocation(String databaseName, String tableName, String lo tableBuilder.getStorageBuilder().setLocation(location); // NOTE: this clears the permissions - replaceTable(identity, databaseName, tableName, tableBuilder.build(), NO_PRIVILEGES); + replaceTable(databaseName, tableName, tableBuilder.build(), NO_PRIVILEGES); } - private List listAllDataPaths(HiveIdentity identity, String schemaName, String tableName) + private List listAllDataPaths(String schemaName, String tableName) { ImmutableList.Builder locations = ImmutableList.builder(); - Table table = getTable(identity, schemaName, tableName).get(); + Table table = getTable(schemaName, tableName).get(); List partitionColumnNames = table.getPartitionColumns().stream().map(Column::getName).collect(toImmutableList()); if (table.getStorage().getLocation() != null) { // For partitioned table, there should be nothing directly under this directory. @@ -626,9 +625,9 @@ private List listAllDataPaths(HiveIdentity identity, String schemaName, locations.add(table.getStorage().getLocation()); } - Optional> partitionNames = getPartitionNamesByFilter(identity, schemaName, tableName, partitionColumnNames, TupleDomain.all()); + Optional> partitionNames = getPartitionNamesByFilter(schemaName, tableName, partitionColumnNames, TupleDomain.all()); if (partitionNames.isPresent()) { - getPartitionsByNames(identity, table, partitionNames.get()).values().stream() + getPartitionsByNames(table, partitionNames.get()).values().stream() .map(Optional::get) .map(partition -> partition.getStorage().getLocation()) .filter(location -> !location.startsWith(table.getStorage().getLocation())) diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java index 2203b600c525..0f59a9006d05 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/AbstractTestHiveLocal.java @@ -71,7 +71,7 @@ public abstract class AbstractTestHiveLocal { private static final Logger log = Logger.get(AbstractTestHiveLocal.class); private static final String DEFAULT_TEST_DB_NAME = "test"; - private static final HiveIdentity HIVE_IDENTITY = new HiveIdentity(SESSION); + private static final HiveIdentity HIVE_IDENTITY = new HiveIdentity(SESSION.getIdentity()); private File tempDir; private final String testDbName; @@ -95,7 +95,7 @@ public void initialize() HiveMetastore metastore = createMetastore(tempDir, HIVE_IDENTITY); - metastore.createDatabase(HIVE_IDENTITY, + metastore.createDatabase( Database.builder() .setDatabaseName(testDbName) .setOwnerName(Optional.of("public")) @@ -114,7 +114,7 @@ public void cleanup() throws IOException { try { - getMetastoreClient().dropDatabase(HIVE_IDENTITY, testDbName, true); + getMetastoreClient().dropDatabase(testDbName, true); } finally { deleteRecursively(tempDir.toPath(), ALLOW_INSECURE); @@ -222,9 +222,9 @@ private void markTableAsCreatedBySpark(SchemaTableName tableName, String provide try (Transaction transaction = newTransaction()) { ConnectorSession session = newSession(); PrincipalPrivileges principalPrivileges = testingPrincipalPrivilege(session); - Table oldTable = transaction.getMetastore().getTable(new HiveIdentity(session), tableName.getSchemaName(), 
tableName.getTableName()).get(); + Table oldTable = transaction.getMetastore().getTable(tableName.getSchemaName(), tableName.getTableName()).get(); Table.Builder newTable = Table.builder(oldTable).setParameter(SPARK_TABLE_PROVIDER_KEY, provider); - transaction.getMetastore().replaceTable(new HiveIdentity(session), tableName.getSchemaName(), tableName.getTableName(), newTable.build(), principalPrivileges); + transaction.getMetastore().replaceTable(tableName.getSchemaName(), tableName.getTableName(), newTable.build(), principalPrivileges); transaction.commit(); } } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java index 31bf7ddd227d..5207baadf658 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/BaseTestHiveOnDataLake.java @@ -264,14 +264,13 @@ private void renamePartitionResourcesOutsideTrino(String tableName, String parti }); // Delete old partition and update metadata to point to location of new copy - HiveIdentity hiveIdentity = HiveIdentity.none(); - Table hiveTable = metastoreClient.getTable(hiveIdentity, HIVE_TEST_SCHEMA, tableName).get(); - Partition hivePartition = metastoreClient.getPartition(hiveIdentity, hiveTable, List.of(regionKey)).get(); + Table hiveTable = metastoreClient.getTable(HIVE_TEST_SCHEMA, tableName).get(); + Partition hivePartition = metastoreClient.getPartition(hiveTable, List.of(regionKey)).get(); Map partitionStatistics = - metastoreClient.getPartitionStatistics(hiveIdentity, hiveTable, List.of(hivePartition)); + metastoreClient.getPartitionStatistics(hiveTable, List.of(hivePartition)); - metastoreClient.dropPartition(hiveIdentity, HIVE_TEST_SCHEMA, tableName, List.of(regionKey), true); - metastoreClient.addPartitions(hiveIdentity, HIVE_TEST_SCHEMA, tableName, List.of( + metastoreClient.dropPartition(HIVE_TEST_SCHEMA, tableName, List.of(regionKey), true); + metastoreClient.addPartitions(HIVE_TEST_SCHEMA, tableName, List.of( new PartitionWithStatistics( Partition.builder(hivePartition) .withStorage(builder -> builder.setLocation( diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveBenchmarkQueryRunner.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveBenchmarkQueryRunner.java index d24158711502..d45d19eb5c6d 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveBenchmarkQueryRunner.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveBenchmarkQueryRunner.java @@ -17,7 +17,6 @@ import com.google.common.io.Files; import io.trino.Session; import io.trino.benchmark.BenchmarkSuite; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.tpch.TpchConnectorFactory; @@ -32,7 +31,6 @@ import static com.google.common.io.MoreFiles.deleteRecursively; import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE; import static io.trino.plugin.hive.metastore.file.FileHiveMetastore.createTestingFileHiveMetastore; -import static io.trino.testing.TestingConnectorSession.SESSION; import static io.trino.testing.TestingSession.testSessionBuilder; import static java.util.Objects.requireNonNull; @@ -69,8 +67,7 @@ public static LocalQueryRunner createLocalQueryRunner(File tempDir) File hiveDir = new File(tempDir, "hive_data"); HiveMetastore metastore = 
createTestingFileHiveMetastore(hiveDir); - HiveIdentity identity = new HiveIdentity(SESSION); - metastore.createDatabase(identity, + metastore.createDatabase( Database.builder() .setDatabaseName("tpch") .setOwnerName(Optional.of("public")) diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java index 34a559f9ce47..68dbe43da20c 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/HiveQueryRunner.java @@ -20,7 +20,6 @@ import io.airlift.log.Logging; import io.trino.Session; import io.trino.metadata.QualifiedObjectName; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.MetastoreConfig; @@ -53,7 +52,6 @@ import static io.trino.plugin.tpch.TpchMetadata.TINY_SCHEMA_NAME; import static io.trino.spi.security.SelectedRole.Type.ROLE; import static io.trino.testing.QueryAssertions.copyTpchTables; -import static io.trino.testing.TestingConnectorSession.SESSION; import static io.trino.testing.TestingSession.testSessionBuilder; import static java.lang.String.format; import static java.nio.file.Files.createDirectories; @@ -216,15 +214,14 @@ public DistributedQueryRunner build() private void populateData(DistributedQueryRunner queryRunner, HiveMetastore metastore) { - HiveIdentity identity = new HiveIdentity(SESSION); if (metastore.getDatabase(TPCH_SCHEMA).isEmpty()) { - metastore.createDatabase(identity, createDatabaseMetastoreObject(TPCH_SCHEMA, initialSchemasLocationBase)); + metastore.createDatabase(createDatabaseMetastoreObject(TPCH_SCHEMA, initialSchemasLocationBase)); Session session = initialTablesSessionMutator.apply(createSession(Optional.empty())); copyTpchTables(queryRunner, "tpch", TINY_SCHEMA_NAME, session, initialTables); } if (metastore.getDatabase(TPCH_BUCKETED_SCHEMA).isEmpty()) { - metastore.createDatabase(identity, createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA, initialSchemasLocationBase)); + metastore.createDatabase(createDatabaseMetastoreObject(TPCH_BUCKETED_SCHEMA, initialSchemasLocationBase)); Session session = initialTablesSessionMutator.apply(createBucketedSession(Optional.empty())); copyTpchTablesBucketed(queryRunner, "tpch", TINY_SCHEMA_NAME, session, initialTables); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java index 1efe77001823..27e436ac033a 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/TestHivePageSink.java @@ -20,7 +20,6 @@ import io.airlift.json.JsonCodec; import io.airlift.slice.Slices; import io.trino.operator.GroupByHashPageIndexerFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastore; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.hive.metastore.HivePageSinkMetadata; @@ -260,14 +259,12 @@ private static ConnectorPageSource createPageSource(HiveTransactionHandle transa private static ConnectorPageSink createPageSink(HiveTransactionHandle transaction, HiveConfig config, HiveMetastore metastore, Path outputPath, HiveWriterStats stats) { - ConnectorSession session = getHiveSession(config); - HiveIdentity 
identity = new HiveIdentity(session); LocationHandle locationHandle = new LocationHandle(outputPath, outputPath, false, DIRECT_TO_TARGET_NEW_DIRECTORY); HiveOutputTableHandle handle = new HiveOutputTableHandle( SCHEMA_NAME, TABLE_NAME, getColumnHandles(), - new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(identity, SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()), + new HivePageSinkMetadata(new SchemaTableName(SCHEMA_NAME, TABLE_NAME), metastore.getTable(SCHEMA_NAME, TABLE_NAME), ImmutableMap.of()), locationHandle, config.getHiveStorageFormat(), config.getHiveStorageFormat(), diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/TestSemiTransactionalHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/TestSemiTransactionalHiveMetastore.java index 2109ace5016d..83565c49b798 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/TestSemiTransactionalHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/TestSemiTransactionalHiveMetastore.java @@ -20,7 +20,6 @@ import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import org.apache.hadoop.fs.Path; import org.testng.annotations.Test; @@ -133,7 +132,7 @@ private class TestingHiveMetastore extends UnimplementedHiveMetastore { @Override - public Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional<Table>
getTable(String databaseName, String tableName) { if (databaseName.equals("database")) { return Optional.of(new Table( @@ -153,19 +152,22 @@ public Optional<Table>
getTable(HiveIdentity identity, String databaseName, Stri } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(Table table) { return new PartitionStatistics(createEmptyStatistics(), ImmutableMap.of()); } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { assertCountDownLatch(); } @Override - public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public void updateTableStatistics(String databaseName, + String tableName, + AcidTransaction transaction, + Function update) { assertCountDownLatch(); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java index 09f998c86a42..e280d9d919a5 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/UnimplementedHiveMetastore.java @@ -16,7 +16,6 @@ import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.acid.AcidTransaction; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HivePrivilegeInfo.HivePrivilege; import io.trino.spi.predicate.TupleDomain; import io.trino.spi.security.RoleGrant; @@ -45,7 +44,7 @@ public List getAllDatabases() } @Override - public Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional<Table>
getTable(String databaseName, String tableName) { throw new UnsupportedOperationException(); } @@ -57,25 +56,28 @@ public Set getSupportedColumnStatistics(Type type) } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(Table table) { throw new UnsupportedOperationException(); } @Override - public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { throw new UnsupportedOperationException(); } @Override - public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function update) + public void updateTableStatistics(String databaseName, + String tableName, + AcidTransaction transaction, + Function update) { throw new UnsupportedOperationException(); } @Override - public void updatePartitionStatistics(HiveIdentity identity, Table table, Map> updates) + public void updatePartitionStatistics(Table table, Map> updates) { throw new UnsupportedOperationException(); } @@ -99,121 +101,124 @@ public List getAllViews(String databaseName) } @Override - public void createDatabase(HiveIdentity identity, Database database) + public void createDatabase(Database database) { throw new UnsupportedOperationException(); } @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { throw new UnsupportedOperationException(); } @Override - public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName) + public void renameDatabase(String databaseName, String newDatabaseName) { throw new UnsupportedOperationException(); } @Override - public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal) + public void setDatabaseOwner(String databaseName, HivePrincipal principal) { throw new UnsupportedOperationException(); } @Override - public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal) + public void setTableOwner(String databaseName, String tableName, HivePrincipal principal) { throw new UnsupportedOperationException(); } @Override - public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges) + public void createTable(Table table, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException(); } @Override - public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData) + public void dropTable(String databaseName, String tableName, boolean deleteData) { throw new UnsupportedOperationException(); } @Override - public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) + public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges) { throw new UnsupportedOperationException(); } @Override - public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName) + public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName) { throw new UnsupportedOperationException(); } @Override - public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional comment) + public void 
commentTable(String databaseName, String tableName, Optional comment) { throw new UnsupportedOperationException(); } @Override - public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional comment) + public void commentColumn(String databaseName, String tableName, String columnName, Optional comment) { throw new UnsupportedOperationException(); } @Override - public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) + public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment) { throw new UnsupportedOperationException(); } @Override - public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName) + public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName) { throw new UnsupportedOperationException(); } @Override - public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName) + public void dropColumn(String databaseName, String tableName, String columnName) { throw new UnsupportedOperationException(); } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { throw new UnsupportedOperationException(); } @Override - public Optional> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) + public Optional> getPartitionNamesByFilter(String databaseName, + String tableName, + List columnNames, + TupleDomain partitionKeysFilter) { throw new UnsupportedOperationException(); } @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { throw new UnsupportedOperationException(); } @Override - public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List partitions) + public void addPartitions(String databaseName, String tableName, List partitions) { throw new UnsupportedOperationException(); } @Override - public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List parts, boolean deleteData) + public void dropPartition(String databaseName, String tableName, List parts, boolean deleteData) { throw new UnsupportedOperationException(); } @Override - public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition) + public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition) { throw new UnsupportedOperationException(); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java index 971eccdb603c..e77ef487481e 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastore.java @@ -107,7 +107,7 @@ public class TestCachingHiveMetastore { private static final Logger log = Logger.get(TestCachingHiveMetastore.class); - private static final HiveIdentity IDENTITY = new HiveIdentity(SESSION); 
+ private static final HiveIdentity IDENTITY = new HiveIdentity(SESSION.getIdentity()); private static final PartitionStatistics TEST_STATS = PartitionStatistics.builder() .setColumnStatistics(ImmutableMap.of(TEST_COLUMN, createIntegerColumnStatistics(OptionalLong.empty(), OptionalLong.empty(), OptionalLong.empty(), OptionalLong.empty()))) .build(); @@ -125,7 +125,6 @@ public void setUp() executor = listeningDecorator(newCachedThreadPool(daemonThreadsNamed(getClass().getSimpleName() + "-%s"))); metastore = cachingHiveMetastore( new BridgingHiveMetastore(thriftHiveMetastore, IDENTITY), - IDENTITY, executor, new Duration(5, TimeUnit.MINUTES), Optional.of(new Duration(1, TimeUnit.MINUTES)), @@ -195,16 +194,16 @@ public void testInvalidDbGetAllTAbles() public void testGetTable() { assertEquals(mockClient.getAccessCount(), 0); - assertNotNull(metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 1); - assertNotNull(metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 1); assertEquals(metastore.getTableStats().getRequestCount(), 2); assertEquals(metastore.getTableStats().getHitRate(), 0.5); metastore.flushCache(); - assertNotNull(metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 2); assertEquals(metastore.getTableStats().getRequestCount(), 3); assertEquals(metastore.getTableStats().getHitRate(), 1.0 / 3); @@ -214,12 +213,12 @@ public void testGetTable() public void testSetTableAuthorization() { assertEquals(mockClient.getAccessCount(), 0); - assertNotNull(metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); assertNotNull(metastore.getDatabase(TEST_DATABASE)); assertEquals(mockClient.getAccessCount(), 2); - metastore.setTableOwner(IDENTITY, TEST_DATABASE, TEST_TABLE, new HivePrincipal(USER, "ignore")); + metastore.setTableOwner(TEST_DATABASE, TEST_TABLE, new HivePrincipal(USER, "ignore")); assertEquals(mockClient.getAccessCount(), 3); - assertNotNull(metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE)); + assertNotNull(metastore.getTable(TEST_DATABASE, TEST_TABLE)); assertEquals(mockClient.getAccessCount(), 4); // Assert that database cache has not been invalidated assertNotNull(metastore.getDatabase(TEST_DATABASE)); @@ -229,7 +228,7 @@ public void testSetTableAuthorization() @Test public void testInvalidDbGetTable() { - assertFalse(metastore.getTable(IDENTITY, BAD_DATABASE, TEST_TABLE).isPresent()); + assertFalse(metastore.getTable(BAD_DATABASE, TEST_TABLE).isPresent()); assertEquals(stats.getGetTable().getThriftExceptions().getTotalCount(), 0); assertEquals(stats.getGetTable().getTotalFailures().getTotalCount(), 0); @@ -240,21 +239,21 @@ public void testGetPartitionNames() { ImmutableList expectedPartitions = ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2); assertEquals(mockClient.getAccessCount(), 0); - assertEquals(metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); - 
assertEquals(metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); metastore.flushCache(); - assertEquals(metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 2); } @Test public void testInvalidGetPartitionNamesByFilterAll() { - assertTrue(metastore.getPartitionNamesByFilter(IDENTITY, BAD_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).isEmpty()); + assertTrue(metastore.getPartitionNamesByFilter(BAD_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).isEmpty()); } @Test @@ -263,16 +262,16 @@ public void testGetPartitionNamesByParts() ImmutableList expectedPartitions = ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2); assertEquals(mockClient.getAccessCount(), 0); - assertEquals(metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 1); assertEquals(metastore.getPartitionFilterStats().getRequestCount(), 2); assertEquals(metastore.getPartitionFilterStats().getHitRate(), 0.5); metastore.flushCache(); - assertEquals(metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); + assertEquals(metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).get(), expectedPartitions); assertEquals(mockClient.getAccessCount(), 2); assertEquals(metastore.getPartitionFilterStats().getRequestCount(), 3); assertEquals(metastore.getPartitionFilterStats().getHitRate(), 1.0 / 3); @@ -301,46 +300,46 @@ public void testGetPartitionNamesByParts() .buildOrThrow())); assertEquals(stats.getGetPartitionNamesByParts().getTime().getAllTime().getCount(), 0.0); - metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, partitionColumnNames, withNoFilter); + metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, partitionColumnNames, withNoFilter); assertEquals(stats.getGetPartitionNamesByParts().getTime().getAllTime().getCount(), 0.0); - metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, partitionColumnNames, withSingleValueFilter); + metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, partitionColumnNames, withSingleValueFilter); assertEquals(stats.getGetPartitionNamesByParts().getTime().getAllTime().getCount(), 1.0); - metastore.getPartitionNamesByFilter(IDENTITY, TEST_DATABASE, TEST_TABLE, partitionColumnNames, 
withNoSingleValueFilter); + metastore.getPartitionNamesByFilter(TEST_DATABASE, TEST_TABLE, partitionColumnNames, withNoSingleValueFilter); assertEquals(stats.getGetPartitionNamesByParts().getTime().getAllTime().getCount(), 2.0); } @Test public void testInvalidGetPartitionNamesByParts() { - assertFalse(metastore.getPartitionNamesByFilter(IDENTITY, BAD_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).isPresent()); + assertFalse(metastore.getPartitionNamesByFilter(BAD_DATABASE, TEST_TABLE, PARTITION_COLUMN_NAMES, TupleDomain.all()).isPresent()); } @Test public void testGetPartitionsByNames() { assertEquals(mockClient.getAccessCount(), 0); - Table table = metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE).get(); + Table table = metastore.getTable(TEST_DATABASE, TEST_TABLE).get(); assertEquals(mockClient.getAccessCount(), 1); // Select half of the available partitions and load them into the cache - assertEquals(metastore.getPartitionsByNames(IDENTITY, table, ImmutableList.of(TEST_PARTITION1)).size(), 1); + assertEquals(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1)).size(), 1); assertEquals(mockClient.getAccessCount(), 2); // Now select all of the partitions - assertEquals(metastore.getPartitionsByNames(IDENTITY, table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); // There should be one more access to fetch the remaining partition assertEquals(mockClient.getAccessCount(), 3); // Now if we fetch any or both of them, they should not hit the client - assertEquals(metastore.getPartitionsByNames(IDENTITY, table, ImmutableList.of(TEST_PARTITION1)).size(), 1); - assertEquals(metastore.getPartitionsByNames(IDENTITY, table, ImmutableList.of(TEST_PARTITION2)).size(), 1); - assertEquals(metastore.getPartitionsByNames(IDENTITY, table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1)).size(), 1); + assertEquals(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION2)).size(), 1); + assertEquals(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); assertEquals(mockClient.getAccessCount(), 3); metastore.flushCache(); // Fetching both should only result in one batched access - assertEquals(metastore.getPartitionsByNames(IDENTITY, table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); + assertEquals(metastore.getPartitionsByNames(table, ImmutableList.of(TEST_PARTITION1, TEST_PARTITION2)).size(), 2); assertEquals(mockClient.getAccessCount(), 5); } @@ -382,10 +381,10 @@ public void testGetTableStatistics() { assertEquals(mockClient.getAccessCount(), 0); - Table table = metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE).get(); + Table table = metastore.getTable(TEST_DATABASE, TEST_TABLE).get(); assertEquals(mockClient.getAccessCount(), 1); - assertEquals(metastore.getTableStatistics(IDENTITY, table), TEST_STATS); + assertEquals(metastore.getTableStatistics(table), TEST_STATS); assertEquals(mockClient.getAccessCount(), 2); assertEquals(metastore.getTableStatisticsStats().getRequestCount(), 1); @@ -400,13 +399,13 @@ public void testGetPartitionStatistics() { assertEquals(mockClient.getAccessCount(), 0); - Table table = metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE).get(); + Table table = metastore.getTable(TEST_DATABASE, TEST_TABLE).get(); 
assertEquals(mockClient.getAccessCount(), 1); - Partition partition = metastore.getPartition(IDENTITY, table, TEST_PARTITION_VALUES1).get(); + Partition partition = metastore.getPartition(table, TEST_PARTITION_VALUES1).get(); assertEquals(mockClient.getAccessCount(), 2); - assertEquals(metastore.getPartitionStatistics(IDENTITY, table, ImmutableList.of(partition)), ImmutableMap.of(TEST_PARTITION1, TEST_STATS)); + assertEquals(metastore.getPartitionStatistics(table, ImmutableList.of(partition)), ImmutableMap.of(TEST_PARTITION1, TEST_STATS)); assertEquals(mockClient.getAccessCount(), 3); assertEquals(metastore.getPartitionStatisticsStats().getRequestCount(), 1); @@ -426,18 +425,18 @@ public void testUpdatePartitionStatistics() HiveMetastoreClosure hiveMetastoreClosure = new HiveMetastoreClosure(metastore); - Table table = hiveMetastoreClosure.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE).get(); + Table table = hiveMetastoreClosure.getTable(TEST_DATABASE, TEST_TABLE).get(); assertEquals(mockClient.getAccessCount(), 1); - hiveMetastoreClosure.updatePartitionStatistics(IDENTITY, table.getDatabaseName(), table.getTableName(), TEST_PARTITION1, identity()); + hiveMetastoreClosure.updatePartitionStatistics(table.getDatabaseName(), table.getTableName(), TEST_PARTITION1, identity()); assertEquals(mockClient.getAccessCount(), 5); } @Test public void testInvalidGetPartitionsByNames() { - Table table = metastore.getTable(IDENTITY, TEST_DATABASE, TEST_TABLE).get(); - Map> partitionsByNames = metastore.getPartitionsByNames(IDENTITY, table, ImmutableList.of(BAD_PARTITION)); + Table table = metastore.getTable(TEST_DATABASE, TEST_TABLE).get(); + Map> partitionsByNames = metastore.getPartitionsByNames(table, ImmutableList.of(BAD_PARTITION)); assertEquals(partitionsByNames.size(), 1); Optional onlyElement = Iterables.getOnlyElement(partitionsByNames.values()); assertFalse(onlyElement.isPresent()); @@ -481,7 +480,6 @@ public void testCachingHiveMetastoreCreationViaMemoize() ThriftHiveMetastore thriftHiveMetastore = createThriftHiveMetastore(); metastore = memoizeMetastore( new BridgingHiveMetastore(thriftHiveMetastore, IDENTITY), - IDENTITY, 1000); assertEquals(mockClient.getAccessCount(), 0); @@ -536,7 +534,7 @@ public void testLoadAfterInvalidate(boolean invalidateAll) HiveMetastore mockMetastore = new UnimplementedHiveMetastore() { @Override - public Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional<Table>
getTable(String databaseName, String tableName) { Optional<Table>
table = Optional.of(Table.builder() .setDatabaseName(databaseName) @@ -556,7 +554,7 @@ public Optional<Table>
getTable(HiveIdentity identity, String databaseName, Stri } @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { Map> result = new HashMap<>(); for (String partitionName : partitionNames) { @@ -573,7 +571,6 @@ public Map> getPartitionsByNames(HiveIdentity identi // Caching metastore metastore = cachingHiveMetastore( mockMetastore, - IDENTITY, executor, new Duration(5, TimeUnit.MINUTES), Optional.of(new Duration(1, TimeUnit.MINUTES)), @@ -586,10 +583,10 @@ public Map> getPartitionsByNames(HiveIdentity identi try { Table table; - table = metastore.getTable(IDENTITY, databaseName, tableName).orElseThrow(); + table = metastore.getTable(databaseName, tableName).orElseThrow(); getTableFinishedLatch.countDown(); // 3 - metastore.getPartitionsByNames(IDENTITY, table, partitionNames); + metastore.getPartitionsByNames(table, partitionNames); getPartitionsByNamesFinishedLatch.countDown(); // 6 return (Void) null; @@ -610,7 +607,7 @@ public Map> getPartitionsByNames(HiveIdentity identi } getTableReturnLatch.countDown(); // 2 await(getTableFinishedLatch, 10, SECONDS); // 3 - Table table = metastore.getTable(IDENTITY, databaseName, tableName).orElseThrow(); + Table table = metastore.getTable(databaseName, tableName).orElseThrow(); assertThat(table.getParameters()) .isEqualTo(Map.of("frequent-changing-table-parameter", "main-thread-put-xyz")); @@ -629,7 +626,7 @@ public Map> getPartitionsByNames(HiveIdentity identi } getPartitionsByNamesReturnLatch.countDown(); // 5 await(getPartitionsByNamesFinishedLatch, 10, SECONDS); // 6 - Map> loadedPartitions = metastore.getPartitionsByNames(IDENTITY, table, partitionNames); + Map> loadedPartitions = metastore.getPartitionsByNames(table, partitionNames); assertThat(loadedPartitions.get(partitionName)) .isNotNull() .isPresent() @@ -667,7 +664,6 @@ private CachingHiveMetastore createMetastoreWithDirectExecutor(CachingHiveMetast { return cachingHiveMetastore( new BridgingHiveMetastore(createThriftHiveMetastore(), IDENTITY), - IDENTITY, directExecutor(), config.getMetastoreCacheTtl(), config.getMetastoreRefreshInterval(), diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java index 3621e93b83a3..9b9a2114ff32 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/cache/TestCachingHiveMetastoreWithQueryRunner.java @@ -35,7 +35,6 @@ import static com.google.common.collect.Lists.cartesianProduct; import static com.google.common.io.MoreFiles.deleteRecursively; import static com.google.common.io.RecursiveDeleteOption.ALLOW_INSECURE; -import static io.trino.plugin.hive.authentication.HiveIdentity.none; import static io.trino.plugin.hive.metastore.file.FileHiveMetastore.createTestingFileHiveMetastore; import static io.trino.spi.security.SelectedRole.Type.ROLE; import static io.trino.testing.TestingSession.testSessionBuilder; @@ -129,7 +128,7 @@ public void testFlushHiveMetastoreCacheProcedureCallable() getQueryRunner().execute("SELECT initial FROM cached"); // Rename column name in Metastore outside Trino - fileHiveMetastore.renameColumn(none(), "test", "cached", "initial", "renamed"); + fileHiveMetastore.renameColumn("test", "cached", 
"initial", "renamed"); String renamedColumnQuery = "SELECT renamed FROM cached"; // Should fail as Trino has old metadata cached diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java index 9526fe518352..7d2efe2e6e06 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/glue/TestHiveGlueMetastore.java @@ -30,7 +30,6 @@ import io.trino.plugin.hive.AbstractTestHiveLocal; import io.trino.plugin.hive.HiveBasicStatistics; import io.trino.plugin.hive.HiveMetastoreClosure; -import io.trino.plugin.hive.HiveTestUtils; import io.trino.plugin.hive.HiveType; import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.authentication.HiveIdentity; @@ -97,7 +96,6 @@ import static io.trino.spi.statistics.ColumnStatisticType.NUMBER_OF_NON_NULL_VALUES; import static io.trino.spi.type.BigintType.BIGINT; import static io.trino.spi.type.VarcharType.VARCHAR; -import static io.trino.testing.TestingConnectorSession.SESSION; import static java.lang.String.format; import static java.lang.System.currentTimeMillis; import static java.util.Locale.ENGLISH; @@ -120,7 +118,6 @@ public class TestHiveGlueMetastore { private static final Logger log = Logger.get(TestHiveGlueMetastore.class); - private static final HiveIdentity HIVE_IDENTITY = new HiveIdentity(SESSION); private static final String PARTITION_KEY = "part_key_1"; private static final String PARTITION_KEY2 = "part_key_2"; private static final String TEST_DATABASE_NAME_PREFIX = "test_glue"; @@ -290,7 +287,6 @@ public void testGetPartitions() createDummyPartitionedTable(tableName, CREATE_TABLE_COLUMNS_PARTITIONED); HiveMetastore metastoreClient = getMetastoreClient(); Optional> partitionNames = metastoreClient.getPartitionNamesByFilter( - HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), ImmutableList.of("ds"), TupleDomain.all()); @@ -926,7 +922,7 @@ public void testStatisticsLongColumnNames() doCreateEmptyTable(tableName, ORC, columns); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(EMPTY_TABLE_STATISTICS); testUpdateTableStatistics(tableName, EMPTY_TABLE_STATISTICS, partitionStatistics); } @@ -957,7 +953,6 @@ public void testStatisticsColumnModification() // set table statistics for column1 metastore.updateTableStatistics( - HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), NO_ACID_TRANSACTION, @@ -966,26 +961,26 @@ public void testStatisticsColumnModification() return partitionStatistics; }); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(partitionStatistics); - metastore.renameColumn(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), "column1", "column4"); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + metastore.renameColumn(tableName.getSchemaName(), tableName.getTableName(), "column1", "column4"); + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(new 
PartitionStatistics( HIVE_BASIC_STATISTICS, Map.of("column2", INTEGER_COLUMN_STATISTICS))); - metastore.dropColumn(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), "column2"); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + metastore.dropColumn(tableName.getSchemaName(), tableName.getTableName(), "column2"); + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(new PartitionStatistics(HIVE_BASIC_STATISTICS, Map.of())); - metastore.addColumn(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), "column5", HiveType.HIVE_INT, "comment"); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + metastore.addColumn(tableName.getSchemaName(), tableName.getTableName(), "column5", HiveType.HIVE_INT, "comment"); + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(new PartitionStatistics(HIVE_BASIC_STATISTICS, Map.of())); // TODO: column1 stats should be removed on column delete. However this is tricky since stats can be stored in multiple partitions. - metastore.renameColumn(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), "column4", "column1"); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + metastore.renameColumn(tableName.getSchemaName(), tableName.getTableName(), "column4", "column1"); + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(new PartitionStatistics( HIVE_BASIC_STATISTICS, Map.of("column1", INTEGER_COLUMN_STATISTICS))); @@ -1014,26 +1009,26 @@ public void testStatisticsPartitionedTableColumnModification() .setColumnStatistics(columnStatistics).build(); createDummyPartitionedTable(tableName, columns); - metastore.updatePartitionStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-01", actualStatistics -> partitionStatistics); + metastore.updatePartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), "ds=2016-01-01", actualStatistics -> partitionStatistics); PartitionStatistics tableStatistics = new PartitionStatistics(createEmptyStatistics(), Map.of()); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(tableStatistics); - assertThat(metastore.getPartitionStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), Set.of("ds=2016-01-01"))) + assertThat(metastore.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), Set.of("ds=2016-01-01"))) .isEqualTo(Map.of("ds=2016-01-01", partitionStatistics)); // renaming table column does not rename partition columns - metastore.renameColumn(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), "column1", "column4"); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + metastore.renameColumn(tableName.getSchemaName(), tableName.getTableName(), "column1", "column4"); + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(tableStatistics); - assertThat(metastore.getPartitionStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), 
Set.of("ds=2016-01-01"))) + assertThat(metastore.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), Set.of("ds=2016-01-01"))) .isEqualTo(Map.of("ds=2016-01-01", partitionStatistics)); // dropping table column does not drop partition columns - metastore.dropColumn(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), "column2"); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + metastore.dropColumn(tableName.getSchemaName(), tableName.getTableName(), "column2"); + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(tableStatistics); - assertThat(metastore.getPartitionStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), Set.of("ds=2016-01-01"))) + assertThat(metastore.getPartitionStatistics(tableName.getSchemaName(), tableName.getTableName(), Set.of("ds=2016-01-01"))) .isEqualTo(Map.of("ds=2016-01-01", partitionStatistics)); } finally { @@ -1060,7 +1055,6 @@ public void testInvalidColumnStatisticsMetadata() // set table statistics for column1 metastore.updateTableStatistics( - HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), NO_ACID_TRANSACTION, @@ -1069,7 +1063,7 @@ public void testInvalidColumnStatisticsMetadata() return partitionStatistics; }); - Table table = metastore.getTable(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName()).get(); + Table table = metastore.getTable(tableName.getSchemaName(), tableName.getTableName()).get(); TableInput tableInput = GlueInputConverter.convertTable(table); tableInput.setParameters(ImmutableMap.builder() .putAll(tableInput.getParameters()) @@ -1079,7 +1073,7 @@ public void testInvalidColumnStatisticsMetadata() .withDatabaseName(tableName.getSchemaName()) .withTableInput(tableInput)); - assertThat(metastore.getTableStatistics(HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName())) + assertThat(metastore.getTableStatistics(tableName.getSchemaName(), tableName.getTableName())) .isEqualTo(partitionStatistics); } finally { @@ -1137,7 +1131,6 @@ private void doGetPartitionsFilterTest( .collect(toImmutableList()); Optional> partitionNames = metastoreClient.getPartitionNamesByFilter( - HIVE_IDENTITY, tableName.getSchemaName(), tableName.getTableName(), partitionColumnNames, @@ -1157,8 +1150,7 @@ private void createDummyPartitionedTable(SchemaTableName tableName, List new TableNotFoundException(tableName)); List partitions = new ArrayList<>(); List partitionNames = new ArrayList<>(); @@ -1169,10 +1161,10 @@ private void createDummyPartitionedTable(SchemaTableName tableName, List metastoreClient.updatePartitionStatistics( - identity, tableName.getSchemaName(), tableName.getTableName(), partitionName, currentStatistics -> EMPTY_TABLE_STATISTICS)); + tableName.getSchemaName(), tableName.getTableName(), partitionName, currentStatistics -> EMPTY_TABLE_STATISTICS)); } private class CloseableSchamaTableName diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java index a7d2773a5906..db19c0d40cdc 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/metastore/recording/TestRecordingHiveMetastore.java @@ -27,7 +27,6 @@ import io.trino.plugin.hive.HiveType; 
import io.trino.plugin.hive.PartitionStatistics; import io.trino.plugin.hive.RecordingMetastoreConfig; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveColumnStatistics; @@ -71,7 +70,6 @@ import static io.trino.spi.statistics.ColumnStatisticType.MIN_VALUE; import static io.trino.spi.type.VarcharType.createUnboundedVarcharType; import static io.trino.spi.type.VarcharType.createVarcharType; -import static io.trino.testing.TestingConnectorSession.SESSION; import static org.testng.Assert.assertEquals; public class TestRecordingHiveMetastore @@ -128,7 +126,6 @@ public class TestRecordingHiveMetastore OptionalLong.of(8)))); private static final HivePrivilegeInfo PRIVILEGE_INFO = new HivePrivilegeInfo(HivePrivilege.SELECT, true, new HivePrincipal(USER, "grantor"), new HivePrincipal(USER, "grantee")); private static final RoleGrant ROLE_GRANT = new RoleGrant(new TrinoPrincipal(USER, "grantee"), "role", true); - private static final HiveIdentity HIVE_CONTEXT = new HiveIdentity(SESSION); private static final List PARTITION_COLUMN_NAMES = ImmutableList.of(TABLE_COLUMN.getName()); private static final Domain PARTITION_COLUMN_EQUAL_DOMAIN = Domain.singleValue(createUnboundedVarcharType(), Slices.utf8Slice("value1")); private static final TupleDomain TUPLE_DOMAIN = TupleDomain.withColumnDomains(ImmutableMap.builder() @@ -146,7 +143,7 @@ public void testRecordingHiveMetastore() HiveMetastoreRecording recording = new HiveMetastoreRecording(recordingConfig, jsonCodec); RecordingHiveMetastore recordingHiveMetastore = new RecordingHiveMetastore(new TestingHiveMetastore(), recording); validateMetadata(recordingHiveMetastore); - recordingHiveMetastore.dropDatabase(HIVE_CONTEXT, "other_database", true); + recordingHiveMetastore.dropDatabase("other_database", true); recording.writeRecording(); RecordingMetastoreConfig replayingConfig = recordingConfig @@ -175,17 +172,17 @@ private void validateMetadata(HiveMetastore hiveMetastore) { assertEquals(hiveMetastore.getDatabase("database"), Optional.of(DATABASE)); assertEquals(hiveMetastore.getAllDatabases(), ImmutableList.of("database")); - assertEquals(hiveMetastore.getTable(HIVE_CONTEXT, "database", "table"), Optional.of(TABLE)); + assertEquals(hiveMetastore.getTable("database", "table"), Optional.of(TABLE)); assertEquals(hiveMetastore.getSupportedColumnStatistics(createVarcharType(123)), ImmutableSet.of(MIN_VALUE, MAX_VALUE)); - assertEquals(hiveMetastore.getTableStatistics(HIVE_CONTEXT, TABLE), PARTITION_STATISTICS); - assertEquals(hiveMetastore.getPartitionStatistics(HIVE_CONTEXT, TABLE, ImmutableList.of(PARTITION)), ImmutableMap.of("value", PARTITION_STATISTICS)); + assertEquals(hiveMetastore.getTableStatistics(TABLE), PARTITION_STATISTICS); + assertEquals(hiveMetastore.getPartitionStatistics(TABLE, ImmutableList.of(PARTITION)), ImmutableMap.of("value", PARTITION_STATISTICS)); assertEquals(hiveMetastore.getAllTables("database"), ImmutableList.of("table")); assertEquals(hiveMetastore.getTablesWithParameter("database", "param", "value3"), ImmutableList.of("table")); assertEquals(hiveMetastore.getAllViews("database"), ImmutableList.of()); - assertEquals(hiveMetastore.getPartition(HIVE_CONTEXT, TABLE, ImmutableList.of("value")), Optional.of(PARTITION)); - assertEquals(hiveMetastore.getPartitionNamesByFilter(HIVE_CONTEXT, "database", "table", PARTITION_COLUMN_NAMES, TupleDomain.all()), 
Optional.of(ImmutableList.of("value"))); - assertEquals(hiveMetastore.getPartitionNamesByFilter(HIVE_CONTEXT, "database", "table", PARTITION_COLUMN_NAMES, TUPLE_DOMAIN), Optional.of(ImmutableList.of("value"))); - assertEquals(hiveMetastore.getPartitionsByNames(HIVE_CONTEXT, TABLE, ImmutableList.of("value")), ImmutableMap.of("value", Optional.of(PARTITION))); + assertEquals(hiveMetastore.getPartition(TABLE, ImmutableList.of("value")), Optional.of(PARTITION)); + assertEquals(hiveMetastore.getPartitionNamesByFilter("database", "table", PARTITION_COLUMN_NAMES, TupleDomain.all()), Optional.of(ImmutableList.of("value"))); + assertEquals(hiveMetastore.getPartitionNamesByFilter("database", "table", PARTITION_COLUMN_NAMES, TUPLE_DOMAIN), Optional.of(ImmutableList.of("value"))); + assertEquals(hiveMetastore.getPartitionsByNames(TABLE, ImmutableList.of("value")), ImmutableMap.of("value", Optional.of(PARTITION))); assertEquals(hiveMetastore.listTablePrivileges("database", "table", Optional.of("owner"), Optional.of(new HivePrincipal(USER, "user"))), ImmutableSet.of(PRIVILEGE_INFO)); assertEquals(hiveMetastore.listRoles(), ImmutableSet.of("role")); assertEquals(hiveMetastore.listRoleGrants(new HivePrincipal(USER, "user")), ImmutableSet.of(ROLE_GRANT)); @@ -212,7 +209,7 @@ public List getAllDatabases() } @Override - public Optional
<Table> getTable(HiveIdentity identity, String databaseName, String tableName) + public Optional<Table>
getTable(String databaseName, String tableName) { if (databaseName.equals("database") && tableName.equals("table")) { return Optional.of(TABLE); @@ -232,7 +229,7 @@ public Set getSupportedColumnStatistics(Type type) } @Override - public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table) + public PartitionStatistics getTableStatistics(Table table) { if (table.getDatabaseName().equals("database") && table.getTableName().equals("table")) { return PARTITION_STATISTICS; @@ -242,7 +239,7 @@ public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table } @Override - public Map getPartitionStatistics(HiveIdentity identity, Table table, List partitions) + public Map getPartitionStatistics(Table table, List partitions) { boolean partitionMatches = partitions.stream() .anyMatch(partition -> partition.getValues().get(0).equals("value")); @@ -279,13 +276,13 @@ public List getAllViews(String databaseName) } @Override - public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData) + public void dropDatabase(String databaseName, boolean deleteData) { // noop for test purpose } @Override - public Optional getPartition(HiveIdentity identity, Table table, List partitionValues) + public Optional getPartition(Table table, List partitionValues) { if (table.getDatabaseName().equals("database") && table.getTableName().equals("table") && partitionValues.equals(ImmutableList.of("value"))) { return Optional.of(PARTITION); @@ -295,7 +292,10 @@ public Optional getPartition(HiveIdentity identity, Table table, List } @Override - public Optional> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List columnNames, TupleDomain partitionKeysFilter) + public Optional> getPartitionNamesByFilter(String databaseName, + String tableName, + List columnNames, + TupleDomain partitionKeysFilter) { Domain filterDomain = partitionKeysFilter.getDomains().get().get(TABLE_COLUMN.getName()); if (databaseName.equals("database") && tableName.equals("table") && (filterDomain == null || filterDomain.equals(PARTITION_COLUMN_EQUAL_DOMAIN))) { @@ -306,7 +306,7 @@ public Optional> getPartitionNamesByFilter(HiveIdentity identity, S } @Override - public Map> getPartitionsByNames(HiveIdentity identity, Table table, List partitionNames) + public Map> getPartitionsByNames(Table table, List partitionNames) { if (table.getDatabaseName().equals("database") && table.getTableName().equals("table") && partitionNames.contains("value")) { return ImmutableMap.of("value", Optional.of(PARTITION)); diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java index dc479185dc41..0bc41986507c 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestConnectorPushdownRulesWithHive.java @@ -32,7 +32,6 @@ import io.trino.plugin.hive.HiveTransactionHandle; import io.trino.plugin.hive.NodeVersion; import io.trino.plugin.hive.TestingHiveConnectorFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.authentication.NoHdfsAuthentication; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; @@ -81,7 +80,6 @@ import static io.trino.sql.planner.assertions.PlanMatchPattern.tableScan; import static 
io.trino.sql.tree.ArithmeticBinaryExpression.Operator.ADD; import static io.trino.sql.tree.ArithmeticUnaryExpression.Sign.MINUS; -import static io.trino.testing.TestingConnectorSession.SESSION; import static io.trino.testing.TestingSession.testSessionBuilder; import static java.lang.String.format; import static java.util.Arrays.asList; @@ -123,7 +121,7 @@ protected Optional createLocalQueryRunner() .setOwnerType(Optional.of(PrincipalType.ROLE)) .build(); - metastore.createDatabase(new HiveIdentity(SESSION), database); + metastore.createDatabase(database); LocalQueryRunner queryRunner = LocalQueryRunner.create(HIVE_SESSION); queryRunner.createCatalog(HIVE_CATALOG_NAME, new TestingHiveConnectorFactory(metastore), ImmutableMap.of()); @@ -213,7 +211,7 @@ public void testProjectionPushdown() TupleDomain.all(), ImmutableMap.of("struct_of_int#a", partialColumn::equals)))); - metastore.dropTable(new HiveIdentity(SESSION), SCHEMA_NAME, tableName, true); + metastore.dropTable(SCHEMA_NAME, tableName, true); } @Test @@ -245,7 +243,7 @@ public void testPredicatePushdown() TupleDomain.all(), ImmutableMap.of("a", column::equals)))); - metastore.dropTable(new HiveIdentity(SESSION), SCHEMA_NAME, tableName, true); + metastore.dropTable(SCHEMA_NAME, tableName, true); } @Test @@ -283,7 +281,7 @@ public void testColumnPruningProjectionPushdown() TupleDomain.all(), ImmutableMap.of("COLA", columnA::equals)))); - metastore.dropTable(new HiveIdentity(SESSION), SCHEMA_NAME, tableName, true); + metastore.dropTable(SCHEMA_NAME, tableName, true); } @Test @@ -364,7 +362,7 @@ public void testPushdownWithDuplicateExpressions() TupleDomain.all(), ImmutableMap.of("struct_of_bigint#a", partialColumn::equals)))); - metastore.dropTable(new HiveIdentity(SESSION), SCHEMA_NAME, tableName, true); + metastore.dropTable(SCHEMA_NAME, tableName, true); } @AfterClass(alwaysRun = true) diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHivePlans.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHivePlans.java index 6b3d93b59cec..e3353ae5eab2 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHivePlans.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHivePlans.java @@ -25,7 +25,6 @@ import io.trino.plugin.hive.HiveHdfsConfiguration; import io.trino.plugin.hive.NodeVersion; import io.trino.plugin.hive.TestingHiveConnectorFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.authentication.NoHdfsAuthentication; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; @@ -97,7 +96,7 @@ protected LocalQueryRunner createLocalQueryRunner() .setOwnerType(Optional.of(PrincipalType.ROLE)) .build(); - metastore.createDatabase(new HiveIdentity(HIVE_SESSION.toConnectorSession()), database); + metastore.createDatabase(database); return createQueryRunner(HIVE_SESSION, metastore); } diff --git a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java index 081bf1d5d08f..dbda46f2451c 100644 --- a/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java +++ b/plugin/trino-hive/src/test/java/io/trino/plugin/hive/optimizer/TestHiveProjectionPushdownIntoTableScan.java @@ -29,7 +29,6 @@ import io.trino.plugin.hive.HiveTableHandle; import 
io.trino.plugin.hive.NodeVersion; import io.trino.plugin.hive.TestingHiveConnectorFactory; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.authentication.NoHdfsAuthentication; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; @@ -100,7 +99,7 @@ protected LocalQueryRunner createLocalQueryRunner() .setOwnerType(Optional.of(PrincipalType.ROLE)) .build(); - metastore.createDatabase(new HiveIdentity(HIVE_SESSION.toConnectorSession()), database); + metastore.createDatabase(database); LocalQueryRunner queryRunner = LocalQueryRunner.create(HIVE_SESSION); queryRunner.createCatalog(HIVE_CATALOG_NAME, new TestingHiveConnectorFactory(metastore), ImmutableMap.of()); diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java index 4a9f40d1a1c4..a189c7d68d23 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoCatalogFactory.java @@ -17,7 +17,6 @@ import io.trino.plugin.hive.HdfsEnvironment; import io.trino.plugin.hive.HiveConfig; import io.trino.plugin.hive.NodeVersion; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.HiveMetastoreFactory; import io.trino.plugin.iceberg.catalog.IcebergTableOperationsProvider; import io.trino.spi.TrinoException; @@ -78,7 +77,7 @@ public TrinoCatalog create(ConnectorIdentity identity) case HIVE_METASTORE: return new TrinoHiveCatalog( catalogName, - memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), new HiveIdentity(identity), 1000), + memoizeMetastore(metastoreFactory.createMetastore(Optional.of(identity)), 1000), hdfsEnvironment, typeManager, tableOperationsProvider, diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java index f9dd5a0b7bf0..5a9376388581 100644 --- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java +++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/TrinoHiveCatalog.java @@ -25,7 +25,6 @@ import io.trino.plugin.hive.TableAlreadyExistsException; import io.trino.plugin.hive.ViewAlreadyExistsException; import io.trino.plugin.hive.ViewReaderUtil; -import io.trino.plugin.hive.authentication.HiveIdentity; import io.trino.plugin.hive.metastore.Column; import io.trino.plugin.hive.metastore.Database; import io.trino.plugin.hive.metastore.HiveMetastore; @@ -208,7 +207,7 @@ public void createNamespace(ConnectorSession session, String namespace, Map comment) { - metastore.commentTable(new HiveIdentity(session), schemaTableName.getSchemaName(), schemaTableName.getTableName(), comment); + metastore.commentTable(schemaTableName.getSchemaName(), schemaTableName.getTableName(), comment); Table icebergTable = loadTable(session, schemaTableName); if (comment.isEmpty()) { icebergTable.updateProperties().remove(TABLE_COMMENT).commit(); @@ -365,7 +364,6 @@ public void createView(ConnectorSession session, SchemaTableName schemaViewName, definition = definition.withoutOwner(); } - HiveIdentity identity = new HiveIdentity(session); Map properties = ImmutableMap.builder() .put(PRESTO_VIEW_FLAG, "true") .put(TRINO_CREATED_BY, TRINO_CREATED_BY_VALUE) @@ -391,18 +389,18 @@ public void createView(ConnectorSession session, 
SchemaTableName schemaViewName,
         io.trino.plugin.hive.metastore.Table table = tableBuilder.build();
         PrincipalPrivileges principalPrivileges = isUsingSystemSecurity ? NO_PRIVILEGES : buildInitialPrivilegeSet(session.getUser());
-        Optional<io.trino.plugin.hive.metastore.Table> existing = metastore.getTable(identity, schemaViewName.getSchemaName(), schemaViewName.getTableName());
+        Optional<io.trino.plugin.hive.metastore.Table> existing = metastore.getTable(schemaViewName.getSchemaName(), schemaViewName.getTableName());
         if (existing.isPresent()) {
             if (!replace || !isPrestoView(existing.get())) {
                 throw new ViewAlreadyExistsException(schemaViewName);
             }
-            metastore.replaceTable(identity, schemaViewName.getSchemaName(), schemaViewName.getTableName(), table, principalPrivileges);
+            metastore.replaceTable(schemaViewName.getSchemaName(), schemaViewName.getTableName(), table, principalPrivileges);
             return;
         }
         try {
-            metastore.createTable(identity, table, principalPrivileges);
+            metastore.createTable(table, principalPrivileges);
         }
         catch (TableAlreadyExistsException e) {
             throw new ViewAlreadyExistsException(e.getTableName());
@@ -413,7 +411,7 @@ public void createView(ConnectorSession session, SchemaTableName schemaViewName,
     public void renameView(ConnectorSession session, SchemaTableName source, SchemaTableName target)
     {
         // Not checking if source view exists as this is already done in RenameViewTask
-        metastore.renameTable(new HiveIdentity(session), source.getSchemaName(), source.getTableName(), target.getSchemaName(), target.getTableName());
+        metastore.renameTable(source.getSchemaName(), source.getTableName(), target.getSchemaName(), target.getTableName());
     }

     @Override
@@ -431,7 +429,7 @@ public void dropView(ConnectorSession session, SchemaTableName schemaViewName)
         }
         try {
-            metastore.dropTable(new HiveIdentity(session), schemaViewName.getSchemaName(), schemaViewName.getTableName(), true);
+            metastore.dropTable(schemaViewName.getSchemaName(), schemaViewName.getTableName(), true);
         }
         catch (TableNotFoundException e) {
             throw new ViewNotFoundException(e.getTableName());
@@ -475,7 +473,7 @@ public Optional getView(ConnectorSession session, Schem
         if (isHiveSystemSchema(viewIdentifier.getSchemaName())) {
             return Optional.empty();
         }
-        return metastore.getTable(new HiveIdentity(session), viewIdentifier.getSchemaName(), viewIdentifier.getTableName())
+        return metastore.getTable(viewIdentifier.getSchemaName(), viewIdentifier.getTableName())
                 .filter(table -> HiveMetadata.PRESTO_VIEW_COMMENT.equals(table.getParameters().get(TABLE_COMMENT))) // filter out materialized views
                 .filter(ViewReaderUtil::canDecodeView)
                 .map(view -> {
@@ -514,8 +512,7 @@ public List listMaterializedViews(ConnectorSession session, Opt
     public void createMaterializedView(ConnectorSession session, SchemaTableName schemaViewName, ConnectorMaterializedViewDefinition definition, boolean replace, boolean ignoreExisting)
     {
-        HiveIdentity identity = new HiveIdentity(session);
-        Optional<io.trino.plugin.hive.metastore.Table> existing = metastore.getTable(identity, schemaViewName.getSchemaName(), schemaViewName.getTableName());
+        Optional<io.trino.plugin.hive.metastore.Table> existing = metastore.getTable(schemaViewName.getSchemaName(), schemaViewName.getTableName());

         // It's a create command where the materialized view already exists and 'if not exists' clause is not specified
         if (!replace && existing.isPresent()) {
@@ -571,39 +568,38 @@ public void createMaterializedView(ConnectorSession session, SchemaTableName sch
             // drop the current storage table
             String oldStorageTable = existing.get().getParameters().get(STORAGE_TABLE);
             if (oldStorageTable != null) {
-                metastore.dropTable(identity, schemaViewName.getSchemaName(), oldStorageTable, true);
+                metastore.dropTable(schemaViewName.getSchemaName(), oldStorageTable, true);
             }
             // Replace the existing view definition
-            metastore.replaceTable(identity, schemaViewName.getSchemaName(), schemaViewName.getTableName(), table, principalPrivileges);
+            metastore.replaceTable(schemaViewName.getSchemaName(), schemaViewName.getTableName(), table, principalPrivileges);
             return;
         }
         // create the view definition
-        metastore.createTable(identity, table, principalPrivileges);
+        metastore.createTable(table, principalPrivileges);
     }

     @Override
     public void dropMaterializedView(ConnectorSession session, SchemaTableName schemaViewName)
     {
-        final HiveIdentity identity = new HiveIdentity(session);
-        io.trino.plugin.hive.metastore.Table view = metastore.getTable(identity, schemaViewName.getSchemaName(), schemaViewName.getTableName())
+        io.trino.plugin.hive.metastore.Table view = metastore.getTable(schemaViewName.getSchemaName(), schemaViewName.getTableName())
                 .orElseThrow(() -> new MaterializedViewNotFoundException(schemaViewName));

         String storageTableName = view.getParameters().get(STORAGE_TABLE);
         if (storageTableName != null) {
             try {
-                metastore.dropTable(identity, schemaViewName.getSchemaName(), storageTableName, true);
+                metastore.dropTable(schemaViewName.getSchemaName(), storageTableName, true);
             }
             catch (TrinoException e) {
                 log.warn(e, "Failed to drop storage table '%s' for materialized view '%s'", storageTableName, schemaViewName);
             }
         }
-        metastore.dropTable(identity, schemaViewName.getSchemaName(), schemaViewName.getTableName(), true);
+        metastore.dropTable(schemaViewName.getSchemaName(), schemaViewName.getTableName(), true);
     }

     @Override
     public Optional<ConnectorMaterializedViewDefinition> getMaterializedView(ConnectorSession session, SchemaTableName schemaViewName)
     {
-        Optional<io.trino.plugin.hive.metastore.Table> tableOptional = metastore.getTable(new HiveIdentity(session), schemaViewName.getSchemaName(), schemaViewName.getTableName());
+        Optional<io.trino.plugin.hive.metastore.Table> tableOptional = metastore.getTable(schemaViewName.getSchemaName(), schemaViewName.getTableName());
         if (tableOptional.isEmpty()) {
             return Optional.empty();
         }
@@ -643,7 +639,7 @@ public Optional getMaterializedView(Connect
     @Override
     public void renameMaterializedView(ConnectorSession session, SchemaTableName source, SchemaTableName target)
     {
-        metastore.renameTable(new HiveIdentity(session), source.getSchemaName(), source.getTableName(), target.getSchemaName(), target.getTableName());
+        metastore.renameTable(source.getSchemaName(), source.getTableName(), target.getSchemaName(), target.getTableName());
     }

     private List<String> listNamespaces(ConnectorSession session, Optional<String> namespace)
diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractMetastoreTableOperations.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractMetastoreTableOperations.java
index 0751124ea550..061de97c205a 100644
--- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractMetastoreTableOperations.java
+++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/AbstractMetastoreTableOperations.java
@@ -14,7 +14,6 @@
 package io.trino.plugin.iceberg.catalog;

 import io.airlift.log.Logger;
-import io.trino.plugin.hive.authentication.HiveIdentity;
 import io.trino.plugin.hive.metastore.Column;
 import io.trino.plugin.hive.metastore.HiveMetastore;
 import io.trino.plugin.hive.metastore.MetastoreUtil;
@@ -221,8 +220,7 @@ protected void commitNewTable(TableMetadata metadata)
         }
         PrincipalPrivileges privileges = owner.map(MetastoreUtil::buildInitialPrivilegeSet).orElse(NO_PRIVILEGES);
-        HiveIdentity identity = new HiveIdentity(session);
-        metastore.createTable(identity, table, privileges);
+        metastore.createTable(table, privileges);
     }

     protected abstract void commitToExistingTable(TableMetadata base, TableMetadata metadata);
@@ -260,7 +258,7 @@ public LocationProvider locationProvider()
     protected Table getTable()
     {
-        return metastore.getTable(new HiveIdentity(session), database, tableName)
+        return metastore.getTable(database, tableName)
                 .orElseThrow(() -> new TableNotFoundException(getSchemaTableName()));
     }
diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/file/FileMetastoreTableOperations.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/file/FileMetastoreTableOperations.java
index 7098cd1ba976..93f68303525f 100644
--- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/file/FileMetastoreTableOperations.java
+++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/file/FileMetastoreTableOperations.java
@@ -13,7 +13,6 @@
  */
 package io.trino.plugin.iceberg.catalog.file;

-import io.trino.plugin.hive.authentication.HiveIdentity;
 import io.trino.plugin.hive.metastore.HiveMetastore;
 import io.trino.plugin.hive.metastore.PrincipalPrivileges;
 import io.trino.plugin.hive.metastore.Table;
@@ -82,7 +81,6 @@ protected void commitToExistingTable(TableMetadata base, TableMetadata metadata)
         // todo privileges should not be replaced for an alter
         PrincipalPrivileges privileges = owner.isEmpty() && table.getOwner().isPresent() ? NO_PRIVILEGES : buildInitialPrivilegeSet(table.getOwner().get());
-        HiveIdentity identity = new HiveIdentity(session);
-        metastore.replaceTable(identity, database, tableName, table, privileges);
+        metastore.replaceTable(database, tableName, table, privileges);
     }
 }
diff --git a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/HiveMetastoreTableOperations.java b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/HiveMetastoreTableOperations.java
index 32c7f806b13a..30a5821e5d61 100644
--- a/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/HiveMetastoreTableOperations.java
+++ b/plugin/trino-iceberg/src/main/java/io/trino/plugin/iceberg/catalog/hms/HiveMetastoreTableOperations.java
@@ -59,7 +59,7 @@ public HiveMetastoreTableOperations(
     protected void commitToExistingTable(TableMetadata base, TableMetadata metadata)
     {
         String newMetadataLocation = writeNewMetadata(metadata, version + 1);
-        HiveIdentity identity = new HiveIdentity(session);
+        HiveIdentity identity = new HiveIdentity(session.getIdentity());

         long lockId = thriftMetastore.acquireTableExclusiveLock(
                 identity,
@@ -98,7 +98,7 @@ protected void commitToExistingTable(TableMetadata base, TableMetadata metadata)
             // todo privileges should not be replaced for an alter
             PrincipalPrivileges privileges = owner.isEmpty() && table.getOwner().isPresent() ? NO_PRIVILEGES : buildInitialPrivilegeSet(table.getOwner().get());
-            metastore.replaceTable(identity, database, tableName, table, privileges);
+            metastore.replaceTable(database, tableName, table, privileges);
         }
         finally {
             thriftMetastore.releaseTableLock(identity, lockId);
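The hunks above all make the same move: HiveMetastore calls drop their HiveIdentity argument, and only the Thrift table-lock path still builds an identity, now from session.getIdentity(). A minimal, hypothetical sketch of the resulting calling convention follows; the SimpleMetastore interface and method names are illustrative only, not the real io.trino.plugin.hive.metastore.HiveMetastore API.

import java.util.Optional;

// Illustrative stand-in for a metastore whose methods no longer take a per-call identity.
interface SimpleMetastore
{
    Optional<String> getTableLocation(String databaseName, String tableName);

    void replaceTable(String databaseName, String tableName, String newLocation);
}

class CommitExample
{
    private final SimpleMetastore metastore;

    CommitExample(SimpleMetastore metastore)
    {
        this.metastore = metastore;
    }

    void commit(String databaseName, String tableName, String newMetadataLocation)
    {
        // Callers pass only the table coordinates and the new state; credential handling,
        // if any, lives inside the metastore implementation rather than at every call site.
        String current = metastore.getTableLocation(databaseName, tableName)
                .orElseThrow(() -> new IllegalStateException("Table not found: " + databaseName + "." + tableName));
        if (!current.equals(newMetadataLocation)) {
            metastore.replaceTable(databaseName, tableName, newMetadataLocation);
        }
    }
}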
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java
index c144f28ef3c5..3e14149c82a6 100644
--- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/CountingAccessFileHiveMetastore.java
@@ -19,7 +19,6 @@
 import io.trino.plugin.hive.HiveType;
 import io.trino.plugin.hive.PartitionStatistics;
 import io.trino.plugin.hive.acid.AcidTransaction;
-import io.trino.plugin.hive.authentication.HiveIdentity;
 import io.trino.plugin.hive.metastore.Database;
 import io.trino.plugin.hive.metastore.HiveMetastore;
 import io.trino.plugin.hive.metastore.HivePrincipal;
@@ -77,10 +76,10 @@ public void resetCounters()
     }

     @Override
-    public Optional<Table> getTable(HiveIdentity identity, String databaseName, String tableName)
+    public Optional<Table> getTable(String databaseName, String tableName)
     {
         methodInvocations.add(Methods.GET_TABLE);
-        return delegate.getTable(identity, databaseName, tableName);
+        return delegate.getTable(databaseName, tableName);
     }

     @Override
@@ -117,124 +116,127 @@ public List getAllViews(String databaseName)
     }

     @Override
-    public void createDatabase(HiveIdentity identity, Database database)
+    public void createDatabase(Database database)
     {
         methodInvocations.add(Methods.CREATE_DATABASE);
-        delegate.createDatabase(identity, database);
+        delegate.createDatabase(database);
     }

     @Override
-    public void dropDatabase(HiveIdentity identity, String databaseName, boolean deleteData)
+    public void dropDatabase(String databaseName, boolean deleteData)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void renameDatabase(HiveIdentity identity, String databaseName, String newDatabaseName)
+    public void renameDatabase(String databaseName, String newDatabaseName)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void setDatabaseOwner(HiveIdentity identity, String databaseName, HivePrincipal principal)
+    public void setDatabaseOwner(String databaseName, HivePrincipal principal)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void createTable(HiveIdentity identity, Table table, PrincipalPrivileges principalPrivileges)
+    public void createTable(Table table, PrincipalPrivileges principalPrivileges)
     {
         methodInvocations.add(Methods.CREATE_TABLE);
-        delegate.createTable(identity, table, principalPrivileges);
+        delegate.createTable(table, principalPrivileges);
     }

     @Override
-    public void dropTable(HiveIdentity identity, String databaseName, String tableName, boolean deleteData)
+    public void dropTable(String databaseName, String tableName, boolean deleteData)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void replaceTable(HiveIdentity identity, String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
+    public void replaceTable(String databaseName, String tableName, Table newTable, PrincipalPrivileges principalPrivileges)
     {
         methodInvocations.add(Methods.REPLACE_TABLE);
-        delegate.replaceTable(identity, databaseName, tableName, newTable, principalPrivileges);
+        delegate.replaceTable(databaseName, tableName, newTable, principalPrivileges);
     }

     @Override
-    public void renameTable(HiveIdentity identity, String databaseName, String tableName, String newDatabaseName, String newTableName)
+    public void renameTable(String databaseName, String tableName, String newDatabaseName, String newTableName)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void commentTable(HiveIdentity identity, String databaseName, String tableName, Optional<String> comment)
+    public void commentTable(String databaseName, String tableName, Optional<String> comment)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void setTableOwner(HiveIdentity identity, String databaseName, String tableName, HivePrincipal principal)
+    public void setTableOwner(String databaseName, String tableName, HivePrincipal principal)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void commentColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, Optional<String> comment)
+    public void commentColumn(String databaseName, String tableName, String columnName, Optional<String> comment)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void addColumn(HiveIdentity identity, String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
+    public void addColumn(String databaseName, String tableName, String columnName, HiveType columnType, String columnComment)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void renameColumn(HiveIdentity identity, String databaseName, String tableName, String oldColumnName, String newColumnName)
+    public void renameColumn(String databaseName, String tableName, String oldColumnName, String newColumnName)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void dropColumn(HiveIdentity identity, String databaseName, String tableName, String columnName)
+    public void dropColumn(String databaseName, String tableName, String columnName)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public Optional<Partition> getPartition(HiveIdentity identity, Table table, List<String> partitionValues)
+    public Optional<Partition> getPartition(Table table, List<String> partitionValues)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public Optional<List<String>> getPartitionNamesByFilter(HiveIdentity identity, String databaseName, String tableName, List<String> columnNames, TupleDomain<String> partitionKeysFilter)
+    public Optional<List<String>> getPartitionNamesByFilter(String databaseName,
+            String tableName,
+            List<String> columnNames,
+            TupleDomain<String> partitionKeysFilter)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public Map<String, Optional<Partition>> getPartitionsByNames(HiveIdentity identity, Table table, List<String> partitionNames)
+    public Map<String, Optional<Partition>> getPartitionsByNames(Table table, List<String> partitionNames)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void addPartitions(HiveIdentity identity, String databaseName, String tableName, List<PartitionWithStatistics> partitions)
+    public void addPartitions(String databaseName, String tableName, List<PartitionWithStatistics> partitions)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void dropPartition(HiveIdentity identity, String databaseName, String tableName, List<String> parts, boolean deleteData)
+    public void dropPartition(String databaseName, String tableName, List<String> parts, boolean deleteData)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void alterPartition(HiveIdentity identity, String databaseName, String tableName, PartitionWithStatistics partition)
+    public void alterPartition(String databaseName, String tableName, PartitionWithStatistics partition)
     {
         throw new UnsupportedOperationException();
     }
@@ -300,26 +302,29 @@ public Set listTablePrivileges(String databaseName, String ta
     }

     @Override
-    public PartitionStatistics getTableStatistics(HiveIdentity identity, Table table)
+    public PartitionStatistics getTableStatistics(Table table)
     {
         methodInvocations.add(Methods.GET_TABLE_STATISTICS);
-        return delegate.getTableStatistics(identity, table);
+        return delegate.getTableStatistics(table);
     }

     @Override
-    public Map<String, PartitionStatistics> getPartitionStatistics(HiveIdentity identity, Table table, List<Partition> partitions)
+    public Map<String, PartitionStatistics> getPartitionStatistics(Table table, List<Partition> partitions)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void updateTableStatistics(HiveIdentity identity, String databaseName, String tableName, AcidTransaction transaction, Function<PartitionStatistics, PartitionStatistics> update)
+    public void updateTableStatistics(String databaseName,
+            String tableName,
+            AcidTransaction transaction,
+            Function<PartitionStatistics, PartitionStatistics> update)
     {
         throw new UnsupportedOperationException();
     }

     @Override
-    public void updatePartitionStatistics(HiveIdentity identity, Table table, Map<String, Function<PartitionStatistics, PartitionStatistics>> updates)
+    public void updatePartitionStatistics(Table table, Map<String, Function<PartitionStatistics, PartitionStatistics>> updates)
     {
         throw new UnsupportedOperationException();
     }
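CountingAccessFileHiveMetastore is a test decorator: it forwards a handful of calls to a delegate while recording which methods were hit, and fails fast on everything the test does not expect. A simplified, hypothetical sketch of that pattern follows; TinyMetastore and its method names are illustrative, not the real API.

import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.LongAdder;

// Illustrative stand-in for the metastore interface under test.
interface TinyMetastore
{
    Optional<String> getTable(String databaseName, String tableName);

    void createTable(String databaseName, String tableName);
}

class CountingMetastore
        implements TinyMetastore
{
    private final TinyMetastore delegate;
    private final Map<String, LongAdder> invocations = new ConcurrentHashMap<>();

    CountingMetastore(TinyMetastore delegate)
    {
        this.delegate = delegate;
    }

    @Override
    public Optional<String> getTable(String databaseName, String tableName)
    {
        // Record the access, then delegate, mirroring methodInvocations.add(...) above.
        invocations.computeIfAbsent("GET_TABLE", name -> new LongAdder()).increment();
        return delegate.getTable(databaseName, tableName);
    }

    @Override
    public void createTable(String databaseName, String tableName)
    {
        // Methods the test never expects to reach simply fail fast, as in the diff above.
        throw new UnsupportedOperationException();
    }

    long count(String method)
    {
        return invocations.getOrDefault(method, new LongAdder()).sum();
    }
}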
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java
index 2fcdaeabbf11..3d7cd78be07c 100644
--- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergProjectionPushdownPlans.java
@@ -20,7 +20,6 @@
 import io.trino.Session;
 import io.trino.metadata.QualifiedObjectName;
 import io.trino.metadata.TableHandle;
-import io.trino.plugin.hive.authentication.HiveIdentity;
 import io.trino.plugin.hive.metastore.Database;
 import io.trino.plugin.hive.metastore.HiveMetastore;
 import io.trino.spi.connector.ColumnHandle;
@@ -85,7 +84,7 @@ protected LocalQueryRunner createLocalQueryRunner()
                 .setOwnerName(Optional.of("public"))
                 .setOwnerType(Optional.of(PrincipalType.ROLE))
                 .build();
-        metastore.createDatabase(new HiveIdentity(session.toConnectorSession()), database);
+        metastore.createDatabase(database);

         return queryRunner;
     }
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergTableWithCustomLocation.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergTableWithCustomLocation.java
index f22184991687..ce6fb0022053 100644
--- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergTableWithCustomLocation.java
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestIcebergTableWithCustomLocation.java
@@ -70,7 +70,7 @@ public void testTableHasUuidSuffixInLocation()
     {
         String tableName = "table_with_uuid";
         assertQuerySucceeds(format("CREATE TABLE %s as select 1 as val", tableName));
-        Optional<Table> table = metastore.getTable(null, "tpch", tableName);
+        Optional<Table> table = metastore.getTable("tpch", tableName);
         assertTrue(table.isPresent(), "Table should exists");
         String location = table.get().getStorage().getLocation();
         assertThat(location).matches(format(".*%s-[0-9a-f]{32}", tableName));
@@ -81,11 +81,11 @@ public void testCreateAndDrop()
     {
         String tableName = "test_create_and_drop";
         assertQuerySucceeds(format("CREATE TABLE %s as select 1 as val", tableName));
-        Optional<Table> table = metastore.getTable(null, "tpch", tableName);
+        Optional<Table> table = metastore.getTable("tpch", tableName);
         assertTrue(table.isPresent(), "Table should exist");

         assertQuerySucceeds(format("DROP TABLE %s", tableName));
-        assertFalse(metastore.getTable(null, "tpch", tableName).isPresent(), "Table should be dropped");
+        assertFalse(metastore.getTable("tpch", tableName).isPresent(), "Table should be dropped");
     }

     @Test
@@ -94,19 +94,19 @@ public void testCreateRenameDrop()
         String tableName = "test_create_rename_drop";
         String renamedName = "test_create_rename_drop_renamed";
         assertQuerySucceeds(format("CREATE TABLE %s as select 1 as val", tableName));
-        Optional<Table> table = metastore.getTable(null, "tpch", tableName);
+        Optional<Table> table = metastore.getTable("tpch", tableName);
         assertTrue(table.isPresent(), "Table should exist");
         String tableInitialLocation = table.get().getStorage().getLocation();

         assertQuerySucceeds(format("ALTER TABLE %s RENAME TO %s", tableName, renamedName));
-        Optional<Table> renamedTable = metastore.getTable(null, "tpch", renamedName);
+        Optional<Table> renamedTable = metastore.getTable("tpch", renamedName);
         assertTrue(renamedTable.isPresent(), "Table should exist");
         String renamedTableLocation = renamedTable.get().getStorage().getLocation();
         assertEquals(renamedTableLocation, tableInitialLocation, "Location should not be changed");

         assertQuerySucceeds(format("DROP TABLE %s", renamedName));
-        assertFalse(metastore.getTable(null, "tpch", tableName).isPresent(), "Initial table should not exists");
-        assertFalse(metastore.getTable(null, "tpch", renamedName).isPresent(), "Renamed table should be dropped");
+        assertFalse(metastore.getTable("tpch", tableName).isPresent(), "Initial table should not exists");
+        assertFalse(metastore.getTable("tpch", renamedName).isPresent(), "Renamed table should be dropped");
     }

     @Test
@@ -115,18 +115,18 @@ public void testCreateRenameCreate()
         String tableName = "test_create_rename_create";
         String renamedName = "test_create_rename_create_renamed";
         assertQuerySucceeds(format("CREATE TABLE %s as select 1 as val", tableName));
-        Optional<Table> table = metastore.getTable(null, "tpch", tableName);
+        Optional<Table> table = metastore.getTable("tpch", tableName);
         assertTrue(table.isPresent(), "Table should exist");
         String tableInitialLocation = table.get().getStorage().getLocation();

         assertQuerySucceeds(format("ALTER TABLE %s RENAME TO %s", tableName, renamedName));
-        Optional<Table> renamedTable = metastore.getTable(null, "tpch", renamedName);
+        Optional<Table> renamedTable = metastore.getTable("tpch", renamedName);
         assertTrue(renamedTable.isPresent(), "Table should exist");
         String renamedTableLocation = renamedTable.get().getStorage().getLocation();
         assertEquals(renamedTableLocation, tableInitialLocation, "Location should not be changed");

         assertQuerySucceeds(format("CREATE TABLE %s as select 1 as val", tableName));
-        Optional<Table> recreatedTableWithInitialName = metastore.getTable(null, "tpch", tableName);
+        Optional<Table> recreatedTableWithInitialName = metastore.getTable("tpch", tableName);
         assertTrue(recreatedTableWithInitialName.isPresent(), "Table should exist");
         String recreatedTableLocation = recreatedTableWithInitialName.get().getStorage().getLocation();
         assertNotEquals(tableInitialLocation, recreatedTableLocation, "Location should be different");
diff --git a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestMetadataQueryOptimization.java b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestMetadataQueryOptimization.java
index 2d32275f66bd..e94bdce47c11 100644
--- a/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestMetadataQueryOptimization.java
+++ b/plugin/trino-iceberg/src/test/java/io/trino/plugin/iceberg/TestMetadataQueryOptimization.java
@@ -17,7 +17,6 @@
 import com.google.common.collect.ImmutableMap;
 import com.google.common.io.Files;
 import io.trino.Session;
-import io.trino.plugin.hive.authentication.HiveIdentity;
 import io.trino.plugin.hive.metastore.Database;
 import io.trino.plugin.hive.metastore.HiveMetastore;
 import io.trino.spi.security.PrincipalType;
@@ -67,7 +66,7 @@ protected LocalQueryRunner createLocalQueryRunner()
                 .setOwnerName(Optional.of("public"))
                 .setOwnerType(Optional.of(PrincipalType.ROLE))
                 .build();
-        metastore.createDatabase(new HiveIdentity(session.toConnectorSession()), database);
+        metastore.createDatabase(database);

         return queryRunner;
     }
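In the test updates above, call sites that previously passed a HiveIdentity (or null) as the first argument now pass only the schema and table coordinates. A small, self-contained sketch of the simplified setup-and-assert flow follows, assuming a toy in-memory metastore rather than the real FileHiveMetastore; all names below are illustrative.

import java.util.HashMap;
import java.util.Map;
import java.util.Optional;

// Toy in-memory metastore used only to illustrate the new call shape.
class InMemoryMetastore
{
    private final Map<String, String> databases = new HashMap<>();
    private final Map<String, String> tables = new HashMap<>();

    void createDatabase(String databaseName, String owner)
    {
        databases.put(databaseName, owner);
    }

    void createTable(String databaseName, String tableName, String location)
    {
        tables.put(databaseName + "." + tableName, location);
    }

    Optional<String> getTable(String databaseName, String tableName)
    {
        return Optional.ofNullable(tables.get(databaseName + "." + tableName));
    }
}

class SetupExample
{
    public static void main(String[] args)
    {
        InMemoryMetastore metastore = new InMemoryMetastore();
        // Before this patch the equivalent calls took an identity as the first argument
        // (often null in tests); now only the logical arguments remain.
        metastore.createDatabase("tpch", "public");
        metastore.createTable("tpch", "table_with_uuid", "file:///tmp/tpch/table_with_uuid");

        if (metastore.getTable("tpch", "table_with_uuid").isEmpty()) {
            throw new AssertionError("Table should exist");
        }
    }
}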