From 3ee4dcf62b08a3b09c3a75df3a1a62a06bb40210 Mon Sep 17 00:00:00 2001 From: Marc Hennemann <64148462+hennlo@users.noreply.github.com> Date: Mon, 18 Oct 2021 16:11:19 +0200 Subject: [PATCH] Temperature-aware data partitioning (#348) --- .../CassandraPhysicalNameProvider.java | 5 +- .../db/adapter/cassandra/CassandraStore.java | 28 +- .../org/polypheny/db/catalog/CatalogImpl.java | 1102 +++++++++++++---- .../polypheny/db/catalog/CatalogInfoPage.java | 35 +- .../org/polypheny/db/test/CatalogTest.java | 12 +- core/_docs/reference.md | 7 + core/build.gradle | 2 + core/src/main/codegen/includes/ddlParser.ftl | 56 +- .../src/main/codegen/includes/parserImpls.ftl | 56 +- core/src/main/codegen/templates/Parser.jj | 3 + .../org/polypheny/db/adapter/Adapter.java | 15 +- .../org/polypheny/db/adapter/DataStore.java | 8 +- .../org/polypheny/db/catalog/Catalog.java | 265 +++- .../entity/CatalogColumnPlacement.java | 4 - .../db/catalog/entity/CatalogPartition.java | 43 +- .../catalog/entity/CatalogPartitionGroup.java | 91 ++ .../entity/CatalogPartitionPlacement.java | 61 + .../db/catalog/entity/CatalogTable.java | 76 +- .../db/catalog/entity/CatalogView.java | 20 +- ...nownPartitionGroupIdRuntimeException.java} | 4 +- .../UnknownPartitionPlacementException.java | 26 + .../polypheny/db/config/RuntimeConfig.java | 20 +- .../java/org/polypheny/db/ddl/DdlManager.java | 90 +- ...artitionGroupNamesNotUniqueException.java} | 2 +- .../db/monitoring/core/MonitoringQueue.java | 2 +- .../db/monitoring/events/BaseEvent.java | 2 +- .../db/monitoring/events/StatementEvent.java | 2 + .../db/monitoring/ui/MonitoringServiceUi.java | 12 + .../partition/AbstractPartitionManager.java | 112 -- .../polypheny/db/partition/FrequencyMap.java | 42 + .../db/partition/PartitionManager.java | 13 +- .../db/partition/PartitionManagerFactory.java | 31 +- .../properties/PartitionProperty.java | 40 + .../TemperaturePartitionProperty.java | 44 + .../raw/RawPartitionInformation.java | 39 + .../RawTemperaturePartitionInformation.java | 44 + .../polypheny/db/processing/DataMigrator.java | 50 +- .../java/org/polypheny/db/routing/Router.java | 4 +- .../polypheny/db/sql/ddl/SqlCreateTable.java | 104 +- .../org/polypheny/db/sql/ddl/SqlDdlNodes.java | 5 +- .../SqlAlterTableAddPartitions.java | 72 +- .../altertable/SqlAlterTableAddPlacement.java | 39 +- .../SqlAlterTableMergePartitions.java | 15 +- .../SqlAlterTableModifyPartitions.java | 57 +- .../SqlAlterTableModifyPlacement.java | 54 +- .../polypheny/db/transaction/Transaction.java | 6 +- .../db/util/background/BackgroundTask.java | 6 +- .../util/background/BackgroundTaskHandle.java | 9 +- .../db/sql/parser/SqlParserTest.java | 2 + .../db/test/catalog/MockCatalog.java | 261 +++- .../adapter/cottontail/CottontailStore.java | 440 ++++--- .../cottontail/util/CottontailNameUtil.java | 29 +- .../polypheny/db/adapter/csv/CsvSchema.java | 7 +- .../polypheny/db/adapter/csv/CsvSource.java | 9 +- .../java/org/polypheny/db/PolyphenyDb.java | 12 + .../org/polypheny/db/ddl/DdlManagerImpl.java | 694 ++++++++--- .../partition/AbstractPartitionManager.java | 126 ++ .../db/partition/FrequencyMapImpl.java | 464 +++++++ .../db/partition/HashPartitionManager.java | 64 +- .../db/partition/ListPartitionManager.java | 127 +- .../PartitionManagerFactoryImpl.java | 44 + .../db/partition/RangePartitionManager.java | 119 +- .../TemperatureAwarePartitionManager.java | 305 +++++ .../db/processing/AbstractQueryProcessor.java | 69 +- .../db/processing/DataContextImpl.java | 32 +- .../db/processing/DataMigratorImpl.java | 411 
++++-- .../db/processing/QueryParameterizer.java | 32 +- .../polypheny/db/router/AbstractRouter.java | 901 ++++++++++---- .../org/polypheny/db/router/IcarusRouter.java | 8 +- .../org/polypheny/db/router/SimpleRouter.java | 2 +- .../db/schema/PolySchemaBuilder.java | 42 +- .../db/transaction/TransactionImpl.java | 24 +- .../polypheny/db/adapter/FileAdapterTest.java | 2 +- .../db/misc/HorizontalPartitioningTest.java | 536 +++++++- .../polypheny/db/sql/clause/GroupByTest.java | 6 +- .../adapter/ethereum/EthereumDataSource.java | 9 +- .../db/adapter/file/FileEnumerator.java | 17 +- .../polypheny/db/adapter/file/FileMethod.java | 6 +- .../db/adapter/file/FileModifier.java | 6 +- .../polypheny/db/adapter/file/FileStore.java | 148 ++- .../db/adapter/file/FileStoreSchema.java | 51 +- .../adapter/file/FileTranslatableTable.java | 28 +- .../db/adapter/file/rel/FileRules.java | 2 +- .../file/rel/FileToEnumerableConverter.java | 2 + .../polypheny/db/adapter/file/source/Qfs.java | 5 +- .../db/adapter/file/source/QfsSchema.java | 9 +- .../db/information/InformationDuration.java | 42 +- .../polypheny/db/adapter/jdbc/JdbcSchema.java | 16 +- .../polypheny/db/adapter/jdbc/JdbcTable.java | 19 +- .../jdbc/rel2sql/RelToSqlConverter.java | 33 +- .../adapter/jdbc/rel2sql/SqlImplementor.java | 41 +- .../jdbc/sources/AbstractJdbcSource.java | 4 +- .../adapter/jdbc/sources/MonetdbSource.java | 5 +- .../db/adapter/jdbc/sources/MysqlSource.java | 5 +- .../jdbc/sources/PostgresqlSource.java | 5 +- .../jdbc/stores/AbstractJdbcStore.java | 189 +-- .../db/adapter/jdbc/stores/HsqldbStore.java | 77 +- .../db/adapter/jdbc/stores/MonetdbStore.java | 156 +-- .../adapter/jdbc/stores/PostgresqlStore.java | 174 +-- jdbc-interface/build.gradle | 1 + .../java/org/polypheny/db/jdbc/DbmsMeta.java | 6 + .../db/adapter/mongodb/MongoSchema.java | 7 +- .../db/adapter/mongodb/MongoStore.java | 192 +-- .../db/adapter/mongodb/MongoTable.java | 5 +- .../monitoring/core/MonitoringQueueImpl.java | 17 +- .../db/monitoring/events/DmlEvent.java | 11 +- .../db/monitoring/events/QueryEvent.java | 12 +- .../events/analyzer/DmlEventAnalyzer.java | 5 +- .../events/analyzer/QueryEventAnalyzer.java | 3 +- .../events/metrics/DmlDataPoint.java | 1 + .../events/metrics/QueryDataPoint.java | 1 + .../persistence/MapDbRepository.java | 23 +- .../ui/MonitoringServiceUiImpl.java | 39 +- .../java/org/polypheny/db/restapi/Rest.java | 18 +- .../org/polypheny/db/restapi/RestResult.java | 7 +- statistic/build.gradle | 1 + .../db/statistic/StatisticQueryProcessor.java | 8 + .../java/org/polypheny/db/webui/Crud.java | 72 +- .../polypheny/db/webui/models/Placement.java | 2 - .../db/webui/SchemaToJsonMapperTest.java | 4 +- 120 files changed, 6815 insertions(+), 2272 deletions(-) create mode 100644 core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionGroup.java create mode 100644 core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionPlacement.java rename core/src/main/java/org/polypheny/db/catalog/exceptions/{UnknownPartitionIdRuntimeException.java => UnknownPartitionGroupIdRuntimeException.java} (82%) create mode 100644 core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionPlacementException.java rename core/src/main/java/org/polypheny/db/ddl/exception/{PartitionNamesNotUniqueException.java => PartitionGroupNamesNotUniqueException.java} (90%) delete mode 100644 core/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java create mode 100644 core/src/main/java/org/polypheny/db/partition/FrequencyMap.java create 
mode 100644 core/src/main/java/org/polypheny/db/partition/properties/PartitionProperty.java create mode 100644 core/src/main/java/org/polypheny/db/partition/properties/TemperaturePartitionProperty.java create mode 100644 core/src/main/java/org/polypheny/db/partition/raw/RawPartitionInformation.java create mode 100644 core/src/main/java/org/polypheny/db/partition/raw/RawTemperaturePartitionInformation.java create mode 100644 dbms/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java create mode 100644 dbms/src/main/java/org/polypheny/db/partition/FrequencyMapImpl.java rename {core => dbms}/src/main/java/org/polypheny/db/partition/HashPartitionManager.java (54%) rename {core => dbms}/src/main/java/org/polypheny/db/partition/ListPartitionManager.java (57%) create mode 100644 dbms/src/main/java/org/polypheny/db/partition/PartitionManagerFactoryImpl.java rename {core => dbms}/src/main/java/org/polypheny/db/partition/RangePartitionManager.java (66%) create mode 100644 dbms/src/main/java/org/polypheny/db/partition/TemperatureAwarePartitionManager.java diff --git a/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraPhysicalNameProvider.java b/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraPhysicalNameProvider.java index d17cb3234f..7edbe773f4 100644 --- a/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraPhysicalNameProvider.java +++ b/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraPhysicalNameProvider.java @@ -23,6 +23,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.catalog.exceptions.UnknownColumnException; import org.polypheny.db.catalog.exceptions.UnknownDatabaseException; @@ -146,7 +147,8 @@ public String getPhysicalColumnName( String tableName, String logicalColumnName public void updatePhysicalColumnName( long columnId, String updatedName, boolean updatePosition ) { CatalogColumnPlacement placement = this.catalog.getColumnPlacement( this.storeId, columnId ); - this.catalog.updateColumnPlacementPhysicalNames( this.storeId, columnId, placement.physicalTableName, placement.physicalTableName, updatedName, updatePosition ); + CatalogPartitionPlacement partitionPlacement = catalog.getPartitionPlacement( this.storeId, catalog.getTable( placement.tableId ).partitionProperty.partitionIds.get( 0 ) ); + this.catalog.updateColumnPlacementPhysicalNames( this.storeId, columnId, partitionPlacement.physicalTableName, updatedName, updatePosition ); } @@ -193,4 +195,5 @@ public static String incrementNameRevision( String name ) { return type + id + "r" + rev; } + } diff --git a/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraStore.java b/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraStore.java index 97470b3786..4def4a7045 100644 --- a/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraStore.java +++ b/cassandra-adapter/src/main/java/org/polypheny/db/adapter/cassandra/CassandraStore.java @@ -55,6 +55,7 @@ import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogIndex; import org.polypheny.db.catalog.entity.CatalogKey; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import 
org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.docker.DockerInstance; import org.polypheny.db.docker.DockerManager; @@ -208,9 +209,9 @@ public void createNewSchema( SchemaPlus rootSchema, String name ) { @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - String physicalTableName = currentSchema.getConvention().physicalNameProvider.getPhysicalTableName( catalogTable.id ); - return new CassandraTable( this.currentSchema, catalogTable.name, physicalTableName, false ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + String cassandraphysicalTableName = currentSchema.getConvention().physicalNameProvider.getPhysicalTableName( catalogTable.id ); + return new CassandraTable( this.currentSchema, catalogTable.name, cassandraphysicalTableName, false ); } @@ -221,7 +222,7 @@ public Schema getCurrentSchema() { @Override - public void createTable( Context context, CatalogTable catalogTable ) { + public void createTable( Context context, CatalogTable catalogTable, List partitionIds ) { // This check is probably not required due to the check below it. if ( catalogTable.primaryKey == null ) { throw new UnsupportedOperationException( "Cannot create Cassandra Table without a primary key!" ); @@ -247,7 +248,7 @@ public void createTable( Context context, CatalogTable catalogTable ) { CassandraPhysicalNameProvider physicalNameProvider = new CassandraPhysicalNameProvider( this.getAdapterId() ); String physicalTableName = physicalNameProvider.getPhysicalTableName( catalogTable.id ); // List columns = combinedTable.getColumns(); - List columns = catalog.getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ); + List columns = catalog.getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ); CatalogColumnPlacement primaryColumnPlacement = columns.stream().filter( c -> c.columnId == primaryKeyColumnLambda ).findFirst().get(); CatalogColumn catalogColumn = catalog.getColumn( primaryColumnPlacement.columnId ); @@ -275,12 +276,11 @@ public void createTable( Context context, CatalogTable catalogTable ) { context.getStatement().getTransaction().registerInvolvedAdapter( this ); this.session.execute( createTable.build() ); - for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ) ) { + for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ) ) { catalog.updateColumnPlacementPhysicalNames( getAdapterId(), placement.columnId, this.dbKeyspace, // TODO MV: physical schema name - physicalTableName, physicalNameProvider.generatePhysicalColumnName( placement.columnId ), true ); } @@ -288,7 +288,7 @@ public void createTable( Context context, CatalogTable catalogTable ) { @Override - public void dropTable( Context context, CatalogTable catalogTable ) { + public void dropTable( Context context, CatalogTable catalogTable, List partitionIds ) { CassandraPhysicalNameProvider physicalNameProvider = new CassandraPhysicalNameProvider( this.getAdapterId() ); String physicalTableName = physicalNameProvider.getPhysicalTableName( catalogTable.id ); SimpleStatement dropTable = SchemaBuilder.dropTable( this.dbKeyspace, physicalTableName ).build(); @@ -315,7 +315,6 @@ public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn getAdapterId(), catalogColumn.id, this.dbKeyspace, - physicalTableName, 
physicalColumnName, false ); } @@ -325,7 +324,10 @@ public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn public void dropColumn( Context context, CatalogColumnPlacement columnPlacement ) { // public void dropColumn( Context context, CatalogCombinedTable catalogTable, CatalogColumn catalogColumn ) { // CassandraPhysicalNameProvider physicalNameProvider = new CassandraPhysicalNameProvider( context.getStatement().getTransaction().getCatalog(), this.getStoreId() ); - String physicalTableName = columnPlacement.physicalTableName; + + CatalogPartitionPlacement partitionPlacement = catalog.getPartitionPlacement( getAdapterId(), catalog.getTable( columnPlacement.tableId ).partitionProperty.partitionIds.get( 0 ) ); + + String physicalTableName = partitionPlacement.physicalTableName; String physicalColumnName = columnPlacement.physicalColumnName; SimpleStatement dropColumn = SchemaBuilder.alterTable( this.dbKeyspace, physicalTableName ) @@ -337,14 +339,14 @@ public void dropColumn( Context context, CatalogColumnPlacement columnPlacement @Override - public void addIndex( Context context, CatalogIndex catalogIndex ) { + public void addIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { throw new RuntimeException( "Cassandra adapter does not support adding indexes" ); } @Override - public void dropIndex( Context context, CatalogIndex catalogIndex ) { - throw new RuntimeException( "Cassandra adaper does not support dropping indexes" ); + public void dropIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { + throw new RuntimeException( "Cassandra adapter does not support dropping indexes" ); } diff --git a/catalog/src/main/java/org/polypheny/db/catalog/CatalogImpl.java b/catalog/src/main/java/org/polypheny/db/catalog/CatalogImpl.java index 98ef360dd3..858ffb71d9 100644 --- a/catalog/src/main/java/org/polypheny/db/catalog/CatalogImpl.java +++ b/catalog/src/main/java/org/polypheny/db/catalog/CatalogImpl.java @@ -16,7 +16,6 @@ package org.polypheny.db.catalog; - import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import java.io.File; @@ -55,6 +54,8 @@ import org.polypheny.db.catalog.entity.CatalogIndex; import org.polypheny.db.catalog.entity.CatalogKey; import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogPartitionGroup; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogPrimaryKey; import org.polypheny.db.catalog.entity.CatalogQueryInterface; import org.polypheny.db.catalog.entity.CatalogSchema; @@ -75,7 +76,8 @@ import org.polypheny.db.catalog.exceptions.UnknownIndexException; import org.polypheny.db.catalog.exceptions.UnknownIndexIdRuntimeException; import org.polypheny.db.catalog.exceptions.UnknownKeyIdRuntimeException; -import org.polypheny.db.catalog.exceptions.UnknownPartitionIdRuntimeException; +import org.polypheny.db.catalog.exceptions.UnknownPartitionGroupIdRuntimeException; +import org.polypheny.db.catalog.exceptions.UnknownPartitionPlacementException; import org.polypheny.db.catalog.exceptions.UnknownQueryInterfaceException; import org.polypheny.db.catalog.exceptions.UnknownQueryInterfaceRuntimeException; import org.polypheny.db.catalog.exceptions.UnknownSchemaException; @@ -85,8 +87,10 @@ import org.polypheny.db.catalog.exceptions.UnknownUserException; import org.polypheny.db.catalog.exceptions.UnknownUserIdRuntimeException; import org.polypheny.db.config.RuntimeConfig; 
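The imports added here wire CatalogImpl to the new partition subsystem (FrequencyMap, PartitionManager, PartitionProperty). For orientation, a minimal sketch of how this patch builds the default property for an unpartitioned table, mirroring the builder calls that appear in addTable() further down; the local variable names are illustrative only:

// Every table now carries a PartitionProperty. A table classified as UNPARTITIONED
// still gets exactly one "full" partition group containing one internal partition.
List<Long> partitionGroupIds = new ArrayList<>();
partitionGroupIds.add( addPartitionGroup( id, "full", schemaId, PartitionType.NONE, 1, new ArrayList<>(), true ) );
CatalogPartitionGroup defaultGroup = getPartitionGroup( partitionGroupIds.get( 0 ) );

PartitionProperty partitionProperty = PartitionProperty.builder()
        .partitionType( PartitionType.NONE )                                // not user-partitioned
        .partitionGroupIds( ImmutableList.copyOf( partitionGroupIds ) )     // the single "full" group
        .partitionIds( ImmutableList.copyOf( defaultGroup.partitionIds ) )  // its one internal partition
        .reliesOnPeriodicChecks( false )  // only temperature-aware tables need the periodic frequency check
        .build();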
+import org.polypheny.db.partition.FrequencyMap; import org.polypheny.db.partition.PartitionManager; import org.polypheny.db.partition.PartitionManagerFactory; +import org.polypheny.db.partition.properties.PartitionProperty; import org.polypheny.db.rel.RelCollation; import org.polypheny.db.rel.RelNode; import org.polypheny.db.rel.type.RelDataType; @@ -148,9 +152,15 @@ public class CatalogImpl extends Catalog { private static final AtomicLong tableIdBuilder = new AtomicLong( 1 ); private static final AtomicLong columnIdBuilder = new AtomicLong( 1 ); - private static final AtomicLong partitionIdBuilder = new AtomicLong(); + private static final AtomicLong partitionGroupIdBuilder = new AtomicLong(); + private static final AtomicLong partitionIdBuilder = new AtomicLong( 1000 ); + private static BTreeMap partitionGroups; private static BTreeMap partitions; - private static HTreeMap> dataPartitionPlacement; // + private static HTreeMap> dataPartitionGroupPlacement; // + private static List frequencyDependentTables = new ArrayList<>(); //all tables to consider in periodic run + + // adapterId + Partition + private static BTreeMap partitionPlacements; // Keeps a list of all tableIDs which are going to be deleted. This is required to avoid constraints when recursively // removing a table and all placements and partitions. Otherwise **validatePartitionDistribution()** inside the Catalog would throw an error. @@ -333,7 +343,7 @@ public void restoreColumnPlacements( Transaction transaction ) { Map> restoredTables = new HashMap<>(); for ( CatalogColumn c : columns.values() ) { - List placements = getColumnPlacements( c.id ); + List placements = getColumnPlacement( c.id ); CatalogTable catalogTable = getTable( c.tableId ); if ( !catalogTable.isView() ) { @@ -349,11 +359,11 @@ public void restoreColumnPlacements( Transaction transaction ) { // TODO only full placements atm here if ( !restoredTables.containsKey( store.getAdapterId() ) ) { - store.createTable( transaction.createStatement().getPrepareContext(), catalogTable ); + store.createTable( transaction.createStatement().getPrepareContext(), catalogTable, catalogTable.partitionProperty.partitionIds ); restoredTables.put( store.getAdapterId(), Collections.singletonList( catalogTable.id ) ); } else if ( !(restoredTables.containsKey( store.getAdapterId() ) && restoredTables.get( store.getAdapterId() ).contains( catalogTable.id )) ) { - store.createTable( transaction.createStatement().getPrepareContext(), catalogTable ); + store.createTable( transaction.createStatement().getPrepareContext(), catalogTable, catalogTable.partitionProperty.partitionIds ); List ids = new ArrayList<>( restoredTables.get( store.getAdapterId() ) ); ids.add( catalogTable.id ); restoredTables.put( store.getAdapterId(), ids ); @@ -369,13 +379,13 @@ public void restoreColumnPlacements( Transaction transaction ) { DataStore store = manager.getStore( p.adapterId ); if ( !restoredTables.containsKey( store.getAdapterId() ) ) { - store.createTable( transaction.createStatement().getPrepareContext(), table ); + store.createTable( transaction.createStatement().getPrepareContext(), table, table.partitionProperty.partitionIds ); List ids = new ArrayList<>(); ids.add( table.id ); restoredTables.put( store.getAdapterId(), ids ); } else if ( !(restoredTables.containsKey( store.getAdapterId() ) && restoredTables.get( store.getAdapterId() ).contains( table.id )) ) { - store.createTable( transaction.createStatement().getPrepareContext(), table ); + store.createTable( 
transaction.createStatement().getPrepareContext(), table, table.partitionProperty.partitionIds ); List ids = new ArrayList<>( restoredTables.get( store.getAdapterId() ) ); ids.add( table.id ); restoredTables.put( store.getAdapterId(), ids ); @@ -427,6 +437,7 @@ private void restoreAllIdBuilders() { restoreIdBuilder( adapters, adapterIdBuilder ); restoreIdBuilder( queryInterfaces, queryInterfaceIdBuilder ); restoreIdBuilder( foreignKeys, foreignKeyIdBuilder ); + restoreIdBuilder( partitionGroups, partitionGroupIdBuilder ); restoreIdBuilder( partitions, partitionIdBuilder ); // Restore physical position builder @@ -531,12 +542,18 @@ private void initTableInfo( DB db ) { .keySerializer( new SerializerArrayTuple( Serializer.LONG, Serializer.LONG, Serializer.STRING ) ) .valueSerializer( Serializer.JAVA ) .createOrOpen(); + partitionGroups = db.treeMap( "partitionGroups", Serializer.LONG, Serializer.JAVA ).createOrOpen(); partitions = db.treeMap( "partitions", Serializer.LONG, Serializer.JAVA ).createOrOpen(); - dataPartitionPlacement = db.hashMap( "dataPartitionPlacement" ) + dataPartitionGroupPlacement = db.hashMap( "dataPartitionPlacement" ) .keySerializer( new SerializerArrayTuple( Serializer.INTEGER, Serializer.LONG ) ) .valueSerializer( new GenericSerializer>() ) .createOrOpen(); + partitionPlacements = db.treeMap( "partitionPlacements", new SerializerArrayTuple( Serializer.INTEGER, Serializer.LONG ), Serializer.JAVA ).createOrOpen(); + + //Restores all Tables dependent on periodic checks like TEMPERATURE Partitioning + frequencyDependentTables = tables.values().stream().filter( t -> t.partitionProperty.reliesOnPeriodicChecks ).map( t -> t.id ).collect( Collectors.toList() ); + } @@ -735,8 +752,12 @@ private void addDefaultColumn( CatalogAdapter csv, CatalogTable table, String na if ( table.name.equals( "emp" ) || table.name.equals( "work" ) ) { filename += ".gz"; } + addColumnPlacement( csv.id, colId, PlacementType.AUTOMATIC, filename, table.name, name, null ); updateColumnPlacementPhysicalPosition( csv.id, colId, position ); + + long partitionId = getPartitionsOnDataPlacement( csv.id, table.id ).get( 0 ); + addPartitionPlacement( csv.id, table.id, partitionId, PlacementType.AUTOMATIC, filename, table.name ); } } @@ -1273,23 +1294,43 @@ public long addTable( String name, long schemaId, int ownerId, TableType tableTy long id = tableIdBuilder.getAndIncrement(); CatalogSchema schema = getSchema( schemaId ); CatalogUser owner = getUser( ownerId ); - CatalogTable table = new CatalogTable( - id, - name, - ImmutableList.of(), - schemaId, - schema.databaseId, - ownerId, - owner.name, - tableType, - null, - ImmutableMap.of(), - modifiable ); - - updateTableLogistics( name, schemaId, id, schema, table ); - openTable = id; + try { + //Technically every Table is partitioned. 
But tables classified as UNPARTITIONED only consist of one PartitionGroup and one large partition + List partitionGroupIds = new ArrayList<>(); + partitionGroupIds.add( addPartitionGroup( id, "full", schemaId, PartitionType.NONE, 1, new ArrayList<>(), true ) ); + //get All(only one) PartitionGroups and then get all partitionIds for each PG and add them to completeList of partitionIds + CatalogPartitionGroup defaultUnpartitionedGroup = getPartitionGroup( partitionGroupIds.get( 0 ) ); + + PartitionProperty partitionProperty = PartitionProperty.builder() + .partitionType( PartitionType.NONE ) + .partitionGroupIds( ImmutableList.copyOf( partitionGroupIds ) ) + .partitionIds( ImmutableList.copyOf( defaultUnpartitionedGroup.partitionIds ) ) + .reliesOnPeriodicChecks( false ) + .build(); + + CatalogTable table = new CatalogTable( + id, + name, + ImmutableList.of(), + schemaId, + schema.databaseId, + ownerId, + owner.name, + tableType, + null, + ImmutableMap.of(), + modifiable, + partitionProperty ); + + updateTableLogistics( name, schemaId, id, schema, table ); + openTable = id; + + } catch ( GenericCatalogException e ) { + throw new RuntimeException( "Error when adding table " + name, e ); + } return id; + } @@ -1312,6 +1353,13 @@ public long addView( String name, long schemaId, int ownerId, TableType tableTyp CatalogSchema schema = getSchema( schemaId ); CatalogUser owner = getUser( ownerId ); + PartitionProperty partitionProperty = PartitionProperty.builder() + .partitionType( PartitionType.NONE ) + .reliesOnPeriodicChecks( false ) + .partitionIds( ImmutableList.copyOf( new ArrayList<>() ) ) + .partitionGroupIds( ImmutableList.copyOf( new ArrayList<>() ) ) + .build(); + if ( tableType == TableType.VIEW ) { CatalogView viewTable = new CatalogView( id, @@ -1328,7 +1376,8 @@ public long addView( String name, long schemaId, int ownerId, TableType tableTyp modifiable, relCollation, ImmutableMap.copyOf( underlyingTables ), - fieldList + fieldList, + partitionProperty ); addConnectedViews( underlyingTables, viewTable.id ); updateTableLogistics( name, schemaId, id, schema, viewTable ); @@ -1380,6 +1429,7 @@ public void addConnectedViews( Map> underlyingTables, long view * * @param catalogView view to be deleted */ + @Override public void deleteViewDependencies( CatalogView catalogView ) { for ( long id : catalogView.getUnderlyingTables().keySet() ) { CatalogTable old = getTable( id ); @@ -1443,6 +1493,16 @@ public void deleteTable( long tableId ) { synchronized ( this ) { schemaChildren.replace( table.schemaId, ImmutableList.copyOf( children ) ); + if ( table.partitionProperty.reliesOnPeriodicChecks ) { + removeTableFromPeriodicProcessing( tableId ); + } + + if ( table.isPartitioned ) { + for ( Long partitionGroupId : Objects.requireNonNull( table.partitionProperty.partitionGroupIds ) ) { + deletePartitionGroup( table.id, table.schemaId, partitionGroupId ); + } + } + for ( Long columnId : Objects.requireNonNull( tableChildren.get( tableId ) ) ) { deleteColumn( columnId ); } @@ -1471,23 +1531,39 @@ public void deleteTable( long tableId ) { public void setTableOwner( long tableId, int ownerId ) { CatalogTable old = getTable( tableId ); CatalogUser user = getUser( ownerId ); - CatalogTable table = new CatalogTable( old.id, - old.name, - old.columnIds, - old.schemaId, - old.databaseId, - ownerId, - user.name, - old.tableType, - old.primaryKey, - old.placementsByAdapter, - old.modifiable, - old.numPartitions, - old.partitionType, - old.partitionIds, - old.partitionColumnId, - old.isPartitioned, - 
old.connectedViews ); + + CatalogTable table; + if ( old.isPartitioned ) { + table = new CatalogTable( old.id + , old.name + , old.columnIds + , old.schemaId + , old.databaseId + , ownerId + , user.name + , old.tableType + , old.primaryKey + , old.placementsByAdapter + , old.modifiable + , old.partitionType + , old.partitionColumnId + , old.partitionProperty + , old.connectedViews ); + } else { + table = new CatalogTable( + old.id, + old.name, + old.columnIds, + old.schemaId, + old.databaseId, + ownerId, + user.name, + old.tableType, + old.primaryKey, + old.placementsByAdapter, + old.modifiable, + old.partitionProperty ); + } synchronized ( this ) { tables.replace( tableId, table ); tableNames.replace( new Object[]{ table.databaseId, table.schemaId, table.name }, table ); @@ -1505,23 +1581,40 @@ public void setTableOwner( long tableId, int ownerId ) { @Override public void setPrimaryKey( long tableId, Long keyId ) { CatalogTable old = getTable( tableId ); - CatalogTable table = new CatalogTable( old.id, - old.name, - old.columnIds, - old.schemaId, - old.databaseId, - old.ownerId, - old.ownerName, - old.tableType, - keyId, - old.placementsByAdapter, - old.modifiable, - old.numPartitions, - old.partitionType, - old.partitionIds, - old.partitionColumnId, - old.isPartitioned, - old.connectedViews ); + CatalogTable table; + + //This is needed otherwise this would reset the already partitioned table + if ( old.isPartitioned ) { + table = new CatalogTable( old.id + , old.name + , old.columnIds + , old.schemaId + , old.databaseId + , old.ownerId + , old.ownerName + , old.tableType + , keyId + , old.placementsByAdapter + , old.modifiable + , old.partitionType + , old.partitionColumnId + , old.partitionProperty + , old.connectedViews ); + } else { + table = new CatalogTable( + old.id, + old.name, + old.columnIds, + old.schemaId, + old.databaseId, + old.ownerId, + old.ownerName, + old.tableType, + keyId, + old.placementsByAdapter, + old.modifiable, + old.partitionProperty ); + } synchronized ( this ) { tables.replace( tableId, table ); tableNames.replace( new Object[]{ table.databaseId, table.schemaId, table.name }, table ); @@ -1546,12 +1639,13 @@ public void setPrimaryKey( long tableId, Long keyId ) { * @param physicalSchemaName The schema name on the adapter * @param physicalTableName The table name on the adapter * @param physicalColumnName The column name on the adapter - * @param partitionIds List of partitions to place on this column placement (may be null) + * @param partitionGroupIds List of partitions to place on this column placement (may be null) */ @Override - public void addColumnPlacement( int adapterId, long columnId, PlacementType placementType, String physicalSchemaName, String physicalTableName, String physicalColumnName, List partitionIds ) { + public void addColumnPlacement( int adapterId, long columnId, PlacementType placementType, String physicalSchemaName, String physicalTableName, String physicalColumnName, List partitionGroupIds ) { CatalogColumn column = Objects.requireNonNull( columns.get( columnId ) ); CatalogAdapter store = Objects.requireNonNull( adapters.get( adapterId ) ); + CatalogColumnPlacement placement = new CatalogColumnPlacement( column.tableId, columnId, @@ -1559,7 +1653,6 @@ public void addColumnPlacement( int adapterId, long columnId, PlacementType plac store.uniqueName, placementType, physicalSchemaName, - physicalTableName, physicalColumnName, physicalPositionBuilder.getAndIncrement() ); @@ -1581,7 +1674,9 @@ public void addColumnPlacement( int adapterId, 
long columnId, PlacementType plac // Required because otherwise an already partitioned table would be reset to a regular table due to the different constructors. if ( old.isPartitioned ) { - log.debug( " Table '{}' is partitioned.", old.name ); + if ( log.isDebugEnabled() ) { + log.debug( " Table '{}' is partitioned.", old.name ); + } table = new CatalogTable( old.id, old.name, @@ -1594,33 +1689,11 @@ public void addColumnPlacement( int adapterId, long columnId, PlacementType plac old.primaryKey, ImmutableMap.copyOf( placementsByStore ), old.modifiable, - old.numPartitions, old.partitionType, - old.partitionIds, old.partitionColumnId, + old.partitionProperty, old.connectedViews ); - // If table is partitioned and no concrete partitions are defined place all partitions on columnPlacement - if ( partitionIds == null ) { - partitionIds = table.partitionIds; - } - - // Only executed if this is the first placement on the store - if ( !dataPartitionPlacement.containsKey( new Object[]{ adapterId, column.tableId } ) ) { - if ( log.isDebugEnabled() ) { - log.debug( "Table '{}.{}' does not exists in DataPartitionPlacements so far. Assigning partitions {}", - store.uniqueName, - old.name, partitionIds ); - } - updatePartitionsOnDataPlacement( adapterId, column.tableId, partitionIds ); - } else { - if ( log.isDebugEnabled() ) { - log.debug( "Table '{}.{}' already exists in DataPartitionPlacement, keeping assigned partitions {}", - store.uniqueName, - old.name, - getPartitionsOnDataPlacement( adapterId, old.id ) ); - } - } } else { table = new CatalogTable( @@ -1635,14 +1708,31 @@ public void addColumnPlacement( int adapterId, long columnId, PlacementType plac old.primaryKey, ImmutableMap.copyOf( placementsByStore ), old.modifiable, - old.numPartitions, - old.partitionType, - old.partitionIds, - old.partitionColumnId, - old.isPartitioned, + old.partitionProperty, old.connectedViews ); } + // If table is partitioned and no concrete partitions are defined place all partitions on columnPlacement + if ( partitionGroupIds == null ) { + partitionGroupIds = table.partitionProperty.partitionGroupIds; + } + + // Only executed if this is the first placement on the store + if ( !dataPartitionGroupPlacement.containsKey( new Object[]{ adapterId, column.tableId } ) ) { + if ( log.isDebugEnabled() ) { + log.debug( "Table '{}.{}' does not exists in DataPartitionPlacements so far. Assigning partitions {}", + store.uniqueName, old.name, partitionGroupIds ); + } + updatePartitionGroupsOnDataPlacement( adapterId, column.tableId, partitionGroupIds ); + } else { + if ( log.isDebugEnabled() ) { + log.debug( "Table '{}.{}' already exists in DataPartitionPlacement, keeping assigned partitions {}", + store.uniqueName, + old.name, + getPartitionGroupsOnDataPlacement( adapterId, old.id ) ); + } + } + tables.replace( column.tableId, table ); tableNames.replace( new Object[]{ table.databaseId, table.schemaId, table.name }, table ); } @@ -1651,13 +1741,47 @@ public void addColumnPlacement( int adapterId, long columnId, PlacementType plac /** - * Deletes a column placement from a specified adapter. + * Change physical names of a partition placement. 
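Because physical table names now live on CatalogPartitionPlacement (see updatePartitionPlacementPhysicalNames below) rather than on the column placement, adapters resolve them per partition. A hedged sketch of the lookup pattern, modeled on the Cassandra adapter hunks above; variable names are illustrative:

// Resolve the physical table name via the partition placement, as CassandraStore does above.
// For a table with only the default partition, the first partition id is sufficient.
CatalogTable table = catalog.getTable( columnPlacement.tableId );
CatalogPartitionPlacement partitionPlacement =
        catalog.getPartitionPlacement( adapterId, table.partitionProperty.partitionIds.get( 0 ) );
String physicalTableName = partitionPlacement.physicalTableName;   // moved off CatalogColumnPlacement
String physicalColumnName = columnPlacement.physicalColumnName;    // column names stay on the column placement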
+ * + * @param adapterId The id of the adapter + * @param partitionId The id of the partition + * @param physicalSchemaName The physical schema name + * @param physicalTableName The physical table name + */ + @Override + public void updatePartitionPlacementPhysicalNames( int adapterId, long partitionId, String physicalSchemaName, String physicalTableName ) { + try { + CatalogPartitionPlacement old = Objects.requireNonNull( partitionPlacements.get( new Object[]{ adapterId, partitionId } ) ); + CatalogPartitionPlacement placement = new CatalogPartitionPlacement( + old.tableId, + old.adapterId, + old.adapterUniqueName, + old.placementType, + physicalSchemaName, + physicalTableName, + old.partitionId ); + + synchronized ( this ) { + partitionPlacements.replace( new Object[]{ adapterId, partitionId }, placement ); + } + listeners.firePropertyChange( "partitionPlacement", old, placement ); + } catch ( NullPointerException e ) { + getAdapter( adapterId ); + getPartition( partitionId ); + throw new UnknownPartitionPlacementException( adapterId, partitionId ); + } + } + + + /** + * Deletes all dependent column placements * * @param adapterId The id of the adapter * @param columnId The id of the column */ @Override public void deleteColumnPlacement( int adapterId, long columnId ) { + boolean lastPlacementOnStore = false; CatalogTable oldTable = getTable( getColumn( columnId ).tableId ); Map> placementsByStore = new HashMap<>( oldTable.placementsByAdapter ); @@ -1677,8 +1801,8 @@ public void deleteColumnPlacement( int adapterId, long columnId ) { if ( log.isDebugEnabled() ) { log.debug( "Is flagged for deletion {}", isTableFlaggedForDeletion( oldTable.id ) ); } - if ( isTableFlaggedForDeletion( oldTable.id ) ) { - if ( !validatePartitionDistribution( adapterId, oldTable.id, columnId ) ) { + if ( !isTableFlaggedForDeletion( oldTable.id ) ) { + if ( !validatePartitionGroupDistribution( adapterId, oldTable.id, columnId, 1 ) ) { throw new RuntimeException( "Partition Distribution failed" ); } } @@ -1698,15 +1822,14 @@ public void deleteColumnPlacement( int adapterId, long columnId ) { oldTable.primaryKey, ImmutableMap.copyOf( placementsByStore ), oldTable.modifiable, - oldTable.numPartitions, oldTable.partitionType, - oldTable.partitionIds, oldTable.partitionColumnId, + oldTable.partitionProperty, oldTable.connectedViews ); //Check if this is the last placement on store. If so remove dataPartitionPlacement if ( lastPlacementOnStore ) { - dataPartitionPlacement.remove( new Object[]{ adapterId, oldTable.id } ); + dataPartitionGroupPlacement.remove( new Object[]{ adapterId, oldTable.id } ); if ( log.isDebugEnabled() ) { log.debug( "Column '{}' was the last placement on store: '{}.{}' ", getColumn( columnId ).name, @@ -1727,11 +1850,7 @@ public void deleteColumnPlacement( int adapterId, long columnId ) { oldTable.primaryKey, ImmutableMap.copyOf( placementsByStore ), oldTable.modifiable, - oldTable.numPartitions, - oldTable.partitionType, - oldTable.partitionIds, - oldTable.partitionColumnId, - oldTable.isPartitioned, + oldTable.partitionProperty, oldTable.connectedViews ); } @@ -1744,7 +1863,8 @@ public void deleteColumnPlacement( int adapterId, long columnId ) { /** - * Get a specific column placement. + * Get a column placement independent of any partition. 
+ * Mostly used to get information about the placement itself rather than the chunk of data * * @param adapterId The id of the adapter * @param columnId The id of the column @@ -1777,7 +1897,8 @@ public boolean checkIfExistsColumnPlacement( int adapterId, long columnId ) { /** - * Get column placements on a adapter + * Get column placements on an adapter, on column detail level. + * Only returns one ColumnPlacement per column on adapter. Ignores multiplicity due to different partitionIds * * @param adapterId The id of the adapter * @return List of column placements on the specified adapter @@ -1789,13 +1910,14 @@ public List getColumnPlacementsOnAdapter( int adapterId /** - * Get column placements of a specific table on a specific adapter + * Get column placements of a specific table on a specific adapter, on column detail level. + * Only returns one ColumnPlacement per column on adapter. Ignores multiplicity due to different partitionIds * * @param adapterId The id of the adapter * @return List of column placements of the table on the specified adapter */ @Override - public List getColumnPlacementsOnAdapter( int adapterId, long tableId ) { + public List getColumnPlacementsOnAdapterPerTable( int adapterId, long tableId ) { final Comparator columnPlacementComparator = Comparator.comparingInt( p -> getColumn( p.columnId ).position ); return getColumnPlacementsOnAdapter( adapterId ) .stream() @@ -1826,13 +1948,14 @@ public List getColumnPlacementsByColumn( long columnId ) /** - * Get all column placements of a column + * Get all column placements of a column. * * @param columnId The id of the specific column * @return List of column placements of the specific column */ @Override - public List getColumnPlacements( long columnId ) { + public List getColumnPlacement( long columnId ) { return columnPlacements.values() .stream() .filter( p -> p.columnId == columnId ) @@ -1877,7 +2000,6 @@ public void updateColumnPlacementType( int adapterId, long columnId, PlacementTy old.adapterUniqueName, placementType, old.physicalSchemaName, - old.physicalTableName, old.physicalColumnName, old.physicalPosition ); synchronized ( this ) { @@ -1910,7 +2032,6 @@ public void updateColumnPlacementPhysicalPosition( int adapterId, long columnId, old.adapterUniqueName, old.placementType, old.physicalSchemaName, - old.physicalTableName, old.physicalColumnName, position ); synchronized ( this ) { @@ -1925,7 +2046,8 @@ public void updateColumnPlacementPhysicalPosition( int adapterId, long columnId, } /** * Update physical position of a column placement on a specified adapter. Uses auto-increment to get the globally increasing number.
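All of the copy-on-update sites above shrink by one argument because physicalTableName was removed from CatalogColumnPlacement. A representative sketch of the remaining constructor call, with the argument list taken from the hunks above:

// The immutable copy-on-update pattern used throughout CatalogImpl, now without physicalTableName:
CatalogColumnPlacement placement = new CatalogColumnPlacement(
        old.tableId,
        old.columnId,
        old.adapterId,
        old.adapterUniqueName,
        old.placementType,
        old.physicalSchemaName,
        old.physicalColumnName,   // physicalTableName is no longer part of this constructor
        old.physicalPosition );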
* * @param adapterId The id of the adapter @@ -1942,7 +2064,6 @@ public void updateColumnPlacementPhysicalPosition( int adapterId, long columnId old.adapterUniqueName, old.placementType, old.physicalSchemaName, - old.physicalTableName, old.physicalColumnName, physicalPositionBuilder.getAndIncrement() ); synchronized ( this ) { @@ -1963,12 +2084,11 @@ public void updateColumnPlacementPhysicalPosition( int adapterId, long columnId * @param adapterId The id of the adapter * @param columnId The id of the column * @param physicalSchemaName The physical schema name - * @param physicalTableName The physical table name * @param physicalColumnName The physical column name * @param updatePhysicalColumnPosition Whether to reset the column position (highest number in the table; represents that the column is now at the last position) */ @Override - public void updateColumnPlacementPhysicalNames( int adapterId, long columnId, String physicalSchemaName, String physicalTableName, String physicalColumnName, boolean updatePhysicalColumnPosition ) { + public void updateColumnPlacementPhysicalNames( int adapterId, long columnId, String physicalSchemaName, String physicalColumnName, boolean updatePhysicalColumnPosition ) { try { CatalogColumnPlacement old = Objects.requireNonNull( columnPlacements.get( new Object[]{ adapterId, columnId } ) ); CatalogColumnPlacement placement = new CatalogColumnPlacement( @@ -1978,7 +2098,6 @@ public void updateColumnPlacementPhysicalNames( int adapterId, long columnId, St old.adapterUniqueName, old.placementType, physicalSchemaName, - physicalTableName, physicalColumnName, updatePhysicalColumnPosition ? physicalPositionBuilder.getAndIncrement() : old.physicalPosition ); synchronized ( this ) { @@ -2144,7 +2263,10 @@ public long addColumn( String name, long tableId, int position, PolyType type, P List columnIds = new ArrayList<>( table.columnIds ); columnIds.add( id ); - CatalogTable updatedTable = table.getTableWithColumns( ImmutableList.copyOf( columnIds ) ); + + CatalogTable updatedTable; + + updatedTable = table.getTableWithColumns( ImmutableList.copyOf( columnIds ) ); tables.replace( tableId, updatedTable ); tableNames.replace( new Object[]{ updatedTable.databaseId, updatedTable.schemaId, updatedTable.name }, updatedTable ); @@ -2260,7 +2382,7 @@ public void setNullable( long columnId, boolean nullable ) throws GenericCatalog } } else { // TODO: Check that the column does not contain any null values - getColumnPlacements( columnId ); + getColumnPlacement( columnId ); } CatalogColumn column = new CatalogColumn( old.id, @@ -2342,30 +2464,36 @@ public void deleteColumn( long columnId ) { CatalogTable old = getTable( column.tableId ); List columnIds = new ArrayList<>( old.columnIds ); columnIds.remove( columnId ); - CatalogTable table = new CatalogTable( old.id, - old.name, - ImmutableList.copyOf( columnIds ), - old.schemaId, - old.databaseId, - old.ownerId, - old.ownerName, - old.tableType, - old.primaryKey, - old.placementsByAdapter, - old.modifiable, - old.numPartitions, - old.partitionType, - old.partitionIds, - old.partitionColumnId, - old.isPartitioned, - old.connectedViews ); + CatalogTable table; + + //This is needed otherwise this would reset the already partitioned table + if ( old.isPartitioned ) { + table = new CatalogTable( old.id + , old.name + , ImmutableList.copyOf( columnIds ) + , old.schemaId + , old.databaseId + , old.ownerId + , old.ownerName + , old.tableType + , old.primaryKey + , old.placementsByAdapter + , old.modifiable + , old.partitionType + , 
old.partitionColumnId + , old.isPartitioned + , old.partitionProperty + , old.connectedViews ); + } else { + table = new CatalogTable( old.id, old.name, ImmutableList.copyOf( columnIds ), old.schemaId, old.databaseId, old.ownerId, old.ownerName, old.tableType, old.primaryKey, old.placementsByAdapter, old.modifiable, old.partitionProperty, old.connectedViews ); + } synchronized ( this ) { columnNames.remove( new Object[]{ column.databaseId, column.schemaId, column.tableId, column.name } ); tableChildren.replace( column.tableId, ImmutableList.copyOf( children ) ); deleteDefaultValue( columnId ); - for ( CatalogColumnPlacement p : getColumnPlacements( columnId ) ) { + for ( CatalogColumnPlacement p : getColumnPlacement( columnId ) ) { deleteColumnPlacement( p.adapterId, p.columnId ); } tables.replace( column.tableId, table ); @@ -3223,26 +3351,232 @@ public void deleteQueryInterface( int ifaceId ) { * * @param tableId The unique id of the table * @param schemaId The unique id of the schema - * @param ownerId the partitionId to be deleted * @param partitionType Partition type of the added partition group * @return The id of the created partition group */ @Override - public long addPartition( long tableId, String partitionName, long schemaId, int ownerId, PartitionType partitionType, List effectivePartitionQualifier, boolean isUnbound ) throws GenericCatalogException { + public long addPartitionGroup( long tableId, String partitionGroupName, long schemaId, PartitionType partitionType, long numberOfInternalPartitions, List effectivePartitionGroupQualifier, boolean isUnbound ) throws GenericCatalogException { + try { + long id = partitionGroupIdBuilder.getAndIncrement(); + if ( log.isDebugEnabled() ) { + log.debug( "Creating partitionGroup of type '{}' with id '{}'", partitionType, id ); + } + CatalogSchema schema = Objects.requireNonNull( schemas.get( schemaId ) ); + + List partitionIds = new ArrayList<>(); + for ( int i = 0; i < numberOfInternalPartitions; i++ ) { + long partId = addPartition( tableId, schemaId, id, effectivePartitionGroupQualifier, isUnbound ); + partitionIds.add( partId ); + } + + CatalogPartitionGroup partitionGroup = new CatalogPartitionGroup( + id, + partitionGroupName, + tableId, + schemaId, + schema.databaseId, + 0, + null, + ImmutableList.copyOf( partitionIds ), + isUnbound ); + + synchronized ( this ) { + partitionGroups.put( id, partitionGroup ); + } + //listeners.firePropertyChange( "partitionGroups", null, partitionGroup ); + return id; + } catch ( NullPointerException e ) { + throw new GenericCatalogException( e ); + } + } + + + /** + * Should only be called from mergePartitions(). Deletes a single partition group and all of its references.
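The addPartitionGroup() method above establishes the two-level hierarchy (partition group -> internal partitions) that temperature-aware placement operates on. A hypothetical sketch of how a hot and a cold group might be created; PartitionType.TEMPERATURE and the group names HOT/COLD are assumptions based on the TemperatureAwarePartitionManager this patch adds, not taken from this hunk:

// Hypothetical: two groups, each backed by several internal partitions that can later
// be moved between the groups via updatePartition( partitionId, partitionGroupId ).
long hotGroupId = catalog.addPartitionGroup( tableId, "HOT", schemaId,
        PartitionType.TEMPERATURE, 10, new ArrayList<>(), false );   // 10 internal partitions, illustrative
long coldGroupId = catalog.addPartitionGroup( tableId, "COLD", schemaId,
        PartitionType.TEMPERATURE, 10, new ArrayList<>(), true );    // unbound group holds the remainder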
+ * + * @param tableId The unique id of the table + * @param schemaId The unique id of the schema + * @param partitionGroupId The partitionGroupId to be deleted + */ + @Override + public void deletePartitionGroup( long tableId, long schemaId, long partitionGroupId ) throws UnknownPartitionGroupIdRuntimeException { + if ( log.isDebugEnabled() ) { + log.debug( "Deleting partitionGroup with id '{}' on table with id '{}'", partitionGroupId, tableId ); + } + // Check whether this partition group id exists + CatalogPartitionGroup partitionGroup = getPartitionGroup( partitionGroupId ); + synchronized ( this ) { + for ( long partitionId : partitionGroup.partitionIds ) { + deletePartition( tableId, schemaId, partitionId ); + } + + for ( CatalogAdapter adapter : getAdaptersByPartitionGroup( tableId, partitionGroupId ) ) { + deletePartitionGroupsOnDataPlacement( adapter.id, partitionGroupId ); + } + + partitionGroups.remove( partitionGroupId ); + } + } + + + /** + * Updates the specified partition group with the attached partitionIds + * + * @param partitionGroupId Partition group to be updated + * @param partitionIds List of new partitionIds + */ + @Override + public void updatePartitionGroup( long partitionGroupId, List partitionIds ) throws UnknownPartitionGroupIdRuntimeException { + + // Check whether this partition group id exists + CatalogPartitionGroup partitionGroup = getPartitionGroup( partitionGroupId ); + + CatalogPartitionGroup updatedCatalogPartitionGroup = new CatalogPartitionGroup( + partitionGroup.id, + partitionGroup.partitionGroupName, + partitionGroup.tableId, + partitionGroup.schemaId, + partitionGroup.databaseId, + partitionGroup.partitionKey, + partitionGroup.partitionQualifiers, + ImmutableList.copyOf( partitionIds ), + partitionGroup.isUnbound ); + + synchronized ( this ) { + partitionGroups.replace( partitionGroupId, updatedCatalogPartitionGroup ); + } + listeners.firePropertyChange( "partitionGroup", partitionGroup, updatedCatalogPartitionGroup ); + } + + + /** + * Adds a partition to an already existing partition group + * + * @param partitionGroupId Group to add to + * @param partitionId Partition to add + */ + @Override + public void addPartitionToGroup( long partitionGroupId, Long partitionId ) { + + // Check whether this partition group id exists + CatalogPartitionGroup partitionGroup = getPartitionGroup( partitionGroupId ); + List newPartitionIds = new ArrayList<>( partitionGroup.partitionIds ); + + // Check whether the partition itself exists + CatalogPartition partition = getPartition( partitionId ); + + if ( !newPartitionIds.contains( partitionId ) ) { + newPartitionIds.add( partitionId ); + updatePartitionGroup( partitionGroupId, newPartitionIds ); + } + } + + + /** + * Removes a partition from an already existing partition group + * + * @param partitionGroupId Group to remove the partition from + * @param partitionId Partition to remove + */ + @Override + public void removePartitionFromGroup( long partitionGroupId, Long partitionId ) { + // Check whether this partition group id exists + CatalogPartitionGroup partitionGroup = getPartitionGroup( partitionGroupId ); + List newPartitionIds = new ArrayList<>( partitionGroup.partitionIds ); + + if ( newPartitionIds.contains( partitionId ) ) { + newPartitionIds.remove( partitionId ); + updatePartitionGroup( partitionGroupId, newPartitionIds ); + } + } + + + /** + * Assigns the partition to a new partitionGroup + * + * @param partitionId Partition to move + * @param partitionGroupId New target group to move the partition to + */ + @Override + public void
updatePartition( long partitionId, Long partitionGroupId ) { + + // Check whether this partition group id exists + CatalogPartitionGroup partitionGroup = getPartitionGroup( partitionGroupId ); + List newPartitionIds = new ArrayList<>( partitionGroup.partitionIds ); + + CatalogPartition oldPartition = getPartition( partitionId ); + + if ( !newPartitionIds.contains( partitionId ) ) { + newPartitionIds.add( partitionId ); + + addPartitionToGroup( partitionGroupId, partitionId ); + removePartitionFromGroup( oldPartition.partitionGroupId, partitionId ); + + CatalogPartition updatedPartition = new CatalogPartition( + oldPartition.id, + oldPartition.tableId, + oldPartition.schemaId, + oldPartition.databaseId, + oldPartition.partitionQualifiers, + oldPartition.isUnbound, + partitionGroupId ); + + synchronized ( this ) { + partitions.put( updatedPartition.id, updatedPartition ); + } + listeners.firePropertyChange( "partition", oldPartition, updatedPartition ); + } + } + + + /** + * Get a partition group object by its unique id + * + * @param partitionGroupId The unique id of the partition group + * @return A catalog partition group + */ + @Override + public CatalogPartitionGroup getPartitionGroup( long partitionGroupId ) throws UnknownPartitionGroupIdRuntimeException { + try { + return Objects.requireNonNull( partitionGroups.get( partitionGroupId ) ); + } catch ( NullPointerException e ) { + throw new UnknownPartitionGroupIdRuntimeException( partitionGroupId ); + } + } + + + /** + * Adds a partition to the catalog + * + * @param tableId The unique id of the table + * @param schemaId The unique id of the schema + * @param partitionGroupId The id of the partition group the partition should initially be added to + * @return The id of the created partition + */ + @Override + public long addPartition( long tableId, long schemaId, long partitionGroupId, List effectivePartitionQualifier, boolean isUnbound ) throws GenericCatalogException { try { long id = partitionIdBuilder.getAndIncrement(); - log.debug( "Creating partition of type '{}' with id '{}'", partitionType, id ); + if ( log.isDebugEnabled() ) { + log.debug( "Creating partition with id '{}'", id ); + } CatalogSchema schema = Objects.requireNonNull( schemas.get( schemaId ) ); CatalogPartition partition = new CatalogPartition( id, - partitionName, tableId, schemaId, schema.databaseId, - 0, effectivePartitionQualifier, - isUnbound ); + isUnbound, + partitionGroupId ); synchronized ( this ) { partitions.put( id, partition ); @@ -3256,18 +3590,23 @@ public long addPartition( long tableId, String partitionName, long schemaId, int /** - * Should only be called from mergePartitions(). Deletes a single partition and all references. + * Deletes a single partition and all references.
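updatePartition() above is the primitive the frequency-based reorganization relies on: moving one internal partition between groups re-links the partition and fires the corresponding add/remove group updates. A minimal sketch, assuming the periodic frequency check has classified a partition as hot; the choice of target group is illustrative:

// Sketch: promote a partition that the periodic frequency check classified as hot.
// updatePartition(...) (defined above) removes it from its old group and adds it to the new one.
long hotGroupId = table.partitionProperty.partitionGroupIds.get( 0 );  // illustrative target group
catalog.updatePartition( partitionId, hotGroupId );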
* * @param tableId The unique id of the table * @param schemaId The unique id of the schema * @param partitionId The partitionId to be deleted */ @Override - public void deletePartition( long tableId, long schemaId, long partitionId ) throws UnknownPartitionIdRuntimeException { - log.debug( "Deleting partition with id '{}' on table with id '{}'", partitionId, tableId ); + public void deletePartition( long tableId, long schemaId, long partitionId ) { + if ( log.isDebugEnabled() ) { + log.debug( "Deleting partition with id '{}' on table with id '{}'", partitionId, tableId ); + } // Check whether this partition id exists getPartition( partitionId ); synchronized ( this ) { + for ( CatalogPartitionPlacement partitionPlacement : getPartitionPlacements( partitionId ) ) { + deletePartitionPlacement( partitionPlacement.adapterId, partitionId ); + } partitions.remove( partitionId ); } } @@ -3280,26 +3619,43 @@ public void deletePartition( long tableId, long schemaId, long partitionId ) thr * @return A catalog partition */ @Override - public CatalogPartition getPartition( long partitionId ) throws UnknownPartitionIdRuntimeException { + public CatalogPartition getPartition( long partitionId ) { try { return Objects.requireNonNull( partitions.get( partitionId ) ); } catch ( NullPointerException e ) { - throw new UnknownPartitionIdRuntimeException( partitionId ); + throw new UnknownPartitionGroupIdRuntimeException( partitionId ); } } + /** + * Retrieves a list of partitions which are associated with a specific table + * + * @param tableId Table for which partitions shall be gathered + * @return List of all partitions associated with that table + */ + @Override + public List getPartitionsByTable( long tableId ) { + return partitions.values() + .stream() + .filter( p -> p.tableId == tableId ) + .collect( Collectors.toList() ); + } + + /** * Effectively partitions a table with the specified partitionType * * @param tableId Table to be partitioned * @param partitionType Partition function to apply on the table * @param partitionColumnId Column used to apply the partition function on - * @param numPartitions Explicit number of partitions - * @param partitionIds List of ids of the catalog partitions + * @param numPartitionGroups Explicit number of partition groups + * @param partitionGroupIds List of ids of the catalog partition groups */ @Override - public void partitionTable( long tableId, PartitionType partitionType, long partitionColumnId, int numPartitions, List partitionIds ) { + public void partitionTable( long tableId, PartitionType partitionType, long partitionColumnId, int numPartitionGroups, List partitionGroupIds, PartitionProperty partitionProperty ) { CatalogTable old = Objects.requireNonNull( tables.get( tableId ) ); CatalogTable table = new CatalogTable( @@ -3314,15 +3670,18 @@ public void partitionTable( long tableId, PartitionType partitionType, long part old.primaryKey, old.placementsByAdapter, old.modifiable, - numPartitions, partitionType, - ImmutableList.copyOf( partitionIds ), partitionColumnId, + partitionProperty, old.connectedViews ); synchronized ( this ) { tables.replace( tableId, table ); tableNames.replace( new Object[]{ table.databaseId, table.schemaId, old.name }, table ); + + if ( table.partitionProperty.reliesOnPeriodicChecks ) { + addTableToPeriodicProcessing( tableId ); + } } listeners.firePropertyChange( "table", old, table ); @@ -3338,6 +3697,28 @@ public void partitionTable( long tableId, PartitionType partitionType, long part @Override public void mergeTable( long tableId
) { CatalogTable old = Objects.requireNonNull( tables.get( tableId ) ); + + if ( old.partitionProperty.reliesOnPeriodicChecks ) { + removeTableFromPeriodicProcessing( tableId ); + } + + //Technically every Table is partitioned. But tables classified as UNPARTITIONED only consist of one PartitionGroup and one large partition + List partitionGroupIds = new ArrayList<>(); + try { + partitionGroupIds.add( addPartitionGroup( tableId, "full", old.schemaId, PartitionType.NONE, 1, new ArrayList<>(), true ) ); + } catch ( GenericCatalogException e ) { + throw new RuntimeException( e ); + } + + //get All(only one) PartitionGroups and then get all partitionIds for each PG and add them to completeList of partitionIds + CatalogPartitionGroup defaultUnpartitionedGroup = getPartitionGroup( partitionGroupIds.get( 0 ) ); + PartitionProperty partitionProperty = PartitionProperty.builder() + .partitionType( PartitionType.NONE ) + .partitionGroupIds( ImmutableList.copyOf( partitionGroupIds ) ) + .partitionIds( ImmutableList.copyOf( defaultUnpartitionedGroup.partitionIds ) ) + .reliesOnPeriodicChecks( false ) + .build(); + CatalogTable table = new CatalogTable( old.id, old.name, @@ -3349,7 +3730,8 @@ public void mergeTable( long tableId ) { old.tableType, old.primaryKey, old.placementsByAdapter, - old.modifiable ); + old.modifiable, + partitionProperty ); synchronized ( this ) { tables.replace( tableId, table ); @@ -3364,14 +3746,47 @@ public void mergeTable( long tableId ) { // Basically get first part of PK even if its compound of PK it is sufficient CatalogColumn pkColumn = getColumn( pkColumnIds.get( 0 ) ); // This gets us only one ccp per store (first part of PK) - for ( CatalogColumnPlacement ccp : getColumnPlacements( pkColumn.id ) ) { - dataPartitionPlacement.remove( new Object[]{ ccp.adapterId, ccp.tableId } ); + for ( CatalogColumnPlacement ccp : getColumnPlacement( pkColumn.id ) ) { + dataPartitionGroupPlacement.replace( new Object[]{ ccp.adapterId, tableId }, ImmutableList.copyOf( partitionGroupIds ) ); } } listeners.firePropertyChange( "table", old, table ); } + /** + * Updates partitionProperties on table + * + * @param tableId Table to be partitioned + * @param partitionProperty Partition properties + */ + @Override + public void updateTablePartitionProperties( long tableId, PartitionProperty partitionProperty ) { + CatalogTable old = Objects.requireNonNull( tables.get( tableId ) ); + + CatalogTable table = new CatalogTable( + old.id, + old.name, + old.columnIds, + old.schemaId, + old.databaseId, + old.ownerId, + old.ownerName, + old.tableType, + old.primaryKey, + old.placementsByAdapter, + old.modifiable, + partitionProperty ); + + synchronized ( this ) { + tables.replace( tableId, table ); + tableNames.replace( new Object[]{ table.databaseId, table.schemaId, old.name }, table ); + } + + listeners.firePropertyChange( "table", old, table ); + } + + /** * Get a List of all partitions belonging to a specific table * @@ -3379,18 +3794,62 @@ public void mergeTable( long tableId ) { * @return list of all partitions on this table */ @Override - public List getPartitions( long tableId ) { + public List getPartitionGroups( long tableId ) { try { CatalogTable table = Objects.requireNonNull( tables.get( tableId ) ); + List partitionGroups = new ArrayList<>(); + if ( table.partitionProperty.partitionGroupIds == null ) { + return new ArrayList<>(); + } + for ( long partId : table.partitionProperty.partitionGroupIds ) { + partitionGroups.add( getPartitionGroup( partId ) ); + } + return partitionGroups; + } 
catch ( UnknownPartitionGroupIdRuntimeException e ) {
+ return new ArrayList<>();
+ }
+ }
+
+
+ /**
+ * Get all partition groups of the specified database which fit to the specified filter patterns.
+ * getPartitionGroups(null, null, null) returns all partition groups of the database.
+ *
+ * @param databaseNamePattern Pattern for the database name. null returns all.
+ * @param schemaNamePattern Pattern for the schema name. null returns all.
+ * @param tableNamePattern Pattern for the table name. null returns all.
+ * @return List of partition groups which fit to the specified filters. If there is no partition group which meets the criteria, an empty list is returned.
+ */
+ @Override
+ public List<CatalogPartitionGroup> getPartitionGroups( Pattern databaseNamePattern, Pattern schemaNamePattern, Pattern tableNamePattern ) {
+ List<CatalogTable> catalogTables = getTables( databaseNamePattern, schemaNamePattern, tableNamePattern );
+ Stream<CatalogPartitionGroup> partitionGroupStream = Stream.of();
+ for ( CatalogTable catalogTable : catalogTables ) {
+ partitionGroupStream = Stream.concat( partitionGroupStream, getPartitionGroups( catalogTable.id ).stream() );
+ }
+ return partitionGroupStream.collect( Collectors.toList() );
+ }
+
+
+ /**
+ * Get a List of all partitions currently assigned to a specific PartitionGroup
+ *
+ * @param partitionGroupId PartitionGroup to be queried
+ * @return list of all partitions assigned to this partition group
+ */
+ @Override
+ public List<CatalogPartition> getPartitions( long partitionGroupId ) {
+ try {
+ CatalogPartitionGroup partitionGroup = Objects.requireNonNull( partitionGroups.get( partitionGroupId ) );
 List<CatalogPartition> partitions = new ArrayList<>();
- if ( table.partitionIds == null ) {
+ if ( partitionGroup.partitionIds == null ) {
 return new ArrayList<>();
 }
- for ( long partId : table.partitionIds ) {
+ for ( long partId : partitionGroup.partitionIds ) {
 partitions.add( getPartition( partId ) );
 }
 return partitions;
- } catch ( UnknownPartitionIdRuntimeException e ) {
+ } catch ( UnknownPartitionGroupIdRuntimeException e ) {
 return new ArrayList<>();
 }
 }

@@ -3402,15 +3861,15 @@ public List<CatalogPartition> getPartitions( long tableId ) {
 *
 * @param databaseNamePattern Pattern for the database name. null returns all.
 * @param schemaNamePattern Pattern for the schema name. null returns all.
 * @param tableNamePattern Pattern for the table name. null returns all.
 * @return List of columns which fit to the specified filters. If there is no column which meets the criteria, an empty list is returned.
*/ @Override public List getPartitions( Pattern databaseNamePattern, Pattern schemaNamePattern, Pattern tableNamePattern ) { - List catalogTables = getTables( databaseNamePattern, schemaNamePattern, tableNamePattern ); + List catalogPartitionGroups = getPartitionGroups( databaseNamePattern, schemaNamePattern, tableNamePattern ); Stream partitionStream = Stream.of(); - for ( CatalogTable catalogTable : catalogTables ) { - partitionStream = Stream.concat( partitionStream, getPartitions( catalogTable.id ).stream() ); + for ( CatalogPartitionGroup catalogPartitionGroup : catalogPartitionGroups ) { + partitionStream = Stream.concat( partitionStream, getPartitions( catalogPartitionGroup.id ).stream() ); } return partitionStream.collect( Collectors.toList() ); } @@ -3423,12 +3882,12 @@ public List getPartitions( Pattern databaseNamePattern, Patter * @return list of all partition names on this table */ @Override - public List getPartitionNames( long tableId ) { - List partitionNames = new ArrayList<>(); - for ( CatalogPartition catalogPartition : getPartitions( tableId ) ) { - partitionNames.add( catalogPartition.partitionName ); + public List getPartitionGroupNames( long tableId ) { + List partitionGroupNames = new ArrayList<>(); + for ( CatalogPartitionGroup catalogPartitionGroup : getPartitionGroups( tableId ) ) { + partitionGroupNames.add( catalogPartitionGroup.partitionGroupName ); } - return partitionNames; + return partitionGroupNames; } @@ -3437,15 +3896,15 @@ public List getPartitionNames( long tableId ) { * Essentially returns all ColumnPlacements which hold the specified partitionID. * * @param tableId The id of the table - * @param partitionId The id of the partition + * @param partitionGroupId The id of the partition * @param columnId The id of tje column * @return List of CatalogColumnPlacements */ @Override - public List getColumnPlacementsByPartition( long tableId, long partitionId, long columnId ) { + public List getColumnPlacementsByPartitionGroup( long tableId, long partitionGroupId, long columnId ) { List catalogColumnPlacements = new ArrayList<>(); - for ( CatalogColumnPlacement ccp : getColumnPlacements( columnId ) ) { - if ( dataPartitionPlacement.get( new Object[]{ ccp.adapterId, tableId } ).contains( partitionId ) ) { + for ( CatalogColumnPlacement ccp : getColumnPlacement( columnId ) ) { + if ( dataPartitionGroupPlacement.get( new Object[]{ ccp.adapterId, tableId } ).contains( partitionGroupId ) ) { catalogColumnPlacements.add( ccp ); } } @@ -3462,15 +3921,15 @@ public List getColumnPlacementsByPartition( long tableId * Essentially returns all adapters which hold the specified partitionID * * @param tableId The unique id of the table - * @param partitionId The unique id of the partition + * @param partitionGroupId The unique id of the partition * @return List of CatalogAdapters */ @Override - public List getAdaptersByPartition( long tableId, long partitionId ) { + public List getAdaptersByPartitionGroup( long tableId, long partitionGroupId ) { List catalogAdapters = new ArrayList<>(); CatalogTable table = getTable( tableId ); for ( Entry> entry : table.placementsByAdapter.entrySet() ) { - if ( dataPartitionPlacement.get( new Object[]{ entry.getKey(), tableId } ).contains( partitionId ) ) { + if ( dataPartitionGroupPlacement.get( new Object[]{ entry.getKey(), tableId } ).contains( partitionGroupId ) ) { catalogAdapters.add( getAdapter( entry.getKey() ) ); } } @@ -3488,37 +3947,54 @@ public List getAdaptersByPartition( long tableId, long partition * * @param adapterId The 
unique id of the adapter * @param tableId The unique id of the table - * @param partitionIds List of partitionsIds to be updated + * @param partitionGroupIds List of partitionsIds to be updated */ @Override - public void updatePartitionsOnDataPlacement( int adapterId, long tableId, List partitionIds ) { + public void updatePartitionGroupsOnDataPlacement( int adapterId, long tableId, List partitionGroupIds ) { synchronized ( this ) { - if ( !dataPartitionPlacement.containsKey( new Object[]{ adapterId, tableId } ) ) { + if ( !dataPartitionGroupPlacement.containsKey( new Object[]{ adapterId, tableId } ) ) { if ( log.isDebugEnabled() ) { - log.debug( "Adding Partitions={} to DataPlacement={}.{}", partitionIds, getAdapter( adapterId ).uniqueName, getTable( tableId ).name ); + log.debug( "Adding PartitionGroups={} to DataPlacement={}.{}", partitionGroupIds, getAdapter( adapterId ).uniqueName, getTable( tableId ).name ); } - dataPartitionPlacement.put( new Object[]{ adapterId, tableId }, ImmutableList.builder().build() ); + dataPartitionGroupPlacement.put( new Object[]{ adapterId, tableId }, ImmutableList.builder().build() ); } else { if ( log.isDebugEnabled() ) { - log.debug( "Updating Partitions={} to DataPlacement={}.{}", partitionIds, getAdapter( adapterId ).uniqueName, getTable( tableId ).name ); + log.debug( "Updating PartitionGroups={} to DataPlacement={}.{}", partitionGroupIds, getAdapter( adapterId ).uniqueName, getTable( tableId ).name ); } - List tempPartition = dataPartitionPlacement.get( new Object[]{ adapterId, tableId } ); + List tempPartition = dataPartitionGroupPlacement.get( new Object[]{ adapterId, tableId } ); // Validate if partition distribution after update is successful otherwise rollback // Check if partition change has impact on the complete partition distribution for current Part.Type - for ( CatalogColumnPlacement ccp : getColumnPlacementsOnAdapter( adapterId, tableId ) ) { + for ( CatalogColumnPlacement ccp : getColumnPlacementsOnAdapterPerTable( adapterId, tableId ) ) { long columnId = ccp.columnId; - if ( !validatePartitionDistribution( adapterId, tableId, columnId ) ) { - dataPartitionPlacement.replace( new Object[]{ adapterId, tableId }, ImmutableList.copyOf( tempPartition ) ); - throw new RuntimeException( "Validation of partition distribution failed for column: '" + ccp.getLogicalColumnName() + "'" ); + if ( !validatePartitionGroupDistribution( adapterId, tableId, columnId, 0 ) ) { + dataPartitionGroupPlacement.replace( new Object[]{ adapterId, tableId }, ImmutableList.copyOf( tempPartition ) ); + throw new RuntimeException( "Validation of PartitionGroup distribution failed for column: '" + ccp.getLogicalColumnName() + "'" ); } } } - dataPartitionPlacement.replace( new Object[]{ adapterId, tableId }, ImmutableList.copyOf( partitionIds ) ); + dataPartitionGroupPlacement.replace( new Object[]{ adapterId, tableId }, ImmutableList.copyOf( partitionGroupIds ) ); } } + /** + * Get all partitionGroups of a DataPlacement (identified by adapterId and tableId) + * + * @param adapterId The unique id of the adapter + * @param tableId The unique id of the table + * @return List of partitionIds + */ + @Override + public List getPartitionGroupsOnDataPlacement( int adapterId, long tableId ) { + List partitionGroups = dataPartitionGroupPlacement.get( new Object[]{ adapterId, tableId } ); + if ( partitionGroups == null ) { + partitionGroups = new ArrayList<>(); + } + return partitionGroups; + } + + /** * Get all partitions of a DataPlacement (identified by adapterId and tableId) 
 *
@@ -3528,11 +4004,11 @@ public void updatePartitionsOnDataPlacement( int adapterId, long tableId, List
 public List<Long> getPartitionsOnDataPlacement( int adapterId, long tableId ) {
- List<Long> partitions = dataPartitionPlacement.get( new Object[]{ adapterId, tableId } );
- if ( partitions == null ) {
- partitions = new ArrayList<>();
- }
- return partitions;
+ List<Long> tempPartitionIds = new ArrayList<>();
+ // Get all partition groups, then add the partitionIds of every group to one complete list of partitionIds
+ getPartitionGroupsOnDataPlacement( adapterId, tableId ).forEach( pgId -> getPartitionGroup( pgId ).partitionIds.forEach( tempPartitionIds::add ) );
+
+ return tempPartitionIds;
 }

@@ -3544,20 +4020,20 @@ public List<Long> getPartitionsOnDataPlacement( int adapterId, long tableId ) {
 * @return List of partitionId Indices
 */
 @Override
- public List<Long> getPartitionsIndexOnDataPlacement( int adapterId, long tableId ) {
- List<Long> partitions = dataPartitionPlacement.get( new Object[]{ adapterId, tableId } );
- if ( partitions == null ) {
+ public List<Long> getPartitionGroupsIndexOnDataPlacement( int adapterId, long tableId ) {
+ List<Long> partitionGroups = dataPartitionGroupPlacement.get( new Object[]{ adapterId, tableId } );
+ if ( partitionGroups == null ) {
 return new ArrayList<>();
 }
- List<Long> partitionIndexList = new ArrayList<>();
+ List<Long> partitionGroupIndexList = new ArrayList<>();
 CatalogTable catalogTable = getTable( tableId );
- for ( int index = 0; index < catalogTable.numPartitions; index++ ) {
- if ( partitions.contains( catalogTable.partitionIds.get( index ) ) ) {
- partitionIndexList.add( (long) index );
+ for ( int index = 0; index < catalogTable.partitionProperty.partitionGroupIds.size(); index++ ) {
+ if ( partitionGroups.contains( catalogTable.partitionProperty.partitionGroupIds.get( index ) ) ) {
+ partitionGroupIndexList.add( (long) index );
 }
 }
- return partitionIndexList;
+ return partitionGroupIndexList;
 }

@@ -3568,18 +4044,15 @@ public List<Long> getPartitionsIndexOnDataPlacement( int adapterId, long tableId
 * @param tableId The unique id of the table
 */
 @Override
- public void deletePartitionsOnDataPlacement( int adapterId, long tableId ) {
+ public void deletePartitionGroupsOnDataPlacement( int adapterId, long tableId ) {
 // Check if there is indeed no column placement left.
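+ // The mapping may only be removed once the last column placement of this table
+ // has been dropped from the adapter; otherwise the remaining column placements
+ // would lose their partition group assignment.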
- if ( getTable( tableId ).isPartitioned ) { - if ( getColumnPlacementsOnAdapter( adapterId, tableId ).isEmpty() ) { - synchronized ( this ) { - dataPartitionPlacement.remove( new Object[]{ adapterId, tableId } ); - log.debug( "Removed all dataPartitionPlacements" ); - } + if ( getColumnPlacementsOnAdapterPerTable( adapterId, tableId ).isEmpty() ) { + synchronized ( this ) { + dataPartitionGroupPlacement.remove( new Object[]{ adapterId, tableId } ); + log.debug( "Removed all dataPartitionGroupPlacements" ); } - } else { - log.debug( "Table wasn't even partitioned" ); } + } @@ -3590,18 +4063,19 @@ public void deletePartitionsOnDataPlacement( int adapterId, long tableId ) { * @param adapterId The id of the adapter to be checked * @param tableId The id of the table to be checked * @param columnId The id of the column to be checked + * @param threshold * @return If its correctly distributed or not */ @Override - public boolean validatePartitionDistribution( int adapterId, long tableId, long columnId ) { + public boolean validatePartitionGroupDistribution( int adapterId, long tableId, long columnId, int threshold ) { CatalogTable catalogTable = getTable( tableId ); if ( isTableFlaggedForDeletion( tableId ) ) { return true; } - PartitionManagerFactory partitionManagerFactory = new PartitionManagerFactory(); - PartitionManager partitionManager = partitionManagerFactory.getInstance( catalogTable.partitionType ); + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( catalogTable.partitionType ); - return partitionManager.probePartitionDistributionChange( catalogTable, adapterId, columnId ); + return partitionManager.probePartitionGroupDistributionChange( catalogTable, adapterId, columnId, threshold ); } @@ -3636,6 +4110,208 @@ public boolean isTableFlaggedForDeletion( long tableId ) { } + /** + * Adds a placement for a partition. + * + * @param adapterId The adapter on which the table should be placed on + * @param tableId The table for which a partition placement shall be created + * @param partitionId The id of a specific partition that shall create a new placement + * @param placementType The type of placement + * @param physicalSchemaName The schema name on the adapter + * @param physicalTableName The table name on the adapter + */ + @Override + public void addPartitionPlacement( int adapterId, long tableId, long partitionId, PlacementType placementType, String physicalSchemaName, String physicalTableName ) { + + if ( !checkIfExistsPartitionPlacement( adapterId, partitionId ) ) { + CatalogAdapter store = Objects.requireNonNull( adapters.get( adapterId ) ); + CatalogPartitionPlacement partitionPlacement = new CatalogPartitionPlacement( + tableId, + adapterId, + store.uniqueName, + placementType, + physicalSchemaName, + physicalTableName, + partitionId ); + + synchronized ( this ) { + partitionPlacements.put( new Object[]{ adapterId, partitionId }, partitionPlacement ); + } + listeners.firePropertyChange( "partitionPlacement", null, partitionPlacements ); + } + } + + + /** + * Deletes a placement for a partition. + * + * @param adapterId The adapter on which the table should be placed on + * @param partitionId The id of a partition which shall be removed from that store. 
+ */
+ @Override
+ public void deletePartitionPlacement( int adapterId, long partitionId ) {
+ if ( checkIfExistsPartitionPlacement( adapterId, partitionId ) ) {
+ synchronized ( this ) {
+ partitionPlacements.remove( new Object[]{ adapterId, partitionId } );
+ }
+ }
+ }
+
+
+ /**
+ * Returns a specific partition entity which is placed on a store.
+ *
+ * @param adapterId The adapter on which the requested partition placement resides
+ * @param partitionId The id of the requested partition
+ * @return The requested PartitionPlacement on that store for a given id
+ */
+ @Override
+ public CatalogPartitionPlacement getPartitionPlacement( int adapterId, long partitionId ) {
+ try {
+ return Objects.requireNonNull( partitionPlacements.get( new Object[]{ adapterId, partitionId } ) );
+ } catch ( NullPointerException e ) {
+ getAdapter( adapterId );
+ getPartition( partitionId );
+ throw new UnknownPartitionPlacementException( adapterId, partitionId );
+ }
+ }
+
+
+ /**
+ * Returns a list of all Partition Placements which currently reside on an adapter, regardless of the table.
+ *
+ * @param adapterId The adapter on which the requested partition placements reside
+ * @return A list of all Partition Placements, that are currently located on that specific store
+ */
+ @Override
+ public List<CatalogPartitionPlacement> getPartitionPlacementsByAdapter( int adapterId ) {
+ return new ArrayList<>( partitionPlacements.prefixSubMap( new Object[]{ adapterId } ).values() );
+ }
+
+
+ /**
+ * Returns a list of all Partition Placements which currently reside on an adapter, for a specific table.
+ *
+ * @param adapterId The adapter on which the requested partition placements reside
+ * @param tableId The table for which all partition placements on an adapter should be considered
+ * @return A list of all Partition Placements, that are currently located on that specific store for an individual table
+ */
+ @Override
+ public List<CatalogPartitionPlacement> getPartitionPlacementByTable( int adapterId, long tableId ) {
+ return getPartitionPlacementsByAdapter( adapterId )
+ .stream()
+ .filter( p -> p.tableId == tableId )
+ .collect( Collectors.toList() );
+ }
+
+
+ /**
+ * Returns a list of all Partition Placements which are currently associated with a table.
+ *
+ * @param tableId The table with which the requested partition placements are currently associated.
+ * @return A list of all Partition Placements, that belong to the desired table
+ */
+ @Override
+ public List<CatalogPartitionPlacement> getAllPartitionPlacementsByTable( long tableId ) {
+ return partitionPlacements.values()
+ .stream()
+ .filter( p -> p.tableId == tableId )
+ .collect( Collectors.toList() );
+ }
+
+
+ /**
+ * Get all Partition Placements which are associated with an individual partition Id.
+ * Identifies on which locations and how often the individual partition is placed.
+ *
+ * @param partitionId The requested partition Id
+ * @return A list of Partition Placements which are physically responsible for that partition
+ */
+ @Override
+ public List<CatalogPartitionPlacement> getPartitionPlacements( long partitionId ) {
+ return partitionPlacements.values()
+ .stream()
+ .filter( p -> p.partitionId == partitionId )
+ .collect( Collectors.toList() );
+ }
+
+
+ /**
+ * Returns all tables which are in need of special periodic treatment.
+ *
+ * @return List of tables which need to be periodically processed
+ */
+ @Override
+ public List<CatalogTable> getTablesForPeriodicProcessing() {
+ List<CatalogTable> procTables = new ArrayList<>();
+ for ( Long tableId : frequencyDependentTables ) {
+ try {
+ procTables.add( getTable( tableId ) );
+ } catch ( UnknownTableIdRuntimeException e ) {
+ frequencyDependentTables.remove( tableId );
+ }
+ }
+ return procTables;
+ }
+
+
+ /**
+ * Registers a table to be considered for periodic processing
+ *
+ * @param tableId Id of table to be considered for periodic processing
+ */
+ @Override
+ public void addTableToPeriodicProcessing( long tableId ) {
+ int beforeSize = frequencyDependentTables.size();
+ getTable( tableId );
+ if ( !frequencyDependentTables.contains( tableId ) ) {
+ frequencyDependentTables.add( tableId );
+ }
+ // Initially starts the periodic job if this was the first table to enable periodic processing
+ if ( beforeSize == 0 && frequencyDependentTables.size() == 1 ) {
+ // Start job for periodic processing
+ FrequencyMap.INSTANCE.initialize();
+ }
+ }
+
+
+ /**
+ * Removes a table from periodic background processing
+ *
+ * @param tableId Id of table to be removed from periodic processing
+ */
+ @Override
+ public void removeTableFromPeriodicProcessing( long tableId ) {
+ getTable( tableId );
+ if ( frequencyDependentTables.contains( tableId ) ) {
+ frequencyDependentTables.remove( tableId );
+ }
+ // Terminates the periodic job if this was the last table with periodic processing
+ if ( frequencyDependentTables.size() == 0 ) {
+ // Terminate job for periodic processing
+ FrequencyMap.INSTANCE.terminate();
+ }
+ }
+
+
+ /**
+ * Probes if a Partition Placement on an adapter for a specific partition already exists.
+ *
+ * @param adapterId Adapter on which to check
+ * @param partitionId Partition which to check
+ * @return the response of the probe
+ */
+ @Override
+ public boolean checkIfExistsPartitionPlacement( int adapterId, long partitionId ) {
+ CatalogPartitionPlacement placement = partitionPlacements.get( new Object[]{ adapterId, partitionId } );
+ return placement != null;
+ }
+
+
 @Override
 public List<CatalogKey> getTableKeys( long tableId ) {
 return keys.values().stream().filter( k -> k.tableId == tableId ).collect( Collectors.toList() );

diff --git a/catalog/src/main/java/org/polypheny/db/catalog/CatalogInfoPage.java b/catalog/src/main/java/org/polypheny/db/catalog/CatalogInfoPage.java
index cd38e68f63..a69d444d0a 100644
--- a/catalog/src/main/java/org/polypheny/db/catalog/CatalogInfoPage.java
+++ b/catalog/src/main/java/org/polypheny/db/catalog/CatalogInfoPage.java
@@ -45,6 +45,7 @@ public class CatalogInfoPage implements PropertyChangeListener {
 private final InformationTable columnInformation;
 private final InformationTable indexInformation;
 private final InformationTable adapterInformation;
+ private final InformationTable partitionGroupInformation;
 private final InformationTable partitionInformation;

 private final InformationTable debugInformation;

@@ -57,25 +58,27 @@ public CatalogInfoPage( Catalog catalog ) {
 InformationPage page = new InformationPage( "Catalog" );
 infoManager.addPage( page );

- this.adapterInformation = addCatalogInformationTable( page, "Adapters", Arrays.asList( "ID", "Name", "Type" ) );
- this.databaseInformation = addCatalogInformationTable( page, "Databases", Arrays.asList( "ID", "Name", "Default SchemaID" ) );
- this.schemaInformation = addCatalogInformationTable( page, "Schemas", Arrays.asList( "ID", "Name", "DatabaseID", "SchemaType" ) );
- this.tableInformation =
addCatalogInformationTable( page, "Tables", Arrays.asList( "ID", "Name", "DatabaseID", "SchemaID", "TableType", "PartitionType", "Partitions" ) ); - this.columnInformation = addCatalogInformationTable( page, "Columns", Arrays.asList( "ID", "Name", "DatabaseID", "SchemaID", "TableID", "Placements" ) ); - this.indexInformation = addCatalogInformationTable( page, "Indexes", Arrays.asList( "ID", "Name", "KeyID", "Location", "Method", "Unique" ) ); - this.partitionInformation = addCatalogInformationTable( page, "Partitions", Arrays.asList( "ID", "Name", "TableID", "Qualifiers" ) ); + this.adapterInformation = addCatalogInformationTable( page, "Adapters", 5, Arrays.asList( "ID", "Name", "Type" ) ); + this.databaseInformation = addCatalogInformationTable( page, "Databases", 1, Arrays.asList( "ID", "Name", "Default SchemaID" ) ); + this.schemaInformation = addCatalogInformationTable( page, "Schemas", 2, Arrays.asList( "ID", "Name", "DatabaseID", "SchemaType" ) ); + this.tableInformation = addCatalogInformationTable( page, "Tables", 3, Arrays.asList( "ID", "Name", "DatabaseID", "SchemaID", "Type", "PartitionType", "PartitionGroups" ) ); + this.columnInformation = addCatalogInformationTable( page, "Columns", 4, Arrays.asList( "ID", "Name", "DatabaseID", "SchemaID", "TableID", "Placements" ) ); + this.indexInformation = addCatalogInformationTable( page, "Indexes", 6, Arrays.asList( "ID", "Name", "KeyID", "Location", "Method", "Unique" ) ); + this.partitionGroupInformation = addCatalogInformationTable( page, "Partition Groups", 7, Arrays.asList( "ID", "Name", "TableID", "# Partitions" ) ); + this.partitionInformation = addCatalogInformationTable( page, "Partitions", 8, Arrays.asList( "ID", "PartitionGroupID", "TableID", "Qualifiers" ) ); - this.debugInformation = addCatalogInformationTable( page, "Debug", Arrays.asList( "Time", "Message" ) ); + this.debugInformation = addCatalogInformationTable( page, "Debug", 10, Arrays.asList( "Time", "Message" ) ); addPersistentInfo( page ); - resetCatalogInformation(); + page.setRefreshFunction( this::resetCatalogInformation ); catalog.addObserver( this ); } - private InformationTable addCatalogInformationTable( InformationPage page, String name, List titles ) { + private InformationTable addCatalogInformationTable( InformationPage page, String name, int order, List titles ) { InformationGroup catalogGroup = new InformationGroup( page, name ); + catalogGroup.setOrder( order ); infoManager.addGroup( catalogGroup ); InformationTable table = new InformationTable( catalogGroup, titles ); infoManager.registerInformation( table ); @@ -85,6 +88,7 @@ private InformationTable addCatalogInformationTable( InformationPage page, Strin private void addPersistentInfo( InformationPage page ) { InformationGroup catalogGroup = new InformationGroup( page, "Persistency" ); + catalogGroup.setOrder( 9 ); infoManager.addGroup( catalogGroup ); InformationTable table = new InformationTable( catalogGroup, Collections.singletonList( "is persistent" ) ); infoManager.registerInformation( table ); @@ -95,7 +99,6 @@ private void addPersistentInfo( InformationPage page ) { @Override public void propertyChange( PropertyChangeEvent propertyChangeEvent ) { addDebugMessage( propertyChangeEvent ); - this.resetCatalogInformation(); } @@ -117,6 +120,7 @@ private void resetCatalogInformation() { columnInformation.reset(); adapterInformation.reset(); indexInformation.reset(); + partitionGroupInformation.reset(); partitionInformation.reset(); if ( catalog == null ) { @@ -134,17 +138,20 @@ private void 
resetCatalogInformation() { schemaInformation.addRow( s.id, s.name, s.databaseId, s.schemaType ); } ); catalog.getTables( null, null, null ).forEach( t -> { - tableInformation.addRow( t.id, t.name, t.databaseId, t.schemaId, t.tableType, t.partitionType.toString(), t.numPartitions ); + tableInformation.addRow( t.id, t.name, t.databaseId, t.schemaId, t.tableType, t.partitionProperty.partitionType.toString(), t.partitionProperty.partitionGroupIds.size() ); } ); catalog.getColumns( null, null, null, null ).forEach( c -> { - String placements = catalog.getColumnPlacements( c.id ).stream().map( plac -> String.valueOf( plac.adapterId ) ).collect( Collectors.joining( "," ) ); + String placements = catalog.getColumnPlacement( c.id ).stream().map( plac -> String.valueOf( plac.adapterId ) ).collect( Collectors.joining( "," ) ); columnInformation.addRow( c.id, c.name, c.databaseId, c.schemaId, c.tableId, placements ); } ); catalog.getIndexes().forEach( i -> { indexInformation.addRow( i.id, i.name, i.keyId, i.location, i.method, i.unique ); } ); + catalog.getPartitionGroups( null, null, null ).forEach( pg -> { + partitionGroupInformation.addRow( pg.id, pg.partitionGroupName, pg.tableId, pg.partitionIds.size() ); + } ); catalog.getPartitions( null, null, null ).forEach( p -> { - partitionInformation.addRow( p.id, p.partitionName, p.tableId, p.partitionQualifiers ); + partitionInformation.addRow( p.id, p.partitionGroupId, p.tableId, p.partitionQualifiers ); } ); } catch ( Exception e ) { log.error( "Exception while reset catalog information page", e ); diff --git a/catalog/src/test/java/org/polypheny/db/test/CatalogTest.java b/catalog/src/test/java/org/polypheny/db/test/CatalogTest.java index 87f4712655..111523b9a6 100644 --- a/catalog/src/test/java/org/polypheny/db/test/CatalogTest.java +++ b/catalog/src/test/java/org/polypheny/db/test/CatalogTest.java @@ -325,17 +325,17 @@ public void testColumnPlacement() throws UnknownAdapterException { catalog.addColumnPlacement( store1.id, columnId, PlacementType.AUTOMATIC, null, "table1", column.name, null ); - assertEquals( 1, catalog.getColumnPlacements( columnId ).size() ); - assertEquals( columnId, catalog.getColumnPlacements( columnId ).get( 0 ).columnId ); + assertEquals( 1, catalog.getColumnPlacement( columnId ).size() ); + assertEquals( columnId, catalog.getColumnPlacement( columnId ).get( 0 ).columnId ); catalog.addColumnPlacement( store2.id, columnId, PlacementType.AUTOMATIC, null, "table1", column.name, null ); - assertEquals( 2, catalog.getColumnPlacements( columnId ).size() ); - assertTrue( catalog.getColumnPlacements( columnId ).stream().map( p -> p.adapterId ).collect( Collectors.toList() ).containsAll( Arrays.asList( store2.id, store1.id ) ) ); + assertEquals( 2, catalog.getColumnPlacement( columnId ).size() ); + assertTrue( catalog.getColumnPlacement( columnId ).stream().map( p -> p.adapterId ).collect( Collectors.toList() ).containsAll( Arrays.asList( store2.id, store1.id ) ) ); catalog.deleteColumnPlacement( store1.id, columnId ); - assertEquals( 1, catalog.getColumnPlacements( columnId ).size() ); - assertEquals( store2.id, catalog.getColumnPlacements( columnId ).get( 0 ).adapterId ); + assertEquals( 1, catalog.getColumnPlacement( columnId ).size() ); + assertEquals( store2.id, catalog.getColumnPlacement( columnId ).get( 0 ).adapterId ); } diff --git a/core/_docs/reference.md b/core/_docs/reference.md index 84439eea73..3de95f37e0 100644 --- a/core/_docs/reference.md +++ b/core/_docs/reference.md @@ -132,6 +132,7 @@ The following is a list of 
SQL keywords. Reserved keywords are **bold**. **FOREIGN**, FORMAT, FORTRAN, FOUND, FRAC_SECOND, **FRAME_ROW**, **FREE**, +**FREQUENCY**, **FROM**, **FULL**, **FUNCTION**, @@ -328,6 +329,12 @@ The following is a list of SQL keywords. Reserved keywords are **bold**. **SYSTEM_TIME**, **SYSTEM_USER**, **TABLE**, +**TABLESAMPLE**, +TABLE_NAME, +**TEMPERATURE**, +TEMPORARY, +**THEN**, +TIES, **TABLESAMPLE**, TABLE_NAME, TEMPORARY, **THEN**, TIES, **TIME**, diff --git a/core/build.gradle b/core/build.gradle index 47d2a2f5a9..a9559a1fbf 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -69,6 +69,8 @@ dependencies { testImplementation group: "org.incava", name: "java-diff", version: java_diff_version // Apache 2.0 testImplementation group: "org.apache.commons", name: "commons-pool2", version: commons_pool2_version // Apache 2.0 + testImplementation group: "org.mockito", name: "mockito-core", version: mockito_core_version // MIT + //testImplementation group: "org.apache.calcite", name: "calcite-linq4j", version: calcite_linq4j_version // Apache 2.0 //testImplementation group: "com.h2database", name: "h2", version: h2_version //testImplementation group: "mysql", name: "mysql-connector-java", version: mysql_connector_java_version diff --git a/core/src/main/codegen/includes/ddlParser.ftl b/core/src/main/codegen/includes/ddlParser.ftl index 4998eea6a8..41d021af40 100644 --- a/core/src/main/codegen/includes/ddlParser.ftl +++ b/core/src/main/codegen/includes/ddlParser.ftl @@ -264,12 +264,16 @@ SqlCreate SqlCreateTable(Span s, boolean replace) : SqlIdentifier store = null; SqlIdentifier partitionColumn = null; SqlIdentifier partitionType = null; + int numPartitionGroups = 0; int numPartitions = 0; List partitionNamesList = new ArrayList(); SqlIdentifier partitionName = null; List< List> partitionQualifierList = new ArrayList>(); List partitionQualifiers = new ArrayList(); SqlNode partitionValues = null; + SqlIdentifier tmpIdent = null; + int tmpInt = 0; + RawPartitionInformation rawPartitionInfo; } { ifNotExists = IfNotExistsOpt() id = CompoundIdentifier() @@ -281,11 +285,58 @@ SqlCreate SqlCreateTable(Span s, boolean replace) : partitionType = SimpleIdentifier() | { partitionType = new SqlIdentifier( "RANGE", s.end(this) );} + | + { partitionType = new SqlIdentifier( "TEMPERATURE", s.end(this) ); + rawPartitionInfo = new RawTemperaturePartitionInformation(); + rawPartitionInfo.setPartitionType( partitionType ); + } + partitionColumn = SimpleIdentifier() { rawPartitionInfo.setPartitionColumn( partitionColumn ); } + + partitionName = SimpleIdentifier() { partitionNamesList.add(partitionName); } + + partitionValues = Literal() + { + partitionQualifiers.add(partitionValues); + ((RawTemperaturePartitionInformation)rawPartitionInfo).setHotAccessPercentageIn( partitionValues ); + } + {partitionQualifierList.add(partitionQualifiers); partitionQualifiers = new ArrayList();} + + partitionName = SimpleIdentifier() { partitionNamesList.add(partitionName); } + + partitionValues = Literal() + { + partitionQualifiers.add(partitionValues); + ((RawTemperaturePartitionInformation)rawPartitionInfo).setHotAccessPercentageOut( partitionValues ); + } + {partitionQualifierList.add(partitionQualifiers); partitionQualifiers = new ArrayList();} + + + + ( + { ((RawTemperaturePartitionInformation)rawPartitionInfo).setAccessPattern( new SqlIdentifier( "ALL", s.end(this) ) ); tmpIdent = null; } + | + { ((RawTemperaturePartitionInformation)rawPartitionInfo).setAccessPattern( new SqlIdentifier( "WRITE", s.end(this) ) ); tmpIdent 
= null; } + | + { ((RawTemperaturePartitionInformation)rawPartitionInfo).setAccessPattern( new SqlIdentifier( "READ", s.end(this) ) ); tmpIdent = null;} + ) + + tmpInt = UnsignedIntLiteral() { ((RawTemperaturePartitionInformation)rawPartitionInfo).setInterval( tmpInt ); tmpInt = 0; } + tmpIdent = SimpleIdentifier() { ((RawTemperaturePartitionInformation)rawPartitionInfo).setIntervalUnit( tmpIdent ); tmpIdent = null; } + numPartitions = UnsignedIntLiteral() {rawPartitionInfo.setNumPartitions( numPartitions );} + tmpIdent = SimpleIdentifier() { + ((RawTemperaturePartitionInformation)rawPartitionInfo).setInternalPartitionFunction( tmpIdent ); tmpIdent = null; + } + { + rawPartitionInfo.setPartitionNamesList( partitionNamesList ); + rawPartitionInfo.setPartitionQualifierList( partitionQualifierList ); + + return SqlDdlNodes.createTable(s.end(this), replace, ifNotExists, id, tableElementList, query, store, partitionType, partitionColumn, numPartitionGroups, numPartitions, partitionNamesList, partitionQualifierList, rawPartitionInfo); + } ) partitionColumn = SimpleIdentifier() [ ( - numPartitions = UnsignedIntLiteral() + numPartitionGroups = UnsignedIntLiteral() | partitionName = SimpleIdentifier() { partitionNamesList.add(partitionName); } @@ -318,7 +369,8 @@ SqlCreate SqlCreateTable(Span s, boolean replace) : ] ] { - return SqlDdlNodes.createTable(s.end(this), replace, ifNotExists, id, tableElementList, query, store, partitionType, partitionColumn, numPartitions, partitionNamesList, partitionQualifierList); + rawPartitionInfo = new RawPartitionInformation(); + return SqlDdlNodes.createTable(s.end(this), replace, ifNotExists, id, tableElementList, query, store, partitionType, partitionColumn, numPartitionGroups, numPartitions, partitionNamesList, partitionQualifierList, rawPartitionInfo); } } diff --git a/core/src/main/codegen/includes/parserImpls.ftl b/core/src/main/codegen/includes/parserImpls.ftl index 0d60bab5cc..56e375ab49 100644 --- a/core/src/main/codegen/includes/parserImpls.ftl +++ b/core/src/main/codegen/includes/parserImpls.ftl @@ -123,12 +123,16 @@ SqlAlterTable SqlAlterTable(Span s) : final SqlIdentifier partitionColumn; List partitionList = new ArrayList(); int partitionIndex = 0; + int numPartitionGroups = 0; int numPartitions = 0; List partitionNamesList = new ArrayList(); SqlIdentifier partitionName = null; List< List> partitionQualifierList = new ArrayList>(); List partitionQualifiers = new ArrayList(); SqlNode partitionValues = null; + SqlIdentifier tmpIdent = null; + int tmpInt = 0; + RawPartitionInformation rawPartitionInfo; } {
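For orientation, the grammar above (and its ALTER TABLE twin in the next hunk) accepts DDL of roughly the following shape. This is a hedged sketch, not a tested statement: the table and column names are invented, and the keyword spelling is inferred from the setters visible in the template (setHotAccessPercentageIn/-Out, setAccessPattern, setInterval, setInternalPartitionFunction). It assumes the Polypheny JDBC driver with its default connection settings:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TemperaturePartitionExample {

    public static void main( String[] args ) throws Exception {
        // Hypothetical setup; adjust URL and credentials for your deployment.
        try (
                Connection conn = DriverManager.getConnection( "jdbc:polypheny://localhost/", "pa", "" );
                Statement stmt = conn.createStatement()
        ) {
            // The first VALUES literal is parsed into hotAccessPercentageIn, the second into
            // hotAccessPercentageOut; FREQUENCY write selects the write-access cost model,
            // measured over 10-minute intervals, and the data is spread over 20 internal
            // partitions using HASH as the internal partition function.
            stmt.executeUpdate(
                    "ALTER TABLE orders "
                            + "PARTITION BY TEMPERATURE (customer_id) "
                            + "(PARTITION hot VALUES (12%), "
                            + "PARTITION cold VALUES (14%)) "
                            + "USING FREQUENCY write INTERVAL 10 minutes "
                            + "WITH 20 HASH PARTITIONS" );
        }
    }
}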
@@ -476,12 +480,59 @@ SqlAlterTable SqlAlterTable(Span s) : partitionType = SimpleIdentifier() | { partitionType = new SqlIdentifier( "RANGE", s.end(this) );} + + | + { partitionType = new SqlIdentifier( "TEMPERATURE", s.end(this) ); + rawPartitionInfo = new RawTemperaturePartitionInformation(); + rawPartitionInfo.setPartitionType( partitionType ); + } + partitionColumn = SimpleIdentifier() { rawPartitionInfo.setPartitionColumn( partitionColumn ); } + + partitionName = SimpleIdentifier() { partitionNamesList.add(partitionName); } + + partitionValues = Literal() + { + partitionQualifiers.add(partitionValues); + ((RawTemperaturePartitionInformation)rawPartitionInfo).setHotAccessPercentageIn( partitionValues ); + } + {partitionQualifierList.add(partitionQualifiers); partitionQualifiers = new ArrayList();} + + partitionName = SimpleIdentifier() { partitionNamesList.add(partitionName); } + + partitionValues = Literal() + { + partitionQualifiers.add(partitionValues); + ((RawTemperaturePartitionInformation)rawPartitionInfo).setHotAccessPercentageOut( partitionValues ); + } + {partitionQualifierList.add(partitionQualifiers); partitionQualifiers = new ArrayList();} + + + ( + { ((RawTemperaturePartitionInformation)rawPartitionInfo).setAccessPattern( new SqlIdentifier( "ALL", s.end(this) ) ); tmpIdent = null; } + | + { ((RawTemperaturePartitionInformation)rawPartitionInfo).setAccessPattern( new SqlIdentifier( "WRITE", s.end(this) ) ); tmpIdent = null; } + | + { ((RawTemperaturePartitionInformation)rawPartitionInfo).setAccessPattern( new SqlIdentifier( "READ", s.end(this) ) ); tmpIdent = null;} + ) + + tmpInt = UnsignedIntLiteral() { ((RawTemperaturePartitionInformation)rawPartitionInfo).setInterval( tmpInt ); tmpInt = 0; } + tmpIdent = SimpleIdentifier() { ((RawTemperaturePartitionInformation)rawPartitionInfo).setIntervalUnit( tmpIdent ); tmpIdent = null; } + numPartitions = UnsignedIntLiteral() {rawPartitionInfo.setNumPartitions( numPartitions );} + tmpIdent = SimpleIdentifier() { + ((RawTemperaturePartitionInformation)rawPartitionInfo).setInternalPartitionFunction( tmpIdent ); tmpIdent = null; + } + { + rawPartitionInfo.setPartitionNamesList( partitionNamesList ); + rawPartitionInfo.setPartitionQualifierList( partitionQualifierList ); + + return new SqlAlterTableAddPartitions(s.end(this), table, partitionColumn, partitionType, numPartitionGroups, numPartitions, partitionNamesList, partitionQualifierList, rawPartitionInfo); + } ) partitionColumn = SimpleIdentifier() [ ( - numPartitions = UnsignedIntLiteral() + numPartitionGroups = UnsignedIntLiteral() | partitionName = SimpleIdentifier() { partitionNamesList.add(partitionName); } @@ -512,7 +563,8 @@ SqlAlterTable SqlAlterTable(Span s) : ) ] { - return new SqlAlterTableAddPartitions(s.end(this), table, partitionColumn, partitionType, numPartitions, partitionNamesList, partitionQualifierList); + rawPartitionInfo = new RawPartitionInformation(); + return new SqlAlterTableAddPartitions(s.end(this), table, partitionColumn, partitionType, numPartitionGroups, numPartitions, partitionNamesList, partitionQualifierList, rawPartitionInfo); } | diff --git a/core/src/main/codegen/templates/Parser.jj b/core/src/main/codegen/templates/Parser.jj index 19995d81bd..ae18477ad6 100644 --- a/core/src/main/codegen/templates/Parser.jj +++ b/core/src/main/codegen/templates/Parser.jj @@ -132,6 +132,7 @@ import org.polypheny.db.util.SourceStringReader; import org.polypheny.db.util.Util; import org.polypheny.db.util.trace.PolyphenyDbTrace; import 
org.polypheny.db.catalog.Catalog; +import org.polypheny.db.partition.raw.*; import org.slf4j.Logger; @@ -6285,6 +6286,7 @@ SqlPostfixOperator PostfixRowOperator() : | < FRAC_SECOND: "FRAC_SECOND" > | < FRAME_ROW: "FRAME_ROW" > | < FREE: "FREE" > +| < FREQUENCY: "FREQUENCY" > | < FROM: "FROM" > | < FULL: "FULL" > | < FUNCTION: "FUNCTION" > @@ -6667,6 +6669,7 @@ SqlPostfixOperator PostfixRowOperator() : | < TABLE: "TABLE" > | < TABLE_NAME: "TABLE_NAME" > | < TABLESAMPLE: "TABLESAMPLE" > +| < TEMPERATURE: "TEMPERATURE" > | < TEMPORARY: "TEMPORARY" > | < THEN: "THEN" > | < TIES: "TIES" > diff --git a/core/src/main/java/org/polypheny/db/adapter/Adapter.java b/core/src/main/java/org/polypheny/db/adapter/Adapter.java index 1dd7af92f1..fdc65cdf52 100644 --- a/core/src/main/java/org/polypheny/db/adapter/Adapter.java +++ b/core/src/main/java/org/polypheny/db/adapter/Adapter.java @@ -48,6 +48,7 @@ import org.polypheny.db.adapter.DeployMode.DeploySetting; import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.config.Config; import org.polypheny.db.config.Config.ConfigListener; @@ -298,9 +299,10 @@ public String getAdapterName() { return properties.name(); } + public abstract void createNewSchema( SchemaPlus rootSchema, String name ); - public abstract Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore ); + public abstract Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ); public abstract Schema getCurrentSchema(); @@ -441,10 +443,13 @@ public void addInformationPhysicalNames() { group.setRefreshFunction( () -> { physicalColumnNames.reset(); Catalog.getInstance().getColumnPlacementsOnAdapter( adapterId ).forEach( placement -> { - physicalColumnNames.addRow( - placement.columnId, - Catalog.getInstance().getColumn( placement.columnId ).name, - placement.physicalSchemaName + "." + placement.physicalTableName + "." + placement.physicalColumnName ); + List cpps = Catalog.getInstance().getPartitionPlacementsByAdapter( adapterId ); + cpps.forEach( cpp -> + physicalColumnNames.addRow( + placement.columnId, + Catalog.getInstance().getColumn( placement.columnId ).name, + cpp.physicalSchemaName + "." + cpp.physicalTableName + "." 
+ placement.physicalColumnName ) + ); } ); } ); diff --git a/core/src/main/java/org/polypheny/db/adapter/DataStore.java b/core/src/main/java/org/polypheny/db/adapter/DataStore.java index 4cd32f27e7..da208d4591 100644 --- a/core/src/main/java/org/polypheny/db/adapter/DataStore.java +++ b/core/src/main/java/org/polypheny/db/adapter/DataStore.java @@ -47,17 +47,17 @@ public DataStore( final int adapterId, final String uniqueName, final Map partitionIds ); - public abstract void dropTable( Context context, CatalogTable combinedTable ); + public abstract void dropTable( Context context, CatalogTable combinedTable, List partitionIds ); public abstract void addColumn( Context context, CatalogTable catalogTable, CatalogColumn catalogColumn ); public abstract void dropColumn( Context context, CatalogColumnPlacement columnPlacement ); - public abstract void addIndex( Context context, CatalogIndex catalogIndex ); + public abstract void addIndex( Context context, CatalogIndex catalogIndex, List partitionIds ); - public abstract void dropIndex( Context context, CatalogIndex catalogIndex ); + public abstract void dropIndex( Context context, CatalogIndex catalogIndex, List partitionIds ); public abstract void updateColumnType( Context context, CatalogColumnPlacement columnPlacement, CatalogColumn catalogColumn, PolyType oldType ); diff --git a/core/src/main/java/org/polypheny/db/catalog/Catalog.java b/core/src/main/java/org/polypheny/db/catalog/Catalog.java index 9ef818b41a..89661f351c 100644 --- a/core/src/main/java/org/polypheny/db/catalog/Catalog.java +++ b/core/src/main/java/org/polypheny/db/catalog/Catalog.java @@ -35,6 +35,8 @@ import org.polypheny.db.catalog.entity.CatalogIndex; import org.polypheny.db.catalog.entity.CatalogKey; import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogPartitionGroup; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogPrimaryKey; import org.polypheny.db.catalog.entity.CatalogQueryInterface; import org.polypheny.db.catalog.entity.CatalogSchema; @@ -70,6 +72,7 @@ import org.polypheny.db.catalog.exceptions.UnknownTableTypeRuntimeException; import org.polypheny.db.catalog.exceptions.UnknownUserException; import org.polypheny.db.config.RuntimeConfig; +import org.polypheny.db.partition.properties.PartitionProperty; import org.polypheny.db.rel.RelCollation; import org.polypheny.db.rel.RelNode; import org.polypheny.db.rel.type.RelDataType; @@ -430,12 +433,12 @@ protected final boolean isValidIdentifier( final String str ) { * @param physicalSchemaName The schema name on the adapter * @param physicalTableName The table name on the adapter * @param physicalColumnName The column name on the adapter - * @param partitionIds List of partitions to place on this column placement (may be null) + * @param partitionGroupIds List of partitions to place on this column placement (may be null) */ - public abstract void addColumnPlacement( int adapterId, long columnId, PlacementType placementType, String physicalSchemaName, String physicalTableName, String physicalColumnName, List partitionIds ); + public abstract void addColumnPlacement( int adapterId, long columnId, PlacementType placementType, String physicalSchemaName, String physicalTableName, String physicalColumnName, List partitionGroupIds ); /** - * Deletes a column placement + * Deletes all dependent column placements * * @param adapterId The id of the adapter * @param columnId The id of the column @@ -443,7 +446,8 @@ 
protected final boolean isValidIdentifier( final String str ) { public abstract void deleteColumnPlacement( int adapterId, long columnId ); /** - * Get a specific column placement. + * Gets a collective list of column placements per column on a adapter. + * Effectively used to retrieve all relevant placements including partitions. * * @param adapterId The id of the adapter * @param columnId The id of the column @@ -466,7 +470,7 @@ protected final boolean isValidIdentifier( final String str ) { * @param columnId The id of the specific column * @return List of column placements of specific column */ - public abstract List getColumnPlacements( long columnId ); + public abstract List getColumnPlacement( long columnId ); /** * Get column placements of a specific table on a specific adapter @@ -474,7 +478,7 @@ protected final boolean isValidIdentifier( final String str ) { * @param adapterId The id of the adapter * @return List of column placements of the table on the specified adapter */ - public abstract List getColumnPlacementsOnAdapter( int adapterId, long tableId ); + public abstract List getColumnPlacementsOnAdapterPerTable( int adapterId, long tableId ); public abstract List getColumnPlacementsOnAdapterSortedByPhysicalPosition( int storeId, long tableId ); @@ -528,16 +532,15 @@ protected final boolean isValidIdentifier( final String str ) { public abstract void updateColumnPlacementPhysicalPosition( int adapterId, long columnId ); /** - * Change physical names of a placement. + * Change physical names of all column placements. * * @param adapterId The id of the adapter * @param columnId The id of the column * @param physicalSchemaName The physical schema name - * @param physicalTableName The physical table name * @param physicalColumnName The physical column name * @param updatePhysicalColumnPosition Whether to reset the column position (highest number in the table; represents that the column is now at the last position) */ - public abstract void updateColumnPlacementPhysicalNames( int adapterId, long columnId, String physicalSchemaName, String physicalTableName, String physicalColumnName, boolean updatePhysicalColumnPosition ); + public abstract void updateColumnPlacementPhysicalNames( int adapterId, long columnId, String physicalSchemaName, String physicalColumnName, boolean updatePhysicalColumnPosition ); /** * Get all columns of the specified table. @@ -588,7 +591,6 @@ protected final boolean isValidIdentifier( final String str ) { */ public abstract CatalogColumn getColumn( String databaseName, String schemaName, String tableName, String columnName ) throws UnknownColumnException, UnknownSchemaException, UnknownDatabaseException, UnknownTableException; - /** * Adds a column. * @@ -1007,11 +1009,37 @@ protected final boolean isValidIdentifier( final String str ) { * * @param tableId The unique id of the table * @param schemaId The unique id of the table - * @param ownerId the partitionId to be deleted * @param partitionType partition Type of the added partition + * @return The id of the created partitionGroup + */ + public abstract long addPartitionGroup( long tableId, String partitionGroupName, long schemaId, PartitionType partitionType, long numberOfInternalPartitions, List effectivePartitionGroupQualifier, boolean isUnbound ) throws GenericCatalogException; + + /** + * Deletes a single partition and all references. 
+ * + * @param tableId The unique id of the table + * @param schemaId The unique id of the table + * @param partitionGroupId The partitionGroupId to be deleted + */ + public abstract void deletePartitionGroup( long tableId, long schemaId, long partitionGroupId ); + + /** + * Get a partition object by its unique id + * + * @param partitionGroupId The unique id of the partition + * @return A catalog partitionGroup + */ + public abstract CatalogPartitionGroup getPartitionGroup( long partitionGroupId ); + + /** + * Adds a partition to the catalog + * + * @param tableId The unique id of the table + * @param schemaId The unique id of the table + * @param partitionGroupId partitionGroupId where the partition should be initially added to * @return The id of the created partition */ - public abstract long addPartition( long tableId, String partitionName, long schemaId, int ownerId, PartitionType partitionType, List effectivePartitionQualifier, boolean isUnbound ) throws GenericCatalogException; + public abstract long addPartition( long tableId, long schemaId, long partitionGroupId, List effectivePartitionGroupQualifier, boolean isUnbound ) throws GenericCatalogException; /** * Deletes a single partition and all references. @@ -1030,16 +1058,25 @@ protected final boolean isValidIdentifier( final String str ) { */ public abstract CatalogPartition getPartition( long partitionId ); + + /** + * Retrieves a list of partitions which are associated with a specific table + * + * @param tableId Table for which partitions shall be gathered + * @return List of all partitions associated with that table + */ + public abstract List getPartitionsByTable( long tableId ); + /** * Effectively partitions a table with the specified partitionType * * @param tableId Table to be partitioned * @param partitionType Partition function to apply on the table * @param partitionColumnId Column used to apply the partition function on - * @param numPartitions Explicit number of partitions - * @param partitionIds List of ids of the catalog partitions + * @param numPartitionGroups Explicit number of partitions + * @param partitionGroupIds List of ids of the catalog partitions */ - public abstract void partitionTable( long tableId, PartitionType partitionType, long partitionColumnId, int numPartitions, List partitionIds ); + public abstract void partitionTable( long tableId, PartitionType partitionType, long partitionColumnId, int numPartitionGroups, List partitionGroupIds, PartitionProperty partitionProperty ); /** * Merges a partitioned table. @@ -1049,13 +1086,72 @@ protected final boolean isValidIdentifier( final String str ) { */ public abstract void mergeTable( long tableId ); + /** + * Updates partitionProperties on table + * + * @param tableId Table to be partitioned + * @param partitionProperty Partition properties + */ + public abstract void updateTablePartitionProperties( long tableId, PartitionProperty partitionProperty ); + /** * Get a List of all partitions belonging to a specific table * * @param tableId Table to be queried * @return list of all partitions on this table */ - public abstract List getPartitions( long tableId ); + public abstract List getPartitionGroups( long tableId ); + + /** + * Get all partitions of the specified database which fit to the specified filter patterns. + * getColumns(xid, databaseName, null, null, null) returns all partitions of the database. + * + * @param databaseNamePattern Pattern for the database name. null returns all. + * @param schemaNamePattern Pattern for the schema name. 
null returns all.
+ * @param tableNamePattern Pattern for the table name. null returns all.
+ * @return List of columns which fit to the specified filters. If there is no column which meets the criteria, an empty list is returned.
+ */
+ public abstract List<CatalogPartitionGroup> getPartitionGroups( Pattern databaseNamePattern, Pattern schemaNamePattern, Pattern tableNamePattern );
+
+ /**
+ * Updates the specified partition group with the attached partitionIds
+ *
+ * @param partitionGroupId Partition Group to be updated
+ * @param partitionIds List of new partitionIds
+ */
+ public abstract void updatePartitionGroup( long partitionGroupId, List<Long> partitionIds );
+
+ /**
+ * Adds a partition to an already existing partition Group
+ *
+ * @param partitionGroupId Group to add to
+ * @param partitionId Partition to add
+ */
+ public abstract void addPartitionToGroup( long partitionGroupId, Long partitionId );
+
+ /**
+ * Removes a partition from an already existing partition Group
+ *
+ * @param partitionGroupId Group to remove the partition from
+ * @param partitionId Partition to remove
+ */
+ public abstract void removePartitionFromGroup( long partitionGroupId, Long partitionId );
+
+ /**
+ * Assigns the partition to a new partitionGroup
+ *
+ * @param partitionId Partition to move
+ * @param partitionGroupId New target group to move the partition to
+ */
+ public abstract void updatePartition( long partitionId, Long partitionGroupId );
+
+ /**
+ * Get a List of all partitions currently assigned to a specific PartitionGroup
+ *
+ * @param partitionGroupId PartitionGroup to be queried
+ * @return list of all partitions assigned to this partition group
+ */
+ public abstract List<CatalogPartition> getPartitions( long partitionGroupId );

 /**
 * Get all partitions of the specified database which fit to the specified filter patterns.
@@ -1074,37 +1170,46 @@ protected final boolean isValidIdentifier( final String str ) {
 * @param tableId Table to be queried
 * @return list of all partition names on this table
 */
- public abstract List<String> getPartitionNames( long tableId );
+ public abstract List<String> getPartitionGroupNames( long tableId );

 /**
 * Get placements by partition. Identify the location of partitions.
 * Essentially returns all ColumnPlacements which hold the specified partitionID.
 *
 * @param tableId The id of the table
- * @param partitionId The id of the partition
+ * @param partitionGroupId The id of the partition group
 * @param columnId The id of the column
 * @return List of CatalogColumnPlacements
 */
- public abstract List<CatalogColumnPlacement> getColumnPlacementsByPartition( long tableId, long partitionId, long columnId );
+ public abstract List<CatalogColumnPlacement> getColumnPlacementsByPartitionGroup( long tableId, long partitionGroupId, long columnId );

 /**
 * Get adapters by partition.
Identify the location of partitions/replicas
 * Essentially returns all adapters which hold the specified partitionID
 *
 * @param tableId The unique id of the table
- * @param partitionId The unique id of the partition
+ * @param partitionGroupId The unique id of the partition group
 * @return List of CatalogAdapters
 */
- public abstract List<CatalogAdapter> getAdaptersByPartition( long tableId, long partitionId );
+ public abstract List<CatalogAdapter> getAdaptersByPartitionGroup( long tableId, long partitionGroupId );

 /**
 * Updates the reference which partitions reside on which DataPlacement (identified by adapterId and tableId)
 *
 * @param adapterId The unique id of the adapter
 * @param tableId The unique id of the table
- * @param partitionIds List of partitionsIds to be updated
+ * @param partitionGroupIds List of partitionGroupIds to be updated
 */
- public abstract void updatePartitionsOnDataPlacement( int adapterId, long tableId, List<Long> partitionIds );
+ public abstract void updatePartitionGroupsOnDataPlacement( int adapterId, long tableId, List<Long> partitionGroupIds );
+
+ /**
+ * Get all partitionGroups of a DataPlacement (identified by adapterId and tableId)
+ *
+ * @param adapterId The unique id of the adapter
+ * @param tableId The unique id of the table
+ * @return List of partitionGroupIds
+ */
+ public abstract List<Long> getPartitionGroupsOnDataPlacement( int adapterId, long tableId );

 /**
 * Get all partitions of a DataPlacement (identified by adapterId and tableId)
@@ -1122,7 +1227,7 @@ protected final boolean isValidIdentifier( final String str ) {
 * @param tableId The unique id of the table
 * @return List of partitionId Indices
 */
- public abstract List<Long> getPartitionsIndexOnDataPlacement( int adapterId, long tableId );
+ public abstract List<Long> getPartitionGroupsIndexOnDataPlacement( int adapterId, long tableId );

 /**
 * Mostly needed if a placement is dropped from a store.
 *
 * @param storeId Placement to be updated with new partitions
 * @param tableId The unique id of the table
 */
- public abstract void deletePartitionsOnDataPlacement( int storeId, long tableId );
+ public abstract void deletePartitionGroupsOnDataPlacement( int storeId, long tableId );

 /**
 * Checks depending on the current partition distribution and partitionType
 *
 * @param adapterId The id of the adapter to be checked
 * @param tableId The id of the table to be checked
 * @param columnId The id of the column to be checked
+ * @param threshold Accepted tolerance threshold of how many placements must remain after the new partitionGroup distribution
 * @return If it is correctly distributed or not
 */
- public abstract boolean validatePartitionDistribution( int adapterId, long tableId, long columnId );
+ public abstract boolean validatePartitionGroupDistribution( int adapterId, long tableId, long columnId, int threshold );

 /**
 * Flags the table for deletion.
@@ -1162,6 +1268,109 @@ protected final boolean isValidIdentifier( final String str ) {
 */
 public abstract boolean isTableFlaggedForDeletion( long tableId );

+ /**
+ * Adds a placement for a partition.
+ * + * @param adapterId The adapter on which the partition should be placed + * @param tableId The table for which a partition placement shall be created + * @param partitionId The id of a specific partition that shall create a new placement + * @param placementType The type of placement + * @param physicalSchemaName The schema name on the adapter + * @param physicalTableName The table name on the adapter + */ + public abstract void addPartitionPlacement( int adapterId, long tableId, long partitionId, PlacementType placementType, String physicalSchemaName, String physicalTableName ); + + /** + * Change physical names of a partition placement. + * + * @param adapterId The id of the adapter + * @param partitionId The id of the partition + * @param physicalSchemaName The physical schema name + * @param physicalTableName The physical table name + */ + public abstract void updatePartitionPlacementPhysicalNames( int adapterId, long partitionId, String physicalSchemaName, String physicalTableName ); + + /** + * Deletes a placement for a partition. + * + * @param adapterId The adapter from which the partition placement shall be removed + * @param partitionId The id of a partition which shall be removed from that store. + */ + public abstract void deletePartitionPlacement( int adapterId, long partitionId ); + + /** + * Returns a specific partition entity which is placed on a store. + * + * @param adapterId The adapter on which the requested partition placements reside + * @param partitionId The id of the requested partition + * @return The requested PartitionPlacement on that store for the given id + */ + public abstract CatalogPartitionPlacement getPartitionPlacement( int adapterId, long partitionId ); + + /** + * Returns a list of all Partition Placements which currently reside on an adapter, regardless of the table. + * + * @param adapterId The adapter on which the requested partition placements reside + * @return A list of all Partition Placements that are currently located on that specific store + */ + public abstract List getPartitionPlacementsByAdapter( int adapterId ); + + /** + * Returns a list of all Partition Placements which currently reside on an adapter, for a specific table. + * + * @param adapterId The adapter on which the requested partition placements reside + * @param tableId The table for which all partition placements on an adapter should be considered + * @return A list of all Partition Placements that are currently located on that specific store for an individual table + */ + public abstract List getPartitionPlacementByTable( int adapterId, long tableId ); + + /** + * Returns a list of all Partition Placements which are currently associated with a table. + * + * @param tableId The table with which the requested partition placements are currently associated. + * @return A list of all Partition Placements that belong to the desired table + */ + public abstract List getAllPartitionPlacementsByTable( long tableId ); + + /** + * Get all Partition Placements which are associated with an individual partition id. + * Identifies at which locations and how often the individual partition is placed. + * + * @param partitionId The requested partition id + * @return A list of Partition Placements which are physically responsible for that partition + */ + public abstract List getPartitionPlacements( long partitionId ); + + /** + * Returns all tables which are in need of special periodic treatment.
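+ * <p>A minimal sketch of the intended interplay (hypothetical timestamp; relies on the FrequencyMap introduced by this patch):</p>
+ * <pre>{@code
+ * // periodically re-evaluate the access frequency of all registered tables
+ * for ( CatalogTable table : Catalog.getInstance().getTablesForPeriodicProcessing() ) {
+ *     FrequencyMap.INSTANCE.determinePartitionFrequency( table, System.currentTimeMillis() );
+ * }
+ * }</pre>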
+ * + * @return List of tables which need to be periodically processed + */ + public abstract List getTablesForPeriodicProcessing(); + + /** + * Registers a table to be considered for periodic processing + * + * @param tableId Id of table to be considered for periodic processing + */ + public abstract void addTableToPeriodicProcessing( long tableId ); + + /** + * Remove a table from periodic background processing + * + * @param tableId Id of table to be removed from periodic processing + */ + public abstract void removeTableFromPeriodicProcessing( long tableId ); + + /** + * Probes if a Partition Placement on an adapter for a specific partition already exists. + * + * @param adapterId Adapter on which to check + * @param partitionId Partition which to check + * @return true if the partition placement exists, false otherwise + */ + public abstract boolean checkIfExistsPartitionPlacement( int adapterId, long partitionId ); + /* * @@ -1487,7 +1696,9 @@ public enum PartitionType { NONE( 0 ), RANGE( 1 ), LIST( 2 ), - HASH( 3 ); + HASH( 3 ), + // TODO @HENNLO think about excluding "UDPF" here; these should only be used for internal Partition Functions + TEMPERATURE( 4 ); private final int id; diff --git a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogColumnPlacement.java b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogColumnPlacement.java index 56328dfeed..8c0c7ee47a 100644 --- a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogColumnPlacement.java +++ b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogColumnPlacement.java @@ -39,7 +39,6 @@ public class CatalogColumnPlacement implements CatalogEntity { public final long physicalPosition; public final String physicalSchemaName; - public final String physicalTableName; public final String physicalColumnName; @@ -50,7 +49,6 @@ public CatalogColumnPlacement( @NonNull final String adapterUniqueName, @NonNull final PlacementType placementType, final String physicalSchemaName, - final String physicalTableName, final String physicalColumnName, final long physicalPosition ) { this.tableId = tableId; @@ -59,7 +57,6 @@ public CatalogColumnPlacement( this.adapterUniqueName = adapterUniqueName; this.placementType = placementType; this.physicalSchemaName = physicalSchemaName; - this.physicalTableName = physicalTableName; this.physicalColumnName = physicalColumnName; this.physicalPosition = physicalPosition; } @@ -97,7 +94,6 @@ public Serializable[] getParameterArray() { adapterUniqueName, placementType.name(), physicalSchemaName, - physicalTableName, physicalColumnName }; } diff --git a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartition.java b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartition.java index fc711346f0..ce94d1aeae 100644 --- a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartition.java +++ b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartition.java @@ -19,62 +19,43 @@ import java.io.Serializable; import java.util.List; import lombok.EqualsAndHashCode; -import lombok.SneakyThrows; -import org.polypheny.db.catalog.Catalog; +import lombok.Getter; @EqualsAndHashCode -public final class CatalogPartition implements CatalogEntity { +public class CatalogPartition implements CatalogEntity { - private static final long serialVersionUID = 2312903632511266177L; + private static final long serialVersionUID = 6187228972854325431L; public final long id; - public final String partitionName; + @Getter + public final List partitionQualifiers; + + // To be checked if even needed + @Getter +
public final long partitionGroupId; public final long tableId; public final long schemaId; public final long databaseId; - public final List partitionQualifiers; public final boolean isUnbound; - public final long partitionKey; - public CatalogPartition( final long id, - final String partitionName, final long tableId, final long schemaId, final long databaseId, - final long partitionKey, final List partitionQualifiers, - final boolean isUnbound ) { + final boolean isUnbound, + final long partitionGroupId ) { this.id = id; - this.partitionName = partitionName; this.tableId = tableId; this.schemaId = schemaId; this.databaseId = databaseId; - this.partitionKey = partitionKey; this.partitionQualifiers = partitionQualifiers; this.isUnbound = isUnbound; - } - - - @SneakyThrows - public String getTableName() { - return Catalog.getInstance().getTable( tableId ).name; - } - - - @SneakyThrows - public String getDatabaseName() { - return Catalog.getInstance().getDatabase( databaseId ).name; - } - - - @SneakyThrows - public String getSchemaName() { - return Catalog.getInstance().getSchema( schemaId ).name; + this.partitionGroupId = partitionGroupId; } diff --git a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionGroup.java b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionGroup.java new file mode 100644 index 0000000000..d0a7b49ac7 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionGroup.java @@ -0,0 +1,91 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.catalog.entity; + +import java.io.Serializable; +import java.util.List; +import lombok.EqualsAndHashCode; +import lombok.SneakyThrows; +import org.polypheny.db.catalog.Catalog; + + +@EqualsAndHashCode +public final class CatalogPartitionGroup implements CatalogEntity { + + private static final long serialVersionUID = 2312903632511266177L; + + public final long id; + public final String partitionGroupName; + + public final long tableId; + public final long schemaId; + public final long databaseId; + public final List partitionQualifiers; + public final List partitionIds; + public final boolean isUnbound; + + public final long partitionKey; + + + public CatalogPartitionGroup( + final long id, + final String partitionGroupName, + final long tableId, + final long schemaId, + final long databaseId, + final long partitionKey, + final List partitionQualifiers, + List partitionIds, + final boolean isUnbound ) { + this.id = id; + this.partitionGroupName = partitionGroupName; + this.tableId = tableId; + this.schemaId = schemaId; + this.databaseId = databaseId; + this.partitionKey = partitionKey; + // TODO @HENNLO Although the qualifiers are now part of CatalogPartitions, it might be a good improvement to accumulate all qualifiers of all + // internal partitions here to speed up query time. 
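+ // A sketch of such an accumulation (hypothetical; assumes a Catalog#getPartition( long ) lookup exists):
+ // partitionIds.stream()
+ //         .flatMap( partitionId -> Catalog.getInstance().getPartition( partitionId ).getPartitionQualifiers().stream() )
+ //         .collect( Collectors.toList() );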
+ this.partitionQualifiers = partitionQualifiers; + this.partitionIds = partitionIds; + this.isUnbound = isUnbound; + } + + + @SneakyThrows + public String getTableName() { + return Catalog.getInstance().getTable( tableId ).name; + } + + + @SneakyThrows + public String getDatabaseName() { + return Catalog.getInstance().getDatabase( databaseId ).name; + } + + + @SneakyThrows + public String getSchemaName() { + return Catalog.getInstance().getSchema( schemaId ).name; + } + + + @Override + public Serializable[] getParameterArray() { + throw new RuntimeException( "Not implemented" ); + } + +} diff --git a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionPlacement.java b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionPlacement.java new file mode 100644 index 0000000000..ed3a8867cc --- /dev/null +++ b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogPartitionPlacement.java @@ -0,0 +1,61 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.catalog.entity; + +import java.io.Serializable; +import lombok.NonNull; +import org.polypheny.db.catalog.Catalog.PlacementType; + + +public class CatalogPartitionPlacement implements CatalogEntity { + + private static final long serialVersionUID = 3035193464866141590L; + + public final long tableId; + public final long partitionId; + public final int adapterId; + public final String adapterUniqueName; + public final PlacementType placementType; + + public final String physicalSchemaName; + public final String physicalTableName; + + + public CatalogPartitionPlacement( + final long tableId, + final int adapterId, + @NonNull final String adapterUniqueName, + @NonNull final PlacementType placementType, + final String physicalSchemaName, + final String physicalTableName, + final long partitionId ) { + this.tableId = tableId; + this.adapterId = adapterId; + this.adapterUniqueName = adapterUniqueName; + this.placementType = placementType; + this.physicalSchemaName = physicalSchemaName; + this.physicalTableName = physicalTableName; + this.partitionId = partitionId; + } + + + @Override + public Serializable[] getParameterArray() { + return new Serializable[0]; + } + +} diff --git a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogTable.java b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogTable.java index 5edcbd1b97..d35b977c65 100644 --- a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogTable.java +++ b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogTable.java @@ -29,12 +29,13 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.Catalog.PartitionType; import org.polypheny.db.catalog.Catalog.TableType; +import org.polypheny.db.partition.properties.PartitionProperty; @EqualsAndHashCode public class CatalogTable implements CatalogEntity, Comparable { - private static final long serialVersionUID = 5426944084650275437L; + private static final long serialVersionUID = 
1781666800808312001L; public final long id; public final String name; @@ -50,10 +51,8 @@ public class CatalogTable implements CatalogEntity, Comparable { public final boolean isPartitioned; public final Catalog.PartitionType partitionType; - public final ImmutableList partitionIds; public final long partitionColumnId; - - public final long numPartitions; + public final PartitionProperty partitionProperty; public final ImmutableList connectedViews; @@ -69,7 +68,8 @@ public CatalogTable( @NonNull final TableType type, final Long primaryKey, @NonNull final ImmutableMap> placementsByAdapter, - boolean modifiable ) { + boolean modifiable, + PartitionProperty partitionProperty ) { this.id = id; this.name = name; this.columnIds = columnIds; @@ -84,9 +84,8 @@ public CatalogTable( this.isPartitioned = false; this.partitionType = PartitionType.NONE; - this.partitionIds = null; this.partitionColumnId = 0; - this.numPartitions = 0; + this.partitionProperty = partitionProperty; this.connectedViews = ImmutableList.builder().build(); if ( type == TableType.TABLE && !modifiable ) { @@ -109,10 +108,9 @@ public CatalogTable( final Long primaryKey, @NonNull final ImmutableMap> placementsByAdapter, boolean modifiable, - final long numPartitions, final PartitionType partitionType, - final ImmutableList partitionIds, final long partitionColumnId, + PartitionProperty partitionProperty, ImmutableList connectedViews ) { this.id = id; this.name = name; @@ -126,17 +124,15 @@ public CatalogTable( this.placementsByAdapter = placementsByAdapter; this.modifiable = modifiable; this.partitionType = partitionType; - this.partitionIds = partitionIds; this.partitionColumnId = partitionColumnId; - this.numPartitions = numPartitions; this.isPartitioned = true; + this.partitionProperty = partitionProperty; this.connectedViews = connectedViews; if ( type == TableType.TABLE && !modifiable ) { throw new RuntimeException( "Tables of table type TABLE must be modifiable!" ); } - } @@ -152,11 +148,10 @@ public CatalogTable( final Long primaryKey, @NonNull final ImmutableMap> placementsByAdapter, boolean modifiable, - final long numPartitions, final PartitionType partitionType, - final ImmutableList partitionIds, final long partitionColumnId, boolean isPartitioned, + PartitionProperty partitionProperty, ImmutableList connectedViews ) { this.id = id; this.name = name; @@ -172,9 +167,49 @@ public CatalogTable( this.isPartitioned = isPartitioned; this.partitionType = partitionType; - this.partitionIds = partitionIds; this.partitionColumnId = partitionColumnId; - this.numPartitions = numPartitions; + + this.partitionProperty = partitionProperty; + + this.connectedViews = connectedViews; + + if ( type == TableType.TABLE && !modifiable ) { + throw new RuntimeException( "Tables of table type TABLE must be modifiable!" 
); + } + } + + + public CatalogTable( + final long id, + @NonNull final String name, + final ImmutableList columnIds, + final long schemaId, + final long databaseId, + final int ownerId, + @NonNull final String ownerName, + @NonNull final TableType type, + final Long primaryKey, + @NonNull final ImmutableMap> placementsByAdapter, + boolean modifiable, + PartitionProperty partitionProperty, + ImmutableList connectedViews ) { + this.id = id; + this.name = name; + this.columnIds = columnIds; + this.schemaId = schemaId; + this.databaseId = databaseId; + this.ownerId = ownerId; + this.ownerName = ownerName; + this.tableType = type; + this.primaryKey = primaryKey; + this.placementsByAdapter = placementsByAdapter; + this.modifiable = modifiable; + + this.isPartitioned = false; + this.partitionType = PartitionType.NONE; + this.partitionColumnId = 0; + + this.partitionProperty = partitionProperty; this.connectedViews = connectedViews; @@ -263,11 +298,10 @@ public CatalogTable getRenamed( String newName ) { primaryKey, placementsByAdapter, modifiable, - numPartitions, partitionType, - partitionIds, partitionColumnId, isPartitioned, + partitionProperty, connectedViews ); } @@ -285,11 +319,10 @@ public CatalogTable getConnectedViews( ImmutableList newConnectedViews ) { primaryKey, placementsByAdapter, modifiable, - numPartitions, partitionType, - partitionIds, partitionColumnId, isPartitioned, + partitionProperty, newConnectedViews ); } @@ -307,11 +340,10 @@ public CatalogTable getTableWithColumns( ImmutableList newColumnIds ) { primaryKey, placementsByAdapter, modifiable, - numPartitions, partitionType, - partitionIds, partitionColumnId, isPartitioned, + partitionProperty, connectedViews ); } diff --git a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogView.java b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogView.java index da394d2c72..6cfefaa0f7 100644 --- a/core/src/main/java/org/polypheny/db/catalog/entity/CatalogView.java +++ b/core/src/main/java/org/polypheny/db/catalog/entity/CatalogView.java @@ -26,6 +26,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.Catalog.PartitionType; import org.polypheny.db.catalog.Catalog.TableType; +import org.polypheny.db.partition.properties.PartitionProperty; import org.polypheny.db.plan.Convention; import org.polypheny.db.plan.RelOptCluster; import org.polypheny.db.plan.RelTraitSet; @@ -67,8 +68,9 @@ public CatalogView( boolean modifiable, RelCollation relCollation, Map> underlyingTables, - RelDataType fieldList ) { - super( id, name, columnIds, schemaId, databaseId, ownerId, ownerName, type, primaryKey, placementsByAdapter, modifiable ); + RelDataType fieldList, + PartitionProperty partitionProperty ) { + super( id, name, columnIds, schemaId, databaseId, ownerId, ownerName, type, primaryKey, placementsByAdapter, modifiable, partitionProperty ); this.definition = definition; this.relCollation = relCollation; this.underlyingTables = underlyingTables; @@ -89,16 +91,15 @@ public CatalogView( Long primaryKey, ImmutableMap> placementsByAdapter, boolean modifiable, - long numPartitions, PartitionType partitionType, - ImmutableList partitionIds, long partitionColumnId, boolean isPartitioned, + PartitionProperty partitionProperty, RelCollation relCollation, ImmutableList connectedViews, Map> underlyingTables, RelDataType fieldList ) { - super( id, name, columnIds, schemaId, databaseId, ownerId, ownerName, tableType, primaryKey, placementsByAdapter, modifiable, numPartitions, partitionType, partitionIds, 
partitionColumnId, isPartitioned, connectedViews ); + super( id, name, columnIds, schemaId, databaseId, ownerId, ownerName, tableType, primaryKey, placementsByAdapter, modifiable, partitionType, partitionColumnId, isPartitioned, partitionProperty, connectedViews ); this.definition = definition; this.relCollation = relCollation; this.underlyingTables = underlyingTables; @@ -121,11 +122,10 @@ public CatalogTable getConnectedViews( ImmutableList newConnectedViews ) { primaryKey, placementsByAdapter, modifiable, - numPartitions, partitionType, - partitionIds, partitionColumnId, isPartitioned, + partitionProperty, relCollation, newConnectedViews, underlyingTables, @@ -148,11 +148,10 @@ public CatalogTable getRenamed( String newName ) { primaryKey, placementsByAdapter, modifiable, - numPartitions, partitionType, - partitionIds, partitionColumnId, isPartitioned, + partitionProperty, relCollation, connectedViews, underlyingTables, @@ -177,7 +176,8 @@ public CatalogTable getTableWithColumns( ImmutableList newColumnIds ) { modifiable, relCollation, underlyingTables, - fieldList ); + fieldList, + partitionProperty ); } diff --git a/core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionIdRuntimeException.java b/core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionGroupIdRuntimeException.java similarity index 82% rename from core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionIdRuntimeException.java rename to core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionGroupIdRuntimeException.java index 3eb2217769..fd2d42dd36 100644 --- a/core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionIdRuntimeException.java +++ b/core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionGroupIdRuntimeException.java @@ -17,9 +17,9 @@ package org.polypheny.db.catalog.exceptions; -public class UnknownPartitionIdRuntimeException extends CatalogRuntimeException { +public class UnknownPartitionGroupIdRuntimeException extends CatalogRuntimeException { - public UnknownPartitionIdRuntimeException( long partitionId ) { + public UnknownPartitionGroupIdRuntimeException( long partitionId ) { super( "There is no partition with id '" + partitionId + "'." ); } diff --git a/core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionPlacementException.java b/core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionPlacementException.java new file mode 100644 index 0000000000..561ef8673b --- /dev/null +++ b/core/src/main/java/org/polypheny/db/catalog/exceptions/UnknownPartitionPlacementException.java @@ -0,0 +1,26 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.catalog.exceptions; + + +public class UnknownPartitionPlacementException extends CatalogRuntimeException { + + public UnknownPartitionPlacementException( long adapterId, long partitionId ) { + super( "There is no partition placement for partition id '" + partitionId + "' on adapter with id '" + adapterId + "'" ); + } + +} diff --git a/core/src/main/java/org/polypheny/db/config/RuntimeConfig.java b/core/src/main/java/org/polypheny/db/config/RuntimeConfig.java index aa17ba42f3..6260ce3544 100644 --- a/core/src/main/java/org/polypheny/db/config/RuntimeConfig.java +++ b/core/src/main/java/org/polypheny/db/config/RuntimeConfig.java @@ -321,12 +321,14 @@ public enum RuntimeConfig { ConfigType.INSTANCE_LIST, "dockerGroup" ), + FILE_HANDLE_CACHE_SIZE( "runtime/fileHandleCacheSize", "Size (in Bytes) up to which media files are cached in-memory instead of creating a temporary file. Needs to be >= 0 and smaller than Integer.MAX_SIZE. Setting to zero disables caching of media files.", 0, ConfigType.INTEGER, "runtimeExecutionGroup" ), + QUEUE_PROCESSING_INTERVAL( "runtime/queueProcessingInterval", "Rate of passive tracking of statistics.", BackgroundTask.TaskSchedulingType.EVERY_TEN_SECONDS, @@ -337,7 +339,13 @@ "Number of elements in workload queue to process per time.", 50, ConfigType.INTEGER, - "monitoringSettingsQueueGroup" ); + "monitoringSettingsQueueGroup" ), + + TEMPERATURE_FREQUENCY_PROCESSING_INTERVAL( "runtime/partitionFrequencyProcessingInterval", + "Time interval in seconds determining how often the access frequency of all TEMPERATURE-partitioned tables is analyzed and the data is redistributed", + BackgroundTask.TaskSchedulingType.EVERY_MINUTE, + ConfigType.ENUM, + "temperaturePartitionProcessingSettingsGroup" ); private final String key; @@ -439,6 +447,16 @@ monitoringSettingsQueueGroup.withTitle( "Queue Processing" ); configManager.registerWebUiPage( monitoringSettingsPage ); configManager.registerWebUiGroup( monitoringSettingsQueueGroup ); + + // Partitioning specific settings + final WebUiPage partitionSettingsPage = new WebUiPage( + "partitionSettings", + "Partitioning", + "Settings for partitioning" ); + final WebUiGroup temperaturePartitionProcessingSettingsGroup = new WebUiGroup( "temperaturePartitionProcessingSettingsGroup", partitionSettingsPage.getId() ); + temperaturePartitionProcessingSettingsGroup.withTitle( "TEMPERATURE Partition Processing" ); + configManager.registerWebUiPage( partitionSettingsPage ); + configManager.registerWebUiGroup( temperaturePartitionProcessingSettingsGroup ); } diff --git a/core/src/main/java/org/polypheny/db/ddl/DdlManager.java b/core/src/main/java/org/polypheny/db/ddl/DdlManager.java index 5b2fcddbd4..09ff931963 100644 --- a/core/src/main/java/org/polypheny/db/ddl/DdlManager.java +++ b/core/src/main/java/org/polypheny/db/ddl/DdlManager.java @@ -47,16 +47,18 @@ import org.polypheny.db.ddl.exception.LastPlacementException; import org.polypheny.db.ddl.exception.MissingColumnPlacementException; import org.polypheny.db.ddl.exception.NotNullAndDefaultValueException; -import org.polypheny.db.ddl.exception.PartitionNamesNotUniqueException; +import org.polypheny.db.ddl.exception.PartitionGroupNamesNotUniqueException; import org.polypheny.db.ddl.exception.PlacementAlreadyExistsException; import org.polypheny.db.ddl.exception.PlacementIsPrimaryException; import org.polypheny.db.ddl.exception.PlacementNotExistsException; import org.polypheny.db.ddl.exception.SchemaNotExistException; import
org.polypheny.db.ddl.exception.UnknownIndexMethodException; +import org.polypheny.db.partition.raw.RawPartitionInformation; import org.polypheny.db.rel.RelCollation; import org.polypheny.db.rel.RelNode; import org.polypheny.db.sql.SqlDataTypeSpec; import org.polypheny.db.sql.SqlIdentifier; +import org.polypheny.db.sql.SqlLiteral; import org.polypheny.db.sql.SqlNode; import org.polypheny.db.transaction.Statement; import org.polypheny.db.transaction.TransactionException; @@ -217,12 +219,12 @@ public static DdlManager getInstance() { * * @param catalogTable the table * @param columnIds the ids of the columns for which to create a new placement - * @param partitionIds the ids of the partitions of the column - * @param partitionNames the name for these partition + * @param partitionGroupIds the ids of the partition groups of the column + * @param partitionGroupNames the names for these partition groups * @param dataStore the data store on which to create the placement * @param statement the query statement */ - public abstract void addPlacement( CatalogTable catalogTable, List columnIds, List partitionIds, List partitionNames, DataStore dataStore, Statement statement ) throws PlacementAlreadyExistsException; + public abstract void addPlacement( CatalogTable catalogTable, List columnIds, List partitionGroupIds, List partitionGroupNames, DataStore dataStore, Statement statement ) throws PlacementAlreadyExistsException; /** * Adds a new primary key to a table @@ -362,12 +364,23 @@ public static DdlManager getInstance() { * * @param catalogTable the table * @param columnIds which columns should be placed on the specified data store - * @param partitionIds the ids of the partitions of this column - * @param partitionNames the name of these partitions + * @param partitionGroupIds the ids of the partition groups of this column + * @param partitionGroupNames the names of these partition groups * @param storeInstance the data store * @param statement the used statement */ - public abstract void modifyColumnPlacement( CatalogTable catalogTable, List columnIds, List partitionIds, List partitionNames, DataStore storeInstance, Statement statement ) throws PlacementNotExistsException, IndexPreventsRemovalException, LastPlacementException; + public abstract void modifyColumnPlacement( CatalogTable catalogTable, List columnIds, List partitionGroupIds, List partitionGroupNames, DataStore storeInstance, Statement statement ) throws PlacementNotExistsException, IndexPreventsRemovalException, LastPlacementException; + + /** + * Modifies the partition distribution on the selected store. Can be used to add or remove partitions on a store, + * which consequently alters the Partition Placements. + * + * @param catalogTable the table + * @param partitionGroupIds the desired target state of partition groups which should remain on this store + * @param storeInstance the data store on which the partition placements should be altered + * @param statement the used statement + */ + public abstract void modifyPartitionPlacement( CatalogTable catalogTable, List partitionGroupIds, DataStore storeInstance, Statement statement ); /** * Add a column placement for a specified column on a specified data store.
If the store already contains a placement of @@ -430,8 +443,7 @@ public static DdlManager getInstance() { * @param placementType which placement type should be used for the initial placements * @param statement the used statement */ - public abstract void createTable( long schemaId, String tableName, List columns, List constraints, boolean ifNotExists, List stores, PlacementType placementType, Statement statement ) throws TableAlreadyExistsException, ColumnNotExistsException, UnknownPartitionTypeException; - + public abstract void createTable( long schemaId, String tableName, List columns, List constraints, boolean ifNotExists, List stores, PlacementType placementType, Statement statement ) throws TableAlreadyExistsException, ColumnNotExistsException, UnknownPartitionTypeException, UnknownColumnException, PartitionGroupNamesNotUniqueException; /** * Create a new view @@ -448,7 +460,15 @@ public static DdlManager getInstance() { * * @param partitionInfo the information concerning the partition */ - public abstract void addPartition( PartitionInformation partitionInfo ) throws GenericCatalogException, UnknownPartitionTypeException, UnknownColumnException, PartitionNamesNotUniqueException; + public abstract void addPartitioning( PartitionInformation partitionInfo, List stores, Statement statement ) throws GenericCatalogException, UnknownPartitionTypeException, UnknownColumnException, PartitionGroupNamesNotUniqueException; + + /** + * Removes partitioning from a table + * + * @param catalogTable the table to be merged + * @param statement the used Statement + */ + public abstract void removePartitioning( CatalogTable catalogTable, Statement statement ); /** * Adds a new constraint to a table @@ -608,24 +628,30 @@ public static class PartitionInformation { public final CatalogTable table; public final String columnName; public final String typeName; - public final List partitionNames; - public final int numberOf; + public final List partitionGroupNames; + public final int numberOfPartitionGroups; + public final int numberOfPartitions; public final List> qualifiers; + public final RawPartitionInformation rawPartitionInformation; public PartitionInformation( CatalogTable table, String typeName, String columnName, - List partitionNames, - int numberOf, - List> qualifiers ) { + List partitionGroupNames, + int numberOfPartitionGroups, + int numberOfPartitions, + List> qualifiers, + RawPartitionInformation rawPartitionInformation ) { this.table = table; this.typeName = typeName; this.columnName = columnName; - this.partitionNames = partitionNames; - this.numberOf = numberOf; + this.partitionGroupNames = partitionGroupNames; + this.numberOfPartitionGroups = numberOfPartitionGroups; + this.numberOfPartitions = numberOfPartitions; this.qualifiers = qualifiers; + this.rawPartitionInformation = rawPartitionInformation; } @@ -633,18 +659,36 @@ public static PartitionInformation fromSqlLists( CatalogTable table, String typeName, String columnName, - List partitionNames, - int numberOf, - List> partitionQualifierList ) { - List names = partitionNames + List partitionGroupNames, + int numberOfPartitionGroups, + int numberOfPartitions, + List> partitionQualifierList, + RawPartitionInformation rawPartitionInformation ) { + List names = partitionGroupNames .stream() .map( SqlIdentifier::getSimple ) .collect( Collectors.toList() ); List> qualifiers = partitionQualifierList .stream() - .map( qs -> qs.stream().map( SqlNode::toString ).collect( Collectors.toList() ) ) + .map( qs -> qs.stream().map(
PartitionInformation::getValueOfSqlNode ).collect( Collectors.toList() ) ) .collect( Collectors.toList() ); - return new PartitionInformation( table, typeName, columnName, names, numberOf, qualifiers ); + return new PartitionInformation( table, typeName, columnName, names, numberOfPartitionGroups, numberOfPartitions, qualifiers, rawPartitionInformation ); + } + + + /** + * Needed to modify string values; otherwise the SQL input 'a' would also be stored as the value "'a'" and not as "a" as intended. + * Essentially removes the " ' " at the start and end of the value. + * + * @param node Node to be modified + * @return The extracted string value + */ + public static String getValueOfSqlNode( SqlNode node ) { + + if ( node instanceof SqlLiteral ) { + return ((SqlLiteral) node).toValue(); + } + return node.toString(); } } diff --git a/core/src/main/java/org/polypheny/db/ddl/exception/PartitionNamesNotUniqueException.java b/core/src/main/java/org/polypheny/db/ddl/exception/PartitionGroupNamesNotUniqueException.java similarity index 90% rename from core/src/main/java/org/polypheny/db/ddl/exception/PartitionNamesNotUniqueException.java rename to core/src/main/java/org/polypheny/db/ddl/exception/PartitionGroupNamesNotUniqueException.java index 5c2010c1ed..34d15d0fe2 100644 --- a/core/src/main/java/org/polypheny/db/ddl/exception/PartitionNamesNotUniqueException.java +++ b/core/src/main/java/org/polypheny/db/ddl/exception/PartitionGroupNamesNotUniqueException.java @@ -16,6 +16,6 @@ package org.polypheny.db.ddl.exception; -public class PartitionNamesNotUniqueException extends Exception { +public class PartitionGroupNamesNotUniqueException extends Exception { } diff --git a/core/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueue.java b/core/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueue.java index 25bbe06e69..f606e290dd 100644 --- a/core/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueue.java +++ b/core/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueue.java @@ -45,6 +45,6 @@ public interface MonitoringQueue { List> getInformationOnElementsInQueue(); - long getNumberOfProcessedEvents(); + long getNumberOfProcessedEvents( boolean all ); } diff --git a/core/src/main/java/org/polypheny/db/monitoring/events/BaseEvent.java b/core/src/main/java/org/polypheny/db/monitoring/events/BaseEvent.java index 3f157f27e8..29b34b0ccb 100644 --- a/core/src/main/java/org/polypheny/db/monitoring/events/BaseEvent.java +++ b/core/src/main/java/org/polypheny/db/monitoring/events/BaseEvent.java @@ -27,7 +27,7 @@ public abstract class BaseEvent implements MonitoringEvent { private final UUID id = UUID.randomUUID(); protected String eventType; - private final long recordedTimestamp; + private long recordedTimestamp; public BaseEvent() { diff --git a/core/src/main/java/org/polypheny/db/monitoring/events/StatementEvent.java b/core/src/main/java/org/polypheny/db/monitoring/events/StatementEvent.java index 727d79cf4d..fdc09ba24a 100644 --- a/core/src/main/java/org/polypheny/db/monitoring/events/StatementEvent.java +++ b/core/src/main/java/org/polypheny/db/monitoring/events/StatementEvent.java @@ -33,6 +33,7 @@ @Getter public abstract class StatementEvent extends BaseEvent { + protected String monitoringType; protected RelRoot routed; protected PolyphenyDbSignature signature; protected Statement statement; @@ -44,6 +45,7 @@ public abstract class StatementEvent extends BaseEvent { protected boolean isAnalyze; protected boolean isSubQuery; protected String durations; + protected List accessedPartitions; @Override diff
--git a/core/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUi.java b/core/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUi.java index 46fa4aaa7c..32559e7ae2 100644 --- a/core/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUi.java +++ b/core/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUi.java @@ -16,6 +16,8 @@ package org.polypheny.db.monitoring.ui; +import org.polypheny.db.monitoring.events.MonitoringDataPoint; + /** * UI abstraction service for monitoring. */ @@ -23,4 +25,14 @@ public interface MonitoringServiceUi { void initializeInformationPage(); + /** + * Adds a new section to the monitoring information page for the specified + * MonitoringPersistentData type and registers the refresh function to read from the repository. + * + * @param metricClass The data point class for which a UI section is registered + */ + void registerDataPointForUi( Class metricClass ); + + } diff --git a/core/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java b/core/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java deleted file mode 100644 index a9f9c6df5e..0000000000 --- a/core/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright 2019-2021 The Polypheny Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.polypheny.db.partition; - -import java.util.ArrayList; -import java.util.List; -import lombok.extern.slf4j.Slf4j; -import org.polypheny.db.catalog.Catalog; -import org.polypheny.db.catalog.entity.CatalogColumn; -import org.polypheny.db.catalog.entity.CatalogColumnPlacement; -import org.polypheny.db.catalog.entity.CatalogTable; - - -@Slf4j -public abstract class AbstractPartitionManager implements PartitionManager { - - - // returns the Index of the partition where to place the object - @Override - public abstract long getTargetPartitionId( CatalogTable catalogTable, String columnValue ); - - - /** - * Validates the table if the partitions are sufficiently distributed.
- * There has to be at least on columnPlacement which contains all partitions - * - * @param table Table to be checked - * @return If its correctly distributed or not - */ - @Override - public boolean validatePartitionDistribution( CatalogTable table ) { - // Check for every column if there exists at least one placement which contains all partitions - for ( long columnId : table.columnIds ) { - int numberOfFullPlacements = getPlacementsWithAllPartitions( columnId, table.numPartitions ).size(); - if ( numberOfFullPlacements >= 1 ) { - log.debug( "Found ColumnPlacement which contains all partitions for column: {}", columnId ); - break; - } - - if ( log.isDebugEnabled() ) { - log.debug( "ERROR Column: '{}' has no placement containing all partitions", Catalog.getInstance().getColumn( columnId ).name ); - } - return false; - } - - return true; - } - - - @Override - public abstract boolean probePartitionDistributionChange( CatalogTable catalogTable, int storeId, long columnId ); - - @Override - public abstract List getRelevantPlacements( CatalogTable catalogTable, List partitionIds ); - - - @Override - public boolean validatePartitionSetup( - List> partitionQualifiers, - long numPartitions, - List partitionNames, - CatalogColumn partitionColumn ) { - if ( numPartitions == 0 && partitionNames.size() < 2 ) { - throw new RuntimeException( "Partitioning of table failed! Can't partition table with less than 2 partitions/names" ); - } - return true; - } - - - /** - * Returns number of placements for this column which contain all partitions - * - * @param columnId column to be checked - * @param numPartitions numPartitions - * @return If its correctly distributed or not - */ - protected List getPlacementsWithAllPartitions( long columnId, long numPartitions ) { - Catalog catalog = Catalog.getInstance(); - - // Return every placement of this column - List tempCcps = catalog.getColumnPlacements( columnId ); - List returnCcps = new ArrayList<>(); - int placementCounter = 0; - for ( CatalogColumnPlacement ccp : tempCcps ) { - // If the DataPlacement has stored all partitions and therefore all partitions for this placement - if ( catalog.getPartitionsOnDataPlacement( ccp.adapterId, ccp.tableId ).size() == numPartitions ) { - returnCcps.add( ccp ); - placementCounter++; - } - } - return returnCcps; - } - - - @Override - public abstract PartitionFunctionInfo getPartitionFunctionInfo(); - -} diff --git a/core/src/main/java/org/polypheny/db/partition/FrequencyMap.java b/core/src/main/java/org/polypheny/db/partition/FrequencyMap.java new file mode 100644 index 0000000000..ab89e792d1 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/partition/FrequencyMap.java @@ -0,0 +1,42 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.partition; + +import org.polypheny.db.catalog.entity.CatalogTable; + + +public abstract class FrequencyMap { + + public static FrequencyMap INSTANCE = null; + + + public static FrequencyMap setAndGetInstance( FrequencyMap frequencyMap ) { + if ( INSTANCE != null ) { + throw new RuntimeException( "Overwriting the FrequencyMap when it is already set is not permitted." ); + } + INSTANCE = frequencyMap; + return INSTANCE; + } + + + public abstract void initialize(); + + public abstract void terminate(); + + public abstract void determinePartitionFrequency( CatalogTable table, long invocationTimestamp ); + +} diff --git a/core/src/main/java/org/polypheny/db/partition/PartitionManager.java b/core/src/main/java/org/polypheny/db/partition/PartitionManager.java index 66f09bdf79..477737dba5 100644 --- a/core/src/main/java/org/polypheny/db/partition/PartitionManager.java +++ b/core/src/main/java/org/polypheny/db/partition/PartitionManager.java @@ -17,6 +17,7 @@ package org.polypheny.db.partition; import java.util.List; +import java.util.Map; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogTable; @@ -29,18 +30,20 @@ public interface PartitionManager { */ long getTargetPartitionId( CatalogTable catalogTable, String columnValue ); - boolean validatePartitionDistribution( CatalogTable table ); + boolean probePartitionGroupDistributionChange( CatalogTable catalogTable, int storeId, long columnId, int threshold ); - boolean probePartitionDistributionChange( CatalogTable catalogTable, int storeId, long columnId ); + Map> getRelevantPlacements( CatalogTable catalogTable, List partitionIds, List excludedAdapters ); - List getRelevantPlacements( CatalogTable catalogTable, List partitionIds ); + boolean validatePartitionGroupSetup( List> partitionGroupQualifiers, long numPartitionGroups, List partitionGroupNames, CatalogColumn partitionColumn ); - boolean validatePartitionSetup( List> partitionQualifiers, long numPartitions, List partitionNames, CatalogColumn partitionColumn ); + int getNumberOfPartitionsPerGroup( int numberOfPartitions ); - boolean requiresUnboundPartition(); + boolean requiresUnboundPartitionGroup(); boolean supportsColumnOfType( PolyType type ); + String getUnifiedNullValue(); + /** * Returns an instance of PartitionFunctionInfo specifying the available parameters of the partition function. */ diff --git a/core/src/main/java/org/polypheny/db/partition/PartitionManagerFactory.java b/core/src/main/java/org/polypheny/db/partition/PartitionManagerFactory.java index 4daf1e73ba..f01e91028c 100644 --- a/core/src/main/java/org/polypheny/db/partition/PartitionManagerFactory.java +++ b/core/src/main/java/org/polypheny/db/partition/PartitionManagerFactory.java @@ -13,25 +13,36 @@ * See the License for the specific language governing permissions and * limitations under the License.
*/ + package org.polypheny.db.partition; + import org.polypheny.db.catalog.Catalog; -public class PartitionManagerFactory { - public PartitionManager getInstance( Catalog.PartitionType partitionType ) { - switch ( partitionType ) { - case HASH: - return new HashPartitionManager(); +public abstract class PartitionManagerFactory { + - case LIST: - return new ListPartitionManager(); + public static PartitionManagerFactory INSTANCE = null; - case RANGE: - return new RangePartitionManager(); + + public static PartitionManagerFactory setAndGetInstance( PartitionManagerFactory factory ) { + if ( INSTANCE != null ) { + throw new RuntimeException( "Setting the PartitionManager when it is already set is not permitted." ); } + INSTANCE = factory; + return INSTANCE; + } - throw new RuntimeException( "Unknown partition type: " + partitionType ); + + public static PartitionManagerFactory getInstance() { + if ( INSTANCE == null ) { + throw new RuntimeException( "PartitionManager was not set correctly on Polypheny-DB start-up" ); + } + return INSTANCE; } + + public abstract PartitionManager getPartitionManager( Catalog.PartitionType partitionType ); + } diff --git a/core/src/main/java/org/polypheny/db/partition/properties/PartitionProperty.java b/core/src/main/java/org/polypheny/db/partition/properties/PartitionProperty.java new file mode 100644 index 0000000000..b3c87ebf2e --- /dev/null +++ b/core/src/main/java/org/polypheny/db/partition/properties/PartitionProperty.java @@ -0,0 +1,40 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.partition.properties; + +import com.google.common.collect.ImmutableList; +import java.io.Serializable; +import lombok.Getter; +import lombok.experimental.SuperBuilder; +import org.polypheny.db.catalog.Catalog.PartitionType; + + +@SuperBuilder +@Getter +public class PartitionProperty implements Serializable { + + public final PartitionType partitionType; + public final ImmutableList partitionGroupIds; + public final ImmutableList partitionIds; + public final long partitionColumnId; + + public final long numPartitionGroups; + public final long numPartitions; + + public final boolean reliesOnPeriodicChecks; + +} diff --git a/core/src/main/java/org/polypheny/db/partition/properties/TemperaturePartitionProperty.java b/core/src/main/java/org/polypheny/db/partition/properties/TemperaturePartitionProperty.java new file mode 100644 index 0000000000..eaca6f7771 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/partition/properties/TemperaturePartitionProperty.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.partition.properties; + +import lombok.Getter; +import lombok.experimental.SuperBuilder; +import org.polypheny.db.catalog.Catalog.PartitionType; + + +@SuperBuilder +@Getter +public class TemperaturePartitionProperty extends PartitionProperty { + + // Cost Model, Access Frequency: ALL, READ FREQUENCY, WRITE FREQUENCY + public enum PartitionCostIndication {ALL, READ, WRITE} + + + private final PartitionCostIndication partitionCostIndication; + private final PartitionType internalPartitionFunction; + + // Maybe get defaults centrally from the configuration if left empty + private final int hotAccessPercentageIn; + private final int hotAccessPercentageOut; + + private final long frequencyInterval; + + private final long hotPartitionGroupId; + private final long coldPartitionGroupId; + +} diff --git a/core/src/main/java/org/polypheny/db/partition/raw/RawPartitionInformation.java b/core/src/main/java/org/polypheny/db/partition/raw/RawPartitionInformation.java new file mode 100644 index 0000000000..62d619a8ae --- /dev/null +++ b/core/src/main/java/org/polypheny/db/partition/raw/RawPartitionInformation.java @@ -0,0 +1,39 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.partition.raw; + +import java.util.List; +import lombok.Getter; +import lombok.Setter; +import org.polypheny.db.sql.SqlIdentifier; +import org.polypheny.db.sql.SqlNode; + + +@Getter +@Setter +public class RawPartitionInformation { + + public SqlIdentifier partitionColumn; + public SqlIdentifier partitionType; + + public List partitionNamesList; + public List> partitionQualifierList; + + public long numPartitionGroups; + public long numPartitions; + +} diff --git a/core/src/main/java/org/polypheny/db/partition/raw/RawTemperaturePartitionInformation.java b/core/src/main/java/org/polypheny/db/partition/raw/RawTemperaturePartitionInformation.java new file mode 100644 index 0000000000..c6a229a695 --- /dev/null +++ b/core/src/main/java/org/polypheny/db/partition/raw/RawTemperaturePartitionInformation.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.partition.raw; + +import java.util.List; +import lombok.Getter; +import lombok.Setter; +import org.polypheny.db.sql.SqlIdentifier; +import org.polypheny.db.sql.SqlNode; + + +@Getter +@Setter +public class RawTemperaturePartitionInformation extends RawPartitionInformation { + + public SqlIdentifier internalPartitionFunction; + public SqlIdentifier accessPattern; + + public long interval; + public SqlIdentifier intervalUnit; // minutes | hours | days + + public List partitionNamesList; + public List> partitionQualifierList; + + private SqlNode hotAccessPercentageIn; + private SqlNode hotAccessPercentageOut; + + public long numPartitionGroups; + +} diff --git a/core/src/main/java/org/polypheny/db/processing/DataMigrator.java b/core/src/main/java/org/polypheny/db/processing/DataMigrator.java index 39f7b9821b..1ff0ec0c76 100644 --- a/core/src/main/java/org/polypheny/db/processing/DataMigrator.java +++ b/core/src/main/java/org/polypheny/db/processing/DataMigrator.java @@ -17,12 +17,60 @@ package org.polypheny.db.processing; import java.util.List; +import java.util.Map; import org.polypheny.db.catalog.entity.CatalogAdapter; import org.polypheny.db.catalog.entity.CatalogColumn; +import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.transaction.Transaction; + public interface DataMigrator { - void copyData( Transaction transaction, CatalogAdapter store, List columns ); + void copyData( + Transaction transaction, + CatalogAdapter store, + List columns, + List partitionIds ); + + /** + * Currently used to transfer data if a partitioned table is about to be merged. + * For Table Partitioning use {@link #copyPartitionData(Transaction, CatalogAdapter, CatalogTable, CatalogTable, List, List, List)} instead + * + * @param transaction Transactional scope + * @param store Target Store where data should be migrated to + * @param sourceTable Source Table from where data is queried + * @param targetTable Target Table where data is to be inserted + * @param columns Necessary columns on target + * @param placementDistribution Pre-computed mapping of partitions and the necessary column placements + * @param targetPartitionIds Target Partitions where data should be inserted + */ + void copySelectiveData( + Transaction transaction, + CatalogAdapter store, + CatalogTable sourceTable, CatalogTable targetTable, List columns, + Map> placementDistribution, + List targetPartitionIds ); + + /** + * Currently used to transfer data if an unpartitioned table is about to be partitioned.
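+ * <p>Invocation sketch (hypothetical variables; assumes both tables are already known to the catalog):</p>
+ * <pre>{@code
+ * // move the rows of the unpartitioned source into the new target partitions
+ * dataMigrator.copyPartitionData( transaction, store, sourceTable, targetTable,
+ *         columns, sourcePartitionIds, targetPartitionIds );
+ * }</pre>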
+ * For Table Merge use {@link #copySelectiveData(Transaction, CatalogAdapter, CatalogTable, CatalogTable, List, Map, List)} instead + * + * @param transaction Transactional scope + * @param store Target Store where data should be migrated to + * @param sourceTable Source Table from where data is queried + * @param targetTable Target Table where data is to be inserted + * @param columns Necessary columns on target + * @param sourcePartitionIds Source Partitions which need to be considered for querying + * @param targetPartitionIds Target Partitions where data should be inserted + */ + void copyPartitionData( + Transaction transaction, + CatalogAdapter store, + CatalogTable sourceTable, + CatalogTable targetTable, + List columns, + List sourcePartitionIds, + List targetPartitionIds ); } diff --git a/core/src/main/java/org/polypheny/db/routing/Router.java b/core/src/main/java/org/polypheny/db/routing/Router.java index aa56d649b6..b8c3131bd7 100644 --- a/core/src/main/java/org/polypheny/db/routing/Router.java +++ b/core/src/main/java/org/polypheny/db/routing/Router.java @@ -17,6 +17,7 @@ package org.polypheny.db.routing; import java.util.List; +import java.util.Map; import org.polypheny.db.adapter.DataStore; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogTable; @@ -35,7 +36,8 @@ public interface Router { void dropPlacements( List placements ); - RelNode buildJoinedTableScan( Statement statement, RelOptCluster cluster, List placements ); + RelNode buildJoinedTableScan( Statement statement, RelOptCluster cluster, Map> placements ); void resetCaches(); + } diff --git a/core/src/main/java/org/polypheny/db/sql/ddl/SqlCreateTable.java b/core/src/main/java/org/polypheny/db/sql/ddl/SqlCreateTable.java index 72c30528e1..438c13c7c3 100644 --- a/core/src/main/java/org/polypheny/db/sql/ddl/SqlCreateTable.java +++ b/core/src/main/java/org/polypheny/db/sql/ddl/SqlCreateTable.java @@ -58,8 +58,9 @@ import org.polypheny.db.ddl.DdlManager.ConstraintInformation; import org.polypheny.db.ddl.DdlManager.PartitionInformation; import org.polypheny.db.ddl.exception.ColumnNotExistsException; -import org.polypheny.db.ddl.exception.PartitionNamesNotUniqueException; +import org.polypheny.db.ddl.exception.PartitionGroupNamesNotUniqueException; import org.polypheny.db.jdbc.Context; +import org.polypheny.db.partition.raw.RawPartitionInformation; import org.polypheny.db.sql.SqlCreate; import org.polypheny.db.sql.SqlExecutableStatement; import org.polypheny.db.sql.SqlIdentifier; @@ -88,8 +89,10 @@ public class SqlCreateTable extends SqlCreate implements SqlExecutableStatement private final SqlIdentifier store; private final SqlIdentifier partitionColumn; private final SqlIdentifier partitionType; + private final int numPartitionGroups; private final int numPartitions; - private final List partitionNamesList; + private final List partitionGroupNamesList; + private final RawPartitionInformation rawPartitionInfo; private final List> partitionQualifierList; @@ -109,9 +112,11 @@ public class SqlCreateTable extends SqlCreate implements SqlExecutableStatement SqlIdentifier store, SqlIdentifier partitionType, SqlIdentifier partitionColumn, + int numPartitionGroups, int numPartitions, - List partitionNamesList, - List> partitionQualifierList ) { + List partitionGroupNamesList, + List> partitionQualifierList, + RawPartitionInformation rawPartitionInfo ) { super( OPERATOR, pos, replace, ifNotExists ); this.name = Objects.requireNonNull( name ); this.columnList =
columnList; // May be null @@ -119,9 +124,11 @@ public class SqlCreateTable extends SqlCreate implements SqlExecutableStatement this.store = store; // ON STORE [store name]; may be null this.partitionType = partitionType; // PARTITION BY (HASH | RANGE | LIST); may be null this.partitionColumn = partitionColumn; // May be null - this.numPartitions = numPartitions; // May be null and can only be used in association with PARTITION BY - this.partitionNamesList = partitionNamesList; // May be null and can only be used in association with PARTITION BY and PARTITIONS + this.numPartitionGroups = numPartitionGroups; // May be null and can only be used in association with PARTITION BY + this.numPartitions = numPartitions; + this.partitionGroupNamesList = partitionGroupNamesList; // May be null and can only be used in association with PARTITION BY and PARTITIONS this.partitionQualifierList = partitionQualifierList; + this.rawPartitionInfo = rawPartitionInfo; } @@ -133,19 +140,6 @@ public List getOperandList() { @Override public void unparse( SqlWriter writer, int leftPrec, int rightPrec ) { - // TODO @HENNLO: The partition part is still incomplete - /** There are several possible ways to unparse the partition section. - The To Do is deferred until we have decided if parsing of partition functions will be - self contained or not. If not than we need to unparse - `WITH PARTITIONS 3` - or something like - `( - PARTITION a892_233 VALUES(892, 233), - PARTITION a1001_1002 VALUES(1001, 1002), - PARTITION a8000_4003 VALUES(8000, 4003), - PARTITION a900_999 VALUES(900, 999) - )`*/ - writer.keyword( "CREATE" ); writer.keyword( "TABLE" ); if ( ifNotExists ) { @@ -174,6 +168,36 @@ PARTITION a900_999 VALUES(900, 999) writer.keyword( " BY" ); SqlWriter.Frame frame = writer.startList( "(", ")" ); partitionColumn.unparse( writer, 0, 0 ); + + switch ( partitionType.getSimple() ) { + case "HASH": + writer.keyword( "WITH" ); + frame = writer.startList( "(", ")" ); + for ( SqlIdentifier name : partitionGroupNamesList ) { + writer.sep( "," ); + name.unparse( writer, 0, 0 ); + } + break; + case "RANGE": + case "LIST": + writer.keyword( "(" ); + for ( int i = 0; i < partitionGroupNamesList.size(); i++ ) { + writer.keyword( "PARTITION" ); + partitionGroupNamesList.get( i ).unparse( writer, 0, 0 ); + writer.keyword( "VALUES" ); + writer.keyword( "(" ); + partitionQualifierList.get( i ).get( 0 ).unparse( writer, 0, 0 ); + writer.sep( "," ); + partitionQualifierList.get( i ).get( 1 ).unparse( writer, 0, 0 ); + writer.keyword( ")" ); + + if ( i + 1 < partitionGroupNamesList.size() ) { + writer.sep( "," ); + } + } + writer.keyword( ")" ); + break; + } writer.endList( frame ); } } @@ -189,7 +213,7 @@ public void execute( Context context, Statement statement ) { long schemaId; try { - // cannot use getTable here, as table does not yet exist + // Cannot use getTable() here since table does not yet exist if ( name.names.size() == 3 ) { // DatabaseName.SchemaName.TableName schemaId = catalog.getSchema( name.names.get( 0 ), name.names.get( 1 ) ).id; tableName = name.names.get( 2 ); @@ -231,13 +255,18 @@ public void execute( Context context, Statement statement ) { statement ); if ( partitionType != null ) { - DdlManager.getInstance().addPartition( PartitionInformation.fromSqlLists( - getCatalogTable( context, new SqlIdentifier( tableName, SqlParserPos.ZERO ) ), - partitionType.getSimple(), - partitionColumn.getSimple(), - partitionNamesList, - numPartitions, - partitionQualifierList ) ); + DdlManager.getInstance().addPartitioning( + 
PartitionInformation.fromSqlLists( + getCatalogTable( context, new SqlIdentifier( tableName, SqlParserPos.ZERO ) ), + partitionType.getSimple(), + partitionColumn.getSimple(), + partitionGroupNamesList, + numPartitionGroups, + numPartitions, + partitionQualifierList, + rawPartitionInfo ), + stores, + statement ); } } catch ( TableAlreadyExistsException e ) { @@ -246,18 +275,18 @@ public void execute( Context context, Statement statement ) { throw SqlUtil.newContextException( partitionColumn.getParserPosition(), RESOURCE.columnNotFoundInTable( e.columnName, e.tableName ) ); } catch ( UnknownPartitionTypeException e ) { throw SqlUtil.newContextException( partitionType.getParserPosition(), RESOURCE.unknownPartitionType( partitionType.getSimple() ) ); - } catch ( PartitionNamesNotUniqueException e ) { + } catch ( PartitionGroupNamesNotUniqueException e ) { throw SqlUtil.newContextException( partitionColumn.getParserPosition(), RESOURCE.partitionNamesNotUnique() ); } catch ( GenericCatalogException | UnknownColumnException e ) { - // we just added the table/column so it has to exist or we have a internal problem + // We just added the table/column so it has to exist or we have an internal problem throw new RuntimeException( e ); } } private Pair<List<ColumnInformation>, List<ConstraintInformation>> separateColumnList() { - List<ColumnInformation> columnInformations = new ArrayList<>(); - List<ConstraintInformation> constraintInformations = new ArrayList<>(); + List<ColumnInformation> columnInformation = new ArrayList<>(); + List<ConstraintInformation> constraintInformation = new ArrayList<>(); int position = 1; for ( Ord<SqlNode> c : Ord.zip( columnList ) ) { @@ -266,7 +295,7 @@ private Pair<List<ColumnInformation>, List<ConstraintInformation>> separateColum String defaultValue = columnDeclaration.getExpression() == null ? null : columnDeclaration.getExpression().toString(); - columnInformations.add( + columnInformation.add( new ColumnInformation( columnDeclaration.getName().getSimple(), ColumnTypeInformation.fromSqlDataTypeSpec( columnDeclaration.getDataType() ), @@ -278,14 +307,21 @@ private Pair<List<ColumnInformation>, List<ConstraintInformation>> separateColum SqlKeyConstraint constraint = (SqlKeyConstraint) c.e; String constraintName = constraint.getName() != null ? constraint.getName().getSimple() : null; - constraintInformations.add( new ConstraintInformation( constraintName, constraint.getConstraintType(), constraint.getColumnList().getList().stream().map( SqlNode::toString ).collect( Collectors.toList() ) ) ); + ConstraintInformation ci = new ConstraintInformation( + constraintName, + constraint.getConstraintType(), + constraint.getColumnList().getList().stream() + .map( SqlNode::toString ) + .collect( Collectors.toList() ) + ); + constraintInformation.add( ci ); } else { throw new AssertionError( c.e.getClass() ); } position++; } - return new Pair<>( columnInformations, constraintInformations ); + return new Pair<>( columnInformation, constraintInformation ); } diff --git a/core/src/main/java/org/polypheny/db/sql/ddl/SqlDdlNodes.java b/core/src/main/java/org/polypheny/db/sql/ddl/SqlDdlNodes.java index b12ea6609a..443d7453f1 100644 --- a/core/src/main/java/org/polypheny/db/sql/ddl/SqlDdlNodes.java +++ b/core/src/main/java/org/polypheny/db/sql/ddl/SqlDdlNodes.java @@ -36,6 +36,7 @@ import java.util.List; import org.polypheny.db.catalog.Catalog.SchemaType; +import org.polypheny.db.partition.raw.RawPartitionInformation; import org.polypheny.db.schema.ColumnStrategy; import org.polypheny.db.sql.SqlCollation; import org.polypheny.db.sql.SqlDataTypeSpec; @@ -75,8 +76,8 @@ public static SqlCreateType createType( SqlParserPos pos, boolean replace, SqlId /** * Creates a CREATE TABLE.
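+ * <p>A hedged example of the kind of statement this factory represents, derived from the unparse logic in
+ * {@link SqlCreateTable} (the table, column, and partition group names are invented for illustration):
+ * <pre>{@code
+ * CREATE TABLE orders (id INTEGER NOT NULL, region VARCHAR(20), PRIMARY KEY(id))
+ * PARTITION BY HASH (region) WITH (americas, emea, apac)
+ * }</pre>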
*/ - public static SqlCreateTable createTable( SqlParserPos pos, boolean replace, boolean ifNotExists, SqlIdentifier name, SqlNodeList columnList, SqlNode query, SqlIdentifier store, SqlIdentifier partitionType, SqlIdentifier partitionColumn, int numPartitions, List<SqlIdentifier> partitionNamesList, List<List<SqlNode>> partitionQualifierList ) { - return new SqlCreateTable( pos, replace, ifNotExists, name, columnList, query, store, partitionType, partitionColumn, numPartitions, partitionNamesList, partitionQualifierList ); + public static SqlCreateTable createTable( SqlParserPos pos, boolean replace, boolean ifNotExists, SqlIdentifier name, SqlNodeList columnList, SqlNode query, SqlIdentifier store, SqlIdentifier partitionType, SqlIdentifier partitionColumn, int numPartitionGroups, int numPartitions, List<SqlIdentifier> partitionNamesList, List<List<SqlNode>> partitionQualifierList, RawPartitionInformation rawPartitionInfo ) { + return new SqlCreateTable( pos, replace, ifNotExists, name, columnList, query, store, partitionType, partitionColumn, numPartitionGroups, numPartitions, partitionNamesList, partitionQualifierList, rawPartitionInfo ); } diff --git a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPartitions.java b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPartitions.java index fd31eb947e..ac9229f3d3 100644 --- a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPartitions.java +++ b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPartitions.java @@ -29,8 +29,9 @@ import org.polypheny.db.catalog.exceptions.UnknownPartitionTypeException; import org.polypheny.db.ddl.DdlManager; import org.polypheny.db.ddl.DdlManager.PartitionInformation; -import org.polypheny.db.ddl.exception.PartitionNamesNotUniqueException; +import org.polypheny.db.ddl.exception.PartitionGroupNamesNotUniqueException; import org.polypheny.db.jdbc.Context; +import org.polypheny.db.partition.raw.RawPartitionInformation; import org.polypheny.db.sql.SqlIdentifier; import org.polypheny.db.sql.SqlNode; import org.polypheny.db.sql.SqlUtil; @@ -50,9 +51,11 @@ public class SqlAlterTableAddPartitions extends SqlAlterTable { private final SqlIdentifier table; private final SqlIdentifier partitionColumn; private final SqlIdentifier partitionType; + private final int numPartitionGroups; private final int numPartitions; - private final List<SqlIdentifier> partitionNamesList; + private final List<SqlIdentifier> partitionGroupNamesList; private final List<List<SqlNode>> partitionQualifierList; + private final RawPartitionInformation rawPartitionInformation; public SqlAlterTableAddPartitions( @@ -60,16 +63,20 @@ public SqlAlterTableAddPartitions( SqlParserPos pos, SqlIdentifier table, SqlIdentifier partitionColumn, SqlIdentifier partitionType, + int numPartitionGroups, int numPartitions, - List<SqlIdentifier> partitionNamesList, - List<List<SqlNode>> partitionQualifierList ) { + List<SqlIdentifier> partitionGroupNamesList, + List<List<SqlNode>> partitionQualifierList, + RawPartitionInformation rawPartitionInformation ) { super( pos ); this.table = Objects.requireNonNull( table ); this.partitionType = Objects.requireNonNull( partitionType ); this.partitionColumn = Objects.requireNonNull( partitionColumn ); + this.numPartitionGroups = numPartitionGroups; // May be empty this.numPartitions = numPartitions; // May be empty - this.partitionNamesList = partitionNamesList; // May be null and can only be used in association with PARTITION BY and PARTITIONS + this.partitionGroupNamesList = partitionGroupNamesList; // May be null and can only be used in association with PARTITION BY and PARTITIONS this.partitionQualifierList =
partitionQualifierList; + this.rawPartitionInformation = rawPartitionInformation; } @@ -81,16 +88,42 @@ public List getOperandList() { @Override public void unparse( SqlWriter writer, int leftPrec, int rightPrec ) { - // TODO @HENNLO: The partition part is still incomplete - /** There are several possible ways to unparse the partition section. - The To Do is deferred until we have decided if parsing of partition functions will be - self contained or not.*/ writer.keyword( "ALTER" ); writer.keyword( "TABLE" ); table.unparse( writer, leftPrec, rightPrec ); writer.keyword( "PARTITION" ); writer.keyword( "BY" ); partitionType.unparse( writer, leftPrec, rightPrec ); + + switch ( partitionType.getSimple() ) { + case "HASH": + writer.keyword( "WITH" ); + SqlWriter.Frame frame = writer.startList( "(", ")" ); + for ( SqlIdentifier name : partitionGroupNamesList ) { + writer.sep( "," ); + name.unparse( writer, 0, 0 ); + } + break; + case "RANGE": + case "LIST": + writer.keyword( "(" ); + for ( int i = 0; i < partitionGroupNamesList.size(); i++ ) { + writer.keyword( "PARTITION" ); + partitionGroupNamesList.get( i ).unparse( writer, 0, 0 ); + writer.keyword( "VALUES" ); + writer.keyword( "(" ); + partitionQualifierList.get( i ).get( 0 ).unparse( writer, 0, 0 ); + writer.sep( "," ); + partitionQualifierList.get( i ).get( 1 ).unparse( writer, 0, 0 ); + writer.keyword( ")" ); + + if ( i + 1 < partitionGroupNamesList.size() ) { + writer.sep( "," ); + } + } + writer.keyword( ")" ); + break; + } } @@ -105,20 +138,25 @@ public void execute( Context context, Statement statement ) { try { // Check if table is already partitioned if ( catalogTable.partitionType == Catalog.PartitionType.NONE ) { - DdlManager.getInstance().addPartition( PartitionInformation.fromSqlLists( - catalogTable, - partitionType.getSimple(), - partitionColumn.getSimple(), - partitionNamesList, - numPartitions, - partitionQualifierList ) ); + DdlManager.getInstance().addPartitioning( + PartitionInformation.fromSqlLists( + catalogTable, + partitionType.getSimple(), + partitionColumn.getSimple(), + partitionGroupNamesList, + numPartitionGroups, + numPartitions, + partitionQualifierList, + rawPartitionInformation ), + null, + statement ); } else { throw new RuntimeException( "Table '" + catalogTable.name + "' is already partitioned" ); } } catch ( UnknownPartitionTypeException | GenericCatalogException e ) { throw new RuntimeException( e ); - } catch ( PartitionNamesNotUniqueException e ) { + } catch ( PartitionGroupNamesNotUniqueException e ) { throw SqlUtil.newContextException( partitionColumn.getParserPosition(), RESOURCE.partitionNamesNotUnique() ); } catch ( UnknownColumnException e ) { throw SqlUtil.newContextException( partitionColumn.getParserPosition(), RESOURCE.columnNotFoundInTable( partitionColumn.getSimple(), catalogTable.name ) ); diff --git a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPlacement.java b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPlacement.java index 99105b6011..45716b3ca2 100644 --- a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPlacement.java +++ b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableAddPlacement.java @@ -50,8 +50,8 @@ public class SqlAlterTableAddPlacement extends SqlAlterTable { private final SqlIdentifier table; private final SqlNodeList columnList; private final SqlIdentifier storeName; - private final List partitionList; - private final List partitionNamesList; + private final List
partitionGroupsList; + private final List partitionGroupNamesList; public SqlAlterTableAddPlacement( @@ -59,14 +59,14 @@ public SqlAlterTableAddPlacement( SqlIdentifier table, SqlNodeList columnList, SqlIdentifier storeName, - List partitionList, - List partitionNamesList ) { + List partitionGroupsList, + List partitionGroupNamesList ) { super( pos ); this.table = Objects.requireNonNull( table ); this.columnList = Objects.requireNonNull( columnList ); this.storeName = Objects.requireNonNull( storeName ); - this.partitionList = partitionList; - this.partitionNamesList = partitionNamesList; + this.partitionGroupsList = partitionGroupsList; + this.partitionGroupNamesList = partitionGroupNamesList; } @@ -79,11 +79,6 @@ public List getOperandList() { @Override public void unparse( SqlWriter writer, int leftPrec, int rightPrec ) { - // TODO @HENNLO: The partition part is still incomplete - /** There are several possible ways to unparse the partition section. - The To Do is deferred until we have decided if parsing of partition functions will be - self contained or not.*/ - writer.keyword( "ALTER" ); writer.keyword( "TABLE" ); table.unparse( writer, leftPrec, rightPrec ); @@ -93,6 +88,21 @@ public void unparse( SqlWriter writer, int leftPrec, int rightPrec ) { writer.keyword( "ON" ); writer.keyword( "STORE" ); storeName.unparse( writer, leftPrec, rightPrec ); + + if ( partitionGroupsList != null || partitionGroupNamesList != null ) { + writer.keyword( " WITH " ); + writer.keyword( " PARTITIONS" ); + SqlWriter.Frame frame = writer.startList( "(", ")" ); + + if ( partitionGroupNamesList != null ) { + for ( int i = 0; i < partitionGroupNamesList.size(); i++ ) { + partitionGroupNamesList.get( i ).unparse( writer, leftPrec, rightPrec ); + if ( i + 1 < partitionGroupNamesList.size() ) { + writer.sep( "," ); + } + } + } + } } @@ -106,7 +116,7 @@ public void execute( Context context, Statement statement ) { } // You can't partition placements if the table is not partitioned - if ( !catalogTable.isPartitioned && (!partitionList.isEmpty() || !partitionNamesList.isEmpty()) ) { + if ( !catalogTable.isPartitioned && (!partitionGroupsList.isEmpty() || !partitionGroupNamesList.isEmpty()) ) { throw new RuntimeException( "Partition Placement is not allowed for unpartitioned table '" + catalogTable.name + "'" ); } @@ -120,8 +130,8 @@ public void execute( Context context, Statement statement ) { DdlManager.getInstance().addPlacement( catalogTable, columnIds, - partitionList, - partitionNamesList.stream().map( SqlIdentifier::toString ).collect( Collectors.toList() ), + partitionGroupsList, + partitionGroupNamesList.stream().map( SqlIdentifier::toString ).collect( Collectors.toList() ), storeInstance, statement ); } catch ( PlacementAlreadyExistsException e ) { @@ -133,4 +143,3 @@ public void execute( Context context, Statement statement ) { } } - diff --git a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableMergePartitions.java b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableMergePartitions.java index 06825250c7..2c2dcfbe0e 100644 --- a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableMergePartitions.java +++ b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableMergePartitions.java @@ -22,6 +22,7 @@ import lombok.extern.slf4j.Slf4j; import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogTable; +import org.polypheny.db.ddl.DdlManager; import org.polypheny.db.jdbc.Context; import 
org.polypheny.db.sql.SqlIdentifier; import org.polypheny.db.sql.SqlNode; @@ -74,24 +75,12 @@ public void execute( Context context, Statement statement ) { // Check if table is even partitioned if ( catalogTable.partitionType != Catalog.PartitionType.NONE ) { - long tableId = catalogTable.id; if ( log.isDebugEnabled() ) { log.debug( "Merging partitions for table: {} with id {} on schema: {}", catalogTable.name, catalogTable.id, catalogTable.getSchemaName() ); } - // TODO : Data Migrate needed. - // We have partitioned data throughout many stores. And now want to merge all partitions. - // Currently although the table isn't partitioned anymore, the old data stays partitioned on the store. - // Therefore we need to make sure(maybe with migrator?) to gather all data from all partitions, and stores. That at the end of mergeTable() - // there aren't any partitioned chunks of data left on a single store. - - // Loop over **old.partitionIds** to delete all partitions which are part of table - for ( long partitionId : catalogTable.partitionIds ) { - catalog.deletePartition( tableId, catalogTable.schemaId, partitionId ); - } - - catalog.mergeTable( tableId ); + DdlManager.getInstance().removePartitioning( catalogTable, statement ); if ( log.isDebugEnabled() ) { log.debug( "Table: '{}' has been merged", catalogTable.name ); diff --git a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPartitions.java b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPartitions.java index f4f2b53acc..cf6c3d83c7 100644 --- a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPartitions.java +++ b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPartitions.java @@ -24,11 +24,12 @@ import java.util.Objects; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; -import org.polypheny.db.adapter.Adapter; import org.polypheny.db.adapter.AdapterManager; +import org.polypheny.db.adapter.DataStore; import org.polypheny.db.catalog.Catalog; -import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogPartitionGroup; import org.polypheny.db.catalog.entity.CatalogTable; +import org.polypheny.db.ddl.DdlManager; import org.polypheny.db.jdbc.Context; import org.polypheny.db.sql.SqlIdentifier; import org.polypheny.db.sql.SqlNode; @@ -41,28 +42,28 @@ /** - * Parse tree for {@code ALTER TABLE name MODIFY PARTITIONS (partitionId [, partitonId]* ) } statement. + * Parse tree for {@code ALTER TABLE name MODIFY PARTITIONS (partitionId [, partitionId]* ) } statement. 
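+ * <p>For instance (editor's illustration; the table name, store name, and partition group indices are made up):
+ * <pre>{@code
+ * ALTER TABLE orders MODIFY PARTITIONS (0, 2) ON STORE hsqldb1
+ * }</pre>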
*/ @Slf4j public class SqlAlterTableModifyPartitions extends SqlAlterTable { private final SqlIdentifier table; private final SqlIdentifier storeName; - private final List partitionList; - private final List partitionNamesList; + private final List partitionGroupList; + private final List partitionGroupNamesList; public SqlAlterTableModifyPartitions( SqlParserPos pos, SqlIdentifier table, SqlIdentifier storeName, - List partitionList, - List partitionNamesList ) { + List partitionGroupList, + List partitionGroupNamesList ) { super( pos ); this.table = Objects.requireNonNull( table ); this.storeName = Objects.requireNonNull( storeName ); - this.partitionList = partitionList; - this.partitionNamesList = partitionNamesList; //May be null and can only be used in association with PARTITION BY and PARTITIONS + this.partitionGroupList = partitionGroupList; + this.partitionGroupNamesList = partitionGroupNamesList; //May be null and can only be used in association with PARTITION BY and PARTITIONS } @@ -100,11 +101,11 @@ public void execute( Context context, Statement statement ) { long tableId = catalogTable.id; - if ( partitionList.isEmpty() && partitionNamesList.isEmpty() ) { + if ( partitionGroupList.isEmpty() && partitionGroupNamesList.isEmpty() ) { throw new RuntimeException( "Empty Partition Placement is not allowed for partitioned table '" + catalogTable.name + "'" ); } - Adapter storeInstance = AdapterManager.getInstance().getStore( storeName.getSimple() ); + DataStore storeInstance = AdapterManager.getInstance().getStore( storeName.getSimple() ); if ( storeInstance == null ) { throw SqlUtil.newContextException( storeName.getParserPosition(), @@ -121,46 +122,52 @@ public void execute( Context context, Statement statement ) { List tempPartitionList = new ArrayList<>(); // If index partitions are specified - if ( !partitionList.isEmpty() && partitionNamesList.isEmpty() ) { + if ( !partitionGroupList.isEmpty() && partitionGroupNamesList.isEmpty() ) { //First convert specified index to correct partitionId - for ( int partitionId : partitionList ) { + for ( int partitionId : partitionGroupList ) { // Check if specified partition index is even part of table and if so get corresponding uniquePartId try { - tempPartitionList.add( catalogTable.partitionIds.get( partitionId ) ); + tempPartitionList.add( catalogTable.partitionProperty.partitionGroupIds.get( partitionId ) ); } catch ( IndexOutOfBoundsException e ) { throw new RuntimeException( "Specified Partition-Index: '" + partitionId + "' is not part of table '" - + catalogTable.name + "', has only " + catalogTable.numPartitions + " partitions" ); + + catalogTable.name + "', has only " + catalogTable.partitionProperty.numPartitionGroups + " partitions" ); } } } // If name partitions are specified - else if ( !partitionNamesList.isEmpty() && partitionList.isEmpty() ) { - List catalogPartitions = catalog.getPartitions( tableId ); - for ( String partitionName : partitionNamesList.stream().map( Object::toString ) + else if ( !partitionGroupNamesList.isEmpty() && partitionGroupList.isEmpty() ) { + List catalogPartitionGroups = catalog.getPartitionGroups( tableId ); + for ( String partitionName : partitionGroupNamesList.stream().map( Object::toString ) .collect( Collectors.toList() ) ) { boolean isPartOfTable = false; - for ( CatalogPartition catalogPartition : catalogPartitions ) { - if ( partitionName.equals( catalogPartition.partitionName.toLowerCase() ) ) { - tempPartitionList.add( catalogPartition.id ); + for ( CatalogPartitionGroup 
catalogPartitionGroup : catalogPartitionGroups ) { + if ( partitionName.equals( catalogPartitionGroup.partitionGroupName.toLowerCase() ) ) { + tempPartitionList.add( catalogPartitionGroup.id ); isPartOfTable = true; break; } } if ( !isPartOfTable ) { throw new RuntimeException( "Specified Partition-Name: '" + partitionName + "' is not part of table '" - + catalogTable.name + "', has only " + catalog.getPartitionNames( tableId ) + " partitions" ); + + catalogTable.name + "', has only " + catalog.getPartitionGroupNames( tableId ) + " partitions" ); } } } // Check if in-memory dataPartitionPlacement Map should even be changed and therefore start costly partitioning // Avoid unnecessary partitioning when the placement is already partitioned in the same way it has been specified - if ( tempPartitionList.equals( catalog.getPartitionsOnDataPlacement( storeId, tableId ) ) ) { - log.info( "The data placement for table: '{}' on store: '{}' already contains all specified partitions of statement: {}", catalogTable.name, storeName, partitionList ); + if ( tempPartitionList.equals( catalog.getPartitionGroupsOnDataPlacement( storeId, tableId ) ) ) { + log.info( "The data placement for table: '{}' on store: '{}' already contains all specified partitions of statement: {}", + catalogTable.name, storeName, partitionGroupList ); return; } // Update - catalog.updatePartitionsOnDataPlacement( storeId, tableId, tempPartitionList ); + DdlManager.getInstance().modifyPartitionPlacement( + catalogTable, + tempPartitionList, + storeInstance, + statement + ); } } diff --git a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPlacement.java b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPlacement.java index f2773b7e53..b499ad22db 100644 --- a/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPlacement.java +++ b/core/src/main/java/org/polypheny/db/sql/ddl/altertable/SqlAlterTableModifyPlacement.java @@ -19,13 +19,11 @@ import static org.polypheny.db.util.Static.RESOURCE; -import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import lombok.extern.slf4j.Slf4j; import org.polypheny.db.adapter.DataStore; -import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.ddl.DdlManager; import org.polypheny.db.ddl.exception.IndexPreventsRemovalException; @@ -52,8 +50,8 @@ public class SqlAlterTableModifyPlacement extends SqlAlterTable { private final SqlIdentifier table; private final SqlNodeList columnList; private final SqlIdentifier storeName; - private final List partitionList; - private final List partitionNamesList; + private final List partitionGroupList; + private final List partitionGroupNamesList; public SqlAlterTableModifyPlacement( @@ -61,14 +59,14 @@ public SqlAlterTableModifyPlacement( SqlIdentifier table, SqlNodeList columnList, SqlIdentifier storeName, - List partitionList, - List partitionNamesList ) { + List partitionGroupList, + List partitionGroupNamesList ) { super( pos ); this.table = Objects.requireNonNull( table ); this.columnList = Objects.requireNonNull( columnList ); this.storeName = Objects.requireNonNull( storeName ); - this.partitionList = partitionList; - this.partitionNamesList = partitionNamesList; + this.partitionGroupList = partitionGroupList; + this.partitionGroupNamesList = partitionGroupNamesList; } @@ -80,10 +78,6 @@ public List getOperandList() { @Override public void unparse( SqlWriter 
writer, int leftPrec, int rightPrec ) { - // TODO @HENNLO: The partition part is still incomplete - /** There are several possible ways to unparse the partition section. - The To Do is deferred until we have decided if parsing of partition functions will be - self contained or not.*/ writer.keyword( "ALTER" ); writer.keyword( "TABLE" ); table.unparse( writer, leftPrec, rightPrec ); @@ -93,6 +87,21 @@ public void unparse( SqlWriter writer, int leftPrec, int rightPrec ) { writer.keyword( "ON" ); writer.keyword( "STORE" ); storeName.unparse( writer, leftPrec, rightPrec ); + + if ( partitionGroupList != null || partitionGroupNamesList != null ) { + writer.keyword( " WITH " ); + writer.keyword( " PARTITIONS" ); + SqlWriter.Frame frame = writer.startList( "(", ")" ); + if ( partitionGroupNamesList != null ) { + for ( int i = 0; i < partitionGroupNamesList.size(); i++ ) { + partitionGroupNamesList.get( i ).unparse( writer, leftPrec, rightPrec ); + if ( i + 1 < partitionGroupNamesList.size() ) { + writer.sep( "," ); + } + } + } + } + } @@ -105,23 +114,26 @@ public void execute( Context context, Statement statement ) { } // You can't partition placements if the table is not partitioned - if ( !catalogTable.isPartitioned && (!partitionList.isEmpty() || !partitionNamesList.isEmpty()) ) { - throw new RuntimeException( " Partition Placement is not allowed for unpartitioned table '" + catalogTable.name + "'" ); + if ( !catalogTable.isPartitioned && (!partitionGroupList.isEmpty() || !partitionGroupNamesList.isEmpty()) ) { + throw new RuntimeException( "Partition Placement is not allowed for unpartitioned table '" + catalogTable.name + "'" ); } - List columnIds = new LinkedList<>(); + // Check if all columns exist for ( SqlNode node : columnList.getList() ) { - CatalogColumn catalogColumn = getCatalogColumn( catalogTable.id, (SqlIdentifier) node ); - columnIds.add( catalogColumn.id ); + getCatalogColumn( catalogTable.id, (SqlIdentifier) node ); } - DataStore storeInstance = getDataStoreInstance( storeName ); + DataStore storeInstance = getDataStoreInstance( storeName ); try { DdlManager.getInstance().modifyColumnPlacement( catalogTable, - columnList.getList().stream().map( c -> getCatalogColumn( catalogTable.id, (SqlIdentifier) c ).id ).collect( Collectors.toList() ), - partitionList, - partitionNamesList.stream().map( SqlIdentifier::toString ).collect( Collectors.toList() ), + columnList.getList().stream() + .map( c -> getCatalogColumn( catalogTable.id, (SqlIdentifier) c ).id ) + .collect( Collectors.toList() ), + partitionGroupList, + partitionGroupNamesList.stream() + .map( SqlIdentifier::toString ) + .collect( Collectors.toList() ), storeInstance, statement ); } catch ( PlacementNotExistsException e ) { diff --git a/core/src/main/java/org/polypheny/db/transaction/Transaction.java b/core/src/main/java/org/polypheny/db/transaction/Transaction.java index 805e8909ea..f19854c203 100644 --- a/core/src/main/java/org/polypheny/db/transaction/Transaction.java +++ b/core/src/main/java/org/polypheny/db/transaction/Transaction.java @@ -23,7 +23,7 @@ import org.polypheny.db.adapter.java.JavaTypeFactory; import org.polypheny.db.catalog.entity.CatalogSchema; import org.polypheny.db.information.InformationManager; -import org.polypheny.db.monitoring.events.MonitoringEvent; +import org.polypheny.db.monitoring.events.StatementEvent; import org.polypheny.db.prepare.PolyphenyDbCatalogReader; import org.polypheny.db.processing.DataMigrator; import org.polypheny.db.processing.SqlProcessor; @@ -74,9 +74,9 @@ public 
interface Transaction { DataMigrator getDataMigrator(); - MonitoringEvent getMonitoringEvent(); + StatementEvent getMonitoringData(); - void setMonitoringEvent( MonitoringEvent event ); + void setMonitoringData( StatementEvent event ); /** * Flavor, how multimedia results should be returned from a store. diff --git a/core/src/main/java/org/polypheny/db/util/background/BackgroundTask.java b/core/src/main/java/org/polypheny/db/util/background/BackgroundTask.java index 6b973bf494..b6d43dd321 100644 --- a/core/src/main/java/org/polypheny/db/util/background/BackgroundTask.java +++ b/core/src/main/java/org/polypheny/db/util/background/BackgroundTask.java @@ -15,12 +15,14 @@ enum TaskPriority { enum TaskSchedulingType { - WORKLOAD( 0 ), EVERY_SECOND( 1000 ), EVERY_FIVE_SECONDS( 5000 ), EVERY_TEN_SECONDS( 10000 ), EVERY_THIRTY_SECONDS( 30000 ), - EVERY_MINUTE( 60000 ); + EVERY_MINUTE( 60000 ), + EVERY_TEN_MINUTES( 600000 ), + EVERY_FIFTEEN_MINUTES( 900000 ), + EVERY_THIRTY_MINUTES( 1800000 ); @Getter private long millis; diff --git a/core/src/main/java/org/polypheny/db/util/background/BackgroundTaskHandle.java b/core/src/main/java/org/polypheny/db/util/background/BackgroundTaskHandle.java index cf439b55b1..cb3a83eb29 100644 --- a/core/src/main/java/org/polypheny/db/util/background/BackgroundTaskHandle.java +++ b/core/src/main/java/org/polypheny/db/util/background/BackgroundTaskHandle.java @@ -32,7 +32,7 @@ class BackgroundTaskHandle implements Runnable { @Getter private long maxExecTime = 0L; - private ScheduledFuture runner; + private final ScheduledFuture runner; public BackgroundTaskHandle( String id, BackgroundTask task, String description, TaskPriority priority, TaskSchedulingType schedulingType ) { @@ -44,11 +44,7 @@ public BackgroundTaskHandle( String id, BackgroundTask task, String description, // Schedule ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor(); - if ( schedulingType == TaskSchedulingType.WORKLOAD ) { - this.runner = exec.scheduleWithFixedDelay( this, 0, 100, TimeUnit.MILLISECONDS ); // TODO MV: implement workload based scheduling - } else { - this.runner = exec.scheduleAtFixedRate( this, 0, schedulingType.getMillis(), TimeUnit.MILLISECONDS ); - } + this.runner = exec.scheduleAtFixedRate( this, 0, schedulingType.getMillis(), TimeUnit.MILLISECONDS ); } @@ -110,6 +106,7 @@ public double getAverage() { } return sum / (double) window.size(); } + } } diff --git a/core/src/test/java/org/polypheny/db/sql/parser/SqlParserTest.java b/core/src/test/java/org/polypheny/db/sql/parser/SqlParserTest.java index 9732b71798..9aeaf9b34e 100644 --- a/core/src/test/java/org/polypheny/db/sql/parser/SqlParserTest.java +++ b/core/src/test/java/org/polypheny/db/sql/parser/SqlParserTest.java @@ -248,6 +248,7 @@ public class SqlParserTest { "FOUND", "92", "99", "FRAME_ROW", "2014", "c", "FREE", "99", "2003", "2011", "2014", "c", + "FREQUENCY", "99", "2003", "2011", "2014", "c", "FROM", "92", "99", "2003", "2011", "2014", "c", "FULL", "92", "99", "2003", "2011", "2014", "c", "FUNCTION", "92", "99", "2003", "2011", "2014", "c", @@ -496,6 +497,7 @@ public class SqlParserTest { "SYSTEM_USER", "92", "99", "2003", "2011", "2014", "c", "TABLE", "92", "99", "2003", "2011", "2014", "c", "TABLESAMPLE", "2003", "2011", "2014", "c", + "TEMPERATURE", "99", "2003", "2011", "2014", "c", "TEMPORARY", "92", "99", "THEN", "92", "99", "2003", "2011", "2014", "c", "TIME", "92", "99", "2003", "2011", "2014", "c", diff --git a/core/src/test/java/org/polypheny/db/test/catalog/MockCatalog.java 
b/core/src/test/java/org/polypheny/db/test/catalog/MockCatalog.java index ec82e8a7b1..98cc24b006 100644 --- a/core/src/test/java/org/polypheny/db/test/catalog/MockCatalog.java +++ b/core/src/test/java/org/polypheny/db/test/catalog/MockCatalog.java @@ -30,6 +30,8 @@ import org.polypheny.db.catalog.entity.CatalogIndex; import org.polypheny.db.catalog.entity.CatalogKey; import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogPartitionGroup; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogPrimaryKey; import org.polypheny.db.catalog.entity.CatalogQueryInterface; import org.polypheny.db.catalog.entity.CatalogSchema; @@ -48,6 +50,7 @@ import org.polypheny.db.catalog.exceptions.UnknownSchemaException; import org.polypheny.db.catalog.exceptions.UnknownTableException; import org.polypheny.db.catalog.exceptions.UnknownUserException; +import org.polypheny.db.partition.properties.PartitionProperty; import org.polypheny.db.rel.RelCollation; import org.polypheny.db.rel.RelNode; import org.polypheny.db.rel.type.RelDataType; @@ -268,7 +271,7 @@ public void setPrimaryKey( long tableId, Long keyId ) { @Override - public void addColumnPlacement( int adapterId, long columnId, PlacementType placementType, String physicalSchemaName, String physicalTableName, String physicalColumnName, List partitionIds ) { + public void addColumnPlacement( int adapterId, long columnId, PlacementType placementType, String physicalSchemaName, String physicalTableName, String physicalColumnName, List partitionGroupIds ) { throw new NotImplementedException(); } @@ -292,13 +295,13 @@ public boolean checkIfExistsColumnPlacement( int adapterId, long columnId ) { @Override - public List getColumnPlacements( long columnId ) { + public List getColumnPlacement( long columnId ) { throw new NotImplementedException(); } @Override - public List getColumnPlacementsOnAdapter( int adapterId, long tableId ) { + public List getColumnPlacementsOnAdapterPerTable( int adapterId, long tableId ) { throw new NotImplementedException(); } @@ -358,7 +361,7 @@ public void updateColumnPlacementPhysicalPosition( int adapterId, long columnId @Override - public void updateColumnPlacementPhysicalNames( int adapterId, long columnId, String physicalSchemaName, String physicalTableName, String physicalColumnName, boolean updatePhysicalColumnPosition ) { + public void updateColumnPlacementPhysicalNames( int adapterId, long columnId, String physicalSchemaName, String physicalColumnName, boolean updatePhysicalColumnPosition ) { throw new NotImplementedException(); } @@ -700,25 +703,25 @@ public void deleteQueryInterface( int ifaceId ) { @Override - public long addPartition( long tableId, String partitionName, long schemaId, int ownerId, PartitionType partitionType, List effectivePartitionQualifier, boolean isUnbound ) throws GenericCatalogException { + public long addPartitionGroup( long tableId, String partitionGroupName, long schemaId, PartitionType partitionType, long numberOfInternalPartitions, List effectivePartitionGroupQualifier, boolean isUnbound ) throws GenericCatalogException { throw new NotImplementedException(); } @Override - public void deletePartition( long tableId, long schemaId, long partitionId ) { + public void deletePartitionGroup( long tableId, long schemaId, long partitionGroupId ) { throw new NotImplementedException(); } @Override - public CatalogPartition getPartition( long partitionId ) { + public CatalogPartitionGroup 
getPartitionGroup( long partitionGroupId ) { throw new NotImplementedException(); } @Override - public void partitionTable( long tableId, PartitionType partitionType, long partitionColumnId, int numPartitions, List partitionIds ) { + public void partitionTable( long tableId, PartitionType partitionType, long partitionColumnId, int numPartitionGroups, List partitionGroupIds, PartitionProperty partitionProperty ) { throw new NotImplementedException(); } @@ -730,61 +733,61 @@ public void mergeTable( long tableId ) { @Override - public List getPartitions( long tableId ) { + public List getPartitionGroups( long tableId ) { throw new NotImplementedException(); } @Override - public List getPartitions( Pattern databaseNamePattern, Pattern schemaNamePattern, Pattern tableNamePattern ) { + public List getPartitionGroups( Pattern databaseNamePattern, Pattern schemaNamePattern, Pattern tableNamePattern ) { throw new NotImplementedException(); } @Override - public List getPartitionNames( long tableId ) { + public List getPartitionGroupNames( long tableId ) { throw new NotImplementedException(); } @Override - public List getColumnPlacementsByPartition( long tableId, long partitionId, long columnId ) { + public List getColumnPlacementsByPartitionGroup( long tableId, long partitionGroupId, long columnId ) { throw new NotImplementedException(); } @Override - public List getAdaptersByPartition( long tableId, long partitionId ) { + public List getAdaptersByPartitionGroup( long tableId, long partitionGroupId ) { throw new NotImplementedException(); } @Override - public void updatePartitionsOnDataPlacement( int adapterId, long tableId, List partitionIds ) { + public void updatePartitionGroupsOnDataPlacement( int adapterId, long tableId, List partitionGroupIds ) { throw new NotImplementedException(); } @Override - public List getPartitionsOnDataPlacement( int adapterId, long tableId ) { + public List getPartitionGroupsOnDataPlacement( int adapterId, long tableId ) { throw new NotImplementedException(); } @Override - public List getPartitionsIndexOnDataPlacement( int adapterId, long tableId ) { + public List getPartitionGroupsIndexOnDataPlacement( int adapterId, long tableId ) { throw new NotImplementedException(); } @Override - public void deletePartitionsOnDataPlacement( int storeId, long tableId ) { + public void deletePartitionGroupsOnDataPlacement( int storeId, long tableId ) { throw new NotImplementedException(); } @Override - public boolean validatePartitionDistribution( int adapterId, long tableId, long columnId ) { + public boolean validatePartitionGroupDistribution( int adapterId, long tableId, long columnId, int threshold ) { throw new NotImplementedException(); } @@ -812,4 +815,226 @@ public void clear() { throw new NotImplementedException(); } + + /** + * Adds a partition to the catalog. + * + * @param tableId The unique id of the table + * @param schemaId The unique id of the schema + * @param partitionGroupId The partition group the new partition should initially be added to + * @return The id of the created partition + */ + @Override + public long addPartition( long tableId, long schemaId, long partitionGroupId, List effectivePartitionGroupQualifier, boolean isUnbound ) throws GenericCatalogException { + throw new NotImplementedException(); } + + + /** + * Deletes a single partition and all references.
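+ * <p>Illustrative pairing with {@link #addPartition(long, long, long, List, boolean)} (editor's sketch;
+ * all ids and the empty qualifier list are assumed values):
+ * <pre>{@code
+ * // create a partition in an existing partition group, then remove it again
+ * long partitionId = catalog.addPartition( tableId, schemaId, partitionGroupId, new ArrayList<>(), false );
+ * catalog.deletePartition( tableId, schemaId, partitionId );
+ * }</pre>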
+ * + * @param tableId The unique id of the table + * @param schemaId The unique id of the schema + * @param partitionId The partitionId to be deleted + */ + @Override + public void deletePartition( long tableId, long schemaId, long partitionId ) { + throw new NotImplementedException(); + } + + + /** + * Get a partition object by its unique id. + * + * @param partitionId The unique id of the partition + * @return A catalog partition + */ + @Override + public CatalogPartition getPartition( long partitionId ) { + throw new NotImplementedException(); + } + + + /** + * Updates the partition properties of a table. + * + * @param tableId Table to be partitioned + * @param partitionProperty Partition properties + */ + @Override + public void updateTablePartitionProperties( long tableId, PartitionProperty partitionProperty ) { + throw new NotImplementedException(); + } + + + /** + * Get a list of all partitions belonging to a specific partition group. + * + * @param partitionGroupId Partition group to be queried + * @return List of all partitions in this partition group + */ + @Override + public List getPartitions( long partitionGroupId ) { + throw new NotImplementedException(); + } + + + /** + * Get all partitions of the specified database which fit to the specified filter patterns. + * getPartitions( databaseName, null, null ) returns all partitions of the database. + * + * @param databaseNamePattern Pattern for the database name. null returns all. + * @param schemaNamePattern Pattern for the schema name. null returns all. + * @param tableNamePattern Pattern for the table name. null returns all. + * @return List of partitions which fit to the specified filters. If there is no partition which meets the criteria, an empty list is returned. + */ + @Override + public List getPartitions( Pattern databaseNamePattern, Pattern schemaNamePattern, Pattern tableNamePattern ) { + throw new NotImplementedException(); + } + + + /** + * Get all partitions of a DataPlacement (identified by adapterId and tableId). + * + * @param adapterId The unique id of the adapter + * @param tableId The unique id of the table + * @return List of partitionIds + */ + @Override + public List getPartitionsOnDataPlacement( int adapterId, long tableId ) { + throw new NotImplementedException(); + } + + + /** + * Adds a placement for a partition. + * + * @param adapterId The adapter on which the partition should be placed + * @param tableId The table the partition belongs to + * @param partitionId The partition to be placed + * @param placementType The type of placement + * @param physicalSchemaName The schema name on the adapter + * @param physicalTableName The table name on the adapter + */ + @Override + public void addPartitionPlacement( int adapterId, long tableId, long partitionId, PlacementType placementType, String physicalSchemaName, String physicalTableName ) { + throw new NotImplementedException(); + } + + + /** + * Change physical names of a partition placement. + * + * @param adapterId The id of the adapter + * @param partitionId The id of the partition + * @param physicalSchemaName The physical schema name + * @param physicalTableName The physical table name + */ + @Override + public void updatePartitionPlacementPhysicalNames( int adapterId, long partitionId, String physicalSchemaName, String physicalTableName ) { + throw new NotImplementedException(); + } + + + /** + * Deletes a placement for a partition.
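+ * <p>Sketch of a partition placement life cycle as mirrored by these stubs (editor's illustration; the ids,
+ * placement type, and physical names are assumptions):
+ * <pre>{@code
+ * catalog.addPartitionPlacement( adapterId, tableId, partitionId, PlacementType.AUTOMATIC, "public", "tab100_7" );
+ * // the adapter may later rename the physical entity, e.g. after a schema change
+ * catalog.updatePartitionPlacementPhysicalNames( adapterId, partitionId, "public", "tab100_8" );
+ * catalog.deletePartitionPlacement( adapterId, partitionId );
+ * }</pre>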
+ * + * @param adapterId The adapter on which the partition placement is located + * @param partitionId The partition whose placement should be removed + */ + @Override + public void deletePartitionPlacement( int adapterId, long partitionId ) { + throw new NotImplementedException(); + } + + + @Override + public CatalogPartitionPlacement getPartitionPlacement( int adapterId, long partitionId ) { + throw new NotImplementedException(); + } + + + @Override + public List getPartitionPlacementsByAdapter( int adapterId ) { + throw new NotImplementedException(); + } + + + @Override + public List getPartitionPlacementByTable( int adapterId, long tableId ) { + throw new NotImplementedException(); + } + + + @Override + public List getAllPartitionPlacementsByTable( long tableId ) { + throw new NotImplementedException(); + } + + + @Override + public List getPartitionPlacements( long partitionId ) { + throw new NotImplementedException(); + } + + + @Override + public boolean checkIfExistsPartitionPlacement( int adapterId, long partitionId ) { + throw new NotImplementedException(); + } + + + @Override + public void removeTableFromPeriodicProcessing( long tableId ) { + throw new NotImplementedException(); + } + + + @Override + public void addTableToPeriodicProcessing( long tableId ) { + throw new NotImplementedException(); + } + + + @Override + public List getTablesForPeriodicProcessing() { + throw new NotImplementedException(); + } + + + @Override + public List getPartitionsByTable( long tableId ) { + throw new NotImplementedException(); + } + + + /** + * Updates the specified partition group with the attached partitionIds. + * + * @param partitionGroupId The id of the partition group to be updated + * @param partitionIds List of new partitionIds + */ + @Override + public void updatePartitionGroup( long partitionGroupId, List partitionIds ) { + throw new NotImplementedException(); + } + + + @Override + public void addPartitionToGroup( long partitionGroupId, Long partitionId ) { + throw new NotImplementedException(); + } + + + @Override + public void removePartitionFromGroup( long partitionGroupId, Long partitionId ) { + throw new NotImplementedException(); + } + + + /** + * Assigns the partition to a new partition group. + */ + @Override + public void updatePartition( long partitionId, Long partitionGroupId ) { + throw new NotImplementedException(); + } + } diff --git a/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/CottontailStore.java b/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/CottontailStore.java index d415d8906f..929d341fcf 100644 --- a/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/CottontailStore.java +++ b/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/CottontailStore.java @@ -41,6 +41,7 @@ import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogIndex; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.jdbc.Context; import org.polypheny.db.rel.type.RelDataType; @@ -175,21 +176,24 @@ public void createNewSchema( SchemaPlus rootSchema, String name ) { @Override - public Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore ) { + public Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { final RelDataTypeFactory typeFactory = new PolyTypeFactoryImpl( RelDataTypeSystem.DEFAULT ); final RelDataTypeFactory.Builder fieldInfo = typeFactory.builder(); List
logicalColumnNames = new LinkedList<>(); List physicalColumnNames = new LinkedList<>(); String physicalSchemaName = null; String physicalTableName = null; + + if ( physicalSchemaName == null ) { + physicalSchemaName = partitionPlacement.physicalTableName != null ? partitionPlacement.physicalSchemaName : this.dbName; + } + if ( physicalTableName == null ) { + physicalTableName = partitionPlacement.physicalTableName != null ? partitionPlacement.physicalTableName : CottontailNameUtil.createPhysicalTableName( combinedTable.id, partitionPlacement.partitionId ); + } + for ( CatalogColumnPlacement placement : columnPlacementsOnStore ) { CatalogColumn catalogColumn = Catalog.getInstance().getColumn( placement.columnId ); - if ( physicalSchemaName == null ) { - physicalSchemaName = placement.physicalTableName != null ? placement.physicalSchemaName : this.dbName; - } - if ( physicalTableName == null ) { - physicalTableName = placement.physicalTableName != null ? placement.physicalTableName : "tab" + combinedTable.id; - } + RelDataType sqlType = catalogColumn.getRelDataType( typeFactory ); fieldInfo.add( catalogColumn.name, placement.physicalColumnName, sqlType ).nullable( catalogColumn.nullable ); logicalColumnNames.add( catalogColumn.name ); @@ -218,32 +222,42 @@ public Schema getCurrentSchema() { @Override - public void createTable( Context context, CatalogTable combinedTable ) { + public void createTable( Context context, CatalogTable combinedTable, List partitionIds ) { + /* Begin or continue Cottontail DB transaction. */ final TransactionId txId = this.wrapper.beginOrContinue( context.getStatement().getTransaction() ); /* Prepare CREATE TABLE message. */ - final List columns = this.buildColumnDefinitions( this.catalog.getColumnPlacementsOnAdapter( this.getAdapterId(), combinedTable.id ) ); - final String physicalTableName = CottontailNameUtil.createPhysicalTableName( combinedTable.id ); - final EntityName tableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( physicalTableName ) - .build(); - final EntityDefinition definition = EntityDefinition.newBuilder() - .setEntity( tableEntity ) - .addAllColumns( columns ) - .build(); + final List columns = this.buildColumnDefinitions( this.catalog.getColumnPlacementsOnAdapterPerTable( this.getAdapterId(), combinedTable.id ) ); - if ( !this.wrapper.createEntityBlocking( CreateEntityMessage.newBuilder().setTxId( txId ).setDefinition( definition ).build() ) ) { - throw new RuntimeException( "Unable to create table." ); - } + for ( long partitionId : partitionIds ) { - for ( CatalogColumnPlacement placement : this.catalog.getColumnPlacementsOnAdapter( this.getAdapterId(), combinedTable.id ) ) { + final String physicalTableName = CottontailNameUtil.createPhysicalTableName( combinedTable.id, partitionId ); + catalog.updatePartitionPlacementPhysicalNames( + getAdapterId(), + partitionId, + this.dbName, + physicalTableName ); + + final EntityName tableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( physicalTableName ) + .build(); + final EntityDefinition definition = EntityDefinition.newBuilder() + .setEntity( tableEntity ) + .addAllColumns( columns ) + .build(); + + if ( !this.wrapper.createEntityBlocking( CreateEntityMessage.newBuilder().setTxId( txId ).setDefinition( definition ).build() ) ) { + throw new RuntimeException( "Unable to create table." 
); + } + + } + for ( CatalogColumnPlacement placement : this.catalog.getColumnPlacementsOnAdapterPerTable( this.getAdapterId(), combinedTable.id ) ) { this.catalog.updateColumnPlacementPhysicalNames( this.getAdapterId(), placement.columnId, this.dbName, - physicalTableName, CottontailNameUtil.createPhysicalColumnName( placement.columnId ), true ); } @@ -275,18 +289,24 @@ private List buildColumnDefinitions( List partitionIds ) { /* Begin or continue Cottontail DB transaction. */ final TransactionId txId = this.wrapper.beginOrContinue( context.getStatement().getTransaction() ); - /* Prepare DROP TABLE message. */ - final String physicalTableName = CottontailNameUtil.getPhysicalTableName( this.getAdapterId(), combinedTable.id ); - final EntityName tableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( physicalTableName ) - .build(); + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + /* Prepare DROP TABLE message. */ + //final String physicalTableName = CottontailNameUtil.getPhysicalTableName( this.getAdapterId(), combinedTable.id ); + final String physicalTableName = partitionPlacement.physicalTableName; + final EntityName tableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( physicalTableName ) + .build(); - this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); + this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); + } } @@ -295,83 +315,87 @@ public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn /* Begin or continue Cottontail DB transaction. 
*/ final TransactionId txId = this.wrapper.beginOrContinue( context.getStatement().getTransaction() ); - final List placements = this.catalog.getColumnPlacementsOnAdapter( this.getAdapterId(), catalogTable.id ); + final List placements = this.catalog.getColumnPlacementsOnAdapterPerTable( this.getAdapterId(), catalogTable.id ); final List columns = this.buildColumnDefinitions( placements ); - final String currentPhysicalTableName; - if ( placements.get( 0 ).columnId == catalogColumn.id ) { - currentPhysicalTableName = placements.get( 1 ).physicalTableName; - } else { - currentPhysicalTableName = placements.get( 0 ).physicalTableName; - } - final String newPhysicalTableName = CottontailNameUtil.incrementNameRevision( currentPhysicalTableName ); - final String newPhysicalColumnName = CottontailNameUtil.createPhysicalColumnName( catalogColumn.id ); + List partitionPlacements = catalog.getPartitionPlacementByTable( getAdapterId(), catalogTable.id ); - final EntityName tableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( currentPhysicalTableName ) - .build(); - final EntityName newTableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( newPhysicalTableName ) - .build(); + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { - final CreateEntityMessage message = CreateEntityMessage.newBuilder() - .setTxId( txId ) - .setDefinition( EntityDefinition.newBuilder() - .setEntity( newTableEntity ) - .addAllColumns( columns ) ).build(); + //Since only one partition is available + final String currentPhysicalTableName = partitionPlacement.physicalTableName; - if ( !this.wrapper.createEntityBlocking( message ) ) { - throw new RuntimeException( "Unable to create table." ); - } + final String newPhysicalTableName = CottontailNameUtil.incrementNameRevision( currentPhysicalTableName ); + final String newPhysicalColumnName = CottontailNameUtil.createPhysicalColumnName( catalogColumn.id ); - PolyType actualDefaultType; - Object defaultValue; - if ( catalogColumn.defaultValue != null ) { - actualDefaultType = (catalogColumn.collectionsType != null) - ? catalogColumn.collectionsType - : catalogColumn.type; - defaultValue = CottontailTypeUtil.defaultValueParser( catalogColumn.defaultValue, actualDefaultType ); - } else { - defaultValue = null; - actualDefaultType = null; - } - CottontailGrpc.Literal defaultData = CottontailTypeUtil.toData( defaultValue, actualDefaultType, null ); - - final QueryMessage query = QueryMessage.newBuilder().setTxId( txId ).setQuery( Query.newBuilder().setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( tableEntity ) ) ) ).build(); - final Iterator queryResponse = this.wrapper.query( query ); - queryResponse.forEachRemaining( responseMessage -> { - for ( Tuple tuple : responseMessage.getTuplesList() ) { - final InsertMessage.Builder insert = InsertMessage.newBuilder().setTxId( txId ).setFrom( - From.newBuilder().setScan( Scan.newBuilder().setEntity( newTableEntity ) ) - ); - int i = 0; - for ( CottontailGrpc.Literal literal : tuple.getDataList() ) { - insert.addElementsBuilder().setColumn( responseMessage.getColumns( i++ ) ).setValue( literal ); - } - insert.addElementsBuilder() - .setColumn( ColumnName.newBuilder().setName( newPhysicalColumnName ).build() ) - .setValue( defaultData ); - if ( !this.wrapper.insert( insert.build() ) ) { - throw new RuntimeException( "Unable to migrate data." 
); - } + final EntityName tableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( currentPhysicalTableName ) + .build(); + final EntityName newTableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( newPhysicalTableName ) + .build(); + + final CreateEntityMessage message = CreateEntityMessage.newBuilder() + .setTxId( txId ) + .setDefinition( EntityDefinition.newBuilder() + .setEntity( newTableEntity ) + .addAllColumns( columns ) ).build(); + + if ( !this.wrapper.createEntityBlocking( message ) ) { + throw new RuntimeException( "Unable to create table." ); + } + + PolyType actualDefaultType; + Object defaultValue; + if ( catalogColumn.defaultValue != null ) { + actualDefaultType = (catalogColumn.collectionsType != null) + ? catalogColumn.collectionsType + : catalogColumn.type; + defaultValue = CottontailTypeUtil.defaultValueParser( catalogColumn.defaultValue, actualDefaultType ); + } else { + defaultValue = null; + actualDefaultType = null; } - } ); + CottontailGrpc.Literal defaultData = CottontailTypeUtil.toData( defaultValue, actualDefaultType, null ); + + final QueryMessage query = QueryMessage.newBuilder().setTxId( txId ).setQuery( Query.newBuilder().setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( tableEntity ) ) ) ).build(); + final Iterator queryResponse = this.wrapper.query( query ); + queryResponse.forEachRemaining( responseMessage -> { + for ( Tuple tuple : responseMessage.getTuplesList() ) { + final InsertMessage.Builder insert = InsertMessage.newBuilder().setTxId( txId ).setFrom( + From.newBuilder().setScan( Scan.newBuilder().setEntity( newTableEntity ) ) + ); + int i = 0; + for ( CottontailGrpc.Literal literal : tuple.getDataList() ) { + insert.addElementsBuilder().setColumn( responseMessage.getColumns( i++ ) ).setValue( literal ); + } + insert.addElementsBuilder() + .setColumn( ColumnName.newBuilder().setName( newPhysicalColumnName ).build() ) + .setValue( defaultData ); + if ( !this.wrapper.insert( insert.build() ) ) { + throw new RuntimeException( "Unable to migrate data." ); + } + } + } ); + catalog.updatePartitionPlacementPhysicalNames( getAdapterId(), partitionPlacement.partitionId, partitionPlacement.physicalSchemaName, newPhysicalTableName ); + + // Delete old table + this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); + + } // Update column placement physical table names - for ( CatalogColumnPlacement placement : this.catalog.getColumnPlacementsOnAdapter( this.getAdapterId(), catalogTable.id ) ) { + for ( CatalogColumnPlacement placement : this.catalog.getColumnPlacementsOnAdapterPerTable( this.getAdapterId(), catalogTable.id ) ) { this.catalog.updateColumnPlacementPhysicalNames( this.getAdapterId(), placement.columnId, this.dbName, - newPhysicalTableName, CottontailNameUtil.createPhysicalColumnName( placement.columnId ), true ); } - // Delete old table - this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); } @@ -381,111 +405,124 @@ public void dropColumn( Context context, CatalogColumnPlacement columnPlacement /* Begin or continue Cottontail DB transaction. 
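   For orientation, the physical names handled in these loops follow the scheme introduced
   in CottontailNameUtil further below: tab<tableId>_part<partitionId> plus an optional
   r<revision> suffix. A worked example with illustrative ids:

       CottontailNameUtil.createPhysicalTableName( 42, 7 );          // "tab42_part7"
       CottontailNameUtil.incrementNameRevision( "tab42_part7" );    // "tab42_part7r1"
       CottontailNameUtil.incrementNameRevision( "tab42_part7r1" );  // "tab42_part7r2"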
*/ final TransactionId txId = this.wrapper.beginOrContinue( context.getStatement().getTransaction() ); - final List placements = this.catalog.getColumnPlacementsOnAdapter( this.getAdapterId(), columnPlacement.tableId ); + final List placements = this.catalog.getColumnPlacementsOnAdapterPerTable( this.getAdapterId(), columnPlacement.tableId ); placements.removeIf( it -> it.columnId == columnPlacement.columnId ); final List columns = this.buildColumnDefinitions( placements ); + CatalogTable catalogTable = catalog.getTable( placements.get( 0 ).tableId ); - final String currentPhysicalTableName = placements.get( 0 ).physicalTableName; + List partitionPlacements = catalog.getPartitionPlacementByTable( getAdapterId(), catalogTable.id ); - final String newPhysicalTableName = CottontailNameUtil.incrementNameRevision( currentPhysicalTableName ); - final String oldPhysicalColumnName = columnPlacement.physicalColumnName; + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { - final EntityName tableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( currentPhysicalTableName ) - .build(); - final EntityName newTableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( newPhysicalTableName ) - .build(); + final String currentPhysicalTableName = partitionPlacement.physicalTableName; - final CreateEntityMessage message = CreateEntityMessage.newBuilder().setTxId( txId ).setDefinition( - EntityDefinition.newBuilder().setEntity( newTableEntity ).addAllColumns( columns ) - ).build(); + final String newPhysicalTableName = CottontailNameUtil.incrementNameRevision( currentPhysicalTableName ); + final String oldPhysicalColumnName = columnPlacement.physicalColumnName; - if ( !this.wrapper.createEntityBlocking( message ) ) { - throw new RuntimeException( "Unable to create table." ); - } + final EntityName tableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( currentPhysicalTableName ) + .build(); + final EntityName newTableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( newPhysicalTableName ) + .build(); - final Query query = Query.newBuilder().setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( tableEntity ) ) ).build(); - final Iterator queryResponse = this.wrapper.query( QueryMessage.newBuilder().setTxId( txId ).setQuery( query ).build() ); - queryResponse.forEachRemaining( responseMessage -> { - int droppedIndex = 0; - for ( ColumnName c : responseMessage.getColumnsList() ) { - if ( c.getName().equals( oldPhysicalColumnName ) ) { - break; - } - droppedIndex++; + final CreateEntityMessage message = CreateEntityMessage.newBuilder().setTxId( txId ).setDefinition( + EntityDefinition.newBuilder().setEntity( newTableEntity ).addAllColumns( columns ) + ).build(); + + if ( !this.wrapper.createEntityBlocking( message ) ) { + throw new RuntimeException( "Unable to create table." 
); } - for ( Tuple tuple : responseMessage.getTuplesList() ) { - final InsertMessage.Builder insert = InsertMessage.newBuilder().setTxId( txId ).setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( newTableEntity ) ) ); - int i = 0; - for ( Literal l : tuple.getDataList() ) { - if ( i != droppedIndex ) { - insert.addElementsBuilder().setColumn( responseMessage.getColumns( i ) ).setValue( l ); + + final Query query = Query.newBuilder().setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( tableEntity ) ) ).build(); + final Iterator queryResponse = this.wrapper.query( QueryMessage.newBuilder().setTxId( txId ).setQuery( query ).build() ); + queryResponse.forEachRemaining( responseMessage -> { + int droppedIndex = 0; + for ( ColumnName c : responseMessage.getColumnsList() ) { + if ( c.getName().equals( oldPhysicalColumnName ) ) { + break; } - i++; + droppedIndex++; } - if ( !this.wrapper.insert( insert.build() ) ) { - throw new RuntimeException( "Failed to migrate data." ); + for ( Tuple tuple : responseMessage.getTuplesList() ) { + final InsertMessage.Builder insert = InsertMessage.newBuilder().setTxId( txId ).setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( newTableEntity ) ) ); + int i = 0; + for ( Literal l : tuple.getDataList() ) { + if ( i != droppedIndex ) { + insert.addElementsBuilder().setColumn( responseMessage.getColumns( i ) ).setValue( l ); + } + i++; + } + if ( !this.wrapper.insert( insert.build() ) ) { + throw new RuntimeException( "Failed to migrate data." ); + } } - } - } ); + } ); + + catalog.updatePartitionPlacementPhysicalNames( getAdapterId(), partitionPlacement.partitionId, partitionPlacement.physicalSchemaName, newPhysicalTableName ); + + // Delete old table + this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); + } // Update column placement physical table names - for ( CatalogColumnPlacement placement : this.catalog.getColumnPlacementsOnAdapter( this.getAdapterId(), columnPlacement.tableId ) ) { + for ( CatalogColumnPlacement placement : this.catalog.getColumnPlacementsOnAdapterPerTable( this.getAdapterId(), columnPlacement.tableId ) ) { this.catalog.updateColumnPlacementPhysicalNames( this.getAdapterId(), placement.columnId, this.dbName, - newPhysicalTableName, CottontailNameUtil.createPhysicalColumnName( placement.columnId ), true ); } - // Delete old table - this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); } @Override - public void addIndex( Context context, CatalogIndex catalogIndex ) { + public void addIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { /* Begin or continue Cottontail DB transaction. */ final TransactionId txId = this.wrapper.beginOrContinue( context.getStatement().getTransaction() ); - /* Prepare CREATE INDEX message. 
*/ - final IndexType indexType; - try { - indexType = IndexType.valueOf( catalogIndex.method.toUpperCase() ); - } catch ( Exception e ) { - throw new RuntimeException( "Unknown index type: " + catalogIndex.method ); - } - final IndexName.Builder indexName = IndexName.newBuilder() - .setName( "idx" + catalogIndex.id ).setEntity( - EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( Catalog.getInstance().getColumnPlacement( getAdapterId(), catalogIndex.key.columnIds.get( 0 ) ).physicalTableName ) ); - - final IndexDefinition.Builder definition = IndexDefinition.newBuilder().setType( indexType ).setName( indexName ); - for ( long columnId : catalogIndex.key.columnIds ) { - CatalogColumnPlacement placement = Catalog.getInstance().getColumnPlacement( getAdapterId(), columnId ); - definition.addColumns( ColumnName.newBuilder().setName( placement.physicalColumnName ) ); - } + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { - final CreateIndexMessage createIndex = CreateIndexMessage.newBuilder().setTxId( txId ).setDefinition( definition ).build(); - this.wrapper.createIndexBlocking( createIndex ); + /* Prepare CREATE INDEX message. */ + final IndexType indexType; + try { + indexType = IndexType.valueOf( catalogIndex.method.toUpperCase() ); + } catch ( Exception e ) { + throw new RuntimeException( "Unknown index type: " + catalogIndex.method ); + } + final IndexName.Builder indexName = IndexName.newBuilder() + .setName( "idx" + catalogIndex.id ).setEntity( + EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( partitionPlacement.physicalTableName ) ); + + final IndexDefinition.Builder definition = IndexDefinition.newBuilder().setType( indexType ).setName( indexName ); + for ( long columnId : catalogIndex.key.columnIds ) { + CatalogColumnPlacement placement = Catalog.getInstance().getColumnPlacement( getAdapterId(), columnId ); + definition.addColumns( ColumnName.newBuilder().setName( placement.physicalColumnName ) ); + } + + final CreateIndexMessage createIndex = CreateIndexMessage.newBuilder().setTxId( txId ).setDefinition( definition ).build(); + this.wrapper.createIndexBlocking( createIndex ); + } } @Override - public void dropIndex( Context context, CatalogIndex catalogIndex ) { + public void dropIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { /* Begin or continue Cottontail DB transaction. */ final TransactionId txId = this.wrapper.beginOrContinue( context.getStatement().getTransaction() ); - + CatalogPartitionPlacement partitionPlacement = catalog.getPartitionPlacement( getAdapterId(), catalog.getTable( catalogIndex.key.tableId ).partitionProperty.partitionIds.get( 0 ) ); /* Prepare DROP INDEX message. 
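   Note that addIndex above creates the index on every partition placement passed to it,
   while this drop resolves only the entity behind the table's first internal partition
   (partitionProperty.partitionIds.get( 0 )). The fully qualified index name has the shape
   (values illustrative): schema = this.currentSchema.getCottontailSchema(),
   entity = partitionPlacement.physicalTableName (e.g. "tab42_part7"),
   name = "idx" + catalogIndex.id (e.g. "idx13").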
*/ final DropIndexMessage.Builder dropIndex = DropIndexMessage.newBuilder().setTxId( txId ); final IndexName indexName = IndexName.newBuilder() - .setEntity( EntityName.newBuilder().setName( Catalog.getInstance().getColumnPlacement( getAdapterId(), catalogIndex.key.columnIds.get( 0 ) ).physicalTableName ).setSchema( currentSchema.getCottontailSchema() ) ) + .setEntity( EntityName.newBuilder().setName( partitionPlacement.physicalTableName ).setSchema( currentSchema.getCottontailSchema() ) ) .setName( "idx" + catalogIndex.id ) .build(); @@ -518,12 +555,14 @@ public void truncate( Context context, CatalogTable table ) { /* Begin or continue Cottontail DB transaction. */ final TransactionId txId = this.wrapper.beginOrContinue( context.getStatement().getTransaction() ); - /* Prepare TRUNCATE message. */ - final String physicalTableName = CottontailNameUtil.getPhysicalTableName( this.getAdapterId(), table.id ); - final TruncateEntityMessage truncate = TruncateEntityMessage.newBuilder().setTxId( txId ).setEntity( - EntityName.newBuilder().setSchema( this.currentSchema.getCottontailSchema() ).setName( physicalTableName ) - ).buildPartial(); - this.wrapper.truncateEntityBlocking( truncate ); + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( getAdapterId(), table.id ) ) { + /* Prepare TRUNCATE message. */ + final String physicalTableName = partitionPlacement.physicalTableName; + final TruncateEntityMessage truncate = TruncateEntityMessage.newBuilder().setTxId( txId ).setEntity( + EntityName.newBuilder().setSchema( this.currentSchema.getCottontailSchema() ).setName( physicalTableName ) + ).buildPartial(); + this.wrapper.truncateEntityBlocking( truncate ); + } } @@ -535,56 +574,63 @@ public void updateColumnType( Context context, CatalogColumnPlacement columnPlac final List placements = this.catalog.getColumnPlacementsOnAdapterSortedByPhysicalPosition( this.getAdapterId(), catalogColumn.tableId ); final List columns = this.buildColumnDefinitions( placements ); - final String currentPhysicalTableName = placements.get( 0 ).physicalTableName; - final String newPhysicalTableName = CottontailNameUtil.incrementNameRevision( currentPhysicalTableName ); + List partitionPlacements = catalog.getPartitionPlacementByTable( getAdapterId(), catalogColumn.tableId ); - final EntityName tableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( currentPhysicalTableName ) - .build(); + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { - final EntityName newTableEntity = EntityName.newBuilder() - .setSchema( this.currentSchema.getCottontailSchema() ) - .setName( newPhysicalTableName ) - .build(); + final String currentPhysicalTableName = partitionPlacement.physicalTableName; + final String newPhysicalTableName = CottontailNameUtil.incrementNameRevision( currentPhysicalTableName ); - final CreateEntityMessage create = CreateEntityMessage.newBuilder() - .setTxId( txId ) - .setDefinition( EntityDefinition.newBuilder().setEntity( newTableEntity ).addAllColumns( columns ) ) - .build(); + final EntityName tableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( currentPhysicalTableName ) + .build(); - if ( !this.wrapper.createEntityBlocking( create ) ) { - throw new RuntimeException( "Unable to create table." 
); - } + final EntityName newTableEntity = EntityName.newBuilder() + .setSchema( this.currentSchema.getCottontailSchema() ) + .setName( newPhysicalTableName ) + .build(); - final Query query = Query.newBuilder().setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( tableEntity ).build() ) ).build(); - final Iterator queryResponse = this.wrapper.query( QueryMessage.newBuilder().setTxId( txId ).setQuery( query ).build() ); - - final From from = From.newBuilder().setScan( Scan.newBuilder().setEntity( newTableEntity ).build() ).build(); - queryResponse.forEachRemaining( response -> { - for ( Tuple tuple : response.getTuplesList() ) { - final InsertMessage.Builder insert = InsertMessage.newBuilder().setTxId( txId ).setFrom( from ); - int i = 0; - for ( Literal d : tuple.getDataList() ) { - insert.addElements( InsertElement.newBuilder() - .setColumn( response.getColumns( i++ ) ) - .setValue( d ) ); - } - this.wrapper.insert( insert.build() ); + final CreateEntityMessage create = CreateEntityMessage.newBuilder() + .setTxId( txId ) + .setDefinition( EntityDefinition.newBuilder().setEntity( newTableEntity ).addAllColumns( columns ) ) + .build(); + + if ( !this.wrapper.createEntityBlocking( create ) ) { + throw new RuntimeException( "Unable to create table." ); } - } ); + final Query query = Query.newBuilder().setFrom( From.newBuilder().setScan( Scan.newBuilder().setEntity( tableEntity ).build() ) ).build(); + final Iterator queryResponse = this.wrapper.query( QueryMessage.newBuilder().setTxId( txId ).setQuery( query ).build() ); + + final From from = From.newBuilder().setScan( Scan.newBuilder().setEntity( newTableEntity ).build() ).build(); + queryResponse.forEachRemaining( response -> { + for ( Tuple tuple : response.getTuplesList() ) { + final InsertMessage.Builder insert = InsertMessage.newBuilder().setTxId( txId ).setFrom( from ); + int i = 0; + for ( Literal d : tuple.getDataList() ) { + insert.addElements( InsertElement.newBuilder() + .setColumn( response.getColumns( i++ ) ) + .setValue( d ) ); + } + this.wrapper.insert( insert.build() ); + } + } ); + + catalog.updatePartitionPlacementPhysicalNames( getAdapterId(), partitionPlacement.partitionId, partitionPlacement.physicalSchemaName, newPhysicalTableName ); + + this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); + } for ( CatalogColumnPlacement ccp : placements ) { catalog.updateColumnPlacementPhysicalNames( getAdapterId(), ccp.columnId, ccp.physicalSchemaName, - newPhysicalTableName, ccp.physicalColumnName, false ); } - this.wrapper.dropEntityBlocking( DropEntityMessage.newBuilder().setTxId( txId ).setEntity( tableEntity ).build() ); + } diff --git a/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/util/CottontailNameUtil.java b/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/util/CottontailNameUtil.java index 3abec115b1..c4e4d0d1dd 100644 --- a/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/util/CottontailNameUtil.java +++ b/cottontail-adapter/src/main/java/org/polypheny/db/adapter/cottontail/util/CottontailNameUtil.java @@ -17,30 +17,21 @@ package org.polypheny.db.adapter.cottontail.util; -import java.util.List; import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.polypheny.db.catalog.Catalog; -import org.polypheny.db.catalog.entity.CatalogColumnPlacement; public class CottontailNameUtil { - private final static Pattern idRevPattern = Pattern.compile( 
"^(col|tab|sch)([0-9]+)(?>r([0-9]+))?$" ); + private final static Pattern idRevPattern = Pattern.compile( "^(col|tab|sch)([0-9]+)(_part)([0-9]+)(?>r([0-9]+))?$" ); - public static String getPhysicalTableName( int storeId, long tableId ) { - List placements = Catalog.getInstance().getColumnPlacementsOnAdapter( storeId, tableId ); - if ( placements.isEmpty() ) { - throw new RuntimeException( "Placements not registered in catalog. This should not happen!" ); + public static String createPhysicalTableName( long tableId, long partitionId ) { + String physicalTableName = "tab" + tableId; + if ( partitionId >= 0 ) { + physicalTableName += "_part" + partitionId; } - - return placements.get( 0 ).physicalTableName; - } - - - public static String createPhysicalTableName( long tableId ) { - return "tab" + tableId; + return physicalTableName; } @@ -52,15 +43,17 @@ public static String createPhysicalColumnName( long columnId ) { public static String incrementNameRevision( String name ) { Matcher m = idRevPattern.matcher( name ); long id; + long partId; long rev; String type; if ( m.find() ) { type = m.group( 1 ); id = Long.parseLong( m.group( 2 ) ); - if ( m.group( 3 ) == null ) { + partId = Long.parseLong( m.group( 4 ) ); + if ( m.group( 5 ) == null ) { rev = 0L; } else { - rev = Long.parseLong( m.group( 3 ) ); + rev = Long.parseLong( m.group( 5 ) ); } } else { throw new IllegalArgumentException( "Not a physical name!" ); @@ -68,7 +61,7 @@ public static String incrementNameRevision( String name ) { rev += 1L; - return type + id + "r" + rev; + return type + id + "_part" + partId + "r" + rev; } } diff --git a/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSchema.java b/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSchema.java index 8a94bf6aa8..80a059e08a 100644 --- a/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSchema.java +++ b/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSchema.java @@ -44,6 +44,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.rel.type.RelDataType; import org.polypheny.db.rel.type.RelDataTypeFactory; @@ -82,7 +83,7 @@ public CsvSchema( URL directoryUrl, CsvTable.Flavor flavor ) { } - public Table createCsvTable( CatalogTable catalogTable, List columnPlacementsOnStore, CsvSource csvSource ) { + public Table createCsvTable( CatalogTable catalogTable, List columnPlacementsOnStore, CsvSource csvSource, CatalogPartitionPlacement partitionPlacement ) { final RelDataTypeFactory typeFactory = new PolyTypeFactoryImpl( RelDataTypeSystem.DEFAULT ); final RelDataTypeFactory.Builder fieldInfo = typeFactory.builder(); List fieldTypes = new LinkedList<>(); @@ -97,7 +98,7 @@ public Table createCsvTable( CatalogTable catalogTable, List i ).toArray(); CsvTable table = createTable( source, RelDataTypeImpl.proto( fieldInfo.build() ), fieldTypes, fields, csvSource ); - tableMap.put( catalogTable.name, table ); + tableMap.put( catalogTable.name + "_" + partitionPlacement.partitionId, table ); return table; } diff --git a/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSource.java b/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSource.java index cac4c37890..08bc663573 100644 --- a/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSource.java +++ 
b/csv-adapter/src/main/java/org/polypheny/db/adapter/csv/CsvSource.java
@@ -23,6 +23,7 @@ import org.polypheny.db.adapter.DeployMode;
import org.polypheny.db.adapter.csv.CsvTable.Flavor;
import org.polypheny.db.catalog.entity.CatalogColumnPlacement;
+import org.polypheny.db.catalog.entity.CatalogPartitionPlacement;
import org.polypheny.db.catalog.entity.CatalogTable;
import org.polypheny.db.information.InformationGroup;
import org.polypheny.db.information.InformationManager;
@@ -91,8 +92,8 @@ public void createNewSchema( SchemaPlus rootSchema, String name ) {
@Override
- public Table createTableSchema( CatalogTable catalogTable, List<CatalogColumnPlacement> columnPlacementsOnStore ) {
- return currentSchema.createCsvTable( catalogTable, columnPlacementsOnStore, this );
+ public Table createTableSchema( CatalogTable catalogTable, List<CatalogColumnPlacement> columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) {
+ return currentSchema.createCsvTable( catalogTable, columnPlacementsOnStore, this, partitionPlacement );
}
@@ -122,8 +123,8 @@ public Map<String, List<ExportedColumn>> getExportedColumns() {
} } else {
fileNames = Arrays.stream( Sources.of( csvDir )
- .file()
- .listFiles( ( d, name ) -> name.endsWith( ".csv" ) || name.endsWith( ".csv.gz" ) ) )
+ .file()
+ .listFiles( ( d, name ) -> name.endsWith( ".csv" ) || name.endsWith( ".csv.gz" ) ) )
.sequential()
.map( File::getName )
.collect( Collectors.toSet() );
diff --git a/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java b/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java
index 2e65af41d0..b3674cdeef 100644
--- a/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java
+++ b/dbms/src/main/java/org/polypheny/db/PolyphenyDb.java
@@ -44,6 +44,12 @@ import org.polypheny.db.iface.QueryInterfaceManager;
import org.polypheny.db.information.HostInformation;
import org.polypheny.db.information.JavaInformation;
+import org.polypheny.db.monitoring.core.MonitoringService;
+import org.polypheny.db.monitoring.core.MonitoringServiceProvider;
+import org.polypheny.db.partition.FrequencyMap;
+import org.polypheny.db.partition.FrequencyMapImpl;
+import org.polypheny.db.partition.PartitionManagerFactory;
+import org.polypheny.db.partition.PartitionManagerFactoryImpl;
import org.polypheny.db.processing.AuthenticatorImpl;
import org.polypheny.db.statistic.StatisticQueryProcessor;
import org.polypheny.db.statistic.StatisticsManager;
@@ -239,6 +245,10 @@ public void join( final long millis ) throws InterruptedException {
// Initialize DdlManager
DdlManager.setAndGetInstance( new DdlManagerImpl( catalog ) );
+ // Initialize PartitionManagerFactory
+ PartitionManagerFactory.setAndGetInstance( new PartitionManagerFactoryImpl() );
+ FrequencyMap.setAndGetInstance( new FrequencyMapImpl( catalog ) );
+
// Start Polypheny UI
final HttpServer httpServer = new HttpServer( transactionManager, authenticator );
Thread polyphenyUiThread = new Thread( httpServer );
@@ -275,6 +285,8 @@ public void join( final long millis ) throws InterruptedException {
new UiTestingMonitoringPage();
}
+ // Eagerly initialize the monitoring service
+ MonitoringService monitoringService = MonitoringServiceProvider.getInstance();
+
log.info( "****************************************************************************************************" );
log.info( " Polypheny-DB successfully started and ready to process your queries!"
); log.info( " The UI is waiting for you on port {}:", RuntimeConfig.WEBUI_SERVER_PORT.getInteger() ); diff --git a/dbms/src/main/java/org/polypheny/db/ddl/DdlManagerImpl.java b/dbms/src/main/java/org/polypheny/db/ddl/DdlManagerImpl.java index f28fc8241d..1ef59105f1 100644 --- a/dbms/src/main/java/org/polypheny/db/ddl/DdlManagerImpl.java +++ b/dbms/src/main/java/org/polypheny/db/ddl/DdlManagerImpl.java @@ -16,10 +16,9 @@ package org.polypheny.db.ddl; -import static org.reflections.Reflections.log; - import com.google.common.collect.ImmutableList; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; @@ -27,6 +26,7 @@ import java.util.Map; import java.util.Set; import java.util.stream.Collectors; +import lombok.extern.slf4j.Slf4j; import org.apache.commons.lang3.StringUtils; import org.polypheny.db.adapter.Adapter; import org.polypheny.db.adapter.AdapterManager; @@ -40,6 +40,7 @@ import org.polypheny.db.catalog.Catalog.ConstraintType; import org.polypheny.db.catalog.Catalog.ForeignKeyOption; import org.polypheny.db.catalog.Catalog.IndexType; +import org.polypheny.db.catalog.Catalog.PartitionType; import org.polypheny.db.catalog.Catalog.PlacementType; import org.polypheny.db.catalog.Catalog.SchemaType; import org.polypheny.db.catalog.Catalog.TableType; @@ -52,7 +53,8 @@ import org.polypheny.db.catalog.entity.CatalogForeignKey; import org.polypheny.db.catalog.entity.CatalogIndex; import org.polypheny.db.catalog.entity.CatalogKey; -import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogPartitionGroup; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogPrimaryKey; import org.polypheny.db.catalog.entity.CatalogSchema; import org.polypheny.db.catalog.entity.CatalogTable; @@ -82,7 +84,7 @@ import org.polypheny.db.ddl.exception.LastPlacementException; import org.polypheny.db.ddl.exception.MissingColumnPlacementException; import org.polypheny.db.ddl.exception.NotNullAndDefaultValueException; -import org.polypheny.db.ddl.exception.PartitionNamesNotUniqueException; +import org.polypheny.db.ddl.exception.PartitionGroupNamesNotUniqueException; import org.polypheny.db.ddl.exception.PlacementAlreadyExistsException; import org.polypheny.db.ddl.exception.PlacementIsPrimaryException; import org.polypheny.db.ddl.exception.PlacementNotExistsException; @@ -90,6 +92,10 @@ import org.polypheny.db.ddl.exception.UnknownIndexMethodException; import org.polypheny.db.partition.PartitionManager; import org.polypheny.db.partition.PartitionManagerFactory; +import org.polypheny.db.partition.properties.PartitionProperty; +import org.polypheny.db.partition.properties.TemperaturePartitionProperty; +import org.polypheny.db.partition.properties.TemperaturePartitionProperty.PartitionCostIndication; +import org.polypheny.db.partition.raw.RawTemperaturePartitionInformation; import org.polypheny.db.prepare.RelOptTableImpl; import org.polypheny.db.processing.DataMigrator; import org.polypheny.db.rel.AbstractRelNode; @@ -105,12 +111,14 @@ import org.polypheny.db.runtime.PolyphenyDbException; import org.polypheny.db.schema.LogicalTable; import org.polypheny.db.schema.LogicalView; +import org.polypheny.db.schema.PolySchemaBuilder; import org.polypheny.db.transaction.Statement; import org.polypheny.db.transaction.TransactionException; import org.polypheny.db.type.ArrayType; import org.polypheny.db.type.PolyType; +@Slf4j public class DdlManagerImpl 
extends DdlManager { private final Catalog catalog; @@ -247,7 +255,7 @@ public void addAdapter( String adapterName, String clazzName, Map placeholder catalog.updateColumnPlacementPhysicalPosition( adapter.getAdapterId(), columnId, exportedColumn.physicalPosition ); if ( exportedColumn.primary ) { primaryKeyColIds.add( columnId ); @@ -255,6 +263,14 @@ public void addAdapter( String adapterName, String clazzName, Map exportedColumns = dataSource.getExportedColumns().get( physicalTableName ); // Check if physicalColumnName is valid @@ -388,7 +404,7 @@ public void addColumnToSourceTable( CatalogTable catalogTable, String columnPhys } // Make sure this physical column has not already been added to this table - for ( CatalogColumnPlacement ccp : catalog.getColumnPlacementsOnAdapter( adapterId, catalogTable.id ) ) { + for ( CatalogColumnPlacement ccp : catalog.getColumnPlacementsOnAdapterPerTable( adapterId, catalogTable.id ) ) { if ( ccp.physicalColumnName.equalsIgnoreCase( columnPhysicalName ) ) { throw new RuntimeException( "The physical column '" + columnPhysicalName + "' has already been added to this table!" ); } @@ -422,7 +438,7 @@ public void addColumnToSourceTable( CatalogTable catalogTable, String columnPhys exportedColumn.physicalSchemaName, exportedColumn.physicalTableName, exportedColumn.physicalColumnName, - null ); + null );//Not a valid partitionID --> placeholder // Set column position catalog.updateColumnPlacementPhysicalPosition( adapterId, columnId, exportedColumn.physicalPosition ); @@ -496,7 +512,7 @@ public void addColumn( String columnName, CatalogTable catalogTable, String befo null, // Will be set later null, // Will be set later null, // Will be set later - null ); + null );//Not a valid partitionID --> placeholder AdapterManager.getInstance().getStore( store.getAdapterId() ).addColumn( statement.getPrepareContext(), catalogTable, addedColumn ); } @@ -610,16 +626,16 @@ public void addIndex( CatalogTable catalogTable, String indexMethodName, List columnIds, List partitionIds, List partitionNames, DataStore dataStore, Statement statement ) throws PlacementAlreadyExistsException { + public void addPlacement( CatalogTable catalogTable, List columnIds, List partitionGroupIds, List partitionGroupNames, DataStore dataStore, Statement statement ) throws PlacementAlreadyExistsException { List addedColumns = new LinkedList<>(); - List tempPartitionList = new ArrayList<>(); + List tempPartitionGroupList = new ArrayList<>(); // Check whether this placement already exists for ( int storeId : catalogTable.placementsByAdapter.keySet() ) { @@ -633,72 +649,78 @@ public void addPlacement( CatalogTable catalogTable, List columnIds, List< } // Select partitions to create on this placement - if ( catalogTable.isPartitioned ) { - boolean isDataPlacementPartitioned = false; - long tableId = catalogTable.id; - // Needed to ensure that column placements on the same store contain all the same partitions - // Check if this column placement is the first on the data placement - // If this returns null this means that this is the first placement and partition list can therefore be specified - List currentPartList = new ArrayList<>(); - currentPartList = catalog.getPartitionsOnDataPlacement( dataStore.getAdapterId(), catalogTable.id ); - - isDataPlacementPartitioned = !currentPartList.isEmpty(); - - if ( !partitionIds.isEmpty() && partitionNames.isEmpty() ) { - - // Abort if a manual partitionList has been specified even though the data placement has already been partitioned - if ( 
isDataPlacementPartitioned ) { - throw new RuntimeException( "WARNING: The Data Placement for table: '" + catalogTable.name + "' on store: '" - + dataStore.getAdapterName() + "' already contains manually specified partitions: " + currentPartList + ". Use 'ALTER TABLE ... MODIFY PARTITIONS...' instead" ); - } + // if ( catalogTable.isPartitioned ) { + boolean isDataPlacementPartitioned = false; + long tableId = catalogTable.id; + // Needed to ensure that column placements on the same store contain all the same partitions + // Check if this column placement is the first on the data placement + // If this returns null this means that this is the first placement and partition list can therefore be specified + List currentPartList = new ArrayList<>(); + currentPartList = catalog.getPartitionGroupsOnDataPlacement( dataStore.getAdapterId(), catalogTable.id ); + + isDataPlacementPartitioned = !currentPartList.isEmpty(); + + if ( !partitionGroupIds.isEmpty() && partitionGroupNames.isEmpty() ) { + + // Abort if a manual partitionList has been specified even though the data placement has already been partitioned + if ( isDataPlacementPartitioned ) { + throw new RuntimeException( "WARNING: The Data Placement for table: '" + catalogTable.name + "' on store: '" + + dataStore.getAdapterName() + "' already contains manually specified partitions: " + currentPartList + ". Use 'ALTER TABLE ... MODIFY PARTITIONS...' instead" ); + } - log.debug( "Table is partitioned and concrete partitionList has been specified " ); - // First convert specified index to correct partitionId - for ( int partitionId : partitionIds ) { - // Check if specified partition index is even part of table and if so get corresponding uniquePartId - try { - tempPartitionList.add( catalogTable.partitionIds.get( partitionId ) ); - } catch ( IndexOutOfBoundsException e ) { - throw new RuntimeException( "Specified Partition-Index: '" + partitionId + "' is not part of table '" - + catalogTable.name + "', has only " + catalogTable.numPartitions + " partitions" ); - } + log.debug( "Table is partitioned and concrete partitionList has been specified " ); + // First convert specified index to correct partitionGroupId + for ( int partitionGroupId : partitionGroupIds ) { + // Check if specified partition index is even part of table and if so get corresponding uniquePartId + try { + tempPartitionGroupList.add( catalogTable.partitionProperty.partitionGroupIds.get( partitionGroupId ) ); + } catch ( IndexOutOfBoundsException e ) { + throw new RuntimeException( "Specified Partition-Index: '" + partitionGroupId + "' is not part of table '" + + catalogTable.name + "', has only " + catalogTable.partitionProperty.numPartitionGroups + " partitions" ); } - } else if ( !partitionNames.isEmpty() && partitionIds.isEmpty() ) { + } + } else if ( !partitionGroupNames.isEmpty() && partitionGroupIds.isEmpty() ) { - if ( isDataPlacementPartitioned ) { - throw new RuntimeException( "WARNING: The Data Placement for table: '" + catalogTable.name + "' on store: '" - + dataStore.getAdapterName() + "' already contains manually specified partitions: " + currentPartList + ". Use 'ALTER TABLE ... MODIFY PARTITIONS...' instead" ); - } + if ( isDataPlacementPartitioned ) { + throw new RuntimeException( "WARNING: The Data Placement for table: '" + catalogTable.name + "' on store: '" + + dataStore.getAdapterName() + "' already contains manually specified partitions: " + currentPartList + ". Use 'ALTER TABLE ... MODIFY PARTITIONS...' 
instead" ); + } - List catalogPartitions = catalog.getPartitions( tableId ); - for ( String partitionName : partitionNames ) { - boolean isPartOfTable = false; - for ( CatalogPartition catalogPartition : catalogPartitions ) { - if ( partitionName.equals( catalogPartition.partitionName.toLowerCase() ) ) { - tempPartitionList.add( catalogPartition.id ); - isPartOfTable = true; - break; - } + List catalogPartitionGroups = catalog.getPartitionGroups( tableId ); + for ( String partitionName : partitionGroupNames ) { + boolean isPartOfTable = false; + for ( CatalogPartitionGroup catalogPartitionGroup : catalogPartitionGroups ) { + if ( partitionName.equals( catalogPartitionGroup.partitionGroupName.toLowerCase() ) ) { + tempPartitionGroupList.add( catalogPartitionGroup.id ); + isPartOfTable = true; + break; } - if ( !isPartOfTable ) { - throw new RuntimeException( "Specified Partition-Name: '" + partitionName + "' is not part of table '" - + catalogTable.name + "'. Available partitions: " + String.join( ",", catalog.getPartitionNames( tableId ) ) ); + } + if ( !isPartOfTable ) { + throw new RuntimeException( "Specified Partition-Name: '" + partitionName + "' is not part of table '" + + catalogTable.name + "'. Available partitions: " + String.join( ",", catalog.getPartitionGroupNames( tableId ) ) ); - } } } - // Simply Place all partitions on placement since nothing has been specified - else if ( partitionIds.isEmpty() && partitionNames.isEmpty() ) { - log.debug( "Table is partitioned and concrete partitionList has NOT been specified " ); + } + // Simply Place all partitions on placement since nothing has been specified + else if ( partitionGroupIds.isEmpty() && partitionGroupNames.isEmpty() ) { + log.debug( "Table is partitioned and concrete partitionList has NOT been specified " ); - if ( isDataPlacementPartitioned ) { - // If DataPlacement already contains partitions then create new placement with same set of partitions. - tempPartitionList = currentPartList; - } else { - tempPartitionList = catalogTable.partitionIds; - } + if ( isDataPlacementPartitioned ) { + // If DataPlacement already contains partitions then create new placement with same set of partitions. 
+ tempPartitionGroupList = currentPartList;
+ } else {
+ tempPartitionGroupList = catalogTable.partitionProperty.partitionGroupIds;
} }
+ //}
+
+ // All internal partitions placed on this store
+ List<Long> partitionIds = new ArrayList<>();
+
+ // Gather all partitions relevant to add depending on the specified partitionGroup
+ tempPartitionGroupList.forEach( pg -> catalog.getPartitions( pg ).forEach( p -> partitionIds.add( p.id ) ) );
// Create column placements
for ( long cid : columnIds ) {
@@ -709,10 +731,10 @@ else if ( partitionIds.isEmpty() && partitionNames.isEmpty() ) {
null, null, null,
- tempPartitionList );
+ tempPartitionGroupList );
addedColumns.add( catalog.getColumn( cid ) );
}
- //Check if placement includes primary key columns
+ // Check if placement includes primary key columns
CatalogPrimaryKey primaryKey = catalog.getPrimaryKey( catalogTable.primaryKey );
for ( long cid : primaryKey.columnIds ) {
if ( !columnIds.contains( cid ) ) {
@@ -723,15 +745,27 @@ else if ( partitionIds.isEmpty() && partitionNames.isEmpty() ) {
null, null, null,
- tempPartitionList );
+ tempPartitionGroupList );
addedColumns.add( catalog.getColumn( cid ) );
} }
+
+ // Need to create partitionPlacements first in order to trigger schema creation on PolySchemaBuilder
+ for ( long partitionId : partitionIds ) {
+ catalog.addPartitionPlacement(
+ dataStore.getAdapterId(),
+ catalogTable.id,
+ partitionId,
+ PlacementType.AUTOMATIC,
+ null,
+ null );
+ }
+
// Create table on store
- dataStore.createTable( statement.getPrepareContext(), catalogTable );
+ dataStore.createTable( statement.getPrepareContext(), catalogTable, catalogTable.partitionProperty.partitionIds );
// Copy data to the newly added placements
DataMigrator dataMigrator = statement.getTransaction().getDataMigrator();
- dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( dataStore.getAdapterId() ), addedColumns );
+ dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( dataStore.getAdapterId() ), addedColumns, partitionIds );
}
@@ -752,7 +786,7 @@ public void addPrimaryKey( CatalogTable catalogTable, List columnNames,
// Add new column placements
long pkColumnId = oldPk.columnIds.get( 0 ); // It is sufficient to check for one because all get replicated on all stores
- List<CatalogColumnPlacement> oldPkPlacements = catalog.getColumnPlacements( pkColumnId );
+ List<CatalogColumnPlacement> oldPkPlacements = catalog.getColumnPlacement( pkColumnId );
for ( CatalogColumnPlacement ccp : oldPkPlacements ) {
for ( long columnId : columnIds ) {
if ( !catalog.checkIfExistsColumnPlacement( ccp.adapterId, columnId ) ) {
@@ -885,7 +919,7 @@ public void dropIndex( CatalogTable catalogTable, String indexName, Statement st
IndexManager.getInstance().deleteIndex( index );
} else {
DataStore storeInstance = AdapterManager.getInstance().getStore( index.location );
- storeInstance.dropIndex( statement.getPrepareContext(), index );
+ storeInstance.dropIndex( statement.getPrepareContext(), index, catalog.getPartitionsOnDataPlacement( index.location, catalogTable.id ) );
}
catalog.deleteIndex( index.id );
@@ -903,8 +937,8 @@ public void dropPlacement( CatalogTable catalogTable, DataStore storeInstance, S
}
// Check if there is another placement for every column on this store
- for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapter( storeInstance.getAdapterId(), catalogTable.id ) ) {
- List<CatalogColumnPlacement> existingPlacements = catalog.getColumnPlacements( placement.columnId );
+ for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapterPerTable( storeInstance.getAdapterId(), catalogTable.id ) ) {
+ List<CatalogColumnPlacement> existingPlacements = catalog.getColumnPlacement( placement.columnId );
if ( existingPlacements.size() < 2 ) {
throw new LastPlacementException();
}
@@ -917,24 +951,27 @@ public void dropPlacement( CatalogTable catalogTable, DataStore storeInstance, S
IndexManager.getInstance().deleteIndex( index );
} else {
// Delete index on store
- AdapterManager.getInstance().getStore( index.location ).dropIndex( statement.getPrepareContext(), index );
+ AdapterManager.getInstance().getStore( index.location ).dropIndex(
+ statement.getPrepareContext(),
+ index,
+ catalog.getPartitionsOnDataPlacement( index.location, catalogTable.id ) );
}
// Delete index in catalog
catalog.deleteIndex( index.id );
} }
// Physically delete the data from the store
- storeInstance.dropTable( statement.getPrepareContext(), catalogTable );
+ storeInstance.dropTable( statement.getPrepareContext(), catalogTable, catalog.getPartitionsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id ) );
// Inform routing
- statement.getRouter().dropPlacements( catalog.getColumnPlacementsOnAdapter( storeInstance.getAdapterId(), catalogTable.id ) );
+ statement.getRouter().dropPlacements( catalog.getColumnPlacementsOnAdapterPerTable( storeInstance.getAdapterId(), catalogTable.id ) );
// Delete placement in the catalog
- List<CatalogColumnPlacement> placements = catalog.getColumnPlacementsOnAdapter( storeInstance.getAdapterId(), catalogTable.id );
+ List<CatalogColumnPlacement> placements = catalog.getColumnPlacementsOnAdapterPerTable( storeInstance.getAdapterId(), catalogTable.id );
for ( CatalogColumnPlacement placement : placements ) {
catalog.deleteColumnPlacement( storeInstance.getAdapterId(), placement.columnId );
}
// Remove all partition groups of this table from this data placement
- catalog.deletePartitionsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id );
+ catalog.deletePartitionGroupsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id );
}
@@ -965,7 +1002,7 @@ public void setColumnType( CatalogTable catalogTable, String columnName, ColumnT
type.scale, type.dimension, type.cardinality );
- for ( CatalogColumnPlacement placement : catalog.getColumnPlacements( catalogColumn.id ) ) {
+ for ( CatalogColumnPlacement placement : catalog.getColumnPlacement( catalogColumn.id ) ) {
AdapterManager.getInstance().getStore( placement.adapterId ).updateColumnType(
statement.getPrepareContext(),
placement,
@@ -1077,17 +1114,19 @@ public void dropDefaultValue( CatalogTable catalogTable, String columnName, Stat
@Override
- public void modifyColumnPlacement( CatalogTable catalogTable, List<Long> columnIds, List<Integer> partitionIds, List<String> partitionNames, DataStore storeInstance, Statement statement ) throws PlacementNotExistsException, IndexPreventsRemovalException, LastPlacementException {
+ public void modifyColumnPlacement( CatalogTable catalogTable, List<Long> columnIds, List<Integer> partitionGroupIds, List<String> partitionGroupNames, DataStore storeInstance, Statement statement )
+ throws PlacementNotExistsException, IndexPreventsRemovalException, LastPlacementException {
// Check whether this placement already exists
if ( !catalogTable.placementsByAdapter.containsKey( storeInstance.getAdapterId() ) ) {
throw new PlacementNotExistsException();
}
- //check if views are dependent from this view
+ // Check whether any views depend on this table
checkViewDependencies( catalogTable );
- // Which columns to remove
- for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapter( storeInstance.getAdapterId(), catalogTable.id ) ) {
+ // Before physically removing the placement, check that the partition distribution remains valid and sufficient
+ // Identify which columns need to be removed
+ for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapterPerTable( storeInstance.getAdapterId(), catalogTable.id ) ) {
if ( !columnIds.contains( placement.columnId ) ) {
// Check whether there are any indexes located on the store requiring this column
for ( CatalogIndex index : catalog.getIndexes( catalogTable.id, false ) ) {
@@ -1109,7 +1148,7 @@ public void modifyColumnPlacement( CatalogTable catalogTable, List columnI
} else {
// It is not a primary key. Remove the column
// Check if there is another placement for this column
- List<CatalogColumnPlacement> existingPlacements = catalog.getColumnPlacements( placement.columnId );
+ List<CatalogColumnPlacement> existingPlacements = catalog.getColumnPlacement( placement.columnId );
if ( existingPlacements.size() < 2 ) {
throw new LastPlacementException();
}
@@ -1121,47 +1160,62 @@ public void modifyColumnPlacement( CatalogTable catalogTable, List columnI
} }
- List<Long> tempPartitionList = new ArrayList<>();
+ List<Long> tempPartitionGroupList = new ArrayList<>();
+
// Select partitions to create on this placement
if ( catalogTable.isPartitioned ) {
long tableId = catalogTable.id;
// If index partitions are specified
- if ( !partitionIds.isEmpty() && partitionNames.isEmpty() ) {
- // First convert specified index to correct partitionId
- for ( int partitionId : partitionIds ) {
+ if ( !partitionGroupIds.isEmpty() && partitionGroupNames.isEmpty() ) {
+ // First convert specified index to correct partitionGroupId
+ for ( int partitionGroupId : partitionGroupIds ) {
// Check if specified partition index is even part of table and if so get corresponding uniquePartId
try {
- tempPartitionList.add( catalogTable.partitionIds.get( partitionId ) );
+ tempPartitionGroupList.add( catalogTable.partitionProperty.partitionGroupIds.get( partitionGroupId ) );
} catch ( IndexOutOfBoundsException e ) {
- throw new RuntimeException( "Specified Partition-Index: '" + partitionId + "' is not part of table '"
- + catalogTable.name + "', has only " + catalogTable.numPartitions + " partitions" );
+ throw new RuntimeException( "Specified Partition-Index: '" + partitionGroupId + "' is not part of table '"
+ + catalogTable.name + "', has only " + catalogTable.partitionProperty.partitionGroupIds.size() + " partition groups" );
}
}
- catalog.updatePartitionsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id, tempPartitionList );
+ catalog.updatePartitionGroupsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id, tempPartitionGroupList );
}
// If name partitions are specified
- else if ( !partitionNames.isEmpty() && partitionIds.isEmpty() ) {
- List<CatalogPartition> catalogPartitions = catalog.getPartitions( tableId );
- for ( String partitionName : partitionNames ) {
+ else if ( !partitionGroupNames.isEmpty() && partitionGroupIds.isEmpty() ) {
+ List<CatalogPartitionGroup> catalogPartitionGroups = catalog.getPartitionGroups( tableId );
+ for ( String partitionName : partitionGroupNames ) {
boolean isPartOfTable = false;
- for ( CatalogPartition catalogPartition : catalogPartitions ) {
- if ( partitionName.equals( catalogPartition.partitionName.toLowerCase() ) ) {
- tempPartitionList.add( catalogPartition.id );
+ for ( CatalogPartitionGroup catalogPartitionGroup : catalogPartitionGroups ) {
+ if ( partitionName.equals( catalogPartitionGroup.partitionGroupName.toLowerCase() ) ) {
+ tempPartitionGroupList.add( catalogPartitionGroup.id );
isPartOfTable = true;
break;
} }
if ( !isPartOfTable ) {
throw new RuntimeException(
"Specified partition name: '" + partitionName + "' is not part of table '" - + catalogTable.name + "'. Available partitions: " + String.join( ",", catalog.getPartitionNames( tableId ) ) ); + + catalogTable.name + "'. Available partitions: " + String.join( ",", catalog.getPartitionGroupNames( tableId ) ) ); } } - catalog.updatePartitionsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id, tempPartitionList ); + catalog.updatePartitionGroupsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id, tempPartitionGroupList ); + } else if ( partitionGroupNames.isEmpty() && partitionGroupIds.isEmpty() ) { + // If nothing has been explicitly specified keep current placement of partitions. + // Since it's impossible to have a placement without any partitions anyway + log.debug( "Table is partitioned and concrete partitionList has NOT been specified " ); + tempPartitionGroupList = catalogTable.partitionProperty.partitionGroupIds; } + } else { + tempPartitionGroupList.add( catalogTable.partitionProperty.partitionGroupIds.get( 0 ) ); } + // All internal partitions placed on this store + List partitionIds = new ArrayList<>(); + + // Gather all partitions relevant to add depending on the specified partitionGroup + tempPartitionGroupList.forEach( pg -> catalog.getPartitions( pg ).forEach( p -> partitionIds.add( p.id ) ) ); + // Which columns to add List addedColumns = new LinkedList<>(); + for ( long cid : columnIds ) { if ( catalog.checkIfExistsColumnPlacement( storeInstance.getAdapterId(), cid ) ) { CatalogColumnPlacement placement = catalog.getColumnPlacement( storeInstance.getAdapterId(), cid ); @@ -1178,17 +1232,108 @@ else if ( !partitionNames.isEmpty() && partitionIds.isEmpty() ) { null, null, null, - tempPartitionList ); + tempPartitionGroupList ); // Add column on store storeInstance.addColumn( statement.getPrepareContext(), catalogTable, catalog.getColumn( cid ) ); // Add to list of columns for which we need to copy data addedColumns.add( catalog.getColumn( cid ) ); } } + // Copy the data to the newly added column placements DataMigrator dataMigrator = statement.getTransaction().getDataMigrator(); if ( addedColumns.size() > 0 ) { - dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( storeInstance.getAdapterId() ), addedColumns ); + dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( storeInstance.getAdapterId() ), addedColumns, partitionIds ); + } + } + + + @Override + public void modifyPartitionPlacement( CatalogTable catalogTable, List partitionGroupIds, DataStore storeInstance, Statement statement ) { + int storeId = storeInstance.getAdapterId(); + List newPartitions = new ArrayList<>(); + List removedPartitions = new ArrayList<>(); + + List currentPartitionGroupsOnStore = catalog.getPartitionGroupsOnDataPlacement( storeId, catalogTable.id ); + + // Get PartitionGroups that have been removed + for ( long partitionGroupId : currentPartitionGroupsOnStore ) { + if ( !partitionGroupIds.contains( partitionGroupId ) ) { + catalog.getPartitions( partitionGroupId ).forEach( p -> removedPartitions.add( p.id ) ); + + } + } + + // Get PartitionGroups that have been newly added + for ( Long partitionGroupId : partitionGroupIds ) { + if ( !currentPartitionGroupsOnStore.contains( partitionGroupId ) ) { + catalog.getPartitions( partitionGroupId ).forEach( p -> newPartitions.add( p.id ) ); + } + } + + // Check for the specified columnId if we still have a ColumnPlacement for every partitionGroup + // Check for removed partitions if every CCP still has 
all partitions somewhere + for ( long partitionId : removedPartitions ) { + List tempIds = new ArrayList<>( catalogTable.columnIds ); + boolean partitionChecked = false; + + for ( CatalogPartitionPlacement cpp : catalog.getPartitionPlacements( partitionId ) ) { + if ( cpp.adapterId == storeId ) { + continue; + } + catalog.getColumnPlacementsOnAdapter( cpp.adapterId ).forEach( ccp -> tempIds.remove( ccp.columnId ) ); + if ( tempIds.isEmpty() ) { + partitionChecked = true; + break; + } + } + + if ( partitionChecked == false ) { + throw new RuntimeException( "Invalid partition distribution" ); + } + } + + // Update + catalog.updatePartitionGroupsOnDataPlacement( storeId, catalogTable.id, partitionGroupIds ); + + // Copy the data to the newly added column placements + DataMigrator dataMigrator = statement.getTransaction().getDataMigrator(); + if ( newPartitions.size() > 0 ) { + // Need to create partitionPlacements first in order to trigger schema creation on PolySchemaBuilder + for ( long partitionId : newPartitions ) { + catalog.addPartitionPlacement( + storeInstance.getAdapterId(), + catalogTable.id, + partitionId, + PlacementType.AUTOMATIC, + null, + null ); + } + + storeInstance.createTable( statement.getPrepareContext(), catalogTable, newPartitions ); + + // Get only columns that are actually on that store + List necessaryColumns = new LinkedList<>(); + catalog.getColumnPlacementsOnAdapterPerTable( storeInstance.getAdapterId(), catalogTable.id ).forEach( cp -> necessaryColumns.add( catalog.getColumn( cp.columnId ) ) ); + dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( storeId ), necessaryColumns, newPartitions ); + + // Add indexes on this new Partition Placement if there is already an index + for ( CatalogIndex currentIndex : catalog.getIndexes( catalogTable.id, false ) ) { + if ( currentIndex.location == storeId ) { + storeInstance.addIndex( statement.getPrepareContext(), currentIndex, newPartitions ); + } + } + } + + if ( removedPartitions.size() > 0 ) { + storeInstance.dropTable( statement.getPrepareContext(), catalogTable, removedPartitions ); + + // Indexes on this new Partition Placement if there is already an index + for ( CatalogIndex currentIndex : catalog.getIndexes( catalogTable.id, false ) ) { + if ( currentIndex.location == storeId ) { + storeInstance.dropIndex( statement.getPrepareContext(), currentIndex, removedPartitions ); + } + } } } @@ -1231,7 +1376,8 @@ public void addColumnPlacement( CatalogTable catalogTable, String columnName, Da storeInstance.addColumn( statement.getPrepareContext(), catalogTable, catalogColumn ); // Copy the data to the newly added column placements DataMigrator dataMigrator = statement.getTransaction().getDataMigrator(); - dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( storeInstance.getAdapterId() ), ImmutableList.of( catalogColumn ) ); + dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( storeInstance.getAdapterId() ), + ImmutableList.of( catalogColumn ), catalog.getPartitionsOnDataPlacement( storeInstance.getAdapterId(), catalogTable.id ) ); } } @@ -1259,7 +1405,7 @@ public void dropColumnPlacement( CatalogTable catalogTable, String columnName, D } } // Check if there are is another placement for this column - List existingPlacements = catalog.getColumnPlacements( catalogColumn.id ); + List existingPlacements = catalog.getColumnPlacement( catalogColumn.id ); if ( existingPlacements.size() < 2 ) { throw new LastPlacementException(); } @@ -1445,7 +1591,7 @@ private List 
getUnderlyingColumns( RelNode relNode, RelDataType fieldList @Override - public void createTable( long schemaId, String tableName, List columns, List constraints, boolean ifNotExists, List stores, PlacementType placementType, Statement statement ) throws TableAlreadyExistsException, ColumnNotExistsException, UnknownPartitionTypeException { + public void createTable( long schemaId, String tableName, List columns, List constraints, boolean ifNotExists, List stores, PlacementType placementType, Statement statement ) throws TableAlreadyExistsException, ColumnNotExistsException, UnknownPartitionTypeException, UnknownColumnException, PartitionGroupNamesNotUniqueException { try { // Check if there is already a table with this name if ( catalog.checkIfExistsTable( schemaId, tableName ) ) { @@ -1491,9 +1637,22 @@ public void createTable( long schemaId, String tableName, List stores, Statement statement ) throws GenericCatalogException, UnknownPartitionTypeException, UnknownColumnException, PartitionGroupNamesNotUniqueException { CatalogColumn catalogColumn = catalog.getColumn( partitionInfo.table.id, partitionInfo.columnName ); - Catalog.PartitionType actualPartitionType = Catalog.PartitionType.getByName( partitionInfo.typeName ); + PartitionType actualPartitionType = PartitionType.getByName( partitionInfo.typeName ); // Convert partition names and check whether they are unique - List sanitizedPartitionNames = partitionInfo.partitionNames + List sanitizedPartitionGroupNames = partitionInfo.partitionGroupNames .stream() .map( name -> name.trim().toLowerCase() ) .collect( Collectors.toList() ); - if ( sanitizedPartitionNames.size() != new HashSet<>( sanitizedPartitionNames ).size() ) { - throw new PartitionNamesNotUniqueException(); + if ( sanitizedPartitionGroupNames.size() != new HashSet<>( sanitizedPartitionGroupNames ).size() ) { + throw new PartitionGroupNamesNotUniqueException(); } // Check if specified partitionColumn is even part of the table if ( log.isDebugEnabled() ) { - log.debug( "Creating partition for table: {} with id {} on schema: {} on column: {}", partitionInfo.table.name, partitionInfo.table.id, partitionInfo.table.getSchemaName(), catalogColumn.id ); + log.debug( "Creating partition group for table: {} with id {} on schema: {} on column: {}", partitionInfo.table.name, partitionInfo.table.id, partitionInfo.table.getSchemaName(), catalogColumn.id ); } + CatalogTable unPartitionedTable = catalog.getTable( partitionInfo.table.id ); + // Get partition manager - PartitionManagerFactory partitionManagerFactory = new PartitionManagerFactory(); - PartitionManager partitionManager = partitionManagerFactory.getInstance( actualPartitionType ); + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( actualPartitionType ); // Check whether partition function supports type of partition column if ( !partitionManager.supportsColumnOfType( catalogColumn.type ) ) { throw new RuntimeException( "The partition function " + actualPartitionType + " does not support columns of type " + catalogColumn.type ); } - int numberOfPartitions = partitionInfo.numberOf; + int numberOfPartitionGroups = partitionInfo.numberOfPartitionGroups; // Calculate how many partitions exist if partitioning is applied. 
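/*
   For TEMPERATURE partitioning (handled further below), the total number of internal
   partitions is split between the HOT and COLD groups by the configured hot access
   percentage, with a floor of one HOT partition. A worked example with illustrative numbers:

       numberOfPartitions       = 10
       hotAccessPercentageIn    = 20
       numberOfPartitionsInHot  = 10 * 20 / 100 = 2
       numberOfPartitionsInCold = 10 - 2        = 8

   Each group already holds one partition right after creation, so the loops below only
   add the remaining 1 HOT and 7 COLD partitions.
*/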
long partId; - if ( partitionInfo.partitionNames.size() >= 2 && partitionInfo.numberOf == 0 ) { - numberOfPartitions = partitionInfo.partitionNames.size(); + if ( partitionInfo.partitionGroupNames.size() >= 2 && partitionInfo.numberOfPartitionGroups == 0 ) { + numberOfPartitionGroups = partitionInfo.partitionGroupNames.size(); } - if ( partitionManager.requiresUnboundPartition() ) { + int numberOfPartitions = partitionInfo.numberOfPartitions; + int numberOfPartitionsPerGroup = partitionManager.getNumberOfPartitionsPerGroup( numberOfPartitions ); + + if ( partitionManager.requiresUnboundPartitionGroup() ) { // Because of the implicit unbound partition - numberOfPartitions = partitionInfo.partitionNames.size(); - numberOfPartitions += 1; + numberOfPartitionGroups = partitionInfo.partitionGroupNames.size(); + numberOfPartitionGroups += 1; } // Validate partition setup - if ( !partitionManager.validatePartitionSetup( partitionInfo.qualifiers, numberOfPartitions, partitionInfo.partitionNames, catalogColumn ) ) { + if ( !partitionManager.validatePartitionGroupSetup( partitionInfo.qualifiers, numberOfPartitionGroups, partitionInfo.partitionGroupNames, catalogColumn ) ) { throw new RuntimeException( "Partitioning failed for table: " + partitionInfo.table.name ); } // Loop over the values to create those partition groups, using the partitionKey to uniquely identify each partition - List partitionIds = new ArrayList<>(); - for ( int i = 0; i < numberOfPartitions; i++ ) { - String partitionName; + List partitionGroupIds = new ArrayList<>(); + for ( int i = 0; i < numberOfPartitionGroups; i++ ) { + String partitionGroupName; // Make the last partition group the unbound partition group - if ( partitionManager.requiresUnboundPartition() && i == numberOfPartitions - 1 ) { - partId = catalog.addPartition( + if ( partitionManager.requiresUnboundPartitionGroup() && i == numberOfPartitionGroups - 1 ) { + partId = catalog.addPartitionGroup( partitionInfo.table.id, "Unbound", partitionInfo.table.schemaId, - partitionInfo.table.ownerId, actualPartitionType, + numberOfPartitionsPerGroup, new ArrayList<>(), true ); } else { // If no names have been explicitly defined - if ( partitionInfo.partitionNames.isEmpty() ) { - partitionName = "part_" + i; + if ( partitionInfo.partitionGroupNames.isEmpty() ) { + partitionGroupName = "part_" + i; } else { - partitionName = partitionInfo.partitionNames.get( i ); + partitionGroupName = partitionInfo.partitionGroupNames.get( i ); } // Mainly needed for HASH if ( partitionInfo.qualifiers.isEmpty() ) { - partId = catalog.addPartition( + partId = catalog.addPartitionGroup( partitionInfo.table.id, - partitionName, + partitionGroupName, partitionInfo.table.schemaId, - partitionInfo.table.ownerId, actualPartitionType, + numberOfPartitionsPerGroup, new ArrayList<>(), false ); } else { - //partId = catalog.addPartition( tableId, partitionName, old.schemaId, old.ownerId, partitionType, new ArrayList<>( Collections.singletonList( partitionQualifiers.get( i ) ) ), false ); - partId = catalog.addPartition( + partId = catalog.addPartitionGroup( partitionInfo.table.id, - partitionName, + partitionGroupName, partitionInfo.table.schemaId, - partitionInfo.table.ownerId, actualPartitionType, + numberOfPartitionsPerGroup, partitionInfo.qualifiers.get( i ), false ); } } - partitionIds.add( partId ); + partitionGroupIds.add( partId ); + } + + List partitionIds = new ArrayList<>(); + // Get all partition groups, then collect the partitionIds of each group into the complete list of partitionIds + //catalog.getPartitionGroups(
partitionInfo.table.id ).forEach( pg -> partitionIds.forEach( p -> partitionIds.add( p ) ) ); + partitionGroupIds.forEach( pg -> catalog.getPartitions( pg ).forEach( p -> partitionIds.add( p.id ) ) ); + + PartitionProperty partitionProperty; + if ( actualPartitionType == PartitionType.TEMPERATURE ) { + long frequencyInterval = ((RawTemperaturePartitionInformation) partitionInfo.rawPartitionInformation).getInterval(); + switch ( ((RawTemperaturePartitionInformation) partitionInfo.rawPartitionInformation).getIntervalUnit().toString() ) { + case "days": + frequencyInterval = frequencyInterval * 60 * 60 * 24; + break; + + case "hours": + frequencyInterval = frequencyInterval * 60 * 60; + break; + + case "minutes": + frequencyInterval = frequencyInterval * 60; + break; + } + + int hotPercentageIn = Integer.valueOf( ((RawTemperaturePartitionInformation) partitionInfo.rawPartitionInformation).getHotAccessPercentageIn().toString() ); + int hotPercentageOut = Integer.valueOf( ((RawTemperaturePartitionInformation) partitionInfo.rawPartitionInformation).getHotAccessPercentageOut().toString() ); + + // Initially distribute partitions as intended in a running system + long numberOfPartitionsInHot = numberOfPartitions * hotPercentageIn / 100; + if ( numberOfPartitionsInHot == 0 ) { + numberOfPartitionsInHot = 1; + } + + long numberOfPartitionsInCold = numberOfPartitions - numberOfPartitionsInHot; + + // Collect the partition that was already created in the HOT group + List partitionsForHot = new ArrayList<>(); + catalog.getPartitions( partitionGroupIds.get( 0 ) ).forEach( p -> partitionsForHot.add( p.id ) ); + + // -1 because one partition is already created in HOT + for ( int i = 0; i < numberOfPartitionsInHot - 1; i++ ) { + long tempId; + tempId = catalog.addPartition( partitionInfo.table.id, partitionInfo.table.schemaId, partitionGroupIds.get( 0 ), partitionInfo.qualifiers.get( 0 ), false ); + partitionIds.add( tempId ); + partitionsForHot.add( tempId ); + } + + catalog.updatePartitionGroup( partitionGroupIds.get( 0 ), partitionsForHot ); + + // -1 because one partition is already created in COLD + List partitionsForCold = new ArrayList<>(); + catalog.getPartitions( partitionGroupIds.get( 1 ) ).forEach( p -> partitionsForCold.add( p.id ) ); + + for ( int i = 0; i < numberOfPartitionsInCold - 1; i++ ) { + long tempId; + tempId = catalog.addPartition( partitionInfo.table.id, partitionInfo.table.schemaId, partitionGroupIds.get( 1 ), partitionInfo.qualifiers.get( 1 ), false ); + partitionIds.add( tempId ); + partitionsForCold.add( tempId ); + } + + catalog.updatePartitionGroup( partitionGroupIds.get( 1 ), partitionsForCold ); + + partitionProperty = TemperaturePartitionProperty.builder() + .partitionType( actualPartitionType ) + .internalPartitionFunction( PartitionType.valueOf( ((RawTemperaturePartitionInformation) partitionInfo.rawPartitionInformation).getInternalPartitionFunction().toString().toUpperCase() ) ) + .partitionColumnId( catalogColumn.id ) + .partitionGroupIds( ImmutableList.copyOf( partitionGroupIds ) ) + .partitionIds( ImmutableList.copyOf( partitionIds ) ) + .partitionCostIndication( PartitionCostIndication.valueOf( ((RawTemperaturePartitionInformation) partitionInfo.rawPartitionInformation).getAccessPattern().toString().toUpperCase() ) ) + .frequencyInterval( frequencyInterval ) + .hotAccessPercentageIn( hotPercentageIn ) + .hotAccessPercentageOut( hotPercentageOut ) + .reliesOnPeriodicChecks( true ) + .hotPartitionGroupId( partitionGroupIds.get( 0 ) ) + .coldPartitionGroupId( partitionGroupIds.get(
1 ) ) + .numPartitions( partitionIds.size() ) + .numPartitionGroups( partitionGroupIds.size() ) + .build(); + } else { + partitionProperty = PartitionProperty.builder() + .partitionType( actualPartitionType ) + .partitionColumnId( catalogColumn.id ) + .partitionGroupIds( ImmutableList.copyOf( partitionGroupIds ) ) + .partitionIds( ImmutableList.copyOf( partitionIds ) ) + .reliesOnPeriodicChecks( false ) + .build(); } // Update catalog table - catalog.partitionTable( partitionInfo.table.id, actualPartitionType, catalogColumn.id, numberOfPartitions, partitionIds ); + catalog.partitionTable( partitionInfo.table.id, actualPartitionType, catalogColumn.id, numberOfPartitionGroups, partitionGroupIds, partitionProperty ); // Get primary key of table and use PK to find all DataPlacements of table long pkid = partitionInfo.table.primaryKey; @@ -1606,8 +1855,147 @@ public void addPartition( PartitionInformation partitionInfo ) throws GenericCat // Getting the first part of the PK is sufficient, even if it is a compound PK CatalogColumn pkColumn = catalog.getColumn( pkColumnIds.get( 0 ) ); // This gets us only one ccp per store (first part of PK) - for ( CatalogColumnPlacement ccp : catalog.getColumnPlacements( pkColumn.id ) ) { - catalog.updatePartitionsOnDataPlacement( ccp.adapterId, ccp.tableId, partitionIds ); + + boolean fillStores = false; + if ( stores == null ) { + stores = new ArrayList<>(); + fillStores = true; + } + List catalogColumnPlacements = catalog.getColumnPlacement( pkColumn.id ); + for ( CatalogColumnPlacement ccp : catalogColumnPlacements ) { + catalog.updatePartitionGroupsOnDataPlacement( ccp.adapterId, ccp.tableId, partitionGroupIds ); + if ( fillStores ) { + // Ask router on which store(s) the table should be placed + Adapter adapter = AdapterManager.getInstance().getAdapter( ccp.adapterId ); + if ( adapter instanceof DataStore ) { + stores.add( (DataStore) adapter ); + } + } + } + + // Now get the partitioned table, partitionInfo still contains the basic/unpartitioned table.
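+ // Sketch of the steps below: for every selected store, add a PartitionPlacement for each new partitionId, physically create the partitioned table on that store, and let the DataMigrator copy the rows of the single unpartitioned source partition into the new partitions according to the partition function.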
+ CatalogTable partitionedTable = catalog.getTable( partitionInfo.table.id ); + DataMigrator dataMigrator = statement.getTransaction().getDataMigrator(); + for ( DataStore store : stores ) { + for ( long partitionId : partitionIds ) { + catalog.addPartitionPlacement( + store.getAdapterId(), + partitionedTable.id, + partitionId, + PlacementType.AUTOMATIC, + null, + null ); + } + + // First create new tables + store.createTable( statement.getPrepareContext(), partitionedTable, partitionedTable.partitionProperty.partitionIds ); + + // Copy data from unpartitioned to partitioned + // Get only columns that are actually on that store + // Every store of a newly partitioned table will initially hold all partitions + List necessaryColumns = new LinkedList<>(); + catalog.getColumnPlacementsOnAdapterPerTable( store.getAdapterId(), partitionedTable.id ).forEach( cp -> necessaryColumns.add( catalog.getColumn( cp.columnId ) ) ); + + // Copy data from the old partition to new partitions + dataMigrator.copyPartitionData( + statement.getTransaction(), + catalog.getAdapter( store.getAdapterId() ), + unPartitionedTable, + partitionedTable, + necessaryColumns, + unPartitionedTable.partitionProperty.partitionIds, + partitionedTable.partitionProperty.partitionIds ); + } + // Remove old tables + stores.forEach( store -> store.dropTable( statement.getPrepareContext(), unPartitionedTable, unPartitionedTable.partitionProperty.partitionIds ) ); + catalog.deletePartitionGroup( unPartitionedTable.id, unPartitionedTable.schemaId, unPartitionedTable.partitionProperty.partitionGroupIds.get( 0 ) ); + } + + + @Override + public void removePartitioning( CatalogTable partitionedTable, Statement statement ) { + long tableId = partitionedTable.id; + + if ( log.isDebugEnabled() ) { + log.debug( "Merging partitions for table: {} with id {} on schema: {}", + partitionedTable.name, partitionedTable.id, partitionedTable.getSchemaName() ); + } + + // Need to gather the partitionDistribution before actually merging + // We need a columnPlacement for every partition + Map> placementDistribution = new HashMap<>(); + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( partitionedTable.partitionProperty.partitionType ); + placementDistribution = partitionManager.getRelevantPlacements( partitionedTable, partitionedTable.partitionProperty.partitionIds, new ArrayList<>( Arrays.asList( -1 ) ) ); + + // Update catalog table + catalog.mergeTable( tableId ); + + // Now get the merged table + CatalogTable mergedTable = catalog.getTable( tableId ); + + List stores = new ArrayList<>(); + // Get primary key of table and use PK to find all DataPlacements of table + long pkid = partitionedTable.primaryKey; + List pkColumnIds = catalog.getPrimaryKey( pkid ).columnIds; + // Getting the first part of the PK is sufficient, even if it is a compound PK + CatalogColumn pkColumn = catalog.getColumn( pkColumnIds.get( 0 ) ); + // This gets us only one ccp per store (first part of PK) + + List catalogColumnPlacements = catalog.getColumnPlacement( pkColumn.id ); + for ( CatalogColumnPlacement ccp : catalogColumnPlacements ) { + // Ask router on which store(s) the table should be placed + Adapter adapter = AdapterManager.getInstance().getAdapter( ccp.adapterId ); + if ( adapter instanceof DataStore ) { + stores.add( (DataStore) adapter ); + } + } + + DataMigrator dataMigrator = statement.getTransaction().getDataMigrator(); + boolean
firstIteration = true; + // For the merge, create only full placements on the used stores. Otherwise partition constraints might not hold + for ( DataStore store : stores ) { + // Need to create partitionPlacements first in order to trigger schema creation on PolySchemaBuilder + catalog.addPartitionPlacement( + store.getAdapterId(), + mergedTable.id, + mergedTable.partitionProperty.partitionIds.get( 0 ), + PlacementType.AUTOMATIC, + null, + null ); + + // First create new tables + store.createTable( statement.getPrepareContext(), mergedTable, mergedTable.partitionProperty.partitionIds ); + + // Get only columns that are actually on that store + List necessaryColumns = new LinkedList<>(); + catalog.getColumnPlacementsOnAdapterPerTable( store.getAdapterId(), mergedTable.id ).forEach( cp -> necessaryColumns.add( catalog.getColumn( cp.columnId ) ) ); + + dataMigrator.copySelectiveData( + statement.getTransaction(), + catalog.getAdapter( store.getAdapterId() ), + partitionedTable, + mergedTable, + necessaryColumns, + placementDistribution, + mergedTable.partitionProperty.partitionIds ); + } + + // Needs to be separated from the loop above. Otherwise we lose data + for ( DataStore store : stores ) { + List partitionIdsOnStore = new ArrayList<>(); + catalog.getPartitionPlacementByTable( store.getAdapterId(), partitionedTable.id ).forEach( p -> partitionIdsOnStore.add( p.partitionId ) ); + + // Otherwise everything will be dropped again, leaving the table inaccessible + partitionIdsOnStore.remove( mergedTable.partitionProperty.partitionIds.get( 0 ) ); + + // Drop all partitionedTables (table contains old partitionIds) + store.dropTable( statement.getPrepareContext(), partitionedTable, partitionIdsOnStore ); + } + // Loop over the old partitionIds to delete all partitions which are part of the table + // Needs to be done separately because partitionPlacements will be recursively dropped in `deletePartitionGroup` but are needed in dropTable + for ( long partitionGroupId : partitionedTable.partitionProperty.partitionGroupIds ) { + catalog.deletePartitionGroup( tableId, partitionedTable.schemaId, partitionGroupId ); } } @@ -1748,7 +2136,10 @@ public void dropTable( CatalogTable catalogTable, Statement statement ) throws D IndexManager.getInstance().deleteIndex( index ); } else { // Delete index on store - AdapterManager.getInstance().getStore( index.location ).dropIndex( statement.getPrepareContext(), index ); + AdapterManager.getInstance().getStore( index.location ).dropIndex( + statement.getPrepareContext(), + index, + catalog.getPartitionsOnDataPlacement( index.location, catalogTable.id ) ); } // Delete index in catalog catalog.deleteIndex( index.id ); @@ -1758,9 +2149,12 @@ public void dropTable( CatalogTable catalogTable, Statement statement ) throws D catalog.flagTableForDeletion( catalogTable.id, true ); for ( int storeId : catalogTable.placementsByAdapter.keySet() ) { // Delete table on store - AdapterManager.getInstance().getStore( storeId ).dropTable( statement.getPrepareContext(), catalogTable ); + List partitionIdsOnStore = new ArrayList<>(); + catalog.getPartitionPlacementByTable( storeId, catalogTable.id ).forEach( p -> partitionIdsOnStore.add( p.partitionId ) ); + + AdapterManager.getInstance().getStore( storeId ).dropTable( statement.getPrepareContext(), catalogTable, partitionIdsOnStore ); // Inform routing - statement.getRouter().dropPlacements(
catalog.getColumnPlacementsOnAdapterPerTable( storeId, catalogTable.id ) ); // Delete column placement in catalog for ( Long columnId : catalogTable.columnIds ) { if ( catalog.checkIfExistsColumnPlacement( storeId, columnId ) ) { diff --git a/dbms/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java b/dbms/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java new file mode 100644 index 0000000000..c137f4ba1c --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/partition/AbstractPartitionManager.java @@ -0,0 +1,126 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.partition; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.entity.CatalogColumn; +import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogTable; + + +@Slf4j +public abstract class AbstractPartitionManager implements PartitionManager { + + + // Returns the index of the partition where the object should be placed + @Override + public abstract long getTargetPartitionId( CatalogTable catalogTable, String columnValue ); + + + @Override + public boolean probePartitionGroupDistributionChange( CatalogTable catalogTable, int storeId, long columnId, int threshold ) { + Catalog catalog = Catalog.getInstance(); + + // Check for the specified columnId if we still have a ColumnPlacement for every partitionGroup + for ( Long partitionGroupId : catalogTable.partitionProperty.partitionGroupIds ) { + List ccps = catalog.getColumnPlacementsByPartitionGroup( catalogTable.id, partitionGroupId, columnId ); + if ( ccps.size() <= threshold ) { + for ( CatalogColumnPlacement placement : ccps ) { + if ( placement.adapterId == storeId ) { + return false; + } + } + } + } + return true; + } + + + @Override + public Map> getRelevantPlacements( CatalogTable catalogTable, List partitionIds, List excludedAdapters ) { + Catalog catalog = Catalog.getInstance(); + + Map> placementDistribution = new HashMap<>(); + + if ( partitionIds != null ) { + for ( long partitionId : partitionIds ) { + + CatalogPartition catalogPartition = catalog.getPartition( partitionId ); + List relevantCcps = new ArrayList<>(); + + for ( long columnId : catalogTable.columnIds ) { + List ccps = catalog.getColumnPlacementsByPartitionGroup( catalogTable.id, catalogPartition.partitionGroupId, columnId ); + ccps.removeIf( ccp -> excludedAdapters.contains( ccp.adapterId ) ); + if ( !ccps.isEmpty() ) { + // Get the first column placement which contains the partition + relevantCcps.add( ccps.get( 0 ) ); + if ( log.isDebugEnabled() ) { + log.debug( "{} {} with part.
{}", ccps.get( 0 ).adapterUniqueName, ccps.get( 0 ).getLogicalColumnName(), partitionId ); + } + } + } + placementDistribution.put( partitionId, relevantCcps ); + } + } + + return placementDistribution; + } + + + @Override + public boolean validatePartitionGroupSetup( + List> partitionGroupQualifiers, + long numPartitionGroups, + List partitionGroupNames, + CatalogColumn partitionColumn ) { + if ( numPartitionGroups == 0 && partitionGroupNames.size() < 2 ) { + throw new RuntimeException( "Partitioning of table failed! Can't partition table with less than 2 partitions/names" ); + } + return true; + } + + + // Returns 1 for most PartitionFunctions since they have a 1:1 relation between Groups and Internal Partitions + // In that case the input of numberOfPartitions is omitted + @Override + public int getNumberOfPartitionsPerGroup( int numberOfPartitions ) { + return 1; + } + + + /** + * Returns the unified null value for all partition managers. + * Such that every partionValue occurence of null ist treated equally + * + * @return null String + */ + @Override + public String getUnifiedNullValue() { + return "null"; + } + + + @Override + public abstract PartitionFunctionInfo getPartitionFunctionInfo(); + +} diff --git a/dbms/src/main/java/org/polypheny/db/partition/FrequencyMapImpl.java b/dbms/src/main/java/org/polypheny/db/partition/FrequencyMapImpl.java new file mode 100644 index 0000000000..9635b18c01 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/partition/FrequencyMapImpl.java @@ -0,0 +1,464 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.polypheny.db.partition; + +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import lombok.extern.slf4j.Slf4j; +import org.polypheny.db.adapter.Adapter; +import org.polypheny.db.adapter.AdapterManager; +import org.polypheny.db.adapter.DataStore; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.Catalog.PartitionType; +import org.polypheny.db.catalog.Catalog.PlacementType; +import org.polypheny.db.catalog.entity.CatalogAdapter; +import org.polypheny.db.catalog.entity.CatalogColumn; +import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogTable; +import org.polypheny.db.catalog.exceptions.GenericCatalogException; +import org.polypheny.db.catalog.exceptions.UnknownDatabaseException; +import org.polypheny.db.catalog.exceptions.UnknownSchemaException; +import org.polypheny.db.catalog.exceptions.UnknownUserException; +import org.polypheny.db.config.RuntimeConfig; +import org.polypheny.db.monitoring.core.MonitoringServiceProvider; +import org.polypheny.db.monitoring.events.metrics.DmlDataPoint; +import org.polypheny.db.monitoring.events.metrics.QueryDataPoint; +import org.polypheny.db.partition.properties.TemperaturePartitionProperty; +import org.polypheny.db.processing.DataMigrator; +import org.polypheny.db.transaction.Statement; +import org.polypheny.db.transaction.Transaction; +import org.polypheny.db.transaction.TransactionException; +import org.polypheny.db.transaction.TransactionManager; +import org.polypheny.db.transaction.TransactionManagerImpl; +import org.polypheny.db.util.background.BackgroundTask.TaskPriority; +import org.polypheny.db.util.background.BackgroundTask.TaskSchedulingType; +import org.polypheny.db.util.background.BackgroundTaskManager; + + +/** + * Periodically retrieves information from the MonitoringService to get current statistics about + * the frequency map, to determine which chunk of data should reside in the HOT and which in the COLD partition. + * + * Only one instance of the map exists. + * It is created once the first TEMPERATURE partitioned table is created (including the creation of the BackgroundTask), + * and it is consequently shut down when no TEMPERATURE partitioned tables exist anymore. + */ +@Slf4j +public class FrequencyMapImpl extends FrequencyMap { + + public static FrequencyMap INSTANCE = null; + + private final Catalog catalog; + + // Make use of central configuration + private String backgroundTaskId; + private Map accessCounter = new HashMap<>(); + + + public FrequencyMapImpl( Catalog catalog ) { + this.catalog = catalog; + } + + + /** + * Initializes the periodic frequency check by starting a background task + * which gathers frequency-related access information. + */ + @Override + public void initialize() { + startBackgroundTask(); + } + + + /** + * Stops all background processing and disables the accumulation of frequency-related access information.
+ */ + @Override + public void terminate() { + BackgroundTaskManager.INSTANCE.removeBackgroundTask( backgroundTaskId ); + } + + + private void startBackgroundTask() { + if ( backgroundTaskId == null ) { + backgroundTaskId = BackgroundTaskManager.INSTANCE.registerTask( + this::processAllPeriodicTables, + "Gather access frequencies of TEMPERATURE partitioned tables", + TaskPriority.MEDIUM, + (TaskSchedulingType) RuntimeConfig.TEMPERATURE_FREQUENCY_PROCESSING_INTERVAL.getEnum() ); + } + } + + + /** + * Retrieves all tables which require periodic processing and starts the access frequency process + */ + private void processAllPeriodicTables() { + log.debug( "Start processing access frequency of tables" ); + Catalog catalog = Catalog.getInstance(); + + long invocationTimestamp = System.currentTimeMillis(); + + // Retrieve all Tables which rely on periodic processing + for ( CatalogTable table : catalog.getTablesForPeriodicProcessing() ) { + if ( table.partitionType == PartitionType.TEMPERATURE ) { + determinePartitionFrequency( table, invocationTimestamp ); + } + } + log.debug( "Finished processing access frequency of tables" ); + } + + + private void incrementPartitionAccess( long identifiedPartitionId, List partitionIds ) { + // The outer check is needed to ignore frequencies of old, no longer existing partitionIds + // which are not linked to the table anymore but are still present in monitoring + // TODO @CEDRIC or @HENNLO introduce monitoring cleaning of data points + if ( partitionIds.contains( identifiedPartitionId ) ) { + if ( accessCounter.containsKey( identifiedPartitionId ) ) { + accessCounter.replace( identifiedPartitionId, accessCounter.get( identifiedPartitionId ) + 1 ); + } else { + accessCounter.put( identifiedPartitionId, (long) 1 ); + } + } + } + + + /** + * Determines the partition distribution for temperature partitioned tables by deciding which partitions should be moved from HOT to COLD + * and from COLD to HOT, to set up the table according to the current access frequency patterns.
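+ *
+ * <p>Illustrative example (hypothetical numbers, not taken from the patch): with 10 partitions,
+ * hotAccessPercentageIn = 20 and hotAccessPercentageOut = 30, the thresholds computed below are</p>
+ * <pre>{@code
+ * long numberOfPartitionsInHot = (10 * 20) / 100; // 2 partitions are promoted into HOT
+ * long allowedTablesInHot = (10 * 30) / 100;      // up to 3 partitions may remain in HOT
+ * }</pre>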
+ * + * @param table Temperature partitioned Table + */ + private void determinePartitionDistribution( CatalogTable table ) { + if ( log.isDebugEnabled() ) { + log.debug( "Determine access frequency of partitions of table: {}", table.name ); + } + + // Get the number of partitions that should be placed in HOT + long numberOfPartitionsInHot = (table.partitionProperty.partitionIds.size() * ((TemperaturePartitionProperty) table.partitionProperty).getHotAccessPercentageIn()) / 100; + + // These are the partitions that can remain in HOT + long allowedTablesInHot = (table.partitionProperty.partitionIds.size() * ((TemperaturePartitionProperty) table.partitionProperty).getHotAccessPercentageOut()) / 100; + + if ( numberOfPartitionsInHot == 0 ) { + numberOfPartitionsInHot = 1; + } + if ( allowedTablesInHot == 0 ) { + allowedTablesInHot = 1; + } + + List partitionsFromColdToHot = new ArrayList<>(); + List partitionsFromHotToCold = new ArrayList<>(); + + List partitionsAllowedInHot = new ArrayList<>(); + + HashMap descSortedMap = accessCounter + .entrySet() + .stream() + .sorted( (Map.Entry.comparingByValue().reversed()) ) + .collect( Collectors.toMap( Map.Entry::getKey, Map.Entry::getValue, ( e1, e2 ) -> e1, LinkedHashMap::new ) ); + + // Start gathering the partitions beginning with the most frequently accessed + int hotCounter = 0; + int toleranceCounter = 0; + boolean skip = false; + boolean firstRound = true; + for ( Entry currentEntry : descSortedMap.entrySet() ) { + if ( currentEntry.getValue() == 0 ) { + if ( firstRound ) { + skip = true; + } + break; + } + firstRound = false; + // Gather until you reach getHotAccessPercentageIn() #partitions + if ( hotCounter < numberOfPartitionsInHot ) { + // Partitions that should be placed in HOT if not already there + partitionsFromColdToHot.add( currentEntry.getKey() ); + hotCounter++; + + } + + if ( toleranceCounter >= allowedTablesInHot ) { + break; + } else { + // Partitions that can remain in HOT if they happen to be within that threshold + partitionsAllowedInHot.add( currentEntry.getKey() ); + toleranceCounter++; + } + } + + if ( !skip ) { + // Which partitions are in top X % (to be placed in HOT) + + // Which of those are currently in cold --> action needed + + List currentHotPartitions = Catalog.INSTANCE.getPartitions( ((TemperaturePartitionProperty) table.partitionProperty).getHotPartitionGroupId() ); + for ( CatalogPartition catalogPartition : currentHotPartitions ) { + + // Remove partitions from List if they are already in HOT (not necessary to send to DataMigrator) + if ( partitionsFromColdToHot.contains( catalogPartition.id ) ) { + partitionsFromColdToHot.remove( catalogPartition.id ); + + } else { // Currently in HOT but should not remain there anymore, so they are candidates to be moved to COLD + + if ( partitionsAllowedInHot.contains( catalogPartition.id ) ) { + continue; + } else { // place from HOT to cold + partitionsFromHotToCold.add( catalogPartition.id ); + } + } + + } + + if ( (!partitionsFromColdToHot.isEmpty() || !partitionsFromHotToCold.isEmpty()) ) { + redistributePartitions( table, partitionsFromColdToHot, partitionsFromHotToCold ); + } + } + } + + + /** + * Physically executes the data redistribution of the specific internal partitions and consequently creates new physical tables + * as well as removes tables which are not needed anymore.
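+ *
+ * <p>Conceptual flow, sketched with hypothetical helper names (these are not actual methods of this class):</p>
+ * <pre>{@code
+ * for (DataStore store : storesHoldingOnlyOneGroup) {
+ *     createPartitionPlacementsAndTables(store, partitionsMovingOntoStore);
+ *     copyData(store, partitionsMovingOntoStore);       // fill the new placements before anything is dropped
+ *     rememberForDrop(store, partitionsMovingOffStore);
+ * }
+ * updatePartitionGroupAssignments();                    // catalog: re-assign partitions to HOT/COLD
+ * dropRememberedTables();                               // finally remove the stale physical tables
+ * }</pre>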
+ * + * @param table Temperature partitioned table + * @param partitionsFromColdToHot Partitions which should be moved from COLD to HOT PartitionGroup + * @param partitionsFromHotToCold Partitions which should be moved from HOT to COLD PartitionGroup + */ + private void redistributePartitions( CatalogTable table, List partitionsFromColdToHot, List partitionsFromHotToCold ) { + if ( log.isDebugEnabled() ) { + log.debug( "Execute physical redistribution of partitions for table: {}", table.name ); + log.debug( "Partitions to move from HOT to COLD: {}", partitionsFromHotToCold ); + log.debug( "Partitions to move from COLD to HOT: {}", partitionsFromColdToHot ); + } + + Map> partitionsToRemoveFromStore = new HashMap<>(); + + TransactionManager transactionManager = new TransactionManagerImpl(); + Transaction transaction = null; + try { + transaction = transactionManager.startTransaction( "pa", table.getDatabaseName(), false, "FrequencyMap" ); + + Statement statement = transaction.createStatement(); + DataMigrator dataMigrator = statement.getTransaction().getDataMigrator(); + + List adaptersWithHot = Catalog.getInstance().getAdaptersByPartitionGroup( table.id, ((TemperaturePartitionProperty) table.partitionProperty).getHotPartitionGroupId() ); + List adaptersWithCold = Catalog.getInstance().getAdaptersByPartitionGroup( table.id, ((TemperaturePartitionProperty) table.partitionProperty).getColdPartitionGroupId() ); + + log.debug( "Get adapters to create physical tables" ); + // Validate that partition does not already exist on store + for ( CatalogAdapter catalogAdapter : adaptersWithHot ) { + // Skip creation/deletion because this adapter contains both groups HOT & COLD + if ( adaptersWithCold.contains( catalogAdapter ) ) { + if ( log.isDebugEnabled() ) { + log.debug( "Skip adapter {}, holds both partitionGroups HOT & COLD", catalogAdapter.uniqueName ); + } + continue; + } + + // First create new HOT tables + Adapter adapter = AdapterManager.getInstance().getAdapter( catalogAdapter.id ); + if ( adapter instanceof DataStore ) { + DataStore store = (DataStore) adapter; + + List hotPartitionsToCreate = filterList( catalogAdapter.id, table.id, partitionsFromColdToHot ); + //List coldPartitionsToDelete = filterList( catalogAdapter.id, table.id, partitionsFromHotToCold ); + + // Only proceed if there are new HOT partitions to create on this store + if ( hotPartitionsToCreate.size() != 0 ) { + Catalog.getInstance().getPartitionsOnDataPlacement( store.getAdapterId(), table.id ); + + for ( long partitionId : hotPartitionsToCreate ) { + catalog.addPartitionPlacement( + store.getAdapterId(), + table.id, + partitionId, + PlacementType.AUTOMATIC, + null, + null ); + } + + store.createTable( statement.getPrepareContext(), table, hotPartitionsToCreate ); + + List catalogColumns = new ArrayList<>(); + catalog.getColumnPlacementsOnAdapterPerTable( store.getAdapterId(), table.id ).forEach( cp -> catalogColumns.add( catalog.getColumn( cp.columnId ) ) ); + + dataMigrator.copyData( + statement.getTransaction(), + catalog.getAdapter( store.getAdapterId() ), + catalogColumns, + hotPartitionsToCreate ); + + if ( !partitionsToRemoveFromStore.containsKey( store ) ) { + partitionsToRemoveFromStore.put( store, partitionsFromHotToCold ); + } else { + partitionsToRemoveFromStore.replace( + store, + Stream.of( partitionsToRemoveFromStore.get( store ), partitionsFromHotToCold ) + .flatMap( Collection::stream ) + .collect( Collectors.toList() ) + ); + } + } + } + } + + for ( CatalogAdapter catalogAdapter : adaptersWithCold ) { + // Skip
creation/deletion because this adapter contains both groups HOT & COLD + if ( adaptersWithHot.contains( catalogAdapter ) ) { + continue; + } + // First create new COLD tables + Adapter adapter = AdapterManager.getInstance().getAdapter( catalogAdapter.id ); + if ( adapter instanceof DataStore ) { + DataStore store = (DataStore) adapter; + List coldPartitionsToCreate = filterList( catalogAdapter.id, table.id, partitionsFromHotToCold ); + if ( coldPartitionsToCreate.size() != 0 ) { + Catalog.getInstance().getPartitionsOnDataPlacement( store.getAdapterId(), table.id ); + + for ( long partitionId : coldPartitionsToCreate ) { + catalog.addPartitionPlacement( + store.getAdapterId(), + table.id, + partitionId, + PlacementType.AUTOMATIC, + null, + null ); + } + store.createTable( statement.getPrepareContext(), table, coldPartitionsToCreate ); + + List catalogColumns = new ArrayList<>(); + catalog.getColumnPlacementsOnAdapterPerTable( store.getAdapterId(), table.id ).forEach( cp -> catalogColumns.add( catalog.getColumn( cp.columnId ) ) ); + + dataMigrator.copyData( statement.getTransaction(), catalog.getAdapter( store.getAdapterId() ), catalogColumns, coldPartitionsToCreate ); + + if ( !partitionsToRemoveFromStore.containsKey( store ) ) { + partitionsToRemoveFromStore.put( store, partitionsFromColdToHot ); + } else { + partitionsToRemoveFromStore.replace( + store, + Stream.of( partitionsToRemoveFromStore.get( store ), partitionsFromColdToHot ).flatMap( Collection::stream ).collect( Collectors.toList() ) + ); + } + } + } + } + + // Update the catalog and drop the moved partitions on each store + + long hotPartitionGroupId = ((TemperaturePartitionProperty) table.partitionProperty).getHotPartitionGroupId(); + long coldPartitionGroupId = ((TemperaturePartitionProperty) table.partitionProperty).getColdPartitionGroupId(); + + // Update catalogInformation + partitionsFromColdToHot.forEach( p -> Catalog.getInstance().updatePartition( p, hotPartitionGroupId ) ); + partitionsFromHotToCold.forEach( p -> Catalog.getInstance().updatePartition( p, coldPartitionGroupId ) ); + + // Remove all tables that have been moved + for ( DataStore store : partitionsToRemoveFromStore.keySet() ) { + store.dropTable( statement.getPrepareContext(), table, partitionsToRemoveFromStore.get( store ) ); + } + + transaction.commit(); + } catch ( GenericCatalogException | UnknownUserException | UnknownDatabaseException | UnknownSchemaException | TransactionException e ) { + log.error( "Error while reassigning new location for temperature-based partitions", e ); + if ( transaction != null ) { + try { + transaction.rollback(); + } catch ( TransactionException ex ) { + log.error( "Error while rolling back the transaction", ex ); + } + } + } + } + + + /** + * Cleanses the list of physical partitions that already reside on the store. This happens if the PartitionGroups HOT and COLD logically reside on the same store.
+ * Therefore no actual data distribution has to take place. + * + * @param adapterId Adapter which is to receive the new tables + * @param tableId Id of temperature partitioned table + * @param partitionsToFilter List of partitions to be filtered + * @return The filtered and cleansed list + */ + private List filterList( int adapterId, long tableId, List partitionsToFilter ) { + // Remove partition from list if it's already contained on the store + for ( long partitionId : Catalog.getInstance().getPartitionsOnDataPlacement( adapterId, tableId ) ) { + if ( partitionsToFilter.contains( partitionId ) ) { + partitionsToFilter.remove( partitionId ); + } + } + return partitionsToFilter; + } + + + /** + * Determines the partition frequency for each partition of a temperature partitioned table based on the chosen Cost Indication (ALL, WRITE, READ) + * in a desired time interval. + * + * @param table Temperature partitioned table + * @param invocationTimestamp Timestamp to determine the interval for which monitoring metrics should be collected. + */ + @Override + public void determinePartitionFrequency( CatalogTable table, long invocationTimestamp ) { + Timestamp queryStart = new Timestamp( invocationTimestamp - ((TemperaturePartitionProperty) table.partitionProperty).getFrequencyInterval() * 1000 ); + + accessCounter = new HashMap<>(); + List tempPartitionIds = new ArrayList<>( table.partitionProperty.partitionIds ); + + tempPartitionIds.forEach( p -> accessCounter.put( p, (long) 0 ) ); + + switch ( ((TemperaturePartitionProperty) table.partitionProperty).getPartitionCostIndication() ) { + case ALL: + for ( QueryDataPoint queryDataPoint : MonitoringServiceProvider.getInstance().getDataPointsAfter( QueryDataPoint.class, queryStart ) ) { + queryDataPoint.getAccessedPartitions().forEach( p -> incrementPartitionAccess( p, tempPartitionIds ) ); + } + for ( DmlDataPoint dmlDataPoint : MonitoringServiceProvider.getInstance().getDataPointsAfter( DmlDataPoint.class, queryStart ) ) { + dmlDataPoint.getAccessedPartitions().forEach( p -> incrementPartitionAccess( p, tempPartitionIds ) ); + } + + break; + + case READ: + List readAccesses = MonitoringServiceProvider.getInstance().getDataPointsAfter( QueryDataPoint.class, queryStart ); + for ( QueryDataPoint queryDataPoint : readAccesses ) { + queryDataPoint.getAccessedPartitions().forEach( p -> incrementPartitionAccess( p, tempPartitionIds ) ); + } + break; + + case WRITE: + List writeAccesses = MonitoringServiceProvider.getInstance().getDataPointsAfter( DmlDataPoint.class, queryStart ); + for ( DmlDataPoint dmlDataPoint : writeAccesses ) { + dmlDataPoint.getAccessedPartitions().forEach( p -> incrementPartitionAccess( p, tempPartitionIds ) ); + } + } + + // To gain observability, update the infoPage here + determinePartitionDistribution( table ); + } + +} diff --git a/core/src/main/java/org/polypheny/db/partition/HashPartitionManager.java b/dbms/src/main/java/org/polypheny/db/partition/HashPartitionManager.java similarity index 54% rename from core/src/main/java/org/polypheny/db/partition/HashPartitionManager.java rename to dbms/src/main/java/org/polypheny/db/partition/HashPartitionManager.java index 7ed7311229..124d722bc7 100644 --- a/core/src/main/java/org/polypheny/db/partition/HashPartitionManager.java +++ b/dbms/src/main/java/org/polypheny/db/partition/HashPartitionManager.java @@ -16,14 +16,11 @@ package org.polypheny.db.partition; - import java.util.ArrayList; import java.util.Arrays; import java.util.List; import lombok.extern.slf4j.Slf4j; -import
org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; -import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.partition.PartitionFunctionInfo.PartitionFunctionInfoColumn; import org.polypheny.db.partition.PartitionFunctionInfo.PartitionFunctionInfoColumnType; @@ -33,65 +30,36 @@ @Slf4j public class HashPartitionManager extends AbstractPartitionManager { - public static final boolean REQUIRES_UNBOUND_PARTITION = false; + public static final boolean REQUIRES_UNBOUND_PARTITION_GROUP = false; public static final String FUNCTION_TITLE = "HASH"; @Override public long getTargetPartitionId( CatalogTable catalogTable, String columnValue ) { - long partitionID = columnValue.hashCode() * -1; + long hashValue = columnValue.hashCode() * -1; // Don't want any neg. value for now - if ( partitionID <= 0 ) { - partitionID *= -1; + if ( hashValue <= 0 ) { + hashValue *= -1; } - // Finally decide on which partition to put it - return catalogTable.partitionIds.get( (int) (partitionID % catalogTable.numPartitions) ); - } - + // Get designated HASH partition based on number of internal partitions + int partitionIndex = (int) (hashValue % catalogTable.partitionProperty.partitionIds.size()); - // Needed when columnPlacements are being dropped - // HASH Partitioning needs at least one column placement which contains all partitions as a fallback - @Override - public boolean probePartitionDistributionChange( CatalogTable catalogTable, int storeId, long columnId ) { - // Change is only critical if there is only one column left with the characteristics - int numberOfFullPlacements = getPlacementsWithAllPartitions( columnId, catalogTable.numPartitions ).size(); - if ( numberOfFullPlacements <= 1 ) { - Catalog catalog = Catalog.getInstance(); - //Check if this one column is the column we are about to delete - if ( catalog.getPartitionsOnDataPlacement( storeId, catalogTable.id ).size() == catalogTable.numPartitions ) { - return false; - } - } - - return true; - } - - - @Override - public List getRelevantPlacements( CatalogTable catalogTable, List partitionIds ) { - List relevantCcps = new ArrayList<>(); - // Find stores with full placements (partitions) - // Pick for each column the column placement which has full partitioning //SELECT WORST-CASE ergo Fallback - for ( long columnId : catalogTable.columnIds ) { - // Take the first column placement - relevantCcps.add( getPlacementsWithAllPartitions( columnId, catalogTable.numPartitions ).get( 0 ) ); - } - - return relevantCcps; + // Finally decide on which partition to put it + return catalogTable.partitionProperty.partitionIds.get( partitionIndex ); } @Override - public boolean validatePartitionSetup( List> partitionQualifiers, long numPartitions, List partitionNames, CatalogColumn partitionColumn ) { - super.validatePartitionSetup( partitionQualifiers, numPartitions, partitionNames, partitionColumn ); + public boolean validatePartitionGroupSetup( List> partitionGroupQualifiers, long numPartitionGroups, List partitionGroupNames, CatalogColumn partitionColumn ) { + super.validatePartitionGroupSetup( partitionGroupQualifiers, numPartitionGroups, partitionGroupNames, partitionColumn ); - if ( !partitionQualifiers.isEmpty() ) { + if ( !partitionGroupQualifiers.isEmpty() ) { throw new RuntimeException( "PartitionType HASH does not support the assignment of values to partitions" ); } - if ( numPartitions < 2 ) { - throw new RuntimeException( "You can't 
partition a table with less than 2 partitions. You only specified: '" + numPartitions + "'" ); + if ( numPartitionGroups < 2 ) { + throw new RuntimeException( "You can't partition a table with less than 2 partitions. You only specified: '" + numPartitionGroups + "'" ); } return true; @@ -100,7 +68,7 @@ public boolean validatePartitionSetup( List> partitionQualifiers, l @Override public PartitionFunctionInfo getPartitionFunctionInfo() { - //Dynamic content which will be generated by selected numPartitions + // Dynamic content which will be generated by selected numPartitions List dynamicRows = new ArrayList<>(); dynamicRows.add( PartitionFunctionInfoColumn.builder() .fieldType( PartitionFunctionInfoColumnType.STRING ) @@ -127,8 +95,8 @@ public PartitionFunctionInfo getPartitionFunctionInfo() { @Override - public boolean requiresUnboundPartition() { - return REQUIRES_UNBOUND_PARTITION; + public boolean requiresUnboundPartitionGroup() { + return REQUIRES_UNBOUND_PARTITION_GROUP; } diff --git a/core/src/main/java/org/polypheny/db/partition/ListPartitionManager.java b/dbms/src/main/java/org/polypheny/db/partition/ListPartitionManager.java similarity index 57% rename from core/src/main/java/org/polypheny/db/partition/ListPartitionManager.java rename to dbms/src/main/java/org/polypheny/db/partition/ListPartitionManager.java index b7fcd62d8c..55a8a011de 100644 --- a/core/src/main/java/org/polypheny/db/partition/ListPartitionManager.java +++ b/dbms/src/main/java/org/polypheny/db/partition/ListPartitionManager.java @@ -23,7 +23,6 @@ import lombok.extern.slf4j.Slf4j; import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; -import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogPartition; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.partition.PartitionFunctionInfo.PartitionFunctionInfoColumn; @@ -35,41 +34,38 @@ @Slf4j public class ListPartitionManager extends AbstractPartitionManager { - public static final boolean REQUIRES_UNBOUND_PARTITION = true; + public static final boolean REQUIRES_UNBOUND_PARTITION_GROUP = true; public static final String FUNCTION_TITLE = "LIST"; public static final List SUPPORTED_TYPES = ImmutableList.of( PolyType.INTEGER, PolyType.BIGINT, PolyType.SMALLINT, PolyType.TINYINT, PolyType.VARCHAR ); @Override public long getTargetPartitionId( CatalogTable catalogTable, String columnValue ) { - log.debug( "ListPartitionManager" ); - - Catalog catalog = Catalog.getInstance(); - long selectedPartitionId = -1; long unboundPartitionId = -1; + long selectedPartitionId = -1; - for ( long partitionID : catalogTable.partitionIds ) { - - CatalogPartition catalogPartition = catalog.getPartition( partitionID ); - - if ( catalogPartition.isUnbound ) { + // Process all accumulated CatalogPartitions + for ( CatalogPartition catalogPartition : Catalog.getInstance().getPartitionsByTable( catalogTable.id ) ) { + if ( unboundPartitionId == -1 && catalogPartition.isUnbound ) { unboundPartitionId = catalogPartition.id; + break; } + for ( int i = 0; i < catalogPartition.partitionQualifiers.size(); i++ ) { - //Could be int + // Could be int if ( catalogPartition.partitionQualifiers.get( i ).equals( columnValue ) ) { if ( log.isDebugEnabled() ) { log.debug( "Found column value: {} on partitionID {} with qualifiers: {}", columnValue, - partitionID, + catalogPartition.id, catalogPartition.partitionQualifiers ); } selectedPartitionId = catalogPartition.id; break; } } - } + // If no 
concrete partition could be identified, report back the unbound/default partition if ( selectedPartitionId == -1 ) { selectedPartitionId = unboundPartitionId; @@ -79,90 +75,12 @@ public long getTargetPartitionId( CatalogTable catalogTable, String columnValue } - // Needed when columnPlacements are being dropped - @Override - public boolean probePartitionDistributionChange( CatalogTable catalogTable, int storeId, long columnId ) { - - Catalog catalog = Catalog.getInstance(); - - //TODO Enable following code block without FullPartitionPlacement fallback - - /* try { - int thresholdCounter = 0; - boolean validDistribution = false; - //check for every partition if the column in question has still all partition somewhere even when columnId on Store would be removed - for (long partitionId : catalogTable.partitionIds) { - - //check if a column is dropped from a store if this column has still other placements with all partitions - List ccps = catalog.getColumnPlacementsByPartition(catalogTable.id, partitionId, columnId); - for ( CatalogColumnPlacement columnPlacement : ccps){ - if (columnPlacement.storeId != storeId){ - thresholdCounter++; - break; - } - } - if ( thresholdCounter < 1){ - return false; - } - } - - } catch ( UnknownPartitionException e) { - throw new RuntimeException(e); - }*/ - - // TODO can be removed if upper codeblock is enabled - // change is only critical if there is only one column left with the characteristics - int numberOfFullPlacements = getPlacementsWithAllPartitions( columnId, catalogTable.numPartitions ).size(); - if ( numberOfFullPlacements <= 1 ) { - //Check if this one column is the column we are about to delete - if ( catalog.getPartitionsOnDataPlacement( storeId, catalogTable.id ).size() == catalogTable.numPartitions ) { - return false; - } - } - - return true; - - } - - - // Relevant for select @Override - public List getRelevantPlacements( CatalogTable catalogTable, List partitionIds ) { - Catalog catalog = Catalog.getInstance(); - List relevantCcps = new ArrayList<>(); - - if ( partitionIds != null ) { - for ( long partitionId : partitionIds ) { - // Find stores with full placements (partitions) - // Pick for each column the column placement which has full partitioning //SELECT WORST-CASE ergo Fallback - for ( long columnId : catalogTable.columnIds ) { - List ccps = catalog.getColumnPlacementsByPartition( catalogTable.id, partitionId, columnId ); - if ( !ccps.isEmpty() ) { - //get first column placement which contains partition - relevantCcps.add( ccps.get( 0 ) ); - if ( log.isDebugEnabled() ) { - log.debug( "{} {} with part. 
{}", ccps.get( 0 ).adapterUniqueName, ccps.get( 0 ).getLogicalColumnName(), partitionId ); - } - } - } - } - } else { - // Take the first column placement - // Worst-case - for ( long columnId : catalogTable.columnIds ) { - relevantCcps.add( getPlacementsWithAllPartitions( columnId, catalogTable.numPartitions ).get( 0 ) ); - } - } - return relevantCcps; - } - - - @Override - public boolean validatePartitionSetup( List> partitionQualifiers, long numPartitions, List partitionNames, CatalogColumn partitionColumn ) { - super.validatePartitionSetup( partitionQualifiers, numPartitions, partitionNames, partitionColumn ); + public boolean validatePartitionGroupSetup( List> partitionGroupQualifiers, long numPartitionGroups, List partitionGroupNames, CatalogColumn partitionColumn ) { + super.validatePartitionGroupSetup( partitionGroupQualifiers, numPartitionGroups, partitionGroupNames, partitionColumn ); if ( partitionColumn.type.getFamily() == PolyTypeFamily.NUMERIC ) { - for ( List singlePartitionQualifiers : partitionQualifiers ) { + for ( List singlePartitionQualifiers : partitionGroupQualifiers ) { for ( String qualifier : singlePartitionQualifiers ) { try { Integer.valueOf( qualifier ); @@ -173,14 +91,14 @@ public boolean validatePartitionSetup( List> partitionQualifiers, l } } - if ( partitionQualifiers.isEmpty() ) { - throw new RuntimeException( "LIST Partitioning doesn't support empty Partition Qualifiers: '" + partitionQualifiers + + if ( partitionGroupQualifiers.isEmpty() ) { + throw new RuntimeException( "LIST Partitioning doesn't support empty Partition Qualifiers: '" + partitionGroupQualifiers + "'. USE (PARTITION name1 VALUES(value1)[(,PARTITION name1 VALUES(value1))*])" ); } - if ( partitionQualifiers.size() + 1 != numPartitions ) { - throw new RuntimeException( "Number of partitionQualifiers '" + partitionQualifiers + - "' + (mandatory 'Unbound' partition) is not equal to number of specified partitions '" + numPartitions + "'" ); + if ( partitionGroupQualifiers.size() + 1 != numPartitionGroups ) { + throw new RuntimeException( "Number of partitionQualifiers '" + partitionGroupQualifiers + + "' + (mandatory 'Unbound' partition) is not equal to number of specified partitions '" + numPartitionGroups + "'" ); } return true; @@ -189,8 +107,7 @@ public boolean validatePartitionSetup( List> partitionQualifiers, l @Override public PartitionFunctionInfo getPartitionFunctionInfo() { - - //Dynamic content which will be generated by selected numPartitions + // Dynamic content which will be generated by selected numPartitions List dynamicRows = new ArrayList<>(); dynamicRows.add( PartitionFunctionInfoColumn.builder() .fieldType( PartitionFunctionInfoColumnType.STRING ) @@ -212,7 +129,7 @@ public PartitionFunctionInfo getPartitionFunctionInfo() { .defaultValue( "" ) .build() ); - //Fixed rows to display after dynamically generated ones + // Fixed rows to display after dynamically generated ones List> rowsAfter = new ArrayList<>(); List unboundRow = new ArrayList<>(); unboundRow.add( PartitionFunctionInfoColumn.builder() @@ -255,8 +172,8 @@ public PartitionFunctionInfo getPartitionFunctionInfo() { @Override - public boolean requiresUnboundPartition() { - return REQUIRES_UNBOUND_PARTITION; + public boolean requiresUnboundPartitionGroup() { + return REQUIRES_UNBOUND_PARTITION_GROUP; } diff --git a/dbms/src/main/java/org/polypheny/db/partition/PartitionManagerFactoryImpl.java b/dbms/src/main/java/org/polypheny/db/partition/PartitionManagerFactoryImpl.java new file mode 100644 index 
0000000000..048a425065 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/partition/PartitionManagerFactoryImpl.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.polypheny.db.partition; + +import org.polypheny.db.catalog.Catalog; + + +public class PartitionManagerFactoryImpl extends PartitionManagerFactory { + + @Override + public PartitionManager getPartitionManager( Catalog.PartitionType partitionType ) { + switch ( partitionType ) { + case HASH: + return new HashPartitionManager(); + + case LIST: + return new ListPartitionManager(); + + case RANGE: + return new RangePartitionManager(); + + // TODO @HENNLO think about excluding "UDPF" here, these should only be used for internal Partition Functions + // Or create an internal mapping from PARTITIONTYPE to the handling partition manager + case TEMPERATURE: + return new TemperatureAwarePartitionManager(); + } + + throw new RuntimeException( "Unknown partition type: " + partitionType ); + } + +} diff --git a/core/src/main/java/org/polypheny/db/partition/RangePartitionManager.java b/dbms/src/main/java/org/polypheny/db/partition/RangePartitionManager.java similarity index 66% rename from core/src/main/java/org/polypheny/db/partition/RangePartitionManager.java rename to dbms/src/main/java/org/polypheny/db/partition/RangePartitionManager.java index 3bfa517f2b..2361931502 100644 --- a/core/src/main/java/org/polypheny/db/partition/RangePartitionManager.java +++ b/dbms/src/main/java/org/polypheny/db/partition/RangePartitionManager.java @@ -25,7 +25,6 @@ import lombok.extern.slf4j.Slf4j; import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; -import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogPartition; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.partition.PartitionFunctionInfo.PartitionFunctionInfoColumn; @@ -37,38 +36,36 @@ @Slf4j public class RangePartitionManager extends AbstractPartitionManager { - public static final boolean REQUIRES_UNBOUND_PARTITION = true; + public static final boolean REQUIRES_UNBOUND_PARTITION_GROUP = true; public static final String FUNCTION_TITLE = "RANGE"; public static final List SUPPORTED_TYPES = ImmutableList.of( PolyType.INTEGER, PolyType.BIGINT, PolyType.SMALLINT, PolyType.TINYINT ); @Override public long getTargetPartitionId( CatalogTable catalogTable, String columnValue ) { - Catalog catalog = Catalog.getInstance(); - long selectedPartitionId = -1; long unboundPartitionId = -1; + long selectedPartitionId = -1; - for ( long partitionID : catalogTable.partitionIds ) { - - CatalogPartition catalogPartition = catalog.getPartition( partitionID ); - - if ( catalogPartition.isUnbound ) { + // Process all accumulated CatalogPartitions + for ( CatalogPartition catalogPartition : Catalog.getInstance().getPartitionsByTable( catalogTable.id ) ) { + if ( unboundPartitionId == -1 &&
catalogPartition.isUnbound ) { unboundPartitionId = catalogPartition.id; - continue; + break; } if ( isValueInRange( columnValue, catalogPartition ) ) { if ( log.isDebugEnabled() ) { log.debug( "Found column value: {} on partitionID {} in range: [{} - {}]", columnValue, - partitionID, + catalogPartition.id, catalogPartition.partitionQualifiers.get( 0 ), catalogPartition.partitionQualifiers.get( 1 ) ); } selectedPartitionId = catalogPartition.id; - return selectedPartitionId; + break; } } + // If no concrete partition could be identified, report back the unbound/default partition if ( selectedPartitionId == -1 ) { selectedPartitionId = unboundPartitionId; @@ -78,69 +75,18 @@ public long getTargetPartitionId( CatalogTable catalogTable, String columnValue } - // Needed when columnPlacements are being dropped @Override - public boolean probePartitionDistributionChange( CatalogTable catalogTable, int storeId, long columnId ) { - Catalog catalog = Catalog.getInstance(); - - // change is only critical if there is only one column left with the characteristics - int numberOfFullPlacements = getPlacementsWithAllPartitions( columnId, catalogTable.numPartitions ).size(); - if ( numberOfFullPlacements <= 1 ) { - //Check if this one column is the column we are about to delete - if ( catalog.getPartitionsOnDataPlacement( storeId, catalogTable.id ).size() == catalogTable.numPartitions ) { - return false; - } - } - - return true; - } - - - @Override - public List getRelevantPlacements( CatalogTable catalogTable, List partitionIds ) { - Catalog catalog = Catalog.getInstance(); - List relevantCcps = new ArrayList<>(); - - if ( partitionIds != null ) { - - for ( long partitionId : partitionIds ) { - // Find stores with full placements (partitions) - // Pick for each column the column placement which has full partitioning //SELECT WORST-CASE ergo Fallback - for ( long columnId : catalogTable.columnIds ) { - List ccps = catalog.getColumnPlacementsByPartition( catalogTable.id, partitionId, columnId ); - if ( !ccps.isEmpty() ) { - //get first column placement which contains partition - relevantCcps.add( ccps.get( 0 ) ); - if ( log.isDebugEnabled() ) { - log.debug( "{} {} with part. {}", ccps.get( 0 ).adapterUniqueName, ccps.get( 0 ).getLogicalColumnName(), partitionId ); - } - } - } - } - } else { - // Take the first column placement - // Worst-case - for ( long columnId : catalogTable.columnIds ) { - relevantCcps.add( getPlacementsWithAllPartitions( columnId, catalogTable.numPartitions ).get( 0 ) ); - } - } - return relevantCcps; - } - - - @Override - public boolean validatePartitionSetup( List> partitionQualifierList, long numPartitions, List partitionNames, CatalogColumn partitionColumn ) { - super.validatePartitionSetup( partitionQualifierList, numPartitions, partitionNames, partitionColumn ); + public boolean validatePartitionGroupSetup( List> partitionGroupQualifiers, long numPartitionGroups, List partitionGroupNames, CatalogColumn partitionColumn ) { + super.validatePartitionGroupSetup( partitionGroupQualifiers, numPartitionGroups, partitionGroupNames, partitionColumn ); if ( partitionColumn.type.getFamily() != PolyTypeFamily.NUMERIC ) { throw new RuntimeException( "You cannot specify RANGE partitioning for a non-numeric type. 
Detected Type: " + partitionColumn.type + " for column: '" + partitionColumn.name + "'" ); } - for ( List partitionQualifiers : partitionQualifierList ) { + for ( List partitionQualifiers : partitionGroupQualifiers ) { for ( String partitionQualifier : partitionQualifiers ) { - if ( partitionQualifier.isEmpty() ) { - throw new RuntimeException( "RANGE Partitioning doesn't support empty Partition Qualifiers: '" + partitionQualifierList + "'. USE (PARTITION name1 VALUES(value1)[(,PARTITION name1 VALUES(value1))*])" ); + throw new RuntimeException( "RANGE Partitioning doesn't support empty Partition Qualifiers: '" + partitionGroupQualifiers + "'. USE (PARTITION name1 VALUES(value1)[(,PARTITION name1 VALUES(value1))*])" ); } if ( !(partitionQualifier.chars().allMatch( Character::isDigit )) ) { @@ -153,19 +99,19 @@ public boolean validatePartitionSetup( List> partitionQualifierList } } - if ( partitionQualifierList.size() + 1 != numPartitions ) { - throw new RuntimeException( "Number of partitionQualifiers '" + partitionQualifierList + "' + (mandatory 'Unbound' partition) is not equal to number of specified partitions '" + numPartitions + "'" ); + if ( partitionGroupQualifiers.size() + 1 != numPartitionGroups ) { + throw new RuntimeException( "Number of partitionQualifiers '" + partitionGroupQualifiers + "' + (mandatory 'Unbound' partition) is not equal to number of specified partitions '" + numPartitionGroups + "'" ); } - if ( partitionQualifierList.isEmpty() ) { - throw new RuntimeException( "Partition Qualifiers are empty '" + partitionQualifierList + "'" ); + if ( partitionGroupQualifiers.isEmpty() ) { + throw new RuntimeException( "Partition Qualifiers are empty '" + partitionGroupQualifiers + "'" ); } // Check if range is overlapping - for ( int i = 0; i < partitionQualifierList.size(); i++ ) { + for ( int i = 0; i < partitionGroupQualifiers.size(); i++ ) { - int lowerBound = Integer.parseInt( partitionQualifierList.get( i ).get( 0 ) ); - int upperBound = Integer.parseInt( partitionQualifierList.get( i ).get( 1 ) ); + int lowerBound = Integer.parseInt( partitionGroupQualifiers.get( i ).get( 0 ) ); + int upperBound = Integer.parseInt( partitionGroupQualifiers.get( i ).get( 1 ) ); // Check if ( upperBound < lowerBound ) { @@ -174,30 +120,30 @@ public boolean validatePartitionSetup( List> partitionQualifierList lowerBound = temp; // Rearrange List values lower < upper - partitionQualifierList.set( i, Stream.of( partitionQualifierList.get( i ).get( 1 ), partitionQualifierList.get( i ).get( 0 ) ).collect( Collectors.toList() ) ); + partitionGroupQualifiers.set( i, Stream.of( partitionGroupQualifiers.get( i ).get( 1 ), partitionGroupQualifiers.get( i ).get( 0 ) ).collect( Collectors.toList() ) ); } else if ( upperBound == lowerBound ) { throw new RuntimeException( "No Range specified. 
Lower and upper bound are equal:" + lowerBound + " = " + upperBound ); } - for ( int k = i; k < partitionQualifierList.size() - 1; k++ ) { - int contestingLowerBound = Integer.parseInt( partitionQualifierList.get( k + 1 ).get( 0 ) ); - int contestingUpperBound = Integer.parseInt( partitionQualifierList.get( k + 1 ).get( 1 ) ); + for ( int k = i; k < partitionGroupQualifiers.size() - 1; k++ ) { + int contestingLowerBound = Integer.parseInt( partitionGroupQualifiers.get( k + 1 ).get( 0 ) ); + int contestingUpperBound = Integer.parseInt( partitionGroupQualifiers.get( k + 1 ).get( 1 ) ); if ( contestingUpperBound < contestingLowerBound ) { int temp = contestingUpperBound; - contestingUpperBound = contestingUpperBound; + contestingUpperBound = contestingLowerBound; contestingLowerBound = temp; - List list = Stream.of( partitionQualifierList.get( k + 1 ).get( 1 ), partitionQualifierList.get( k + 1 ).get( 0 ) ) + List list = Stream.of( partitionGroupQualifiers.get( k + 1 ).get( 1 ), partitionGroupQualifiers.get( k + 1 ).get( 0 ) ) .collect( Collectors.toList() ); - partitionQualifierList.set( k + 1, list ); + partitionGroupQualifiers.set( k + 1, list ); } else if ( contestingUpperBound == contestingLowerBound ) { throw new RuntimeException( "No Range specified. Lower and upper bound are equal:" + contestingLowerBound + " = " + contestingUpperBound ); } - //Check if they are overlapping + // Check if they are overlapping if ( lowerBound <= contestingUpperBound && upperBound >= contestingLowerBound ) { throw new RuntimeException( "Several ranges are overlapping: [" + lowerBound + " - " + upperBound + "] and [" + contestingLowerBound + " - " + contestingUpperBound + "] You need to specify distinct ranges." ); } @@ -212,8 +158,7 @@ public boolean validatePartitionSetup( List> partitionQualifierList @Override public PartitionFunctionInfo getPartitionFunctionInfo() { - - //Dynamic content which will be generated by selected numPartitions + // Dynamic content which will be generated by selected numPartitions List dynamicRows = new ArrayList<>(); dynamicRows.add( PartitionFunctionInfoColumn.builder() .fieldType( PartitionFunctionInfoColumnType.STRING ) @@ -245,7 +190,7 @@ public PartitionFunctionInfo getPartitionFunctionInfo() { .defaultValue( "" ) .build() ); - //Fixed rows to display after dynamically generated ones + // Fixed rows to display after dynamically generated ones List> rowsAfter = new ArrayList<>(); List unboundRow = new ArrayList<>(); unboundRow.add( PartitionFunctionInfoColumn.builder() @@ -297,8 +242,8 @@ public PartitionFunctionInfo getPartitionFunctionInfo() { @Override - public boolean requiresUnboundPartition() { - return REQUIRES_UNBOUND_PARTITION; + public boolean requiresUnboundPartitionGroup() { + return REQUIRES_UNBOUND_PARTITION_GROUP; } diff --git a/dbms/src/main/java/org/polypheny/db/partition/TemperatureAwarePartitionManager.java b/dbms/src/main/java/org/polypheny/db/partition/TemperatureAwarePartitionManager.java new file mode 100644 index 0000000000..0e3c9a2e75 --- /dev/null +++ b/dbms/src/main/java/org/polypheny/db/partition/TemperatureAwarePartitionManager.java @@ -0,0 +1,305 @@ +/* + * Copyright 2019-2021 The Polypheny Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.polypheny.db.partition; + +import com.google.common.collect.ImmutableList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import org.polypheny.db.catalog.entity.CatalogColumn; +import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogTable; +import org.polypheny.db.partition.PartitionFunctionInfo.PartitionFunctionInfoColumn; +import org.polypheny.db.partition.PartitionFunctionInfo.PartitionFunctionInfoColumnType; +import org.polypheny.db.partition.properties.TemperaturePartitionProperty; +import org.polypheny.db.type.PolyType; + + +public class TemperatureAwarePartitionManager extends AbstractPartitionManager { + + public static final boolean REQUIRES_UNBOUND_PARTITION_GROUP = false; + public static final String FUNCTION_TITLE = "TEMPERATURE"; + public static final List SUPPORTED_TYPES = ImmutableList.of( PolyType.INTEGER, PolyType.BIGINT, PolyType.SMALLINT, PolyType.TINYINT, PolyType.VARCHAR ); + + + @Override + public long getTargetPartitionId( CatalogTable catalogTable, String columnValue ) { + // Get partition manager + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( + ((TemperaturePartitionProperty) catalogTable.partitionProperty).getInternalPartitionFunction() + ); + + return partitionManager.getTargetPartitionId( catalogTable, columnValue ); + } + + + @Override + public Map> getRelevantPlacements( CatalogTable catalogTable, List partitionIds, List excludedAdapters ) { + // Get partition manager + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( + ((TemperaturePartitionProperty) catalogTable.partitionProperty).getInternalPartitionFunction() + ); + + return partitionManager.getRelevantPlacements( catalogTable, partitionIds, excludedAdapters ); + } + + + @Override + public boolean requiresUnboundPartitionGroup() { + return REQUIRES_UNBOUND_PARTITION_GROUP; + } + + + @Override + public boolean supportsColumnOfType( PolyType type ) { + return SUPPORTED_TYPES.contains( type ); + } + + + @Override + public int getNumberOfPartitionsPerGroup( int numberOfPartitions ) { + return 1; + } + + + @Override + public boolean validatePartitionGroupSetup( List> partitionGroupQualifiers, long numPartitionGroups, List partitionGroupNames, CatalogColumn partitionColumn ) { + super.validatePartitionGroupSetup( partitionGroupQualifiers, numPartitionGroups, partitionGroupNames, partitionColumn ); + + return true; + } + + + @Override + public PartitionFunctionInfo getPartitionFunctionInfo() { + List> rowsBefore = new ArrayList<>(); + + //ROW for HOT partition infos about custom name & hot-label, + List hotRow = new ArrayList<>(); + hotRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.STRING ) + .mandatory( true ) + .modifiable( true ) + .sqlPrefix( "(PARTITION" ) + .sqlSuffix( "" 
) + .valueSeparation( "" ) + .defaultValue( "HOT" ) + .build() ); + hotRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "HOT" ) + .build() ); + + //ROW for COLD partition infos about custom name & cold-label, + List coldRow = new ArrayList<>(); + coldRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.STRING ) + .mandatory( true ) + .modifiable( true ) + .sqlPrefix( "PARTITION" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "COLD" ) + .build() ); + coldRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "COLD" ) + .build() ); + + List rowInHot = new ArrayList<>(); + rowInHot.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "% Threshold into HOT" ) + .build() ); + + rowInHot.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.STRING ) + .mandatory( false ) + .modifiable( true ) + .sqlPrefix( "VALUES(" ) + .sqlSuffix( "%)," ) + .valueSeparation( "" ) + .defaultValue( "10" ) + .build() ); + + List rowOutHot = new ArrayList<>(); + rowOutHot.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "% Threshold out of HOT" ) + .build() ); + + rowOutHot.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.STRING ) + .mandatory( false ) + .modifiable( true ) + .sqlPrefix( "VALUES(" ) + .sqlSuffix( "%))" ) + .valueSeparation( "" ) + .defaultValue( "15" ) + .build() ); + + rowsBefore.add( hotRow ); + rowsBefore.add( rowInHot ); + rowsBefore.add( coldRow ); + rowsBefore.add( rowOutHot ); + + // COST MODEL + // Fixed rows to display after dynamically generated ones + List> rowsAfter = new ArrayList<>(); + + List costRow = new ArrayList<>(); + costRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "Cost Model" ) + .build() ); + + costRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LIST ) + .mandatory( false ) + .modifiable( true ) + .sqlPrefix( "USING FREQUENCY" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .options( new ArrayList<>( Arrays.asList( "ALL", "WRITE", "READ" ) ) ) + .build() ); + + List extendedCostRow = new ArrayList<>(); + + extendedCostRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "Time Window" ) + .build() ); + + extendedCostRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.STRING ) + .mandatory( false ) + .modifiable( true ) + .sqlPrefix( "INTERVAL" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "2" ) + .build() ); + + 
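+ // The LIST column below supplies the time unit (Minutes/Hours/Days) for the INTERVAL value above, which is why its SQL prefix and suffix stay empty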
extendedCostRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LIST ) + .mandatory( false ) + .modifiable( true ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .options( new ArrayList<>( Arrays.asList( "Minutes", "Hours", "Days" ) ) ) + .build() ); + + List chunkRow = new ArrayList<>(); + chunkRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "Number of internal data chunks" ) + .build() ); + + chunkRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.STRING ) + .mandatory( false ) + .modifiable( true ) + .sqlPrefix( "WITH" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "-04071993" ) + .build() ); + + List unboundRow = new ArrayList<>(); + unboundRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LABEL ) + .mandatory( false ) + .modifiable( false ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .valueSeparation( "" ) + .defaultValue( "Internal Partitioning" ) + .build() ); + + unboundRow.add( PartitionFunctionInfoColumn.builder() + .fieldType( PartitionFunctionInfoColumnType.LIST ) + .mandatory( false ) + .modifiable( true ) + .sqlPrefix( "" ) + .sqlSuffix( "PARTITIONS" ) + .valueSeparation( "" ) + .options( new ArrayList<>( Arrays.asList( "HASH" ) ) ) + .build() ); + + rowsAfter.add( costRow ); + rowsAfter.add( extendedCostRow ); + rowsAfter.add( chunkRow ); + rowsAfter.add( unboundRow ); + + // Bring all rows and columns together + PartitionFunctionInfo uiObject = PartitionFunctionInfo.builder() + .functionTitle( FUNCTION_TITLE ) + .description( "Automatically partitions data into HOT and COLD based on a selected cost model which is automatically applied to " + + "the values of the partition column. " + + "Furthermore, the data inside the table will be internally partitioned into chunks to apply the cost model on. " + "Therefore, a secondary partitioning can be used." ) + .sqlPrefix( "" ) + .sqlSuffix( "" ) + .rowSeparation( "" ) + .rowsBefore( rowsBefore ) + .rowsAfter( rowsAfter ) + .headings( new ArrayList<>( Arrays.asList( "Partition Name", "Classification" ) ) ) + .build(); + + return uiObject; + } + +} diff --git a/dbms/src/main/java/org/polypheny/db/processing/AbstractQueryProcessor.java b/dbms/src/main/java/org/polypheny/db/processing/AbstractQueryProcessor.java index baa9fd34c0..fd4b4c5234 100644 --- a/dbms/src/main/java/org/polypheny/db/processing/AbstractQueryProcessor.java +++ b/dbms/src/main/java/org/polypheny/db/processing/AbstractQueryProcessor.java @@ -67,6 +67,9 @@ import org.polypheny.db.interpreter.BindableConvention; import org.polypheny.db.interpreter.Interpreters; import org.polypheny.db.jdbc.PolyphenyDbSignature; +import org.polypheny.db.monitoring.events.DmlEvent; +import org.polypheny.db.monitoring.events.QueryEvent; +import org.polypheny.db.monitoring.events.StatementEvent; import org.polypheny.db.plan.Convention; import org.polypheny.db.plan.RelOptUtil; import org.polypheny.db.plan.RelTraitSet; @@ -140,14 +143,14 @@ @Slf4j public abstract class AbstractQueryProcessor implements QueryProcessor { - private final Statement statement; - protected static final boolean ENABLE_BINDABLE = false; protected static final boolean ENABLE_COLLATION_TRAIT = true; protected static final boolean ENABLE_ENUMERABLE = true; protected static final boolean CONSTANT_REDUCTION = false; protected static final boolean ENABLE_STREAM = true; + private final Statement statement; + protected AbstractQueryProcessor( Statement statement ) { this.statement = statement; @@ -178,6 +181,15 @@ protected PolyphenyDbSignature prepareQuery( RelRoot logicalRoot, RelDataType pa if ( log.isDebugEnabled() ) { log.debug( "Preparing statement ..." ); } + + if ( statement.getTransaction().getMonitoringData() == null ) { + if ( logicalRoot.kind.belongsTo( SqlKind.DML ) ) { + statement.getTransaction().setMonitoringData( new DmlEvent() ); + } else if ( logicalRoot.kind.belongsTo( SqlKind.QUERY ) ) { + statement.getTransaction().setMonitoringData( new QueryEvent() ); + } + } + stopWatch.start(); if ( logicalRoot.rel.hasView() ) { @@ -295,6 +307,25 @@ protected PolyphenyDbSignature prepareQuery( RelRoot logicalRoot, RelDataType pa if ( isAnalyze ) { statement.getDuration().stop( "Implementation Caching" ); } + + // TODO @Cedric: this produces an error causing several checks to fail. Please investigate. + // Needed for row results + + //final Enumerable enumerable = signature.enumerable( statement.getDataContext() ); + //Iterator iterator = enumerable.iterator(); + + if ( statement.getTransaction().getMonitoringData() != null ) { + StatementEvent eventData = statement.getTransaction().getMonitoringData(); + eventData.setMonitoringType( parameterizedRoot.kind.sql ); + eventData.setDescription( "Test description: " + signature.statementType.toString() ); + eventData.setRouted( logicalRoot ); + eventData.setFieldNames( ImmutableList.copyOf( signature.rowType.getFieldNames() ) ); + //eventData.setRows( MetaImpl.collect( signature.cursorFactory, iterator, new ArrayList<>() ) ); + eventData.setAnalyze( isAnalyze ); + eventData.setSubQuery( isSubquery ); + eventData.setDurations( statement.getDuration().asJson() ); + } + return signature; } } @@ -366,6 +397,24 @@ protected PolyphenyDbSignature prepareQuery( RelRoot logicalRoot, RelDataType pa log.debug( "Preparing statement ... done. 
[{}]", stopWatch ); } + // TODO @Cedric: this produces an error causing several checks to fail. Please investigate. + // Needed for row results + //final Enumerable enumerable = signature.enumerable( statement.getDataContext() ); + //Iterator iterator = enumerable.iterator(); + + TransactionImpl transaction = (TransactionImpl) statement.getTransaction(); + if ( transaction.getMonitoringData() != null ) { + StatementEvent eventData = transaction.getMonitoringData(); + eventData.setMonitoringType( parameterizedRoot.kind.sql ); + eventData.setDescription( "Test description: " + signature.statementType.toString() ); + eventData.setRouted( logicalRoot ); + eventData.setFieldNames( ImmutableList.copyOf( signature.rowType.getFieldNames() ) ); + //eventData.setRows( MetaImpl.collect( signature.cursorFactory, iterator, new ArrayList<>() ) ); + eventData.setAnalyze( isAnalyze ); + eventData.setSubQuery( isSubquery ); + eventData.setDurations( statement.getDuration().asJson() ); + } + return signature; } @@ -1132,6 +1181,14 @@ public RelRoot expandView( RelDataType rowType, String queryString, List } + @Override + public void resetCaches() { + ImplementationCache.INSTANCE.reset(); + QueryPlanCache.INSTANCE.reset(); + statement.getRouter().resetCaches(); + } + + static class RelDeepCopyShuttle extends RelShuttleImpl { private RelTraitSet copy( final RelTraitSet other ) { @@ -1251,12 +1308,4 @@ public RelNode visit( RelNode other ) { } - - @Override - public void resetCaches() { - ImplementationCache.INSTANCE.reset(); - QueryPlanCache.INSTANCE.reset(); - statement.getRouter().resetCaches(); - } - } diff --git a/dbms/src/main/java/org/polypheny/db/processing/DataContextImpl.java b/dbms/src/main/java/org/polypheny/db/processing/DataContextImpl.java index ea238ae012..40be81cd84 100644 --- a/dbms/src/main/java/org/polypheny/db/processing/DataContextImpl.java +++ b/dbms/src/main/java/org/polypheny/db/processing/DataContextImpl.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2020 The Polypheny Project + * Copyright 2019-2021 The Polypheny Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -16,7 +16,6 @@ package org.polypheny.db.processing; - import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -150,35 +149,6 @@ public void resetParameterValues() { parameterValues.clear(); } - /* - private SqlAdvisor getSqlAdvisor() { - final String schemaName; - try { - schemaName = con.getSchema(); - } catch ( SQLException e ) { - throw new RuntimeException( e ); - } - final List schemaPath = - schemaName == null - ? 
ImmutableList.of() - : ImmutableList.of( schemaName ); - final SqlValidatorWithHints validator = - new SqlAdvisorValidator( - SqlStdOperatorTable.instance(), - new PolyphenyDbCatalogReader( rootSchema, schemaPath, typeFactory ), typeFactory, SqlConformanceEnum.DEFAULT ); - final PolyphenyDbConnectionConfig config = con.config(); - // This duplicates org.polypheny.db.prepare.PolyphenyDbPrepareImpl.prepare2_ - final Config parserConfig = SqlParser.configBuilder() - .setQuotedCasing( config.quotedCasing() ) - .setUnquotedCasing( config.unquotedCasing() ) - .setQuoting( config.quoting() ) - .setConformance( config.conformance() ) - .setCaseSensitive( RuntimeConfig.CASE_SENSITIVE.getBoolean() ) - .build(); - return new SqlAdvisor( validator, parserConfig ); - } -*/ - @Override public SchemaPlus getRootSchema() { diff --git a/dbms/src/main/java/org/polypheny/db/processing/DataMigratorImpl.java b/dbms/src/main/java/org/polypheny/db/processing/DataMigratorImpl.java index 31175929bc..ce8a769e54 100644 --- a/dbms/src/main/java/org/polypheny/db/processing/DataMigratorImpl.java +++ b/dbms/src/main/java/org/polypheny/db/processing/DataMigratorImpl.java @@ -18,6 +18,7 @@ import com.google.common.collect.ImmutableList; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; @@ -36,6 +37,8 @@ import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.config.RuntimeConfig; import org.polypheny.db.jdbc.PolyphenyDbSignature; +import org.polypheny.db.partition.PartitionManager; +import org.polypheny.db.partition.PartitionManagerFactory; import org.polypheny.db.plan.RelOptCluster; import org.polypheny.db.plan.RelOptTable; import org.polypheny.db.plan.ViewExpanders; @@ -58,25 +61,22 @@ import org.polypheny.db.type.PolyTypeFactoryImpl; import org.polypheny.db.util.LimitIterator; + @Slf4j public class DataMigratorImpl implements DataMigrator { - @Override - public void copyData( Transaction transaction, CatalogAdapter store, List columns ) { - - Statement sourceStatement = transaction.createStatement(); - Statement targetStatement = transaction.createStatement(); + public void copyData( Transaction transaction, CatalogAdapter store, List columns, List partitionIds ) { + CatalogTable table = Catalog.getInstance().getTable( columns.get( 0 ).tableId ); + CatalogPrimaryKey primaryKey = Catalog.getInstance().getPrimaryKey( table.primaryKey ); // Check Lists - List columnPlacements = new LinkedList<>(); + List targetColumnPlacements = new LinkedList<>(); for ( CatalogColumn catalogColumn : columns ) { - columnPlacements.add( Catalog.getInstance().getColumnPlacement( store.id, catalogColumn.id ) ); + targetColumnPlacements.add( Catalog.getInstance().getColumnPlacement( store.id, catalogColumn.id ) ); } List selectColumnList = new LinkedList<>( columns ); - CatalogTable table = Catalog.getInstance().getTable( columnPlacements.get( 0 ).tableId ); - CatalogPrimaryKey primaryKey = Catalog.getInstance().getPrimaryKey( table.primaryKey ); // Add primary keys to select column list for ( long cid : primaryKey.columnIds ) { @@ -86,72 +86,87 @@ public void copyData( Transaction transaction, CatalogAdapter store, List> placementDistribution = new HashMap<>(); + if ( table.isPartitioned ) { + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( table.partitionProperty.partitionType ); + placementDistribution = 
partitionManager.getRelevantPlacements( table, partitionIds, new ArrayList<>( Arrays.asList( store.id ) ) ); } else { - // Build update statement - targetRel = buildUpdateStatement( targetStatement, columnPlacements ); + placementDistribution.put( table.partitionProperty.partitionIds.get( 0 ), selectSourcePlacements( table, selectColumnList, targetColumnPlacements.get( 0 ).adapterId ) ); } - // Execute Query - try { - PolyphenyDbSignature signature = sourceStatement.getQueryProcessor().prepareQuery( sourceRel, sourceRel.rel.getCluster().getTypeFactory().builder().build(), true ); - final Enumerable enumerable = signature.enumerable( sourceStatement.getDataContext() ); - //noinspection unchecked - Iterator sourceIterator = enumerable.iterator(); + for ( long partitionId : partitionIds ) { + Statement sourceStatement = transaction.createStatement(); + Statement targetStatement = transaction.createStatement(); - Map resultColMapping = new HashMap<>(); - for ( CatalogColumn catalogColumn : selectColumnList ) { - int i = 0; - for ( ColumnMetaData metaData : signature.columns ) { - if ( metaData.columnName.equalsIgnoreCase( catalogColumn.name ) ) { - resultColMapping.put( catalogColumn.id, i ); - } - i++; - } + RelRoot sourceRel = getSourceIterator( sourceStatement, placementDistribution ); + RelRoot targetRel; + if ( Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( store.id, table.id ).size() == columns.size() ) { + // There have been no placements for this table on this store before. Build insert statement + targetRel = buildInsertStatement( targetStatement, targetColumnPlacements, partitionId ); + } else { + // Build update statement + targetRel = buildUpdateStatement( targetStatement, targetColumnPlacements, partitionId ); } - int batchSize = RuntimeConfig.DATA_MIGRATOR_BATCH_SIZE.getInteger(); - while ( sourceIterator.hasNext() ) { - List> rows = MetaImpl.collect( signature.cursorFactory, LimitIterator.of( sourceIterator, batchSize ), new ArrayList<>() ); - Map> values = new HashMap<>(); - for ( List list : rows ) { - for ( Map.Entry entry : resultColMapping.entrySet() ) { - if ( !values.containsKey( entry.getKey() ) ) { - values.put( entry.getKey(), new LinkedList<>() ); + // Execute Query + try { + PolyphenyDbSignature signature = sourceStatement.getQueryProcessor().prepareQuery( sourceRel, sourceRel.rel.getCluster().getTypeFactory().builder().build(), true ); + final Enumerable enumerable = signature.enumerable( sourceStatement.getDataContext() ); + //noinspection unchecked + Iterator sourceIterator = enumerable.iterator(); + + Map resultColMapping = new HashMap<>(); + for ( CatalogColumn catalogColumn : selectColumnList ) { + int i = 0; + for ( ColumnMetaData metaData : signature.columns ) { + if ( metaData.columnName.equalsIgnoreCase( catalogColumn.name ) ) { + resultColMapping.put( catalogColumn.id, i ); } - values.get( entry.getKey() ).add( list.get( entry.getValue() ) ); + i++; } } - for ( Map.Entry> v : values.entrySet() ) { - targetStatement.getDataContext().addParameterValues( v.getKey(), null, v.getValue() ); - } - Iterator iterator = targetStatement.getQueryProcessor() - .prepareQuery( targetRel, sourceRel.validatedRowType, true ) - .enumerable( targetStatement.getDataContext() ) - .iterator(); - //noinspection WhileLoopReplaceableByForEach - while ( iterator.hasNext() ) { - iterator.next(); + + int batchSize = RuntimeConfig.DATA_MIGRATOR_BATCH_SIZE.getInteger(); + while ( sourceIterator.hasNext() ) { + List> rows = MetaImpl.collect( + signature.cursorFactory, + 
LimitIterator.of( sourceIterator, batchSize ), + new ArrayList<>() ); + Map> values = new HashMap<>(); + for ( List list : rows ) { + for ( Map.Entry entry : resultColMapping.entrySet() ) { + if ( !values.containsKey( entry.getKey() ) ) { + values.put( entry.getKey(), new LinkedList<>() ); + } + values.get( entry.getKey() ).add( list.get( entry.getValue() ) ); + } + } + for ( Map.Entry> v : values.entrySet() ) { + targetStatement.getDataContext().addParameterValues( v.getKey(), null, v.getValue() ); + } + Iterator iterator = targetStatement.getQueryProcessor() + .prepareQuery( targetRel, sourceRel.validatedRowType, true ) + .enumerable( targetStatement.getDataContext() ) + .iterator(); + //noinspection WhileLoopReplaceableByForEach + while ( iterator.hasNext() ) { + iterator.next(); + } + targetStatement.getDataContext().resetParameterValues(); } - targetStatement.getDataContext().resetParameterValues(); + } catch ( Throwable t ) { + throw new RuntimeException( t ); } - } catch ( Throwable t ) { - throw new RuntimeException( t ); } } - private RelRoot buildInsertStatement( Statement statement, List to ) { + private RelRoot buildInsertStatement( Statement statement, List to, long partitionId ) { List qualifiedTableName = ImmutableList.of( - PolySchemaBuilder.buildAdapterSchemaName( - to.get( 0 ).adapterUniqueName, - to.get( 0 ).getLogicalSchemaName(), - to.get( 0 ).physicalSchemaName ), - to.get( 0 ).getLogicalTableName() ); + PolySchemaBuilder.buildAdapterSchemaName( to.get( 0 ).adapterUniqueName, to.get( 0 ).getLogicalSchemaName(), to.get( 0 ).physicalSchemaName ), + to.get( 0 ).getLogicalTableName() + "_" + partitionId ); RelOptTable physical = statement.getTransaction().getCatalogReader().getTableForMember( qualifiedTableName ); ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); @@ -185,13 +200,10 @@ private RelRoot buildInsertStatement( Statement statement, List to ) { + private RelRoot buildUpdateStatement( Statement statement, List to, long partitionId ) { List qualifiedTableName = ImmutableList.of( - PolySchemaBuilder.buildAdapterSchemaName( - to.get( 0 ).adapterUniqueName, - to.get( 0 ).getLogicalSchemaName(), - to.get( 0 ).physicalSchemaName ), - to.get( 0 ).getLogicalTableName() ); + PolySchemaBuilder.buildAdapterSchemaName( to.get( 0 ).adapterUniqueName, to.get( 0 ).getLogicalSchemaName(), to.get( 0 ).physicalSchemaName ), + to.get( 0 ).getLogicalTableName() + "_" + partitionId ); RelOptTable physical = statement.getTransaction().getCatalogReader().getTableForMember( qualifiedTableName ); ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); @@ -252,20 +264,13 @@ private RelRoot buildUpdateStatement( Statement statement, List placements ) { - // Get map of placements by adapter - Map> placementsByAdapter = new HashMap<>(); - for ( CatalogColumnPlacement p : placements ) { - placementsByAdapter.putIfAbsent( p.getAdapterUniqueName(), new LinkedList<>() ); - placementsByAdapter.get( p.getAdapterUniqueName() ).add( p ); - } - + private RelRoot getSourceIterator( Statement statement, Map> placementDistribution ) { // Build Query RelOptCluster cluster = RelOptCluster.create( statement.getQueryProcessor().getPlanner(), new RexBuilder( statement.getTransaction().getTypeFactory() ) ); - RelNode node = statement.getRouter().buildJoinedTableScan( statement, cluster, placements ); + RelNode node = statement.getRouter().buildJoinedTableScan( statement, cluster, placementDistribution ); return RelRoot.of( node, SqlKind.SELECT ); } @@ -293,7 
+298,7 @@ private List selectSourcePlacements( CatalogTable table, if ( table.placementsByAdapter.get( adapterIdWithMostPlacements ).contains( cid ) ) { placementList.add( Catalog.getInstance().getColumnPlacement( adapterIdWithMostPlacements, cid ) ); } else { - for ( CatalogColumnPlacement placement : Catalog.getInstance().getColumnPlacements( cid ) ) { + for ( CatalogColumnPlacement placement : Catalog.getInstance().getColumnPlacement( cid ) ) { if ( placement.adapterId != excludingAdapterId ) { placementList.add( placement ); break; @@ -306,4 +311,260 @@ private List selectSourcePlacements( CatalogTable table, return placementList; } + + /** + * Currently used to transfer data if a partitioned table is about to be merged. + * For Table Partitioning use {@link #copyPartitionData(Transaction, CatalogAdapter, CatalogTable, CatalogTable, List, List, List)} instead + * + * @param transaction Transactional scope + * @param store Target Store where data should be migrated to + * @param sourceTable Source Table from where data is queried + * @param targetTable Target Table where data is to be inserted + * @param columns Necessary columns on target + * @param placementDistribution Precomputed mapping of partitions and the necessary column placements + * @param targetPartitionIds Target Partitions where data should be inserted + */ + @Override + public void copySelectiveData( Transaction transaction, CatalogAdapter store, CatalogTable sourceTable, CatalogTable targetTable, List columns, Map> placementDistribution, List targetPartitionIds ) { + CatalogPrimaryKey sourcePrimaryKey = Catalog.getInstance().getPrimaryKey( sourceTable.primaryKey ); + + // Check Lists + List targetColumnPlacements = new LinkedList<>(); + for ( CatalogColumn catalogColumn : columns ) { + targetColumnPlacements.add( Catalog.getInstance().getColumnPlacement( store.id, catalogColumn.id ) ); + } + + List selectColumnList = new LinkedList<>( columns ); + + // Add primary keys to select column list + for ( long cid : sourcePrimaryKey.columnIds ) { + CatalogColumn catalogColumn = Catalog.getInstance().getColumn( cid ); + if ( !selectColumnList.contains( catalogColumn ) ) { + selectColumnList.add( catalogColumn ); + } + } + + Statement sourceStatement = transaction.createStatement(); + Statement targetStatement = transaction.createStatement(); + + RelRoot sourceRel = getSourceIterator( sourceStatement, placementDistribution ); + RelRoot targetRel; + if ( Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( store.id, targetTable.id ).size() == columns.size() ) { + // There have been no placements for this table on this store before. 
Build insert statement + targetRel = buildInsertStatement( targetStatement, targetColumnPlacements, targetPartitionIds.get( 0 ) ); + } else { + // Build update statement + targetRel = buildUpdateStatement( targetStatement, targetColumnPlacements, targetPartitionIds.get( 0 ) ); + } + + // Execute Query + try { + PolyphenyDbSignature signature = sourceStatement.getQueryProcessor().prepareQuery( sourceRel, sourceRel.rel.getCluster().getTypeFactory().builder().build(), true ); + final Enumerable enumerable = signature.enumerable( sourceStatement.getDataContext() ); + //noinspection unchecked + Iterator sourceIterator = enumerable.iterator(); + + Map resultColMapping = new HashMap<>(); + for ( CatalogColumn catalogColumn : selectColumnList ) { + int i = 0; + for ( ColumnMetaData metaData : signature.columns ) { + if ( metaData.columnName.equalsIgnoreCase( catalogColumn.name ) ) { + resultColMapping.put( catalogColumn.id, i ); + } + i++; + } + } + + int batchSize = RuntimeConfig.DATA_MIGRATOR_BATCH_SIZE.getInteger(); + while ( sourceIterator.hasNext() ) { + List> rows = MetaImpl.collect( + signature.cursorFactory, + LimitIterator.of( sourceIterator, batchSize ), + new ArrayList<>() ); + Map> values = new HashMap<>(); + for ( List list : rows ) { + for ( Map.Entry entry : resultColMapping.entrySet() ) { + if ( !values.containsKey( entry.getKey() ) ) { + values.put( entry.getKey(), new LinkedList<>() ); + } + values.get( entry.getKey() ).add( list.get( entry.getValue() ) ); + } + } + for ( Map.Entry> v : values.entrySet() ) { + targetStatement.getDataContext().addParameterValues( v.getKey(), null, v.getValue() ); + } + Iterator iterator = targetStatement.getQueryProcessor() + .prepareQuery( targetRel, sourceRel.validatedRowType, true ) + .enumerable( targetStatement.getDataContext() ) + .iterator(); + + //noinspection WhileLoopReplaceableByForEach + while ( iterator.hasNext() ) { + iterator.next(); + } + targetStatement.getDataContext().resetParameterValues(); + } + } catch ( Throwable t ) { + throw new RuntimeException( t ); + } + } + + + /** + * Currently used to transfer data if an unpartitioned table is about to be partitioned. + * For Table Merge use {@link #copySelectiveData(Transaction, CatalogAdapter, CatalogTable, CatalogTable, List, Map, List)} instead + * + * @param transaction Transactional scope + * @param store Target Store where data should be migrated to + * @param sourceTable Source Table from where data is queried + * @param targetTable Target Table where data is to be inserted + * @param columns Necessary columns on target + * @param sourcePartitionIds Source Partitions which need to be considered for querying + * @param targetPartitionIds Target Partitions where data should be inserted + */ + @Override + public void copyPartitionData( Transaction transaction, CatalogAdapter store, CatalogTable sourceTable, CatalogTable targetTable, List columns, List sourcePartitionIds, List targetPartitionIds ) { + if ( sourceTable.id != targetTable.id ) { + throw new RuntimeException( "Unsupported migration scenario. 
Table ID mismatch" ); + } + + CatalogPrimaryKey primaryKey = Catalog.getInstance().getPrimaryKey( sourceTable.primaryKey ); + + // Check Lists + List targetColumnPlacements = new LinkedList<>(); + for ( CatalogColumn catalogColumn : columns ) { + targetColumnPlacements.add( Catalog.getInstance().getColumnPlacement( store.id, catalogColumn.id ) ); + } + + List selectColumnList = new LinkedList<>( columns ); + + // Add primary keys to select column list + for ( long cid : primaryKey.columnIds ) { + CatalogColumn catalogColumn = Catalog.getInstance().getColumn( cid ); + if ( !selectColumnList.contains( catalogColumn ) ) { + selectColumnList.add( catalogColumn ); + } + } + + // Add partition columns to select column list + long partitionColumnId = targetTable.partitionProperty.partitionColumnId; + CatalogColumn partitionColumn = Catalog.getInstance().getColumn( partitionColumnId ); + if ( !selectColumnList.contains( partitionColumn ) ) { + selectColumnList.add( partitionColumn ); + } + + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( targetTable.partitionProperty.partitionType ); + + // We need a columnPlacement for every partition + Map> placementDistribution = new HashMap<>(); + + placementDistribution.put( sourceTable.partitionProperty.partitionIds.get( 0 ), selectSourcePlacements( sourceTable, selectColumnList, -1 ) ); + + Statement sourceStatement = transaction.createStatement(); + + // Map each partitionId to its target Statement + Map targetStatements = new HashMap<>(); + + // Create one target Statement per target partition + targetPartitionIds.forEach( id -> targetStatements.put( id, transaction.createStatement() ) ); + + Map targetRels = new HashMap<>(); + + RelRoot sourceRel = getSourceIterator( sourceStatement, placementDistribution ); + if ( Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( store.id, sourceTable.id ).size() == columns.size() ) { + // There have been no placements for this table on this store before. 
Build insert statement + targetPartitionIds.forEach( id -> targetRels.put( id, buildInsertStatement( targetStatements.get( id ), targetColumnPlacements, id ) ) ); + } else { + // Build update statement + targetPartitionIds.forEach( id -> targetRels.put( id, buildUpdateStatement( targetStatements.get( id ), targetColumnPlacements, id ) ) ); + } + + // Execute Query + try { + PolyphenyDbSignature signature = sourceStatement.getQueryProcessor().prepareQuery( sourceRel, sourceRel.rel.getCluster().getTypeFactory().builder().build(), true ); + final Enumerable enumerable = signature.enumerable( sourceStatement.getDataContext() ); + //noinspection unchecked + Iterator sourceIterator = enumerable.iterator(); + + Map resultColMapping = new HashMap<>(); + for ( CatalogColumn catalogColumn : selectColumnList ) { + int i = 0; + for ( ColumnMetaData metaData : signature.columns ) { + if ( metaData.columnName.equalsIgnoreCase( catalogColumn.name ) ) { + resultColMapping.put( catalogColumn.id, i ); + } + i++; + } + } + + int partitionColumnIndex = -1; + String parsedValue = null; + String nullifiedPartitionValue = partitionManager.getUnifiedNullValue(); + if ( targetTable.isPartitioned ) { + if ( resultColMapping.containsKey( targetTable.partitionProperty.partitionColumnId ) ) { + partitionColumnIndex = resultColMapping.get( targetTable.partitionProperty.partitionColumnId ); + } else { + parsedValue = nullifiedPartitionValue; + } + } + + int batchSize = RuntimeConfig.DATA_MIGRATOR_BATCH_SIZE.getInteger(); + while ( sourceIterator.hasNext() ) { + List> rows = MetaImpl.collect( signature.cursorFactory, LimitIterator.of( sourceIterator, batchSize ), new ArrayList<>() ); + + Map>> partitionValues = new HashMap<>(); + + for ( List row : rows ) { + long currentPartitionId = -1; + if ( partitionColumnIndex >= 0 ) { + parsedValue = nullifiedPartitionValue; + if ( row.get( partitionColumnIndex ) != null ) { + parsedValue = row.get( partitionColumnIndex ).toString(); + } + } + + currentPartitionId = partitionManager.getTargetPartitionId( targetTable, parsedValue ); + + for ( Map.Entry entry : resultColMapping.entrySet() ) { + if ( entry.getKey() == partitionColumn.id && !columns.contains( partitionColumn ) ) { + continue; + } + if ( !partitionValues.containsKey( currentPartitionId ) ) { + partitionValues.put( currentPartitionId, new HashMap<>() ); + } + if ( !partitionValues.get( currentPartitionId ).containsKey( entry.getKey() ) ) { + partitionValues.get( currentPartitionId ).put( entry.getKey(), new LinkedList<>() ); + } + partitionValues.get( currentPartitionId ).get( entry.getKey() ).add( row.get( entry.getValue() ) ); + } + } + + // Iterate over partitionValues in that way we don't even execute a statement which has no rows + for ( Map.Entry>> dataOnPartition : partitionValues.entrySet() ) { + long partitionId = dataOnPartition.getKey(); + Map> values = dataOnPartition.getValue(); + Statement currentTargetStatement = targetStatements.get( partitionId ); + + for ( Map.Entry> columnDataOnPartition : values.entrySet() ) { + // Check partitionValue + currentTargetStatement.getDataContext().addParameterValues( columnDataOnPartition.getKey(), null, columnDataOnPartition.getValue() ); + } + + Iterator iterator = currentTargetStatement.getQueryProcessor() + .prepareQuery( targetRels.get( partitionId ), sourceRel.validatedRowType, true ) + .enumerable( currentTargetStatement.getDataContext() ) + .iterator(); + //noinspection WhileLoopReplaceableByForEach + while ( iterator.hasNext() ) { + iterator.next(); + } + 
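+ // Clear the bound parameter values so this partition's statement can be reused for the next batch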
currentTargetStatement.getDataContext().resetParameterValues(); + } + } + } catch ( Throwable t ) { + throw new RuntimeException( t ); + } + } + } diff --git a/dbms/src/main/java/org/polypheny/db/processing/QueryParameterizer.java b/dbms/src/main/java/org/polypheny/db/processing/QueryParameterizer.java index f153c78d62..28f698edd5 100644 --- a/dbms/src/main/java/org/polypheny/db/processing/QueryParameterizer.java +++ b/dbms/src/main/java/org/polypheny/db/processing/QueryParameterizer.java @@ -31,6 +31,7 @@ import org.polypheny.db.rel.RelShuttleImpl; import org.polypheny.db.rel.core.TableModify; import org.polypheny.db.rel.logical.LogicalFilter; +import org.polypheny.db.rel.logical.LogicalModifyCollect; import org.polypheny.db.rel.logical.LogicalProject; import org.polypheny.db.rel.logical.LogicalTableModify; import org.polypheny.db.rel.logical.LogicalValues; @@ -113,17 +114,27 @@ public RelNode visit( RelNode other ) { if ( input instanceof LogicalValues ) { List projects = new ArrayList<>(); boolean firstRow = true; + HashMap idxMapping = new HashMap<>(); for ( ImmutableList node : ((LogicalValues) input).getTuples() ) { int i = 0; for ( RexLiteral literal : node ) { - int idx = index.getAndIncrement(); - RelDataType type = input.getRowType().getFieldList().get( i++ ).getValue(); + int idx; + if ( !idxMapping.containsKey( i ) ) { + idx = index.getAndIncrement(); + idxMapping.put( i, idx ); + } else { + idx = idxMapping.get( i ); + } + RelDataType type = input.getRowType().getFieldList().get( i ).getValue(); if ( firstRow ) { - types.add( type ); projects.add( new RexDynamicParam( type, idx ) ); - values.put( i, new ArrayList<>( ((LogicalValues) input).getTuples().size() ) ); } - values.get( i ).add( new ParameterValue( idx, type, literal.getValueForQueryParameterizer() ) ); + if ( !values.containsKey( idx ) ) { + types.add( type ); + values.put( idx, new ArrayList<>( ((LogicalValues) input).getTuples().size() ) ); + } + values.get( idx ).add( new ParameterValue( idx, type, literal.getValueForQueryParameterizer() ) ); + i++; } firstRow = false; } @@ -146,6 +157,17 @@ public RelNode visit( RelNode other ) { modify.getUpdateColumnList(), newSourceExpression, modify.isFlattened() ); + } else if ( other instanceof LogicalModifyCollect ) { + List inputs = new ArrayList<>( other.getInputs().size() ); + for ( RelNode node : other.getInputs() ) { + inputs.add( visit( node ) ); + } + return new LogicalModifyCollect( + other.getCluster(), + other.getTraitSet(), + inputs, + ((LogicalModifyCollect) other).all + ); } else { return super.visit( other ); } diff --git a/dbms/src/main/java/org/polypheny/db/router/AbstractRouter.java b/dbms/src/main/java/org/polypheny/db/router/AbstractRouter.java index a9641b5577..09e5cc730b 100644 --- a/dbms/src/main/java/org/polypheny/db/router/AbstractRouter.java +++ b/dbms/src/main/java/org/polypheny/db/router/AbstractRouter.java @@ -16,6 +16,7 @@ package org.polypheny.db.router; + import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; import com.google.common.collect.ImmutableList; @@ -23,11 +24,13 @@ import java.util.Comparator; import java.util.Deque; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; +import java.util.Set; import java.util.stream.Collectors; import lombok.AllArgsConstructor; import lombok.Getter; @@ -36,6 +39,7 @@ import org.polypheny.db.catalog.Catalog.TableType; import 
org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.catalog.exceptions.UnknownColumnException; import org.polypheny.db.config.RuntimeConfig; @@ -64,6 +68,7 @@ import org.polypheny.db.rel.logical.LogicalTableModify; import org.polypheny.db.rel.logical.LogicalTableScan; import org.polypheny.db.rel.logical.LogicalValues; +import org.polypheny.db.rel.type.RelDataType; import org.polypheny.db.rel.type.RelDataTypeField; import org.polypheny.db.rex.RexCall; import org.polypheny.db.rex.RexDynamicParam; @@ -118,7 +123,6 @@ public RelRoot route( RelRoot logicalRoot, Statement statement, ExecutionTimeMon RelNode routed; analyze( statement, logicalRoot ); if ( logicalRoot.rel instanceof LogicalTableModify ) { - routed = routeDml( logicalRoot.rel, statement ); } else if ( logicalRoot.rel instanceof ConditionalExecute ) { routed = handleConditionalExecute( logicalRoot.rel, statement ); @@ -178,10 +182,6 @@ protected RelBuilder buildSelect( RelNode node, RelBuilder builder, Statement st RelOptTableImpl table = (RelOptTableImpl) ((LogicalFilter) node).getInput().getTable(); if ( table.getTable() instanceof LogicalTable ) { - - // TODO Routing of partitioned tables is very limited. This should be improved to also apply sophisticated - // routing strategies, especially when we also get rid of the worst-case routing. - LogicalTable t = ((LogicalTable) table.getTable()); CatalogTable catalogTable; catalogTable = Catalog.getInstance().getTable( t.getTableId() ); @@ -196,7 +196,8 @@ public RelNode visit( LogicalFilter filter ) { } } ); - if ( whereClauseVisitor.valueIdentified ) { + if ( whereClauseVisitor.valueIdentified && !whereClauseVisitor.unsupportedFilter ) { + //if ( whereClauseVisitor.valueIdentified ) { List values = whereClauseVisitor.getValues().stream() .map( Object::toString ) .collect( Collectors.toList() ); @@ -228,21 +229,23 @@ public RelNode visit( LogicalFilter filter ) { LogicalTable t = ((LogicalTable) table.getTable()); CatalogTable catalogTable; List placements; + Map> placementDistribution = new HashMap<>(); catalogTable = Catalog.getInstance().getTable( t.getTableId() ); + List accessedPartitionList; // Check if table is even partitioned if ( catalogTable.isPartitioned ) { - // TODO Routing of partitioned tables is very limited. This should be improved to also apply sophisticated - // routing strategies, especially when we also get rid of the worst-case routing. 
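+ // The values extracted from the WHERE clause are now used to identify the partitions that actually need to be scanned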
- if ( log.isDebugEnabled() ) { log.debug( "VALUE from Map: {} id: {}", filterMap.get( node.getId() ), node.getId() ); } List partitionValues = filterMap.get( node.getId() ); - PartitionManagerFactory partitionManagerFactory = new PartitionManagerFactory(); - PartitionManager partitionManager = partitionManagerFactory.getInstance( catalogTable.partitionType ); + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( catalogTable.partitionType ); + + // Only possible if partitions can be uniquely identified + // For anything related to !=, worst-case routing is applied (selecting from all partitions) if ( partitionValues != null ) { if ( log.isDebugEnabled() ) { log.debug( "TableID: {} is partitioned on column: {} - {}", @@ -250,30 +253,43 @@ public RelNode visit( LogicalFilter filter ) { catalogTable.partitionColumnId, catalog.getColumn( catalogTable.partitionColumnId ).name ); } - if ( partitionValues.size() == 1 ) { + if ( partitionValues.size() != 0 ) { List identPartitions = new ArrayList<>(); for ( String partitionValue : partitionValues ) { - log.debug( "Extracted PartitionValue: {}", partitionValue ); + if ( log.isDebugEnabled() ) { + log.debug( "Extracted PartitionValue: {}", partitionValue ); + } long identPart = partitionManager.getTargetPartitionId( catalogTable, partitionValue ); identPartitions.add( identPart ); - log.debug( "Identified PartitionId: {} for value: {}", identPart, partitionValue ); + if ( log.isDebugEnabled() ) { + log.debug( "Identified PartitionId: {} for value: {}", identPart, partitionValue ); + } } - placements = partitionManager.getRelevantPlacements( catalogTable, identPartitions ); + // Add identified partitions to monitoring object + placementDistribution = partitionManager.getRelevantPlacements( catalogTable, identPartitions, new ArrayList<>() ); + accessedPartitionList = identPartitions; } else { - placements = partitionManager.getRelevantPlacements( catalogTable, null ); + placementDistribution = partitionManager.getRelevantPlacements( catalogTable, catalogTable.partitionProperty.partitionIds, new ArrayList<>() ); + accessedPartitionList = catalogTable.partitionProperty.partitionIds; } } else { - // TODO Change to worst-case - placements = partitionManager.getRelevantPlacements( catalogTable, null ); - //placements = selectPlacement( node, catalogTable ); + placementDistribution = partitionManager.getRelevantPlacements( catalogTable, catalogTable.partitionProperty.partitionIds, new ArrayList<>() ); + accessedPartitionList = catalogTable.partitionProperty.partitionIds; } } else { - log.debug( "{} is NOT partitioned - Routing will be easy", catalogTable.name ); + if ( log.isDebugEnabled() ) { + log.debug( "{} is NOT partitioned - Routing will be easy", catalogTable.name ); + } placements = selectPlacement( node, catalogTable ); + accessedPartitionList = catalogTable.partitionProperty.partitionIds; + placementDistribution.put( catalogTable.partitionProperty.partitionIds.get( 0 ), placements ); } - return builder.push( buildJoinedTableScan( statement, cluster, placements ) ); + if ( statement.getTransaction().getMonitoringData() != null ) { + statement.getTransaction().getMonitoringData().setAccessedPartitions( accessedPartitionList ); + } + return builder.push( buildJoinedTableScan( statement, cluster, placementDistribution ) ); } else { throw new RuntimeException( "Unexpected table. Only logical tables expected here!" 
); @@ -350,39 +366,40 @@ protected RelNode routeDml( RelNode node, Statement statement ) { long pkid = catalogTable.primaryKey; List pkColumnIds = Catalog.getInstance().getPrimaryKey( pkid ).columnIds; CatalogColumn pkColumn = Catalog.getInstance().getColumn( pkColumnIds.get( 0 ) ); - List pkPlacements = catalog.getColumnPlacements( pkColumn.id ); + + // Essentially gets a list of all stores where this table resides + List pkPlacements = catalog.getColumnPlacement( pkColumn.id ); if ( catalogTable.isPartitioned && log.isDebugEnabled() ) { - log.debug( "\nListing all relevant stores for table: '{}' and all partitions: {}", catalogTable.name, catalogTable.partitionIds ); + log.debug( "\nListing all relevant stores for table: '{}' and all partitions: {}", catalogTable.name, catalogTable.partitionProperty.partitionGroupIds ); for ( CatalogColumnPlacement dataPlacement : pkPlacements ) { log.debug( "\t\t -> '{}' {}\t{}", dataPlacement.adapterUniqueName, - catalog.getPartitionsOnDataPlacement( dataPlacement.adapterId, dataPlacement.tableId ), - catalog.getPartitionsIndexOnDataPlacement( dataPlacement.adapterId, dataPlacement.tableId ) ); + catalog.getPartitionGroupsOnDataPlacement( dataPlacement.adapterId, dataPlacement.tableId ), + catalog.getPartitionGroupsIndexOnDataPlacement( dataPlacement.adapterId, dataPlacement.tableId ) ); } } // Execute on all primary key placements - List modifies = new ArrayList<>( pkPlacements.size() ); + List modifies = new ArrayList<>(); + + // Needed for partitioned updates when source partition and target partition are not equal + // SET Value is the new partition, where clause is the source + boolean operationWasRewritten = false; + + Map newParameterValues = new HashMap<>(); for ( CatalogColumnPlacement pkPlacement : pkPlacements ) { - CatalogReader catalogReader = statement.getTransaction().getCatalogReader(); - List qualifiedTableName = ImmutableList.of( - PolySchemaBuilder.buildAdapterSchemaName( - pkPlacement.adapterUniqueName, - catalogTable.getSchemaName(), - pkPlacement.physicalSchemaName ), - t.getLogicalTableName() ); - RelOptTable physical = catalogReader.getTableForMember( qualifiedTableName ); - ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); + CatalogReader catalogReader = statement.getTransaction().getCatalogReader(); // Get placements on store - List placementsOnAdapter = catalog.getColumnPlacementsOnAdapter( pkPlacement.adapterId, catalogTable.id ); + List placementsOnAdapter = catalog.getColumnPlacementsOnAdapterPerTable( pkPlacement.adapterId, catalogTable.id ); // If this is a update, check whether we need to execute on this store at all List updateColumnList = ((LogicalTableModify) node).getUpdateColumnList(); List sourceExpressionList = ((LogicalTableModify) node).getSourceExpressionList(); if ( placementsOnAdapter.size() != catalogTable.columnIds.size() ) { + if ( ((LogicalTableModify) node).getOperation() == Operation.UPDATE ) { updateColumnList = new LinkedList<>( ((LogicalTableModify) node).getUpdateColumnList() ); sourceExpressionList = new LinkedList<>( ((LogicalTableModify) node).getSourceExpressionList() ); @@ -407,13 +424,17 @@ protected RelNode routeDml( RelNode node, Statement statement ) { } } + long identPart = -1; + long identifiedPartitionForSetValue = -1; + Set accessedPartitionList = new HashSet<>(); // Identify where clause of UPDATE if ( catalogTable.isPartitioned ) { boolean worstCaseRouting = false; + Set identifiedPartitionsInFilter = new HashSet<>(); - PartitionManagerFactory 
partitionManagerFactory = new PartitionManagerFactory(); - PartitionManager partitionManager = partitionManagerFactory.getInstance( catalogTable.partitionType ); - partitionManager.validatePartitionDistribution( catalogTable ); + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); + PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( catalogTable.partitionType ); + // partitionManager.validatePartitionGroupDistribution( catalogTable ); WhereClauseVisitor whereClauseVisitor = new WhereClauseVisitor( statement, catalogTable.columnIds.indexOf( catalogTable.partitionColumnId ) ); node.accept( new RelShuttleImpl() { @@ -425,22 +446,29 @@ public RelNode visit( LogicalFilter filter ) { } } ); - List whereClauseValue = null; + List whereClauseValues = null; if ( !whereClauseVisitor.getValues().isEmpty() ) { - if ( whereClauseVisitor.getValues().size() == 1 ) { - whereClauseValue = whereClauseVisitor.getValues().stream() - .map( Object::toString ) - .collect( Collectors.toList() ); - log.debug( "Found Where Clause Values: {}", whereClauseValue ); - worstCaseRouting = true; + whereClauseValues = whereClauseVisitor.getValues().stream() + .map( Object::toString ) + .collect( Collectors.toList() ); + if ( log.isDebugEnabled() ) { + log.debug( "Found Where Clause Values: {}", whereClauseValues ); } + worstCaseRouting = true; } - long identPart = -1; + if ( whereClauseValues != null ) { + for ( String value : whereClauseValues ) { + worstCaseRouting = false; + identPart = (int) partitionManager.getTargetPartitionId( catalogTable, value ); + accessedPartitionList.add( identPart ); + identifiedPartitionsInFilter.add( identPart ); + } + } String partitionValue = ""; - //set true if partitionColumn is part of UPDATE Statement, else assume worst case routing - boolean partitionColumnIdentified = false; + // Set true if partitionColumn is part of UPDATE Statement, else assume worst case routing + if ( ((LogicalTableModify) node).getOperation() == Operation.UPDATE ) { // In case of update always use worst case routing for now. 
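Before following the branches below, it helps to see the overall decision in isolation: the router tries to resolve every WHERE-clause literal to a partition and only stays in worst-case mode if nothing can be resolved. A condensed sketch with hypothetical names, not the actual method:

import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.function.Function;

// Hypothetical condensation of the UPDATE routing below: resolve WHERE literals
// to partitions via the partition manager, falling back to all partitions when
// no literal could be extracted.
final class UpdateRoutingSketch {

    static Set<Long> resolveAccessedPartitions(
            List<String> whereClauseValues,            // literals found on the partition column, or null
            List<Long> allPartitionIds,                // catalogTable.partitionProperty.partitionIds
            Function<String, Long> targetPartition ) { // stands in for partitionManager.getTargetPartitionId()
        if ( whereClauseValues == null || whereClauseValues.isEmpty() ) {
            return new HashSet<>( allPartitionIds );   // worst case: route to every partition
        }
        Set<Long> accessed = new HashSet<>();
        for ( String value : whereClauseValues ) {
            accessed.add( targetPartition.apply( value ) );
        }
        return accessed;
    }
}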
// Since you have to identify the current partition to delete the entry and then create a new entry on the correct partitions @@ -449,10 +477,11 @@ public RelNode visit( LogicalFilter filter ) { for ( String cn : updateColumnList ) { try { if ( catalog.getColumn( catalogTable.id, cn ).id == catalogTable.partitionColumnId ) { - log.debug( " UPDATE: Found PartitionColumnID Match: '{}' at index: {}", catalogTable.partitionColumnId, index ); + if ( log.isDebugEnabled() ) { + log.debug( " UPDATE: Found PartitionColumnID Match: '{}' at index: {}", catalogTable.partitionColumnId, index ); + } - //Routing/Locking can now be executed on certain partitions - partitionColumnIdentified = true; + // Routing/Locking can now be executed on certain partitions partitionValue = sourceExpressionList.get( index ).toString().replace( "'", "" ); if ( log.isDebugEnabled() ) { log.debug( "UPDATE: partitionColumn-value: '{}' should be put on partition: {}", @@ -460,6 +489,9 @@ public RelNode visit( LogicalFilter filter ) { partitionManager.getTargetPartitionId( catalogTable, partitionValue ) ); } identPart = (int) partitionManager.getTargetPartitionId( catalogTable, partitionValue ); + // Needed to verify if UPDATE shall be executed on two partitions or not + identifiedPartitionForSetValue = identPart; + accessedPartitionList.add( identPart ); break; } } catch ( UnknownColumnException e ) { @@ -468,62 +500,318 @@ public RelNode visit( LogicalFilter filter ) { index++; } - // If only one where clause op - if ( whereClauseValue != null && partitionColumnIdentified ) { - if ( whereClauseValue.size() == 1 && identPart == partitionManager.getTargetPartitionId( catalogTable, whereClauseValue.get( 0 ) ) ) { - worstCaseRouting = false; - } else { - worstCaseRouting = true; - log.debug( "Activate WORST-CASE ROUTING" ); + // If WHERE clause has any value for partition column + if ( identifiedPartitionsInFilter.size() > 0 ) { + + // Partition has been identified in SET + if ( identifiedPartitionForSetValue != -1 ) { + + // SET value and single WHERE clause point to same partition. 
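The comparison performed in the branch below reduces to a small predicate: an UPDATE touching the partition column may only proceed in place if the SET value and the single WHERE value map to the same partition. A compact sketch of that check:

import java.util.Set;

// Sketch of the in-place-update check applied below: only a single identified
// WHERE partition that matches the SET partition allows the update to proceed;
// everything else is rejected as an attempt to move the row across partitions.
final class PartitionKeyUpdateCheck {

    static boolean isInPlaceUpdate( long setPartitionId, Set<Long> wherePartitionIds ) {
        return wherePartitionIds.size() == 1 && wherePartitionIds.contains( setPartitionId );
    }

    public static void main( String[] args ) {
        System.out.println( isInPlaceUpdate( 7L, Set.of( 7L ) ) ); // true  -> update in place
        System.out.println( isInPlaceUpdate( 7L, Set.of( 3L ) ) ); // false -> "Updating partition key is not allowed"
    }
}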
+ // In-place update possible + if ( identifiedPartitionsInFilter.size() == 1 && identifiedPartitionsInFilter.contains( identifiedPartitionForSetValue ) ) { + if ( log.isDebugEnabled() ) { + log.debug( "oldValue and new value reside on same partition: {}", identifiedPartitionForSetValue ); + } + worstCaseRouting = false; + } else { + throw new RuntimeException( "Updating partition key is not allowed" ); + + /* TODO add possibility to substitute the update with an insert into the target partition from all source partitions + // Is currently blocked + // needs to do an insert into the target partition selecting from all other partitions first and then delete on the source partitions + worstCaseRouting = false; + log.debug( "oldValue and new value reside on same partition: " + identifiedPartitionForSetValue ); + + // Substitute UPDATE operation with DELETE on all partitionIds of WHERE clause + for ( long currentPart : identifiedPartitionsInFilter ) { + + if ( !catalog.getPartitionsOnDataPlacement( pkPlacement.adapterId, catalogTable.id ).contains( currentPart ) ) { + continue; + } + + List qualifiedTableName = ImmutableList.of( + PolySchemaBuilder.buildAdapterSchemaName( + pkPlacement.adapterUniqueName, + catalogTable.getSchemaName(), + pkPlacement.physicalSchemaName, + currentPart ), + t.getLogicalTableName() ); + RelOptTable physical = catalogReader.getTableForMember( qualifiedTableName ); + ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); + + RelNode input = buildDml( + recursiveCopy( node.getInput( 0 ) ), + RelBuilder.create( statement, cluster ), + catalogTable, + placementsOnAdapter, + catalog.getPartitionPlacement( pkPlacement.adapterId, currentPart ), + statement, + cluster ).build(); + + TableModify deleteModify = LogicalTableModify.create( + physical, + catalogReader, + input, + Operation.DELETE, + null, + null, + ((LogicalTableModify) node).isFlattened() ); + + modifies.add( deleteModify ); + + + } + + // Inject INSERT statement for identified SET partitionId + // Otherwise the data migrator would be needed + if ( catalog.getPartitionsOnDataPlacement( pkPlacement.adapterId, catalogTable.id ).contains( identifiedPartitionForSetValue ) ) { + + /* List qualifiedTableName = ImmutableList.of( + PolySchemaBuilder.buildAdapterSchemaName( + pkPlacement.adapterUniqueName, + catalogTable.getSchemaName(), + pkPlacement.physicalSchemaName, + identifiedPartitionForSetValue ), + t.getLogicalTableName() ); + RelOptTable physical = catalogReader.getTableForMember( qualifiedTableName ); + ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); + + RelNode input = buildDml( + recursiveCopy( node.getInput( 0 ) ), + RelBuilder.create( statement, cluster ), + catalogTable, + placementsOnAdapter, + catalog.getPartitionPlacement( pkPlacement.adapterId, identifiedPartitionForSetValue ), + statement, + cluster ).build(); + + TableModify insertModify = modifiableTable.toModificationRel( + cluster, + physical, + catalogReader, + input, + Operation.INSERT, + null, + null, + ((LogicalTableModify) node).isFlattened() + ); + + modifies.add( insertModify ); + } + //operationWasRewritten = true; + + */ + } + + } // WHERE clause only + else { + throw new RuntimeException( "Updating partition key is not allowed" ); + + // Simply execute the UPDATE on all identified partitions + // Nothing to do + //worstCaseRouting = false; } - } else if ( whereClauseValue == null ) { + } // If only SET is specified + // Changes the value of the partition column so the complete table resides on only one partition + else if (
identifiedPartitionForSetValue != -1 ) { + + // Data migration: copy all other partitions besides the identified one towards the identified one + // Then inject a DELETE statement for all those partitions + + // Do the update only on the identified partition + + } // If nothing has been specified + // Partition functionality cannot be used --> worst case --> send query to every partition + else { worstCaseRouting = true; - log.debug( "Activate WORST-CASE ROUTING! No WHERE clause specified for partition column" ); - } else if ( whereClauseValue != null && !partitionColumnIdentified ) { - if ( whereClauseValue.size() == 1 ) { - identPart = (int) partitionManager.getTargetPartitionId( catalogTable, whereClauseValue.get( 0 ) ); - worstCaseRouting = false; - } else { - worstCaseRouting = true; - } + accessedPartitionList = new HashSet<>( catalogTable.partitionProperty.partitionIds ); } - // Since update needs to take current partition and target partition into account - //partitionColumnIdentified = false; } else if ( ((LogicalTableModify) node).getOperation() == Operation.INSERT ) { int i; + if ( ((LogicalTableModify) node).getInput() instanceof LogicalValues ) { - if ( ((LogicalValues) ((LogicalTableModify) node).getInput()).tuples.size() == 1 ) { - for ( i = 0; i < catalogTable.columnIds.size(); i++ ) { - if ( catalogTable.columnIds.get( i ) == catalogTable.partitionColumnId ) { - log.debug( "INSERT: Found PartitionColumnID: '{}' at column index: {}", catalogTable.partitionColumnId, i ); - partitionColumnIdentified = true; + // Get fieldList and map columns to index since they could be in arbitrary order + int partitionColumnIndex = -1; + Map resultColMapping = new HashMap<>(); + for ( int j = 0; j < (((LogicalTableModify) node).getInput()).getRowType().getFieldList().size(); j++ ) { + String columnFieldName = (((LogicalTableModify) node).getInput()).getRowType().getFieldList().get( j ).getKey(); + + // Retrieve columnId of fieldName and map it to its fieldList location of INSERT Stmt + int columnIndex = catalogTable.getColumnNames().indexOf( columnFieldName ); + resultColMapping.put( catalogTable.columnIds.get( columnIndex ), j ); + + // Determine location of partitionColumn in fieldList + if ( catalogTable.columnIds.get( columnIndex ) == catalogTable.partitionColumnId ) { + partitionColumnIndex = columnIndex; + if ( log.isDebugEnabled() ) { + log.debug( "INSERT: Found PartitionColumnID: '{}' at column index: {}", catalogTable.partitionColumnId, j ); worstCaseRouting = false; - partitionValue = ((LogicalValues) ((LogicalTableModify) node).getInput()).tuples.get( 0 ).get( i ).toString().replace( "'", "" ); - identPart = (int) partitionManager.getTargetPartitionId( catalogTable, partitionValue ); - break; } } - } else { - worstCaseRouting = true; } + + // Will execute all tuples that belong to the same partition jointly + Map>> tuplesOnPartition = new HashMap<>(); + for ( ImmutableList currentTuple : ((LogicalValues) ((LogicalTableModify) node).getInput()).tuples ) { + + worstCaseRouting = false; + if ( partitionColumnIndex == -1 || currentTuple.get( partitionColumnIndex ).getValue() == null ) { + partitionValue = partitionManager.getUnifiedNullValue(); + } else { + partitionValue = currentTuple.get( partitionColumnIndex ).toString().replace( "'", "" ); + } + identPart = (int) partitionManager.getTargetPartitionId( catalogTable, partitionValue ); + accessedPartitionList.add( identPart ); + + if ( !tuplesOnPartition.containsKey( identPart ) ) { + tuplesOnPartition.put( identPart, new
ArrayList<>() ); + } + tuplesOnPartition.get( identPart ).add( currentTuple ); + + } + + for ( Map.Entry>> partitionMapping : tuplesOnPartition.entrySet() ) { + Long currentPartitionId = partitionMapping.getKey(); + for ( ImmutableList row : partitionMapping.getValue() ) { + LogicalValues newLogicalValues = new LogicalValues( + cluster, + cluster.traitSet(), + (((LogicalTableModify) node).getInput()).getRowType(), + ImmutableList.copyOf( ImmutableList.of( row ) ) ); + + RelNode input = buildDml( + newLogicalValues, + RelBuilder.create( statement, cluster ), + catalogTable, + placementsOnAdapter, + catalog.getPartitionPlacement( pkPlacement.adapterId, currentPartitionId ), + statement, + cluster, + true, + statement.getDataContext().getParameterValues() ).build(); + + List qualifiedTableName = ImmutableList.of( + PolySchemaBuilder.buildAdapterSchemaName( + pkPlacement.adapterUniqueName, + catalogTable.getSchemaName(), + pkPlacement.physicalSchemaName + ), + t.getLogicalTableName() + "_" + currentPartitionId ); + RelOptTable physical = catalogReader.getTableForMember( qualifiedTableName ); + ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); + + // Build DML + TableModify modify; + + modify = modifiableTable.toModificationRel( + cluster, + physical, + catalogReader, + input, + ((LogicalTableModify) node).getOperation(), + updateColumnList, + sourceExpressionList, + ((LogicalTableModify) node).isFlattened() ); + + modifies.add( modify ); + + } + } + operationWasRewritten = true; + } else if ( ((LogicalTableModify) node).getInput() instanceof LogicalProject && ((LogicalProject) ((LogicalTableModify) node).getInput()).getInput() instanceof LogicalValues ) { String partitionColumnName = catalog.getColumn( catalogTable.partitionColumnId ).name; List fieldNames = ((LogicalTableModify) node).getInput().getRowType().getFieldNames(); + + LogicalTableModify ltm = ((LogicalTableModify) node); + LogicalProject lproject = (LogicalProject) ltm.getInput(); + + List fieldValues = lproject.getProjects(); + for ( i = 0; i < fieldNames.size(); i++ ) { String columnName = fieldNames.get( i ); + if ( partitionColumnName.equals( columnName ) ) { + if ( ((LogicalTableModify) node).getInput().getChildExps().get( i ).getKind().equals( SqlKind.DYNAMIC_PARAM ) ) { - worstCaseRouting = true; + + // Needed to identify the column which contains the partition value + long partitionValueIndex = ((RexDynamicParam) fieldValues.get( i )).getIndex(); + + long tempPartitionId = 0; + // Get partitionValue per row/tuple to be inserted + // Create as many independent TableModifies as there are entries in getParameterValues + + for ( Map currentRow : statement.getDataContext().getParameterValues() ) { + + tempPartitionId = partitionManager.getTargetPartitionId( catalogTable, currentRow.get( partitionValueIndex ).toString() ); + + if ( !catalog.getPartitionsOnDataPlacement( pkPlacement.adapterId, catalogTable.id ).contains( tempPartitionId ) ) { + continue; + } + + List> parameterValues = new ArrayList<>(); + parameterValues.add( new HashMap<>( newParameterValues ) ); + parameterValues.get( 0 ).putAll( currentRow ); + + RelNode input = buildDml( + recursiveCopy( node.getInput( 0 ) ), + RelBuilder.create( statement, cluster ), + catalogTable, + placementsOnAdapter, + catalog.getPartitionPlacement( pkPlacement.adapterId, tempPartitionId ), + statement, + cluster, + true, + parameterValues ).build(); + + newParameterValues.putAll( parameterValues.get( 0 ) ); + + List qualifiedTableName = ImmutableList.of( + 
PolySchemaBuilder.buildAdapterSchemaName( + pkPlacement.adapterUniqueName, + catalogTable.getSchemaName(), + pkPlacement.physicalSchemaName + ), + t.getLogicalTableName() + "_" + tempPartitionId ); + RelOptTable physical = catalogReader.getTableForMember( qualifiedTableName ); + ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); + + // Build DML + TableModify modify; + + modify = modifiableTable.toModificationRel( + cluster, + physical, + catalogReader, + input, + ((LogicalTableModify) node).getOperation(), + updateColumnList, + sourceExpressionList, + ((LogicalTableModify) node).isFlattened() ); + + modifies.add( modify ); + } + + operationWasRewritten = true; + worstCaseRouting = false; } else { - partitionColumnIdentified = true; partitionValue = ((LogicalTableModify) node).getInput().getChildExps().get( i ).toString().replace( "'", "" ); identPart = (int) partitionManager.getTargetPartitionId( catalogTable, partitionValue ); + accessedPartitionList.add( identPart ); + worstCaseRouting = false; } break; + } else { + // When loop is finished + if ( i == fieldNames.size() - 1 ) { + worstCaseRouting = true; + // Because partitionColumn has not been specified in insert + } } } } else { @@ -532,85 +820,111 @@ public RelNode visit( LogicalFilter filter ) { if ( log.isDebugEnabled() ) { String partitionColumnName = catalog.getColumn( catalogTable.partitionColumnId ).name; - String partitionName = catalog.getPartition( identPart ).partitionName; - log.debug( "INSERT: partitionColumn-value: '{}' should be put on partition: {} ({}), which is partitioned with column", + String partitionName = catalog.getPartitionGroup( identPart ).partitionGroupName; + log.debug( "INSERT: partitionColumn-value: '{}' should be put on partition: {} ({}), which is partitioned with column {}", partitionValue, identPart, partitionName, partitionColumnName ); } } else if ( ((LogicalTableModify) node).getOperation() == Operation.DELETE ) { - if ( whereClauseValue == null ) { + if ( whereClauseValues == null ) { worstCaseRouting = true; } else { - if ( whereClauseValue.size() >= 2 ) { + if ( whereClauseValues.size() >= 4 ) { worstCaseRouting = true; - partitionColumnIdentified = false; } else { worstCaseRouting = false; - identPart = (int) partitionManager.getTargetPartitionId( catalogTable, whereClauseValue.get( 0 ) ); } } } - if ( !worstCaseRouting ) { - log.debug( "Get all Placements by identified Partition: {}", identPart ); - if ( !catalog.getPartitionsOnDataPlacement( pkPlacement.adapterId, pkPlacement.tableId ).contains( identPart ) ) { - if ( log.isDebugEnabled() ) { - log.debug( "DataPlacement: {}.{} SKIPPING since it does NOT contain identified partition: '{}' {}", - pkPlacement.adapterUniqueName, - pkPlacement.physicalTableName, - identPart, - catalog.getPartitionsOnDataPlacement( pkPlacement.adapterId, pkPlacement.tableId ) ); - } + if ( worstCaseRouting ) { + log.debug( "PartitionColumnID was not an explicit part of statement, partition routing will therefore assume worst-case: Routing to ALL PARTITIONS" ); + accessedPartitionList = catalogTable.partitionProperty.partitionIds.stream().collect( Collectors.toSet() ); + } + } else { + // unpartitioned tables only have one partition anyway + identPart = catalogTable.partitionProperty.partitionIds.get( 0 ); + accessedPartitionList.add( identPart ); + + } + + if ( statement.getTransaction().getMonitoringData() != null ) { + statement.getTransaction() + .getMonitoringData() + .setAccessedPartitions( accessedPartitionList.stream().collect( 
Collectors.toList() ) ); + } + + if ( !operationWasRewritten ) { + + for ( long partitionId : accessedPartitionList ) { + + if ( !catalog.getPartitionsOnDataPlacement( pkPlacement.adapterId, catalogTable.id ).contains( partitionId ) ) { continue; - } else { - if ( log.isDebugEnabled() ) { - log.debug( "DataPlacement: {}.{} contains identified partition: '{}' {}", + } + + List qualifiedTableName = ImmutableList.of( + PolySchemaBuilder.buildAdapterSchemaName( pkPlacement.adapterUniqueName, - pkPlacement.physicalTableName, - identPart, - catalog.getPartitionsOnDataPlacement( pkPlacement.adapterId, pkPlacement.tableId ) ); - } + catalogTable.getSchemaName(), + pkPlacement.physicalSchemaName + ), + t.getLogicalTableName() + "_" + partitionId ); + RelOptTable physical = catalogReader.getTableForMember( qualifiedTableName ); + ModifiableTable modifiableTable = physical.unwrap( ModifiableTable.class ); + + // Build DML + TableModify modify; + RelNode input = buildDml( + recursiveCopy( node.getInput( 0 ) ), + RelBuilder.create( statement, cluster ), + catalogTable, + placementsOnAdapter, + catalog.getPartitionPlacement( pkPlacement.adapterId, partitionId ), + statement, + cluster, + false, + statement.getDataContext().getParameterValues() ).build(); + if ( modifiableTable != null && modifiableTable == physical.unwrap( Table.class ) ) { + modify = modifiableTable.toModificationRel( + cluster, + physical, + catalogReader, + input, + ((LogicalTableModify) node).getOperation(), + updateColumnList, + sourceExpressionList, + ((LogicalTableModify) node).isFlattened() + ); + } else { + modify = LogicalTableModify.create( + physical, + catalogReader, + input, + ((LogicalTableModify) node).getOperation(), + updateColumnList, + sourceExpressionList, + ((LogicalTableModify) node).isFlattened() + ); } - } else { - log.debug( "PartitionColumnID was not an explicit part of statement, partition routing will therefore assume worst-case: Routing to ALL PARTITIONS" ); + modifies.add( modify ); } } + } - // Build DML - TableModify modify; - RelNode input = buildDml( - recursiveCopy( node.getInput( 0 ) ), - RelBuilder.create( statement, cluster ), - catalogTable, - placementsOnAdapter, - statement, - cluster ).build(); - if ( modifiableTable != null && modifiableTable == physical.unwrap( Table.class ) ) { - modify = modifiableTable.toModificationRel( - cluster, - physical, - catalogReader, - input, - ((LogicalTableModify) node).getOperation(), - updateColumnList, - sourceExpressionList, - ((LogicalTableModify) node).isFlattened() - ); - } else { - modify = LogicalTableModify.create( - physical, - catalogReader, - input, - ((LogicalTableModify) node).getOperation(), - updateColumnList, - sourceExpressionList, - ((LogicalTableModify) node).isFlattened() - ); + // Update parameter values (horizontal partitioning) + if ( !newParameterValues.isEmpty() ) { + statement.getDataContext().resetParameterValues(); + int idx = 0; + for ( Map.Entry entry : newParameterValues.entrySet() ) { + statement.getDataContext().addParameterValues( + entry.getKey(), + statement.getDataContext().getParameterType( idx++ ), + ImmutableList.of( entry.getValue() ) ); } - modifies.add( modify ); } + if ( modifies.size() == 1 ) { return modifies.get( 0 ); } else { @@ -635,25 +949,27 @@ public RelNode visit( LogicalFilter filter ) { } - protected RelBuilder buildDml( RelNode node, RelBuilder builder, CatalogTable catalogTable, List placements, Statement statement, RelOptCluster cluster ) { + protected RelBuilder buildDml( RelNode node, RelBuilder 
builder, CatalogTable catalogTable, List placements, CatalogPartitionPlacement partitionPlacement, Statement statement, RelOptCluster cluster, boolean remapParameterValues, List> parameterValues ) { for ( int i = 0; i < node.getInputs().size(); i++ ) { - buildDml( node.getInput( i ), builder, catalogTable, placements, statement, cluster ); + buildDml( node.getInput( i ), builder, catalogTable, placements, partitionPlacement, statement, cluster, remapParameterValues, parameterValues ); } if ( log.isDebugEnabled() ) { log.debug( "List of Store specific ColumnPlacements: " ); for ( CatalogColumnPlacement ccp : placements ) { - log.debug( "{}.{}.{}", ccp.adapterUniqueName, ccp.physicalTableName, ccp.getLogicalColumnName() ); + log.debug( "{}.{}", ccp.adapterUniqueName, ccp.getLogicalColumnName() ); } } if ( node instanceof LogicalTableScan && node.getTable() != null ) { RelOptTableImpl table = (RelOptTableImpl) node.getTable(); + if ( table.getTable() instanceof LogicalTable ) { // Special handling for INSERT INTO foo SELECT * FROM foo2 if ( ((LogicalTable) table.getTable()).getTableId() != catalogTable.id ) { return buildSelect( node, builder, statement, cluster ); } + builder = handleTableScan( builder, placements.get( 0 ).tableId, @@ -661,8 +977,11 @@ protected RelBuilder buildDml( RelNode node, RelBuilder builder, CatalogTable ca catalogTable.getSchemaName(), catalogTable.name, placements.get( 0 ).physicalSchemaName, - placements.get( 0 ).physicalTableName ); + partitionPlacement.physicalTableName, + partitionPlacement.partitionId ); + return builder; + } else { throw new RuntimeException( "Unexpected table. Only logical tables expected here!" ); } @@ -682,9 +1001,16 @@ protected RelBuilder buildDml( RelNode node, RelBuilder builder, CatalogTable ca } } else if ( node instanceof LogicalProject ) { if ( catalogTable.columnIds.size() == placements.size() ) { // full placement, generic handling is sufficient - return handleGeneric( node, builder ); - } else { // partitioned, adjust project + if ( catalogTable.isPartitioned && remapParameterValues ) { // && ((LogicalProject) node).getInput().getRowType().toString().equals( "RecordType(INTEGER ZERO)" ) + return remapParameterizedDml( node, builder, statement, parameterValues ); + } else { + return handleGeneric( node, builder ); + } + } else { // vertically partitioned, adjust project if ( ((LogicalProject) node).getInput().getRowType().toString().equals( "RecordType(INTEGER ZERO)" ) ) { + if ( catalogTable.isPartitioned && remapParameterValues ) { + builder = remapParameterizedDml( node, builder, statement, parameterValues ); + } builder.push( node.copy( node.getTraitSet(), ImmutableList.of( builder.peek( 0 ) ) ) ); ArrayList rexNodes = new ArrayList<>(); for ( CatalogColumnPlacement ccp : placements ) { @@ -719,6 +1045,38 @@ protected RelBuilder buildDml( RelNode node, RelBuilder builder, CatalogTable ca } + private RelBuilder remapParameterizedDml( RelNode node, RelBuilder builder, Statement statement, List> parameterValues ) { + if ( parameterValues.size() != 1 ) { + throw new RuntimeException( "The parameter values are expected to have a size of one in this case!"
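The remapping this method performs can be pictured in isolation: every dynamic parameter receives a fresh, dense index and its value is carried over, so each per-partition statement sees a consistent parameter map. A simplified, standalone sketch; the real method below additionally rewrites the RexDynamicParam nodes of the project:

import java.util.LinkedHashMap;
import java.util.Map;

// Simplified sketch of the index remapping: each dynamic parameter gets a
// fresh, dense index while its value is carried over unchanged.
final class ParameterRemapSketch {

    static Map<Long, Object> remap( Map<Long, Object> originalValues ) {
        Map<Long, Object> remapped = new LinkedHashMap<>();
        long newIndex = 0;
        for ( Object value : originalValues.values() ) {
            remapped.put( newIndex++, value );
        }
        return remapped;
    }

    public static void main( String[] args ) {
        Map<Long, Object> original = new LinkedHashMap<>();
        original.put( 4L, "hans" ); // e.g. a parameter of the original statement
        original.put( 9L, 42 );     // e.g. another parameter
        System.out.println( remap( original ) ); // {0=hans, 1=42}
    }
}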
); + } + + List projects = new ArrayList<>(); + for ( RexNode project : ((LogicalProject) node).getProjects() ) { + if ( project instanceof RexDynamicParam ) { + long newIndex = parameterValues.get( 0 ).size(); + long oldIndex = ((RexDynamicParam) project).getIndex(); + RelDataType type = statement.getDataContext().getParameterType( oldIndex ); + if ( type == null ) { + type = project.getType(); + } + Object value = parameterValues.get( 0 ).get( oldIndex ); + projects.add( new RexDynamicParam( type, newIndex ) ); + parameterValues.get( 0 ).put( newIndex, value ); + } + } + + LogicalValues logicalValues = LogicalValues.createOneRow( node.getCluster() ); + LogicalProject newProject = new LogicalProject( + node.getCluster(), + node.getTraitSet(), + logicalValues, + projects, + node.getRowType() + ); + return handleGeneric( newProject, builder ); + } + + private void dmlConditionCheck( LogicalFilter node, CatalogTable catalogTable, List placements, RexNode operand ) { if ( operand instanceof RexInputRef ) { int index = ((RexInputRef) operand).getIndex(); @@ -763,7 +1121,7 @@ private void dmlConditionCheck( LogicalFilter node, CatalogTable catalogTable, L @Override - public RelNode buildJoinedTableScan( Statement statement, RelOptCluster cluster, List placements ) { + public RelNode buildJoinedTableScan( Statement statement, RelOptCluster cluster, Map> placements ) { RelBuilder builder = RelBuilder.create( statement, cluster ); if ( RuntimeConfig.JOINED_TABLE_SCAN_CACHE.getBoolean() ) { @@ -773,104 +1131,124 @@ public RelNode buildJoinedTableScan( Statement statement, RelOptCluster cluster, } } - // Sort by adapter - Map> placementsByAdapter = new HashMap<>(); - for ( CatalogColumnPlacement placement : placements ) { - if ( !placementsByAdapter.containsKey( placement.adapterId ) ) { - placementsByAdapter.put( placement.adapterId, new LinkedList<>() ); + for ( Entry partitionToPlacement : placements.entrySet() ) { + long partitionId = (long) partitionToPlacement.getKey(); + List currentPlacements = (List) partitionToPlacement.getValue(); + // Sort by adapter + Map> placementsByAdapter = new HashMap<>(); + for ( CatalogColumnPlacement placement : currentPlacements ) { + if ( !placementsByAdapter.containsKey( placement.adapterId ) ) { + placementsByAdapter.put( placement.adapterId, new LinkedList<>() ); + } + placementsByAdapter.get( placement.adapterId ).add( placement ); } - placementsByAdapter.get( placement.adapterId ).add( placement ); - } - if ( placementsByAdapter.size() == 1 ) { - List ccp = placementsByAdapter.values().iterator().next(); - builder = handleTableScan( - builder, - ccp.get( 0 ).tableId, - ccp.get( 0 ).adapterUniqueName, - ccp.get( 0 ).getLogicalSchemaName(), - ccp.get( 0 ).getLogicalTableName(), - ccp.get( 0 ).physicalSchemaName, - ccp.get( 0 ).physicalTableName ); - // final project - ArrayList rexNodes = new ArrayList<>(); - List placementList = placements.stream() - .sorted( Comparator.comparingInt( p -> Catalog.getInstance().getColumn( p.columnId ).position ) ) - .collect( Collectors.toList() ); - for ( CatalogColumnPlacement catalogColumnPlacement : placementList ) { - rexNodes.add( builder.field( catalogColumnPlacement.getLogicalColumnName() ) ); - } - builder.project( rexNodes ); - } else if ( placementsByAdapter.size() > 1 ) { - // We need to join placements on different adapters - - // Get primary key - long pkid = catalog.getTable( placements.get( 0 ).tableId ).primaryKey; - List pkColumnIds = Catalog.getInstance().getPrimaryKey( pkid ).columnIds; - List pkColumns 
= new LinkedList<>(); - for ( long pkColumnId : pkColumnIds ) { - pkColumns.add( Catalog.getInstance().getColumn( pkColumnId ) ); - } + if ( placementsByAdapter.size() == 1 ) { + List ccps = placementsByAdapter.values().iterator().next(); + CatalogColumnPlacement ccp = ccps.get( 0 ); + CatalogPartitionPlacement cpp = catalog.getPartitionPlacement( ccp.adapterId, partitionId ); - // Add primary key - for ( Entry> entry : placementsByAdapter.entrySet() ) { - for ( CatalogColumn pkColumn : pkColumns ) { - CatalogColumnPlacement pkPlacement = Catalog.getInstance().getColumnPlacement( entry.getKey(), pkColumn.id ); - if ( !entry.getValue().contains( pkPlacement ) ) { - entry.getValue().add( pkPlacement ); - } + builder = handleTableScan( + builder, + ccp.tableId, + ccp.adapterUniqueName, + ccp.getLogicalSchemaName(), + ccp.getLogicalTableName(), + ccp.physicalSchemaName, + cpp.physicalTableName, + cpp.partitionId ); + // final project + ArrayList rexNodes = new ArrayList<>(); + List placementList = currentPlacements.stream() + .sorted( Comparator.comparingInt( p -> Catalog.getInstance().getColumn( p.columnId ).position ) ) + .collect( Collectors.toList() ); + for ( CatalogColumnPlacement catalogColumnPlacement : placementList ) { + rexNodes.add( builder.field( catalogColumnPlacement.getLogicalColumnName() ) ); } - } + builder.project( rexNodes ); - Deque queue = new LinkedList<>(); - boolean first = true; - for ( List ccps : placementsByAdapter.values() ) { - handleTableScan( - builder, - ccps.get( 0 ).tableId, - ccps.get( 0 ).adapterUniqueName, - ccps.get( 0 ).getLogicalSchemaName(), - ccps.get( 0 ).getLogicalTableName(), - ccps.get( 0 ).physicalSchemaName, - ccps.get( 0 ).physicalTableName ); - if ( first ) { - first = false; - } else { - ArrayList rexNodes = new ArrayList<>(); - for ( CatalogColumnPlacement p : ccps ) { - if ( pkColumnIds.contains( p.columnId ) ) { - String alias = ccps.get( 0 ).adapterUniqueName + "_" + p.getLogicalColumnName(); - rexNodes.add( builder.alias( builder.field( p.getLogicalColumnName() ), alias ) ); - queue.addFirst( alias ); - queue.addFirst( p.getLogicalColumnName() ); - } else { - rexNodes.add( builder.field( p.getLogicalColumnName() ) ); + } else if ( placementsByAdapter.size() > 1 ) { + // We need to join placements on different adapters + + // Get primary key + long pkid = catalog.getTable( currentPlacements.get( 0 ).tableId ).primaryKey; + List pkColumnIds = Catalog.getInstance().getPrimaryKey( pkid ).columnIds; + List pkColumns = new LinkedList<>(); + for ( long pkColumnId : pkColumnIds ) { + pkColumns.add( Catalog.getInstance().getColumn( pkColumnId ) ); + } + + // Add primary key + for ( Entry> entry : placementsByAdapter.entrySet() ) { + for ( CatalogColumn pkColumn : pkColumns ) { + CatalogColumnPlacement pkPlacement = Catalog.getInstance().getColumnPlacement( entry.getKey(), pkColumn.id ); + if ( !entry.getValue().contains( pkPlacement ) ) { + entry.getValue().add( pkPlacement ); } } - builder.project( rexNodes ); - List joinConditions = new LinkedList<>(); - for ( int i = 0; i < pkColumnIds.size(); i++ ) { - joinConditions.add( builder.call( - SqlStdOperatorTable.EQUALS, - builder.field( 2, ccps.get( 0 ).getLogicalTableName(), queue.removeFirst() ), - builder.field( 2, ccps.get( 0 ).getLogicalTableName(), queue.removeFirst() ) ) ); - } - builder.join( JoinRelType.INNER, joinConditions ); + } + + Deque queue = new LinkedList<>(); + boolean first = true; + for ( List ccps : placementsByAdapter.values() ) { + + CatalogColumnPlacement ccp = ccps.get( 
0 ); + CatalogPartitionPlacement cpp = catalog.getPartitionPlacement( ccp.adapterId, partitionId ); + + handleTableScan( + builder, + ccp.tableId, + ccp.adapterUniqueName, + ccp.getLogicalSchemaName(), + ccp.getLogicalTableName(), + ccp.physicalSchemaName, + cpp.physicalTableName, + cpp.partitionId ); + if ( first ) { + first = false; + } else { + ArrayList rexNodes = new ArrayList<>(); + for ( CatalogColumnPlacement p : ccps ) { + if ( pkColumnIds.contains( p.columnId ) ) { + String alias = ccps.get( 0 ).adapterUniqueName + "_" + p.getLogicalColumnName(); + rexNodes.add( builder.alias( builder.field( p.getLogicalColumnName() ), alias ) ); + queue.addFirst( alias ); + queue.addFirst( p.getLogicalColumnName() ); + } else { + rexNodes.add( builder.field( p.getLogicalColumnName() ) ); + } + } + builder.project( rexNodes ); + List joinConditions = new LinkedList<>(); + for ( int i = 0; i < pkColumnIds.size(); i++ ) { + joinConditions.add( builder.call( + SqlStdOperatorTable.EQUALS, + builder.field( 2, ccp.getLogicalTableName() + "_" + partitionId, queue.removeFirst() ), + builder.field( 2, ccp.getLogicalTableName() + "_" + partitionId, queue.removeFirst() ) ) ); + } + builder.join( JoinRelType.INNER, joinConditions ); + } } + // final project + ArrayList rexNodes = new ArrayList<>(); + List placementList = currentPlacements.stream() + .sorted( Comparator.comparingInt( p -> Catalog.getInstance().getColumn( p.columnId ).position ) ) + .collect( Collectors.toList() ); + for ( CatalogColumnPlacement ccp : placementList ) { + rexNodes.add( builder.field( ccp.getLogicalColumnName() ) ); + } + builder.project( rexNodes ); + } else { + throw new RuntimeException( "The table '" + currentPlacements.get( 0 ).getLogicalTableName() + "' seems to have no placement. This should not happen!" ); } - // final project - ArrayList rexNodes = new ArrayList<>(); - List placementList = placements.stream() - .sorted( Comparator.comparingInt( p -> Catalog.getInstance().getColumn( p.columnId ).position ) ) - .collect( Collectors.toList() ); - for ( CatalogColumnPlacement ccp : placementList ) { - rexNodes.add( builder.field( ccp.getLogicalColumnName() ) ); - } - builder.project( rexNodes ); - } else { - throw new RuntimeException( "The table '" + placements.get( 0 ).getLogicalTableName() + "' seems to have no placement. This should not happen!" 
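Two conventions tie this loop together: each partition is scanned under the physical name logicalTableName + "_" + partitionId within the adapter schema, and once the loop has pushed one subtree per partition, the subtrees are merged with a UNION ALL (shown right after the loop). A small sketch of both rules:

import java.util.List;

// Sketch of the naming and union-arity logic used in buildJoinedTableScan():
// every partition is scanned as "<logicalName>_<partitionId>", and a union is
// added only when more than one partition was selected.
final class PartitionScanNaming {

    static String physicalName( String logicalTableName, long partitionId ) {
        return logicalTableName + "_" + partitionId;
    }

    static boolean needsUnion( List<Long> selectedPartitions ) {
        return selectedPartitions.size() > 1;
    }

    public static void main( String[] args ) {
        System.out.println( physicalName( "hashpartition", 102L ) );    // hashpartition_102
        System.out.println( needsUnion( List.of( 100L, 101L, 102L ) ) ); // true
    }
}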
); } + + // A union is only needed if more than one partition has to be selected + if ( placements.size() > 1 ) { + builder.union( true, placements.size() ); + } + RelNode node = builder.build(); if ( RuntimeConfig.JOINED_TABLE_SCAN_CACHE.getBoolean() ) { joinedTableScanCache.put( placements.hashCode(), node ); @@ -886,13 +1264,14 @@ protected RelBuilder handleTableScan( String logicalSchemaName, String logicalTableName, String physicalSchemaName, - String physicalTableName ) { + String physicalTableName, + long partitionId ) { if ( selectedAdapter != null ) { selectedAdapter.put( tableId, new SelectedAdapterInfo( storeUniqueName, physicalSchemaName, physicalTableName ) ); } return builder.scan( ImmutableList.of( PolySchemaBuilder.buildAdapterSchemaName( storeUniqueName, logicalSchemaName, physicalSchemaName ), - logicalTableName ) ); + logicalTableName + "_" + partitionId ) ); } @@ -940,6 +1319,7 @@ private static class WhereClauseVisitor extends RexShuttle { private final long partitionColumnIndex; @Getter private boolean valueIdentified = false; + private boolean unsupportedFilter = false; public WhereClauseVisitor( Statement statement, long partitionColumnIndex ) { @@ -954,34 +1334,37 @@ public RexNode visitCall( final RexCall call ) { super.visitCall( call ); if ( call.operands.size() == 2 ) { - - if ( call.operands.get( 0 ) instanceof RexInputRef ) { - if ( ((RexInputRef) call.operands.get( 0 )).getIndex() == partitionColumnIndex ) { - if ( call.operands.get( 1 ) instanceof RexLiteral ) { - value = ((RexLiteral) call.operands.get( 1 )).getValueForQueryParameterizer(); - values.add( value ); - valueIdentified = true; - } else if ( call.operands.get( 1 ) instanceof RexDynamicParam ) { - long index = ((RexDynamicParam) call.operands.get( 1 )).getIndex(); - value = statement.getDataContext().getParameterValue( index );//.get("?" + index); - values.add( value ); - valueIdentified = true; + if ( call.op.getKind().equals( SqlKind.EQUALS ) ) { + if ( call.operands.get( 0 ) instanceof RexInputRef ) { + if ( ((RexInputRef) call.operands.get( 0 )).getIndex() == partitionColumnIndex ) { + if ( call.operands.get( 1 ) instanceof RexLiteral ) { + value = ((RexLiteral) call.operands.get( 1 )).getValueForQueryParameterizer(); + values.add( value ); + valueIdentified = true; + } else if ( call.operands.get( 1 ) instanceof RexDynamicParam ) { + long index = ((RexDynamicParam) call.operands.get( 1 )).getIndex(); + value = statement.getDataContext().getParameterValue( index ); + values.add( value ); + valueIdentified = true; + } } - } - } else if ( call.operands.get( 1 ) instanceof RexInputRef ) { - - if ( ((RexInputRef) call.operands.get( 1 )).getIndex() == partitionColumnIndex ) { - if ( call.operands.get( 0 ) instanceof RexLiteral ) { - value = ((RexLiteral) call.operands.get( 0 )).getValueForQueryParameterizer(); - values.add( value ); - valueIdentified = true; - } else if ( call.operands.get( 0 ) instanceof RexDynamicParam ) { - long index = ((RexDynamicParam) call.operands.get( 0 )).getIndex(); - value = statement.getDataContext().getParameterValue( index );//get("?"
+ index); //.getParameterValues // - values.add( value ); - valueIdentified = true; + } else if ( call.operands.get( 1 ) instanceof RexInputRef ) { + if ( ((RexInputRef) call.operands.get( 1 )).getIndex() == partitionColumnIndex ) { + if ( call.operands.get( 0 ) instanceof RexLiteral ) { + value = ((RexLiteral) call.operands.get( 0 )).getValueForQueryParameterizer(); + values.add( value ); + valueIdentified = true; + } else if ( call.operands.get( 0 ) instanceof RexDynamicParam ) { + long index = ((RexDynamicParam) call.operands.get( 0 )).getIndex(); + value = statement.getDataContext().getParameterValue( index ); + values.add( value ); + valueIdentified = true; + } } } + } else { + // Enable worst-case routing + unsupportedFilter = true; } } return call; diff --git a/dbms/src/main/java/org/polypheny/db/router/IcarusRouter.java b/dbms/src/main/java/org/polypheny/db/router/IcarusRouter.java index 7ceda06511..3855e2e35b 100644 --- a/dbms/src/main/java/org/polypheny/db/router/IcarusRouter.java +++ b/dbms/src/main/java/org/polypheny/db/router/IcarusRouter.java @@ -268,7 +268,7 @@ protected List selectPlacement( RelNode node, CatalogTab selectedAdapterId = table.placementsByAdapter.keySet().asList().get( 0 ); } if ( table.placementsByAdapter.containsKey( selectedAdapterId ) ) { - List placements = Catalog.getInstance().getColumnPlacementsOnAdapter( selectedAdapterId, table.id ); + List placements = Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( selectedAdapterId, table.id ); if ( placements.size() != table.columnIds.size() ) { throw new RuntimeException( "The data store '" + selectedAdapterId + "' does not contain a full table placement!" ); } @@ -567,7 +567,7 @@ private Map calc( Map map, int similarThresho if ( sum == 0 ) { log.error( "Routing table row is empty! This should not happen!" ); } else if ( sum > 100 ) { - log.error( "Routing table row does sum up to a value greater 100! This should not happen! The value is: " + sum + " | Entries: " + row.values().toString() ); + log.error( "Routing table row sums up to a value greater than 100! This should not happen! The value is: {} | Entries: {}", sum, row.values().toString() ); } else if ( sum < 100 ) { if ( fastestStore == -1 ) { log.error( "Fastest Store is -1! This should not happen!"
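The visitor above now extracts values only from simple equality calls; every other operator marks the filter as unsupported, which later forces worst-case routing. Restated as a minimal rule; illustrative only, not the actual RexShuttle:

// Condensed restatement of the WhereClauseVisitor logic: only an equality
// predicate referencing the partition column yields a prunable value; any
// other operator sets the unsupportedFilter flag instead.
final class FilterExtractionRule {

    enum Op { EQUALS, NOT_EQUALS, LESS_THAN }

    static boolean yieldsPrunableValue( Op op, boolean referencesPartitionColumn ) {
        return op == Op.EQUALS && referencesPartitionColumn;
    }

    public static void main( String[] args ) {
        System.out.println( yieldsPrunableValue( Op.EQUALS, true ) );     // true  -> prune to one partition
        System.out.println( yieldsPrunableValue( Op.NOT_EQUALS, true ) ); // false -> unsupportedFilter
    }
}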
); @@ -591,6 +591,7 @@ private static > SortedSet> entri sortedEntries.addAll( map.entrySet() ); return sortedEntries; } + } @@ -600,6 +601,7 @@ private static class ExecutionTime { private final String queryClassString; private final int adapterId; private final long nanoTime; + } @@ -751,5 +753,7 @@ public RelNode visit( RelNode other ) { hashBasis.add( "other#" + other.getClass().getSimpleName() ); return visitChildren( other ); } + } + } diff --git a/dbms/src/main/java/org/polypheny/db/router/SimpleRouter.java b/dbms/src/main/java/org/polypheny/db/router/SimpleRouter.java index 867dfef61d..948b34f799 100644 --- a/dbms/src/main/java/org/polypheny/db/router/SimpleRouter.java +++ b/dbms/src/main/java/org/polypheny/db/router/SimpleRouter.java @@ -71,7 +71,7 @@ protected List selectPlacement( RelNode node, CatalogTab if ( table.placementsByAdapter.get( adapterIdWithMostPlacements ).contains( cid ) ) { placementList.add( Catalog.getInstance().getColumnPlacement( adapterIdWithMostPlacements, cid ) ); } else { - placementList.add( Catalog.getInstance().getColumnPlacements( cid ).get( 0 ) ); + placementList.add( Catalog.getInstance().getColumnPlacement( cid ).get( 0 ) ); } } diff --git a/dbms/src/main/java/org/polypheny/db/schema/PolySchemaBuilder.java b/dbms/src/main/java/org/polypheny/db/schema/PolySchemaBuilder.java index 956c9c35cc..bb04581f03 100644 --- a/dbms/src/main/java/org/polypheny/db/schema/PolySchemaBuilder.java +++ b/dbms/src/main/java/org/polypheny/db/schema/PolySchemaBuilder.java @@ -36,6 +36,7 @@ import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogDatabase; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogSchema; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.config.RuntimeConfig; @@ -52,6 +53,7 @@ public class PolySchemaBuilder implements PropertyChangeListener { private final static PolySchemaBuilder INSTANCE = new PolySchemaBuilder(); private AbstractPolyphenyDbSchema current; + private boolean isOutdated = true; private PolySchemaBuilder() { @@ -68,7 +70,7 @@ public AbstractPolyphenyDbSchema getCurrent() { if ( !RuntimeConfig.SCHEMA_CACHING.getBoolean() ) { return buildSchema(); } - if ( current == null ) { + if ( current == null || isOutdated ) { current = buildSchema(); } return current; @@ -81,7 +83,7 @@ private synchronized AbstractPolyphenyDbSchema buildSchema() { SchemaPlus rootSchema = polyphenyDbSchema.plus(); Catalog catalog = Catalog.getInstance(); - // + // Build logical schema CatalogDatabase catalogDatabase = catalog.getDatabase( 1 ); for ( CatalogSchema catalogSchema : catalog.getSchemas( catalogDatabase.id, null ) ) { @@ -133,7 +135,6 @@ private synchronized AbstractPolyphenyDbSchema buildSchema() { s.polyphenyDbSchema().setSchema( new LogicalSchema( catalogSchema.name, tableMap ) ); } - // // Build adapter schema (physical schema) List adapters = Catalog.getInstance().getAdapters(); for ( CatalogSchema catalogSchema : catalog.getSchemas( catalogDatabase.id, null ) ) { @@ -147,26 +148,37 @@ private synchronized AbstractPolyphenyDbSchema buildSchema() { for ( String physicalSchemaName : tableIdsPerSchema.keySet() ) { Set tableIds = tableIdsPerSchema.get( physicalSchemaName ); - Map physicalTables = new HashMap<>(); + + HashMap physicalTables = new HashMap<>(); Adapter adapter = AdapterManager.getInstance().getAdapter( catalogAdapter.id ); + final String 
schemaName = buildAdapterSchemaName( catalogAdapter.uniqueName, catalogSchema.name, physicalSchemaName ); + adapter.createNewSchema( rootSchema, schemaName ); SchemaPlus s = new SimplePolyphenyDbSchema( polyphenyDbSchema, adapter.getCurrentSchema(), schemaName ).plus(); + for ( long tableId : tableIds ) { CatalogTable catalogTable = catalog.getTable( tableId ); - Table table = adapter.createTableSchema( - catalogTable, - Catalog.getInstance().getColumnPlacementsOnAdapterSortedByPhysicalPosition( adapter.getAdapterId(), catalogTable.id ) ); - physicalTables.put( catalog.getTable( tableId ).name, table ); - s.add( catalog.getTable( tableId ).name, table ); + + List partitionPlacements = catalog.getPartitionPlacementByTable( adapter.getAdapterId(), tableId ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + Table table = adapter.createTableSchema( + catalogTable, + Catalog.getInstance().getColumnPlacementsOnAdapterSortedByPhysicalPosition( adapter.getAdapterId(), catalogTable.id ), + partitionPlacement ); + + physicalTables.put( catalog.getTable( tableId ).name + "_" + partitionPlacement.partitionId, table ); + + rootSchema.add( schemaName, s ); + physicalTables.forEach( rootSchema.getSubSchema( schemaName )::add ); + rootSchema.getSubSchema( schemaName ).polyphenyDbSchema().setSchema( adapter.getCurrentSchema() ); + } } - rootSchema.add( schemaName, s ); - physicalTables.forEach( rootSchema.getSubSchema( schemaName )::add ); - rootSchema.getSubSchema( schemaName ).polyphenyDbSchema().setSchema( adapter.getCurrentSchema() ); } } } - + isOutdated = false; return polyphenyDbSchema; } @@ -179,8 +191,8 @@ public static String buildAdapterSchemaName( String storeName, String logicalSch // Listens on changes to the catalog @Override public void propertyChange( PropertyChangeEvent evt ) { - // Catalog changed, rebuild schema - current = buildSchema(); + // Catalog changed, flag as outdated + isOutdated = true; } diff --git a/dbms/src/main/java/org/polypheny/db/transaction/TransactionImpl.java b/dbms/src/main/java/org/polypheny/db/transaction/TransactionImpl.java index fbb5c49cfb..451267bed3 100644 --- a/dbms/src/main/java/org/polypheny/db/transaction/TransactionImpl.java +++ b/dbms/src/main/java/org/polypheny/db/transaction/TransactionImpl.java @@ -38,7 +38,7 @@ import org.polypheny.db.config.RuntimeConfig; import org.polypheny.db.information.InformationManager; import org.polypheny.db.jdbc.JavaTypeFactoryImpl; -import org.polypheny.db.monitoring.events.MonitoringEvent; +import org.polypheny.db.monitoring.events.StatementEvent; import org.polypheny.db.prepare.PolyphenyDbCatalogReader; import org.polypheny.db.processing.DataMigrator; import org.polypheny.db.processing.DataMigratorImpl; @@ -80,12 +80,20 @@ public class TransactionImpl implements Transaction, Comparable { @Getter private final boolean analyze; + + + private StatementEvent statementEventData; + + private final AtomicLong statementCounter = new AtomicLong(); + private final List statements = new ArrayList<>(); + private final List changedTables = new ArrayList<>(); + @Getter private final List involvedAdapters = new CopyOnWriteArrayList<>(); + private final Set lockList = new HashSet<>(); - private MonitoringEvent monitoringEvent; TransactionImpl( @@ -232,7 +240,9 @@ public StatementImpl createStatement() { @Override public void addChangedTable( String qualifiedTableName ) { if ( !this.changedTables.contains( qualifiedTableName ) ) { - log.debug( "Add changed table: {}", qualifiedTableName ); + if ( 
log.isDebugEnabled() ) { + log.debug( "Add changed table: {}", qualifiedTableName ); + } this.changedTables.add( qualifiedTableName ); } } @@ -265,14 +275,14 @@ public boolean equals( Object o ) { @Override - public MonitoringEvent getMonitoringEvent() { - return this.monitoringEvent; + public StatementEvent getMonitoringData() { + return this.statementEventData; } @Override - public void setMonitoringEvent( MonitoringEvent event ) { - this.monitoringEvent = event; + public void setMonitoringData( StatementEvent event ) { + this.statementEventData = event; } // For locking diff --git a/dbms/src/test/java/org/polypheny/db/adapter/FileAdapterTest.java b/dbms/src/test/java/org/polypheny/db/adapter/FileAdapterTest.java index bfcef3bf73..0f09623793 100644 --- a/dbms/src/test/java/org/polypheny/db/adapter/FileAdapterTest.java +++ b/dbms/src/test/java/org/polypheny/db/adapter/FileAdapterTest.java @@ -142,7 +142,7 @@ public void testDateTime() throws SQLException { Connection connection = jdbcConnection.getConnection(); try ( Statement statement = connection.createStatement() ) { try { - statement.executeUpdate( "CREATE TABLE testDateTime (a INTEGER NOT NULL, b DATE, c TIME, d TIMESTAMP , PRIMARY KEY (a)) ON STORE \"mm\"" ); + statement.executeUpdate( "CREATE TABLE testDateTime (a INTEGER NOT NULL, b DATE, c TIME, d TIMESTAMP, PRIMARY KEY (a)) ON STORE \"mm\"" ); PreparedStatement preparedStatement = connection.prepareStatement( "INSERT INTO testDateTime (a,b,c,d) VALUES (?,?,?,?)" ); preparedStatement.setInt( 1, 1 ); diff --git a/dbms/src/test/java/org/polypheny/db/misc/HorizontalPartitioningTest.java b/dbms/src/test/java/org/polypheny/db/misc/HorizontalPartitioningTest.java index a0612132d3..85988075e7 100644 --- a/dbms/src/test/java/org/polypheny/db/misc/HorizontalPartitioningTest.java +++ b/dbms/src/test/java/org/polypheny/db/misc/HorizontalPartitioningTest.java @@ -18,8 +18,12 @@ import com.google.common.collect.ImmutableList; import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; import org.apache.calcite.avatica.AvaticaSqlException; import org.junit.Assert; import org.junit.BeforeClass; @@ -28,11 +32,25 @@ import org.polypheny.db.AdapterTestSuite; import org.polypheny.db.TestHelper; import org.polypheny.db.TestHelper.JdbcConnection; +import org.polypheny.db.catalog.Catalog; +import org.polypheny.db.catalog.Catalog.PartitionType; +import org.polypheny.db.catalog.Catalog.Pattern; +import org.polypheny.db.catalog.entity.CatalogPartition; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; +import org.polypheny.db.catalog.entity.CatalogTable; +import org.polypheny.db.config.Config; +import org.polypheny.db.config.ConfigEnum; +import org.polypheny.db.config.ConfigManager; import org.polypheny.db.excluded.CassandraExcluded; +import org.polypheny.db.excluded.FileExcluded; +import org.polypheny.db.partition.PartitionManager; +import org.polypheny.db.partition.PartitionManagerFactory; +import org.polypheny.db.partition.properties.TemperaturePartitionProperty; +import org.polypheny.db.util.background.BackgroundTask.TaskSchedulingType; @SuppressWarnings({ "SqlNoDataSourceInspection", "SqlDialectInspection" }) -@Category(AdapterTestSuite.class) +@Category({ AdapterTestSuite.class, CassandraExcluded.class }) public class HorizontalPartitioningTest { @BeforeClass @@ -88,7 +106,7 @@ public void basicHorizontalPartitioningTest() throws SQLException 
{ } finally { // Drop tables and stores statement.executeUpdate( "DROP TABLE horizontalparttest" ); - statement.executeUpdate( "DROP TABLE horizontalparttestfalsepartition" ); + //statement.executeUpdate( "DROP TABLE horizontalparttestfalsepartition" ); } } } @@ -142,17 +160,26 @@ public void modifyPartitionTest() throws SQLException { statement.executeUpdate( "ALTER ADAPTERS ADD \"store2\" USING 'org.polypheny.db.adapter.jdbc.stores.HsqldbStore'" + " WITH '{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" ); - // Merge partition - statement.executeUpdate( "ALTER TABLE horizontalparttestextension MERGE PARTITIONs" ); - // Add placement for second table statement.executeUpdate( "ALTER TABLE \"horizontalparttestextension\" ADD PLACEMENT (tvarchar) ON STORE \"store2\"" ); + statement.executeUpdate( "ALTER TABLE \"horizontalparttestextension\" MERGE PARTITIONS" ); + + // DROP Table to repartition + statement.executeUpdate( "DROP TABLE \"horizontalparttestextension\" " ); + // Partition by name - statement.executeUpdate( "ALTER TABLE horizontalparttestextension " + statement.executeUpdate( "CREATE TABLE horizontalparttestextension( " + + "tprimary INTEGER NOT NULL, " + + "tinteger INTEGER NULL, " + + "tvarchar VARCHAR(20) NULL, " + + "PRIMARY KEY (tprimary) )" + "PARTITION BY HASH (tinteger) " + " WITH (name1, name2, name3)" ); + // Add placement for second table + statement.executeUpdate( "ALTER TABLE \"horizontalparttestextension\" ADD PLACEMENT (tvarchar) ON STORE \"store2\"" ); + // name partitioning can be modified with index statement.executeUpdate( "ALTER TABLE \"horizontalparttestextension\" MODIFY PARTITIONS (1) ON STORE \"store2\" " ); @@ -223,6 +250,82 @@ public void partitionNumberTest() throws SQLException { } + @Test + public void dataMigrationTest() throws SQLException { + try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) { + Connection connection = polyphenyDbConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + try { + statement.executeUpdate( "CREATE TABLE hashpartition( " + + "tprimary INTEGER NOT NULL, " + + "tinteger INTEGER , " + + "tvarchar VARCHAR(20) , " + + "PRIMARY KEY (tprimary) )" ); + + statement.executeUpdate( "INSERT INTO hashpartition VALUES (1, 3, 'hans')" ); + statement.executeUpdate( "INSERT INTO hashpartition VALUES (2, 7, 'bob')" ); + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM hashpartition ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 1, 3, "hans" }, + new Object[]{ 2, 7, "bob" } ) ); + + // ADD adapter + statement.executeUpdate( "ALTER ADAPTERS ADD \"storehash\" USING 'org.polypheny.db.adapter.jdbc.stores.HsqldbStore'" + + " WITH '{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" ); + + // ADD FullPlacement + statement.executeUpdate( "ALTER TABLE \"hashpartition\" ADD PLACEMENT (tprimary, tinteger, tvarchar) ON STORE \"storehash\"" ); + + statement.executeUpdate( "ALTER TABLE hashpartition " + + "PARTITION BY HASH (tvarchar) " + + "PARTITIONS 3" ); + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM hashpartition ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 1, 3, "hans" }, + new Object[]{ 2, 7, "bob" } ) ); + + statement.executeUpdate( "ALTER TABLE \"hashpartition\" MERGE PARTITIONS" ); + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM 
hashpartition ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 1, 3, "hans" }, + new Object[]{ 2, 7, "bob" } ) ); + + //Combined with verticalPartitioning + + statement.executeUpdate( "ALTER TABLE hashpartition MODIFY PLACEMENT" + + " DROP COLUMN tvarchar ON STORE storehash" ); + + statement.executeUpdate( "ALTER TABLE hashpartition MODIFY PLACEMENT" + + " DROP COLUMN tinteger ON STORE hsqldb" ); + + statement.executeUpdate( "ALTER TABLE hashpartition " + + "PARTITION BY HASH (tvarchar) " + + "PARTITIONS 3" ); + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM hashpartition ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 1, 3, "hans" }, + new Object[]{ 2, 7, "bob" } ) ); + + statement.executeUpdate( "ALTER TABLE \"hashpartition\" MERGE PARTITIONS" ); + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM hashpartition ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 1, 3, "hans" }, + new Object[]{ 2, 7, "bob" } ) ); + + } finally { + statement.executeUpdate( "DROP TABLE hashpartition" ); + statement.executeUpdate( "ALTER ADAPTERS DROP \"storehash\"" ); + } + } + } + } + + @Test public void hashPartitioningTest() throws SQLException { try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) { @@ -265,15 +368,7 @@ public void hashPartitioningTest() throws SQLException { // Change placement on second store statement.executeUpdate( "ALTER TABLE \"hashpartition\" MODIFY PARTITIONS (0,1) ON STORE \"storehash\"" ); - // Change placement on second store - // Check partition distribution violation - failed = false; - try { - statement.executeUpdate( "ALTER TABLE \"hashpartition\" MODIFY PARTITIONS (2) ON STORE \"hsqldb\"" ); - } catch ( AvaticaSqlException e ) { - failed = true; - } - Assert.assertTrue( failed ); + statement.executeUpdate( "ALTER TABLE \"hashpartition\" MERGE PARTITIONS" ); // You can't change the distribution unless there exists at least one full partition placement of each column as a fallback failed = false; @@ -291,9 +386,9 @@ public void hashPartitioningTest() throws SQLException { } Assert.assertTrue( failed ); } finally { - statement.executeUpdate( "DROP TABLE hashpartitioning" ); statement.executeUpdate( "DROP TABLE hashpartition" ); - statement.executeUpdate( "DROP TABLE hashpartitioningValidate" ); + statement.executeUpdate( "DROP TABLE IF EXISTS hashpartitioning" ); + statement.executeUpdate( "DROP TABLE IF EXISTS hashpartitioningvalidate" ); statement.executeUpdate( "ALTER ADAPTERS DROP \"storehash\"" ); } } @@ -381,21 +476,41 @@ public void rangePartitioningTest() throws SQLException { new Object[]{ 1, 3, "hans" }, new Object[]{ 2, 7, "bob" } ) ); - statement.executeUpdate( "UPDATE rangepartitioning1 SET tinteger = 4 WHERE tinteger = 7" ); + statement.executeUpdate( "UPDATE rangepartitioning1 SET tinteger = 6 WHERE tinteger = 7" ); TestHelper.checkResultSet( statement.executeQuery( "SELECT * FROM rangepartitioning1 ORDER BY tprimary" ), ImmutableList.of( new Object[]{ 1, 3, "hans" }, - new Object[]{ 2, 4, "bob" } ) ); + new Object[]{ 2, 6, "bob" } ) ); TestHelper.checkResultSet( - statement.executeQuery( "SELECT * FROM rangepartitioning1 WHERE tinteger = 4" ), + statement.executeQuery( "SELECT * FROM rangepartitioning1 WHERE tinteger = 6" ), ImmutableList.of( - new Object[]{ 2, 4, "bob" } ) ); + new Object[]{ 2, 6, "bob" } ) ); + + // Checks if the input is ordered correctly. e.g. 
 // RANGE partitioning can't be created without specifying ranges
 boolean failed = false;
 try {
 statement.executeUpdate( "CREATE TABLE rangepartitioning2( "
 + "tprimary INTEGER NOT NULL, "
 + "tinteger INTEGER NULL, "
 + "tvarchar VARCHAR(20) NULL, "
@@ -409,9 +524,386 @@
 } finally {
 statement.executeUpdate( "DROP TABLE rangepartitioning1" );
 statement.executeUpdate( "DROP TABLE IF EXISTS rangepartitioning2" );
+ statement.executeUpdate( "DROP TABLE IF EXISTS rangepartitioning3" );
 }
 }
 }
 }
+
+
+ @Test
+ public void partitionFilterTest() throws SQLException {
+ try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) {
+ Connection connection = polyphenyDbConnection.getConnection();
+
+ try ( Statement statement = connection.createStatement() ) {
+ statement.executeUpdate( "CREATE TABLE physicalPartitionFilter( "
+ + "tprimary INTEGER NOT NULL, "
+ + "tvarchar VARCHAR(20) NULL, "
+ + "tinteger INTEGER NULL, "
+ + "PRIMARY KEY (tprimary) )"
+ + "PARTITION BY HASH (tvarchar) "
+ + "WITH (foo, bar, foobar, barfoo) " );
+
+ try {
+
+ statement.executeUpdate( "INSERT INTO physicalPartitionFilter VALUES (10, 'e', 100)" );
+ statement.executeUpdate( "INSERT INTO physicalPartitionFilter VALUES (21, 'f', 200)" );
+
+ // Check if a filter on the partition column can be applied
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM physicalPartitionFilter WHERE tvarchar = 'e'" ),
+ ImmutableList.of(
+ new Object[]{ 10, "e", 100 } ) );
+
+ // Check if a negated predicate can be used on the partition column
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM physicalPartitionFilter WHERE tvarchar != 'e'" ),
+ ImmutableList.of(
+ new Object[]{ 21, "f", 200 } ) );
+
+ // Check if a filter can be applied to an arbitrary column != partition column
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM physicalPartitionFilter WHERE tinteger = 100" ),
+ ImmutableList.of(
+ new Object[]{ 10, "e", 100 } ) );
+
+ // Check if compound filters can be used - OR
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM physicalPartitionFilter WHERE tvarchar = 'e' OR tvarchar = 'f' ORDER BY tprimary" ),
+ ImmutableList.of(
+ new Object[]{ 10, "e", 100 },
+ new Object[]{ 21, "f", 200 } ) );
+
+ // Check if compound filters can be used - AND
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM physicalPartitionFilter WHERE tvarchar = 'e' AND tvarchar = 'f'" ),
+ ImmutableList.of() );
+ } finally {
+ // Drop tables and stores
+ statement.executeUpdate( "DROP TABLE IF EXISTS physicalPartitionFilter" );
+ }
+ }
+ }
+ }
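+ // Note on partitionFilterTest above (illustrative sketch, not the actual router code):
+ // a predicate on the partition column lets the router resolve the target partition up
+ // front, conceptually:
+ //   long targetId = partitionManager.getTargetPartitionId( table, "e" ); // scan only this partition
+ // while predicates on other columns (e.g. tinteger = 100) have to be evaluated on all partitions.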
+
+
+ @Test
+ public void partitionPlacementTest() throws SQLException {
+ try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) {
+ Connection connection = polyphenyDbConnection.getConnection();
+
+ long partitionsToCreate = 4;
+
+ try ( Statement statement = connection.createStatement() ) {
+ statement.executeUpdate( "CREATE TABLE physicalPartitionTest( "
+ + "tprimary INTEGER NOT NULL, "
+ + "tinteger INTEGER NULL, "
+ + "tvarchar VARCHAR(20) NULL, "
+ + "PRIMARY KEY (tprimary) )"
+ + "PARTITION BY HASH (tvarchar) "
+ + "WITH (foo, bar, foobar, barfoo) " );
+
+ try {
+ CatalogTable table = Catalog.getInstance().getTables( null, null, new Pattern( "physicalpartitiontest" ) ).get( 0 );
+
+ // Check if initially as many partition placements are created as partitions were requested
+ Assert.assertEquals( partitionsToCreate, Catalog.getInstance().getAllPartitionPlacementsByTable( table.id ).size() );
+
+ // ADD adapter
+ statement.executeUpdate( "ALTER ADAPTERS ADD \"anotherstore\" USING 'org.polypheny.db.adapter.jdbc.stores.HsqldbStore'"
+ + " WITH '{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" );
+
+ // ADD FullPlacement
+ statement.executeUpdate( "ALTER TABLE \"physicalPartitionTest\" ADD PLACEMENT ON STORE \"anotherstore\"" );
+ Assert.assertEquals( partitionsToCreate * 2, Catalog.getInstance().getAllPartitionPlacementsByTable( table.id ).size() );
+
+ // Modify partitions on second store
+ statement.executeUpdate( "ALTER TABLE \"physicalPartitionTest\" MODIFY PARTITIONS (\"foo\") ON STORE anotherstore" );
+ Assert.assertEquals( partitionsToCreate + 1, Catalog.getInstance().getAllPartitionPlacementsByTable( table.id ).size() );
+
+ // After MERGE, the table consists of a single partition, placed on both stores
+ statement.executeUpdate( "ALTER TABLE \"physicalPartitionTest\" MERGE PARTITIONS" );
+ Assert.assertEquals( 2, Catalog.getInstance().getAllPartitionPlacementsByTable( table.id ).size() );
+
+ // Drop the placement and verify the number of partition placements
+ statement.executeUpdate( "ALTER TABLE \"physicalPartitionTest\" DROP PLACEMENT ON STORE \"anotherstore\"" );
+ Assert.assertEquals( 1, Catalog.getInstance().getAllPartitionPlacementsByTable( table.id ).size() );
+
+ } finally {
+ // Drop tables and stores
+ statement.executeUpdate( "DROP TABLE IF EXISTS physicalPartitionTest" );
+ statement.executeUpdate( "ALTER ADAPTERS DROP anotherstore" );
+ }
+ }
+ }
+ }
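+ // Note on the counts asserted in partitionPlacementTest above: placements are the
+ // cross product of partitions and the stores holding them, here with 4 partitions:
+ //   one store                        -> 4 placements
+ //   full placement on a second store -> 4 * 2 = 8 placements
+ //   second store limited to "foo"    -> 4 + 1 = 5 placements
+ //   after MERGE (1 partition, 2 stores) -> 2 placements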
+
+
+ @Test
+ public void temperaturePartitionTest() throws SQLException {
+ try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) {
+ Connection connection = polyphenyDbConnection.getConnection();
+ try ( Statement statement = connection.createStatement() ) {
+
+ // Set the background processing of workload monitoring and temperature monitoring to a short interval to get quick results
+ ConfigManager cm = ConfigManager.getInstance();
+ Config c1 = cm.getConfig( "runtime/partitionFrequencyProcessingInterval" );
+ Config c2 = cm.getConfig( "runtime/queueProcessingInterval" );
+ ((ConfigEnum) c1).setEnum( TaskSchedulingType.EVERY_FIVE_SECONDS );
+ ((ConfigEnum) c2).setEnum( TaskSchedulingType.EVERY_FIVE_SECONDS );
+
+ statement.executeUpdate( "CREATE TABLE temperaturetest( "
+ + "tprimary INTEGER NOT NULL, "
+ + "tinteger INTEGER NULL, "
+ + "tvarchar VARCHAR(20) NULL, "
+ + "PRIMARY KEY (tprimary) )"
+ + "PARTITION BY TEMPERATURE(tvarchar)"
+ + "(PARTITION hot VALUES(12%),"
+ + "PARTITION cold VALUES(14%))"
+ + " USING FREQUENCY write INTERVAL 10 minutes WITH 20 HASH PARTITIONS" );
+
+ try {
+
+ CatalogTable table = Catalog.getInstance().getTables( null, null, new Pattern( "temperaturetest" ) ).get( 0 );
+
+ // Check if partition properties are correctly set and parsed
+ Assert.assertEquals( 600, ((TemperaturePartitionProperty) table.partitionProperty).getFrequencyInterval() );
+ Assert.assertEquals( 12, ((TemperaturePartitionProperty) table.partitionProperty).getHotAccessPercentageIn() );
+ Assert.assertEquals( 14, ((TemperaturePartitionProperty) table.partitionProperty).getHotAccessPercentageOut() );
+ Assert.assertEquals( PartitionType.HASH, ((TemperaturePartitionProperty) table.partitionProperty).getInternalPartitionFunction() );
+
+ Assert.assertEquals( 2, table.partitionProperty.getPartitionGroupIds().size() );
+ Assert.assertEquals( 20, table.partitionProperty.getPartitionIds().size() );
+
+ // Check if initially as many partition placements are created as requested and stored in the partition property
+ Assert.assertEquals( table.partitionProperty.getPartitionIds().size(), Catalog.getInstance().getAllPartitionPlacementsByTable( table.id ).size() );
+
+ // Retrieve the partition distribution
+ // Number of partitions that are moved into the HOT group
+ long numberOfPartitionsInHot = (table.partitionProperty.partitionIds.size() * ((TemperaturePartitionProperty) table.partitionProperty).getHotAccessPercentageIn()) / 100;
+ // Number of partitions that are allowed to remain in the HOT group before eviction
+ long allowedTablesInHot = (table.partitionProperty.partitionIds.size() * ((TemperaturePartitionProperty) table.partitionProperty).getHotAccessPercentageOut()) / 100;
+ if ( numberOfPartitionsInHot == 0 ) {
+ numberOfPartitionsInHot = 1;
+ }
+ if ( allowedTablesInHot == 0 ) {
+ allowedTablesInHot = 1;
+ }
+ long numberOfPartitionsInCold = table.partitionProperty.partitionIds.size() - numberOfPartitionsInHot;
+
+ List<CatalogPartition> hotPartitions = Catalog.getInstance().getPartitions( ((TemperaturePartitionProperty) table.partitionProperty).getHotPartitionGroupId() );
+ List<CatalogPartition> coldPartitions = Catalog.getInstance().getPartitions( ((TemperaturePartitionProperty) table.partitionProperty).getColdPartitionGroupId() );
+
+ Assert.assertTrue( (numberOfPartitionsInHot == hotPartitions.size()) || (numberOfPartitionsInHot == allowedTablesInHot) );
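+
+ // Worked numbers for this test, restating the computation above: with 20 internal
+ // hash partitions, hotAccessPercentageIn = 12 and hotAccessPercentageOut = 14,
+ // integer division yields
+ //   numberOfPartitionsInHot = 20 * 12 / 100 = 2
+ //   allowedTablesInHot      = 20 * 14 / 100 = 2
+ // (each bumped to 1 only if it were 0), so the HOT group is expected to hold at
+ // most two partitions here.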
+
+ // ADD adapter
+ statement.executeUpdate( "ALTER ADAPTERS ADD \"hot\" USING 'org.polypheny.db.adapter.jdbc.stores.HsqldbStore'"
+ + " WITH '{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" );
+
+ statement.executeUpdate( "ALTER ADAPTERS ADD \"cold\" USING 'org.polypheny.db.adapter.jdbc.stores.HsqldbStore'"
+ + " WITH '{maxConnections:\"25\",path:., trxControlMode:locks,trxIsolationLevel:read_committed,type:Memory,tableType:Memory,mode:embedded}'" );
+
+ String partitionValue = "Foo";
+
+ statement.executeUpdate( "INSERT INTO temperaturetest VALUES (1, 3, '" + partitionValue + "')" );
+ statement.executeUpdate( "INSERT INTO temperaturetest VALUES (2, 4, '" + partitionValue + "')" );
+ statement.executeUpdate( "INSERT INTO temperaturetest VALUES (3, 5, '" + partitionValue + "')" );
+ statement.executeUpdate( "INSERT INTO temperaturetest VALUES (4, 6, '" + partitionValue + "')" );
+
+ // Do a batch INSERT to check if batch inserts work for partitioned tables
+ PreparedStatement preparedInsert = connection.prepareStatement( "INSERT INTO temperaturetest(tprimary,tinteger,tvarchar) VALUES (?, ?, ?)" );
+
+ preparedInsert.setInt( 1, 7 );
+ preparedInsert.setInt( 2, 55 );
+ preparedInsert.setString( 3, partitionValue );
+ preparedInsert.addBatch();
+
+ preparedInsert.executeBatch();
+ // These DML INSERTs on the target partition should make it frequent enough to trigger a redistribution of the data
+
+ // Verify that the partition is now in HOT and was not before
+ CatalogTable updatedTable = Catalog.getInstance().getTables( null, null, new Pattern( "temperaturetest" ) ).get( 0 );
+
+ // Manually get the target partition id of the query
+ PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance();
+ PartitionManager partitionManager = partitionManagerFactory.getPartitionManager( table.partitionType );
+ long targetId = partitionManager.getTargetPartitionId( table, partitionValue );
+
+ List<CatalogPartition> hotPartitionsAfterChange = Catalog.getInstance().getPartitions( ((TemperaturePartitionProperty) updatedTable.partitionProperty).getHotPartitionGroupId() );
+ Assert.assertTrue( hotPartitionsAfterChange.contains( Catalog.getInstance().getPartition( targetId ) ) );
+
+ } finally {
+ // Drop tables and stores
+ statement.executeUpdate( "DROP TABLE IF EXISTS temperaturetest" );
+ statement.executeUpdate( "ALTER ADAPTERS DROP hot" );
+ statement.executeUpdate( "ALTER ADAPTERS DROP cold" );
+ }
+ }
+ }
+ }
+
+
+ @Test
+ public void multiInsertTest() throws SQLException {
+ try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) {
+ Connection connection = polyphenyDbConnection.getConnection();
+ try ( Statement statement = connection.createStatement() ) {
+ statement.executeUpdate( "CREATE TABLE multiinsert( "
+ + "tprimary INTEGER NOT NULL, "
+ + "tvarchar VARCHAR(20) NULL, "
+ + "tinteger INTEGER NULL, "
+ + "PRIMARY KEY (tprimary) )"
+ + "PARTITION BY HASH (tvarchar) "
+ + "PARTITIONS 20" );
+
+ try {
+ statement.executeUpdate( "INSERT INTO multiinsert(tprimary,tvarchar,tinteger) VALUES (1,'Hans',5),(2,'Eva',7),(3,'Alice',89)" );
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM multiinsert ORDER BY tprimary" ),
+ ImmutableList.of(
+ new Object[]{ 1, "Hans", 5 },
+ new Object[]{ 2, "Eva", 7 },
+ new Object[]{ 3, "Alice", 89 } ) );
+
+ // Check if the values are correctly associated with the corresponding partition
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM multiinsert WHERE tvarchar = 'Hans' ORDER BY tprimary" ),
+ ImmutableList.of( new Object[]{ 1, "Hans", 5 } ) );
+
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM multiinsert WHERE tvarchar = 'Eva' ORDER BY tprimary" ),
+ ImmutableList.of( new Object[]{ 2, "Eva", 7 } ) );
+
+ TestHelper.checkResultSet(
+ statement.executeQuery( "SELECT * FROM multiinsert WHERE tvarchar = 'Alice' ORDER BY tprimary" ),
+ ImmutableList.of( new Object[]{ 3, "Alice", 89 } ) );
+
+ } finally {
+ // Drop the table
+ statement.executeUpdate( "DROP TABLE IF EXISTS multiinsert" );
+ }
+ }
+ }
+
+ }
+
+
+ @Test
+ @Category(FileExcluded.class)
+ public void batchPartitionTest() throws
SQLException { + try ( JdbcConnection polyphenyDbConnection = new JdbcConnection( true ) ) { + Connection connection = polyphenyDbConnection.getConnection(); + try ( Statement statement = connection.createStatement() ) { + statement.executeUpdate( "CREATE TABLE batchtest( " + + "tprimary INTEGER NOT NULL, " + + "tvarchar VARCHAR(20) NULL, " + + "tinteger INTEGER NULL, " + + "PRIMARY KEY (tprimary) )" + + "PARTITION BY HASH (tvarchar) " + + "PARTITIONS 20" ); + + try { + // + // INSERT + PreparedStatement preparedInsert = connection.prepareStatement( "INSERT INTO batchtest(tprimary,tvarchar,tinteger) VALUES (?, ?, ?)" ); + + preparedInsert.setInt( 1, 1 ); + preparedInsert.setString( 2, "Foo" ); + preparedInsert.setInt( 3, 4 ); + preparedInsert.addBatch(); + + preparedInsert.setInt( 1, 2 ); + preparedInsert.setString( 2, "Bar" ); + preparedInsert.setInt( 3, 55 ); + preparedInsert.addBatch(); + + preparedInsert.setInt( 1, 3 ); + preparedInsert.setString( 2, "Foo" ); + preparedInsert.setInt( 3, 67 ); + preparedInsert.addBatch(); + + preparedInsert.setInt( 1, 4 ); + preparedInsert.setString( 2, "FooBar" ); + preparedInsert.setInt( 3, 89 ); + preparedInsert.addBatch(); + + preparedInsert.executeBatch(); + + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM batchtest ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 1, "Foo", 4 }, + new Object[]{ 2, "Bar", 55 }, + new Object[]{ 3, "Foo", 67 }, + new Object[]{ 4, "FooBar", 89 } ) ); + + // + // UPDATE + PreparedStatement preparedUpdate = connection.prepareStatement( "UPDATE batchtest SET tinteger = ? WHERE tprimary = ?" ); + + preparedUpdate.setInt( 1, 31 ); + preparedUpdate.setInt( 2, 1 ); + preparedUpdate.addBatch(); + + preparedUpdate.setInt( 1, 32 ); + preparedUpdate.setInt( 2, 2 ); + preparedUpdate.addBatch(); + + preparedUpdate.setInt( 1, 33 ); + preparedUpdate.setInt( 2, 3 ); + preparedUpdate.addBatch(); + + preparedUpdate.executeBatch(); + + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM batchtest ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 1, "Foo", 31 }, + new Object[]{ 2, "Bar", 32 }, + new Object[]{ 3, "Foo", 33 }, + new Object[]{ 4, "FooBar", 89 } ) ); + + // + // DELETE + PreparedStatement preparedDelete = connection.prepareStatement( "DELETE FROM batchtest WHERE tinteger = ?" 
); + + preparedDelete.setInt( 1, 31 ); + preparedDelete.addBatch(); + + preparedDelete.setInt( 1, 89 ); + preparedDelete.addBatch(); + + preparedDelete.executeBatch(); + + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM batchtest ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 2, "Bar", 32 }, + new Object[]{ 3, "Foo", 33 } ) ); + + statement.executeUpdate( "ALTER TABLE \"batchtest\" MERGE PARTITIONS" ); + TestHelper.checkResultSet( + statement.executeQuery( "SELECT * FROM batchtest ORDER BY tprimary" ), + ImmutableList.of( + new Object[]{ 2, "Bar", 32 }, + new Object[]{ 3, "Foo", 33 } ) ); + + } finally { + // Drop tables and stores + statement.executeUpdate( "DROP TABLE IF EXISTS batchtest" ); } } } + } } diff --git a/dbms/src/test/java/org/polypheny/db/sql/clause/GroupByTest.java b/dbms/src/test/java/org/polypheny/db/sql/clause/GroupByTest.java index a776932315..7b6fa2aecf 100644 --- a/dbms/src/test/java/org/polypheny/db/sql/clause/GroupByTest.java +++ b/dbms/src/test/java/org/polypheny/db/sql/clause/GroupByTest.java @@ -59,7 +59,7 @@ private static void addTestData() throws SQLException { statement.executeUpdate( "INSERT INTO TestTableA VALUES(4,'Name4')" ); statement.executeUpdate( "INSERT INTO TestTableA VALUES(5,'Name5')" ); - statement.executeUpdate( "CREATE TABLE TestTableB(Id INTEGER NOT NULL,Row_Code VARCHAR(255) NOT NULL,Frequency INTEGER, Primary key(Id,Row_Code))" ); + statement.executeUpdate( "CREATE TABLE TestTableB(Id INTEGER NOT NULL,Row_Code VARCHAR(255) NOT NULL,Frequencies INTEGER, Primary key(Id,Row_Code))" ); statement.executeUpdate( "INSERT INTO TestTableB VALUES(1,'A',86)" ); statement.executeUpdate( "INSERT INTO TestTableB VALUES(1,'B',86)" ); statement.executeUpdate( "INSERT INTO TestTableB VALUES(1,'C',90)" ); @@ -113,7 +113,7 @@ public void groupByTest() throws SQLException { new Object[]{ "Name5", 443 } ); TestHelper.checkResultSet( - statement.executeQuery( "SELECT S.Name, sum (P.Frequency) FROM TestTableA S, TestTableB P WHERE P.Frequency > 84 GROUP BY S.Name ORDER BY S.Name" ), + statement.executeQuery( "SELECT S.Name, sum (P.Frequencies) FROM TestTableA S, TestTableB P WHERE P.Frequencies > 84 GROUP BY S.Name ORDER BY S.Name" ), expectedResult, true ); @@ -134,7 +134,7 @@ public void groupByWithInnerSelect() throws SQLException { new Object[]{ 2, "Name2" } ); TestHelper.checkResultSet( - statement.executeQuery( "SELECT s.id, s.name FROM TestTableC s, TestTableB t WHERE s.id = t.id AND Frequency > (SELECT AVG (Frequency) FROM TestTableB WHERE row_code = 'C' GROUP BY row_code='C')\n" ), + statement.executeQuery( "SELECT s.id, s.name FROM TestTableC s, TestTableB t WHERE s.id = t.id AND Frequencies > (SELECT AVG (Frequencies) FROM TestTableB WHERE row_code = 'C' GROUP BY row_code='C')\n" ), expectedResult, true ); diff --git a/ethereum-adapter/src/main/java/org/polypheny/db/adapter/ethereum/EthereumDataSource.java b/ethereum-adapter/src/main/java/org/polypheny/db/adapter/ethereum/EthereumDataSource.java index 92c7ffd34b..0050cb2e0c 100644 --- a/ethereum-adapter/src/main/java/org/polypheny/db/adapter/ethereum/EthereumDataSource.java +++ b/ethereum-adapter/src/main/java/org/polypheny/db/adapter/ethereum/EthereumDataSource.java @@ -32,6 +32,7 @@ import org.polypheny.db.adapter.DataSource; import org.polypheny.db.adapter.DeployMode; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import 
org.polypheny.db.information.InformationGroup; import org.polypheny.db.information.InformationTable; @@ -92,8 +93,8 @@ public void createNewSchema( SchemaPlus rootSchema, String name ) { @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentSchema.createBlockchainTable( catalogTable, columnPlacementsOnStore, this ); + public Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentSchema.createBlockchainTable( combinedTable, columnPlacementsOnStore, this ); } @@ -112,9 +113,9 @@ public void truncate( Context context, CatalogTable table ) { @Override public Map> getExportedColumns() { Map> map = new HashMap<>(); - String[] blockColumns = { "number", "hash", "parentHash", "nonce", "sha3Uncles", "logsBloom", "transactionsRoot", "stateRoot", "receiptsRoot", "author", "miner", "mixHash", "difficulty", "totalDifficulty", "extraData", "size", "gasLimit", "gasUsed", "timestamp" }; + String[] blockColumns = { "number", "hash", "parent_hash", "nonce", "sha3uncles", "logs_bloom", "transactions_root", "state_root", "receipts_root", "author", "miner", "mix_hash", "difficulty", "total_difficulty", "extra_data", "size", "gas_limit", "gas_used", "timestamp" }; PolyType[] blockTypes = { PolyType.BIGINT, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.BIGINT, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.BIGINT, PolyType.BIGINT, PolyType.VARCHAR, PolyType.BIGINT, PolyType.BIGINT, PolyType.BIGINT, PolyType.TIMESTAMP }; - String[] transactionColumns = { "hash", "nonce", "blockHash", "blockNumber", "transactionIndex", "from", "to", "value", "gasPrice", "gas", "input", "creates", "publicKey", "raw", "r", "s" }; + String[] transactionColumns = { "hash", "nonce", "block_hash", "block_number", "transaction_index", "from", "to", "value", "gas_price", "gas", "input", "creates", "public_key", "raw", "r", "s" }; PolyType[] transactionTypes = { PolyType.VARCHAR, PolyType.BIGINT, PolyType.VARCHAR, PolyType.BIGINT, PolyType.BIGINT, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.BIGINT, PolyType.BIGINT, PolyType.BIGINT, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR, PolyType.VARCHAR }; PolyType type = PolyType.VARCHAR; diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileEnumerator.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileEnumerator.java index ab9ead1879..5d89716fc4 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileEnumerator.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileEnumerator.java @@ -74,6 +74,7 @@ public class FileEnumerator implements Enumerator { * If a filter is available, it will iterate over all columns and project each row * * @param rootPath The rootPath is required to know where the files to iterate are placed + * @param partitionId The id of the partition * @param columnIds Ids of the columns that come from a tableScan. If there is no filter, the enumerator will only iterate over the columns that are specified by the projection * @param columnTypes DataTypes of the columns that are given by the {@code columnIds} array * @param projectionMapping Mapping on how to project a table. E.g. the array [3,2] means that the row [a,b,c,d,e] will be projected to [c,b]. 
@@ -81,8 +82,10 @@ public class FileEnumerator implements Enumerator { * @param dataContext DataContext * @param condition Condition that can be {@code null}. The columnReferences in the filter point to the columns coming from the tableScan, not from the projection */ - public FileEnumerator( final Operation operation, + public FileEnumerator( + final Operation operation, final String rootPath, + final Long partitionId, final Long[] columnIds, final PolyType[] columnTypes, final List pkIds, @@ -91,6 +94,10 @@ public FileEnumerator( final Operation operation, final Condition condition, final Value[] updates ) { + if ( dataContext.getParameterValues().size() > 1 && (operation == Operation.UPDATE || operation == Operation.DELETE) ) { + throw new RuntimeException( "The file store does not support batch update or delete statements!" ); + } + this.operation = operation; if ( operation == Operation.DELETE || operation == Operation.UPDATE ) { //fix to make sure current is never null @@ -139,19 +146,19 @@ public FileEnumerator( final Operation operation, String xidHash = FileStore.SHA.hashString( dataContext.getStatement().getTransaction().getXid().toString(), FileStore.CHARSET ).toString(); FileFilter fileFilter = file -> !file.isHidden() && !file.getName().startsWith( "~$" ) && (!file.getName().startsWith( "_" ) || file.getName().startsWith( "_ins_" + xidHash )); for ( Long colId : columnsToIterate ) { - File columnFolder = FileStore.getColumnFolder( rootPath, colId ); + File columnFolder = FileStore.getColumnFolder( rootPath, colId, partitionId ); columnFolders.add( columnFolder ); } if ( columnsToIterate.length == 1 ) { // If we go over a single column, we can iterate it, even if null values are not present as files - this.fileList = FileStore.getColumnFolder( rootPath, columnsToIterate[0] ).listFiles( fileFilter ); + this.fileList = FileStore.getColumnFolder( rootPath, columnsToIterate[0], partitionId ).listFiles( fileFilter ); } else { // Iterate over a PK-column, because they are always NOT NULL - this.fileList = FileStore.getColumnFolder( rootPath, pkIds.get( 0 ) ).listFiles( fileFilter ); + this.fileList = FileStore.getColumnFolder( rootPath, pkIds.get( 0 ), partitionId ).listFiles( fileFilter ); } numOfCols = columnFolders.size(); - //create folder for the hardlinks + // create folder for the hardlinks this.hardlinkFolder = new File( rootPath, "hardlinks/" + xidHash ); if ( !hardlinkFolder.exists() ) { if ( !hardlinkFolder.mkdirs() ) { diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileMethod.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileMethod.java index 79303c6db5..cc4528cab7 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileMethod.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileMethod.java @@ -29,9 +29,9 @@ public enum FileMethod { - EXECUTE( FileStoreSchema.class, "execute", Operation.class, Integer.class, DataContext.class, String.class, Long[].class, PolyType[].class, List.class, Integer[].class, Condition.class, Value[].class ), - EXECUTE_MODIFY( FileStoreSchema.class, "executeModify", Operation.class, Integer.class, DataContext.class, String.class, Long[].class, PolyType[].class, List.class, Boolean.class, Object[].class, Condition.class ), - EXECUTE_QFS( QfsSchema.class, "execute", Operation.class, Integer.class, DataContext.class, String.class, Long[].class, PolyType[].class, List.class, Integer[].class, Condition.class, Value[].class ); + EXECUTE( FileStoreSchema.class, "execute", 
Operation.class, Integer.class, Long.class, DataContext.class, String.class, Long[].class, PolyType[].class, List.class, Integer[].class, Condition.class, Value[].class ), + EXECUTE_MODIFY( FileStoreSchema.class, "executeModify", Operation.class, Integer.class, Long.class, DataContext.class, String.class, Long[].class, PolyType[].class, List.class, Boolean.class, Object[].class, Condition.class ), + EXECUTE_QFS( QfsSchema.class, "execute", Operation.class, Integer.class, Long.class, DataContext.class, String.class, Long[].class, PolyType[].class, List.class, Integer[].class, Condition.class, Value[].class ); public final Method method; public static final ImmutableMap MAP; diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileModifier.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileModifier.java index 6a46c8dce5..2930b1f8c3 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileModifier.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileModifier.java @@ -40,15 +40,17 @@ public class FileModifier extends FileEnumerator { private boolean inserted = false; - public FileModifier( final Operation operation, + public FileModifier( + final Operation operation, final String rootPath, + final Long partitionId, final Long[] columnIds, final PolyType[] columnTypes, final List pkIds, final DataContext dataContext, final Object[] insertValues, final Condition condition ) { - super( operation, rootPath, columnIds, columnTypes, pkIds, null, dataContext, condition, null ); + super( operation, rootPath, partitionId, columnIds, columnTypes, pkIds, null, dataContext, condition, null ); this.insertValues = insertValues; } diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStore.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStore.java index e9c6468ff7..cd11062bd2 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStore.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStore.java @@ -30,6 +30,7 @@ import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogIndex; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogPrimaryKey; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.information.InformationGraph; @@ -125,8 +126,8 @@ public void createNewSchema( SchemaPlus rootSchema, String name ) { @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentSchema.createFileTable( catalogTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentSchema.createFileTable( catalogTable, columnPlacementsOnStore, partitionPlacement ); } @@ -137,36 +138,49 @@ public Schema getCurrentSchema() { @Override - public void createTable( Context context, CatalogTable catalogTable ) { + public void createTable( Context context, CatalogTable catalogTable, List partitionIds ) { context.getStatement().getTransaction().registerInvolvedAdapter( this ); - for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ) ) { + + for ( long partitionId : partitionIds ) { + catalog.updatePartitionPlacementPhysicalNames( + getAdapterId(), + partitionId, + 
"unused", + "unused" ); + + for ( Long colId : catalogTable.columnIds ) { + File newColumnFolder = getColumnFolder( colId, partitionId ); + if ( !newColumnFolder.mkdir() ) { + throw new RuntimeException( "Could not create column folder " + newColumnFolder.getAbsolutePath() ); + } + } + } + + for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ) ) { catalog.updateColumnPlacementPhysicalNames( getAdapterId(), placement.columnId, - currentSchema.getSchemaName(), - getPhysicalTableName( catalogTable.id ), - getPhysicalColumnName( placement.columnId ), + "unused", + "unused", true ); } - for ( Long colId : catalogTable.columnIds ) { - File newColumnFolder = getColumnFolder( colId ); - if ( !newColumnFolder.mkdir() ) { - throw new RuntimeException( "Could not create column folder " + newColumnFolder.getAbsolutePath() ); - } - } } @Override - public void dropTable( Context context, CatalogTable catalogTable ) { + public void dropTable( Context context, CatalogTable catalogTable, List partitionIds ) { context.getStatement().getTransaction().registerInvolvedAdapter( this ); - //todo check if it is on this store? - for ( Long colId : catalogTable.columnIds ) { - File f = getColumnFolder( colId ); - try { - FileUtils.deleteDirectory( f ); - } catch ( IOException e ) { - throw new RuntimeException( "Could not drop table " + colId, e ); + // TODO check if it is on this store? + + for ( long partitionId : partitionIds ) { + catalog.deletePartitionPlacement( getAdapterId(), partitionId ); + for ( Long colId : catalogTable.columnIds ) { + File f = getColumnFolder( colId, partitionId ); + try { + FileUtils.deleteDirectory( f ); + } catch ( IOException e ) { + throw new RuntimeException( "Could not drop table " + colId, e ); + } } } } @@ -175,21 +189,32 @@ public void dropTable( Context context, CatalogTable catalogTable ) { @Override public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn catalogColumn ) { context.getStatement().getTransaction().registerInvolvedAdapter( this ); - File newColumnFolder = getColumnFolder( catalogColumn.id ); - if ( !newColumnFolder.mkdir() ) { - throw new RuntimeException( "Could not create column folder " + newColumnFolder.getName() ); + + CatalogColumnPlacement ccp = null; + for ( CatalogColumnPlacement p : Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ) ) { + // The for loop is required to avoid using the names of the column which we are currently adding (which are null) + if ( p.columnId != catalogColumn.id ) { + ccp = p; + break; + } } + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( ccp.adapterId, catalogTable.id ) ) { + File newColumnFolder = getColumnFolder( catalogColumn.id, partitionPlacement.partitionId ); + if ( !newColumnFolder.mkdir() ) { + throw new RuntimeException( "Could not create column folder " + newColumnFolder.getName() ); + } - // Add default values - if ( catalogColumn.defaultValue != null ) { - try { - CatalogPrimaryKey primaryKey = catalog.getPrimaryKey( catalogTable.primaryKey ); - File primaryKeyDir = new File( rootDir, getPhysicalColumnName( primaryKey.columnIds.get( 0 ) ) ); - for ( File entry : primaryKeyDir.listFiles() ) { - FileModifier.write( new File( newColumnFolder, entry.getName() ), catalogColumn.defaultValue.value ); + // Add default values + if ( catalogColumn.defaultValue != null ) { + try { + CatalogPrimaryKey primaryKey = catalog.getPrimaryKey( 
catalogTable.primaryKey ); + File primaryKeyDir = new File( rootDir, getPhysicalColumnName( primaryKey.columnIds.get( 0 ), partitionPlacement.partitionId ) ); + for ( File entry : primaryKeyDir.listFiles() ) { + FileModifier.write( new File( newColumnFolder, entry.getName() ), catalogColumn.defaultValue.value ); + } + } catch ( IOException e ) { + throw new RuntimeException( "Caught exception while inserting default values", e ); } - } catch ( IOException e ) { - throw new RuntimeException( "Caught exception while inserting default values", e ); } } @@ -197,8 +222,7 @@ public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn getAdapterId(), catalogColumn.id, currentSchema.getSchemaName(), - getPhysicalTableName( catalogTable.id ), - getPhysicalColumnName( catalogColumn.id ), + "unused", false ); } @@ -206,23 +230,26 @@ public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn @Override public void dropColumn( Context context, CatalogColumnPlacement columnPlacement ) { context.getStatement().getTransaction().registerInvolvedAdapter( this ); - File columnFile = getColumnFolder( columnPlacement.columnId ); - try { - FileUtils.deleteDirectory( columnFile ); - } catch ( IOException e ) { - throw new RuntimeException( "Could not delete column folder", e ); + + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( columnPlacement.adapterId, columnPlacement.tableId ) ) { + File columnFile = getColumnFolder( columnPlacement.columnId, partitionPlacement.partitionId ); + try { + FileUtils.deleteDirectory( columnFile ); + } catch ( IOException e ) { + throw new RuntimeException( "Could not delete column folder", e ); + } } } @Override - public void addIndex( Context context, CatalogIndex catalogIndex ) { + public void addIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { throw new RuntimeException( "File adapter does not support adding indexes" ); } @Override - public void dropIndex( Context context, CatalogIndex catalogIndex ) { + public void dropIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { throw new RuntimeException( "File adapter does not support dropping indexes" ); } @@ -319,7 +346,7 @@ public void commitOrRollback( final PolyXid xid, final boolean commit ) { movePrefix = "_del_" + xidHash; } if ( rootDir.listFiles() != null ) { - for ( File columnFolder : rootDir.listFiles( f -> f.isDirectory() ) ) { + for ( File columnFolder : rootDir.listFiles( File::isDirectory ) ) { for ( File data : columnFolder.listFiles( f -> !f.isHidden() && f.getName().startsWith( deletePrefix ) ) ) { data.delete(); } @@ -369,14 +396,16 @@ private void cleanupHardlinks( final PolyXid xid ) { @Override public void truncate( Context context, CatalogTable table ) { //context.getStatement().getTransaction().registerInvolvedStore( this ); - FileTranslatableTable fileTable = (FileTranslatableTable) currentSchema.getTable( table.name ); - try { - for ( String colName : fileTable.getColumnNames() ) { - File columnFolder = getColumnFolder( fileTable.getColumnIdMap().get( colName ) ); - FileUtils.cleanDirectory( columnFolder ); + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( getAdapterId(), table.id ) ) { + FileTranslatableTable fileTable = (FileTranslatableTable) currentSchema.getTable( table.name + "_" + partitionPlacement.partitionId ); + try { + for ( String colName : fileTable.getColumnNames() ) { + File columnFolder = getColumnFolder( 
fileTable.getColumnIdMap().get( colName ), fileTable.getPartitionId() ); + FileUtils.cleanDirectory( columnFolder ); + } + } catch ( IOException e ) { + throw new RuntimeException( "Could not truncate file table", e ); } - } catch ( IOException e ) { - throw new RuntimeException( "Could not truncate file table", e ); } } @@ -410,7 +439,7 @@ public List getFunctionalIndexes( CatalogTable catalogTable @Override public void shutdown() { - log.info( "shutting down file store '{}'", getUniqueName() ); + log.info( "Shutting down file store '{}'", getUniqueName() ); removeInformationPage(); try { FileHelper.deleteDirRecursively( rootDir ); @@ -426,24 +455,19 @@ protected void reloadSettings( List updatedSettings ) { } - protected static String getPhysicalTableName( long tableId ) { - return "tab" + tableId; - } - - - protected static String getPhysicalColumnName( long columnId ) { - return "col" + columnId; + protected static String getPhysicalColumnName( long columnId, long partitionId ) { + return "col" + columnId + "_" + partitionId; } - public static File getColumnFolder( final String rootPath, final Long columnId ) { + public static File getColumnFolder( final String rootPath, final long columnId, final long partitionId ) { File root = new File( rootPath ); - return new File( root, getPhysicalColumnName( columnId ) ); + return new File( root, getPhysicalColumnName( columnId, partitionId ) ); } - public File getColumnFolder( final Long columnId ) { - return new File( rootDir, getPhysicalColumnName( columnId ) ); + public File getColumnFolder( final long columnId, final long partitionId ) { + return new File( rootDir, getPhysicalColumnName( columnId, partitionId ) ); } } diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStoreSchema.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStoreSchema.java index 0985339f68..f5408c90f8 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStoreSchema.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileStoreSchema.java @@ -34,6 +34,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogPrimaryKey; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.rel.type.RelDataTypeFactory; @@ -86,7 +87,10 @@ protected Map getTableMap() { } - public Table createFileTable( CatalogTable catalogTable, List columnPlacementsOnStore ) { + public Table createFileTable( + CatalogTable catalogTable, + List columnPlacementsOnStore, + CatalogPartitionPlacement partitionPlacement ) { final RelDataTypeFactory typeFactory = new PolyTypeFactoryImpl( RelDataTypeSystem.DEFAULT ); final RelDataTypeFactory.Builder fieldInfo = typeFactory.builder(); ArrayList columnIds = new ArrayList<>(); @@ -122,9 +126,18 @@ public Table createFileTable( CatalogTable catalogTable, List(); } - //FileTable table = new FileTable( store.getRootDir(), schemaName, catalogTable.id, columnIds, columnTypes, columnNames, store, this ); - FileTranslatableTable table = new FileTranslatableTable( this, catalogTable.name, catalogTable.id, columnIds, columnTypes, columnNames, pkIds, protoRowType ); - tableMap.put( catalogTable.name, table ); + // FileTable table = new FileTable( store.getRootDir(), schemaName, catalogTable.id, columnIds, columnTypes, columnNames, store, this ); + 
FileTranslatableTable table = new FileTranslatableTable( + this, + catalogTable.name + "_" + partitionPlacement.partitionId, + catalogTable.id, + partitionPlacement.partitionId, + columnIds, + columnTypes, + columnNames, + pkIds, + protoRowType ); + tableMap.put( catalogTable.name + "_" + partitionPlacement.partitionId, table ); return table; } @@ -134,12 +147,23 @@ public Table createFileTable( CatalogTable catalogTable, List execute( final Operation operation, final Integer adapterId, final DataContext dataContext, final String path, final Long[] columnIds, final PolyType[] columnTypes, final List pkIds, final Integer[] projectionMapping, final Condition condition, final Value[] updates ) { + public static Enumerable execute( + final Operation operation, + final Integer adapterId, + final Long partitionId, + final DataContext dataContext, + final String path, + final Long[] columnIds, + final PolyType[] columnTypes, + final List pkIds, + final Integer[] projectionMapping, + final Condition condition, + final Value[] updates ) { dataContext.getStatement().getTransaction().registerInvolvedAdapter( AdapterManager.getInstance().getAdapter( adapterId ) ); return new AbstractEnumerable() { @Override public Enumerator enumerator() { - return new FileEnumerator( operation, path, columnIds, columnTypes, pkIds, projectionMapping, dataContext, condition, updates ); + return new FileEnumerator( operation, path, partitionId, columnIds, columnTypes, pkIds, projectionMapping, dataContext, condition, updates ); } }; } @@ -150,7 +174,18 @@ public Enumerator enumerator() { * Executes INSERT operations * see {@link FileMethod#EXECUTE_MODIFY} and {@link org.polypheny.db.adapter.file.rel.FileToEnumerableConverter#implement} */ - public static Enumerable executeModify( final Operation operation, final Integer adapterId, final DataContext dataContext, final String path, final Long[] columnIds, final PolyType[] columnTypes, final List pkIds, final Boolean isBatch, final Object[] insertValues, final Condition condition ) { + public static Enumerable executeModify( + final Operation operation, + final Integer adapterId, + final Long partitionId, + final DataContext dataContext, + final String path, + final Long[] columnIds, + final PolyType[] columnTypes, + final List pkIds, + final Boolean isBatch, + final Object[] insertValues, + final Condition condition ) { dataContext.getStatement().getTransaction().registerInvolvedAdapter( AdapterManager.getInstance().getAdapter( adapterId ) ); final Object[] insert; @@ -183,7 +218,7 @@ public static Enumerable executeModify( final Operation operation, final return new AbstractEnumerable() { @Override public Enumerator enumerator() { - return new FileModifier( operation, path, columnIds, columnTypes, pkIds, dataContext, insert, condition ); + return new FileModifier( operation, path, partitionId, columnIds, columnTypes, pkIds, dataContext, insert, condition ); } }; } diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileTranslatableTable.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileTranslatableTable.java index d601e37abc..c51f243da8 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileTranslatableTable.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/FileTranslatableTable.java @@ -56,6 +56,8 @@ public class FileTranslatableTable extends AbstractQueryableTable implements Tra private final String tableName; private final long tableId; @Getter + private final long partitionId; + @Getter private 
final List columnNames; @Getter private final Map columnIdMap; @@ -70,9 +72,11 @@ public class FileTranslatableTable extends AbstractQueryableTable implements Tra private final RelProtoDataType protoRowType; - public FileTranslatableTable( final FileSchema fileSchema, + public FileTranslatableTable( + final FileSchema fileSchema, final String tableName, final long tableId, + final long partitionId, final List columnIds, final ArrayList columnTypes, final List columnNames, @@ -83,6 +87,7 @@ public FileTranslatableTable( final FileSchema fileSchema, this.rootDir = fileSchema.getRootDir(); this.tableName = tableName; this.tableId = tableId; + this.partitionId = partitionId; this.adapterId = fileSchema.getAdapterId(); this.pkIds = pkIds; this.protoRowType = protoRowType; @@ -120,9 +125,26 @@ public Collection getModifiableCollection() { @Override - public TableModify toModificationRel( RelOptCluster cluster, RelOptTable table, CatalogReader catalogReader, RelNode child, Operation operation, List updateColumnList, List sourceExpressionList, boolean flattened ) { + public TableModify toModificationRel( + RelOptCluster cluster, + RelOptTable table, + CatalogReader catalogReader, + RelNode child, + Operation operation, + List updateColumnList, + List sourceExpressionList, + boolean flattened ) { fileSchema.getConvention().register( cluster.getPlanner() ); - return new LogicalTableModify( cluster, cluster.traitSetOf( Convention.NONE ), table, catalogReader, child, operation, updateColumnList, sourceExpressionList, flattened ); + return new LogicalTableModify( + cluster, + cluster.traitSetOf( Convention.NONE ), + table, + catalogReader, + child, + operation, + updateColumnList, + sourceExpressionList, + flattened ); } diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileRules.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileRules.java index e00f17bdba..c00057cab2 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileRules.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileRules.java @@ -57,7 +57,7 @@ public static List rules( FileConvention out, Method enumeratorMetho new FileProjectRule( out, RelFactories.LOGICAL_BUILDER ), new FileValuesRule( out, RelFactories.LOGICAL_BUILDER ), new FileTableModificationRule( out, RelFactories.LOGICAL_BUILDER ), - new FileUnionRule( out, RelFactories.LOGICAL_BUILDER ), + //new FileUnionRule( out, RelFactories.LOGICAL_BUILDER ), new FileFilterRule( out, RelFactories.LOGICAL_BUILDER ) ); } diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileToEnumerableConverter.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileToEnumerableConverter.java index 84b10039fc..c6e42ef367 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileToEnumerableConverter.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/rel/FileToEnumerableConverter.java @@ -120,6 +120,7 @@ public Result implement( EnumerableRelImplementor implementor, Prefer pref ) { enumeratorMethod, Expressions.constant( fileImplementor.getOperation() ), Expressions.constant( fileImplementor.getFileTable().getAdapterId() ), + Expressions.constant( fileImplementor.getFileTable().getPartitionId() ), DataContext.ROOT, Expressions.constant( fileSchema.getRootDir().getAbsolutePath() ), Expressions.newArrayInit( Long.class, columnIds.toArray( new Expression[0] ) ), @@ -136,6 +137,7 @@ public Result implement( EnumerableRelImplementor implementor, 
Prefer pref ) { FileMethod.EXECUTE_MODIFY.method, Expressions.constant( fileImplementor.getOperation() ), Expressions.constant( fileImplementor.getFileTable().getAdapterId() ), + Expressions.constant( fileImplementor.getFileTable().getPartitionId() ), DataContext.ROOT, Expressions.constant( fileSchema.getRootDir().getAbsolutePath() ), Expressions.newArrayInit( Long.class, columnIds.toArray( new Expression[0] ) ), diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/Qfs.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/Qfs.java index bc09352744..2736a8289b 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/Qfs.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/Qfs.java @@ -37,6 +37,7 @@ import org.polypheny.db.adapter.DataSource; import org.polypheny.db.adapter.DeployMode; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.information.InformationGroup; import org.polypheny.db.information.InformationManager; @@ -89,8 +90,8 @@ public void createNewSchema( SchemaPlus rootSchema, String name ) { @Override - public Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore ) { - return currentSchema.createFileTable( combinedTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentSchema.createFileTable( combinedTable, columnPlacementsOnStore, partitionPlacement ); } diff --git a/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/QfsSchema.java b/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/QfsSchema.java index fa2a10fb06..ad20374432 100644 --- a/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/QfsSchema.java +++ b/file-adapter/src/main/java/org/polypheny/db/adapter/file/source/QfsSchema.java @@ -39,6 +39,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogPrimaryKey; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.rel.type.RelDataTypeFactory; @@ -91,7 +92,7 @@ protected Map getTableMap() { } - public Table createFileTable( CatalogTable catalogTable, List columnPlacementsOnStore ) { + public Table createFileTable( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { final RelDataTypeFactory typeFactory = new PolyTypeFactoryImpl( RelDataTypeSystem.DEFAULT ); final RelDataTypeFactory.Builder fieldInfo = typeFactory.builder(); ArrayList columnIds = new ArrayList<>(); @@ -132,14 +133,15 @@ public Table createFileTable( CatalogTable catalogTable, List execute( final Operation operation, final Integer adapterId, + final Long partitionId, final DataContext dataContext, final String path, final Long[] columnIds, diff --git a/information/src/main/java/org/polypheny/db/information/InformationDuration.java b/information/src/main/java/org/polypheny/db/information/InformationDuration.java index 8dad3f316c..b9d5d45778 100644 --- a/information/src/main/java/org/polypheny/db/information/InformationDuration.java +++ 
b/information/src/main/java/org/polypheny/db/information/InformationDuration.java @@ -27,12 +27,12 @@ public class InformationDuration extends Information { + private final HashMap children = new HashMap<>(); + private final boolean isChild = false; /** * Duration in NanoSeconds */ long duration = 0L; - private final HashMap children = new HashMap<>(); - private final boolean isChild = false; /** @@ -45,6 +45,20 @@ public InformationDuration( final InformationGroup group ) { } + static JsonSerializer getSerializer() { + return ( src, typeOfSrc, context ) -> { + JsonObject jsonObj = new JsonObject(); + jsonObj.addProperty( "type", src.type ); + jsonObj.add( "duration", context.serialize( src.duration ) ); + Object[] children1 = src.children.values().toArray(); + Arrays.sort( children1 ); + jsonObj.add( "children", context.serialize( children1 ) ); + jsonObj.add( "isChild", context.serialize( src.isChild ) ); + return jsonObj; + }; + } + + public Duration start( final String name ) { Duration d = new Duration( name ); this.children.put( name, d ); @@ -87,27 +101,17 @@ public Duration addMilliDuration( final String name, final long milliDuration ) } - static JsonSerializer getSerializer() { - return ( src, typeOfSrc, context ) -> { - JsonObject jsonObj = new JsonObject(); - jsonObj.addProperty( "type", src.type ); - jsonObj.add( "duration", context.serialize( src.duration ) ); - Object[] children1 = src.children.values().toArray(); - Arrays.sort( children1 ); - jsonObj.add( "children", context.serialize( children1 ) ); - jsonObj.add( "isChild", context.serialize( src.isChild ) ); - return jsonObj; - }; - } - - /** * Helper class for Durations */ static class Duration implements Comparable { + static long counter = 0; private final String type = InformationDuration.class.getSimpleName();//for the UI private final String name; + private final long sequence; + private final HashMap children = new HashMap<>(); + private final boolean isChild = true; /** * Duration in NanoSeconds */ @@ -117,13 +121,7 @@ static class Duration implements Comparable { */ private long limit; private StopWatch sw; - private final long sequence; - private boolean noProgressBar = false; - static long counter = 0; - - private final HashMap children = new HashMap<>(); - private final boolean isChild = true; private Duration( final String name ) { diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/JdbcSchema.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/JdbcSchema.java index b81916edb9..0bee44f43d 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/JdbcSchema.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/JdbcSchema.java @@ -57,6 +57,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.rel.type.RelDataType; import org.polypheny.db.rel.type.RelDataTypeFactory; @@ -136,7 +137,7 @@ public JdbcSchema( } - public JdbcTable createJdbcTable( CatalogTable catalogTable, List columnPlacementsOnStore ) { + public JdbcTable createJdbcTable( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { // Temporary type factory, just for the duration of this method. 
Allowable because we're creating a proto-type, // not a type; before being used, the proto-type will be copied into a real type factory. final RelDataTypeFactory typeFactory = new PolyTypeFactoryImpl( RelDataTypeSystem.DEFAULT ); @@ -144,20 +145,19 @@ public JdbcTable createJdbcTable( CatalogTable catalogTable, List logicalColumnNames = new LinkedList<>(); List physicalColumnNames = new LinkedList<>(); String physicalSchemaName = null; - String physicalTableName = null; + for ( CatalogColumnPlacement placement : columnPlacementsOnStore ) { CatalogColumn catalogColumn = Catalog.getInstance().getColumn( placement.columnId ); if ( physicalSchemaName == null ) { physicalSchemaName = placement.physicalSchemaName; } - if ( physicalTableName == null ) { - physicalTableName = placement.physicalTableName; - } + RelDataType sqlType = catalogColumn.getRelDataType( typeFactory ); fieldInfo.add( catalogColumn.name, placement.physicalColumnName, sqlType ).nullable( catalogColumn.nullable ); logicalColumnNames.add( catalogColumn.name ); physicalColumnNames.add( placement.physicalColumnName ); } + JdbcTable table = new JdbcTable( this, catalogTable.getSchemaName(), @@ -166,10 +166,10 @@ public JdbcTable createJdbcTable( CatalogTable catalogTable, List pcnl = Expressions.list(); + int i = 0; + for ( String str : physicalColumnNames ) { + SqlNode[] operands = new SqlNode[]{ + new SqlIdentifier( Arrays.asList( physicalSchemaName, physicalTableName, str ), SqlParserPos.ZERO ), + new SqlIdentifier( Arrays.asList( logicalColumnNames.get( i++ ) ), SqlParserPos.ZERO ) + }; + pcnl.add( new SqlBasicCall( SqlStdOperatorTable.AS, operands, SqlParserPos.ZERO ) ); + } + return new SqlNodeList( pcnl, SqlParserPos.ZERO ); + } + + @Override public RelNode toRel( RelOptTable.ToRelContext context, RelOptTable relOptTable ) { return new JdbcTableScan( context.getCluster(), relOptTable, this, jdbcSchema.getConvention() ); @@ -281,6 +297,7 @@ public Enumerator enumerator() { JdbcUtils.ObjectArrayRowBuilder.factory( fieldClasses( typeFactory ) ) ); return enumerable.enumerator(); } + } } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/RelToSqlConverter.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/RelToSqlConverter.java index cd7abf658f..07edc4cdeb 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/RelToSqlConverter.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/RelToSqlConverter.java @@ -1,5 +1,5 @@ /* - * Copyright 2019-2020 The Polypheny Project + * Copyright 2019-2021 The Polypheny Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -110,6 +110,8 @@ public abstract class RelToSqlConverter extends SqlImplementor implements Reflec private final Deque stack = new ArrayDeque<>(); + private boolean isUnion = false; + /** * Creates a RelToSqlConverter. 
@@ -191,15 +193,15 @@ public Result visit( Filter e ) { if ( input instanceof Aggregate ) { final Builder builder; if ( ((Aggregate) input).getInput() instanceof Project ) { - builder = x.builder( e ); + builder = x.builder( e, true ); builder.clauses.add( Clause.HAVING ); } else { - builder = x.builder( e, Clause.HAVING ); + builder = x.builder( e, true, Clause.HAVING ); } builder.setHaving( builder.context.toSql( null, e.getCondition() ) ); return builder.result(); } else { - final Builder builder = x.builder( e, Clause.WHERE ); + final Builder builder = x.builder( e, isUnion, Clause.WHERE ); builder.setWhere( builder.context.toSql( null, e.getCondition() ) ); return builder.result(); } @@ -215,7 +217,7 @@ public Result visit( Project e ) { if ( isStar( e.getChildExps(), e.getInput().getRowType(), e.getRowType() ) ) { return x; } - final Builder builder = x.builder( e, Clause.SELECT ); + final Builder builder = x.builder( e, false, Clause.SELECT ); final List selectList = new ArrayList<>(); for ( RexNode ref : e.getChildExps() ) { SqlNode sqlExpr = builder.context.toSql( null, ref ); @@ -235,10 +237,10 @@ public Result visit( Aggregate e ) { final Result x = visitChild( 0, e.getInput() ); final Builder builder; if ( e.getInput() instanceof Project ) { - builder = x.builder( e ); + builder = x.builder( e, true ); builder.clauses.add( Clause.GROUP_BY ); } else { - builder = x.builder( e, Clause.GROUP_BY ); + builder = x.builder( e, true, Clause.GROUP_BY ); } List groupByList = Expressions.list(); final List selectList = new ArrayList<>(); @@ -280,9 +282,12 @@ public Result visit( TableScan e ) { * @see #dispatch */ public Result visit( Union e ) { - return setOpToSql( e.all + isUnion = true; + Result result = setOpToSql( e.all ? SqlStdOperatorTable.UNION_ALL : SqlStdOperatorTable.UNION, e ); + isUnion = false; + return result; } @@ -315,8 +320,8 @@ public Result visit( Calc e ) { final RexProgram program = e.getProgram(); Builder builder = program.getCondition() != null - ? x.builder( e, Clause.WHERE ) - : x.builder( e ); + ? x.builder( e, true, Clause.WHERE ) + : x.builder( e, true ); if ( !isStar( program ) ) { final List selectList = new ArrayList<>(); for ( RexLocalRef ref : program.getProjectList() ) { @@ -405,7 +410,7 @@ public Result visit( Values e ) { */ public Result visit( Sort e ) { Result x = visitChild( 0, e.getInput() ); - Builder builder = x.builder( e, Clause.ORDER_BY ); + Builder builder = x.builder( e, false, Clause.ORDER_BY ); if ( stack.size() != 1 && builder.select.getSelectList() == null ) { // Generates explicit column names instead of start(*) for non-root ORDER BY to avoid ambiguity. 
final List selectList = Expressions.list(); @@ -423,12 +428,12 @@ public Result visit( Sort e ) { x = builder.result(); } if ( e.fetch != null ) { - builder = x.builder( e, Clause.FETCH ); + builder = x.builder( e, false, Clause.FETCH ); builder.setFetch( builder.context.toSql( null, e.fetch ) ); x = builder.result(); } if ( e.offset != null ) { - builder = x.builder( e, Clause.OFFSET ); + builder = x.builder( e, false, Clause.OFFSET ); builder.setOffset( builder.context.toSql( null, e.offset ) ); x = builder.result(); } @@ -671,6 +676,7 @@ private static class Frame { this.ordinalInParent = ordinalInParent; this.r = r; } + } @@ -694,6 +700,7 @@ public SqlIdentifier getPhysicalTableName( List tableNames ) { public SqlIdentifier getPhysicalColumnName( List tableName, String columnName ) { return new SqlIdentifier( columnName, POS ); } + } } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/SqlImplementor.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/SqlImplementor.java index fe84e3cb23..2b68713381 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/SqlImplementor.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/rel2sql/SqlImplementor.java @@ -58,6 +58,9 @@ import javax.annotation.Nonnull; import org.apache.calcite.linq4j.Ord; import org.apache.calcite.linq4j.tree.Expressions; +import org.polypheny.db.adapter.jdbc.JdbcTable; +import org.polypheny.db.adapter.jdbc.JdbcTableScan; +import org.polypheny.db.prepare.RelOptTableImpl; import org.polypheny.db.rel.RelFieldCollation; import org.polypheny.db.rel.RelNode; import org.polypheny.db.rel.core.AggregateCall; @@ -183,9 +186,17 @@ public Result setOpToSql( SqlSetOperator operator, RelNode rel ) { for ( Ord input : Ord.zip( rel.getInputs() ) ) { final Result result = visitChild( input.i, input.e ); if ( node == null ) { - node = result.asSelect(); + if ( input.getValue() instanceof JdbcTableScan ) { + node = result.asSelect( ((JdbcTable) ((RelOptTableImpl) input.getValue().getTable()).getTable()).getNodeList() ); + } else { + node = result.asSelect(); + } } else { - node = operator.createCall( POS, node, result.asSelect() ); + if ( input.getValue() instanceof JdbcTableScan ) { + node = operator.createCall( POS, node, result.asSelect( ((JdbcTable) ((RelOptTableImpl) input.getValue().getTable()).getTable()).getNodeList() ) ); + } else { + node = operator.createCall( POS, node, result.asSelect() ); + } } } final List clauses = Expressions.list( Clause.SET_OP ); @@ -410,10 +421,15 @@ private void collectAliases( ImmutableMap.Builder builder, } + SqlSelect wrapSelect( SqlNode node ) { + return wrapSelect( node, null ); + } + + /** * Wraps a node in a SELECT statement that has no clauses: "SELECT ... FROM (node)". */ - SqlSelect wrapSelect( SqlNode node ) { + SqlSelect wrapSelect( SqlNode node, SqlNodeList sqlNodes ) { assert node instanceof SqlJoin || node instanceof SqlIdentifier || node instanceof SqlMatchRecognize @@ -425,7 +441,7 @@ SqlSelect wrapSelect( SqlNode node ) { return new SqlSelect( POS, SqlNodeList.EMPTY, - null, + sqlNodes, node, null, null, @@ -1107,7 +1123,7 @@ public Result( SqlNode node, Collection clauses, String neededAlias, Rel * @param clauses Clauses that will be generated to implement current relational expression * @return A builder */ - public Builder builder( RelNode rel, Clause... clauses ) { + public Builder builder( RelNode rel, boolean explicitColumnNames, Clause... 
clauses ) { final Clause maxClause = maxClause(); boolean needNew = false; // If old and new clause are equal and belong to below set, then new SELECT wrap is not required @@ -1129,7 +1145,11 @@ && hasNestedAggregations( (LogicalAggregate) rel ) ) { if ( needNew ) { select = subSelect(); } else { - select = asSelect(); + if ( explicitColumnNames && rel.getInputs().size() == 1 && rel.getInput( 0 ) instanceof JdbcTableScan ) { + select = asSelect( ((JdbcTable) ((RelOptTableImpl) rel.getInput( 0 ).getTable()).getTable()).getNodeList() ); + } else { + select = asSelect(); + } clauseList.addAll( this.clauses ); } clauseList.appendAll( clauses ); @@ -1221,13 +1241,18 @@ public SqlSelect subSelect() { * Converts a non-query node into a SELECT node. Set operators (UNION, INTERSECT, EXCEPT) remain as is. */ public SqlSelect asSelect() { + return asSelect( null ); + } + + + public SqlSelect asSelect( SqlNodeList sqlNodes ) { if ( node instanceof SqlSelect ) { return (SqlSelect) node; } if ( !dialect.hasImplicitTableAlias() ) { - return wrapSelect( asFrom() ); + return wrapSelect( asFrom(), sqlNodes ); } - return wrapSelect( node ); + return wrapSelect( node, sqlNodes ); } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/AbstractJdbcSource.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/AbstractJdbcSource.java index ebc018769f..bbea7332e2 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/AbstractJdbcSource.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/AbstractJdbcSource.java @@ -121,8 +121,8 @@ public void truncate( Context context, CatalogTable catalogTable ) { // We get the physical schema / table name by checking existing column placements of the same logical table placed on this store. // This works because there is only one physical table for each logical table on JDBC stores. The reason for choosing this // approach rather than using the default physical schema / table names is that this approach allows truncating linked tables. 
- String physicalTableName = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ).get( 0 ).physicalTableName; - String physicalSchemaName = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ).get( 0 ).physicalSchemaName; + String physicalTableName = Catalog.getInstance().getPartitionPlacementByTable( getAdapterId(), catalogTable.id ).get( 0 ).physicalTableName; + String physicalSchemaName = Catalog.getInstance().getPartitionPlacementByTable( getAdapterId(), catalogTable.id ).get( 0 ).physicalSchemaName; StringBuilder builder = new StringBuilder(); builder.append( "TRUNCATE TABLE " ) .append( dialect.quoteIdentifier( physicalSchemaName ) ) diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MonetdbSource.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MonetdbSource.java index fe52389b27..1b1cb977fd 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MonetdbSource.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MonetdbSource.java @@ -29,6 +29,7 @@ import org.polypheny.db.adapter.jdbc.connection.ConnectionFactory; import org.polypheny.db.adapter.jdbc.connection.TransactionalConnectionFactory; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.schema.Schema; import org.polypheny.db.schema.Table; @@ -77,8 +78,8 @@ protected ConnectionFactory createConnectionFactory( final Map s @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore, partitionPlacement ); } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MysqlSource.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MysqlSource.java index 2a18421f87..0076ab360e 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MysqlSource.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/MysqlSource.java @@ -27,6 +27,7 @@ import org.polypheny.db.adapter.Adapter.AdapterSettingString; import org.polypheny.db.adapter.DeployMode; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.schema.Schema; import org.polypheny.db.schema.Table; @@ -62,8 +63,8 @@ public MysqlSource( int storeId, String uniqueName, final Map se @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore, partitionPlacement ); } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/PostgresqlSource.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/PostgresqlSource.java index 
4d43323f8f..f90193acaf 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/PostgresqlSource.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/sources/PostgresqlSource.java @@ -27,6 +27,7 @@ import org.polypheny.db.adapter.Adapter.AdapterSettingString; import org.polypheny.db.adapter.DeployMode; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.schema.Schema; import org.polypheny.db.schema.Table; @@ -68,8 +69,8 @@ public PostgresqlSource( int storeId, String uniqueName, final Map columnPlacementsOnStore ) { - return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore, partitionPlacement ); } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/AbstractJdbcStore.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/AbstractJdbcStore.java index 6852eee962..12f0c3174f 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/AbstractJdbcStore.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/AbstractJdbcStore.java @@ -18,6 +18,7 @@ import java.sql.SQLException; +import java.util.ArrayList; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -32,7 +33,9 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; +import org.polypheny.db.config.RuntimeConfig; import org.polypheny.db.docker.DockerInstance; import org.polypheny.db.jdbc.Context; import org.polypheny.db.runtime.PolyphenyDbException; @@ -120,25 +123,40 @@ public void createUdfs() { @Override - public void createTable( Context context, CatalogTable catalogTable ) { + public void createTable( Context context, CatalogTable catalogTable, List partitionIds ) { List qualifiedNames = new LinkedList<>(); qualifiedNames.add( catalogTable.getSchemaName() ); qualifiedNames.add( catalogTable.name ); - String physicalTableName = getPhysicalTableName( catalogTable.id ); - if ( log.isDebugEnabled() ) { - log.debug( "[{}] createTable: Qualified names: {}, physicalTableName: {}", getUniqueName(), qualifiedNames, physicalTableName ); - } - StringBuilder query = buildCreateTableQuery( getDefaultPhysicalSchemaName(), physicalTableName, catalogTable ); - executeUpdate( query, context ); - // Add physical names to placements - for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ) ) { - catalog.updateColumnPlacementPhysicalNames( + + List existingPlacements = catalog.getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ); + + // Remove the unpartitioned table name again, otherwise the CREATE statement would fail because the table already exists + for ( long partitionId : partitionIds ) { + String physicalTableName = getPhysicalTableName( catalogTable.id, partitionId ); + + if ( log.isDebugEnabled() ) { + log.debug( "[{}] createTable: Qualified names: {}, physicalTableName: {}", getUniqueName(), qualifiedNames,
physicalTableName ); + } + StringBuilder query = buildCreateTableQuery( getDefaultPhysicalSchemaName(), physicalTableName, catalogTable ); + if ( RuntimeConfig.DEBUG.getBoolean() ) { + log.info( "{} on store {}", query.toString(), this.getUniqueName() ); + } + executeUpdate( query, context ); + + catalog.updatePartitionPlacementPhysicalNames( getAdapterId(), - placement.columnId, + partitionId, getDefaultPhysicalSchemaName(), - physicalTableName, - getPhysicalColumnName( placement.columnId ), - true ); + physicalTableName ); + + for ( CatalogColumnPlacement placement : existingPlacements ) { + catalog.updateColumnPlacementPhysicalNames( + getAdapterId(), + placement.columnId, + getDefaultPhysicalSchemaName(), + getPhysicalColumnName( placement.columnId ), + true ); + } } } @@ -151,7 +169,7 @@ protected StringBuilder buildCreateTableQuery( String schemaName, String physica .append( dialect.quoteIdentifier( physicalTableName ) ) .append( " ( " ); boolean first = true; - for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ) ) { + for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ) ) { CatalogColumn catalogColumn = catalog.getColumn( placement.columnId ); if ( !first ) { builder.append( ", " ); @@ -173,30 +191,31 @@ public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn // This works because there is only one physical table for each logical table on JDBC stores. The reason for choosing this // approach rather than using the default physical schema / table names is that this approach allows adding columns to linked tables. CatalogColumnPlacement ccp = null; - for ( CatalogColumnPlacement p : Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ) ) { + for ( CatalogColumnPlacement p : Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ) ) { // The for loop is required to avoid using the names of the column which we are currently adding (which are null) if ( p.columnId != catalogColumn.id ) { ccp = p; break; } } - String physicalTableName = ccp.physicalTableName; - String physicalSchemaName = ccp.physicalSchemaName; - StringBuilder query = buildAddColumnQuery( physicalSchemaName, physicalTableName, physicalColumnName, catalogTable, catalogColumn ); - executeUpdate( query, context ); - // Insert default value - if ( catalogColumn.defaultValue != null ) { - query = buildInsertDefaultValueQuery( physicalSchemaName, physicalTableName, physicalColumnName, catalogColumn ); + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( ccp.adapterId, catalogTable.id ) ) { + String physicalTableName = partitionPlacement.physicalTableName; + String physicalSchemaName = partitionPlacement.physicalSchemaName; + StringBuilder query = buildAddColumnQuery( physicalSchemaName, physicalTableName, physicalColumnName, catalogTable, catalogColumn ); executeUpdate( query, context ); + // Insert default value + if ( catalogColumn.defaultValue != null ) { + query = buildInsertDefaultValueQuery( physicalSchemaName, physicalTableName, physicalColumnName, catalogColumn ); + executeUpdate( query, context ); + } + // Add physical name to placement + catalog.updateColumnPlacementPhysicalNames( + getAdapterId(), + catalogColumn.id, + physicalSchemaName, + physicalColumnName, + false ); } - // Add physical name to placement - catalog.updateColumnPlacementPhysicalNames( - 
getAdapterId(), - catalogColumn.id, - physicalSchemaName, - physicalTableName, - physicalColumnName, - false ); } @@ -215,7 +234,7 @@ protected StringBuilder buildAddColumnQuery( String physicalSchemaName, String p protected void createColumnDefinition( CatalogColumn catalogColumn, StringBuilder builder ) { if ( !this.dialect.supportsNestedArrays() && catalogColumn.collectionsType != null ) { - //returns e.g. TEXT if arrays are not supported + // Returns e.g. TEXT if arrays are not supported builder.append( getTypeString( PolyType.ARRAY ) ); } else { builder.append( " " ).append( getTypeString( catalogColumn.type ) ); @@ -276,50 +295,69 @@ public void updateColumnType( Context context, CatalogColumnPlacement columnPlac if ( !this.dialect.supportsNestedArrays() && catalogColumn.collectionsType != null ) { return; } - StringBuilder builder = new StringBuilder(); - builder.append( "ALTER TABLE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " ALTER COLUMN " ).append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); - builder.append( " " ).append( getTypeString( catalogColumn.type ) ); - if ( catalogColumn.length != null ) { - builder.append( "(" ); - builder.append( catalogColumn.length ); - if ( catalogColumn.scale != null ) { - builder.append( "," ).append( catalogColumn.scale ); + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( columnPlacement.adapterId, columnPlacement.tableId ) ) { + StringBuilder builder = new StringBuilder(); + builder.append( "ALTER TABLE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " ALTER COLUMN " ).append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); + builder.append( " " ).append( getTypeString( catalogColumn.type ) ); + if ( catalogColumn.length != null ) { + builder.append( "(" ); + builder.append( catalogColumn.length ); + if ( catalogColumn.scale != null ) { + builder.append( "," ).append( catalogColumn.scale ); + } + builder.append( ")" ); } - builder.append( ")" ); + executeUpdate( builder, context ); } - executeUpdate( builder, context ); } @Override - public void dropTable( Context context, CatalogTable catalogTable ) { + public void dropTable( Context context, CatalogTable catalogTable, List partitionIds ) { // We get the physical schema / table name by checking existing column placements of the same logical table placed on this store. // This works because there is only one physical table for each logical table on JDBC stores. The reason for choosing this // approach rather than using the default physical schema / table names is that this approach allows dropping linked tables. - String physicalTableName = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ).get( 0 ).physicalTableName; - String physicalSchemaName = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ).get( 0 ).physicalSchemaName; - StringBuilder builder = new StringBuilder(); - builder.append( "DROP TABLE " ) - .append( dialect.quoteIdentifier( physicalSchemaName ) ) - .append( "." 
) - .append( dialect.quoteIdentifier( physicalTableName ) ); - executeUpdate( builder, context ); + String physicalTableName; + String physicalSchemaName; + + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + catalog.deletePartitionPlacement( getAdapterId(), partitionPlacement.partitionId ); + physicalSchemaName = partitionPlacement.physicalSchemaName; + physicalTableName = partitionPlacement.physicalTableName; + + StringBuilder builder = new StringBuilder(); + + builder.append( "DROP TABLE " ) + .append( dialect.quoteIdentifier( physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( physicalTableName ) ); + + if ( RuntimeConfig.DEBUG.getBoolean() ) { + log.info( "{} from store {}", builder.toString(), this.getUniqueName() ); + } + executeUpdate( builder, context ); + } } @Override public void dropColumn( Context context, CatalogColumnPlacement columnPlacement ) { - StringBuilder builder = new StringBuilder(); - builder.append( "ALTER TABLE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " DROP " ).append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); - executeUpdate( builder, context ); + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( columnPlacement.adapterId, columnPlacement.tableId ) ) { + StringBuilder builder = new StringBuilder(); + builder.append( "ALTER TABLE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " DROP " ).append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); + executeUpdate( builder, context ); + } } @@ -328,14 +366,16 @@ public void truncate( Context context, CatalogTable catalogTable ) { // We get the physical schema / table name by checking existing column placements of the same logical table placed on this store. // This works because there is only one physical table for each logical table on JDBC stores. The reason for choosing this // approach rather than using the default physical schema / table names is that this approach allows truncating linked tables. - String physicalTableName = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ).get( 0 ).physicalTableName; - String physicalSchemaName = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ).get( 0 ).physicalSchemaName; - StringBuilder builder = new StringBuilder(); - builder.append( "TRUNCATE TABLE " ) - .append( dialect.quoteIdentifier( physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( physicalTableName ) ); - executeUpdate( builder, context ); + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( getAdapterId(), catalogTable.id ) ) { + String physicalTableName = partitionPlacement.physicalTableName; + String physicalSchemaName = partitionPlacement.physicalSchemaName; + StringBuilder builder = new StringBuilder(); + builder.append( "TRUNCATE TABLE " ) + .append( dialect.quoteIdentifier( physicalSchemaName ) ) + .append( "." 
) + .append( dialect.quoteIdentifier( physicalTableName ) ); + executeUpdate( builder, context ); + } } @@ -395,8 +435,12 @@ public void shutdown() { } - protected String getPhysicalTableName( long tableId ) { - return "tab" + tableId; + protected String getPhysicalTableName( long tableId, long partitionId ) { + String physicalTableName = "tab" + tableId; + if ( partitionId >= 0 ) { + physicalTableName += "_part" + partitionId; + } + return physicalTableName; } @@ -412,5 +456,4 @@ protected String getPhysicalIndexName( long tableId, long indexId ) { protected abstract String getDefaultPhysicalSchemaName(); - } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/HsqldbStore.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/HsqldbStore.java index f511293041..d046a050b0 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/HsqldbStore.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/HsqldbStore.java @@ -4,6 +4,7 @@ import com.google.common.collect.ImmutableList; import java.io.File; import java.sql.SQLException; +import java.util.ArrayList; import java.util.List; import java.util.Map; import lombok.extern.slf4j.Slf4j; @@ -18,6 +19,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogIndex; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.config.RuntimeConfig; import org.polypheny.db.jdbc.Context; @@ -79,8 +81,8 @@ protected ConnectionFactory deployEmbedded() { @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore, partitionPlacement ); } @@ -91,44 +93,55 @@ public Schema getCurrentSchema() { @Override - public void addIndex( Context context, CatalogIndex catalogIndex ) { - List ccps = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogIndex.key.tableId ); - StringBuilder builder = new StringBuilder(); - builder.append( "CREATE " ); - if ( catalogIndex.unique ) { - builder.append( "UNIQUE INDEX " ); - } else { - builder.append( "INDEX " ); - } + public void addIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { + List ccps = Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogIndex.key.tableId ); + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + String physicalIndexName = getPhysicalIndexName( catalogIndex.key.tableId, catalogIndex.id ); - builder.append( dialect.quoteIdentifier( physicalIndexName ) ); - builder.append( " ON " ) - .append( dialect.quoteIdentifier( ccps.get( 0 ).physicalSchemaName ) ) - .append( "." 
) - .append( dialect.quoteIdentifier( ccps.get( 0 ).physicalTableName ) ); - - builder.append( "(" ); - boolean first = true; - for ( long columnId : catalogIndex.key.columnIds ) { - if ( !first ) { - builder.append( ", " ); + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + + StringBuilder builder = new StringBuilder(); + builder.append( "CREATE " ); + if ( catalogIndex.unique ) { + builder.append( "UNIQUE INDEX " ); + } else { + builder.append( "INDEX " ); } - first = false; - builder.append( dialect.quoteIdentifier( getPhysicalColumnName( columnId ) ) ).append( " " ); - } - builder.append( ")" ); - executeUpdate( builder, context ); + builder.append( dialect.quoteIdentifier( physicalIndexName + "_" + partitionPlacement.partitionId ) ); + builder.append( " ON " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + + builder.append( "(" ); + boolean first = true; + for ( long columnId : catalogIndex.key.columnIds ) { + if ( !first ) { + builder.append( ", " ); + } + first = false; + builder.append( dialect.quoteIdentifier( getPhysicalColumnName( columnId ) ) ).append( " " ); + } + builder.append( ")" ); + executeUpdate( builder, context ); + } Catalog.getInstance().setIndexPhysicalName( catalogIndex.id, physicalIndexName ); } @Override - public void dropIndex( Context context, CatalogIndex catalogIndex ) { - StringBuilder builder = new StringBuilder(); - builder.append( "DROP INDEX " ); - builder.append( dialect.quoteIdentifier( catalogIndex.physicalName ) ); - executeUpdate( builder, context ); + public void dropIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + StringBuilder builder = new StringBuilder(); + builder.append( "DROP INDEX " ); + builder.append( dialect.quoteIdentifier( catalogIndex.physicalName + "_" + partitionPlacement.partitionId ) ); + executeUpdate( builder, context ); + } } diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/MonetdbStore.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/MonetdbStore.java index 4c308a256f..c13e48a9d5 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/MonetdbStore.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/MonetdbStore.java @@ -38,6 +38,7 @@ import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogIndex; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.docker.DockerManager; import org.polypheny.db.docker.DockerManager.ContainerBuilder; @@ -80,7 +81,7 @@ protected ConnectionFactory deployDocker( int dockerInstanceId ) { DockerManager.Container container = new ContainerBuilder( getAdapterId(), "topaztechnology/monetdb:11.37.11", getUniqueName(), dockerInstanceId ) .withMappedPort( 50000, Integer.parseInt( settings.get( "port" ) ) ) .withEnvironmentVariables( Arrays.asList( "MONETDB_PASSWORD=" + settings.get( "password" ), "MONET_DATABASE=monetdb" ) ) - .withReadyTest( this::testDockerConnection, 15000 ) + .withReadyTest( 
this::testConnection, 15000 ) .build(); host = container.getHost(); @@ -95,12 +96,14 @@ protected ConnectionFactory deployDocker( int dockerInstanceId ) { } - @Override protected ConnectionFactory deployRemote() { host = settings.get( "host" ); database = settings.get( "database" ); username = settings.get( "username" ); + if ( !testConnection() ) { + throw new RuntimeException( "Unable to connect" ); + } ConnectionFactory connectionFactory = createConnectionFactory(); createDefaultSchema( connectionFactory ); return connectionFactory; @@ -143,81 +146,84 @@ public void updateColumnType( Context context, CatalogColumnPlacement columnPlac String tmpColName = columnPlacement.physicalColumnName + "tmp"; StringBuilder builder; - // (1) Create a temporary column `alter table tabX add column colXtemp NEW_TYPE;` - builder = new StringBuilder(); - builder.append( "ALTER TABLE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " ADD COLUMN " ) - .append( dialect.quoteIdentifier( tmpColName ) ) - .append( " " ) - .append( getTypeString( catalogColumn.type ) ); - executeUpdate( builder, context ); - - // (2) Set data in temporary column to original data `update tabX set colXtemp=colX;` - builder = new StringBuilder(); - builder.append( "UPDATE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " SET " ) - .append( dialect.quoteIdentifier( tmpColName ) ) - .append( "=" ) - .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); - executeUpdate( builder, context ); - - // (3) Remove the original column `alter table tabX drop column colX;` - builder = new StringBuilder(); - builder.append( "ALTER TABLE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " DROP COLUMN " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); - executeUpdate( builder, context ); - - // (4) Re-create the original column with the new type `alter table tabX add column colX NEW_TYPE; - builder = new StringBuilder(); - builder.append( "ALTER TABLE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " ADD COLUMN " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ) - .append( " " ) - .append( getTypeString( catalogColumn.type ) ); - executeUpdate( builder, context ); - - // (5) Move data from temporary column to new column `update tabX set colX=colXtemp`; - builder = new StringBuilder(); - builder.append( "UPDATE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " SET " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ) - .append( "=" ) - .append( dialect.quoteIdentifier( tmpColName ) ); - executeUpdate( builder, context ); - - // (6) Drop the temporary column `alter table tabX drop column colXtemp;` - builder = new StringBuilder(); - builder.append( "ALTER TABLE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." 
) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " DROP COLUMN " ) - .append( dialect.quoteIdentifier( tmpColName ) ); - executeUpdate( builder, context ); - + for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( columnPlacement.adapterId, columnPlacement.tableId ) ) { + + // (1) Create a temporary column `alter table tabX add column colXtemp NEW_TYPE;` + builder = new StringBuilder(); + builder.append( "ALTER TABLE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " ADD COLUMN " ) + .append( dialect.quoteIdentifier( tmpColName ) ) + .append( " " ) + .append( getTypeString( catalogColumn.type ) ); + executeUpdate( builder, context ); + + // (2) Set data in temporary column to original data `update tabX set colXtemp=colX;` + builder = new StringBuilder(); + builder.append( "UPDATE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " SET " ) + .append( dialect.quoteIdentifier( tmpColName ) ) + .append( "=" ) + .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); + executeUpdate( builder, context ); + + // (3) Remove the original column `alter table tabX drop column colX;` + builder = new StringBuilder(); + builder.append( "ALTER TABLE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " DROP COLUMN " ) + .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); + executeUpdate( builder, context ); + + // (4) Re-create the original column with the new type `alter table tabX add column colX NEW_TYPE; + builder = new StringBuilder(); + builder.append( "ALTER TABLE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " ADD COLUMN " ) + .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ) + .append( " " ) + .append( getTypeString( catalogColumn.type ) ); + executeUpdate( builder, context ); + + // (5) Move data from temporary column to new column `update tabX set colX=colXtemp`; + builder = new StringBuilder(); + builder.append( "UPDATE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " SET " ) + .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ) + .append( "=" ) + .append( dialect.quoteIdentifier( tmpColName ) ); + executeUpdate( builder, context ); + + // (6) Drop the temporary column `alter table tabX drop column colXtemp;` + builder = new StringBuilder(); + builder.append( "ALTER TABLE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." 
) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " DROP COLUMN " ) + .append( dialect.quoteIdentifier( tmpColName ) ); + executeUpdate( builder, context ); + } Catalog.getInstance().updateColumnPlacementPhysicalPosition( getAdapterId(), catalogColumn.id ); + } @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore, partitionPlacement ); } @@ -228,13 +234,13 @@ public Schema getCurrentSchema() { @Override - public void addIndex( Context context, CatalogIndex catalogIndex ) { + public void addIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { throw new RuntimeException( "MonetDB adapter does not support adding indexes" ); } @Override - public void dropIndex( Context context, CatalogIndex catalogIndex ) { + public void dropIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { throw new RuntimeException( "MonetDB adapter does not support dropping indexes" ); } @@ -316,7 +322,7 @@ private static String getConnectionUrl( final String dbHostname, final int dbPor } - private boolean testDockerConnection() { + private boolean testConnection() { ConnectionFactory connectionFactory = null; ConnectionHandler handler = null; try { diff --git a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/PostgresqlStore.java b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/PostgresqlStore.java index bf3b3ae948..13c2ffa14c 100644 --- a/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/PostgresqlStore.java +++ b/jdbc-adapter/src/main/java/org/polypheny/db/adapter/jdbc/stores/PostgresqlStore.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; +import java.util.ArrayList; import java.util.List; import java.util.Map; import lombok.extern.slf4j.Slf4j; @@ -38,6 +39,7 @@ import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; import org.polypheny.db.catalog.entity.CatalogIndex; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.docker.DockerManager; import org.polypheny.db.docker.DockerManager.ContainerBuilder; @@ -59,7 +61,7 @@ usedModes = { DeployMode.REMOTE, DeployMode.DOCKER }) @AdapterSettingString(name = "host", defaultValue = "localhost", position = 1, description = "Hostname or IP address of the remote PostgreSQL instance.", appliesTo = DeploySetting.REMOTE) -@AdapterSettingInteger(name = "port", defaultValue = 3306, position = 2, +@AdapterSettingInteger(name = "port", defaultValue = 5432, position = 2, description = "JDBC port number on the remote PostgreSQL instance.") @AdapterSettingString(name = "database", defaultValue = "polypheny", position = 3, description = "Name of the database to connect to.", appliesTo = DeploySetting.REMOTE) @@ -86,7 +88,7 @@ public ConnectionFactory deployDocker( int instanceId ) { DockerManager.Container container = new ContainerBuilder( getAdapterId(), "postgres:13.2", getUniqueName(), instanceId ) .withMappedPort( 5432, Integer.parseInt( settings.get( "port" ) ) ) .withEnvironmentVariable( 
"POSTGRES_PASSWORD=" + settings.get( "password" ) ) - .withReadyTest( this::testDockerConnection, 15000 ) + .withReadyTest( this::testConnection, 15000 ) .build(); host = container.getHost(); @@ -104,6 +106,9 @@ protected ConnectionFactory deployRemote() { host = settings.get( "host" ); database = settings.get( "database" ); username = settings.get( "username" ); + if ( !testConnection() ) { + throw new RuntimeException( "Unable to connect" ); + } return createConnectionFactory(); } @@ -145,37 +150,42 @@ public void createUdfs() { @Override public void updateColumnType( Context context, CatalogColumnPlacement columnPlacement, CatalogColumn catalogColumn, PolyType oldType ) { StringBuilder builder = new StringBuilder(); - builder.append( "ALTER TABLE " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( columnPlacement.physicalTableName ) ); - builder.append( " ALTER COLUMN " ).append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); - builder.append( " TYPE " ).append( getTypeString( catalogColumn.type ) ); - if ( catalogColumn.collectionsType != null ) { - builder.append( " " ).append( catalogColumn.collectionsType.toString() ); - } - if ( catalogColumn.length != null ) { - builder.append( "(" ); - builder.append( catalogColumn.length ); - if ( catalogColumn.scale != null ) { - builder.append( "," ).append( catalogColumn.scale ); + List partitionPlacements = catalog.getPartitionPlacementByTable( getAdapterId(), catalogColumn.tableId ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + builder.append( "ALTER TABLE " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + builder.append( " ALTER COLUMN " ).append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ); + builder.append( " TYPE " ).append( getTypeString( catalogColumn.type ) ); + if ( catalogColumn.collectionsType != null ) { + builder.append( " " ).append( catalogColumn.collectionsType.toString() ); } - builder.append( ")" ); - } - builder.append( " USING " ) - .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ) - .append( "::" ) - .append( getTypeString( catalogColumn.type ) ); - if ( catalogColumn.collectionsType != null ) { - builder.append( " " ).append( catalogColumn.collectionsType.toString() ); + if ( catalogColumn.length != null ) { + builder.append( "(" ); + builder.append( catalogColumn.length ); + if ( catalogColumn.scale != null ) { + builder.append( "," ).append( catalogColumn.scale ); + } + builder.append( ")" ); + } + builder.append( " USING " ) + .append( dialect.quoteIdentifier( columnPlacement.physicalColumnName ) ) + .append( "::" ) + .append( getTypeString( catalogColumn.type ) ); + if ( catalogColumn.collectionsType != null ) { + builder.append( " " ).append( catalogColumn.collectionsType.toString() ); + } + executeUpdate( builder, context ); } - executeUpdate( builder, context ); + } @Override - public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore ); + public Table createTableSchema( CatalogTable catalogTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentJdbcSchema.createJdbcTable( catalogTable, columnPlacementsOnStore, partitionPlacement ); } @@ -186,64 +196,75 
@@ public Schema getCurrentSchema() { @Override - public void addIndex( Context context, CatalogIndex catalogIndex ) { - List ccps = Catalog.getInstance().getColumnPlacementsOnAdapter( getAdapterId(), catalogIndex.key.tableId ); - StringBuilder builder = new StringBuilder(); - builder.append( "CREATE " ); - if ( catalogIndex.unique ) { - builder.append( "UNIQUE INDEX " ); - } else { - builder.append( "INDEX " ); - } + public void addIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { + List ccps = Catalog.getInstance().getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogIndex.key.tableId ); + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + String physicalIndexName = getPhysicalIndexName( catalogIndex.key.tableId, catalogIndex.id ); - builder.append( dialect.quoteIdentifier( physicalIndexName ) ); - builder.append( " ON " ) - .append( dialect.quoteIdentifier( ccps.get( 0 ).physicalSchemaName ) ) - .append( "." ) - .append( dialect.quoteIdentifier( ccps.get( 0 ).physicalTableName ) ); - - builder.append( " USING " ); - switch ( catalogIndex.method ) { - case "btree": - case "btree_unique": - builder.append( "btree" ); - break; - case "hash": - case "hash_unique": - builder.append( "hash" ); - break; - case "gin": - case "gin_unique": - builder.append( "gin" ); - break; - case "brin": - builder.append( "gin" ); - break; - } - builder.append( "(" ); - boolean first = true; - for ( long columnId : catalogIndex.key.columnIds ) { - if ( !first ) { - builder.append( ", " ); + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + StringBuilder builder = new StringBuilder(); + builder.append( "CREATE " ); + if ( catalogIndex.unique ) { + builder.append( "UNIQUE INDEX " ); + } else { + builder.append( "INDEX " ); } - first = false; - builder.append( dialect.quoteIdentifier( getPhysicalColumnName( columnId ) ) ).append( " " ); - } - builder.append( ")" ); - executeUpdate( builder, context ); + builder.append( dialect.quoteIdentifier( physicalIndexName + "_" + partitionPlacement.partitionId ) ); + builder.append( " ON " ) + .append( dialect.quoteIdentifier( partitionPlacement.physicalSchemaName ) ) + .append( "." 
) + .append( dialect.quoteIdentifier( partitionPlacement.physicalTableName ) ); + + builder.append( " USING " ); + switch ( catalogIndex.method ) { + case "btree": + case "btree_unique": + builder.append( "btree" ); + break; + case "hash": + case "hash_unique": + builder.append( "hash" ); + break; + case "gin": + case "gin_unique": + builder.append( "gin" ); + break; + case "brin": + builder.append( "brin" ); + break; + } + builder.append( "(" ); + boolean first = true; + for ( long columnId : catalogIndex.key.columnIds ) { + if ( !first ) { + builder.append( ", " ); + } + first = false; + builder.append( dialect.quoteIdentifier( getPhysicalColumnName( columnId ) ) ).append( " " ); + } + builder.append( ")" ); + + executeUpdate( builder, context ); + } Catalog.getInstance().setIndexPhysicalName( catalogIndex.id, physicalIndexName ); } @Override - public void dropIndex( Context context, CatalogIndex catalogIndex ) { - StringBuilder builder = new StringBuilder(); - builder.append( "DROP INDEX " ); - builder.append( dialect.quoteIdentifier( catalogIndex.physicalName ) ); - executeUpdate( builder, context ); + public void dropIndex( Context context, CatalogIndex catalogIndex, List partitionIds ) { + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + StringBuilder builder = new StringBuilder(); + builder.append( "DROP INDEX " ); + builder.append( dialect.quoteIdentifier( catalogIndex.physicalName + "_" + partitionPlacement.partitionId ) ); + executeUpdate( builder, context ); + } } @@ -276,6 +297,7 @@ protected void reloadSettings( List updatedSettings ) { } + @Override protected void createColumnDefinition( CatalogColumn catalogColumn, StringBuilder builder ) { builder.append( " " ).append( getTypeString( catalogColumn.type ) ); if ( catalogColumn.length != null ) { @@ -343,7 +365,7 @@ private static String getConnectionUrl( final String dbHostname, final int dbPor } - private boolean testDockerConnection() { + private boolean testConnection() { ConnectionFactory connectionFactory = null; ConnectionHandler handler = null; try { diff --git a/jdbc-interface/build.gradle b/jdbc-interface/build.gradle index 9e6ba439ee..74eea205b2 100644 --- a/jdbc-interface/build.gradle +++ b/jdbc-interface/build.gradle @@ -5,6 +5,7 @@ version = versionMajor + "." 
+ versionMinor + versionQualifier dependencies { implementation project(":core") + implementation project(":monitoring") ////// BYTE UNITS implementation group: "com.jakewharton.byteunits", name: "byteunits", version: byteunits_version // Apache 2.0 diff --git a/jdbc-interface/src/main/java/org/polypheny/db/jdbc/DbmsMeta.java b/jdbc-interface/src/main/java/org/polypheny/db/jdbc/DbmsMeta.java index a402a61acf..0fcf99e476 100644 --- a/jdbc-interface/src/main/java/org/polypheny/db/jdbc/DbmsMeta.java +++ b/jdbc-interface/src/main/java/org/polypheny/db/jdbc/DbmsMeta.java @@ -92,6 +92,8 @@ import org.polypheny.db.information.InformationManager; import org.polypheny.db.information.InformationPage; import org.polypheny.db.information.InformationTable; +import org.polypheny.db.monitoring.core.MonitoringServiceProvider; +import org.polypheny.db.monitoring.events.StatementEvent; import org.polypheny.db.processing.SqlProcessor; import org.polypheny.db.rel.RelRoot; import org.polypheny.db.rel.type.RelDataType; @@ -1299,6 +1301,10 @@ private List execute( StatementHandle h, PolyphenyDbConnectionHan throw new AvaticaRuntimeException( message == null ? "null" : message, -1, "", AvaticaSeverity.ERROR ); } } + if ( statementHandle.getStatement().getTransaction().getMonitoringData() != null ) { + StatementEvent ev = statementHandle.getStatement().getTransaction().getMonitoringData(); + MonitoringServiceProvider.getInstance().monitorEvent( ev ); + } return resultSets; } diff --git a/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoSchema.java b/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoSchema.java index 42715f8fd5..1236896d4f 100644 --- a/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoSchema.java +++ b/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoSchema.java @@ -46,6 +46,7 @@ import org.polypheny.db.catalog.Catalog; import org.polypheny.db.catalog.entity.CatalogColumn; import org.polypheny.db.catalog.entity.CatalogColumnPlacement; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.plan.Convention; import org.polypheny.db.rel.type.RelDataType; @@ -116,7 +117,7 @@ protected Map getTableMap() { } - public MongoTable createTable( CatalogTable catalogTable, List columnPlacementsOnStore, int storeId ) { + public MongoTable createTable( CatalogTable catalogTable, List columnPlacementsOnStore, int storeId, CatalogPartitionPlacement partitionPlacement ) { final RelDataTypeFactory typeFactory = new PolyTypeFactoryImpl( RelDataTypeSystem.DEFAULT ); final RelDataTypeFactory.Builder fieldInfo = typeFactory.builder(); @@ -125,9 +126,9 @@ public MongoTable createTable( CatalogTable catalogTable, List columnPlacementsOnStore ) { - return currentSchema.createTable( combinedTable, columnPlacementsOnStore, getAdapterId() ); + public Table createTableSchema( CatalogTable combinedTable, List columnPlacementsOnStore, CatalogPartitionPlacement partitionPlacement ) { + return currentSchema.createTable( combinedTable, columnPlacementsOnStore, getAdapterId(), partitionPlacement ); } @@ -167,8 +169,10 @@ public Schema getCurrentSchema() { public void truncate( Context context, CatalogTable table ) { commitAll(); context.getStatement().getTransaction().registerInvolvedAdapter( this ); - // DDL is auto-committed - currentSchema.database.getCollection( getPhysicalTableName( table.id ) ).deleteMany( new Document() ); + for ( CatalogPartitionPlacement 
partitionPlacement : catalog.getPartitionPlacementByTable( getAdapterId(), table.id ) ) { + // DDL is auto-committed + currentSchema.database.getCollection( partitionPlacement.physicalTableName ).deleteMany( new Document() ); + } } @@ -210,31 +214,47 @@ protected void reloadSettings( List updatedSettings ) { @Override - public void createTable( Context context, CatalogTable catalogTable ) { + public void createTable( Context context, CatalogTable catalogTable, List partitionIds ) { Catalog catalog = Catalog.getInstance(); commitAll(); //ClientSession session = transactionProvider.startTransaction( context.getStatement().getTransaction().getXid() ); //context.getStatement().getTransaction().registerInvolvedAdapter( this ); - this.currentSchema.database.createCollection( getPhysicalTableName( catalogTable.id ) ); - for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapter( getAdapterId(), catalogTable.id ) ) { - catalog.updateColumnPlacementPhysicalNames( + for ( long partitionId : partitionIds ) { + String physicalTableName = getPhysicalTableName( catalogTable.id, partitionId ); + this.currentSchema.database.createCollection( physicalTableName ); + + catalog.updatePartitionPlacementPhysicalNames( getAdapterId(), - placement.columnId, + partitionId, catalogTable.getSchemaName(), - catalogTable.name, - getPhysicalColumnName( placement.columnId ), - true ); + physicalTableName ); + + for ( CatalogColumnPlacement placement : catalog.getColumnPlacementsOnAdapterPerTable( getAdapterId(), catalogTable.id ) ) { + catalog.updateColumnPlacementPhysicalNames( + getAdapterId(), + placement.columnId, + catalogTable.getSchemaName(), + physicalTableName, + true ); + } } } @Override - public void dropTable( Context context, CatalogTable combinedTable ) { + public void dropTable( Context context, CatalogTable combinedTable, List partitionIds ) { commitAll(); context.getStatement().getTransaction().registerInvolvedAdapter( this ); //transactionProvider.startTransaction(); - this.currentSchema.database.getCollection( getPhysicalTableName( combinedTable.id ) ).drop(); + List partitionPlacements = new ArrayList<>(); + partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + catalog.deletePartitionPlacement( getAdapterId(), partitionPlacement.partitionId ); + //this.currentSchema.database.getCollection( getPhysicalTableName( combinedTable.id ) ).drop(); + this.currentSchema.database.getCollection( partitionPlacement.physicalTableName ).drop(); + } } @@ -243,72 +263,78 @@ public void addColumn( Context context, CatalogTable catalogTable, CatalogColumn commitAll(); context.getStatement().getTransaction().registerInvolvedAdapter( this ); // updates all columns with this field if a default value is provided - Document field; - if ( catalogColumn.defaultValue != null ) { - CatalogDefaultValue defaultValue = catalogColumn.defaultValue; - BsonValue value; - if ( catalogColumn.type.getFamily() == PolyTypeFamily.CHARACTER ) { - value = new BsonString( defaultValue.value ); - } else if ( PolyType.INT_TYPES.contains( catalogColumn.type ) ) { - value = new BsonInt32( Integer.parseInt( defaultValue.value ) ); - } else if ( PolyType.FRACTIONAL_TYPES.contains( catalogColumn.type ) ) { - value = new BsonDouble( Double.parseDouble( defaultValue.value ) ); - } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.BOOLEAN ) { - value = new BsonBoolean( 
Boolean.parseBoolean( defaultValue.value ) ); - } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.DATE ) { - try { - value = new BsonInt64( new SimpleDateFormat( "yyyy-MM-dd" ).parse( defaultValue.value ).getTime() ); - } catch ( ParseException e ) { - throw new RuntimeException( e ); + + List partitionPlacements = new ArrayList<>(); + catalogTable.partitionProperty.partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) ); + + for ( CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + Document field; + if ( catalogColumn.defaultValue != null ) { + CatalogDefaultValue defaultValue = catalogColumn.defaultValue; + BsonValue value; + if ( catalogColumn.type.getFamily() == PolyTypeFamily.CHARACTER ) { + value = new BsonString( defaultValue.value ); + } else if ( PolyType.INT_TYPES.contains( catalogColumn.type ) ) { + value = new BsonInt32( Integer.parseInt( defaultValue.value ) ); + } else if ( PolyType.FRACTIONAL_TYPES.contains( catalogColumn.type ) ) { + value = new BsonDouble( Double.parseDouble( defaultValue.value ) ); + } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.BOOLEAN ) { + value = new BsonBoolean( Boolean.parseBoolean( defaultValue.value ) ); + } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.DATE ) { + try { + value = new BsonInt64( new SimpleDateFormat( "yyyy-MM-dd" ).parse( defaultValue.value ).getTime() ); + } catch ( ParseException e ) { + throw new RuntimeException( e ); + } + } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.TIME ) { + value = new BsonInt32( (int) Time.valueOf( defaultValue.value ).getTime() ); + } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.TIMESTAMP ) { + value = new BsonInt64( Timestamp.valueOf( defaultValue.value ).getTime() ); + } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.BINARY ) { + value = new BsonBinary( ByteString.parseBase64( defaultValue.value ) ); + } else { + value = new BsonString( defaultValue.value ); + } + if ( catalogColumn.collectionsType == PolyType.ARRAY ) { + throw new RuntimeException( "Default values are not supported for array types" ); } - } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.TIME ) { - value = new BsonInt32( (int) Time.valueOf( defaultValue.value ).getTime() ); - } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.TIMESTAMP ) { - value = new BsonInt64( Timestamp.valueOf( defaultValue.value ).getTime() ); - } else if ( catalogColumn.type.getFamily() == PolyTypeFamily.BINARY ) { - value = new BsonBinary( ByteString.parseBase64( defaultValue.value ) ); + + field = new Document().append( getPhysicalColumnName( catalogColumn.id ), value ); } else { - value = new BsonString( defaultValue.value ); - } - if ( catalogColumn.collectionsType == PolyType.ARRAY ) { - throw new RuntimeException( "Default values are not supported for array types" ); + field = new Document().append( getPhysicalColumnName( catalogColumn.id ), null ); } + Document update = new Document().append( "$set", field ); - field = new Document().append( getPhysicalColumnName( catalogColumn.id ), value ); - } else { - field = new Document().append( getPhysicalColumnName( catalogColumn.id ), null ); - } - Document update = new Document().append( "$set", field ); - - // DDL is auto-commit - this.currentSchema.database.getCollection( getPhysicalTableName( catalogTable.id ) ).updateMany( new Document(), update ); - - // Add physical name to placement - catalog.updateColumnPlacementPhysicalNames( - 
getAdapterId(),
-                catalogColumn.id,
-                currentSchema.getDatabase().getName(),
-                catalogTable.name,
-                getPhysicalColumnName( catalogColumn.id ),
-                false );
+            // DDL is auto-commit
+            this.currentSchema.database.getCollection( partitionPlacement.physicalTableName ).updateMany( new Document(), update );
+            // Add physical name to placement
+            catalog.updateColumnPlacementPhysicalNames(
+                    getAdapterId(),
+                    catalogColumn.id,
+                    currentSchema.getDatabase().getName(),
+                    getPhysicalColumnName( catalogColumn.id ),
+                    false );
+        }
     }

     @Override
     public void dropColumn( Context context, CatalogColumnPlacement columnPlacement ) {
         commitAll();
-        Document field = new Document().append( getPhysicalColumnName( columnPlacement.columnId ), 1 );
-        Document filter = new Document().append( "$unset", field );
+        for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( columnPlacement.adapterId, columnPlacement.tableId ) ) {
+            // $unset the column's physical field name (not the table name) on every partition's collection
+            Document field = new Document().append( getPhysicalColumnName( columnPlacement.columnId ), 1 );
+            Document filter = new Document().append( "$unset", field );
-        context.getStatement().getTransaction().registerInvolvedAdapter( AdapterManager.getInstance().getStore( getAdapterId() ) );
-        // DDL is auto-commit
-        this.currentSchema.database.getCollection( getPhysicalTableName( columnPlacement.tableId ) ).updateMany( new Document(), filter );
+            context.getStatement().getTransaction().registerInvolvedAdapter( AdapterManager.getInstance().getStore( getAdapterId() ) );
+            // DDL is auto-commit
+            this.currentSchema.database.getCollection( partitionPlacement.physicalTableName ).updateMany( new Document(), filter );
+        }
     }

     @Override
-    public void addIndex( Context context, CatalogIndex catalogIndex ) {
+    public void addIndex( Context context, CatalogIndex catalogIndex, List<Long> partitionIds ) {
         commitAll();
         context.getStatement().getTransaction().registerInvolvedAdapter( this );
         HASH_FUNCTION type = HASH_FUNCTION.valueOf( catalogIndex.method.toUpperCase( Locale.ROOT ) );
@@ -340,36 +366,44 @@ public void addIndex( Context context, CatalogIndex catalogIndex ) {
     private void addCompositeIndex( CatalogIndex catalogIndex, List<String> columns ) {
-        Document doc = new Document();
-        columns.forEach( name -> doc.append( name, 1 ) );
+        for ( CatalogPartitionPlacement partitionPlacement : catalog.getPartitionPlacementByTable( getAdapterId(), catalogIndex.key.tableId ) ) {
+            Document doc = new Document();
+            columns.forEach( name -> doc.append( name, 1 ) );
-        IndexOptions options = new IndexOptions();
-        options.unique( catalogIndex.unique );
-        options.name( catalogIndex.name );
+            IndexOptions options = new IndexOptions();
+            options.unique( catalogIndex.unique );
+            options.name( catalogIndex.name );
-        this.currentSchema.database
-                .getCollection( getPhysicalTableName( catalogIndex.key.tableId ) )
-                .createIndex( doc, options );
+            this.currentSchema.database
+                    .getCollection( partitionPlacement.physicalTableName )
+                    .createIndex( doc, options );
+        }
     }

     @Override
-    public void dropIndex( Context context, CatalogIndex catalogIndex ) {
+    public void dropIndex( Context context, CatalogIndex catalogIndex, List<Long> partitionIds ) {
+        List<CatalogPartitionPlacement> partitionPlacements = new ArrayList<>();
+        partitionIds.forEach( id -> partitionPlacements.add( catalog.getPartitionPlacement( getAdapterId(), id ) ) );
+
         commitAll();
         context.getStatement().getTransaction().registerInvolvedAdapter( this );
-        this.currentSchema.database.getCollection( getPhysicalTableName( catalogIndex.key.tableId ) ).dropIndex( catalogIndex.name );
+        for (
CatalogPartitionPlacement partitionPlacement : partitionPlacements ) { + this.currentSchema.database.getCollection( partitionPlacement.physicalTableName ).dropIndex( catalogIndex.name ); + } } @Override public void updateColumnType( Context context, CatalogColumnPlacement columnPlacement, CatalogColumn catalogColumn, PolyType polyType ) { String name = columnPlacement.physicalColumnName; + CatalogPartitionPlacement partitionPlacement = catalog.getPartitionPlacement( getAdapterId(), catalog.getTable( columnPlacement.tableId ).partitionProperty.partitionIds.get( 0 ) ); BsonDocument filter = new BsonDocument(); List updates = Collections.singletonList( new BsonDocument( "$set", new BsonDocument( name, new BsonDocument( "$convert", new BsonDocument() .append( "input", new BsonString( "$" + name ) ) .append( "to", new BsonInt32( MongoTypeUtil.getTypeNumber( catalogColumn.type ) ) ) ) ) ) ); - this.currentSchema.database.getCollection( columnPlacement.physicalTableName ).updateMany( filter, updates ); + this.currentSchema.database.getCollection( partitionPlacement.physicalTableName ).updateMany( filter, updates ); } @@ -399,8 +433,12 @@ public static String getPhysicalColumnName( long id ) { } - public static String getPhysicalTableName( long id ) { - return "tab-" + id; + public static String getPhysicalTableName( long tableId, long partitionId ) { + String physicalTableName = "tab-" + tableId; + if ( partitionId >= 0 ) { + physicalTableName += "_part" + partitionId; + } + return physicalTableName; } diff --git a/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoTable.java b/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoTable.java index 1bc5ce2ff0..3971df8ee4 100644 --- a/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoTable.java +++ b/mongodb-adapter/src/main/java/org/polypheny/db/adapter/mongodb/MongoTable.java @@ -65,6 +65,7 @@ import org.polypheny.db.adapter.mongodb.MongoEnumerator.IterWrapper; import org.polypheny.db.adapter.mongodb.util.MongoDynamic; import org.polypheny.db.adapter.mongodb.util.MongoTypeUtil; +import org.polypheny.db.catalog.entity.CatalogPartitionPlacement; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.plan.Convention; import org.polypheny.db.plan.RelOptCluster; @@ -110,9 +111,9 @@ public class MongoTable extends AbstractQueryableTable implements TranslatableTa /** * Creates a MongoTable. 
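 * Each instance is bound to a single partition placement; the backing MongoDB
 * collection is named via MongoStore.getPhysicalTableName( tableId, partitionId ),
 * e.g. "tab-23_part7" (ids illustrative).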
*/ - MongoTable( CatalogTable catalogTable, MongoSchema schema, RelProtoDataType proto, TransactionProvider transactionProvider, int storeId ) { + MongoTable( CatalogTable catalogTable, MongoSchema schema, RelProtoDataType proto, TransactionProvider transactionProvider, int storeId, CatalogPartitionPlacement partitionPlacement ) { super( Object[].class ); - this.collectionName = MongoStore.getPhysicalTableName( catalogTable.id ); + this.collectionName = MongoStore.getPhysicalTableName( catalogTable.id, partitionPlacement.partitionId ); this.transactionProvider = transactionProvider; this.catalogTable = catalogTable; this.protoRowType = proto; diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueueImpl.java b/monitoring/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueueImpl.java index 4772482352..917df7e934 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueueImpl.java +++ b/monitoring/src/main/java/org/polypheny/db/monitoring/core/MonitoringQueueImpl.java @@ -56,6 +56,11 @@ public class MonitoringQueueImpl implements MonitoringQueue { // Processed events since restart private long processedEvents; + private long processedEventsTotal; + + // endregion + + // region ctors /** @@ -124,10 +129,18 @@ public List> getInformationOnElementsInQueue() { @Override - public long getNumberOfProcessedEvents() { + public long getNumberOfProcessedEvents( boolean all ) { + if ( all ) { + return processedEventsTotal; + } + //returns only processed events since last restart return processedEvents; } + // endregion + + // region private helper methods + private void startBackgroundTask() { if ( backgroundTaskId == null ) { @@ -170,6 +183,7 @@ private void processQueue() { queueIds.remove( event.get().getId() ); } processedEvents += countEvents; + processedEventsTotal += countEvents; } finally { this.processingQueueLock.unlock(); } @@ -183,4 +197,5 @@ private Optional getNextJob() { return Optional.empty(); } + // endregion } diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/events/DmlEvent.java b/monitoring/src/main/java/org/polypheny/db/monitoring/events/DmlEvent.java index a82b65256d..3a73e30ed1 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/events/DmlEvent.java +++ b/monitoring/src/main/java/org/polypheny/db/monitoring/events/DmlEvent.java @@ -20,17 +20,16 @@ import java.util.List; import lombok.Getter; import lombok.Setter; -import lombok.extern.slf4j.Slf4j; import org.polypheny.db.monitoring.events.analyzer.DmlEventAnalyzer; import org.polypheny.db.monitoring.events.metrics.DmlDataPoint; -import org.polypheny.db.monitoring.exceptions.GenericEventAnalyzeRuntimeException; @Getter @Setter -@Slf4j public class DmlEvent extends StatementEvent { + private String eventType = "DML EVENT"; + @Override public List> getMetrics() { @@ -40,11 +39,7 @@ public List> getMetrics() { @Override public List analyze() { - try { - return Arrays.asList( DmlEventAnalyzer.analyze( this ) ); - } catch ( Exception e ) { - throw new GenericEventAnalyzeRuntimeException( "Could not analyze dml event" ); - } + return Arrays.asList( DmlEventAnalyzer.analyze( this ) ); } } diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/events/QueryEvent.java b/monitoring/src/main/java/org/polypheny/db/monitoring/events/QueryEvent.java index 97bdba14d1..f98527f436 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/events/QueryEvent.java +++ 
b/monitoring/src/main/java/org/polypheny/db/monitoring/events/QueryEvent.java @@ -16,21 +16,21 @@ package org.polypheny.db.monitoring.events; + import java.util.Arrays; import java.util.List; import lombok.Getter; import lombok.Setter; -import lombok.extern.slf4j.Slf4j; import org.polypheny.db.monitoring.events.analyzer.QueryEventAnalyzer; import org.polypheny.db.monitoring.events.metrics.QueryDataPoint; -import org.polypheny.db.monitoring.exceptions.GenericEventAnalyzeRuntimeException; @Getter @Setter -@Slf4j public class QueryEvent extends StatementEvent { + private String eventType = "QUERY EVENT"; + @Override public List> getMetrics() { @@ -40,11 +40,7 @@ public List> getMetrics() { @Override public List analyze() { - try { - return Arrays.asList( QueryEventAnalyzer.analyze( this ) ); - } catch ( Exception e ) { - throw new GenericEventAnalyzeRuntimeException( "Could not analyze query event:" ); - } + return Arrays.asList( QueryEventAnalyzer.analyze( this ) ); } } diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/DmlEventAnalyzer.java b/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/DmlEventAnalyzer.java index 061d878df1..0279ec5d35 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/DmlEventAnalyzer.java +++ b/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/DmlEventAnalyzer.java @@ -32,12 +32,14 @@ public static DmlDataPoint analyze( DmlEvent dmlEvent ) { DmlDataPoint metric = DmlDataPoint .builder() .description( dmlEvent.getDescription() ) + .monitoringType( dmlEvent.getMonitoringType() ) .Id( dmlEvent.getId() ) .fieldNames( dmlEvent.getFieldNames() ) .executionTime( dmlEvent.getExecutionTime() ) .rowCount( dmlEvent.getRowCount() ) .isSubQuery( dmlEvent.isSubQuery() ) .recordedTimestamp( dmlEvent.getRecordedTimestamp() ) + .accessedPartitions( dmlEvent.getAccessedPartitions() ) .build(); RelRoot relRoot = dmlEvent.getRouted(); @@ -56,14 +58,11 @@ public static DmlDataPoint analyze( DmlEvent dmlEvent ) { private static void processDurationInfo( DmlEvent dmlEvent, DmlDataPoint metric ) { InformationDuration duration = new Gson().fromJson( dmlEvent.getDurations(), InformationDuration.class ); - getDurationInfo( metric, "Plan Caching", duration ); getDurationInfo( metric, "Index Lookup Rewrite", duration ); getDurationInfo( metric, "Constraint Enforcement", duration ); getDurationInfo( metric, "Implementation Caching", duration ); getDurationInfo( metric, "Index Update", duration ); getDurationInfo( metric, "Routing", duration ); - getDurationInfo( metric, "Planning & Optimization", duration ); - getDurationInfo( metric, "Implementation", duration ); getDurationInfo( metric, "Locking", duration ); } diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/QueryEventAnalyzer.java b/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/QueryEventAnalyzer.java index 7345089ff6..23638d4c6e 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/QueryEventAnalyzer.java +++ b/monitoring/src/main/java/org/polypheny/db/monitoring/events/analyzer/QueryEventAnalyzer.java @@ -32,12 +32,14 @@ public static QueryDataPoint analyze( QueryEvent queryEvent ) { QueryDataPoint metric = QueryDataPoint .builder() .description( queryEvent.getDescription() ) + .monitoringType( queryEvent.getMonitoringType() ) .Id( queryEvent.getId() ) .fieldNames( queryEvent.getFieldNames() ) .executionTime( queryEvent.getExecutionTime() ) .rowCount( 
queryEvent.getRowCount() ) .isSubQuery( queryEvent.isSubQuery() ) .recordedTimestamp( queryEvent.getRecordedTimestamp() ) + .accessedPartitions( queryEvent.getAccessedPartitions() ) .build(); RelRoot relRoot = queryEvent.getRouted(); @@ -57,7 +59,6 @@ public static QueryDataPoint analyze( QueryEvent queryEvent ) { private static void processDurationInfo( QueryEvent queryEvent, QueryDataPoint metric ) { InformationDuration duration = new Gson().fromJson( queryEvent.getDurations(), InformationDuration.class ); - getDurationInfo( metric, "Plan Caching", duration ); getDurationInfo( metric, "Index Lookup Rewrite", duration ); getDurationInfo( metric, "Constraint Enforcement", duration ); getDurationInfo( metric, "Implementation Caching", duration ); diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/DmlDataPoint.java b/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/DmlDataPoint.java index 6897314a8f..b4df934397 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/DmlDataPoint.java +++ b/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/DmlDataPoint.java @@ -50,6 +50,7 @@ public class DmlDataPoint implements MonitoringDataPoint, Serializable { private boolean isSubQuery; private int rowCount; private List fieldNames; + private List accessedPartitions; @Override diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/QueryDataPoint.java b/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/QueryDataPoint.java index 9305a8ef26..2b3e1af780 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/QueryDataPoint.java +++ b/monitoring/src/main/java/org/polypheny/db/monitoring/events/metrics/QueryDataPoint.java @@ -50,6 +50,7 @@ public class QueryDataPoint implements MonitoringDataPoint, Serializable { private boolean isSubQuery; private int rowCount; private List fieldNames; + private List accessedPartitions; @Override diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/persistence/MapDbRepository.java b/monitoring/src/main/java/org/polypheny/db/monitoring/persistence/MapDbRepository.java index 16be5b1192..6f1c7a9a86 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/persistence/MapDbRepository.java +++ b/monitoring/src/main/java/org/polypheny/db/monitoring/persistence/MapDbRepository.java @@ -126,16 +126,19 @@ protected void initialize( String filePath, String folderName ) { simpleBackendDb.close(); } - File folder = FileSystemManager.getInstance().registerNewFolder( folderName ); - - simpleBackendDb = DBMaker.fileDB( new File( folder, filePath ) ) - .closeOnJvmShutdown() - .transactionEnable() - .fileMmapEnableIfSupported() - .fileMmapPreclearDisable() - .make(); - - simpleBackendDb.getStore().fileLoad(); + synchronized ( this ) { + File folder = FileSystemManager.getInstance().registerNewFolder( folderName ); + + simpleBackendDb = DBMaker + .fileDB( new File( folder, filePath ) ) + .closeOnJvmShutdown() + .transactionEnable() + .fileMmapEnableIfSupported() + .fileMmapPreclearDisable() + .make(); + + simpleBackendDb.getStore().fileLoad(); + } } diff --git a/monitoring/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUiImpl.java b/monitoring/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUiImpl.java index 28c5b0f94b..d9725900fa 100644 --- a/monitoring/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUiImpl.java +++ 
b/monitoring/src/main/java/org/polypheny/db/monitoring/ui/MonitoringServiceUiImpl.java
@@ -23,6 +23,7 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.stream.Collectors;
 import lombok.NonNull;
 import lombok.extern.slf4j.Slf4j;
 import lombok.val;
@@ -67,8 +68,26 @@ public void initializeInformationPage() {
     }
+    @Override
+    public void registerDataPointForUi( @NonNull Class metricClass ) {
+        String className = metricClass.getName();
+        val informationGroup = new InformationGroup( informationPage, className );
+
+        // TODO: see todo below in {@link updateMetricInformationTable}
+        val fieldAsString = Arrays.stream( metricClass.getDeclaredFields() )
+                .map( Field::getName )
+                .filter( str -> !str.equals( "serialVersionUID" ) )
+                .collect( Collectors.toList() );
+        val informationTable = new InformationTable( informationGroup, fieldAsString );
+
+        // informationGroup.setRefreshFunction( () -> this.updateMetricInformationTable( informationTable, metricClass ) );
+
+        addInformationGroupTUi( informationGroup, Arrays.asList( informationTable ) );
+    }
+
+
     /**
-     * Universal method to add arbitrary new information Groups to UI.
+     * Universal method to add arbitrary new information groups to UI
      */
     private void addInformationGroupTUi( @NonNull InformationGroup informationGroup, @NonNull List<InformationTable> informationTables ) {
         InformationManager im = InformationManager.getInstance();
@@ -90,8 +109,8 @@ private void updateMetricInformationTable( Infor
            List<String> row = new LinkedList<>();
            for ( Field field : fields ) {
-                // TODO: get declared fields and fine corresponding Lombok getter to execute
-                // Therefore, nothing need to be done for serialVersionID
+                // TODO: get declared fields and find corresponding Lombok getter to execute
+                // Therefore, nothing needs to be done for serialVersionUID
                // and neither do we need to hacky set the setAccessible flag for the fields
                if ( field.getName().equals( "serialVersionUID" ) ) {
                    continue;
@@ -102,7 +121,7 @@ private void updateMetricInformationTable( Infor
                    val value = field.get( element );
                    row.add( value.toString() );
                } catch ( IllegalAccessException e ) {
-                    e.printStackTrace();
+                    log.error( "Caught exception", e );
                }
            }
@@ -113,8 +132,7 @@ private void updateMetricInformationTable( Infor
     private void initializeWorkloadInformationTable() {
         val informationGroup = new InformationGroup( informationPage, "Workload Overview" );
-        val informationTable = new InformationTable( informationGroup,
-                Arrays.asList( "Attribute", "Value" ) );
+        val informationTable = new InformationTable( informationGroup, Arrays.asList( "Attribute", "Value" ) );
         informationGroup.setOrder( 1 );
         informationGroup.setRefreshFunction( () -> this.updateWorkloadInformationTable( informationTable ) );
@@ -124,9 +142,9 @@ private void initializeWorkloadInformationTable() {
     private void initializeQueueInformationTable() {
-        //On first subscriber also add
-        //Also build active subscription table Metric to subscribers
-        //or which subscribers, exist and to which metrics they are subscribed
+        // On first subscriber also add
+        // Also build active subscription table mapping metrics to subscribers,
+        // i.e. which subscribers exist and to which metrics they are subscribed
         val informationGroup = new InformationGroup( informationPage, "Monitoring Queue" ).setOrder( 2 );
         val informationTable = new InformationTable( informationGroup, Arrays.asList( "Event Type", "UUID", "Timestamp" ) );
@@ -146,7 +164,6 @@ private void updateQueueInformationTable( InformationTable table ) {
                row.add( infoRow.get( 
"type" ) ); row.add( infoRow.get( "id" ) ); row.add( infoRow.get( "timestamp" ) ); - table.addRow( row ); } } @@ -155,7 +172,7 @@ private void updateQueueInformationTable( InformationTable table ) { private void updateWorkloadInformationTable( InformationTable table ) { table.reset(); - table.addRow( "Number of processed events since restart", queue.getNumberOfProcessedEvents() ); + table.addRow( "Number of processed events since restart", queue.getNumberOfProcessedEvents( false ) ); table.addRow( "Number of events in queue", queue.getNumberOfElementsInQueue() ); //table.addRow( "# Data Points", queue.getElementsInQueue().size() ); table.addRow( "# SELECT", MonitoringServiceProvider.getInstance().getAllDataPoints( QueryDataPoint.class ).size() ); diff --git a/rest-interface/src/main/java/org/polypheny/db/restapi/Rest.java b/rest-interface/src/main/java/org/polypheny/db/restapi/Rest.java index 5e31f6a080..6bf9bae8d9 100644 --- a/rest-interface/src/main/java/org/polypheny/db/restapi/Rest.java +++ b/rest-interface/src/main/java/org/polypheny/db/restapi/Rest.java @@ -33,6 +33,9 @@ import org.polypheny.db.catalog.exceptions.UnknownSchemaException; import org.polypheny.db.catalog.exceptions.UnknownUserException; import org.polypheny.db.jdbc.PolyphenyDbSignature; +import org.polypheny.db.monitoring.core.MonitoringServiceProvider; +import org.polypheny.db.monitoring.events.DmlEvent; +import org.polypheny.db.monitoring.events.QueryEvent; import org.polypheny.db.plan.RelOptCluster; import org.polypheny.db.plan.RelOptPlanner; import org.polypheny.db.prepare.PolyphenyDbCatalogReader; @@ -106,6 +109,8 @@ String processGetResource( final ResourceGetRequest resourceGetRequest, final Re JavaTypeFactory typeFactory = transaction.getTypeFactory(); RexBuilder rexBuilder = new RexBuilder( typeFactory ); + statement.getTransaction().setMonitoringData( new QueryEvent() ); + // Table Scans relBuilder = this.tableScans( relBuilder, rexBuilder, resourceGetRequest.tables ); @@ -153,6 +158,8 @@ String processPatchResource( final ResourcePatchRequest resourcePatchRequest, fi JavaTypeFactory typeFactory = transaction.getTypeFactory(); RexBuilder rexBuilder = new RexBuilder( typeFactory ); + statement.getTransaction().setMonitoringData( new DmlEvent() ); + PolyphenyDbCatalogReader catalogReader = statement.getTransaction().getCatalogReader(); PreparingTable table = catalogReader.getTable( Arrays.asList( resourcePatchRequest.tables.get( 0 ).getSchemaName(), resourcePatchRequest.tables.get( 0 ).name ) ); @@ -212,6 +219,8 @@ String processDeleteResource( final ResourceDeleteRequest resourceDeleteRequest, JavaTypeFactory typeFactory = transaction.getTypeFactory(); RexBuilder rexBuilder = new RexBuilder( typeFactory ); + statement.getTransaction().setMonitoringData( new DmlEvent() ); + PolyphenyDbCatalogReader catalogReader = statement.getTransaction().getCatalogReader(); PreparingTable table = catalogReader.getTable( Arrays.asList( resourceDeleteRequest.tables.get( 0 ).getSchemaName(), resourceDeleteRequest.tables.get( 0 ).name ) ); @@ -265,6 +274,8 @@ String processPostResource( final ResourcePostRequest insertValueRequest, final JavaTypeFactory typeFactory = transaction.getTypeFactory(); RexBuilder rexBuilder = new RexBuilder( typeFactory ); + statement.getTransaction().setMonitoringData( new DmlEvent() ); + PolyphenyDbCatalogReader catalogReader = statement.getTransaction().getCatalogReader(); PreparingTable table = catalogReader.getTable( Arrays.asList( insertValueRequest.tables.get( 0 ).getSchemaName(), 
insertValueRequest.tables.get( 0 ).name ) );
@@ -570,6 +581,7 @@ String executeAndTransformRelAlg( RelRoot relRoot, final Statement statement, fi
                 signature.getExecutionTimeMonitor().setExecutionTime( executionTime );
             }
+            statement.getTransaction().getMonitoringData().setExecutionTime( executionTime );
             statement.getTransaction().commit();
         } catch ( Throwable e ) {
             log.error( "Error during execution of REST query", e );
@@ -580,9 +592,11 @@ String executeAndTransformRelAlg( RelRoot relRoot, final Statement statement, fi
             }
             return null;
         }
-        String result = restResult.getResult( res );
+        Pair<String, Integer> result = restResult.getResult( res );
+        statement.getTransaction().getMonitoringData().setRowCount( result.right );
+        MonitoringServiceProvider.getInstance().monitorEvent( statement.getTransaction().getMonitoringData() );
-        return result;
+        return result.left;
     }
 }
diff --git a/rest-interface/src/main/java/org/polypheny/db/restapi/RestResult.java b/rest-interface/src/main/java/org/polypheny/db/restapi/RestResult.java
index 2340575f00..6424885213 100644
--- a/rest-interface/src/main/java/org/polypheny/db/restapi/RestResult.java
+++ b/rest-interface/src/main/java/org/polypheny/db/restapi/RestResult.java
@@ -48,6 +48,7 @@ import org.polypheny.db.sql.SqlKind;
 import org.polypheny.db.type.PolyType;
 import org.polypheny.db.type.PolyTypeFamily;
+import org.polypheny.db.util.Pair;
 import spark.Response;
 import spark.utils.IOUtils;
@@ -230,13 +231,13 @@ private String getContentType( Object o ) {
     }
-    public String getResult( final Response res ) {
+    public Pair<String, Integer> getResult( final Response res ) {
         Gson gson = new Gson();
         Map<String, Object> finalResult = new HashMap<>();
         finalResult.put( "result", result );
         finalResult.put( "size", result.size() );
         if ( !containsFiles ) {
-            return gson.toJson( finalResult );
+            // Pair.right carries the number of result rows, not the size of the wrapper map
+            return new Pair<>( gson.toJson( finalResult ), result.size() );
         } else {
             OutputStream os;
             ZipEntry zipEntry = new ZipEntry( "data.json" );
@@ -262,7 +263,7 @@ public String getResult( final Response res ) {
                 zipFile.delete();
                 res.status( 500 );
             }
-            return "";
+            return new Pair<>( "", result.size() );
         }
     }
diff --git a/statistic/build.gradle b/statistic/build.gradle
index 74e27ddf3a..a15c1a884f 100644
--- a/statistic/build.gradle
+++ b/statistic/build.gradle
@@ -5,6 +5,7 @@ version = versionMajor + "."
+ versionMinor + versionQualifier dependencies { implementation project(":core") + implementation project(":monitoring") // --- Test Compile --- testImplementation project(path: ":core", configuration: "tests") diff --git a/statistic/src/main/java/org/polypheny/db/statistic/StatisticQueryProcessor.java b/statistic/src/main/java/org/polypheny/db/statistic/StatisticQueryProcessor.java index 20aaee4d69..bdff1c5d64 100644 --- a/statistic/src/main/java/org/polypheny/db/statistic/StatisticQueryProcessor.java +++ b/statistic/src/main/java/org/polypheny/db/statistic/StatisticQueryProcessor.java @@ -39,6 +39,8 @@ import org.polypheny.db.config.RuntimeConfig; import org.polypheny.db.iface.Authenticator; import org.polypheny.db.jdbc.PolyphenyDbSignature; +import org.polypheny.db.monitoring.core.MonitoringServiceProvider; +import org.polypheny.db.monitoring.events.QueryEvent; import org.polypheny.db.processing.SqlProcessor; import org.polypheny.db.rel.RelRoot; import org.polypheny.db.rel.type.RelDataType; @@ -201,6 +203,7 @@ private StatisticResult executeSqlSelect( String query ) { Transaction transaction = getTransaction(); Statement statement = transaction.createStatement(); StatisticResult result = new StatisticResult(); + try { result = executeSqlSelect( statement, query ); transaction.commit(); @@ -234,6 +237,8 @@ private StatisticResult executeSqlSelect( final Statement statement, final Strin List> rows; Iterator iterator = null; + statement.getTransaction().setMonitoringData( new QueryEvent() ); + try { signature = processQuery( statement, sqlSelect ); final Enumerable enumerable = signature.enumerable( statement.getDataContext() ); @@ -282,6 +287,9 @@ private StatisticResult executeSqlSelect( final Statement statement, final Strin String[][] d = data.toArray( new String[0][] ); + statement.getTransaction().getMonitoringData().setRowCount( data.size() ); + MonitoringServiceProvider.getInstance().monitorEvent( statement.getTransaction().getMonitoringData() ); + return new StatisticResult( names, types, d ); } finally { try { diff --git a/webui/src/main/java/org/polypheny/db/webui/Crud.java b/webui/src/main/java/org/polypheny/db/webui/Crud.java index 49e8a8e112..4244ce7245 100644 --- a/webui/src/main/java/org/polypheny/db/webui/Crud.java +++ b/webui/src/main/java/org/polypheny/db/webui/Crud.java @@ -150,6 +150,10 @@ import org.polypheny.db.information.InformationStacktrace; import org.polypheny.db.information.InformationText; import org.polypheny.db.jdbc.PolyphenyDbSignature; +import org.polypheny.db.monitoring.core.MonitoringServiceProvider; +import org.polypheny.db.monitoring.events.DmlEvent; +import org.polypheny.db.monitoring.events.QueryEvent; +import org.polypheny.db.monitoring.events.StatementEvent; import org.polypheny.db.partition.PartitionFunctionInfo; import org.polypheny.db.partition.PartitionFunctionInfo.PartitionFunctionInfoColumn; import org.polypheny.db.partition.PartitionManager; @@ -838,6 +842,7 @@ ArrayList anyQuery( final QueryRequest request, final Session session ) temp = System.nanoTime(); int numOfRows = executeSqlUpdate( transaction, query ); executionTime += System.nanoTime() - temp; + transaction.getMonitoringData().setExecutionTime( executionTime ); result = new Result( numOfRows ).setGeneratedQuery( query ).setXid( transaction.getXid().toString() ); results.add( result ); @@ -1390,15 +1395,15 @@ Result getDataSourceColumns( final Request req, final Response res ) { } return new Result( columns.toArray( new DbColumn[0] ), null ).setType( ResultType.VIEW ); } else { - 
if ( catalog.getColumnPlacements( catalogTable.columnIds.get( 0 ) ).size() != 1 ) { + if ( catalog.getColumnPlacement( catalogTable.columnIds.get( 0 ) ).size() != 1 ) { throw new RuntimeException( "The table has an unexpected number of placements!" ); } - int adapterId = catalog.getColumnPlacements( catalogTable.columnIds.get( 0 ) ).get( 0 ).adapterId; + int adapterId = catalog.getColumnPlacement( catalogTable.columnIds.get( 0 ) ).get( 0 ).adapterId; CatalogPrimaryKey primaryKey = catalog.getPrimaryKey( catalogTable.primaryKey ); List pkColumnNames = primaryKey.getColumnNames(); List columns = new ArrayList<>(); - for ( CatalogColumnPlacement ccp : catalog.getColumnPlacementsOnAdapter( adapterId, catalogTable.id ) ) { + for ( CatalogColumnPlacement ccp : catalog.getColumnPlacementsOnAdapterPerTable( adapterId, catalogTable.id ) ) { CatalogColumn col = catalog.getColumn( ccp.columnId ); columns.add( new DbColumn( col.name, @@ -2045,7 +2050,7 @@ private Placement getPlacements( final Index index ) { String tableName = index.getTable(); try { CatalogTable table = catalog.getTable( databaseName, schemaName, tableName ); - Placement p = new Placement( table.isPartitioned, catalog.getPartitionNames( table.id ), table.tableType ); + Placement p = new Placement( table.isPartitioned, catalog.getPartitionGroupNames( table.id ), table.tableType ); if ( table.tableType == TableType.VIEW ) { return p; @@ -2053,20 +2058,19 @@ private Placement getPlacements( final Index index ) { long pkid = table.primaryKey; List pkColumnIds = Catalog.getInstance().getPrimaryKey( pkid ).columnIds; CatalogColumn pkColumn = Catalog.getInstance().getColumn( pkColumnIds.get( 0 ) ); - List pkPlacements = catalog.getColumnPlacements( pkColumn.id ); + List pkPlacements = catalog.getColumnPlacement( pkColumn.id ); for ( CatalogColumnPlacement placement : pkPlacements ) { Adapter adapter = AdapterManager.getInstance().getAdapter( placement.adapterId ); p.addAdapter( new Placement.Store( adapter.getUniqueName(), adapter.getAdapterName(), - catalog.getColumnPlacementsOnAdapter( adapter.getAdapterId(), table.id ), - catalog.getPartitionsIndexOnDataPlacement( placement.adapterId, placement.tableId ), - table.numPartitions, - table.partitionType ) ); + catalog.getColumnPlacementsOnAdapterPerTable( adapter.getAdapterId(), table.id ), + catalog.getPartitionGroupsIndexOnDataPlacement( placement.adapterId, placement.tableId ), + table.partitionProperty.numPartitionGroups, + table.partitionProperty.partitionType ) ); } return p; } - } catch ( UnknownTableException | UnknownDatabaseException | UnknownSchemaException e ) { log.error( "Caught exception while getting placements", e ); return new Placement( e ); @@ -2122,7 +2126,7 @@ String getPartitionTypes( final Request req, final Response res ) { } - private List buildPartitionFunctionRow( List columnList ) { + private List buildPartitionFunctionRow( PartitioningRequest request, List columnList ) { List constructedRow = new ArrayList<>(); for ( PartitionFunctionInfoColumn currentColumn : columnList ) { @@ -2151,7 +2155,19 @@ private List buildPartitionFunctionRow( List> rowsBefore = functionInfo.getRowsBefore(); for ( int i = 0; i < rowsBefore.size(); i++ ) { - rows.add( buildPartitionFunctionRow( rowsBefore.get( i ) ) ); + rows.add( buildPartitionFunctionRow( request, rowsBefore.get( i ) ) ); } } if ( infoJson.has( "dynamicRows" ) ) { // Build as many dynamic rows as requested per num Partitions for ( int i = 0; i < request.numPartitions; i++ ) { - rows.add( buildPartitionFunctionRow( 
functionInfo.getDynamicRows() ) ); + rows.add( buildPartitionFunctionRow( request, functionInfo.getDynamicRows() ) ); } } @@ -2207,7 +2223,7 @@ PartitionFunctionModel getPartitionFunctionModel( final Request req, final Respo // Insert Rows After List> rowsAfter = functionInfo.getRowsAfter(); for ( int i = 0; i < rowsAfter.size(); i++ ) { - rows.add( buildPartitionFunctionRow( rowsAfter.get( i ) ) ); + rows.add( buildPartitionFunctionRow( request, rowsAfter.get( i ) ) ); } } @@ -2225,10 +2241,10 @@ Result partitionTable( final Request req, final Response res ) { PartitionFunctionModel request = gson.fromJson( req.body(), PartitionFunctionModel.class ); // Get correct partition function - PartitionManagerFactory partitionManagerFactory = new PartitionManagerFactory(); + PartitionManagerFactory partitionManagerFactory = PartitionManagerFactory.getInstance(); PartitionManager partitionManager = null; try { - partitionManager = partitionManagerFactory.getInstance( PartitionType.getByName( request.functionName ) ); + partitionManager = partitionManagerFactory.getPartitionManager( PartitionType.getByName( request.functionName ) ); } catch ( UnknownPartitionTypeException e ) { throw new RuntimeException( e ); } @@ -3456,6 +3472,7 @@ private Result executeSqlSelect( final Statement statement, final UIRequest requ List> rows; Iterator iterator = null; boolean hasMoreRows = false; + statement.getTransaction().setMonitoringData( new QueryEvent() ); try { signature = processQuery( statement, sqlSelect ); @@ -3471,7 +3488,12 @@ private Result executeSqlSelect( final Statement statement, final UIRequest requ } hasMoreRows = iterator.hasNext(); stopWatch.stop(); - signature.getExecutionTimeMonitor().setExecutionTime( stopWatch.getNanoTime() ); + + long executionTime = stopWatch.getNanoTime(); + signature.getExecutionTimeMonitor().setExecutionTime( executionTime ); + + statement.getTransaction().getMonitoringData().setExecutionTime( executionTime ); + } catch ( Throwable t ) { if ( statement.getTransaction().isAnalyze() ) { InformationManager analyzer = statement.getTransaction().getQueryAnalyzer(); @@ -3549,6 +3571,9 @@ private Result executeSqlSelect( final Statement statement, final UIRequest requ ArrayList data = computeResultData( rows, header, statement.getTransaction() ); + statement.getTransaction().getMonitoringData().setRowCount( data.size() ); + MonitoringServiceProvider.getInstance().monitorEvent( statement.getTransaction().getMonitoringData() ); + if ( tableType != null ) { return new Result( header.toArray( new DbColumn[0] ), data.toArray( new String[0][] ) ).setAffectedRows( data.size() ).setHasMoreRows( hasMoreRows ); } else { @@ -3753,6 +3778,8 @@ private int executeSqlUpdate( final Transaction transaction, final String sqlUpd private int executeSqlUpdate( final Statement statement, final Transaction transaction, final String sqlUpdate ) throws QueryExecutionException { PolyphenyDbSignature signature; + statement.getTransaction().setMonitoringData( new DmlEvent() ); + try { signature = processQuery( statement, sqlUpdate ); } catch ( Throwable t ) { @@ -3805,6 +3832,11 @@ private int executeSqlUpdate( final Statement statement, final Transaction trans } } + StatementEvent ev = statement.getTransaction().getMonitoringData(); + ev.setRowCount( rowsChanged ); + + MonitoringServiceProvider.getInstance().monitorEvent( ev ); + return rowsChanged; } else { throw new QueryExecutionException( "Unknown statement type: " + signature.statementType ); diff --git 
a/webui/src/main/java/org/polypheny/db/webui/models/Placement.java b/webui/src/main/java/org/polypheny/db/webui/models/Placement.java index 09bf78bc4a..e3589149ad 100644 --- a/webui/src/main/java/org/polypheny/db/webui/models/Placement.java +++ b/webui/src/main/java/org/polypheny/db/webui/models/Placement.java @@ -98,7 +98,6 @@ private static class ColumnPlacement { private final String storeUniqueName; private final PlacementType placementType; private final String physicalSchemaName; - private final String physicalTableName; private final String physicalColumnName; @@ -111,7 +110,6 @@ public ColumnPlacement( CatalogColumnPlacement catalogColumnPlacement ) { this.storeUniqueName = catalogColumnPlacement.adapterUniqueName; this.placementType = catalogColumnPlacement.placementType; this.physicalSchemaName = catalogColumnPlacement.physicalSchemaName; - this.physicalTableName = catalogColumnPlacement.physicalTableName; this.physicalColumnName = catalogColumnPlacement.physicalColumnName; } diff --git a/webui/src/test/java/org/polypheny/db/webui/SchemaToJsonMapperTest.java b/webui/src/test/java/org/polypheny/db/webui/SchemaToJsonMapperTest.java index 835a904963..a31ca86f23 100644 --- a/webui/src/test/java/org/polypheny/db/webui/SchemaToJsonMapperTest.java +++ b/webui/src/test/java/org/polypheny/db/webui/SchemaToJsonMapperTest.java @@ -34,6 +34,7 @@ import org.polypheny.db.catalog.entity.CatalogSchema; import org.polypheny.db.catalog.entity.CatalogTable; import org.polypheny.db.catalog.entity.CatalogUser; +import org.polypheny.db.partition.properties.PartitionProperty; import org.polypheny.db.type.PolyType; @@ -58,7 +59,8 @@ public void exportTest() { TableType.TABLE, 23L, ImmutableMap.of(), - true ); + true, + PartitionProperty.builder().build() ); Catalog catalog = Catalog.getInstance(); Arrays.asList( new CatalogColumn( 5, "sid", 4, 1, 1, 1, PolyType.INTEGER, null, null, null, null, null, false, null, null ),
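For readers tracing the new naming scheme end to end: a minimal standalone sketch of the per-partition physical collection naming implemented by MongoStore.getPhysicalTableName in the MongoDB adapter hunks above. The class and method names here are illustrative, not part of the patch; only the "tab-<tableId>" / "_part<partitionId>" convention is taken from the diff.

// Sketch: per-partition physical collection naming as used by the MongoDB adapter.
// A negative partitionId yields the plain table name; otherwise the partition id is appended.
public final class PhysicalNameSketch {

    static String physicalTableName( long tableId, long partitionId ) {
        String name = "tab-" + tableId;
        if ( partitionId >= 0 ) {
            name += "_part" + partitionId;
        }
        return name;
    }

    public static void main( String[] args ) {
        System.out.println( physicalTableName( 23, 7 ) );   // prints: tab-23_part7
        System.out.println( physicalTableName( 23, -1 ) );  // prints: tab-23
    }
}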