[MINOR] Fix usages of orElse #10435

Merged: 7 commits, Jan 10, 2024

@@ -495,7 +495,7 @@ private void completeClustering(HoodieReplaceCommitMetadata metadata,
preCommit(metadata);
}
// Update table's metadata (table)
- writeTableMetadata(table, clusteringInstant.getTimestamp(), metadata, writeStatuses.orElse(context.emptyHoodieData()));
+ writeTableMetadata(table, clusteringInstant.getTimestamp(), metadata, writeStatuses.orElseGet(context::emptyHoodieData));

LOG.info("Committing Clustering " + clusteringCommitTime + ". Finished with result " + metadata);

@@ -1016,7 +1016,7 @@ private List<String> getInstantsToRollbackForLazyCleanPolicy(HoodieTableMetaClie
@Deprecated
public boolean rollback(final String commitInstantTime, Option<HoodiePendingRollbackInfo> pendingRollbackInfo, boolean skipLocking) throws HoodieRollbackException {
final String rollbackInstantTime = pendingRollbackInfo.map(entry -> entry.getRollbackInstant().getTimestamp())
- .orElse(createNewInstantTime(!skipLocking));
+ .orElseGet(() -> createNewInstantTime(!skipLocking));
return rollback(commitInstantTime, pendingRollbackInfo, rollbackInstantTime, skipLocking);

Contributor:
Okay, so #orElseGet is always preferable to #orElse.

Contributor Author:
In general, you do not want to execute methods or create objects that you will not use. Therefore, you can use orElse when returning a constant, but otherwise you should avoid it.
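
To make the difference concrete, here is a minimal, self-contained sketch using java.util.Optional (Hudi's Option mirrors these semantics); the class and method names below are purely illustrative:

```java
import java.util.Optional;

public class OrElseDemo {

  // Stands in for an expensive fallback, e.g. building an empty HoodieData.
  static String expensiveFallback() {
    System.out.println("expensiveFallback() evaluated");
    return "fallback";
  }

  public static void main(String[] args) {
    Optional<String> present = Optional.of("value");

    // orElse: the argument is evaluated eagerly, so expensiveFallback()
    // runs even though the Optional already holds a value.
    String a = present.orElse(expensiveFallback()); // prints "expensiveFallback() evaluated"

    // orElseGet: the Supplier is invoked only if the Optional is empty,
    // so nothing is printed here.
    String b = present.orElseGet(OrElseDemo::expensiveFallback);

    // With a constant fallback there is nothing to defer; orElse is fine.
    String c = present.orElse("constant");

    System.out.println(a + " " + b + " " + c); // value value value
  }
}
```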

Contributor:
Good catch!

}

@@ -297,7 +297,7 @@ private void saveInternalSchema(HoodieTable table, String instantTime, HoodieCom
InternalSchema internalSchema;
Schema avroSchema = HoodieAvroUtils.createHoodieWriteSchema(config.getSchema(), config.allowOperationMetadataField());
if (historySchemaStr.isEmpty()) {
- internalSchema = SerDeHelper.fromJson(config.getInternalSchema()).orElse(AvroInternalSchemaConverter.convert(avroSchema));
+ internalSchema = SerDeHelper.fromJson(config.getInternalSchema()).orElseGet(() -> AvroInternalSchemaConverter.convert(avroSchema));
internalSchema.setSchemaId(Long.parseLong(instantTime));
} else {
internalSchema = InternalSchemaUtils.searchSchema(Long.parseLong(instantTime),

@@ -81,7 +81,7 @@ public static Option<HoodieCommitMetadata> resolveWriteConflictIfAny(
table.getMetaClient(), currentTxnOwnerInstant.get(), lastCompletedTxnOwnerInstant),
completedInstantsDuringCurrentWriteOperation);

- final ConcurrentOperation thisOperation = new ConcurrentOperation(currentTxnOwnerInstant.get(), thisCommitMetadata.orElse(new HoodieCommitMetadata()));
+ final ConcurrentOperation thisOperation = new ConcurrentOperation(currentTxnOwnerInstant.get(), thisCommitMetadata.orElseGet(HoodieCommitMetadata::new));
instantStream.forEach(instant -> {
try {
ConcurrentOperation otherOperation = new ConcurrentOperation(instant, table.getMetaClient());

@@ -641,7 +641,7 @@ private void rollbackInflightInstant(HoodieInstant inflightInstant,
Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
final String commitTime = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry
-> entry.getRollbackInstant().getTimestamp())
- .orElse(getMetaClient().createNewInstantTime());
+ .orElseGet(() -> getMetaClient().createNewInstantTime());
scheduleRollback(context, commitTime, inflightInstant, false, config.shouldRollbackUsingMarkers(),
false);
rollback(context, commitTime, inflightInstant, false, false);

@@ -657,7 +657,7 @@ private void rollbackInflightInstant(HoodieInstant inflightInstant,
public void rollbackInflightLogCompaction(HoodieInstant inflightInstant, Function<String, Option<HoodiePendingRollbackInfo>> getPendingRollbackInstantFunc) {
final String commitTime = getPendingRollbackInstantFunc.apply(inflightInstant.getTimestamp()).map(entry
-> entry.getRollbackInstant().getTimestamp())
- .orElse(getMetaClient().createNewInstantTime());
+ .orElseGet(() -> getMetaClient().createNewInstantTime());
scheduleRollback(context, commitTime, inflightInstant, false, config.shouldRollbackUsingMarkers(),
false);
rollback(context, commitTime, inflightInstant, true, false);

@@ -45,7 +45,7 @@ public BaseHoodieFunctionalIndexClient() {
public void register(HoodieTableMetaClient metaClient, String indexName, String indexType, Map<String, Map<String, String>> columns, Map<String, String> options) {
LOG.info("Registering index {} of using {}", indexName, indexType);
String indexMetaPath = metaClient.getTableConfig().getIndexDefinitionPath()
- .orElse(metaClient.getMetaPath() + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FOLDER_NAME + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FILE_NAME);
+ .orElseGet(() -> metaClient.getMetaPath() + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FOLDER_NAME + Path.SEPARATOR + HoodieTableMetaClient.INDEX_DEFINITION_FILE_NAME);
// build HoodieFunctionalIndexMetadata and then add to index definition file
metaClient.buildFunctionalIndexDefinition(indexMetaPath, indexName, indexType, columns, options);
// update table config if necessary

@@ -90,7 +90,7 @@ public HoodieSavepointMetadata execute() {
} catch (IOException e) {
throw new HoodieSavepointException("Failed to savepoint " + instantTime, e);
}
- }).orElse(table.getCompletedCommitsTimeline().firstInstant().get().getTimestamp());
+ }).orElseGet(() -> table.getCompletedCommitsTimeline().firstInstant().get().getTimestamp());

// Cannot allow savepoint time on a commit that could have been cleaned
ValidationUtils.checkArgument(HoodieTimeline.compareTimestamps(instantTime, HoodieTimeline.GREATER_THAN_OR_EQUALS, lastCommitRetained),

@@ -133,7 +133,7 @@ protected void completeClustering(
// commit to data table after committing to metadata table.
// We take the lock here to ensure all writes to metadata table happens within a single lock (single writer).
// Because more than one write to metadata table will result in conflicts since all of them updates the same partition.
- writeTableMetadata(table, clusteringCommitTime, metadata, writeStatuses.orElse(context.emptyHoodieData()));
+ writeTableMetadata(table, clusteringCommitTime, metadata, writeStatuses.orElseGet(context::emptyHoodieData));

LOG.info("Committing Clustering {} finished with result {}.", clusteringCommitTime, metadata);
table.getActiveTimeline().transitionReplaceInflightToComplete(

@@ -78,7 +78,7 @@ public HoodieWriteMetadata<List<WriteStatus>> bulkInsert(final List<HoodieRecord
config.shouldAllowMultiWriteOnSameInstant());
}

- BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElse(JavaBulkInsertInternalPartitionerFactory.get(config.getBulkInsertSortMode()));
+ BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElseGet(() -> JavaBulkInsertInternalPartitionerFactory.get(config.getBulkInsertSortMode()));

// write new files
List<WriteStatus> writeStatuses = bulkInsert(inputRecords, instantTime, table, config, performDedupe, partitioner, false,

@@ -220,7 +220,7 @@ private <I> BulkInsertPartitioner<I> getPartitioner(Map<String, String> strategy
default:
throw new UnsupportedOperationException(String.format("Layout optimization strategy '%s' is not supported", layoutOptStrategy));
}
- }).orElse(isRowPartitioner
+ }).orElseGet(() -> isRowPartitioner
? BulkInsertInternalPartitionerWithRowsFactory.get(getWriteConfig(), getHoodieTable().isPartitioned(), true)
: BulkInsertInternalPartitionerFactory.get(getHoodieTable(), getWriteConfig(), true));
}

@@ -74,7 +74,7 @@ public HoodieWriteMetadata<HoodieData<WriteStatus>> bulkInsert(final HoodieData<
executor.getCommitActionType(), instantTime), Option.empty(),
config.shouldAllowMultiWriteOnSameInstant());

- BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElse(BulkInsertInternalPartitionerFactory.get(table, config));
+ BulkInsertPartitioner partitioner = userDefinedBulkInsertPartitioner.orElseGet(() -> BulkInsertInternalPartitionerFactory.get(table, config));

// Write new files
HoodieData<WriteStatus> writeStatuses =

@@ -71,7 +71,7 @@ public HoodieWriteMetadata<HoodieData<WriteStatus>> execute() {
protected Partitioner getPartitioner(WorkloadProfile profile) {
return table.getStorageLayout().layoutPartitionerClass()
.map(c -> getLayoutPartitioner(profile, c))
- .orElse(new SparkInsertOverwritePartitioner(profile, context, table, config));
+ .orElseGet(() -> new SparkInsertOverwritePartitioner(profile, context, table, config));
}

@Override

@@ -97,19 +97,15 @@ object AvroConversionUtils {
* TODO convert directly from GenericRecord into InternalRow instead
*/
def createDataFrame(rdd: RDD[GenericRecord], schemaStr: String, ss: SparkSession): Dataset[Row] = {
-    if (rdd.isEmpty()) {
-      ss.emptyDataFrame
-    } else {
-      ss.createDataFrame(rdd.mapPartitions { records =>
-        if (records.isEmpty) Iterator.empty
-        else {
-          val schema = new Schema.Parser().parse(schemaStr)
-          val dataType = convertAvroSchemaToStructType(schema)
-          val converter = createConverterToRow(schema, dataType)
-          records.map { r => converter(r) }
-        }
-      }, convertAvroSchemaToStructType(new Schema.Parser().parse(schemaStr)))
-    }
+    ss.createDataFrame(rdd.mapPartitions { records =>
+      if (records.isEmpty) Iterator.empty
+      else {
+        val schema = new Schema.Parser().parse(schemaStr)
+        val dataType = convertAvroSchemaToStructType(schema)
+        val converter = createConverterToRow(schema, dataType)
+        records.map { r => converter(r) }
+      }
+    }, convertAvroSchemaToStructType(new Schema.Parser().parse(schemaStr)))
}

/**

@@ -144,7 +144,7 @@ public BaseHoodieTableFileIndex(HoodieEngineContext engineContext,
Option<String> beginInstantTime,
Option<String> endInstantTime) {
this.partitionColumns = metaClient.getTableConfig().getPartitionFields()
- .orElse(new String[0]);
+ .orElseGet(() -> new String[0]);

this.metadataConfig = HoodieMetadataConfig.newBuilder()
.fromProperties(configProperties)

@@ -284,7 +284,7 @@ private Map<PartitionPath, List<FileSlice>> loadFileSlicesForPartitions(List<Par
queryInstant.map(instant ->
fileSystemView.getLatestMergedFileSlicesBeforeOrOn(partitionPath.path, queryInstant.get())
)
- .orElse(fileSystemView.getLatestFileSlices(partitionPath.path))
+ .orElseGet(() -> fileSystemView.getLatestFileSlices(partitionPath.path))
.collect(Collectors.toList())
));
}

@@ -164,7 +164,7 @@ public <T> Integer getInt(ConfigProperty<T> configProperty) {
public <T> Integer getIntOrDefault(ConfigProperty<T> configProperty) {
Option<Object> rawValue = getRawValue(configProperty);
return rawValue.map(v -> Integer.parseInt(v.toString()))
- .orElse(Integer.parseInt(configProperty.defaultValue().toString()));
+ .orElseGet(() -> Integer.parseInt(configProperty.defaultValue().toString()));
}

public <T> Boolean getBoolean(ConfigProperty<T> configProperty) {

@@ -956,7 +956,7 @@ private Pair<ClosableIterator<HoodieRecord>, Schema> getRecordsIterator(
.orElse(Function.identity());

Schema schema = schemaEvolutionTransformerOpt.map(Pair::getRight)
- .orElse(dataBlock.getSchema());
+ .orElseGet(dataBlock::getSchema);

return Pair.of(new CloseableMappingIterator<>(blockRecordsIterator, transformer), schema);
}

@@ -131,7 +131,7 @@ private CompletableFuture<Void> startConsumingAsync() {
return (Void) null;
}, consumerExecutorService)
)
- .orElse(CompletableFuture.completedFuture(null));
+ .orElseGet(() -> CompletableFuture.completedFuture(null));
}

@Override

@@ -108,14 +108,14 @@ public Expression visitPredicate(Predicate predicate) {
Predicates.IsNull isNull = (Predicates.IsNull) predicate;
return Option.ofNullable(isNull.child.accept(this))
.map(expr -> (Expression)Predicates.isNull(expr))
- .orElse(alwaysTrue());
+ .orElseGet(this::alwaysTrue);
}

if (predicate instanceof Predicates.IsNotNull) {
Predicates.IsNotNull isNotNull = (Predicates.IsNotNull) predicate;
return Option.ofNullable(isNotNull.child.accept(this))
.map(expr -> (Expression)Predicates.isNotNull(expr))
- .orElse(alwaysTrue());
+ .orElseGet(this::alwaysTrue);
}

if (predicate instanceof Predicates.StringStartsWith) {

@@ -116,7 +116,7 @@ public void create(
List<HoodieSecondaryIndex> newSecondaryIndexes = secondaryIndexes.map(h -> {
h.add(secondaryIndexToAdd);
return h;
- }).orElse(Collections.singletonList(secondaryIndexToAdd));
+ }).orElseGet(() -> Collections.singletonList(secondaryIndexToAdd));
newSecondaryIndexes.sort(new HoodieSecondaryIndex.HoodieIndexCompactor());

// Persistence secondary indexes' metadata to hoodie.properties file

@@ -358,7 +358,7 @@ FileStatus[] fetchAllFilesInPartition(Path partitionPath) throws IOException {
throw new HoodieIOException("Failed to extract file-statuses from the payload", e);
}
})
- .orElse(new FileStatus[0]);
+ .orElseGet(() -> new FileStatus[0]);

LOG.info("Listed file in partition from metadata: partition=" + relativePartitionPath + ", #files=" + statuses.length);
return statuses;

@@ -577,7 +577,7 @@ public HoodieTableFileSystemView getMetadataFileSystemView() {

public Map<String, String> stats() {
Set<String> allMetadataPartitionPaths = Arrays.stream(MetadataPartitionType.values()).map(MetadataPartitionType::getPartitionPath).collect(Collectors.toSet());
- return metrics.map(m -> m.getStats(true, metadataMetaClient, this, allMetadataPartitionPaths)).orElse(new HashMap<>());
+ return metrics.map(m -> m.getStats(true, metadataMetaClient, this, allMetadataPartitionPaths)).orElseGet(HashMap::new);
}

@Override

@@ -1029,7 +1029,7 @@ private static List<FileSlice> getPartitionFileSlices(HoodieTableMetaClient meta
Option<HoodieTableFileSystemView> fileSystemView,
String partition,
boolean mergeFileSlices) {
- HoodieTableFileSystemView fsView = fileSystemView.orElse(getFileSystemView(metaClient));
+ HoodieTableFileSystemView fsView = fileSystemView.orElseGet(() -> getFileSystemView(metaClient));
Stream<FileSlice> fileSliceStream;
if (mergeFileSlices) {
if (metaClient.getActiveTimeline().filterCompletedInstants().lastInstant().isPresent()) {

@@ -1057,7 +1057,7 @@ private static List<FileSlice> getPartitionLatestFileSlicesIncludingInflight(HoodieTableMetaClient metaClient,
public static List<FileSlice> getPartitionLatestFileSlicesIncludingInflight(HoodieTableMetaClient metaClient,
Option<HoodieTableFileSystemView> fileSystemView,
String partition) {
- HoodieTableFileSystemView fsView = fileSystemView.orElse(getFileSystemView(metaClient));
+ HoodieTableFileSystemView fsView = fileSystemView.orElseGet(() -> getFileSystemView(metaClient));
Stream<FileSlice> fileSliceStream = fsView.fetchLatestFileSlicesIncludingInflight(partition);
return fileSliceStream
.sorted(Comparator.comparing(FileSlice::getFileId))

@@ -176,12 +176,12 @@ public HoodieTableSource(
this.dataPruner = dataPruner;
this.partitionPruner = partitionPruner;
this.dataBucket = dataBucket;
- this.requiredPos = Optional.ofNullable(requiredPos).orElse(IntStream.range(0, this.tableRowType.getFieldCount()).toArray());
+ this.requiredPos = Optional.ofNullable(requiredPos).orElseGet(() -> IntStream.range(0, this.tableRowType.getFieldCount()).toArray());
this.limit = Optional.ofNullable(limit).orElse(NO_LIMIT_CONSTANT);
this.hadoopConf = HadoopConfigurations.getHadoopConf(conf);
- this.metaClient = Optional.ofNullable(metaClient).orElse(StreamerUtil.metaClientForReader(conf, hadoopConf));
+ this.metaClient = Optional.ofNullable(metaClient).orElseGet(() -> StreamerUtil.metaClientForReader(conf, hadoopConf));
this.maxCompactionMemoryInBytes = StreamerUtil.getMaxCompactionMemoryInBytes(conf);
- this.internalSchemaManager = Optional.ofNullable(internalSchemaManager).orElse(InternalSchemaManager.get(this.conf, this.metaClient));
+ this.internalSchemaManager = Optional.ofNullable(internalSchemaManager).orElseGet(() -> InternalSchemaManager.get(this.conf, this.metaClient));
}

@Override

@@ -48,8 +48,11 @@
import org.apache.hadoop.mapred.RecordReader;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapreduce.Job;
+ import org.slf4j.Logger;
+ import org.slf4j.LoggerFactory;

import javax.annotation.Nonnull;

import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;

@@ -60,8 +63,6 @@
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
- import org.slf4j.Logger;
- import org.slf4j.LoggerFactory;

import static org.apache.hudi.common.config.HoodieMetadataConfig.ENABLE;

@@ -290,7 +291,7 @@ private List<FileStatus> listStatusForSnapshotMode(JobConf job,

List<FileSlice> fileSlices = queryInstant.map(
instant -> fsView.getLatestMergedFileSlicesBeforeOrOn(relativePartitionPath, instant))
- .orElse(fsView.getLatestFileSlices(relativePartitionPath))
+ .orElseGet(() -> fsView.getLatestFileSlices(relativePartitionPath))
.collect(Collectors.toList());

filteredFileSlices.addAll(fileSlices);

@@ -289,7 +289,7 @@ private File getLogTempFile(long startTime, long endTime, String diskType) {
return Arrays.stream(new File("/tmp").listFiles())
.filter(f -> f.isDirectory() && f.getName().startsWith("hudi-" + diskType) && f.lastModified() > startTime && f.lastModified() < endTime)
.findFirst()
- .orElse(new File(""));
+ .orElseGet(() -> new File(""));
}

@Test

@@ -189,7 +189,7 @@ public static String getPartitionColumns(KeyGenerator keyGenerator, TypedPropert
if (keyGenerator instanceof CustomAvroKeyGenerator) {
return ((BaseKeyGenerator) keyGenerator).getPartitionPathFields().stream().map(
pathField -> Arrays.stream(pathField.split(CustomAvroKeyGenerator.SPLIT_REGEX))
- .findFirst().orElse("Illegal partition path field format: '$pathField' for ${c.getClass.getSimpleName}"))
+ .findFirst().orElseGet(() -> "Illegal partition path field format: '$pathField' for ${c.getClass.getSimpleName}"))
.collect(Collectors.joining(","));
}

@@ -82,7 +82,7 @@ private HoodieWriteMetadata<JavaRDD<WriteStatus>> buildHoodieWriteMetadata(Optio
hoodieWriteMetadata.setWriteStatuses(HoodieJavaRDD.getJavaRDD(statuses));
hoodieWriteMetadata.setPartitionToReplaceFileIds(getPartitionToReplacedFileIds(statuses));
return hoodieWriteMetadata;
- }).orElse(new HoodieWriteMetadata<>());
+ }).orElseGet(HoodieWriteMetadata::new);
}

public final HoodieWriteResult execute(Dataset<Row> records, boolean isTablePartitioned) {

@@ -277,7 +277,7 @@ public static SparkRDDWriteClient<HoodieRecordPayload> createHoodieClient(JavaSp
HoodieCompactionConfig compactionConfig = compactionStrategyClass
.map(strategy -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false)
.withCompactionStrategy(ReflectionUtils.loadClass(strategy)).build())
- .orElse(HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
+ .orElseGet(() -> HoodieCompactionConfig.newBuilder().withInlineCompaction(false).build());
HoodieWriteConfig config =
HoodieWriteConfig.newBuilder().withPath(basePath)
.withParallelism(parallelism, parallelism)