Fix format-check (oap-project#334)
* format
zhejiangxiaomai committed Jul 3, 2023
1 parent c3a8948 commit 66dc508
Showing 2 changed files with 10 additions and 6 deletions.
velox/dwio/dwrf/test/WriterFlushTest.cpp (1 addition, 1 deletion)
@@ -140,7 +140,7 @@ class MockMemoryPool : public velox::memory::MemoryPool {
      /*unused*/) override {
    VELOX_UNSUPPORTED("freeContiguous unsupported");
  }

  bool highUsage() override {
    VELOX_NYI("{} unsupported", __FUNCTION__);
  }
velox/dwio/parquet/writer/Writer.cpp (9 additions, 5 deletions)
@@ -28,7 +28,8 @@ void Writer::flush() {
           finalSink_.get(),
           pool_,
           queryCtx_->queryConfig().dataBufferGrowRatio());
-      auto arrowProperties = ::parquet::ArrowWriterProperties::Builder().build();
+      auto arrowProperties =
+          ::parquet::ArrowWriterProperties::Builder().build();
       PARQUET_ASSIGN_OR_THROW(
           arrowWriter_,
           ::parquet::arrow::FileWriter::Open(
@@ -43,7 +44,9 @@ void Writer::flush() {
     std::vector<std::shared_ptr<arrow::ChunkedArray>> chunks;
     for (int colIdx = 0; colIdx < fields.size(); colIdx++) {
       auto dataType = fields.at(colIdx)->type();
-      auto chunk = arrow::ChunkedArray::Make(std::move(stagingChunks_.at(colIdx)), dataType).ValueOrDie();
+      auto chunk = arrow::ChunkedArray::Make(
+                       std::move(stagingChunks_.at(colIdx)), dataType)
+                       .ValueOrDie();
       chunks.push_back(chunk);
     }
     auto table = arrow::Table::Make(schema_, std::move(chunks), stagingRows_);
@@ -60,8 +63,8 @@ void Writer::flush() {
 }
 
 /**
- * This method would cache input `ColumnarBatch` to make the size of row group big.
- * It would flush when:
+ * This method would cache input `ColumnarBatch` to make the size of row group
+ * big. It would flush when:
  * - the cached numRows bigger than `maxRowGroupRows_`
  * - the cached bytes bigger than `maxRowGroupBytes_`
  *
@@ -83,7 +86,8 @@ void Writer::write(const RowVectorPtr& data) {

   auto bytes = data->estimateFlatSize();
   auto numRows = data->size();
-  if (stagingBytes_ + bytes > maxRowGroupBytes_ || stagingRows_ + numRows > maxRowGroupRows_) {
+  if (stagingBytes_ + bytes > maxRowGroupBytes_ ||
+      stagingRows_ + numRows > maxRowGroupRows_) {
     flush();
   }

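For context on what the re-wrapped lines in Writer::flush() are doing: the staged per-column arrays are combined into arrow::ChunkedArrays and assembled into an arrow::Table before being handed to the Parquet file writer. Below is a minimal standalone sketch of that assembly using the Arrow C++ API; the column name "c0", the sample values, and the BuildTable() helper are illustrative only and are not part of the Velox code.

// Sketch of the ChunkedArray/Table assembly pattern from Writer::flush().
// "c0", the sample values, and BuildTable() are hypothetical; the real writer
// builds its chunks from stagingChunks_ and a schema derived from the row type.
#include <arrow/api.h>

#include <iostream>
#include <memory>
#include <vector>

arrow::Status BuildTable() {
  auto field = arrow::field("c0", arrow::int64());
  auto schema = arrow::schema({field});

  // Two staged arrays for the same column, standing in for stagingChunks_.
  arrow::Int64Builder builder;
  ARROW_RETURN_NOT_OK(builder.AppendValues({1, 2, 3}));
  std::shared_ptr<arrow::Array> first;
  ARROW_RETURN_NOT_OK(builder.Finish(&first));
  ARROW_RETURN_NOT_OK(builder.AppendValues({4, 5}));
  std::shared_ptr<arrow::Array> second;
  ARROW_RETURN_NOT_OK(builder.Finish(&second));

  // Combine the staged arrays into one chunked column, then into a table,
  // mirroring the loop and the arrow::Table::Make call in the diff above.
  std::vector<std::shared_ptr<arrow::Array>> pieces = {first, second};
  ARROW_ASSIGN_OR_RAISE(
      auto chunk, arrow::ChunkedArray::Make(pieces, field->type()));
  std::vector<std::shared_ptr<arrow::ChunkedArray>> columns = {chunk};
  auto table = arrow::Table::Make(schema, columns, /*num_rows=*/5);
  std::cout << table->ToString() << std::endl;
  return arrow::Status::OK();
}

int main() {
  auto status = BuildTable();
  if (!status.ok()) {
    std::cerr << status.ToString() << std::endl;
    return 1;
  }
  return 0;
}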
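The re-wrapped condition in Writer::write() implements the buffering policy described in the comment above: incoming batches are staged until adding the next one would exceed either the row or the byte limit, at which point everything staged so far is flushed as one row group. The following self-contained sketch shows that policy; StagingWriter and its parameters are hypothetical names chosen for illustration, while the real writer uses the maxRowGroupRows_/maxRowGroupBytes_ and stagingRows_/stagingBytes_ members shown in the diff.

// Sketch of the row-group buffering policy from Writer::write().
// StagingWriter, batchRows, and batchBytes are hypothetical names.
#include <cstdint>
#include <iostream>

class StagingWriter {
 public:
  StagingWriter(int64_t maxRows, int64_t maxBytes)
      : maxRowGroupRows_(maxRows), maxRowGroupBytes_(maxBytes) {}

  // Stage a batch; flush first if it would push either counter past its limit.
  void write(int64_t batchRows, int64_t batchBytes) {
    if (stagingBytes_ + batchBytes > maxRowGroupBytes_ ||
        stagingRows_ + batchRows > maxRowGroupRows_) {
      flush();
    }
    stagingRows_ += batchRows;
    stagingBytes_ += batchBytes;
  }

  // Emit whatever is staged as one row group and reset the counters.
  void flush() {
    if (stagingRows_ == 0) {
      return;
    }
    std::cout << "row group: " << stagingRows_ << " rows, " << stagingBytes_
              << " bytes" << std::endl;
    stagingRows_ = 0;
    stagingBytes_ = 0;
  }

 private:
  const int64_t maxRowGroupRows_;
  const int64_t maxRowGroupBytes_;
  int64_t stagingRows_ = 0;
  int64_t stagingBytes_ = 0;
};

int main() {
  StagingWriter writer(/*maxRows=*/1000, /*maxBytes=*/1 << 20);
  for (int i = 0; i < 10; ++i) {
    writer.write(/*batchRows=*/300, /*batchBytes=*/200000);
  }
  writer.flush();  // close out the final, partially filled row group
  return 0;
}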
