Managing memory usage during query execution #3

Closed
wants to merge 17 commits
59 changes: 6 additions & 53 deletions ballista/rust/core/src/execution_plans/shuffle_writer.rs
@@ -43,6 +43,7 @@ use datafusion::arrow::io::ipc::read::FileReader;
 use datafusion::arrow::io::ipc::write::FileWriter;
 use datafusion::arrow::record_batch::RecordBatch;
 use datafusion::error::{DataFusionError, Result};
+use datafusion::physical_plan::common::IPCWriterWrapper;
 use datafusion::physical_plan::hash_utils::create_hashes;
 use datafusion::physical_plan::metrics::{
     self, ExecutionPlanMetricsSet, MetricBuilder, MetricsSet,
@@ -198,7 +199,7 @@ impl ShuffleWriterExec {

         // we won't necessarily produce output for every possible partition, so we
         // create writers on demand
-        let mut writers: Vec<Option<ShuffleWriter>> = vec![];
+        let mut writers: Vec<Option<IPCWriterWrapper>> = vec![];
         for _ in 0..num_output_partitions {
             writers.push(None);
         }
@@ -268,8 +269,10 @@ impl ShuffleWriterExec {
                         let path = path.to_str().unwrap();
                         info!("Writing results to {}", path);

-                        let mut writer =
-                            ShuffleWriter::new(path, stream.schema().as_ref())?;
+                        let mut writer = IPCWriterWrapper::new(
+                            path,
+                            stream.schema().as_ref(),
+                        )?;

                         writer.write(&output_batch)?;
                         writers[output_partition] = Some(writer);
@@ -434,56 +437,6 @@ fn result_schema() -> SchemaRef {
     ]))
 }

-struct ShuffleWriter {
-    path: String,
-    writer: FileWriter<BufWriter<File>>,
-    num_batches: u64,
-    num_rows: u64,
-    num_bytes: u64,
-}
-
-impl ShuffleWriter {
-    fn new(path: &str, schema: &Schema) -> Result<Self> {
-        let file = File::create(path)
-            .map_err(|e| {
-                BallistaError::General(format!(
-                    "Failed to create partition file at {}: {:?}",
-                    path, e
-                ))
-            })
-            .map_err(|e| DataFusionError::Execution(format!("{:?}", e)))?;
-        let buffer_writer = std::io::BufWriter::new(file);
-        Ok(Self {
-            num_batches: 0,
-            num_rows: 0,
-            num_bytes: 0,
-            path: path.to_owned(),
-            writer: FileWriter::try_new(buffer_writer, schema, WriteOptions::default())?,
-        })
-    }
-
-    fn write(&mut self, batch: &RecordBatch) -> Result<()> {
-        self.writer.write(batch)?;
-        self.num_batches += 1;
-        self.num_rows += batch.num_rows() as u64;
-        let num_bytes: usize = batch
-            .columns()
-            .iter()
-            .map(|array| estimated_bytes_size(array.as_ref()))
-            .sum();
-        self.num_bytes += num_bytes as u64;
-        Ok(())
-    }
-
-    fn finish(&mut self) -> Result<()> {
-        self.writer.finish().map_err(DataFusionError::ArrowError)
-    }
-
-    fn path(&self) -> &str {
-        &self.path
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::*;
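
Note that the removed `ShuffleWriter` is not gone: this PR appears to hoist it into DataFusion as `IPCWriterWrapper` (see the new `datafusion::physical_plan::common` import above), so the same IPC-writer-with-metrics can be reused outside Ballista, e.g. for spill files. A minimal usage sketch, assuming the wrapper keeps the `new`/`write`/`finish` surface of the struct it replaces:

```rust
use datafusion::arrow::record_batch::RecordBatch;
use datafusion::error::Result;
use datafusion::physical_plan::common::IPCWriterWrapper;

// Sketch only: assumes IPCWriterWrapper mirrors the removed ShuffleWriter
// API (new/write/finish), as the hunks above suggest, and that `batches`
// is non-empty.
fn write_partition(path: &str, batches: &[RecordBatch]) -> Result<()> {
    let schema = batches[0].schema();
    let mut writer = IPCWriterWrapper::new(path, schema.as_ref())?;
    for batch in batches {
        // Each write updates the writer's batch/row/byte counters.
        writer.write(batch)?;
    }
    // Finish the IPC file footer before the writer is dropped.
    writer.finish()
}
```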
3 changes: 2 additions & 1 deletion ballista/rust/core/src/serde/physical_plan/from_proto.rs
@@ -71,7 +71,8 @@ use datafusion::physical_plan::{
     limit::{GlobalLimitExec, LocalLimitExec},
     projection::ProjectionExec,
     repartition::RepartitionExec,
-    sort::{SortExec, SortOptions},
+    sorts::sort::SortExec,
+    sorts::SortOptions,
     Partitioning,
 };
 use datafusion::physical_plan::{
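
The remaining Ballista changes below are mechanical fallout from the same move: `SortExec` (and `SortOptions`) now live under a `sorts` module in DataFusion. Only the import path changes; call sites stay the same:

```rust
// Before this PR:
use datafusion::physical_plan::sort::SortExec;

// After this PR: the sort operator moved under the new `sorts` module.
use datafusion::physical_plan::sorts::sort::SortExec;
```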
2 changes: 1 addition & 1 deletion ballista/rust/core/src/serde/physical_plan/mod.rs
@@ -36,7 +36,7 @@ mod roundtrip_tests {
         hash_aggregate::{AggregateMode, HashAggregateExec},
         hash_join::{HashJoinExec, PartitionMode},
         limit::{GlobalLimitExec, LocalLimitExec},
-        sort::SortExec,
+        sorts::sort::SortExec,
         AggregateExpr, ColumnarValue, Distribution, ExecutionPlan, Partitioning,
         PhysicalExpr,
     },
2 changes: 1 addition & 1 deletion ballista/rust/core/src/serde/physical_plan/to_proto.rs
@@ -29,7 +29,7 @@ use std::{
 use datafusion::physical_plan::hash_join::{HashJoinExec, PartitionMode};
 use datafusion::physical_plan::limit::{GlobalLimitExec, LocalLimitExec};
 use datafusion::physical_plan::projection::ProjectionExec;
-use datafusion::physical_plan::sort::SortExec;
+use datafusion::physical_plan::sorts::sort::SortExec;
 use datafusion::physical_plan::{cross_join::CrossJoinExec, ColumnStatistics};
 use datafusion::physical_plan::{
     expressions::{
2 changes: 1 addition & 1 deletion ballista/rust/core/src/utils.rs
@@ -60,7 +60,7 @@ use datafusion::physical_plan::filter::FilterExec;
 use datafusion::physical_plan::hash_aggregate::HashAggregateExec;
 use datafusion::physical_plan::hash_join::HashJoinExec;
 use datafusion::physical_plan::projection::ProjectionExec;
-use datafusion::physical_plan::sort::SortExec;
+use datafusion::physical_plan::sorts::sort::SortExec;
 use datafusion::physical_plan::{
     metrics, AggregateExpr, ExecutionPlan, Metric, PhysicalExpr, RecordBatchStream,
 };
2 changes: 1 addition & 1 deletion ballista/rust/scheduler/src/planner.rs
@@ -254,7 +254,7 @@ mod test {
     use datafusion::physical_plan::coalesce_batches::CoalesceBatchesExec;
     use datafusion::physical_plan::hash_aggregate::{AggregateMode, HashAggregateExec};
     use datafusion::physical_plan::hash_join::HashJoinExec;
-    use datafusion::physical_plan::sort::SortExec;
+    use datafusion::physical_plan::sorts::sort::SortExec;
     use datafusion::physical_plan::{
         coalesce_partitions::CoalescePartitionsExec, projection::ProjectionExec,
     };
3 changes: 2 additions & 1 deletion datafusion/Cargo.toml
@@ -77,6 +77,8 @@ rand = "0.8"
 avro-rs = { version = "0.13", features = ["snappy"], optional = true }
 num-traits = { version = "0.2", optional = true }
 pyo3 = { version = "0.14", optional = true }
+uuid = { version = "0.8", features = ["v4"] }
+tempfile = "3"

 [dependencies.arrow]
 package = "arrow2"
@@ -89,7 +91,6 @@ features = ["io_csv", "io_json", "io_parquet", "io_parquet_compression", "io_ipc

 [dev-dependencies]
 criterion = "0.3"
-tempfile = "3"
 doc-comment = "0.3"
 parquet-format-async-temp = "0"

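
`uuid` (with the `v4` feature for random UUIDs) moves from dev-dependencies to regular dependencies because the runtime now generates uniquely named spill directories and files (see `disk_manager.rs` below); `tempfile` is promoted as well, presumably for temporary locations elsewhere in this PR. A small sketch of what the `v4` feature provides:

```rust
// Sketch: version-4 (random) UUIDs, as used by the new disk manager
// to generate collision-free directory and file names.
use uuid::Uuid;

fn unique_name(prefix: &str) -> String {
    // Uuid implements Display, so it can be formatted directly.
    format!("{}-{}", prefix, Uuid::new_v4())
}

fn main() {
    // e.g. "datafusion-67e55044-10b1-426f-9247-bb680e5fe0c8"
    println!("{}", unique_name("datafusion"));
}
```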
4 changes: 3 additions & 1 deletion datafusion/benches/aggregate_query_sql.rs
@@ -132,5 +132,7 @@ fn criterion_benchmark(c: &mut Criterion) {
     });
 }

-criterion_group!(benches, criterion_benchmark);
+criterion_group!(name = benches;
+    config = Criterion::default().measurement_time(std::time::Duration::from_secs(30));
+    targets = criterion_benchmark);
 criterion_main!(benches);
6 changes: 6 additions & 0 deletions datafusion/src/error.rs
@@ -61,6 +61,9 @@ pub enum DataFusionError {
     /// Error returned during execution of the query.
     /// Examples include files not found, errors in parsing certain types.
     Execution(String),
+    /// This error is thrown when a consumer cannot acquire memory from the Memory
+    /// Manager; in that case we can simply cancel the execution of the partition.
+    ResourcesExhausted(String),
 }

 impl DataFusionError {
@@ -129,6 +132,9 @@ impl Display for DataFusionError {
             DataFusionError::Execution(ref desc) => {
                 write!(f, "Execution error: {}", desc)
             }
+            DataFusionError::ResourcesExhausted(ref desc) => {
+                write!(f, "Resources exhausted: {}", desc)
+            }
         }
     }
 }
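
The new variant gives memory consumers a dedicated signal for budget exhaustion, so a single partition's execution can be cancelled cleanly instead of aborting the whole process. A sketch of how an operator might raise and react to it; the `Budget`/`try_grow` bookkeeping here is hypothetical, not part of this diff:

```rust
use datafusion::error::{DataFusionError, Result};

// Hypothetical memory budget; the real PR routes this through a memory manager.
struct Budget {
    remaining: usize,
}

impl Budget {
    // Reserve `bytes`, or fail with ResourcesExhausted if over budget.
    fn try_grow(&mut self, bytes: usize) -> Result<()> {
        if bytes > self.remaining {
            return Err(DataFusionError::ResourcesExhausted(format!(
                "cannot reserve {} bytes ({} remaining)",
                bytes, self.remaining
            )));
        }
        self.remaining -= bytes;
        Ok(())
    }
}

fn main() {
    let mut budget = Budget { remaining: 1024 };
    match budget.try_grow(4096) {
        // Prints "Resources exhausted: cannot reserve 4096 bytes (1024 remaining)"
        Err(e @ DataFusionError::ResourcesExhausted(_)) => println!("{}", e),
        other => println!("unexpected: {:?}", other),
    }
}
```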
104 changes: 104 additions & 0 deletions datafusion/src/execution/disk_manager.rs
@@ -0,0 +1,104 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

//! Manages files generated during query execution; files are
//! hashed among the directories listed in RuntimeConfig::local_dirs.

use crate::error::{DataFusionError, Result};
use std::collections::hash_map::DefaultHasher;
use std::fs;
use std::fs::File;
use std::hash::{Hash, Hasher};
use std::path::{Path, PathBuf};
use uuid::Uuid;

/// Manages files generated during query execution, e.g. spill files created
/// while processing datasets larger than available memory.
pub struct DiskManager {
    local_dirs: Vec<String>,
}

impl DiskManager {
    /// Create local dirs inside the user-provided dirs from the config
    pub fn new(conf_dirs: &[String]) -> Result<Self> {
        Ok(Self {
            local_dirs: create_local_dirs(conf_dirs)?,
        })
    }

    /// Create a file with a random name in one of the configured dirs
    /// and return its path
    pub fn create_tmp_file(&self) -> Result<String> {
        create_tmp_file(&self.local_dirs)
    }

    #[allow(dead_code)]
    fn cleanup_resource(&mut self) -> Result<()> {
        for dir in self.local_dirs.drain(..) {
            fs::remove_dir(dir)?;
        }
        Ok(())
    }
}

/// Set up local dirs by creating one new dir in each of the given dirs
fn create_local_dirs(local_dir: &[String]) -> Result<Vec<String>> {
    local_dir
        .iter()
        .map(|root| create_directory(root, "datafusion"))
        .collect()
}

const MAX_DIR_CREATION_ATTEMPTS: i32 = 10;

fn create_directory(root: &str, prefix: &str) -> Result<String> {
    let mut attempt = 0;
    while attempt < MAX_DIR_CREATION_ATTEMPTS {
        let mut path = PathBuf::from(root);
        path.push(format!("{}-{}", prefix, Uuid::new_v4().to_string()));
        let path = path.as_path();
        if !path.exists() {
            fs::create_dir(path)?;
            return Ok(path.canonicalize().unwrap().to_str().unwrap().to_string());
        }
        attempt += 1;
    }
    Err(DataFusionError::Execution(format!(
        "Failed to create a temp dir under {} after {} attempts",
        root, MAX_DIR_CREATION_ATTEMPTS
    )))
}

/// Deterministically map `file_name` to one of the local dirs by hashing it,
/// so that files spread evenly across the configured disks.
fn get_file(file_name: &str, local_dirs: &[String]) -> String {
    let mut hasher = DefaultHasher::new();
    file_name.hash(&mut hasher);
    let hash = hasher.finish();
    let dir = &local_dirs[hash.rem_euclid(local_dirs.len() as u64) as usize];
    let mut path = PathBuf::new();
    path.push(dir);
    path.push(file_name);
    path.to_str().unwrap().to_string()
}

fn create_tmp_file(local_dirs: &[String]) -> Result<String> {
    // Draw fresh UUID names until one does not collide with an existing file.
    let name = Uuid::new_v4().to_string();
    let mut path = get_file(&name, local_dirs);
    while Path::new(path.as_str()).exists() {
        path = get_file(&Uuid::new_v4().to_string(), local_dirs);
    }
    File::create(&path)?;
    Ok(path)
}
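
Taken together: `DiskManager::new` creates one `datafusion-<uuid>` subdirectory per configured root, and `create_tmp_file` hashes a fresh UUID to pick one of them, spreading spill files across the available disks. A usage sketch, assuming the module is exported as `datafusion::execution::disk_manager`; the `/tmp` root here just stands in for `RuntimeConfig::local_dirs`:

```rust
use datafusion::error::Result;
use datafusion::execution::disk_manager::DiskManager;

fn main() -> Result<()> {
    // In the real runtime these roots come from RuntimeConfig::local_dirs.
    let dm = DiskManager::new(&["/tmp".to_string()])?;

    // Each call yields a unique, already-created file in one of the managed
    // "datafusion-<uuid>" directories.
    let spill_path = dm.create_tmp_file()?;
    println!("spill file at {}", spill_path);
    Ok(())
}
```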