Skip to content

Commit

Permalink
feat: support append for hdfs (#2671)
Browse files Browse the repository at this point in the history
* feat: support append for hdfs

* fmt
  • Loading branch information
zuston authored Jul 20, 2023
1 parent 442475e commit e7c60fe
Show file tree
Hide file tree
Showing 4 changed files with 81 additions and 1 deletion.
48 changes: 48 additions & 0 deletions core/src/services/hdfs/appender.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use async_trait::async_trait;
use bytes::Bytes;
use futures::AsyncWriteExt;

use super::error::parse_io_error;
use crate::raw::*;
use crate::*;

/// An appender backed by an open HDFS file handle.
///
/// Generic over the file type `F` so the same wrapper can hold the
/// async HDFS file used by the `oio::Append` implementation.
pub struct HdfsAppender<F> {
    // The underlying file handle all appended bytes are written to.
    f: F,
}

impl<F> HdfsAppender<F> {
    /// Builds an appender that writes into the given file handle.
    pub fn new(f: F) -> Self {
        HdfsAppender { f }
    }
}

#[async_trait]
impl oio::Append for HdfsAppender<hdrs::AsyncFile> {
    /// Writes the entire buffer to the end of the underlying HDFS file.
    async fn append(&mut self, bs: Bytes) -> Result<()> {
        // write_all drives the write to completion; any io::Error is
        // translated into an opendal error.
        self.f.write_all(&bs).await.map_err(parse_io_error)
    }

    /// Flushes buffered bytes, then closes the file handle.
    async fn close(&mut self) -> Result<()> {
        self.f.flush().await.map_err(parse_io_error)?;
        self.f.close().await.map_err(parse_io_error)
    }
}
32 changes: 31 additions & 1 deletion core/src/services/hdfs/backend.rs
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@ use std::sync::Arc;
use async_trait::async_trait;
use log::debug;

use super::appender::HdfsAppender;
use super::error::parse_io_error;
use super::pager::HdfsPager;
use super::writer::HdfsWriter;
Expand Down Expand Up @@ -164,7 +165,7 @@ impl Accessor for HdfsBackend {
type BlockingReader = oio::into_blocking_reader::FdReader<hdrs::File>;
type Writer = HdfsWriter<hdrs::AsyncFile>;
type BlockingWriter = HdfsWriter<hdrs::File>;
type Appender = ();
type Appender = HdfsAppender<hdrs::AsyncFile>;
type Pager = Option<HdfsPager>;
type BlockingPager = Option<HdfsPager>;

Expand Down Expand Up @@ -237,6 +238,35 @@ impl Accessor for HdfsBackend {
Ok((RpRead::new(end - start), r))
}

/// Opens `path` in append mode, creating the file (and any missing
/// parent directories) first, and returns an appender over the handle.
async fn append(&self, path: &str, _: OpAppend) -> Result<(RpAppend, Self::Appender)> {
    let p = build_rooted_abs_path(&self.root, path);

    // HDFS needs the parent directory to exist before the file can be
    // opened, so ensure it is present up front.
    let abs = PathBuf::from(&p);
    let parent = abs.parent().ok_or_else(|| {
        Error::new(
            ErrorKind::Unexpected,
            "path should have parent but not, it must be malformed",
        )
        .with_context("input", &p)
    })?;
    self.client
        .create_dir(&parent.to_string_lossy())
        .map_err(parse_io_error)?;

    // Open (creating if absent) with append semantics so every write
    // lands at the end of the file.
    let f = self
        .client
        .open_file()
        .create(true)
        .append(true)
        .async_open(&p)
        .await
        .map_err(parse_io_error)?;

    Ok((RpAppend::new(), HdfsAppender::new(f)))
}

async fn write(&self, path: &str, _: OpWrite) -> Result<(RpWrite, Self::Writer)> {
let p = build_rooted_abs_path(&self.root, path);

Expand Down
1 change: 1 addition & 0 deletions core/src/services/hdfs/docs.md
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ This service can be used to:
- [ ] ~~scan~~
- [ ] ~~presign~~
- [x] blocking
- [x] append

## Differences with webhdfs

Expand Down
1 change: 1 addition & 0 deletions core/src/services/hdfs/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
mod backend;
pub use backend::HdfsBuilder as Hdfs;

mod appender;
mod error;
mod pager;
mod writer;

0 comments on commit e7c60fe

Please sign in to comment.