dataframe-to-s3.rs
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

use std::env;
use std::sync::Arc;

use datafusion::dataframe::DataFrameWriteOptions;
use datafusion::datasource::file_format::parquet::ParquetFormat;
use datafusion::datasource::file_format::FileFormat;
use datafusion::datasource::listing::ListingOptions;
use datafusion::error::Result;
use datafusion::prelude::*;
use object_store::aws::AmazonS3Builder;
use url::Url;

/// This example demonstrates querying data from AmazonS3 and writing
/// the result of a query back to AmazonS3
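///
/// AWS credentials are read from the `AWS_ACCESS_KEY_ID` and
/// `AWS_SECRET_ACCESS_KEY` environment variables.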
#[tokio::main]
async fn main() -> Result<()> {
    // create local execution context
    let ctx = SessionContext::new();

    // Enter a region and bucket to which your credentials have GET and PUT access.
    let region = "<bucket-region-here>";
    let bucket_name = "<bucket-name-here>";

    let s3 = AmazonS3Builder::new()
        .with_bucket_name(bucket_name)
        .with_region(region)
        .with_access_key_id(env::var("AWS_ACCESS_KEY_ID").unwrap())
        .with_secret_access_key(env::var("AWS_SECRET_ACCESS_KEY").unwrap())
        .build()?;
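
    // A minimal alternative sketch, assuming the object_store crate's
    // environment-based constructor: pick up credentials and region from
    // the standard AWS_* environment variables instead of reading each
    // variable explicitly.
    //
    //     let s3 = AmazonS3Builder::from_env()
    //         .with_bucket_name(bucket_name)
    //         .build()?;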

    // Register the object store under the bucket's s3:// URL so the
    // context can resolve paths inside the bucket.
    let path = format!("s3://{bucket_name}");
    let s3_url = Url::parse(&path).unwrap();
    let arc_s3 = Arc::new(s3);
    ctx.register_object_store(&s3_url, arc_s3.clone());

    // Register the existing Parquet data under s3://<bucket>/test_data/
    // as a listing table named "test".
    let path = format!("s3://{bucket_name}/test_data/");
    let file_format = ParquetFormat::default().with_enable_pruning(true);
    let listing_options = ListingOptions::new(Arc::new(file_format))
        .with_file_extension(ParquetFormat::default().get_ext());
    ctx.register_listing_table("test", &path, listing_options, None, None)
        .await?;

    // execute the query
    let df = ctx.sql("SELECT * FROM test").await?;
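
    // write the query results back to S3 as Parquet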
    let out_path = format!("s3://{bucket_name}/test_write/");
    df.clone()
        .write_parquet(&out_path, DataFrameWriteOptions::new(), None)
        .await?;

    // write as JSON to S3
    let json_out = format!("s3://{bucket_name}/json_out");
    df.clone()
        .write_json(&json_out, DataFrameWriteOptions::new(), None)
        .await?;

    // write as CSV to S3
    let csv_out = format!("s3://{bucket_name}/csv_out");
    df.write_csv(&csv_out, DataFrameWriteOptions::new(), None)
        .await?;
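
    // register the newly written Parquet output as a second table to
    // verify the round trip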
    let file_format = ParquetFormat::default().with_enable_pruning(true);
    let listing_options = ListingOptions::new(Arc::new(file_format))
        .with_file_extension(ParquetFormat::default().get_ext());
    ctx.register_listing_table("test2", &out_path, listing_options, None, None)
        .await?;

    let df = ctx.sql("SELECT * FROM test2").await?;
    df.show_limit(20).await?;

    Ok(())
}
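
// To run this example (a sketch, assuming the file lives in the
// datafusion-examples crate and valid credentials are exported):
//
//     AWS_ACCESS_KEY_ID=... AWS_SECRET_ACCESS_KEY=... \
//         cargo run --example dataframe-to-s3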