Refactor S3 client implementation (#13)

Added the following S3 APIs (a brief usage sketch follows the list):

* abort_multipart_upload()
* bucket_exists()
* complete_multipart_upload()
* create_multipart_upload()
* get_object()
* list_buckets()
* list_objects_v1()
* list_objects_v2()
* list_object_versions()
* list_objects()
* make_bucket()
* put_object()
* put_object_api()
* remove_bucket()
* remove_object()
* remove_objects_api()
* remove_objects()
* select_object_content()
* stat_object()
* upload_part()
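
For orientation, a hypothetical call sequence against the refactored client. The *Args types are taken from src/s3/args.rs in this diff; the client module path, constructor, and async method signatures are assumptions, not shown here:

// Hypothetical usage sketch; only the *Args types appear in this diff.
use minio::s3::args::{BucketExistsArgs, MakeBucketArgs, PutObjectApiArgs};

async fn demo(client: &minio::s3::client::Client<'_>) -> Result<(), minio::s3::error::Error> {
    // Create the bucket if it is missing, then upload a small object.
    if !client.bucket_exists(&BucketExistsArgs::new("my-bucket")?).await? {
        client.make_bucket(&MakeBucketArgs::new("my-bucket")?).await?;
    }
    client
        .put_object_api(&PutObjectApiArgs::new("my-bucket", "greeting.txt", b"hello")?)
        .await?;
    Ok(())
}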

Signed-off-by: Bala.FA <bala@minio.io>
Author: Bala FA <bala@minio.io>
Date: 2022-08-21 03:40:11 +05:30 (committed via GitHub)
commit 0fb80e1456
parent 91ad6401c3
23 changed files with 5686 additions and 1757 deletions

.github/workflow/ci.yml (new file)

@@ -0,0 +1,39 @@
name: MinIO Rust Library
on:
push:
branches: [ "master" ]
pull_request:
branches: [ "master" ]
env:
CARGO_TERM_COLOR: always
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
# - uses: actions-rs/toolchain@v1
# with:
# toolchain: stable
- name: Check style
run: |
cargo fmt --all -- --check
- name: Start MinIO server
run: |
wget --quiet https://dl.min.io/server/minio/release/linux-amd64/minio
chmod +x minio
mkdir -p ~/.minio/certs
cp ./tests/public.crt ./tests/private.key ~/.minio/certs/
sudo cp ./tests/public.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
MINIO_CI_CD=true ./minio server /tmp/test-xl/{1...4}/ &
sleep 10
- name: Run tests
run: |
SERVER_ENDPOINT=https://localhost:9000/ ACCESS_KEY=minioadmin SECRET_KEY=minioadmin cargo test --verbose -- --nocapture
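
The workflow hands the endpoint and credentials to `cargo test` through environment variables, so integration tests can pick them up with `std::env::var`. A minimal sketch (only the variable names SERVER_ENDPOINT, ACCESS_KEY, and SECRET_KEY come from the workflow above; the test itself is hypothetical):

// Hypothetical test scaffolding; reads the same variables the CI job exports.
#[test]
fn ci_env_is_present() {
    for name in ["SERVER_ENDPOINT", "ACCESS_KEY", "SECRET_KEY"] {
        assert!(std::env::var(name).is_ok(), "missing env var: {}", name);
    }
}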

Cargo.toml (modified)

@@ -1,26 +1,30 @@
[package]
name = "minio-rs"
name = "minio"
version = "0.1.0"
authors = ["MinIO Dev Team <dev@min.io>"]
edition = "2018"
edition = "2021"
[dependencies]
bytes = "0.4.12"
futures = "0.1.27"
http = "0.1.17"
hyper = "0.12.28"
hyper-tls = "0.3.2"
log = "0.4.6"
pretty_env_logger = "0.3.0"
ring = "0.14.6"
roxmltree = "0.6.0"
serde = "1.0.92"
serde_derive = "1.0.124"
serde_json = "1.0.39"
time = "0.1.42"
hyper = { version = "0.14", features = ["full"] }
tokio = { version = "1", features = ["full"] }
xml-rs = "0.8.3"
quick-xml = { version = "0.22", features = [ "serialize" ] }
thiserror = "1.0.24"
derivative = "2.2.0"
multimap = "0.8.3"
urlencoding = "2.1.0"
lazy_static = "1.4.0"
regex = "1.5.6"
chrono = "0.4.19"
chrono_locale = "0.1.1"
sha2 = "0.10.2"
base64 = "0.13.0"
md5 = "0.7.0"
crc = "3.0.0"
byteorder = "1.4.3"
hmac = "0.12.1"
hex = "0.4.3"
reqwest = { version = "0.11.11", features = ["stream"] }
futures-core = "0.3.21"
bytes = "1.2.0"
futures-util = "0.3.21"
xmltree = "0.10.3"
http = "0.2.8"
dashmap = "5.3.4"
rand = "0.8.5"

src/lib.rs (modified)

@@ -1,84 +1,16 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod minio;
#[cfg(test)]
mod tests {
use futures::{future::Future, stream::Stream};
use hyper::rt;
use log::debug;
use super::*;
fn get_local_default_server() -> minio::Client {
match minio::Client::new("http://localhost:9000") {
Ok(mut c) => {
c.set_credentials(minio::Credentials::new("minio", "minio123"));
c
}
Err(_) => panic!("could not make local client"),
}
}
#[test]
fn test_lib_functions() {
println!("test func");
rt::run(rt::lazy(|| {
let c = minio::Client::get_play_client();
let bucket_name = "aaaa";
c.put_object_req(bucket_name, "hhhhhhhhhh", vec![], "object content".as_bytes().to_vec())
.and_then(|g| {
print!("object: {} {} {:?}", g.object_size, g.etag, g.content_type);
g.get_object_stream().concat2()
})
.map(|c| {
println!("{:?}", c);
})
.map_err(|c| {
println!("{:?}", c);
})
.map(|_| {})
}));
rt::run(rt::lazy(|| {
let c = minio::Client::get_play_client();
let bucket = "aaaa";
c.get_object_req(bucket, "hhhhhhhhhh", vec![])
.and_then(|g| {
debug!("object: {} {} {:?}", g.object_size, g.etag, g.content_type);
g.get_object_stream().concat2()
})
.map(|c| debug!("get obj res: {:?}", c))
.map_err(|c| debug!("err res: {:?}", c))
.map(|_| {})
}));
rt::run(rt::lazy(|| {
let c = minio::Client::get_play_client();
let bucket = "aaaa";
c.delete_bucket(bucket)
.map(|_| debug!("Deleted!"))
.map_err(|err| debug!("del err: {:?}", err))
.map(|_| {})
}));
}
}
pub mod s3;

src/minio module root (deleted)

@@ -1,534 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::env;
use std::str;
use std::string::String;
use futures::future::{self, Future};
use futures::Stream;
use http;
use hyper::{body::Body, client, header, header::HeaderMap, Method, Request, Response, Uri};
use hyper::header::{HeaderName, HeaderValue};
use hyper_tls::HttpsConnector;
use log::debug;
use time;
use time::Tm;
use types::{Err, GetObjectResp, ListObjectsResp, Region};
pub use types::BucketInfo;
use crate::minio::net::{Values, ValuesAccess};
use crate::minio::xml::{parse_s3_error, S3GenericError};
use bytes::Buf;
mod api;
mod api_notification;
mod net;
mod sign;
mod types;
mod xml;
mod woxml;
pub const SPACE_BYTE: &[u8; 1] = b" ";
#[derive(Debug, Clone)]
pub struct Credentials {
access_key: String,
secret_key: String,
}
impl Credentials {
pub fn new(ak: &str, sk: &str) -> Credentials {
Credentials {
access_key: ak.to_string(),
secret_key: sk.to_string(),
}
}
pub fn from_env() -> Result<Credentials, Err> {
let (ak, sk) = (env::var("MINIO_ACCESS_KEY"), env::var("MINIO_SECRET_KEY"));
match (ak, sk) {
(Ok(ak), Ok(sk)) => Ok(Credentials::new(ak.as_str(), sk.as_str())),
_ => Err(Err::InvalidEnv(
"Missing MINIO_ACCESS_KEY or MINIO_SECRET_KEY environment variables".to_string(),
)),
}
}
}
#[derive(Clone)]
enum ConnClient {
HttpCC(client::Client<client::HttpConnector, Body>),
HttpsCC(client::Client<HttpsConnector<client::HttpConnector>, Body>),
}
impl ConnClient {
fn make_req(&self, req: http::Request<Body>) -> client::ResponseFuture {
match self {
ConnClient::HttpCC(c) => c.request(req),
ConnClient::HttpsCC(c) => c.request(req),
}
}
}
pub struct Client {
server: Uri,
region: Region,
conn_client: ConnClient,
pub credentials: Option<Credentials>,
}
impl Client {
pub fn new(server: &str) -> Result<Client, Err> {
let valid = server.parse::<Uri>();
match valid {
Ok(server_uri) => {
if server_uri.host().is_none() {
Err(Err::InvalidUrl("no host specified!".to_string()))
} else if server_uri.scheme_str() != Some("http")
&& server_uri.scheme_str() != Some("https")
{
Err(Err::InvalidUrl("invalid scheme!".to_string()))
} else {
Ok(Client {
server: server_uri.clone(),
region: Region::empty(),
conn_client: if server_uri.scheme_str() == Some("http") {
ConnClient::HttpCC(client::Client::new())
} else {
let https = HttpsConnector::new(4).unwrap();
ConnClient::HttpsCC(
client::Client::builder().build::<_, hyper::Body>(https),
)
},
credentials: None,
})
}
}
Err(err) => Err(Err::InvalidUrl(err.to_string())),
}
}
pub fn set_credentials(&mut self, credentials: Credentials) {
self.credentials = Some(credentials);
}
pub fn set_region(&mut self, r: Region) {
self.region = r;
}
fn add_host_header(&self, header_map: &mut HeaderMap) {
let host_val = match self.server.port_part() {
Some(port) => format!("{}:{}", self.server.host().unwrap_or(""), port),
None => self.server.host().unwrap_or("").to_string(),
};
match header::HeaderValue::from_str(&host_val) {
Ok(v) => {
header_map.insert(header::HOST, v);
}
_ => {}
}
}
pub fn get_play_client() -> Client {
Client {
server: "https://play.min.io:9000".parse::<Uri>().unwrap(),
region: Region::new("us-east-1"),
conn_client: {
let https = HttpsConnector::new(4).unwrap();
ConnClient::HttpsCC(client::Client::builder().build::<_, hyper::Body>(https))
},
credentials: Some(Credentials::new(
"Q3AM3UQ867SPQQA43P2F",
"zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)),
}
}
fn signed_req_future(
&self,
mut s3_req: S3Req,
body_res: Result<Body, Err>,
) -> impl Future<Item=Response<Body>, Error=Err> {
let hmap = &mut s3_req.headers;
self.add_host_header(hmap);
let body_hash_hdr = (
HeaderName::from_static("x-amz-content-sha256"),
HeaderValue::from_static("UNSIGNED-PAYLOAD"),
);
hmap.insert(body_hash_hdr.0.clone(), body_hash_hdr.1.clone());
let creds = self.credentials.clone();
let region = self.region.clone();
let server_addr = self.server.to_string();
let conn_client = self.conn_client.clone();
future::result(body_res)
.and_then(move |body| {
s3_req.body = body;
let sign_hdrs = sign::sign_v4(&s3_req, creds, region);
debug!("signout: {:?}", sign_hdrs);
api::mk_request(s3_req, &server_addr, &sign_hdrs)
})
.and_then(move |req| {
debug!("{:?}", req);
conn_client.make_req(req).map_err(|e| Err::HyperErr(e))
})
.and_then(|resp| {
let st = resp.status();
if st.is_success() {
Ok(resp)
} else {
Err(Err::RawSvcErr(st, resp))
}
})
.or_else(|err| {
future::err(err)
.or_else(|x| match x {
Err::RawSvcErr(st, resp) => Ok((st, resp)),
other_err => Err(other_err),
})
.and_then(|(st, resp)| {
resp.into_body()
.concat2()
.map_err(|err| Err::HyperErr(err))
.and_then(move |chunk| {
match st.as_str() {
"404" => {
let x = str::from_utf8(&chunk.bytes());
let s3_err = parse_s3_error(x.unwrap());
Err(Err::S3Error(s3_err))
}
_ => {
Err(Err::FailStatusCodeErr(st, chunk.into_bytes()))
}
}
})
})
})
}
/// get_bucket_location - Get location for the bucket_name.
pub fn get_bucket_location(
&self,
bucket_name: &str,
) -> impl Future<Item=Region, Error=Err> {
let mut qp = Values::new();
qp.set_value("location", None);
let s3_req = S3Req {
method: Method::GET,
bucket: Some(bucket_name.to_string()),
object: None,
headers: HeaderMap::new(),
query: qp,
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::empty()))
.and_then(|resp| {
// Read the whole body for bucket location response.
resp.into_body()
.concat2()
.map_err(|err| Err::HyperErr(err))
.and_then(move |chunk| chunk_to_string(&chunk))
.and_then(|s| xml::parse_bucket_location(s))
})
}
pub fn delete_bucket(&self, bucket_name: &str) -> impl Future<Item=(), Error=Err> {
let s3_req = S3Req {
method: Method::DELETE,
bucket: Some(bucket_name.to_string()),
object: None,
headers: HeaderMap::new(),
query: Values::new(),
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::empty()))
.and_then(|_| Ok(()))
}
pub fn bucket_exists(&self, bucket_name: &str) -> impl Future<Item=bool, Error=Err> {
let s3_req = S3Req {
method: Method::HEAD,
bucket: Some(bucket_name.to_string()),
object: None,
headers: HeaderMap::new(),
query: Values::new(),
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::empty()))
.then(|res| match res {
Ok(_) => Ok(true),
Err(Err::FailStatusCodeErr(st, b)) => {
let code = st.as_u16();
if code == 404 {
Ok(false)
} else {
Err(Err::FailStatusCodeErr(st, b))
}
}
Err(err) => Err(err),
})
}
pub fn get_object_req(
&self,
bucket_name: &str,
key: &str,
get_obj_opts: Vec<(HeaderName, HeaderValue)>,
) -> impl Future<Item=GetObjectResp, Error=Err> {
let mut h = HeaderMap::new();
get_obj_opts
.iter()
.map(|(x, y)| (x.clone(), y.clone()))
.for_each(|(k, v)| {
h.insert(k, v);
});
let s3_req = S3Req {
method: Method::GET,
bucket: Some(bucket_name.to_string()),
object: Some(key.to_string()),
headers: h,
query: Values::new(),
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::empty()))
.and_then(GetObjectResp::new)
}
pub fn put_object_req(
&self,
bucket_name: &str,
key: &str,
get_obj_opts: Vec<(HeaderName, HeaderValue)>,
data: Vec<u8>,
) -> impl Future<Item=GetObjectResp, Error=Err> {
let mut h = HeaderMap::new();
get_obj_opts
.iter()
.map(|(x, y)| (x.clone(), y.clone()))
.for_each(|(k, v)| {
h.insert(k, v);
});
let s3_req = S3Req {
method: Method::PUT,
bucket: Some(bucket_name.to_string()),
object: Some(key.to_string()),
headers: h,
query: Values::new(),
body: Body::from(data.clone()),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::from(data)))
.and_then(GetObjectResp::new)
}
pub fn make_bucket(&self, bucket_name: &str) -> impl Future<Item = (), Error = Err> {
let xml_body_res = xml::get_mk_bucket_body();
let bucket = bucket_name.clone().to_string();
let s3_req = S3Req {
method: Method::PUT,
bucket: Some(bucket),
object: None,
query: Values::new(),
headers: HeaderMap::new(),
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, xml_body_res)
.and_then(|_| future::ok(()))
}
pub fn list_buckets(&self) -> impl Future<Item=Vec<BucketInfo>, Error=Err> {
let s3_req = S3Req {
method: Method::GET,
bucket: None,
object: None,
query: Values::new(),
headers: HeaderMap::new(),
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::empty()))
.and_then(|resp| {
// Read the whole body for list buckets response.
resp.into_body()
.concat2()
.map_err(|err| Err::HyperErr(err))
.and_then(move |chunk| chunk_to_string(&chunk))
.and_then(|s| xml::parse_bucket_list(s))
})
}
pub fn list_objects(
&self,
b: &str,
prefix: Option<&str>,
marker: Option<&str>,
delimiter: Option<&str>,
max_keys: Option<i32>,
) -> impl Future<Item=ListObjectsResp, Error=Err> {
let mut qparams: Values = Values::new();
qparams.set_value("list-type", Some("2".to_string()));
if let Some(d) = delimiter {
qparams.set_value("delimiter", Some(d.to_string()));
}
if let Some(m) = marker {
qparams.set_value("marker", Some(m.to_string()));
}
if let Some(p) = prefix {
qparams.set_value("prefix", Some(p.to_string()));
}
if let Some(mkeys) = max_keys {
qparams.set_value("max-keys", Some(mkeys.to_string()));
}
let s3_req = S3Req {
method: Method::GET,
bucket: Some(b.to_string()),
object: None,
query: qparams,
headers: HeaderMap::new(),
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::empty()))
.and_then(|resp| {
resp.into_body()
.concat2()
.map_err(|err| Err::HyperErr(err))
.and_then(move |chunk| chunk_to_string(&chunk))
.and_then(|s| xml::parse_list_objects(s))
})
}
}
fn run_req_future(
req_result: Result<Request<Body>, Err>,
c: ConnClient,
) -> impl Future<Item=Response<Body>, Error=Err> {
future::result(req_result)
//.map_err(|e| Err::HttpErr(e))
.and_then(move |req| c.make_req(req).map_err(|e| Err::HyperErr(e)))
.and_then(|resp| {
let st = resp.status();
if st.is_success() {
Ok(resp)
} else {
Err(Err::RawSvcErr(st, resp))
}
})
}
/// Converts a `hyper::Chunk` into a string.
fn chunk_to_string(chunk: &hyper::Chunk) -> Result<String, Err> {
match String::from_utf8(chunk.to_vec()) {
Err(e) => Err(Err::Utf8DecodingErr(e)),
Ok(s) => Ok(s.to_string()),
}
}
pub struct S3Req {
method: Method,
bucket: Option<String>,
object: Option<String>,
headers: HeaderMap,
query: Values,
body: Body,
ts: Tm,
}
impl S3Req {
fn mk_path(&self) -> String {
let mut res: String = String::from("/");
if let Some(s) = &self.bucket {
res.push_str(&s);
res.push_str("/");
if let Some(o) = &self.object {
res.push_str(&o);
}
};
res
}
/// Takes the query parameters and turns them into a valid query string, e.g.:
/// {"key1":["val1","val2"],"key2":["val3","val4"]}
/// will be returned as:
/// "key1=val1&key1=val2&key2=val3&key2=val4"
fn mk_query(&self) -> String {
self.query
.iter()
.map(|(key, values)| {
values.iter().map(move |value| match value {
Some(v) => format!("{}={}", &key, v),
None => format!("{}=", &key, ),
})
})
.flatten()
.collect::<Vec<String>>()
.join("&")
}
}
#[cfg(test)]
mod minio_tests {
use std::collections::HashMap;
use super::*;
#[test]
fn serialize_query_parameters() {
let mut query_params: HashMap<String, Vec<Option<String>>> = HashMap::new();
query_params.insert(
"key1".to_string(),
vec![Some("val1".to_string()), Some("val2".to_string())],
);
query_params.insert(
"key2".to_string(),
vec![Some("val3".to_string()), Some("val4".to_string())],
);
let s3_req = S3Req {
method: Method::GET,
bucket: None,
object: None,
headers: HeaderMap::new(),
query: query_params,
body: Body::empty(),
ts: time::now_utc(),
};
let result = s3_req.mk_query();
assert!(result.contains("key1=val1"));
assert!(result.contains("key1=val2"));
assert!(result.contains("key2=val3"));
assert!(result.contains("key2=val4"));
}
}

src/minio/api.rs (deleted)

@@ -1,45 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::minio;
use hyper::{header::HeaderName, header::HeaderValue, Body, Request};
use log::debug;
pub fn mk_request(
r: minio::S3Req,
svr_str: &str,
sign_hdrs: &Vec<(HeaderName, HeaderValue)>,
) -> Result<Request<Body>, minio::Err> {
let mut request = Request::builder();
let uri_str = svr_str.trim_end_matches('/');
debug!("uri_str: {}", uri_str);
let upd_uri = format!("{}{}?{}", uri_str, &r.mk_path(), &r.mk_query());
debug!("upd_uri: {}", upd_uri);
request.uri(&upd_uri).method(&r.method);
for hdr in r
.headers
.iter()
.map(|(x, y)| (x.clone(), y.clone()))
.chain(sign_hdrs.iter().map(|x| x.clone()))
{
request.header(hdr.0, hdr.1);
}
request
.body(r.body)
.map_err(|err| minio::Err::HttpErr(err))
}

src/minio/api_notification.rs (deleted)

@@ -1,177 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::collections::HashMap;
use crate::minio::net::{Values, ValuesAccess};
use crate::minio::{Client, Err, S3Req, SPACE_BYTE};
use futures::future::Future;
use futures::{stream, Stream};
use hyper::{Body, HeaderMap, Method};
use serde_derive::Deserialize;
/// Notification event object metadata.
#[derive(Deserialize, Debug)]
pub struct ObjectMeta {
#[serde(rename(deserialize = "key"))]
pub key: String,
#[serde(rename(deserialize = "size"))]
pub size: Option<i64>,
#[serde(rename(deserialize = "eTag"))]
pub e_tag: Option<String>,
#[serde(rename(deserialize = "versionId"))]
pub version_id: Option<String>,
#[serde(rename(deserialize = "sequencer"))]
pub sequencer: String,
}
/// Notification event bucket metadata.
#[derive(Deserialize, Debug)]
pub struct BucketMeta {
#[serde(rename(deserialize = "name"))]
pub name: String,
#[serde(rename(deserialize = "ownerIdentity"))]
pub owner_identity: Identity,
#[serde(rename(deserialize = "arn"))]
pub arn: String,
}
/// Identity represents the user id; this is a compliance field.
#[derive(Deserialize, Debug)]
pub struct Identity {
#[serde(rename(deserialize = "principalId"))]
pub principal_id: String,
}
/// sourceInfo represents information on the client that
/// triggered the event notification.
#[derive(Deserialize, Debug)]
pub struct SourceInfo {
#[serde(rename(deserialize = "host"))]
pub host: String,
#[serde(rename(deserialize = "port"))]
pub port: String,
#[serde(rename(deserialize = "userAgent"))]
pub user_agent: String,
}
/// Notification event server specific metadata.
#[derive(Deserialize, Debug)]
pub struct EventMeta {
#[serde(rename(deserialize = "s3SchemaVersion"))]
pub schema_version: String,
#[serde(rename(deserialize = "configurationId"))]
pub configuration_id: String,
#[serde(rename(deserialize = "bucket"))]
pub bucket: BucketMeta,
#[serde(rename(deserialize = "object"))]
pub object: ObjectMeta,
}
/// NotificationEvent represents an Amazon S3 bucket notification event.
#[derive(Deserialize, Debug)]
pub struct NotificationEvent {
#[serde(rename(deserialize = "eventVersion"))]
pub event_version: String,
#[serde(rename(deserialize = "eventSource"))]
pub event_source: String,
#[serde(rename(deserialize = "awsRegion"))]
pub aws_region: String,
#[serde(rename(deserialize = "eventTime"))]
pub event_time: String,
#[serde(rename(deserialize = "eventName"))]
pub event_name: String,
#[serde(rename(deserialize = "source"))]
pub source: SourceInfo,
#[serde(rename(deserialize = "userIdentity"))]
pub user_identity: Identity,
#[serde(rename(deserialize = "requestParameters"))]
pub request_parameters: HashMap<String, String>,
#[serde(rename(deserialize = "responseElements"))]
pub response_elements: HashMap<String, String>,
#[serde(rename(deserialize = "s3"))]
pub s3: EventMeta,
}
/// NotificationInfo - represents the collection of notification events; it also
/// reports errors, if any, while listening on bucket notifications.
#[derive(Deserialize, Debug)]
pub struct NotificationInfo {
#[serde(rename(deserialize = "Records"), default = "Vec::new")]
pub records: Vec<NotificationEvent>,
pub err: Option<String>,
}
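// Illustrative only (not part of the original file): an empty event batch
// deserializes fine because `records` defaults to an empty Vec and `err` is
// an Option.
#[cfg(test)]
mod notification_sketch {
    use super::*;

    #[test]
    fn parses_empty_batch() {
        let info: NotificationInfo =
            serde_json::from_str(r#"{"Records": []}"#).unwrap();
        assert!(info.records.is_empty() && info.err.is_none());
    }
}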
impl Client {
/// listen_bucket_notification - Get bucket notifications for the bucket_name.
pub fn listen_bucket_notification(
&self,
bucket_name: &str,
prefix: Option<String>,
suffix: Option<String>,
events: Vec<String>,
) -> impl Stream<Item = NotificationInfo, Error = Err> {
// Prepare request query parameters
let mut query_params: Values = Values::new();
query_params.set_value("prefix", prefix);
query_params.set_value("suffix", suffix);
let opt_events: Vec<Option<String>> = events.into_iter().map(|evt| Some(evt)).collect();
query_params.insert("events".to_string(), opt_events);
// build signed request
let s3_req = S3Req {
method: Method::GET,
bucket: Some(bucket_name.to_string()),
object: None,
headers: HeaderMap::new(),
query: query_params,
body: Body::empty(),
ts: time::now_utc(),
};
self.signed_req_future(s3_req, Ok(Body::empty()))
.map(|resp| {
// Read the whole body for bucket location response.
resp.into_body()
.map_err(|e| Err::HyperErr(e))
.filter(|c| {
// filter out white spaces sent by the server to indicate it's still alive
c[0] != SPACE_BYTE[0]
})
.map(|chunk| {
// Split the chunk by lines and process.
// TODO: Handle case when partial lines are present in the chunk
let chunk_lines = String::from_utf8(chunk.to_vec())
.map(|p| {
let lines =
p.lines().map(|s| s.to_string()).collect::<Vec<String>>();
stream::iter_ok(lines.into_iter())
})
.map_err(|e| Err::Utf8DecodingErr(e));
futures::future::result(chunk_lines).flatten_stream()
})
.flatten()
.map(|line| {
// Deserialize the notification
let notification_info: NotificationInfo =
serde_json::from_str(&line).unwrap();
notification_info
})
})
.flatten_stream()
}
}
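// Illustrative caller (not part of the original file): draining the
// notification stream with the futures-0.1 combinators this module targets.
//
//   use futures::{Future, Stream};
//   let fut = client
//       .listen_bucket_notification("my-bucket", None, None,
//                                   vec!["s3:ObjectCreated:*".to_string()])
//       .for_each(|info| { println!("{:?}", info.records); Ok(()) })
//       .map_err(|e| println!("listen error: {:?}", e));
//   hyper::rt::run(fut);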

src/minio/net.rs (deleted)

@@ -1,113 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::collections::HashMap;
pub type Values = HashMap<String, Vec<Option<String>>>;
pub trait ValuesAccess {
fn get_value(&self, key: &str) -> Option<String>;
fn set_value(&mut self, key: &str, value: Option<String>);
fn add_value(&mut self, key: &str, value: Option<String>);
fn del_value(&mut self, key: &str);
}
impl ValuesAccess for Values {
/// Gets the first item for a given key. If the key is absent it returns `None`.
/// To get multiple values use the `Values` instance as a map.
fn get_value(&self, key: &str) -> Option<String> {
let value_vec = match self.get(key) {
Some(v) => v,
None => return None,
};
if value_vec.len() == 0 {
return None;
}
return value_vec.get(0).unwrap().clone();
}
/// Sets the key to value. It replaces any existing values.
fn set_value(&mut self, key: &str, value: Option<String>) {
self.insert(key.to_string(), vec![value]);
}
/// Add adds the value to key. It appends to any existing values associated with key.
fn add_value(&mut self, key: &str, value: Option<String>) {
match self.get_mut(key) {
Some(value_vec) => value_vec.push(value),
None => (),
}
}
/// Del deletes the values associated with key.
fn del_value(&mut self, key: &str) {
self.remove(key);
}
}
#[cfg(test)]
mod net_tests {
use super::*;
#[test]
fn values_set() {
let mut values = Values::new();
values.set_value("key", Some("value".to_string()));
assert_eq!(values.len(), 1);
assert_eq!(values.get("key").unwrap().len(), 1);
values.set_value("key", None);
assert_eq!(values.len(), 1);
assert_eq!(values.get("key").unwrap().len(), 1);
}
#[test]
fn values_add() {
let mut values = Values::new();
values.set_value("key", Some("value".to_string()));
assert_eq!(values.get("key").unwrap().len(), 1);
values.add_value("key", None);
assert_eq!(values.get("key").unwrap().len(), 2);
}
#[test]
fn values_del() {
let mut values = Values::new();
values.set_value("key", Some("value".to_string()));
values.add_value("key", None);
values.del_value("key");
assert_eq!(values.len(), 0);
let mut values2 = Values::new();
values2.set_value("key", Some("value".to_string()));
values2.add_value("key", None);
values2.set_value("key2", Some("value".to_string()));
values2.add_value("key2", None);
values2.del_value("key");
assert_eq!(values2.len(), 1);
}
#[test]
fn values_get() {
let mut values = Values::new();
values.set_value("key", Some("value".to_string()));
values.add_value("key", None);
assert_eq!(values.get_value("key"), Some("value".to_string()));
}
}

src/minio/sign.rs (deleted)

@@ -1,238 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::collections::{HashMap, HashSet};
use hyper::header::{
AUTHORIZATION, CONTENT_LENGTH, CONTENT_TYPE, HeaderMap, HeaderName, HeaderValue, USER_AGENT,
};
use log::debug;
use ring::{digest, hmac};
use time::Tm;
use crate::minio;
use crate::minio::types::Region;
fn aws_format_time(t: &Tm) -> String {
t.strftime("%Y%m%dT%H%M%SZ").unwrap().to_string()
}
fn aws_format_date(t: &Tm) -> String {
t.strftime("%Y%m%d").unwrap().to_string()
}
fn mk_scope(t: &Tm, r: &minio::Region) -> String {
let scope_time = t.strftime("%Y%m%d").unwrap().to_string();
format!("{}/{}/s3/aws4_request", scope_time, r.to_string())
}
// Returns list of SORTED headers that will be signed.
// TODO: verify that input headermap contains only ASCII valued headers
fn get_headers_to_sign(h: HeaderMap) -> Vec<(String, String)> {
let ignored_hdrs: HashSet<HeaderName> = vec![
AUTHORIZATION,
CONTENT_LENGTH,
CONTENT_TYPE,
USER_AGENT].into_iter().collect();
let mut res: Vec<(String, String)> = h
.iter()
.filter(|(x, _)| !ignored_hdrs.contains(*x))
.map(|(x, y)| {
(
x.as_str().to_string(),
y.to_str()
.expect("Unexpected non-ASCII header value!")
.to_string(),
)
}).collect();
res.sort();
res
}
fn uri_encode(c: char, encode_slash: bool) -> String {
if c == '/' {
if encode_slash {
"%2F".to_string()
} else {
"/".to_string()
}
} else if c.is_ascii_alphanumeric() || c == '_' || c == '-' || c == '.' || c == '~' {
c.to_string()
} else {
let mut b = [0; 8];
let cs = c.encode_utf8(&mut b).as_bytes();
cs.iter().map(|x| format!("%{:02X}", x)).collect()
}
}
fn uri_encode_str(s: &str, encode_slash: bool) -> String {
s.chars().map(|x| uri_encode(x, encode_slash)).collect()
}
fn get_canonical_querystr(q: &HashMap<String, Vec<Option<String>>>) -> String {
let mut hs: Vec<(String, Vec<Option<String>>)> = q.clone().drain().collect();
// sort keys
hs.sort();
// Build canonical query string
hs.iter()
.map(|(key, values)| {
values.iter().map(move |value| match value {
Some(v) => format!("{}={}", &key, uri_encode_str(&v, true)),
None => format!("{}=", &key),
})
})
.flatten()
.collect::<Vec<String>>()
.join("&")
}
fn get_canonical_request(
r: &minio::S3Req,
hdrs_to_use: &Vec<(String, String)>,
signed_hdrs_str: &str,
) -> String {
let path_str = r.mk_path();
let canonical_qstr = get_canonical_querystr(&r.query);
let canonical_hdrs: String = hdrs_to_use
.iter()
.map(|(x, y)| format!("{}:{}\n", x.clone(), y.clone()))
.collect();
// FIXME: using only unsigned payload for now - need to add
// hashing of payload.
let payload_hash_str = String::from("UNSIGNED-PAYLOAD");
let res = vec![
r.method.to_string(),
uri_encode_str(&path_str, false),
canonical_qstr,
canonical_hdrs,
signed_hdrs_str.to_string(),
payload_hash_str,
];
res.join("\n")
}
fn string_to_sign(ts: &Tm, scope: &str, canonical_request: &str) -> String {
let sha256_digest: String = digest::digest(&digest::SHA256, canonical_request.as_bytes())
.as_ref()
.iter()
.map(|x| format!("{:02x}", x))
.collect();
vec![
"AWS4-HMAC-SHA256",
&aws_format_time(&ts),
scope,
&sha256_digest,
]
.join("\n")
}
fn hmac_sha256(msg: &str, key: &[u8]) -> hmac::Signature {
let key = hmac::SigningKey::new(&digest::SHA256, key);
hmac::sign(&key, msg.as_bytes())
}
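// SigV4 signing-key derivation, as implemented below (per the AWS Signature
// Version 4 spec):
//   kDate    = HMAC-SHA256("AWS4" + secret_key, date)
//   kRegion  = HMAC-SHA256(kDate, region)
//   kService = HMAC-SHA256(kRegion, "s3")
//   kSigning = HMAC-SHA256(kService, "aws4_request")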
fn get_signing_key(ts: &Tm, region: &str, secret_key: &str) -> Vec<u8> {
let kstr = format!("AWS4{}", secret_key);
let s1 = hmac_sha256(&aws_format_date(&ts), kstr.as_bytes());
let s2 = hmac_sha256(&region, s1.as_ref());
let s3 = hmac_sha256("s3", s2.as_ref());
let s4 = hmac_sha256("aws4_request", s3.as_ref());
// FIXME: can this be done better?
s4.as_ref().iter().map(|x| x.clone()).collect()
}
fn compute_sign(str_to_sign: &str, key: &Vec<u8>) -> String {
let s1 = hmac_sha256(&str_to_sign, key.as_slice());
s1.as_ref().iter().map(|x| format!("{:02x}", x)).collect()
}
pub fn sign_v4(
request: &minio::S3Req,
credentials: Option<minio::Credentials>,
region: Region,
) -> Vec<(HeaderName, HeaderValue)> {
credentials.map_or(Vec::new(), |creds| {
let scope = mk_scope(&request.ts, &region);
let date_hdr = (
HeaderName::from_static("x-amz-date"),
HeaderValue::from_str(&aws_format_time(&request.ts)).unwrap(),
);
let mut hmap = request.headers.clone();
hmap.insert(date_hdr.0.clone(), date_hdr.1.clone());
let headers = get_headers_to_sign(hmap);
let signed_hdrs_str: String = headers
.iter()
.map(|(x, _)| x.clone())
.collect::<Vec<String>>()
.join(";");
let cr = get_canonical_request(request, &headers, &signed_hdrs_str);
debug!("canonicalreq: {}", cr);
let s2s = string_to_sign(&request.ts, &scope, &cr);
debug!("s2s: {}", s2s);
let skey = get_signing_key(&request.ts, &region.to_string(), &creds.secret_key);
debug!("skey: {:?}", skey);
let signature = compute_sign(&s2s, &skey);
debug!("sign: {}", signature);
let auth_hdr_val = format!(
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
&creds.access_key, &scope, &signed_hdrs_str, &signature,
);
let auth_hdr = (AUTHORIZATION, HeaderValue::from_str(&auth_hdr_val).unwrap());
vec![auth_hdr, date_hdr]
})
}
#[cfg(test)]
mod sign_tests {
use super::*;
#[test]
fn canonical_ordered() {
let mut query_params: HashMap<String, Vec<Option<String>>> = HashMap::new();
query_params.insert("key2".to_string(), vec![Some("val3".to_string()), None]);
query_params.insert(
"key1".to_string(),
vec![Some("val1".to_string()), Some("val2".to_string())],
);
assert_eq!(
get_canonical_querystr(&query_params),
"key1=val1&key1=val2&key2=val3&key2="
);
}
#[test]
fn headers_to_sign_remove_ignored_and_sort() {
let mut map = HeaderMap::new();
map.insert(AUTHORIZATION, "hello".parse().unwrap());
map.insert(CONTENT_LENGTH, "123".parse().unwrap());
map.insert("second", "123".parse().unwrap());
map.insert("first", "123".parse().unwrap());
assert_eq!(
get_headers_to_sign(map),
vec![("first".parse().unwrap(), "123".parse().unwrap()),
("second".parse().unwrap(), "123".parse().unwrap())]
);
}
}

src/minio/types.rs (deleted)

@@ -1,190 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use bytes::Bytes;
use futures::stream::Stream;
use hyper::header::{
HeaderMap, HeaderValue, CACHE_CONTROL, CONTENT_DISPOSITION, CONTENT_ENCODING, CONTENT_LANGUAGE,
CONTENT_LENGTH, CONTENT_TYPE, ETAG, EXPIRES,
};
use hyper::{body::Body, Response};
use roxmltree;
use std::collections::HashMap;
use std::string;
use time::{strptime, Tm};
use crate::minio::xml::S3GenericError;
#[derive(Clone)]
pub struct Region(String);
impl Region {
pub fn new(s: &str) -> Region {
Region(s.to_string())
}
pub fn empty() -> Region {
Region::new("")
}
pub fn to_string(&self) -> String {
self.0.clone()
}
}
#[derive(Debug)]
pub enum Err {
InvalidUrl(String),
InvalidEnv(String),
InvalidTmFmt(String),
HttpErr(http::Error),
HyperErr(hyper::Error),
FailStatusCodeErr(hyper::StatusCode, Bytes),
Utf8DecodingErr(string::FromUtf8Error),
XmlDocParseErr(roxmltree::Error),
XmlElemMissing(String),
XmlElemParseErr(String),
InvalidXmlResponseErr(String),
MissingRequiredParams,
RawSvcErr(hyper::StatusCode, Response<Body>),
XmlWriteErr(String),
S3Error(S3GenericError),
}
pub struct GetObjectResp {
pub user_metadata: Vec<(String, String)>,
pub object_size: u64,
pub etag: String,
// standard headers
pub content_type: Option<String>,
pub content_language: Option<String>,
pub expires: Option<String>,
pub cache_control: Option<String>,
pub content_disposition: Option<String>,
pub content_encoding: Option<String>,
resp: Response<Body>,
}
impl GetObjectResp {
pub fn new(r: Response<Body>) -> Result<GetObjectResp, Err> {
let h = r.headers();
let cl_opt = hv2s(h.get(CONTENT_LENGTH)).and_then(|l| l.parse::<u64>().ok());
let etag_opt = hv2s(h.get(ETAG));
match (cl_opt, etag_opt) {
(Some(cl), Some(etag)) => Ok(GetObjectResp {
user_metadata: extract_user_meta(h),
object_size: cl,
etag: etag,
content_type: hv2s(h.get(CONTENT_TYPE)),
content_language: hv2s(h.get(CONTENT_LANGUAGE)),
expires: hv2s(h.get(EXPIRES)),
cache_control: hv2s(h.get(CACHE_CONTROL)),
content_disposition: hv2s(h.get(CONTENT_DISPOSITION)),
content_encoding: hv2s(h.get(CONTENT_ENCODING)),
resp: r,
}),
_ => Err(Err::MissingRequiredParams),
}
}
// Consumes GetObjectResp
pub fn get_object_stream(self) -> impl Stream<Item = hyper::Chunk, Error = Err> {
self.resp.into_body().map_err(|err| Err::HyperErr(err))
}
}
fn hv2s(o: Option<&HeaderValue>) -> Option<String> {
o.and_then(|v| v.to_str().ok()).map(|x| x.to_string())
}
fn extract_user_meta(h: &HeaderMap) -> Vec<(String, String)> {
h.iter()
.map(|(k, v)| (k.as_str(), v.to_str()))
.filter(|(k, v)| k.to_lowercase().starts_with("x-amz-meta-") && v.is_ok())
.map(|(k, v)| (k.to_string(), v.unwrap_or("").to_string()))
.collect()
}
fn parse_aws_time(time_str: &str) -> Result<Tm, Err> {
strptime(time_str, "%Y-%m-%dT%H:%M:%S.%Z")
.map_err(|err| Err::InvalidTmFmt(format!("{:?}", err)))
}
#[derive(Debug)]
pub struct BucketInfo {
pub name: String,
pub created_time: Tm,
}
impl BucketInfo {
pub fn new(name: &str, time_str: &str) -> Result<BucketInfo, Err> {
parse_aws_time(time_str).and_then(|ctime| {
Ok(BucketInfo {
name: name.to_string(),
created_time: ctime,
})
})
}
}
#[derive(Debug)]
pub struct ObjectInfo {
pub name: String,
pub modified_time: Tm,
pub etag: String,
pub size: i64,
pub storage_class: String,
pub metadata: HashMap<String, String>,
}
impl ObjectInfo {
pub fn new(
name: &str,
mtime_str: &str,
etag: &str,
size: i64,
storage_class: &str,
metadata: HashMap<String, String>,
) -> Result<ObjectInfo, Err> {
parse_aws_time(mtime_str).and_then(|mtime| {
Ok(ObjectInfo {
name: name.to_string(),
modified_time: mtime,
etag: etag.to_string(),
size: size,
storage_class: storage_class.to_string(),
metadata: metadata,
})
})
}
}
#[derive(Debug)]
pub struct ListObjectsResp {
pub bucket_name: String,
pub prefix: String,
pub max_keys: i32,
pub key_count: i32,
pub is_truncated: bool,
pub object_infos: Vec<ObjectInfo>,
pub common_prefixes: Vec<String>,
pub next_continuation_token: String,
}

src/minio/woxml.rs (deleted)

@@ -1,91 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern crate xml;
use xml::writer::{EmitterConfig, EventWriter, XmlEvent};
pub struct XmlNode {
name: String,
namespace: Option<String>,
text: Option<String>,
children: Vec<XmlNode>,
}
impl XmlNode {
pub fn new(name: &str) -> XmlNode {
XmlNode {
name: name.to_string(),
namespace: None,
text: None,
children: Vec::new(),
}
}
pub fn namespace(mut self, ns: &str) -> XmlNode {
self.namespace = Some(ns.to_string());
self
}
pub fn text(mut self, value: &str) -> XmlNode {
self.text = Some(value.to_string());
self
}
pub fn children(mut self, kids: Vec<XmlNode>) -> XmlNode {
self.children = kids;
self
}
fn serialize_rec<W>(&self, xml_writer: &mut EventWriter<W>) -> xml::writer::Result<()>
where
W: std::io::Write,
{
let st_elem = XmlEvent::start_element(self.name.as_str());
let st_elem = match &self.namespace {
Some(ns) => st_elem.ns("", ns.clone()),
None => st_elem,
};
xml_writer.write(st_elem)?;
// An XML node usually has either a text field or child nodes, not both.
match &self.text {
Some(content) => {
let content_node = XmlEvent::characters(content.as_str());
xml_writer.write(content_node)?;
}
None => {
for child in &self.children {
child.serialize_rec(xml_writer)?;
}
}
}
let end_elem: XmlEvent = XmlEvent::end_element().name(self.name.as_str()).into();
xml_writer.write(end_elem)?;
Ok(())
}
pub fn serialize<W>(&self, writer: W) -> xml::writer::Result<()>
where
W: std::io::Write,
{
let mut xml_writer = EmitterConfig::new()
.perform_indent(true)
.create_writer(writer);
self.serialize_rec(&mut xml_writer)
}
}
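// Illustrative only (not part of the original file): the builder in use,
// mirroring get_mk_bucket_body in xml.rs.
//
//   let node = XmlNode::new("CreateBucketConfiguration")
//       .namespace("http://s3.amazonaws.com/doc/2006-03-01/")
//       .children(vec![XmlNode::new("LocationConstraint").text("us-east-1")]);
//   let mut buf = Vec::new();
//   node.serialize(&mut buf).unwrap(); // buf now holds the indented XML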

src/minio/xml.rs (deleted)

@@ -1,265 +0,0 @@
/*
* MinIO Rust Library for Amazon S3 Compatible Cloud Storage
* Copyright 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern crate quick_xml;
extern crate serde;
use std::collections::HashMap;
use std::str::FromStr;
use std::str;
use hyper::body::Body;
use roxmltree;
use serde_derive::Deserialize;
use thiserror::Error;
use crate::minio::types::{BucketInfo, Err, ListObjectsResp, ObjectInfo, Region};
use crate::minio::woxml;
pub fn parse_bucket_location(s: String) -> Result<Region, Err> {
let res = roxmltree::Document::parse(&s);
match res {
Ok(doc) => {
let region_res = doc.root_element().text();
if let Some(region) = region_res {
Ok(Region::new(region))
} else {
Ok(Region::empty())
}
}
Err(e) => Err(Err::XmlDocParseErr(e)),
}
}
#[allow(non_snake_case)]
#[derive(Debug, Deserialize, PartialEq)]
pub struct Error {
Param: Option<String>,
Code: String,
Message: String,
BucketName: String,
Key: Option<String>,
RequestId: String,
HostId: String,
// Region where the bucket is located. This header is returned
// only in HEAD bucket and ListObjects response.
Region: Option<String>,
}
#[derive(Error, Debug, PartialEq)]
pub enum S3GenericError {
#[error("no such bucket: {error:?}")]
NoSuchBucket {
error: Error,
},
#[error("unknown error: {error:?}")]
Unknown {
error: Error,
},
}
pub(crate) fn parse_s3_error(response_xml: &str) -> S3GenericError {
println!("{}",response_xml);
let doc: Error = quick_xml::de::from_str(response_xml).unwrap();
match doc.Code.as_str() {
"NoSuchBucket" => {
return S3GenericError::NoSuchBucket {
error: doc,
};
}
_ => {
return S3GenericError::Unknown {
error: doc,
};
}
}
}
#[cfg(test)]
mod xml_tests {
use super::*;
#[test]
fn parse_xml_error() {
let response_xml = r#"
<?xml version="1.0" encoding="UTF-8"?>
<Error>
<Code>NoSuchBucket</Code>
<Message>The specified bucket does not exist</Message>
<Key>hhhhhhhhhh</Key>
<BucketName>aaaa</BucketName>
<Resource>/aaaa/hhhhhhhhhh</Resource>
<RequestId>166B5E4E3A406CC6</RequestId>
<HostId>129c19c9-4cf6-44ff-9f2d-4cb7611be894</HostId>
</Error>
"#;
let s3_error = parse_s3_error(response_xml);
print!("test! {:?}", s3_error);
assert!(matches!(s3_error, S3GenericError::NoSuchBucket { .. }));
}
}
pub fn parse_bucket_list(s: String) -> Result<Vec<BucketInfo>, Err> {
let res = roxmltree::Document::parse(&s);
match res {
Ok(doc) => {
let mut bucket_infos: Vec<BucketInfo> = Vec::new();
let bucket_nodes = doc
.root_element()
.descendants()
.filter(|node| node.has_tag_name("Bucket"));
for bucket in bucket_nodes {
let bucket_names = bucket.children().filter(|node| node.has_tag_name("Name"));
let bucket_ctimes = bucket
.children()
.filter(|node| node.has_tag_name("CreationDate"));
for (name_node, ctime_node) in bucket_names.zip(bucket_ctimes) {
let name = name_node.text().ok_or(Err::InvalidXmlResponseErr(
"Missing name in list buckets XML response ".to_string(),
))?;
let ctime = ctime_node.text().ok_or(Err::InvalidXmlResponseErr(
"Missing creation date in list buckets XML response".to_string(),
))?;
match BucketInfo::new(name, ctime) {
Ok(bucket_info) => bucket_infos.push(bucket_info),
Err(err) => return Err(Err::InvalidTmFmt(format!("{:?}", err))),
}
}
}
Ok(bucket_infos)
}
Err(err) => Err(Err::XmlDocParseErr(err)),
}
}
pub fn parse_list_objects(s: String) -> Result<ListObjectsResp, Err> {
let doc_res = roxmltree::Document::parse(&s);
match doc_res {
Ok(doc) => parse_list_objects_result(doc),
Err(err) => Err(Err::XmlDocParseErr(err)),
}
}
pub fn get_mk_bucket_body() -> Result<Body, Err> {
let lc_node = woxml::XmlNode::new("LocationConstraint").text("us-east-1");
let mk_bucket_xml = woxml::XmlNode::new("CreateBucketConfiguration")
.namespace("http://s3.amazonaws.com/doc/2006-03-01/")
.children(vec![lc_node]);
let mut xml_bytes = Vec::new();
mk_bucket_xml
.serialize(&mut xml_bytes)
.or_else(|err| Err(Err::XmlWriteErr(err.to_string())))?;
Ok(Body::from(xml_bytes))
}
fn get_child_node<'a>(node: &'a roxmltree::Node, tag_name: &str) -> Option<&'a str> {
node.children()
.find(|node| node.has_tag_name(tag_name))
.and_then(|node| node.text())
}
// gets text value inside given tag or return default
fn get_child_node_or<'a>(node: &'a roxmltree::Node, tag_name: &str, default: &'a str) -> &'a str {
get_child_node(&node, tag_name).unwrap_or(default)
}
fn parse_child_content<T>(node: &roxmltree::Node, tag: &str) -> Result<T, Err>
where
T: FromStr,
{
let content = get_child_node(node, tag).ok_or(Err::XmlElemMissing(format!("{:?}", tag)))?;
str::parse::<T>(content).map_err(|_| Err::XmlElemParseErr(format!("{}", tag)))
}
fn parse_tag_content<T>(node: &roxmltree::Node) -> Result<T, Err>
where
T: FromStr,
{
let content = must_get_node_text(node)?;
str::parse::<T>(content).map_err(|_| Err::XmlElemParseErr(format!("{:?}", node.tag_name())))
}
fn must_get_node_text<'a>(node: &'a roxmltree::Node) -> Result<&'a str, Err> {
node.text()
.ok_or(Err::XmlElemMissing(node.tag_name().name().to_string()))
}
fn parse_object_infos(node: roxmltree::Node) -> Result<Vec<ObjectInfo>, Err> {
let mut object_infos: Vec<ObjectInfo> = Vec::new();
let contents_nodes = node
.descendants()
.filter(|node| node.has_tag_name("Contents"));
for node in contents_nodes {
let keys = node.children().filter(|node| node.has_tag_name("Key"));
let mtimes = node
.children()
.filter(|node| node.has_tag_name("LastModified"));
let etags = node.children().filter(|node| node.has_tag_name("ETag"));
let sizes = node.children().filter(|node| node.has_tag_name("Size"));
let storage_classes = node
.children()
.filter(|node| node.has_tag_name("StorageClass"));
for (key, (mtime, (etag, (size, storage_class)))) in
keys.zip(mtimes.zip(etags.zip(sizes.zip(storage_classes))))
{
let sz: i64 = parse_tag_content(&size)?;
let key_text = must_get_node_text(&key)?;
let mtime_text = must_get_node_text(&mtime)?;
let etag_text = must_get_node_text(&etag)?;
let storage_class_text = must_get_node_text(&storage_class)?;
let object_info = ObjectInfo::new(
key_text,
mtime_text,
etag_text,
sz,
storage_class_text,
HashMap::new(),
)?;
object_infos.push(object_info);
}
}
Ok(object_infos)
}
fn parse_list_objects_result(doc: roxmltree::Document) -> Result<ListObjectsResp, Err> {
let root = doc.root_element();
let bucket_name =
get_child_node(&root, "Name").ok_or(Err::XmlElemMissing("Name".to_string()))?;
let prefix = get_child_node_or(&root, "Prefix", "");
let key_count: i32 = parse_child_content(&root, "KeyCount")?;
let max_keys: i32 = parse_child_content(&root, "MaxKeys")?;
let is_truncated: bool = parse_child_content(&root, "IsTruncated")?;
let object_infos = parse_object_infos(root)?;
Ok(ListObjectsResp {
bucket_name: bucket_name.to_string(),
prefix: prefix.to_string(),
max_keys: max_keys,
key_count: key_count,
is_truncated: is_truncated,
next_continuation_token: "".to_string(),
common_prefixes: Vec::new(),
object_infos: object_infos,
})
}

src/s3/args.rs (new file)

@@ -0,0 +1,981 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::sse::{Sse, SseCustomerKey};
use crate::s3::types::{DeleteObject, Item, Part, Retention, SelectProgress, SelectRequest};
use crate::s3::utils::{
check_bucket_name, merge, to_http_header_value, to_iso8601utc, urlencode, Multimap, UtcTime,
};
use derivative::Derivative;
const MIN_PART_SIZE: usize = 5_242_880; // 5 MiB
const MAX_PART_SIZE: usize = 5_368_709_120; // 5 GiB
const MAX_OBJECT_SIZE: usize = 5_497_558_138_880; // 5 TiB
const MAX_MULTIPART_COUNT: u16 = 10_000;
fn object_write_args_headers(
extra_headers: Option<&Multimap>,
headers: Option<&Multimap>,
user_metadata: Option<&Multimap>,
sse: Option<&dyn Sse>,
tags: Option<&std::collections::HashMap<String, String>>,
retention: Option<&Retention>,
legal_hold: bool,
) -> Multimap {
let mut map = Multimap::new();
if let Some(v) = extra_headers {
merge(&mut map, v);
}
if let Some(v) = headers {
merge(&mut map, v);
}
if let Some(v) = user_metadata {
merge(&mut map, v);
}
if let Some(v) = sse {
merge(&mut map, &v.headers());
}
if let Some(v) = tags {
let mut tagging = String::new();
for (key, value) in v.iter() {
if !tagging.is_empty() {
tagging.push_str("&");
}
tagging.push_str(&urlencode(key));
tagging.push_str("=");
tagging.push_str(&urlencode(value));
}
if !tagging.is_empty() {
map.insert(String::from("x-amz-tagging"), tagging);
}
}
if let Some(v) = retention {
map.insert(String::from("x-amz-object-lock-mode"), v.mode.to_string());
map.insert(
String::from("x-amz-object-lock-retain-until-date"),
to_iso8601utc(v.retain_until_date),
);
}
if legal_hold {
map.insert(
String::from("x-amz-object-lock-legal-hold"),
String::from("ON"),
);
}
return map;
}
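// Illustrative example (not part of the original file): with
//   tags       = {"project": "alpha"}
//   retention  = None
//   legal_hold = true
// the function above produces a map containing:
//   x-amz-tagging: project=alpha
//   x-amz-object-lock-legal-hold: ON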
fn calc_part_info(
object_size: Option<usize>,
part_size: Option<usize>,
) -> Result<(usize, i16), Error> {
if let Some(v) = part_size {
if v < MIN_PART_SIZE {
return Err(Error::InvalidMinPartSize(v));
}
if v > MAX_PART_SIZE {
return Err(Error::InvalidMaxPartSize(v));
}
}
if let Some(v) = object_size {
if v > MAX_OBJECT_SIZE {
return Err(Error::InvalidObjectSize(v));
}
} else {
if part_size.is_none() {
return Err(Error::MissingPartSize);
}
return Ok((part_size.unwrap(), -1));
}
let mut psize = 0_usize;
if part_size.is_none() {
psize = (object_size.unwrap() as f64 / MAX_MULTIPART_COUNT as f64).ceil() as usize;
psize = MIN_PART_SIZE * (psize as f64 / MIN_PART_SIZE as f64).ceil() as usize;
}
if psize > object_size.unwrap() {
psize = object_size.unwrap();
}
let mut part_count = 1_i16;
if psize > 0 {
part_count = (object_size.unwrap() as f64 / psize as f64).ceil() as i16;
}
if part_count as u16 > MAX_MULTIPART_COUNT {
return Err(Error::InvalidPartCount(
object_size.unwrap(),
psize,
MAX_MULTIPART_COUNT,
));
}
return Ok((psize, part_count));
}
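// Worked example (illustrative, not part of the original file): for
// object_size = 16 GiB (17_179_869_184 bytes) and part_size = None:
//   ceil(17_179_869_184 / 10_000)                 = 1_717_987 bytes
//   rounded up to a MIN_PART_SIZE multiple        = 5_242_880 bytes (5 MiB)
//   part_count = ceil(17_179_869_184 / 5_242_880) = 3_277 (within 10_000)
// so the function returns Ok((5_242_880, 3_277)).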
#[derive(Clone, Debug, Default)]
pub struct BucketArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
}
impl<'a> BucketArgs<'a> {
pub fn new(bucket_name: &'a str) -> Result<BucketArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(BucketArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
})
}
}
pub type BucketExistsArgs<'a> = BucketArgs<'a>;
pub type RemoveBucketArgs<'a> = BucketArgs<'a>;
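// Illustrative use (not part of the original file): validation happens in the
// constructor, and the aliases share it.
//
//   let mut args = BucketExistsArgs::new("my-bucket")?; // validates the name
//   args.region = Some("us-east-1");                    // fields are public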
#[derive(Clone, Debug, Default)]
pub struct ObjectArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
}
impl<'a> ObjectArgs<'a> {
pub fn new(bucket_name: &'a str, object_name: &'a str) -> Result<ObjectArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
Ok(ObjectArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
})
}
}
#[derive(Clone, Debug, Default)]
pub struct ObjectVersionArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub version_id: Option<&'a str>,
}
impl<'a> ObjectVersionArgs<'a> {
pub fn new(bucket_name: &'a str, object_name: &'a str) -> Result<ObjectVersionArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
Ok(ObjectVersionArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
version_id: None,
})
}
}
pub type RemoveObjectArgs<'a> = ObjectVersionArgs<'a>;
#[derive(Clone, Debug, Default)]
pub struct MakeBucketArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object_lock: bool,
}
impl<'a> MakeBucketArgs<'a> {
pub fn new(bucket_name: &'a str) -> Result<MakeBucketArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(MakeBucketArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object_lock: false,
})
}
}
#[derive(Clone, Debug, Default)]
pub struct ListBucketsArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
}
impl<'a> ListBucketsArgs<'a> {
pub fn new() -> ListBucketsArgs<'a> {
ListBucketsArgs::default()
}
}
#[derive(Clone, Debug, Default)]
pub struct AbortMultipartUploadArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub upload_id: &'a str,
}
impl<'a> AbortMultipartUploadArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
upload_id: &'a str,
) -> Result<AbortMultipartUploadArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
if upload_id.is_empty() {
return Err(Error::InvalidUploadId(String::from(
"upload ID cannot be empty",
)));
}
Ok(AbortMultipartUploadArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
upload_id: upload_id,
})
}
}
#[derive(Clone, Debug)]
pub struct CompleteMultipartUploadArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub upload_id: &'a str,
pub parts: &'a Vec<Part>,
}
impl<'a> CompleteMultipartUploadArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
upload_id: &'a str,
parts: &'a Vec<Part>,
) -> Result<CompleteMultipartUploadArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
if upload_id.is_empty() {
return Err(Error::InvalidUploadId(String::from(
"upload ID cannot be empty",
)));
}
if parts.len() == 0 {
return Err(Error::EmptyParts(String::from("parts cannot be empty")));
}
Ok(CompleteMultipartUploadArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
upload_id: upload_id,
parts: parts,
})
}
}
#[derive(Clone, Debug, Default)]
pub struct CreateMultipartUploadArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub headers: Option<&'a Multimap>,
}
impl<'a> CreateMultipartUploadArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
) -> Result<CreateMultipartUploadArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
Ok(CreateMultipartUploadArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
headers: None,
})
}
}
#[derive(Clone, Debug, Default)]
pub struct PutObjectApiArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub headers: Option<&'a Multimap>,
pub user_metadata: Option<&'a Multimap>,
pub sse: Option<&'a dyn Sse>,
pub tags: Option<&'a std::collections::HashMap<String, String>>,
pub retention: Option<&'a Retention>,
pub legal_hold: bool,
pub data: &'a [u8],
pub query_params: Option<&'a Multimap>,
}
impl<'a> PutObjectApiArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
data: &'a [u8],
) -> Result<PutObjectApiArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
Ok(PutObjectApiArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
headers: None,
user_metadata: None,
sse: None,
tags: None,
retention: None,
legal_hold: false,
data: data,
query_params: None,
})
}
pub fn get_headers(&self) -> Multimap {
object_write_args_headers(
self.extra_headers,
self.headers,
self.user_metadata,
self.sse,
self.tags,
self.retention,
self.legal_hold,
)
}
}
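// Usage sketch: single-call upload of an in-memory buffer through the
// low-level API, with an explicit Content-Type header. The async
// `Client::put_object_api` call is an assumption from the commit's API list;
// the client.rs diff is suppressed below.
async fn put_bytes_example(client: &crate::s3::client::Client) -> Result<(), Error> {
    let mut headers = Multimap::new();
    headers.insert(String::from("Content-Type"), String::from("text/plain"));
    let mut args = PutObjectApiArgs::new("my-bucket", "greeting.txt", b"hello world")?;
    args.headers = Some(&headers);
    let resp = client.put_object_api(&args).await?;
    println!("uploaded; etag: {}", resp.etag);
    Ok(())
}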
#[derive(Clone, Debug, Default)]
pub struct UploadPartArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub headers: Option<&'a Multimap>,
pub user_metadata: Option<&'a Multimap>,
pub sse: Option<&'a dyn Sse>,
pub tags: Option<&'a std::collections::HashMap<String, String>>,
pub retention: Option<&'a Retention>,
pub legal_hold: bool,
pub upload_id: &'a str,
pub part_number: u16,
pub data: &'a [u8],
}
impl<'a> UploadPartArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
upload_id: &'a str,
part_number: u16,
data: &'a [u8],
) -> Result<UploadPartArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
if upload_id.is_empty() {
return Err(Error::InvalidUploadId(String::from(
"upload ID cannot be empty",
)));
}
        if part_number < 1 || part_number > 10000 {
            return Err(Error::InvalidPartNumber(String::from(
                "part number must be between 1 and 10000",
            )));
        }
Ok(UploadPartArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
headers: None,
user_metadata: None,
sse: None,
tags: None,
retention: None,
legal_hold: false,
upload_id: upload_id,
part_number: part_number,
data: data,
})
}
pub fn get_headers(&self) -> Multimap {
object_write_args_headers(
self.extra_headers,
self.headers,
self.user_metadata,
self.sse,
self.tags,
self.retention,
self.legal_hold,
)
}
}
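// Usage sketch: upload chunks as numbered parts and collect the etags needed
// by CompleteMultipartUploadArgs. `Client::upload_part` is an assumption from
// the commit's API list; the client.rs diff is suppressed below.
async fn upload_parts_example(
    client: &crate::s3::client::Client,
    upload_id: &str,
    chunks: &[Vec<u8>], // every chunk except the last must be at least 5 MiB
) -> Result<Vec<Part>, Error> {
    let mut parts = Vec::new();
    for (i, chunk) in chunks.iter().enumerate() {
        let part_number = (i + 1) as u16; // part numbers are 1-based
        let args = UploadPartArgs::new("my-bucket", "my-object", upload_id, part_number, chunk)?;
        let resp = client.upload_part(&args).await?;
        parts.push(Part {
            number: part_number,
            etag: resp.etag,
        });
    }
    Ok(parts)
}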
pub struct PutObjectArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub headers: Option<&'a Multimap>,
pub user_metadata: Option<&'a Multimap>,
pub sse: Option<&'a dyn Sse>,
pub tags: Option<&'a std::collections::HashMap<String, String>>,
pub retention: Option<&'a Retention>,
pub legal_hold: bool,
pub object_size: Option<usize>,
pub part_size: usize,
pub part_count: i16,
pub content_type: &'a str,
pub stream: &'a mut dyn std::io::Read,
}
impl<'a> PutObjectArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
stream: &'a mut dyn std::io::Read,
object_size: Option<usize>,
part_size: Option<usize>,
) -> Result<PutObjectArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
let (psize, part_count) = calc_part_info(object_size, part_size)?;
Ok(PutObjectArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
headers: None,
user_metadata: None,
sse: None,
tags: None,
retention: None,
legal_hold: false,
object_size: object_size,
part_size: psize,
part_count: part_count,
content_type: "application/octet-stream",
stream: stream,
})
}
pub fn get_headers(&self) -> Multimap {
object_write_args_headers(
self.extra_headers,
self.headers,
self.user_metadata,
self.sse,
self.tags,
self.retention,
self.legal_hold,
)
}
}
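// Usage sketch: stream an object of known size from a file and let
// `calc_part_info` choose the part size (pass `None`). `Client::put_object`
// is an assumption from the commit's API list; the client.rs diff is
// suppressed below.
async fn put_stream_example(client: &crate::s3::client::Client) -> Result<(), Error> {
    let mut file = std::fs::File::open("/tmp/payload.bin")?;
    let size = file.metadata()?.len() as usize;
    let mut args = PutObjectArgs::new("my-bucket", "payload.bin", &mut file, Some(size), None)?;
    args.content_type = "application/octet-stream";
    let resp = client.put_object(&mut args).await?;
    println!("uploaded; etag: {}", resp.etag);
    Ok(())
}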
#[derive(Clone, Debug, Default)]
pub struct ObjectConditionalReadArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub version_id: Option<&'a str>,
pub ssec: Option<&'a SseCustomerKey>,
pub offset: Option<usize>,
pub length: Option<usize>,
pub match_etag: Option<&'a str>,
pub not_match_etag: Option<&'a str>,
pub modified_since: Option<UtcTime>,
pub unmodified_since: Option<UtcTime>,
}
impl<'a> ObjectConditionalReadArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
) -> Result<ObjectConditionalReadArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
Ok(ObjectConditionalReadArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
version_id: None,
ssec: None,
offset: None,
length: None,
match_etag: None,
not_match_etag: None,
modified_since: None,
unmodified_since: None,
})
}
pub fn get_headers(&self) -> Multimap {
let (offset, length) = match self.length {
Some(_) => (Some(self.offset.unwrap_or(0_usize)), self.length),
None => (self.offset, None),
};
let mut range = String::new();
if let Some(o) = offset {
range.push_str("bytes=");
range.push_str(&o.to_string());
range.push_str("-");
if let Some(l) = length {
range.push_str(&(o + l - 1).to_string());
}
}
let mut headers = Multimap::new();
if !range.is_empty() {
headers.insert(String::from("Range"), range.clone());
}
if let Some(v) = self.match_etag {
headers.insert(String::from("if-match"), v.to_string());
}
if let Some(v) = self.not_match_etag {
headers.insert(String::from("if-none-match"), v.to_string());
}
if let Some(v) = self.modified_since {
headers.insert(String::from("if-modified-since"), to_http_header_value(v));
}
if let Some(v) = self.unmodified_since {
headers.insert(String::from("if-unmodified-since"), to_http_header_value(v));
}
if let Some(v) = self.ssec {
merge(&mut headers, &v.headers());
}
return headers;
}
pub fn get_copy_headers(&self) -> Multimap {
let mut headers = Multimap::new();
let mut copy_source = String::from("/");
copy_source.push_str(self.bucket);
copy_source.push_str("/");
copy_source.push_str(self.object);
if let Some(v) = self.version_id {
copy_source.push_str("?versionId=");
copy_source.push_str(&urlencode(v));
}
headers.insert(String::from("x-amz-copy-source"), copy_source.to_string());
if let Some(v) = self.match_etag {
headers.insert(String::from("x-amz-copy-source-if-match"), v.to_string());
}
if let Some(v) = self.not_match_etag {
headers.insert(
String::from("x-amz-copy-source-if-none-match"),
v.to_string(),
);
}
if let Some(v) = self.modified_since {
headers.insert(
String::from("x-amz-copy-source-if-modified-since"),
to_http_header_value(v),
);
}
if let Some(v) = self.unmodified_since {
headers.insert(
String::from("x-amz-copy-source-if-unmodified-since"),
to_http_header_value(v),
);
}
if let Some(v) = self.ssec {
merge(&mut headers, &v.copy_headers());
}
return headers;
}
}
pub type GetObjectArgs<'a> = ObjectConditionalReadArgs<'a>;
pub type StatObjectArgs<'a> = ObjectConditionalReadArgs<'a>;
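// Usage sketch: a ranged, conditional GET. `offset`/`length` turn into a
// `Range: bytes=1024-5119` header and `match_etag` into `if-match` (see
// get_headers above). `Client::get_object` is an assumption from the
// commit's API list; the client.rs diff is suppressed below.
async fn ranged_get_example(client: &crate::s3::client::Client) -> Result<(), Error> {
    let mut args = GetObjectArgs::new("my-bucket", "payload.bin")?;
    args.offset = Some(1024);
    args.length = Some(4096);
    args.match_etag = Some("0f343b0931126a20f133d67c2b018a3b");
    let _resp = client.get_object(&args).await?;
    Ok(())
}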
#[derive(Derivative, Clone, Debug, Default)]
pub struct RemoveObjectsApiArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub bypass_governance_mode: bool,
#[derivative(Default(value = "true"))]
pub quiet: bool,
pub objects: &'a [DeleteObject<'a>],
}
impl<'a> RemoveObjectsApiArgs<'a> {
pub fn new(
bucket_name: &'a str,
objects: &'a [DeleteObject],
) -> Result<RemoveObjectsApiArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(RemoveObjectsApiArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
bypass_governance_mode: false,
quiet: true,
objects: objects,
})
}
}
pub struct RemoveObjectsArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub bypass_governance_mode: bool,
pub objects: &'a mut core::slice::Iter<'a, DeleteObject<'a>>,
}
impl<'a> RemoveObjectsArgs<'a> {
pub fn new(
bucket_name: &'a str,
objects: &'a mut core::slice::Iter<'a, DeleteObject<'a>>,
) -> Result<RemoveObjectsArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(RemoveObjectsArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
bypass_governance_mode: false,
objects: objects,
})
}
}
pub struct ListObjectsV1Args<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub delimiter: Option<&'a str>,
pub encoding_type: Option<&'a str>,
pub max_keys: Option<u16>,
pub prefix: Option<&'a str>,
pub marker: Option<String>,
}
impl<'a> ListObjectsV1Args<'a> {
pub fn new(bucket_name: &'a str) -> Result<ListObjectsV1Args<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(ListObjectsV1Args {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
delimiter: None,
encoding_type: None,
max_keys: None,
prefix: None,
marker: None,
})
}
}
pub struct ListObjectsV2Args<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub delimiter: Option<&'a str>,
pub encoding_type: Option<&'a str>,
pub max_keys: Option<u16>,
pub prefix: Option<&'a str>,
pub start_after: Option<String>,
pub continuation_token: Option<String>,
pub fetch_owner: bool,
pub include_user_metadata: bool,
}
impl<'a> ListObjectsV2Args<'a> {
pub fn new(bucket_name: &'a str) -> Result<ListObjectsV2Args<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(ListObjectsV2Args {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
delimiter: None,
encoding_type: None,
max_keys: None,
prefix: None,
start_after: None,
continuation_token: None,
fetch_owner: false,
include_user_metadata: false,
})
}
}
pub struct ListObjectVersionsArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub delimiter: Option<&'a str>,
pub encoding_type: Option<&'a str>,
pub max_keys: Option<u16>,
pub prefix: Option<&'a str>,
pub key_marker: Option<String>,
pub version_id_marker: Option<String>,
}
impl<'a> ListObjectVersionsArgs<'a> {
pub fn new(bucket_name: &'a str) -> Result<ListObjectVersionsArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(ListObjectVersionsArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
delimiter: None,
encoding_type: None,
max_keys: None,
prefix: None,
key_marker: None,
version_id_marker: None,
})
}
}
pub struct ListObjectsArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub delimiter: Option<&'a str>,
pub use_url_encoding_type: bool,
pub marker: Option<&'a str>, // only for ListObjectsV1.
pub start_after: Option<&'a str>, // only for ListObjectsV2.
    pub key_marker: Option<&'a str>, // only for ListObjectVersions.
pub max_keys: Option<u16>,
pub prefix: Option<&'a str>,
pub continuation_token: Option<&'a str>, // only for ListObjectsV2.
pub fetch_owner: bool, // only for ListObjectsV2.
    pub version_id_marker: Option<&'a str>, // only for ListObjectVersions.
pub include_user_metadata: bool, // MinIO extension for ListObjectsV2.
pub recursive: bool,
pub use_api_v1: bool,
pub include_versions: bool,
pub result_fn: &'a dyn Fn(Result<&Item, Error>) -> bool,
}
impl<'a> ListObjectsArgs<'a> {
pub fn new(
bucket_name: &'a str,
result_fn: &'a dyn Fn(Result<&Item, Error>) -> bool,
) -> Result<ListObjectsArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
Ok(ListObjectsArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
delimiter: None,
use_url_encoding_type: true,
marker: None,
start_after: None,
key_marker: None,
max_keys: None,
prefix: None,
continuation_token: None,
fetch_owner: false,
version_id_marker: None,
include_user_metadata: false,
recursive: false,
use_api_v1: false,
include_versions: false,
result_fn: result_fn,
})
}
}
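// Usage sketch: recursively list a prefix, collecting object names through
// the `result_fn` callback; returning `true` continues the listing.
// `Client::list_objects` is an assumption from the commit's API list; the
// client.rs diff is suppressed below.
async fn list_example(client: &crate::s3::client::Client) -> Result<(), Error> {
    let names = std::cell::RefCell::new(Vec::<String>::new());
    let capture = |res: Result<&Item, Error>| -> bool {
        match res {
            Ok(item) => {
                names.borrow_mut().push(item.name.clone());
                true // keep going
            }
            Err(_) => false, // stop on error
        }
    };
    let mut args = ListObjectsArgs::new("my-bucket", &capture)?;
    args.prefix = Some("logs/");
    args.recursive = true;
    client.list_objects(&args).await?;
    println!("found {} objects", names.borrow().len());
    Ok(())
}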
pub struct SelectObjectContentArgs<'a> {
pub extra_headers: Option<&'a Multimap>,
pub extra_query_params: Option<&'a Multimap>,
pub region: Option<&'a str>,
pub bucket: &'a str,
pub object: &'a str,
pub version_id: Option<&'a str>,
pub ssec: Option<&'a SseCustomerKey>,
pub request: &'a SelectRequest<'a>,
}
impl<'a> SelectObjectContentArgs<'a> {
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
request: &'a SelectRequest,
) -> Result<SelectObjectContentArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
Ok(SelectObjectContentArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
version_id: None,
ssec: None,
request: request,
})
}
}

1888
src/s3/client.rs Normal file

File diff suppressed because it is too large

48
src/s3/creds.rs Normal file

@@ -0,0 +1,48 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#[derive(Clone, Debug, Default)]
pub struct Credentials {
pub access_key: String,
pub secret_key: String,
pub session_token: Option<String>,
}
pub trait Provider: std::fmt::Debug {
fn fetch(&self) -> Credentials;
}
#[derive(Clone, Debug)]
pub struct StaticProvider {
creds: Credentials,
}
impl StaticProvider {
pub fn new(access_key: &str, secret_key: &str, session_token: Option<&str>) -> StaticProvider {
StaticProvider {
creds: Credentials {
access_key: access_key.to_string(),
secret_key: secret_key.to_string(),
session_token: session_token.map(|v| v.to_string()),
},
}
}
}
impl Provider for StaticProvider {
fn fetch(&self) -> Credentials {
self.creds.clone()
}
}
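// Usage sketch: fixed credentials for development against a local MinIO
// server; `fetch()` just clones the stored values.
fn static_provider_example() {
    let provider = StaticProvider::new("minioadmin", "minioadmin", None);
    let creds = provider.fetch();
    assert_eq!(creds.access_key, "minioadmin");
    assert!(creds.session_token.is_none());
}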

187
src/s3/error.rs Normal file

@@ -0,0 +1,187 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate alloc;
use crate::s3::utils::get_default_text;
use bytes::{Buf, Bytes};
use std::fmt;
use xmltree::Element;
#[derive(Clone, Debug, Default)]
pub struct ErrorResponse {
pub code: String,
pub message: String,
pub resource: String,
pub request_id: String,
pub host_id: String,
pub bucket_name: String,
pub object_name: String,
}
impl ErrorResponse {
pub fn parse(body: &mut Bytes) -> Result<ErrorResponse, Error> {
let root = match Element::parse(body.reader()) {
Ok(v) => v,
Err(e) => return Err(Error::XmlParseError(e)),
};
Ok(ErrorResponse {
code: get_default_text(&root, "Code"),
message: get_default_text(&root, "Message"),
resource: get_default_text(&root, "Resource"),
request_id: get_default_text(&root, "RequestId"),
host_id: get_default_text(&root, "HostId"),
            bucket_name: get_default_text(&root, "BucketName"),
object_name: get_default_text(&root, "Key"),
})
}
}
#[derive(Debug)]
pub enum Error {
TimeParseError(chrono::ParseError),
InvalidUrl(http::uri::InvalidUri),
IOError(std::io::Error),
XmlParseError(xmltree::ParseError),
HttpError(reqwest::Error),
StrError(reqwest::header::ToStrError),
IntError(std::num::ParseIntError),
BoolError(std::str::ParseBoolError),
Utf8Error(alloc::string::FromUtf8Error),
XmlError(String),
InvalidBucketName(String),
InvalidBaseUrl(String),
UrlBuildError(String),
RegionMismatch(String, String),
S3Error(ErrorResponse),
InvalidResponse(u16, String),
ServerError(u16),
InvalidObjectName(String),
InvalidUploadId(String),
InvalidPartNumber(String),
EmptyParts(String),
InvalidRetentionMode(String),
InvalidMinPartSize(usize),
InvalidMaxPartSize(usize),
InvalidObjectSize(usize),
MissingPartSize,
InvalidPartCount(usize, usize, u16),
SseTlsRequired,
InsufficientData(usize, usize),
InvalidLegalHold(String),
InvalidSelectExpression(String),
InvalidHeaderValueType(u8),
CrcMismatch(String, u32, u32),
UnknownEventType(String),
SelectError(String, String),
}
impl std::error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::TimeParseError(e) => write!(f, "{}", e),
Error::InvalidUrl(e) => write!(f, "{}", e),
Error::IOError(e) => write!(f, "{}", e),
Error::XmlParseError(e) => write!(f, "{}", e),
Error::HttpError(e) => write!(f, "{}", e),
Error::StrError(e) => write!(f, "{}", e),
Error::IntError(e) => write!(f, "{}", e),
Error::BoolError(e) => write!(f, "{}", e),
Error::Utf8Error(e) => write!(f, "{}", e),
Error::XmlError(m) => write!(f, "{}", m),
Error::InvalidBucketName(m) => write!(f, "{}", m),
Error::InvalidObjectName(m) => write!(f, "{}", m),
Error::InvalidUploadId(m) => write!(f, "{}", m),
Error::InvalidPartNumber(m) => write!(f, "{}", m),
Error::EmptyParts(m) => write!(f, "{}", m),
Error::InvalidRetentionMode(m) => write!(f, "invalid retention mode {}", m),
Error::InvalidMinPartSize(s) => write!(f, "part size {} is not supported; minimum allowed 5MiB", s),
Error::InvalidMaxPartSize(s) => write!(f, "part size {} is not supported; maximum allowed 5GiB", s),
Error::InvalidObjectSize(s) => write!(f, "object size {} is not supported; maximum allowed 5TiB", s),
Error::MissingPartSize => write!(f, "valid part size must be provided when object size is unknown"),
Error::InvalidPartCount(os, ps, pc) => write!(f, "object size {} and part size {} make more than {} parts for upload", os, ps, pc),
Error::SseTlsRequired => write!(f, "SSE operation must be performed over a secure connection"),
Error::InsufficientData(ps, br) => write!(f, "not enough data in the stream; expected: {}, got: {} bytes", ps, br),
Error::InvalidBaseUrl(m) => write!(f, "{}", m),
Error::UrlBuildError(m) => write!(f, "{}", m),
Error::InvalidLegalHold(s) => write!(f, "invalid legal hold {}", s),
Error::RegionMismatch(br, r) => write!(f, "region must be {}, but passed {}", br, r),
Error::S3Error(er) => write!(f, "s3 operation failed; code: {}, message: {}, resource: {}, request_id: {}, host_id: {}, bucket_name: {}, object_name: {}", er.code, er.message, er.resource, er.request_id, er.host_id, er.bucket_name, er.object_name),
Error::InvalidResponse(sc, ct) => write!(f, "invalid response received; status code: {}; content-type: {}", sc, ct),
Error::ServerError(sc) => write!(f, "server failed with HTTP status code {}", sc),
Error::InvalidSelectExpression(m) => write!(f, "{}", m),
Error::InvalidHeaderValueType(v) => write!(f, "invalid header value type {}", v),
Error::CrcMismatch(t, e, g) => write!(f, "{} CRC mismatch; expected: {}, got: {}", t, e, g),
Error::UnknownEventType(et) => write!(f, "unknown event type {}", et),
Error::SelectError(ec, em) => write!(f, "error code: {}, error message: {}", ec, em),
}
}
}
impl From<chrono::ParseError> for Error {
fn from(err: chrono::ParseError) -> Self {
Error::TimeParseError(err)
}
}
impl From<http::uri::InvalidUri> for Error {
fn from(err: http::uri::InvalidUri) -> Self {
Error::InvalidUrl(err)
}
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::IOError(err)
}
}
impl From<xmltree::ParseError> for Error {
fn from(err: xmltree::ParseError) -> Self {
Error::XmlParseError(err)
}
}
impl From<reqwest::Error> for Error {
fn from(err: reqwest::Error) -> Self {
Error::HttpError(err)
}
}
impl From<reqwest::header::ToStrError> for Error {
fn from(err: reqwest::header::ToStrError) -> Self {
Error::StrError(err)
}
}
impl From<std::num::ParseIntError> for Error {
fn from(err: std::num::ParseIntError) -> Self {
Error::IntError(err)
}
}
impl From<std::str::ParseBoolError> for Error {
fn from(err: std::str::ParseBoolError) -> Self {
Error::BoolError(err)
}
}
impl From<alloc::string::FromUtf8Error> for Error {
fn from(err: alloc::string::FromUtf8Error) -> Self {
Error::Utf8Error(err)
}
}
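// Usage sketch: the `From` conversions above let `?` map third-party and std
// errors into this crate's `Error` without explicit match arms.
fn parse_port_example(s: &str) -> Result<u16, Error> {
    let port = s.parse::<u16>()?; // ParseIntError becomes Error::IntError
    Ok(port)
}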

279
src/s3/http.rs Normal file

@@ -0,0 +1,279 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::utils::{to_query_string, Multimap};
use derivative::Derivative;
use hyper::http::Method;
use hyper::Uri;
use std::fmt;
#[derive(Derivative)]
#[derivative(Clone, Debug, Default)]
pub struct Url {
#[derivative(Default(value = "true"))]
pub https: bool,
pub host: String,
pub port: u16,
pub path: String,
pub query: Multimap,
}
impl Url {
pub fn host_header_value(&self) -> String {
if self.port > 0 {
return format!("{}:{}", self.host, self.port);
}
return self.host.clone();
}
}
impl fmt::Display for Url {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if self.host.is_empty() {
return Err(std::fmt::Error);
}
if self.https {
f.write_str("https://")?;
} else {
f.write_str("http://")?;
}
if self.port > 0 {
f.write_str(format!("{}:{}", self.host, self.port).as_str())?;
} else {
f.write_str(&self.host)?;
}
if !self.path.starts_with("/") {
f.write_str("/")?;
}
f.write_str(&self.path)?;
if !self.query.is_empty() {
f.write_str("?")?;
f.write_str(&to_query_string(&self.query))?;
}
Ok(())
}
}
fn extract_region(host: &str) -> String {
let tokens: Vec<&str> = host.split('.').collect();
let region = match tokens.get(1) {
Some(r) => match *r {
"dualstack" => match tokens.get(2) {
Some(t) => t,
_ => "",
},
"amazonaws" => "",
_ => r,
},
_ => "",
};
return region.to_string();
}
#[derive(Derivative)]
#[derivative(Clone, Debug, Default)]
pub struct BaseUrl {
#[derivative(Default(value = "true"))]
pub https: bool,
host: String,
port: u16,
pub region: String,
aws_host: bool,
accelerate_host: bool,
dualstack_host: bool,
virtual_style: bool,
}
impl BaseUrl {
pub fn build_url(
&self,
method: &Method,
region: &String,
query: &Multimap,
bucket_name: Option<&str>,
object_name: Option<&str>,
) -> Result<Url, Error> {
if !object_name.map_or(true, |v| v.is_empty()) {
if bucket_name.map_or(true, |v| v.is_empty()) {
return Err(Error::UrlBuildError(String::from(
"empty bucket name provided for object name",
)));
}
}
let mut url = Url::default();
url.https = self.https;
url.host = self.host.clone();
url.port = self.port;
url.query = query.clone();
if bucket_name.is_none() {
url.path.push_str("/");
if self.aws_host {
url.host = format!("s3.{}.{}", region, self.host);
}
return Ok(url);
}
let bucket = bucket_name.unwrap();
let enforce_path_style = true &&
// CreateBucket API requires path style in Amazon AWS S3.
(method == Method::PUT && object_name.is_none() && query.is_empty()) ||
// GetBucketLocation API requires path style in Amazon AWS S3.
query.contains_key("location") ||
// Use path style for bucket name containing '.' which causes
// SSL certificate validation error.
(bucket.contains('.') && self.https);
if self.aws_host {
let mut s3_domain = "s3.".to_string();
if self.accelerate_host {
if bucket.contains('.') {
return Err(Error::UrlBuildError(String::from(
"bucket name with '.' is not allowed for accelerate endpoint",
)));
}
if !enforce_path_style {
s3_domain = "s3-accelerate.".to_string();
}
}
if self.dualstack_host {
s3_domain.push_str("dualstack.");
}
if enforce_path_style || !self.accelerate_host {
s3_domain.push_str(region);
s3_domain.push_str(".");
}
url.host = s3_domain + &url.host;
}
if enforce_path_style || !self.virtual_style {
url.path.push_str("/");
url.path.push_str(bucket);
} else {
url.host = format!("{}.{}", bucket, url.host);
}
        if let Some(name) = object_name {
            if !name.starts_with('/') {
                url.path.push_str("/");
            }
            // FIXME: urlencode path
            url.path.push_str(name);
        }
return Ok(url);
}
pub fn from_string(s: String) -> Result<BaseUrl, Error> {
let url = s.parse::<Uri>()?;
let https = match url.scheme() {
None => true,
Some(scheme) => match scheme.as_str() {
"http" => false,
"https" => true,
_ => {
return Err(Error::InvalidBaseUrl(String::from(
"scheme must be http or https",
)))
}
},
};
let mut host = match url.host() {
Some(h) => h,
_ => {
return Err(Error::InvalidBaseUrl(String::from(
"valid host must be provided",
)))
}
};
let ipv6host = "[".to_string() + host + "]";
if host.parse::<std::net::Ipv6Addr>().is_ok() {
host = &ipv6host;
}
let mut port = match url.port() {
Some(p) => p.as_u16(),
_ => 0u16,
};
if (https && port == 443) || (!https && port == 80) {
port = 0u16;
}
if url.path() != "/" && url.path() != "" {
return Err(Error::InvalidBaseUrl(String::from(
"path must be empty for base URL",
)));
}
        if url.query().is_some() {
return Err(Error::InvalidBaseUrl(String::from(
"query must be none for base URL",
)));
}
let mut accelerate_host = host.starts_with("s3-accelerate.");
let aws_host = (host.starts_with("s3.") || accelerate_host)
&& (host.ends_with(".amazonaws.com") || host.ends_with(".amazonaws.com.cn"));
let virtual_style = aws_host || host.ends_with("aliyuncs.com");
let mut region = String::new();
let mut dualstack_host = false;
if aws_host {
let mut aws_domain = "amazonaws.com";
region = extract_region(host);
let is_aws_china_host = host.ends_with(".cn");
if is_aws_china_host {
aws_domain = "amazonaws.com.cn";
if region.is_empty() {
return Err(Error::InvalidBaseUrl(String::from(
"region must be provided in Amazon S3 China endpoint",
)));
}
}
dualstack_host = host.contains(".dualstack.");
host = aws_domain;
} else {
accelerate_host = false;
}
return Ok(BaseUrl {
https: https,
host: host.to_string(),
port: port,
region: region,
aws_host: aws_host,
accelerate_host: accelerate_host,
dualstack_host: dualstack_host,
virtual_style: virtual_style,
});
}
}
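// Usage sketch: parse an AWS endpoint and build a virtual-host style object
// URL. For this input, build_url yields
// https://my-bucket.s3.us-east-1.amazonaws.com/my-object
fn base_url_example() -> Result<(), Error> {
    let base = BaseUrl::from_string("https://s3.us-east-1.amazonaws.com".to_string())?;
    let url = base.build_url(
        &Method::GET,
        &"us-east-1".to_string(),
        &Multimap::new(),
        Some("my-bucket"),
        Some("my-object"),
    )?;
    println!("{}", url); // uses the Display impl above
    Ok(())
}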

25
src/s3/mod.rs Normal file

@@ -0,0 +1,25 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub mod args;
pub mod client;
pub mod creds;
pub mod error;
pub mod http;
pub mod response;
pub mod signer;
pub mod sse;
pub mod types;
pub mod utils;

596
src/s3/response.rs Normal file

@@ -0,0 +1,596 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::types::{parse_legal_hold, Bucket, Item, RetentionMode, SelectProgress};
use crate::s3::utils::{
copy_slice, crc32, from_http_header_value, from_iso8601utc, get_text, uint32, UtcTime,
};
use reqwest::header::HeaderMap;
use std::collections::HashMap;
use std::collections::VecDeque;
use std::io::BufReader;
use xmltree::Element;
#[derive(Debug)]
pub struct ListBucketsResponse {
pub headers: HeaderMap,
pub buckets: Vec<Bucket>,
}
#[derive(Debug)]
pub struct BucketResponse {
pub headers: HeaderMap,
pub region: String,
pub bucket_name: String,
}
pub type MakeBucketResponse = BucketResponse;
pub type RemoveBucketResponse = BucketResponse;
#[derive(Debug)]
pub struct ObjectResponse {
pub headers: HeaderMap,
pub region: String,
pub bucket_name: String,
pub object_name: String,
pub version_id: Option<String>,
}
pub type RemoveObjectResponse = ObjectResponse;
#[derive(Debug)]
pub struct UploadIdResponse {
pub headers: HeaderMap,
pub region: String,
pub bucket_name: String,
pub object_name: String,
pub upload_id: String,
}
pub type AbortMultipartUploadResponse = UploadIdResponse;
pub type CreateMultipartUploadResponse = UploadIdResponse;
#[derive(Debug)]
pub struct PutObjectBaseResponse {
pub headers: HeaderMap,
pub bucket_name: String,
pub object_name: String,
pub location: String,
pub etag: String,
pub version_id: Option<String>,
}
pub type CompleteMultipartUploadResponse = PutObjectBaseResponse;
pub type PutObjectApiResponse = PutObjectBaseResponse;
pub type UploadPartResponse = PutObjectApiResponse;
pub type PutObjectResponse = PutObjectApiResponse;
#[derive(Debug)]
pub struct StatObjectResponse {
pub headers: HeaderMap,
pub region: String,
pub bucket_name: String,
pub object_name: String,
pub size: usize,
pub etag: String,
pub version_id: Option<String>,
pub last_modified: Option<UtcTime>,
pub retention_mode: Option<RetentionMode>,
pub retention_retain_until_date: Option<UtcTime>,
pub legal_hold: Option<bool>,
pub delete_marker: Option<bool>,
pub user_metadata: HashMap<String, String>,
}
impl StatObjectResponse {
pub fn new(
headers: &HeaderMap,
region: &str,
bucket_name: &str,
object_name: &str,
) -> Result<StatObjectResponse, Error> {
let size = match headers.get("Content-Length") {
Some(v) => v.to_str()?.parse::<usize>()?,
None => 0_usize,
};
let etag = match headers.get("ETag") {
Some(v) => v.to_str()?.trim_matches('"'),
None => "",
};
let version_id = match headers.get("x-amz-version-id") {
Some(v) => Some(v.to_str()?.to_string()),
None => None,
};
let last_modified = match headers.get("Last-Modified") {
Some(v) => Some(from_http_header_value(v.to_str()?)?),
None => None,
};
let retention_mode = match headers.get("x-amz-object-lock-mode") {
Some(v) => Some(RetentionMode::parse(v.to_str()?)?),
None => None,
};
let retention_retain_until_date = match headers.get("x-amz-object-lock-retain-until-date") {
Some(v) => Some(from_iso8601utc(v.to_str()?)?),
None => None,
};
let legal_hold = match headers.get("x-amz-object-lock-legal-hold") {
Some(v) => Some(parse_legal_hold(v.to_str()?)?),
None => None,
};
let delete_marker = match headers.get("x-amz-delete-marker") {
Some(v) => Some(v.to_str()?.parse::<bool>()?),
None => None,
};
let mut user_metadata: HashMap<String, String> = HashMap::new();
for (key, value) in headers.iter() {
if let Some(v) = key.as_str().strip_prefix("x-amz-meta-") {
user_metadata.insert(v.to_string(), value.to_str()?.to_string());
}
}
Ok(StatObjectResponse {
headers: headers.clone(),
region: region.to_string(),
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
size: size,
etag: etag.to_string(),
version_id: version_id,
last_modified: last_modified,
retention_mode: retention_mode,
retention_retain_until_date: retention_retain_until_date,
legal_hold: legal_hold,
delete_marker: delete_marker,
user_metadata: user_metadata,
})
}
}
#[derive(Clone, Debug)]
pub struct DeleteError {
pub code: String,
pub message: String,
pub object_name: String,
pub version_id: Option<String>,
}
#[derive(Clone, Debug)]
pub struct DeletedObject {
pub name: String,
pub version_id: Option<String>,
pub delete_marker: bool,
pub delete_marker_version_id: Option<String>,
}
#[derive(Clone, Debug)]
pub struct RemoveObjectsApiResponse {
pub headers: HeaderMap,
pub region: String,
pub bucket_name: String,
pub objects: Vec<DeletedObject>,
pub errors: Vec<DeleteError>,
}
pub type RemoveObjectsResponse = RemoveObjectsApiResponse;
#[derive(Clone, Debug)]
pub struct ListObjectsV1Response {
pub headers: HeaderMap,
pub name: String,
pub encoding_type: Option<String>,
pub prefix: Option<String>,
pub delimiter: Option<String>,
pub is_truncated: bool,
pub max_keys: Option<u16>,
pub contents: Vec<Item>,
pub marker: Option<String>,
pub next_marker: Option<String>,
}
#[derive(Clone, Debug)]
pub struct ListObjectsV2Response {
pub headers: HeaderMap,
pub name: String,
pub encoding_type: Option<String>,
pub prefix: Option<String>,
pub delimiter: Option<String>,
pub is_truncated: bool,
pub max_keys: Option<u16>,
pub contents: Vec<Item>,
pub key_count: Option<u16>,
pub start_after: Option<String>,
pub continuation_token: Option<String>,
pub next_continuation_token: Option<String>,
}
#[derive(Clone, Debug)]
pub struct ListObjectVersionsResponse {
pub headers: HeaderMap,
pub name: String,
pub encoding_type: Option<String>,
pub prefix: Option<String>,
pub delimiter: Option<String>,
pub is_truncated: bool,
pub max_keys: Option<u16>,
pub contents: Vec<Item>,
pub key_marker: Option<String>,
pub next_key_marker: Option<String>,
pub version_id_marker: Option<String>,
pub next_version_id_marker: Option<String>,
}
#[derive(Clone, Debug)]
pub struct ListObjectsResponse {
pub headers: HeaderMap,
pub name: String,
pub encoding_type: Option<String>,
pub prefix: Option<String>,
pub delimiter: Option<String>,
pub is_truncated: bool,
pub max_keys: Option<u16>,
pub contents: Vec<Item>,
// ListObjectsV1
pub marker: String,
pub next_marker: String,
// ListObjectsV2
pub key_count: u16,
pub start_after: String,
pub continuation_token: String,
pub next_continuation_token: String,
// ListObjectVersions
pub key_marker: String,
pub next_key_marker: String,
pub version_id_marker: String,
pub next_version_id_marker: String,
}
pub struct SelectObjectContentResponse {
pub headers: HeaderMap,
pub region: String,
pub bucket_name: String,
pub object_name: String,
pub progress: SelectProgress,
resp: reqwest::Response,
done: bool,
buf: VecDeque<u8>,
prelude: [u8; 8],
prelude_read: bool,
prelude_crc: [u8; 4],
prelude_crc_read: bool,
total_length: usize,
data: Vec<u8>,
data_read: bool,
message_crc: [u8; 4],
message_crc_read: bool,
payload: Vec<u8>,
payload_index: usize,
}
impl SelectObjectContentResponse {
pub fn new(
resp: reqwest::Response,
region: &str,
bucket_name: &str,
object_name: &str,
) -> SelectObjectContentResponse {
let headers = resp.headers().clone();
SelectObjectContentResponse {
headers: headers,
region: region.to_string(),
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
progress: SelectProgress {
bytes_scanned: 0,
bytes_progressed: 0,
bytes_returned: 0,
},
resp: resp,
done: false,
buf: VecDeque::<u8>::new(),
prelude: [0_u8; 8],
prelude_read: false,
prelude_crc: [0_u8; 4],
prelude_crc_read: false,
total_length: 0_usize,
data: Vec::<u8>::new(),
data_read: false,
message_crc: [0_u8; 4],
message_crc_read: false,
payload: Vec::<u8>::new(),
payload_index: 0,
}
}
fn reset(&mut self) {
self.buf.clear();
self.data.clear();
self.data_read = false;
self.prelude_read = false;
self.prelude_crc_read = false;
self.message_crc_read = false;
}
fn read_prelude(&mut self) -> Result<bool, Error> {
if self.buf.len() < 8 {
return Ok(false);
}
self.prelude_read = true;
for i in 0..8 {
self.prelude[i] = self.buf.pop_front().ok_or(Error::InsufficientData(8, i))?;
}
return Ok(true);
}
fn read_prelude_crc(&mut self) -> Result<bool, Error> {
if self.buf.len() < 4 {
return Ok(false);
}
self.prelude_crc_read = true;
for i in 0..4 {
self.prelude_crc[i] = self.buf.pop_front().ok_or(Error::InsufficientData(4, i))?;
}
return Ok(true);
}
fn read_data(&mut self) -> Result<bool, Error> {
let data_length = self.total_length - 8 - 4 - 4;
if self.buf.len() < data_length {
return Ok(false);
}
self.data = Vec::new();
self.data_read = true;
for i in 0..data_length {
self.data.push(
self.buf
.pop_front()
.ok_or(Error::InsufficientData(data_length, i))?,
);
}
return Ok(true);
}
fn read_message_crc(&mut self) -> Result<bool, Error> {
if self.buf.len() < 4 {
return Ok(false);
}
self.message_crc_read = true;
for i in 0..4 {
self.message_crc[i] = self.buf.pop_front().ok_or(Error::InsufficientData(4, i))?;
}
return Ok(true);
}
fn decode_header(&mut self, header_length: usize) -> Result<HashMap<String, String>, Error> {
let mut headers: HashMap<String, String> = HashMap::new();
let mut offset = 0_usize;
while offset < header_length {
let mut length = self.data[offset] as usize;
offset += 1;
if length == 0 {
break;
}
let name = String::from_utf8(self.data[offset..offset + length].to_vec())?;
offset += length;
if self.data[offset] != 7 {
return Err(Error::InvalidHeaderValueType(self.data[offset]));
}
offset += 1;
let b0 = self.data[offset] as u16;
offset += 1;
let b1 = self.data[offset] as u16;
offset += 1;
length = (b0 << 8 | b1) as usize;
let value = String::from_utf8(self.data[offset..offset + length].to_vec())?;
offset += length;
headers.insert(name, value);
}
return Ok(headers);
}
async fn do_read(&mut self) -> Result<(), Error> {
if self.done {
return Ok(());
}
loop {
let chunk = match self.resp.chunk().await? {
Some(v) => v,
None => return Ok(()),
};
self.buf.extend(chunk.iter().copied());
if !self.prelude_read && !self.read_prelude()? {
continue;
}
if !self.prelude_crc_read {
if !self.read_prelude_crc()? {
continue;
}
let got = crc32(&self.prelude);
let expected = uint32(&self.prelude_crc)?;
if got != expected {
self.done = true;
return Err(Error::CrcMismatch(String::from("prelude"), expected, got));
}
self.total_length = uint32(&self.prelude[0..4])? as usize;
}
if !self.data_read && !self.read_data()? {
continue;
}
if !self.message_crc_read {
if !self.read_message_crc()? {
continue;
}
let mut message: Vec<u8> = Vec::new();
message.extend_from_slice(&self.prelude);
message.extend_from_slice(&self.prelude_crc);
message.extend_from_slice(&self.data);
let got = crc32(&message);
let expected = uint32(&self.message_crc)?;
if got != expected {
self.done = true;
return Err(Error::CrcMismatch(String::from("message"), expected, got));
}
}
let header_length = uint32(&self.prelude[4..])? as usize;
let headers = self.decode_header(header_length)?;
let value = match headers.get(":message-type") {
Some(v) => v.as_str(),
None => "",
};
if value == "error" {
self.done = true;
return Err(Error::SelectError(
match headers.get(":error-code") {
Some(v) => v.clone(),
None => String::new(),
},
match headers.get(":error-message") {
Some(v) => v.clone(),
None => String::new(),
},
));
}
let event_type = match headers.get(":event-type") {
Some(v) => v.as_str(),
None => "",
};
if event_type == "End" {
self.done = true;
return Ok(());
}
let payload_length = self.total_length - header_length - 16;
if event_type == "Cont" || payload_length < 1 {
self.reset();
continue;
}
let payload = &self.data[header_length..(header_length + payload_length)];
if event_type == "Progress" || event_type == "Stats" {
let root = Element::parse(&mut BufReader::new(payload))?;
self.reset();
self.progress = SelectProgress {
bytes_scanned: get_text(&root, "BytesScanned")?.parse::<usize>()?,
bytes_progressed: get_text(&root, "BytesProcessed")?.parse::<usize>()?,
bytes_returned: get_text(&root, "BytesReturned")?.parse::<usize>()?,
};
continue;
}
if event_type == "Records" {
self.payload = payload.to_vec();
self.payload_index = 0;
self.reset();
return Ok(());
}
self.done = true;
return Err(Error::UnknownEventType(event_type.to_string()));
}
}
pub async fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
loop {
if self.done {
return Ok(0);
}
if self.payload_index < self.payload.len() {
let n = copy_slice(buf, &self.payload[self.payload_index..]);
self.payload_index += n;
if self.payload_index > self.payload.len() {
self.payload_index = self.payload.len();
}
return Ok(n);
}
self.payload.clear();
self.payload_index = 0;
match self.do_read().await {
Err(e) => {
self.done = true;
return Err(std::io::Error::new(
std::io::ErrorKind::Other,
e.to_string(),
));
}
Ok(_) => {
if self.payload.len() == 0 {
self.done = true;
return Ok(0);
}
}
}
}
}
}
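// Usage sketch: drain the Records payloads of a select response into a
// string; `read` returns 0 once the End event has been received.
async fn drain_select_example(resp: &mut SelectObjectContentResponse) -> Result<String, Error> {
    let mut out = Vec::new();
    let mut buf = [0_u8; 8192];
    loop {
        let n = resp.read(&mut buf).await?;
        if n == 0 {
            break;
        }
        out.extend_from_slice(&buf[..n]);
    }
    Ok(String::from_utf8(out)?)
}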

241
src/s3/signer.rs Normal file

@@ -0,0 +1,241 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::utils::{
get_canonical_headers, get_canonical_query_string, sha256_hash, to_amz_date, to_signer_date,
Multimap, UtcTime,
};
use hex::encode as hexencode;
use hmac::{Hmac, Mac};
use hyper::http::Method;
use sha2::Sha256;
pub fn hmac_hash(key: &[u8], data: &[u8]) -> Vec<u8> {
let mut hasher = Hmac::<Sha256>::new_from_slice(key).expect("HMAC can take key of any size");
hasher.update(data);
return hasher.finalize().into_bytes().to_vec();
}
pub fn hmac_hash_hex(key: &[u8], data: &[u8]) -> String {
return hexencode(hmac_hash(key, data));
}
pub fn get_scope(date: UtcTime, region: &str, service_name: &str) -> String {
return format!(
"{}/{}/{}/aws4_request",
to_signer_date(date),
region,
service_name
);
}
pub fn get_canonical_request_hash(
method: &Method,
uri: &str,
query_string: &str,
headers: &str,
signed_headers: &str,
content_sha256: &str,
) -> String {
// CanonicalRequest =
// HTTPRequestMethod + '\n' +
// CanonicalURI + '\n' +
// CanonicalQueryString + '\n' +
// CanonicalHeaders + '\n\n' +
// SignedHeaders + '\n' +
// HexEncode(Hash(RequestPayload))
let canonical_request = format!(
"{}\n{}\n{}\n{}\n\n{}\n{}",
method, uri, query_string, headers, signed_headers, content_sha256
);
return sha256_hash(canonical_request.as_bytes());
}
pub fn get_string_to_sign(date: UtcTime, scope: &str, canonical_request_hash: &str) -> String {
return format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
to_amz_date(date),
scope,
canonical_request_hash
);
}
pub fn get_signing_key(
secret_key: &str,
date: UtcTime,
region: &str,
service_name: &str,
) -> Vec<u8> {
let mut key: Vec<u8> = b"AWS4".to_vec();
key.extend(secret_key.as_bytes());
let date_key = hmac_hash(key.as_slice(), to_signer_date(date).as_bytes());
let date_region_key = hmac_hash(date_key.as_slice(), region.as_bytes());
let date_region_service_key = hmac_hash(date_region_key.as_slice(), service_name.as_bytes());
return hmac_hash(date_region_service_key.as_slice(), b"aws4_request");
}
pub fn get_signature(signing_key: &[u8], string_to_sign: &[u8]) -> String {
hmac_hash_hex(signing_key, string_to_sign)
}
pub fn get_authorization(
access_key: &str,
scope: &str,
signed_headers: &str,
signature: &str,
) -> String {
return format!(
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
access_key, scope, signed_headers, signature
);
}
pub fn sign_v4(
service_name: &str,
method: &Method,
uri: &str,
region: &str,
headers: &mut Multimap,
query_params: &Multimap,
access_key: &str,
secret_key: &str,
content_sha256: &str,
date: UtcTime,
) {
let scope = get_scope(date, region, service_name);
let (signed_headers, canonical_headers) = get_canonical_headers(headers);
let canonical_query_string = get_canonical_query_string(query_params);
let canonical_request_hash = get_canonical_request_hash(
method,
uri,
&canonical_query_string,
&canonical_headers,
&signed_headers,
content_sha256,
);
let string_to_sign = get_string_to_sign(date, &scope, &canonical_request_hash);
let signing_key = get_signing_key(secret_key, date, region, service_name);
let signature = get_signature(signing_key.as_slice(), string_to_sign.as_bytes());
let authorization = get_authorization(access_key, &scope, &signed_headers, &signature);
headers.insert("Authorization".to_string(), authorization);
}
pub fn sign_v4_s3(
method: &Method,
uri: &str,
region: &str,
headers: &mut Multimap,
query_params: &Multimap,
access_key: &str,
secret_key: &str,
content_sha256: &str,
date: UtcTime,
) {
sign_v4(
"s3",
method,
uri,
region,
headers,
query_params,
access_key,
secret_key,
content_sha256,
date,
)
}
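// Usage sketch: sign a GET request for the S3 service. Host, X-Amz-Date and
// X-Amz-Content-Sha256 must already be present in `headers`; sign_v4_s3
// appends the computed Authorization header. The endpoint and credentials
// here are illustrative placeholders.
fn sign_example(headers: &mut Multimap, date: UtcTime) {
    headers.insert("Host".to_string(), "play.min.io".to_string());
    headers.insert("X-Amz-Date".to_string(), to_amz_date(date));
    headers.insert(
        "X-Amz-Content-Sha256".to_string(),
        "UNSIGNED-PAYLOAD".to_string(),
    );
    sign_v4_s3(
        &Method::GET,
        "/my-bucket/my-object",
        "us-east-1",
        headers,
        &Multimap::new(),
        "minioadmin",
        "minioadmin",
        "UNSIGNED-PAYLOAD",
        date,
    );
}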
pub fn sign_v4_sts(
method: &Method,
uri: &str,
region: &str,
headers: &mut Multimap,
query_params: &Multimap,
access_key: &str,
secret_key: &str,
content_sha256: &str,
date: UtcTime,
) {
sign_v4(
"sts",
method,
uri,
region,
headers,
query_params,
access_key,
secret_key,
content_sha256,
date,
)
}
pub fn presign_v4(
method: &Method,
host: &str,
uri: &str,
region: &str,
query_params: &mut Multimap,
access_key: &str,
secret_key: &str,
date: UtcTime,
expires: u32,
) {
let scope = get_scope(date, region, "s3");
let canonical_headers = "host:".to_string() + host;
let signed_headers = "host";
query_params.insert(
"X-Amz-Algorithm".to_string(),
"AWS4-HMAC-SHA256".to_string(),
);
query_params.insert(
"X-Amz-Credential".to_string(),
access_key.to_string() + "/" + &scope,
);
query_params.insert("X-Amz-Date".to_string(), to_amz_date(date));
query_params.insert("X-Amz-Expires".to_string(), expires.to_string());
query_params.insert(
"X-Amz-SignedHeaders".to_string(),
signed_headers.to_string(),
);
let canonical_query_string = get_canonical_query_string(query_params);
let canonical_request_hash = get_canonical_request_hash(
method,
uri,
&canonical_query_string,
&canonical_headers,
&signed_headers,
"UNSIGNED-PAYLOAD",
);
let string_to_sign = get_string_to_sign(date, &scope, &canonical_request_hash);
let signing_key = get_signing_key(secret_key, date, region, "s3");
let signature = get_signature(signing_key.as_slice(), string_to_sign.as_bytes());
query_params.insert("X-Amz-Signature".to_string(), signature);
}
pub fn post_presign_v4(
string_to_sign: &str,
secret_key: &str,
date: UtcTime,
region: &str,
) -> String {
let signing_key = get_signing_key(secret_key, date, region, "s3");
return get_signature(signing_key.as_slice(), string_to_sign.as_bytes());
}

168
src/s3/sse.rs Normal file

@@ -0,0 +1,168 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::utils;
use std::any::Any;
pub trait Sse: std::fmt::Debug {
fn headers(&self) -> utils::Multimap;
fn copy_headers(&self) -> utils::Multimap;
fn tls_required(&self) -> bool;
fn as_any(&self) -> &dyn Any;
}
#[derive(Clone, Debug)]
pub struct SseCustomerKey {
headers: utils::Multimap,
copy_headers: utils::Multimap,
}
impl SseCustomerKey {
pub fn new(key: &str) -> SseCustomerKey {
let b64key = utils::b64encode(key);
let md5key = utils::md5sum_hash(key.as_bytes());
let mut headers = utils::Multimap::new();
headers.insert(
String::from("X-Amz-Server-Side-Encryption-Customer-Algorithm"),
String::from("AES256"),
);
headers.insert(
String::from("X-Amz-Server-Side-Encryption-Customer-Key"),
b64key.clone(),
);
headers.insert(
String::from("X-Amz-Server-Side-Encryption-Customer-Key-MD5"),
md5key.clone(),
);
let mut copy_headers = utils::Multimap::new();
copy_headers.insert(
String::from("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"),
String::from("AES256"),
);
copy_headers.insert(
String::from("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"),
b64key.clone(),
);
copy_headers.insert(
String::from("X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"),
md5key.clone(),
);
SseCustomerKey {
headers: headers,
copy_headers: copy_headers,
}
}
}
impl Sse for SseCustomerKey {
fn headers(&self) -> utils::Multimap {
self.headers.clone()
}
fn copy_headers(&self) -> utils::Multimap {
self.copy_headers.clone()
}
fn tls_required(&self) -> bool {
true
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[derive(Clone, Debug)]
pub struct SseKms {
headers: utils::Multimap,
}
impl SseKms {
pub fn new(key: &str, context: Option<&str>) -> SseKms {
let mut headers = utils::Multimap::new();
headers.insert(
String::from("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"),
key.to_string(),
);
headers.insert(
String::from("X-Amz-Server-Side-Encryption"),
String::from("aws:kms"),
);
if let Some(v) = context {
headers.insert(
String::from("X-Amz-Server-Side-Encryption-Context"),
utils::b64encode(v),
);
}
SseKms { headers: headers }
}
}
impl Sse for SseKms {
fn headers(&self) -> utils::Multimap {
self.headers.clone()
}
fn copy_headers(&self) -> utils::Multimap {
utils::Multimap::new()
}
fn tls_required(&self) -> bool {
true
}
fn as_any(&self) -> &dyn Any {
self
}
}
#[derive(Clone, Debug)]
pub struct SseS3 {
headers: utils::Multimap,
}
impl SseS3 {
pub fn new() -> SseS3 {
let mut headers = utils::Multimap::new();
headers.insert(
String::from("X-Amz-Server-Side-Encryption"),
String::from("AES256"),
);
SseS3 { headers: headers }
}
}
impl Sse for SseS3 {
fn headers(&self) -> utils::Multimap {
self.headers.clone()
}
fn copy_headers(&self) -> utils::Multimap {
utils::Multimap::new()
}
fn tls_required(&self) -> bool {
false
}
fn as_any(&self) -> &dyn Any {
self
}
}
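// Usage sketch: the three SSE flavors share the Sse trait; SSE-C and SSE-KMS
// require TLS while SSE-S3 does not. The key strings are illustrative
// placeholders (SSE-C expects a 32-byte key).
fn sse_example() {
    let ssec = SseCustomerKey::new("32byteslongsecretkeymustprovided");
    let kms = SseKms::new("my-kms-key-id", None);
    let sse_s3 = SseS3::new();
    for sse in [&ssec as &dyn Sse, &kms as &dyn Sse, &sse_s3 as &dyn Sse] {
        println!("tls required: {}", sse.tls_required());
    }
}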

459
src/s3/types.rs Normal file

@@ -0,0 +1,459 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::utils::UtcTime;
use std::collections::HashMap;
use std::fmt;
#[derive(Clone, Debug, Default)]
pub struct Item {
pub name: String,
pub last_modified: Option<UtcTime>,
pub etag: Option<String>, // except DeleteMarker
pub owner_id: Option<String>,
pub owner_name: Option<String>,
pub size: Option<usize>, // except DeleteMarker
pub storage_class: Option<String>,
pub is_latest: bool, // except ListObjects V1/V2
pub version_id: Option<String>, // except ListObjects V1/V2
pub user_metadata: Option<HashMap<String, String>>,
pub is_prefix: bool,
pub is_delete_marker: bool,
pub encoding_type: Option<String>,
}
#[derive(Clone, Debug)]
pub struct Bucket {
pub name: String,
pub creation_date: UtcTime,
}
#[derive(Clone, Debug)]
pub struct Part {
pub number: u16,
pub etag: String,
}
#[derive(Clone, Debug)]
pub enum RetentionMode {
Governance,
Compliance,
}
impl RetentionMode {
pub fn parse(s: &str) -> Result<RetentionMode, Error> {
match s {
"GOVERNANCE" => Ok(RetentionMode::Governance),
"COMPLIANCE" => Ok(RetentionMode::Compliance),
_ => Err(Error::InvalidRetentionMode(s.to_string())),
}
}
}
impl fmt::Display for RetentionMode {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
RetentionMode::Governance => write!(f, "GOVERNANCE"),
RetentionMode::Compliance => write!(f, "COMPLIANCE"),
}
}
}
#[derive(Clone, Debug)]
pub struct Retention {
pub mode: RetentionMode,
pub retain_until_date: UtcTime,
}
pub fn parse_legal_hold(s: &str) -> Result<bool, Error> {
match s {
"ON" => Ok(true),
"OFF" => Ok(false),
_ => Err(Error::InvalidLegalHold(s.to_string())),
}
}
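// Usage sketch: both parsers accept only the exact S3 wire strings.
fn retention_parse_example() -> Result<(), Error> {
    assert!(matches!(
        RetentionMode::parse("GOVERNANCE")?,
        RetentionMode::Governance
    ));
    assert!(parse_legal_hold("ON")?);
    assert!(RetentionMode::parse("governance").is_err()); // case-sensitive
    Ok(())
}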
#[derive(Clone, Debug, Copy)]
pub struct DeleteObject<'a> {
pub name: &'a str,
pub version_id: Option<&'a str>,
}
#[derive(Clone, Debug)]
pub enum CompressionType {
NONE,
GZIP,
BZIP2,
}
impl fmt::Display for CompressionType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
CompressionType::NONE => write!(f, "NONE"),
CompressionType::GZIP => write!(f, "GZIP"),
CompressionType::BZIP2 => write!(f, "BZIP2"),
}
}
}
#[derive(Clone, Debug)]
pub enum FileHeaderInfo {
USE,
IGNORE,
NONE,
}
impl fmt::Display for FileHeaderInfo {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
FileHeaderInfo::IGNORE => write!(f, "IGNORE"),
FileHeaderInfo::USE => write!(f, "USE"),
FileHeaderInfo::NONE => write!(f, "NONE"),
}
}
}
#[derive(Clone, Debug)]
pub enum JsonType {
DOCUMENT,
LINES,
}
impl fmt::Display for JsonType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
JsonType::DOCUMENT => write!(f, "DOCUMENT"),
JsonType::LINES => write!(f, "LINES"),
}
}
}
#[derive(Clone, Debug)]
pub enum QuoteFields {
ALWAYS,
ASNEEDED,
}
impl fmt::Display for QuoteFields {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
QuoteFields::ALWAYS => write!(f, "ALWAYS"),
QuoteFields::ASNEEDED => write!(f, "ASNEEDED"),
}
}
}
#[derive(Clone, Debug, Default)]
pub struct CsvInputSerialization {
pub compression_type: Option<CompressionType>,
pub allow_quoted_record_delimiter: bool,
pub comments: Option<char>,
pub field_delimiter: Option<char>,
pub file_header_info: Option<FileHeaderInfo>,
pub quote_character: Option<char>,
pub quote_escape_character: Option<char>,
pub record_delimiter: Option<char>,
}
#[derive(Clone, Debug, Default)]
pub struct JsonInputSerialization {
pub compression_type: Option<CompressionType>,
pub json_type: Option<JsonType>,
}
#[derive(Clone, Debug, Default)]
pub struct ParquetInputSerialization;
#[derive(Clone, Debug, Default)]
pub struct CsvOutputSerialization {
pub field_delimiter: Option<char>,
pub quote_character: Option<char>,
pub quote_escape_character: Option<char>,
pub quote_fields: Option<QuoteFields>,
pub record_delimiter: Option<char>,
}
#[derive(Clone, Debug, Default)]
pub struct JsonOutputSerialization {
pub record_delimiter: Option<char>,
}
#[derive(Clone, Debug, Default)]
pub struct SelectRequest<'a> {
pub expr: &'a str,
pub csv_input: Option<CsvInputSerialization>,
pub json_input: Option<JsonInputSerialization>,
pub parquet_input: Option<ParquetInputSerialization>,
pub csv_output: Option<CsvOutputSerialization>,
pub json_output: Option<JsonOutputSerialization>,
pub request_progress: bool,
pub scan_start_range: Option<usize>,
pub scan_end_range: Option<usize>,
}
impl<'a> SelectRequest<'a> {
pub fn new_csv_input_output(
expr: &'a str,
csv_input: CsvInputSerialization,
csv_output: CsvOutputSerialization,
) -> Result<SelectRequest, Error> {
if expr.is_empty() {
return Err(Error::InvalidSelectExpression(String::from(
"select expression cannot be empty",
)));
}
Ok(SelectRequest {
expr: expr,
csv_input: Some(csv_input),
json_input: None,
parquet_input: None,
csv_output: Some(csv_output),
json_output: None,
request_progress: false,
scan_start_range: None,
scan_end_range: None,
})
}
pub fn new_csv_input_json_output(
expr: &'a str,
csv_input: CsvInputSerialization,
json_output: JsonOutputSerialization,
) -> Result<SelectRequest, Error> {
if expr.is_empty() {
return Err(Error::InvalidSelectExpression(String::from(
"select expression cannot be empty",
)));
}
Ok(SelectRequest {
expr,
csv_input: Some(csv_input),
json_input: None,
parquet_input: None,
csv_output: None,
json_output: Some(json_output),
request_progress: false,
scan_start_range: None,
scan_end_range: None,
})
}
pub fn new_json_input_output(
expr: &'a str,
json_input: JsonInputSerialization,
json_output: JsonOutputSerialization,
) -> Result<SelectRequest, Error> {
if expr.is_empty() {
return Err(Error::InvalidSelectExpression(String::from(
"select expression cannot be empty",
)));
}
Ok(SelectRequest {
expr,
csv_input: None,
json_input: Some(json_input),
parquet_input: None,
csv_output: None,
json_output: Some(json_output),
request_progress: false,
scan_start_range: None,
scan_end_range: None,
})
}
pub fn new_parquet_input_csv_output(
expr: &'a str,
parquet_input: ParquetInputSerialization,
csv_output: CsvOutputSerialization,
) -> Result<SelectRequest, Error> {
if expr.is_empty() {
return Err(Error::InvalidSelectExpression(String::from(
"select expression cannot be empty",
)));
}
Ok(SelectRequest {
expr,
csv_input: None,
json_input: None,
parquet_input: Some(parquet_input),
csv_output: Some(csv_output),
json_output: None,
request_progress: false,
scan_start_range: None,
scan_end_range: None,
})
}
pub fn new_parquet_input_json_output(
expr: &'a str,
parquet_input: ParquetInputSerialization,
json_output: JsonOutputSerialization,
) -> Result<SelectRequest, Error> {
if expr.is_empty() {
return Err(Error::InvalidSelectExpression(String::from(
"select expression cannot be empty",
)));
}
Ok(SelectRequest {
expr,
csv_input: None,
json_input: None,
parquet_input: Some(parquet_input),
csv_output: None,
json_output: Some(json_output),
request_progress: false,
scan_start_range: None,
scan_end_range: None,
})
}
pub fn to_xml(&self) -> String {
let mut data = String::from("<SelectObjectContentRequest>");
data.push_str("<Expression>");
data.push_str(self.expr);
data.push_str("</Expression>");
data.push_str("<ExpressionType>SQL</ExpressionType>");
data.push_str("<InputSerialization>");
if let Some(c) = &self.csv_input {
if let Some(v) = &c.compression_type {
data.push_str("<CompressionType>");
data.push_str(&v.to_string());
data.push_str("</CompressionType>");
}
data.push_str("<CSV>");
if c.allow_quoted_record_delimiter {
data.push_str("<AllowQuotedRecordDelimiter>true</AllowQuotedRecordDelimiter>");
}
if let Some(v) = c.comments {
data.push_str("<Comments>");
data.push_str(&v.to_string());
data.push_str("</Comments>");
}
if let Some(v) = c.field_delimiter {
data.push_str("<FieldDelimiter>");
data.push_str(&v.to_string());
data.push_str("</FieldDelimiter>");
}
if let Some(v) = &c.file_header_info {
data.push_str("<FileHeaderInfo>");
data.push_str(&v.to_string());
data.push_str("</FileHeaderInfo>");
}
if let Some(v) = c.quote_character {
data.push_str("<QuoteCharacter>");
data.push_str(&v.to_string());
data.push_str("</QuoteCharacter>");
}
if let Some(v) = c.quote_escape_character {
data.push_str("<QuoteEscapeCharacter>");
data.push_str(&v.to_string());
data.push_str("</QuoteEscapeCharacter>");
}
if let Some(v) = c.record_delimiter {
data.push_str("<RecordDelimiter>");
data.push_str(&v.to_string());
data.push_str("</RecordDelimiter>");
}
data.push_str("</CSV>");
} else if let Some(j) = &self.json_input {
if let Some(v) = &j.compression_type {
data.push_str("<CompressionType>");
data.push_str(&v.to_string());
data.push_str("</CompressionType>");
}
data.push_str("<JSON>");
if let Some(v) = &j.json_type {
data.push_str("<Type>");
data.push_str(&v.to_string());
data.push_str("</Type>");
}
data.push_str("</JSON>");
} else if self.parquet_input.is_some() {
data.push_str("<Parquet></Parquet>");
}
data.push_str("</InputSerialization>");
data.push_str("<OutputSerialization>");
if let Some(c) = &self.csv_output {
data.push_str("<CSV>");
if let Some(v) = c.field_delimiter {
data.push_str("<FieldDelimiter>");
data.push_str(&v.to_string());
data.push_str("</FieldDelimiter>");
}
if let Some(v) = c.quote_character {
data.push_str("<QuoteCharacter>");
data.push_str(&v.to_string());
data.push_str("</QuoteCharacter>");
}
if let Some(v) = c.quote_escape_character {
data.push_str("<QuoteEscapeCharacter>");
data.push_str(&v.to_string());
data.push_str("</QuoteEscapeCharacter>");
}
if let Some(v) = &c.quote_fields {
data.push_str("<QuoteFields>");
data.push_str(&v.to_string());
data.push_str("</QuoteFields>");
}
if let Some(v) = c.record_delimiter {
data.push_str("<RecordDelimiter>");
data.push_str(&v.to_string());
data.push_str("</RecordDelimiter>");
}
data.push_str("</CSV>");
} else if let Some(j) = &self.json_output {
data.push_str("<JSON>");
if let Some(v) = j.record_delimiter {
data.push_str("<RecordDelimiter>");
data.push_str(&v.to_string());
data.push_str("</RecordDelimiter>");
}
data.push_str("</JSON>");
}
data.push_str("</OutputSerialization>");
if self.request_progress {
data.push_str("<RequestProgress><Enabled>true</Enabled></RequestProgress>");
}
if let Some(s) = self.scan_start_range {
if let Some(e) = self.scan_end_range {
data.push_str("<ScanRange>");
data.push_str("<Start>");
data.push_str(&s.to_string());
data.push_str("</Start>");
data.push_str("<End>");
data.push_str(&e.to_string());
data.push_str("</End>");
data.push_str("</ScanRange>");
}
}
data.push_str("</SelectObjectContentRequest>");
data
}
}
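A hedged usage sketch (not part of the commit) showing how a CSV-to-CSV request is typically assembled and serialized; the field values are illustrative only:
// Sketch only: build a SelectRequest via the constructor above and render its XML.
fn select_request_sketch() -> Result<(), Error> {
let request = SelectRequest::new_csv_input_output(
"select * from S3Object",
CsvInputSerialization {
file_header_info: Some(FileHeaderInfo::USE),
..Default::default()
},
CsvOutputSerialization {
quote_fields: Some(QuoteFields::ASNEEDED),
..Default::default()
},
)?;
// Produces <SelectObjectContentRequest>...<ExpressionType>SQL</ExpressionType>...
let _xml = request.to_xml();
Ok(())
}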
#[derive(Clone, Debug)]
pub struct SelectProgress {
pub bytes_scanned: usize,
pub bytes_progressed: usize,
pub bytes_returned: usize,
}

src/s3/utils.rs Normal file
@ -0,0 +1,282 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
pub use base64::encode as b64encode;
use byteorder::{BigEndian, ReadBytesExt};
use chrono::{DateTime, NaiveDateTime, ParseError, Utc};
use chrono_locale::LocaleDate;
use crc::{Crc, CRC_32_ISO_HDLC};
use lazy_static::lazy_static;
use md5::compute as md5compute;
use multimap::MultiMap;
use regex::Regex;
use sha2::{Digest, Sha256};
pub use urlencoding::decode as urldecode;
pub use urlencoding::encode as urlencode;
use xmltree::Element;
pub type UtcTime = DateTime<Utc>;
pub type Multimap = MultiMap<String, String>;
pub fn merge(m1: &mut Multimap, m2: &Multimap) {
for (key, values) in m2.iter_all() {
for value in values {
m1.insert(key.to_string(), value.to_string());
}
}
}
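A small sketch (not in the commit) of merge() semantics: values are appended, so duplicate keys accumulate rather than overwrite.
// Sketch only: merging keeps both values under the same key.
fn merge_sketch() {
let mut m1 = Multimap::new();
m1.insert(String::from("key"), String::from("value1"));
let mut m2 = Multimap::new();
m2.insert(String::from("key"), String::from("value2"));
merge(&mut m1, &m2);
assert_eq!(m1.get_vec("key").map(|v| v.len()), Some(2));
}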
pub fn crc32(data: &[u8]) -> u32 {
Crc::<u32>::new(&CRC_32_ISO_HDLC).checksum(data)
}
pub fn uint32(mut data: &[u8]) -> Result<u32, std::io::Error> {
data.read_u32::<BigEndian>()
}
pub fn sha256_hash(data: &[u8]) -> String {
let mut hasher = Sha256::new();
hasher.update(data);
format!("{:x}", hasher.finalize())
}
pub fn md5sum_hash(data: &[u8]) -> String {
b64encode(md5compute(data).as_slice())
}
pub fn utc_now() -> UtcTime {
chrono::offset::Utc::now()
}
pub fn to_signer_date(time: UtcTime) -> String {
time.format("%Y%m%d").to_string()
}
pub fn to_amz_date(time: UtcTime) -> String {
time.format("%Y%m%dT%H%M%SZ").to_string()
}
pub fn to_http_header_value(time: UtcTime) -> String {
time.formatl("%a, %d %b %Y %H:%M:%S GMT", "C").to_string()
}
pub fn to_iso8601utc(time: UtcTime) -> String {
time.format("%Y-%m-%dT%H:%M:%S.%3fZ").to_string()
}
pub fn from_iso8601utc(s: &str) -> Result<UtcTime, ParseError> {
Ok(DateTime::<Utc>::from_utc(
match NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%3fZ") {
Ok(d) => d,
_ => NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%SZ")?,
},
Utc,
))
}
pub fn from_http_header_value(s: &str) -> Result<UtcTime, ParseError> {
Ok(DateTime::<Utc>::from_utc(
NaiveDateTime::parse_from_str(s, "%a, %d %b %Y %H:%M:%S GMT")?,
Utc,
))
}
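A round-trip sketch (not in the commit) for the timestamp helpers; the ISO form keeps millisecond precision, so format-parse-format is stable.
// Sketch only: ISO-8601 round trip through the helpers above.
fn timestamp_sketch() {
let now = utc_now();
let iso = to_iso8601utc(now); // e.g. "2022-08-21T03:40:11.000Z"
let parsed = from_iso8601utc(&iso).unwrap();
assert_eq!(to_iso8601utc(parsed), iso);
// to_amz_date()/to_signer_date() render the compact SigV4 forms,
// e.g. "20220821T034011Z" and "20220821".
}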
pub fn to_http_headers(map: &Multimap) -> Vec<String> {
let mut headers: Vec<String> = Vec::new();
for (key, values) in map.iter_all() {
for value in values {
headers.push(format!("{}: {}", key, value));
}
}
headers
}
pub fn to_query_string(map: &Multimap) -> String {
let mut query = String::new();
for (key, values) in map.iter_all() {
for value in values {
if !query.is_empty() {
query.push('&');
}
query.push_str(&urlencode(key));
query.push('=');
query.push_str(&urlencode(value));
}
}
query
}
pub fn get_canonical_query_string(map: &Multimap) -> String {
let mut keys: Vec<String> = map.iter().map(|(key, _)| key.to_string()).collect();
keys.sort();
let mut query = String::new();
for key in keys {
// iter() only yields keys present in the map, so get_vec() always succeeds.
if let Some(values) = map.get_vec(key.as_str()) {
for value in values {
if !query.is_empty() {
query.push('&');
}
query.push_str(&urlencode(key.as_str()));
query.push('=');
query.push_str(&urlencode(value));
}
}
}
query
}
pub fn get_canonical_headers(map: &Multimap) -> (String, String) {
lazy_static! {
static ref MULTI_SPACE_REGEX: Regex = Regex::new("( +)").unwrap();
}
let mut signed_headers: Vec<String> = Vec::new();
let mut mmap: MultiMap<String, String> = MultiMap::new();
for (k, values) in map.iter_all() {
let key = k.to_lowercase();
if "authorization" == key || "user-agent" == key {
continue;
}
if !signed_headers.contains(&key) {
signed_headers.push(key.clone());
}
for v in values {
mmap.insert(key.clone(), v.to_string());
}
}
let mut canonical_headers: Vec<String> = Vec::new();
for (key, values) in mmap.iter_all_mut() {
values.sort();
let mut value = String::new();
for v in values {
if !value.is_empty() {
value.push_str(",");
}
let s: String = MULTI_SPACE_REGEX.replace_all(v, " ").to_string();
value.push_str(&s);
}
canonical_headers.push(key.to_string() + ":" + value.as_str());
}
signed_headers.sort();
canonical_headers.sort();
(signed_headers.join(";"), canonical_headers.join("\n"))
}
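A sketch (not in the commit) of the SigV4 canonicalization helpers; the concrete values are illustrative.
// Sketch only: query keys are sorted and URL-encoded; header names are
// lowercased, then both lists are sorted.
fn canonicalization_sketch() {
let mut query = Multimap::new();
query.insert(String::from("prefix"), String::from("logs/"));
query.insert(String::from("delimiter"), String::from("/"));
assert_eq!(get_canonical_query_string(&query), "delimiter=%2F&prefix=logs%2F");
let mut headers = Multimap::new();
headers.insert(String::from("Host"), String::from("play.min.io"));
headers.insert(String::from("X-Amz-Date"), String::from("20220821T034011Z"));
let (signed, canonical) = get_canonical_headers(&headers);
assert_eq!(signed, "host;x-amz-date");
assert_eq!(canonical, "host:play.min.io\nx-amz-date:20220821T034011Z");
}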
pub fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<(), Error> {
if bucket_name.trim().is_empty() {
return Err(Error::InvalidBucketName(String::from(
"bucket name cannot be empty",
)));
}
if bucket_name.len() < 3 {
return Err(Error::InvalidBucketName(String::from(
"bucket name cannot be less than 3 characters",
)));
}
if bucket_name.len() > 63 {
return Err(Error::InvalidBucketName(String::from(
"Bucket name cannot be greater than 63 characters",
)));
}
lazy_static! {
static ref VALID_IP_ADDR_REGEX: Regex = Regex::new("^(\\d+\\.){3}\\d+$").unwrap();
static ref VALID_BUCKET_NAME_REGEX: Regex =
Regex::new("^[A-Za-z0-9][A-Za-z0-9\\.\\-_:]{1,61}[A-Za-z0-9]$").unwrap();
static ref VALID_BUCKET_NAME_STRICT_REGEX: Regex =
Regex::new("^[a-z0-9][a-z0-9\\.\\-]{1,61}[a-z0-9]$").unwrap();
}
if VALID_IP_ADDR_REGEX.is_match(bucket_name) {
return Err(Error::InvalidBucketName(String::from(
"bucket name cannot be an IP address",
)));
}
if bucket_name.contains("..") || bucket_name.contains(".-") || bucket_name.contains("-.") {
return Err(Error::InvalidBucketName(String::from(
"bucket name contains invalid successive characters '..', '.-' or '-.'",
)));
}
if strict {
if !VALID_BUCKET_NAME_STRICT_REGEX.is_match(bucket_name) {
return Err(Error::InvalidBucketName(String::from(
"bucket name does not follow S3 standards strictly",
)));
}
} else if !VALID_BUCKET_NAME_REGEX.is_match(bucket_name) {
return Err(Error::InvalidBucketName(String::from(
"bucket name does not follow S3 standards",
)));
}
Ok(())
}
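A sketch (not in the commit) of the validation rules above:
// Sketch only: strict mode enforces lowercase S3 naming; both modes reject
// IP-address-like names, too-short names, and bad successive characters.
fn bucket_name_sketch() {
assert!(check_bucket_name("my-bucket", true).is_ok());
assert!(check_bucket_name("ab", false).is_err()); // under 3 characters
assert!(check_bucket_name("192.168.1.1", false).is_err()); // looks like an IP
assert!(check_bucket_name("MyBucket", true).is_err()); // uppercase fails strict mode
assert!(check_bucket_name("a..b", false).is_err()); // invalid successive characters
}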
pub fn get_text(element: &Element, tag: &str) -> Result<String, Error> {
Ok(element
.get_child(tag)
.ok_or(Error::XmlError(format!("<{}> tag not found", tag)))?
.get_text()
.ok_or(Error::XmlError(format!("text of <{}> tag not found", tag)))?
.to_string())
}
pub fn get_option_text(element: &Element, tag: &str) -> Result<Option<String>, Error> {
Ok(match element.get_child(tag) {
Some(v) => Some(
v.get_text()
.ok_or(Error::XmlError(format!("text of <{}> tag not found", tag)))?
.to_string(),
),
None => None,
})
}
pub fn get_default_text(element: &Element, tag: &str) -> String {
element.get_child(tag).map_or(String::new(), |v| {
v.get_text().unwrap_or_default().to_string()
})
}
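A sketch (not in the commit) of the three XML accessors against a tiny document:
// Sketch only: get_text() errors on a missing tag, get_option_text() returns
// None, and get_default_text() falls back to an empty string.
fn xml_helpers_sketch() -> Result<(), Error> {
let root = Element::parse("<Root><Name>demo</Name></Root>".as_bytes()).unwrap();
assert_eq!(get_text(&root, "Name")?, "demo");
assert!(get_text(&root, "Missing").is_err());
assert_eq!(get_option_text(&root, "Missing")?, None);
assert_eq!(get_default_text(&root, "Missing"), "");
Ok(())
}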
pub fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize {
let mut c = 0;
for (d, s) in dst.iter_mut().zip(src.iter()) {
*d = *s;
c += 1;
}
c
}
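A sketch (not in the commit): copy_slice() copies up to the shorter slice's length and reports how many bytes were written.
// Sketch only: the destination is shorter than the source, so only 4 bytes copy.
fn copy_slice_sketch() {
let mut dst = [0u8; 4];
let n = copy_slice(&mut dst, b"hello");
assert_eq!(n, 4);
assert_eq!(&dst, b"hell");
}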

tests/tests.rs Normal file
@ -0,0 +1,453 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rand::distributions::{Alphanumeric, DistString};
use std::io::BufReader;
use minio::s3::args::*;
use minio::s3::client::Client;
use minio::s3::creds::StaticProvider;
use minio::s3::http::BaseUrl;
use minio::s3::types::{
CsvInputSerialization, CsvOutputSerialization, DeleteObject, FileHeaderInfo, QuoteFields,
SelectRequest,
};
struct RandReader {
size: usize,
}
impl RandReader {
fn new(size: usize) -> RandReader {
RandReader { size }
}
}
impl std::io::Read for RandReader {
fn read(&mut self, buf: &mut [u8]) -> Result<usize, std::io::Error> {
let bytes_read = self.size.min(buf.len());
if bytes_read > 0 {
let random: &mut dyn rand::RngCore = &mut rand::thread_rng();
random.fill_bytes(&mut buf[0..bytes_read]);
}
self.size -= bytes_read;
Ok(bytes_read)
}
}
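A sketch (not in the commit): RandReader yields exactly `size` random bytes and then signals EOF, which is what lets put_object() stream a fixed-length body from it.
// Sketch only: drain the reader and confirm it produced exactly `size` bytes.
fn rand_reader_sketch() {
use std::io::Read;
let mut reader = RandReader::new(10);
let mut buf = Vec::new();
reader.read_to_end(&mut buf).unwrap();
assert_eq!(buf.len(), 10);
}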
fn rand_bucket_name() -> String {
Alphanumeric
.sample_string(&mut rand::thread_rng(), 8)
.to_lowercase()
}
fn rand_object_name() -> String {
Alphanumeric.sample_string(&mut rand::thread_rng(), 8)
}
struct ClientTest<'a> {
client: &'a Client<'a>,
test_bucket: String,
}
impl<'a> ClientTest<'a> {
fn new(client: &'a Client<'_>, test_bucket: &'a str) -> ClientTest<'a> {
ClientTest {
client,
test_bucket: test_bucket.to_string(),
}
}
async fn bucket_exists(&self) {
let bucket_name = rand_bucket_name();
self.client
.make_bucket(&MakeBucketArgs::new(&bucket_name).unwrap())
.await
.unwrap();
let exists = self
.client
.bucket_exists(&BucketExistsArgs::new(&bucket_name).unwrap())
.await
.unwrap();
assert!(exists);
self.client
.remove_bucket(&RemoveBucketArgs::new(&bucket_name).unwrap())
.await
.unwrap();
}
async fn list_buckets(&self) {
let mut names: Vec<String> = Vec::new();
for _ in 1..=3 {
names.push(rand_bucket_name());
}
for b in names.iter() {
self.client
.make_bucket(&MakeBucketArgs::new(&b).unwrap())
.await
.unwrap();
}
let mut count = 0;
let resp = self
.client
.list_buckets(&ListBucketsArgs::new())
.await
.unwrap();
for bucket in resp.buckets.iter() {
if names.contains(&bucket.name) {
count += 1;
}
}
assert_eq!(count, 3);
for b in names.iter() {
self.client
.remove_bucket(&RemoveBucketArgs::new(&b).unwrap())
.await
.unwrap();
}
}
async fn put_object(&self) {
let object_name = rand_object_name();
let size = 16_usize;
self.client
.put_object(
&mut PutObjectArgs::new(
&self.test_bucket,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.await
.unwrap();
let resp = self
.client
.stat_object(&StatObjectArgs::new(&self.test_bucket, &object_name).unwrap())
.await
.unwrap();
assert_eq!(resp.bucket_name, self.test_bucket);
assert_eq!(resp.object_name, object_name);
assert_eq!(resp.size, size);
self.client
.remove_object(&RemoveObjectArgs::new(&self.test_bucket, &object_name).unwrap())
.await
.unwrap();
}
async fn put_object_multipart(&self) {
let object_name = rand_object_name();
let size: usize = 16 + 5 * 1024 * 1024;
self.client
.put_object(
&mut PutObjectArgs::new(
&self.test_bucket,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.await
.unwrap();
let resp = self
.client
.stat_object(&StatObjectArgs::new(&self.test_bucket, &object_name).unwrap())
.await
.unwrap();
assert_eq!(resp.bucket_name, self.test_bucket);
assert_eq!(resp.object_name, object_name);
assert_eq!(resp.size, size);
self.client
.remove_object(&RemoveObjectArgs::new(&self.test_bucket, &object_name).unwrap())
.await
.unwrap();
}
async fn get_object(&self) {
let object_name = rand_object_name();
let data = "hello, world";
self.client
.put_object(
&mut PutObjectArgs::new(
&self.test_bucket,
&object_name,
&mut BufReader::new(data.as_bytes()),
Some(data.len()),
None,
)
.unwrap(),
)
.await
.unwrap();
let resp = self
.client
.get_object(&GetObjectArgs::new(&self.test_bucket, &object_name).unwrap())
.await
.unwrap();
let got = resp.text().await.unwrap();
assert_eq!(got, data);
self.client
.remove_object(&RemoveObjectArgs::new(&self.test_bucket, &object_name).unwrap())
.await
.unwrap();
}
async fn remove_objects(&self) {
let bucket_name = rand_bucket_name();
self.client
.make_bucket(&MakeBucketArgs::new(&bucket_name).unwrap())
.await
.unwrap();
let mut names: Vec<String> = Vec::new();
for _ in 1..=3 {
let object_name = rand_object_name();
let size = 0_usize;
self.client
.put_object(
&mut PutObjectArgs::new(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.await
.unwrap();
names.push(object_name);
}
let mut objects: Vec<DeleteObject> = Vec::new();
for name in names.iter() {
objects.push(DeleteObject {
name: &name,
version_id: None,
});
}
self.client
.remove_objects(
&mut RemoveObjectsArgs::new(&bucket_name, &mut objects.iter()).unwrap(),
)
.await
.unwrap();
self.client
.remove_bucket(&RemoveBucketArgs::new(&bucket_name).unwrap())
.await
.unwrap();
}
async fn list_objects(&self) {
let bucket_name = rand_bucket_name();
self.client
.make_bucket(&MakeBucketArgs::new(&bucket_name).unwrap())
.await
.unwrap();
let mut names: Vec<String> = Vec::new();
for _ in 1..=3 {
let object_name = rand_object_name();
let size = 0_usize;
self.client
.put_object(
&mut PutObjectArgs::new(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.await
.unwrap();
names.push(object_name);
}
self.client
.list_objects(
&mut ListObjectsArgs::new(&bucket_name, &|res| {
let item = res.unwrap();
assert!(names.contains(&item.name));
true
})
.unwrap(),
)
.await
.unwrap();
let mut objects: Vec<DeleteObject> = Vec::new();
for name in names.iter() {
objects.push(DeleteObject {
name: &name,
version_id: None,
});
}
self.client
.remove_objects(
&mut RemoveObjectsArgs::new(&bucket_name, &mut objects.iter()).unwrap(),
)
.await
.unwrap();
self.client
.remove_bucket(&RemoveBucketArgs::new(&bucket_name).unwrap())
.await
.unwrap();
}
async fn select_object_content(&self) {
let object_name = rand_object_name();
let mut data = String::new();
data.push_str("1997,Ford,E350,\"ac, abs, moon\",3000.00\n");
data.push_str("1999,Chevy,\"Venture \"\"Extended Edition\"\"\",,4900.00\n");
data.push_str("1999,Chevy,\"Venture \"\"Extended Edition, Very Large\"\"\",,5000.00\n");
data.push_str("1996,Jeep,Grand Cherokee,\"MUST SELL!\n");
data.push_str("air, moon roof, loaded\",4799.00\n");
let body = String::from("Year,Make,Model,Description,Price\n") + &data;
self.client
.put_object(
&mut PutObjectArgs::new(
&self.test_bucket,
&object_name,
&mut BufReader::new(body.as_bytes()),
Some(body.len()),
None,
)
.unwrap(),
)
.await
.unwrap();
let request = SelectRequest::new_csv_input_output(
"select * from S3Object",
CsvInputSerialization {
compression_type: None,
allow_quoted_record_delimiter: false,
comments: None,
field_delimiter: None,
file_header_info: Some(FileHeaderInfo::USE),
quote_character: None,
quote_escape_character: None,
record_delimiter: None,
},
CsvOutputSerialization {
field_delimiter: None,
quote_character: None,
quote_escape_character: None,
quote_fields: Some(QuoteFields::ASNEEDED),
record_delimiter: None,
},
)
.unwrap();
let mut resp = self
.client
.select_object_content(
&SelectObjectContentArgs::new(&self.test_bucket, &object_name, &request).unwrap(),
)
.await
.unwrap();
let mut got = String::new();
let mut buf = [0_u8; 512];
loop {
let size = resp.read(&mut buf).await.unwrap();
if size == 0 {
break;
}
got += &String::from_utf8(buf[..size].to_vec()).unwrap();
}
assert_eq!(got, data);
self.client
.remove_object(&RemoveObjectArgs::new(&self.test_bucket, &object_name).unwrap())
.await
.unwrap();
}
}
#[tokio::main]
#[test]
async fn s3_tests() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let host = std::env::var("SERVER_ENDPOINT")?;
let access_key = std::env::var("ACCESS_KEY")?;
let secret_key = std::env::var("SECRET_KEY")?;
let secure = std::env::var("ENABLE_HTTPS").is_ok();
let ignore_cert_check = std::env::var("IGNORE_CERT_CHECK").is_ok();
let region = std::env::var("SERVER_REGION").ok();
let mut burl = BaseUrl::from_string(host).unwrap();
burl.https = secure;
if let Some(v) = region {
burl.region = v;
}
let provider = StaticProvider::new(&access_key, &secret_key, None);
let client = Client::new(burl.clone(), Some(&provider));
let test_bucket = rand_bucket_name();
client
.make_bucket(&MakeBucketArgs::new(&test_bucket).unwrap())
.await
.unwrap();
let ctest = ClientTest::new(&client, &test_bucket);
println!("make_bucket() + bucket_exists() + remove_bucket()");
ctest.bucket_exists().await;
println!("list_buckets()");
ctest.list_buckets().await;
println!("put_object() + stat_object() + remove_object()");
ctest.put_object().await;
println!("[Multipart] put_object()");
ctest.put_object_multipart().await;
println!("get_object()");
ctest.get_object().await;
println!("remove_objects()");
ctest.remove_objects().await;
println!("list_objects()");
ctest.list_objects().await;
println!("select_object_content()");
ctest.select_object_content().await;
client
.remove_bucket(&RemoveBucketArgs::new(&test_bucket).unwrap())
.await
.unwrap();
Ok(())
}
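The harness reads its configuration from the environment variables above; an assumed invocation (example values, not a documented command) looks like:
SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minioadmin SECRET_KEY=minioadmin ENABLE_HTTPS=1 cargo test -- --nocapture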