Remove deprecated downloads, uploads, and multipart API (#137)

Add From<&'static str> for ObjectContent and remove deprecated API
This commit is contained in:
Aditya Manthramurthy 2025-03-14 13:39:24 -07:00 committed by GitHub
parent 46bda8f960
commit c9172cf189
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 151 additions and 736 deletions

View File

@ -1829,126 +1829,3 @@ impl<'a> PostPolicy<'a> {
Ok(data)
}
}
/// Argument for [download_object()](crate::s3::client::Client::download_object) API
/// Argument for [download_object()](crate::s3::client::Client::download_object) API
pub struct DownloadObjectArgs<'a> {
    /// Extra HTTP headers to send with the request.
    pub extra_headers: Option<&'a Multimap>,
    /// Extra query parameters to send with the request.
    pub extra_query_params: Option<&'a Multimap>,
    /// Optional region of the bucket.
    pub region: Option<&'a str>,
    /// Name of the bucket containing the object.
    pub bucket: &'a str,
    /// Key of the object to download.
    pub object: &'a str,
    /// Specific object version to download, if any.
    pub version_id: Option<&'a str>,
    /// SSE-C customer key for objects encrypted with a customer-provided key.
    pub ssec: Option<&'a SseCustomerKey>,
    /// Local file path the object is written to.
    pub filename: &'a str,
    /// When `true`, an existing file at `filename` is replaced.
    pub overwrite: bool,
}
impl<'a> DownloadObjectArgs<'a> {
/// Returns argument for [download_object()](crate::s3::client::Client::download_object) API with given bucket name, object name and filename
///
/// # Examples
///
/// ```
/// use minio::s3::args::*;
/// let args = DownloadObjectArgs::new("my-bucket", "my-object", "/path/to/my/object/download").unwrap();
/// ```
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
filename: &'a str,
) -> Result<DownloadObjectArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
Ok(DownloadObjectArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
version_id: None,
ssec: None,
filename,
overwrite: false,
})
}
}
/// Argument for [upload_object()](crate::s3::client::Client::upload_object) API
/// Argument for [upload_object()](crate::s3::client::Client::upload_object) API
pub struct UploadObjectArgs<'a> {
    /// Extra HTTP headers to send with the request.
    pub extra_headers: Option<&'a Multimap>,
    /// Extra query parameters to send with the request.
    pub extra_query_params: Option<&'a Multimap>,
    /// Optional region of the bucket.
    pub region: Option<&'a str>,
    /// Name of the destination bucket.
    pub bucket: &'a str,
    /// Key of the object to create.
    pub object: &'a str,
    /// Additional headers to send with the upload.
    pub headers: Option<&'a Multimap>,
    /// User-defined metadata for the object.
    pub user_metadata: Option<&'a Multimap>,
    /// Server-side encryption configuration, if any.
    pub sse: Option<&'a (dyn Sse + Send + Sync)>,
    /// Tags to set on the object.
    pub tags: Option<&'a HashMap<String, String>>,
    /// Retention configuration to apply to the object.
    pub retention: Option<&'a Retention>,
    /// Whether to enable legal hold on the object.
    pub legal_hold: bool,
    /// Size of the file; populated from filesystem metadata by `new()`.
    pub object_size: Option<usize>,
    /// Part size derived from the object size by `new()`.
    pub part_size: usize,
    /// Number of parts derived from the object size by `new()`.
    pub part_count: i16,
    /// Content type of the object; defaults to "application/octet-stream".
    pub content_type: &'a str,
    /// Local file path to upload.
    pub filename: &'a str,
}
impl<'a> UploadObjectArgs<'a> {
/// Returns argument for [upload_object()](crate::s3::client::Client::upload_object) API with given bucket name, object name and filename
///
/// # Examples
///
/// ```no_run
/// use minio::s3::args::*;
/// let args = UploadObjectArgs::new("my-bucket", "my-object", "asiaphotos-2015.zip").unwrap();
/// ```
pub fn new(
bucket_name: &'a str,
object_name: &'a str,
filename: &'a str,
) -> Result<UploadObjectArgs<'a>, Error> {
check_bucket_name(bucket_name, true)?;
if object_name.is_empty() {
return Err(Error::InvalidObjectName(String::from(
"object name cannot be empty",
)));
}
let meta = std::fs::metadata(filename)?;
if !meta.is_file() {
return Err(Error::IOError(std::io::Error::new(
std::io::ErrorKind::Other,
"not a file",
)));
}
let object_size = Some(meta.len() as usize);
let (psize, part_count) = calc_part_info(object_size, None)?;
Ok(UploadObjectArgs {
extra_headers: None,
extra_query_params: None,
region: None,
bucket: bucket_name,
object: object_name,
headers: None,
user_metadata: None,
sse: None,
tags: None,
retention: None,
legal_hold: false,
object_size,
part_size: psize,
part_count,
content_type: "application/octet-stream",
filename,
})
}
}

View File

@ -111,6 +111,14 @@ impl From<&'static [u8]> for ObjectContent {
}
}
impl From<&'static str> for ObjectContent {
    /// Builds in-memory object content from a static string slice by
    /// wrapping its bytes in a [`SegmentedBytes`] buffer.
    fn from(value: &'static str) -> Self {
        ObjectContent(ObjectContentInner::Bytes(SegmentedBytes::from(
            Bytes::from(value),
        )))
    }
}
impl From<&Path> for ObjectContent {
fn from(value: &Path) -> Self {
ObjectContent(ObjectContentInner::FilePath(value.to_path_buf()))

View File

@ -28,7 +28,7 @@ use crate::s3::http::{BaseUrl, Url};
use crate::s3::response::*;
use crate::s3::signer::{presign_v4, sign_v4_s3};
use crate::s3::sse::SseCustomerKey;
use crate::s3::types::{Directive, ObjectLockConfig, Part, RetentionMode};
use crate::s3::types::{Directive, ObjectLockConfig, RetentionMode};
use crate::s3::utils::{
Multimap, from_iso8601utc, get_default_text, get_option_text, get_text, md5sum_hash,
md5sum_hash_sb, merge, sha256_hash_sb, to_amz_date, to_iso8601utc, utc_now,
@ -40,7 +40,6 @@ use dashmap::DashMap;
use hyper::http::Method;
use reqwest::Body;
use reqwest::header::HeaderMap;
use tokio::fs;
use xmltree::Element;
@ -72,6 +71,7 @@ mod set_bucket_tags;
mod set_bucket_versioning;
use super::builders::{ListBuckets, SegmentedBytes};
use super::types::{PartInfo, S3Api};
/// Client Builder manufactures a Client using given parameters.
#[derive(Debug, Default)]
@ -632,44 +632,6 @@ impl Client {
Ok(location)
}
/// Executes [AbortMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html) S3 API
///
/// Sends a DELETE for the object with the `uploadId` query parameter and
/// echoes the request identifiers back in the response struct.
pub async fn abort_multipart_upload_old(
    &self,
    args: &AbortMultipartUploadArgs<'_>,
) -> Result<AbortMultipartUploadResponse, Error> {
    let region = self.get_region(args.bucket, args.region).await?;

    let mut headers = Multimap::new();
    if let Some(extra) = &args.extra_headers {
        merge(&mut headers, extra);
    }

    let mut query_params = Multimap::new();
    if let Some(extra) = &args.extra_query_params {
        merge(&mut query_params, extra);
    }
    query_params.insert("uploadId".to_string(), args.upload_id.to_string());

    let resp = self
        .execute(
            Method::DELETE,
            &region,
            &mut headers,
            &query_params,
            Some(args.bucket),
            Some(args.object),
            None,
        )
        .await?;

    Ok(AbortMultipartUploadResponse {
        headers: resp.headers().clone(),
        region,
        bucket: args.bucket.to_string(),
        object_name: args.object.to_string(),
        upload_id: args.upload_id.to_string(),
    })
}
pub async fn bucket_exists(&self, args: &BucketExistsArgs<'_>) -> Result<bool, Error> {
let region;
match self.get_region(args.bucket, args.region).await {
@ -719,68 +681,6 @@ impl Client {
}
}
/// Executes [CompleteMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html) S3 API
///
/// Builds the `<CompleteMultipartUpload>` XML body from `args.parts`,
/// POSTs it with the `uploadId` query parameter, and parses the XML
/// response into a [`CompleteMultipartUploadResponse`].
pub async fn complete_multipart_upload_old(
    &self,
    args: &CompleteMultipartUploadArgs<'_>,
) -> Result<CompleteMultipartUploadResponse, Error> {
    let region = self.get_region(args.bucket, args.region).await?;

    // Assemble the request body: one <Part> entry per uploaded part,
    // carrying its part number and ETag.
    let mut data = String::from("<CompleteMultipartUpload>");
    for part in args.parts.iter() {
        let s = format!(
            "<Part><PartNumber>{}</PartNumber><ETag>{}</ETag></Part>",
            part.number, part.etag
        );
        data.push_str(&s);
    }
    data.push_str("</CompleteMultipartUpload>");
    let data: Bytes = data.into();

    let mut headers = Multimap::new();
    if let Some(v) = &args.extra_headers {
        merge(&mut headers, v);
    }
    // Content-MD5 is computed over the XML body so the server can verify
    // the payload was not corrupted in transit.
    headers.insert(
        String::from("Content-Type"),
        String::from("application/xml"),
    );
    headers.insert(String::from("Content-MD5"), md5sum_hash(data.as_ref()));

    let mut query_params = Multimap::new();
    if let Some(v) = &args.extra_query_params {
        merge(&mut query_params, v);
    }
    query_params.insert(String::from("uploadId"), args.upload_id.to_string());

    let resp = self
        .execute(
            Method::POST,
            &region,
            &mut headers,
            &query_params,
            Some(args.bucket),
            Some(args.object),
            Some(data),
        )
        .await?;

    let header_map = resp.headers().clone();
    let body = resp.bytes().await?;
    let root = Element::parse(body.reader())?;
    Ok(CompleteMultipartUploadResponse {
        headers: header_map.clone(),
        bucket_name: get_text(&root, "Bucket")?,
        object_name: get_text(&root, "Key")?,
        location: get_text(&root, "Location")?,
        // The server returns the ETag quoted; strip the surrounding quotes.
        etag: get_text(&root, "ETag")?.trim_matches('"').to_string(),
        version_id: match header_map.get("x-amz-version-id") {
            Some(v) => Some(v.to_str()?.to_string()),
            None => None,
        },
    })
}
async fn calculate_part_count(&self, sources: &mut [ComposeSource<'_>]) -> Result<u16, Error> {
let mut object_size = 0_usize;
let mut i = 0;
@ -907,12 +807,14 @@ impl Client {
let headers = args.get_headers();
let mut cmu_args = CreateMultipartUploadArgs::new(args.bucket, args.object)?;
cmu_args.extra_query_params = args.extra_query_params;
cmu_args.region = args.region;
cmu_args.headers = Some(&headers);
let resp = self.create_multipart_upload_old(&cmu_args).await?;
upload_id.push_str(&resp.upload_id);
let cmu = self
.create_multipart_upload(args.bucket, args.object)
.extra_query_params(args.extra_query_params.cloned())
.region(args.region.map(String::from))
.extra_headers(Some(headers))
.send()
.await?;
upload_id.push_str(&cmu.upload_id);
let mut part_number = 0_u16;
let ssec_headers = match args.sse {
@ -923,7 +825,7 @@ impl Client {
_ => Multimap::new(),
};
let mut parts: Vec<Part> = Vec::new();
let mut parts: Vec<PartInfo> = Vec::new();
for source in args.sources.iter() {
let mut size = source.get_object_size();
if let Some(l) = source.length {
@ -961,9 +863,10 @@ impl Client {
upc_args.region = args.region;
let resp = self.upload_part_copy(&upc_args).await?;
parts.push(Part {
parts.push(PartInfo {
number: part_number,
etag: resp.etag,
size: size as u64,
});
} else {
while size > 0 {
@ -991,9 +894,10 @@ impl Client {
upc_args.region = args.region;
let resp = self.upload_part_copy(&upc_args).await?;
parts.push(Part {
parts.push(PartInfo {
number: part_number,
etag: resp.etag,
size: size as u64,
});
offset += length;
@ -1002,10 +906,12 @@ impl Client {
}
}
let mut cmu_args =
CompleteMultipartUploadArgs::new(args.bucket, args.object, upload_id, &parts)?;
cmu_args.region = args.region;
self.complete_multipart_upload_old(&cmu_args).await
let rsp = self
.complete_multipart_upload(args.bucket, args.object, upload_id, parts)
.region(args.region.map(String::from))
.send()
.await?;
Ok(PutObjectBaseResponse::from(rsp))
}
pub async fn compose_object(
@ -1021,8 +927,9 @@ impl Client {
let mut upload_id = String::new();
let res = self.do_compose_object(args, &mut upload_id).await;
if res.is_err() && !upload_id.is_empty() {
let amuargs = &AbortMultipartUploadArgs::new(args.bucket, args.object, &upload_id)?;
self.abort_multipart_upload_old(amuargs).await?;
self.abort_multipart_upload(args.bucket, args.object, &upload_id)
.send()
.await?;
}
res
@ -1144,54 +1051,6 @@ impl Client {
})
}
/// Executes [CreateMultipartUpload](https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html) S3 API
///
/// Sends a POST with the `uploads` query parameter and parses the
/// returned XML for the new upload id.
pub async fn create_multipart_upload_old(
    &self,
    args: &CreateMultipartUploadArgs<'_>,
) -> Result<CreateMultipartUploadResponse, Error> {
    let region = self.get_region(args.bucket, args.region).await?;

    let mut headers = Multimap::new();
    if let Some(extra) = &args.extra_headers {
        merge(&mut headers, extra);
    }
    // Default the content type unless the caller supplied one.
    if !headers.contains_key("Content-Type") {
        headers.insert(
            "Content-Type".to_string(),
            "application/octet-stream".to_string(),
        );
    }

    let mut query_params = Multimap::new();
    if let Some(extra) = &args.extra_query_params {
        merge(&mut query_params, extra);
    }
    query_params.insert("uploads".to_string(), String::new());

    let resp = self
        .execute(
            Method::POST,
            &region,
            &mut headers,
            &query_params,
            Some(args.bucket),
            Some(args.object),
            None,
        )
        .await?;

    let header_map = resp.headers().clone();
    let body = resp.bytes().await?;
    let root = Element::parse(body.reader())?;

    Ok(CreateMultipartUploadResponse {
        headers: header_map,
        region,
        bucket: args.bucket.to_string(),
        object_name: args.object.to_string(),
        upload_id: get_text(&root, "UploadId")?,
    })
}
pub async fn disable_object_legal_hold(
&self,
args: &DisableObjectLegalHoldArgs<'_>,
@ -1294,55 +1153,6 @@ impl Client {
})
}
/// Downloads an object into a local file (deprecated API).
///
/// Fetches the object via `get_object_old` and writes the streamed
/// response body to `args.filename`, creating missing parent directories
/// first. With `overwrite == false` the destination must not already
/// exist (`create_new` fails otherwise); with `overwrite == true` an
/// existing file is truncated.
pub async fn download_object(
    &self,
    args: &DownloadObjectArgs<'_>,
) -> Result<DownloadObjectResponse, Error> {
    let mut resp = self
        .get_object_old(&GetObjectArgs {
            extra_headers: args.extra_headers,
            extra_query_params: args.extra_query_params,
            region: args.region,
            bucket: args.bucket,
            object: args.object,
            version_id: args.version_id,
            ssec: args.ssec,
            // Full-object download: no range or conditional constraints.
            offset: None,
            length: None,
            match_etag: None,
            not_match_etag: None,
            modified_since: None,
            unmodified_since: None,
        })
        .await?;

    // Ensure the target directory exists before creating the file.
    let path = Path::new(&args.filename);
    if let Some(parent_dir) = path.parent() {
        if !parent_dir.exists() {
            fs::create_dir_all(parent_dir).await?;
        }
    }

    let mut file = match args.overwrite {
        true => File::create(args.filename)?,
        false => File::options()
            .write(true)
            .truncate(true)
            .create_new(true)
            .open(args.filename)?,
    };

    // Stream the body chunk by chunk to avoid buffering the whole object.
    // NOTE(review): these writes use blocking std file I/O inside an async
    // fn — confirm this is acceptable for the target runtime.
    while let Some(v) = resp.chunk().await? {
        file.write_all(&v)?;
    }
    file.sync_all()?;

    Ok(DownloadObjectResponse {
        headers: resp.headers().clone(),
        // Reports the caller-supplied region (empty when not provided).
        region: args.region.map_or(String::new(), String::from),
        bucket_name: args.bucket.to_string(),
        object_name: args.object.to_string(),
        version_id: args.version_id.as_ref().map(|v| v.to_string()),
    })
}
pub async fn enable_object_legal_hold(
&self,
args: &EnableObjectLegalHoldArgs<'_>,
@ -1386,42 +1196,6 @@ impl Client {
})
}
/// Fetches an object and returns the raw HTTP response (deprecated API).
///
/// Rejects SSE-C requests over plain HTTP, merges caller-supplied and
/// conditional headers, and issues a GET (optionally for a specific
/// `versionId`).
pub async fn get_object_old(
    &self,
    args: &GetObjectArgs<'_>,
) -> Result<reqwest::Response, Error> {
    // Customer-provided encryption keys must only travel over TLS.
    if args.ssec.is_some() && !self.base_url.https {
        return Err(Error::SseTlsRequired(None));
    }

    let region = self.get_region(args.bucket, args.region).await?;

    let mut headers = Multimap::new();
    if let Some(extra) = &args.extra_headers {
        merge(&mut headers, extra);
    }
    merge(&mut headers, &args.get_headers());

    let mut query_params = Multimap::new();
    if let Some(extra) = &args.extra_query_params {
        merge(&mut query_params, extra);
    }
    if let Some(vid) = args.version_id {
        query_params.insert("versionId".to_string(), vid.to_string());
    }

    self.execute(
        Method::GET,
        &region,
        &mut headers,
        &query_params,
        Some(args.bucket),
        Some(args.object),
        None,
    )
    .await
}
pub async fn get_object_lock_config(
&self,
args: &GetObjectLockConfigArgs<'_>,
@ -1797,172 +1571,6 @@ impl Client {
})
}
/// Reads up to `size` bytes from `reader` into the front of `buf`.
///
/// Loops until either `size` bytes have been accumulated or the reader
/// reports end-of-stream (a zero-length read). Returns the number of
/// bytes actually read, which is less than `size` only at end-of-stream.
///
/// # Errors
///
/// Propagates I/O errors from the reader, except
/// `std::io::ErrorKind::Interrupted`, which is retried as recommended by
/// the `std::io::Read` documentation (the previous version propagated it
/// and could fail a part on a spurious EINTR).
fn read_part(
    reader: &mut dyn std::io::Read,
    buf: &mut [u8],
    size: usize,
) -> Result<usize, Error> {
    let mut bytes_read = 0_usize;
    while bytes_read < size {
        match reader.read(&mut buf[bytes_read..size]) {
            // A zero-byte read means the reader reached end-of-stream.
            Ok(0) => break,
            Ok(n) => bytes_read += n,
            // Retry interrupted reads instead of failing the whole part.
            Err(e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
            Err(e) => return Err(e.into()),
        }
    }
    Ok(bytes_read)
}
/// Uploads an object by reading `args.stream` part by part (deprecated path).
///
/// With a known part count (`args.part_count > 0`) every part except the
/// last has exactly `args.part_size` bytes. With an unknown count, one
/// extra lookahead byte is read per part to detect the end of the
/// stream; in that mode `buf` holds `part_size + 1` bytes (see
/// `put_object_old`). Single-part uploads go through `put_object_api`;
/// otherwise a multipart upload is created lazily and its id is written
/// into `upload_id` so the caller can abort it on failure.
///
/// Fix: the lookahead byte lives at `buf[part_size]` — the previous
/// `buf[part_size + 1]` indexed one past the end of the buffer and would
/// panic on any multi-part upload of unknown size.
async fn do_put_object(
    &self,
    args: &mut PutObjectArgs<'_>,
    buf: &mut [u8],
    upload_id: &mut String,
) -> Result<PutObjectResponseOld, Error> {
    let mut headers = args.get_headers();
    if !headers.contains_key("Content-Type") {
        if args.content_type.is_empty() {
            headers.insert(
                String::from("Content-Type"),
                String::from("application/octet-stream"),
            );
        } else {
            headers.insert(String::from("Content-Type"), args.content_type.to_string());
        }
    }

    let mut uploaded_size = 0_usize;
    let mut part_number = 0_i16;
    let mut stop = false;
    // Holds the single lookahead byte carried over between parts when the
    // part count is unknown.
    let mut one_byte: Vec<u8> = Vec::new();
    let mut parts: Vec<Part> = Vec::new();
    let object_size = &args.object_size.unwrap();
    let mut part_size = args.part_size;
    let mut part_count = args.part_count;

    while !stop {
        part_number += 1;
        let mut bytes_read = 0_usize;
        if args.part_count > 0 {
            // Known part count: the final part gets whatever remains.
            if part_number == args.part_count {
                part_size = object_size - uploaded_size;
                stop = true;
            }
            bytes_read = Client::read_part(&mut args.stream, buf, part_size)?;
            if bytes_read != part_size {
                return Err(Error::InsufficientData(part_size as u64, bytes_read as u64));
            }
        } else {
            // Unknown part count: try to read one byte more than a part to
            // find out whether this is the final part.
            let mut size = part_size + 1;
            let newbuf = match one_byte.len() == 1 {
                true => {
                    // Re-use the lookahead byte saved from the previous part.
                    buf[0] = one_byte.pop().unwrap();
                    size -= 1;
                    bytes_read = 1;
                    &mut buf[1..]
                }
                false => buf,
            };
            let n = Client::read_part(&mut args.stream, newbuf, size)?;
            bytes_read += n;
            // If bytes read is less than or equals to part size, then we have reached last part.
            if bytes_read <= part_size {
                part_count = part_number;
                part_size = bytes_read;
                stop = true;
            } else {
                // Save the extra byte for the next part. It sits at index
                // `part_size`: `buf` holds exactly `part_size + 1` bytes in
                // this mode, so `part_size + 1` would be out of bounds.
                one_byte.push(buf[part_size]);
            }
        }

        let data = &buf[0..part_size];
        uploaded_size += part_size;

        if part_count == 1_i16 {
            // Single part: plain PutObject, no multipart bookkeeping needed.
            let mut poaargs = PutObjectApiArgs::new(args.bucket, args.object, data)?;
            poaargs.extra_query_params = args.extra_query_params;
            poaargs.region = args.region;
            poaargs.headers = Some(&headers);
            return self.put_object_api(&poaargs).await;
        }

        if upload_id.is_empty() {
            // Lazily create the multipart upload on the first part.
            let mut cmuargs = CreateMultipartUploadArgs::new(args.bucket, args.object)?;
            cmuargs.extra_query_params = args.extra_query_params;
            cmuargs.region = args.region;
            cmuargs.headers = Some(&headers);
            let resp = self.create_multipart_upload_old(&cmuargs).await?;
            upload_id.push_str(&resp.upload_id);
        }

        let mut upargs = UploadPartArgs::new(
            args.bucket,
            args.object,
            upload_id,
            part_number as u16,
            data,
        )?;
        upargs.region = args.region;
        // Only SSE-C headers are sent per part; other SSE types were sent
        // on the create-multipart-upload request via `headers`.
        let ssec_headers = match args.sse {
            Some(v) => match v.as_any().downcast_ref::<SseCustomerKey>() {
                Some(_) => v.headers(),
                _ => Multimap::new(),
            },
            _ => Multimap::new(),
        };
        upargs.headers = Some(&ssec_headers);
        let resp = self.upload_part_old(&upargs).await?;
        parts.push(Part {
            number: part_number as u16,
            etag: resp.etag.clone(),
        });
    }

    let mut cmuargs =
        CompleteMultipartUploadArgs::new(args.bucket, args.object, upload_id, &parts)?;
    cmuargs.region = args.region;
    self.complete_multipart_upload_old(&cmuargs).await
}
/// Uploads an object from `args.stream` (deprecated API).
///
/// Validates the SSE/TLS combination, sizes the scratch buffer, runs the
/// actual upload via `do_put_object`, and aborts any multipart upload
/// that was started if the upload fails midway.
pub async fn put_object_old(
    &self,
    args: &mut PutObjectArgs<'_>,
) -> Result<PutObjectResponseOld, Error> {
    if let Some(sse) = &args.sse {
        if sse.tls_required() && !self.base_url.https {
            return Err(Error::SseTlsRequired(None));
        }
    }

    // With a known part count the buffer holds exactly one part; otherwise
    // one extra byte is reserved for end-of-stream lookahead.
    let bufsize = if args.part_count > 0 {
        args.part_size
    } else {
        args.part_size + 1
    };
    let mut buf = vec![0_u8; bufsize];

    let mut upload_id = String::new();
    let res = self.do_put_object(args, &mut buf, &mut upload_id).await;
    drop(buf);

    // Clean up a half-finished multipart upload on failure.
    if res.is_err() && !upload_id.is_empty() {
        let abort_args = AbortMultipartUploadArgs::new(args.bucket, args.object, &upload_id)?;
        self.abort_multipart_upload_old(&abort_args).await?;
    }
    res
}
/// Executes [PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) S3 API
pub async fn put_object_api(
&self,
@ -2282,33 +1890,6 @@ impl Client {
StatObjectResponse::new(resp.headers(), &region, args.bucket, args.object)
}
/// Uploads a local file as an object (deprecated API).
///
/// Opens `args.filename` and delegates to `put_object_old`, forwarding
/// every argument field and streaming the open file as the object body.
///
/// NOTE(review): `File::open` is a blocking call inside an async fn —
/// confirm this is acceptable for the target runtime.
pub async fn upload_object(
    &self,
    args: &UploadObjectArgs<'_>,
) -> Result<UploadObjectResponse, Error> {
    let mut file = File::open(args.filename)?;
    self.put_object_old(&mut PutObjectArgs {
        extra_headers: args.extra_headers,
        extra_query_params: args.extra_query_params,
        region: args.region,
        bucket: args.bucket,
        object: args.object,
        headers: args.headers,
        user_metadata: args.user_metadata,
        sse: args.sse,
        tags: args.tags,
        retention: args.retention,
        legal_hold: args.legal_hold,
        // Sizing fields were precomputed by UploadObjectArgs::new().
        object_size: args.object_size,
        part_size: args.part_size,
        part_count: args.part_count,
        content_type: args.content_type,
        stream: &mut file,
    })
    .await
}
/// Executes [UploadPart](https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html) S3 API
pub async fn upload_part_old(
&self,

View File

@ -139,6 +139,19 @@ pub struct PutObjectBaseResponse {
pub version_id: Option<String>,
}
impl From<PutObjectResponse> for PutObjectBaseResponse {
    /// Converts a `PutObjectResponse` into the shared base response type
    /// by carrying over every field unchanged.
    fn from(response: PutObjectResponse) -> Self {
        PutObjectBaseResponse {
            headers: response.headers,
            bucket_name: response.bucket_name,
            object_name: response.object_name,
            location: response.location,
            etag: response.etag,
            version_id: response.version_id,
        }
    }
}
/// Response of [complete_multipart_upload()](crate::s3::client::Client::complete_multipart_upload) API
pub type CompleteMultipartUploadResponse = PutObjectBaseResponse;

View File

@ -15,8 +15,10 @@
mod common;
use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name};
use minio::s3::args::{ComposeObjectArgs, ComposeSource, PutObjectArgs, StatObjectArgs};
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use common::RandSrc;
use minio::s3::args::{ComposeObjectArgs, ComposeSource, StatObjectArgs};
use minio::s3::builders::ObjectContent;
use minio::s3::types::S3Api;
#[tokio::test(flavor = "multi_thread", worker_threads = 10)]
@ -25,18 +27,12 @@ async fn compose_object() {
let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await;
let src_object_name = rand_object_name();
let size = 16_usize;
let size = 16_u64;
let r = RandSrc::new(size);
let put_content = ObjectContent::new_from_stream(r, Some(size));
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
&bucket_name,
&src_object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.put_object_content(&bucket_name, &src_object_name, put_content)
.send()
.await
.unwrap();

View File

@ -15,8 +15,10 @@
mod common;
use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name};
use minio::s3::args::{CopyObjectArgs, CopySource, PutObjectArgs, StatObjectArgs};
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use common::RandSrc;
use minio::s3::args::{CopyObjectArgs, CopySource, StatObjectArgs};
use minio::s3::builders::ObjectContent;
use minio::s3::types::S3Api;
#[tokio::test(flavor = "multi_thread", worker_threads = 10)]
@ -25,18 +27,11 @@ async fn copy_object() {
let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await;
let src_object_name = rand_object_name();
let size = 16_usize;
let size = 16_u64;
let content = ObjectContent::new_from_stream(RandSrc::new(size), Some(size));
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
&bucket_name,
&src_object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.put_object_content(&bucket_name, &src_object_name, content)
.send()
.await
.unwrap();
@ -58,7 +53,7 @@ async fn copy_object() {
.stat_object(&StatObjectArgs::new(&bucket_name, &object_name).unwrap())
.await
.unwrap();
assert_eq!(resp.size, size);
assert_eq!(resp.size as u64, size);
ctx.client
.remove_object(&bucket_name, object_name.as_str())

View File

@ -17,9 +17,7 @@ mod common;
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use bytes::Bytes;
use minio::s3::args::{GetObjectArgs, PutObjectArgs};
use minio::s3::types::S3Api;
use std::io::BufReader;
#[tokio::test(flavor = "multi_thread", worker_threads = 10)]
async fn get_object() {
@ -47,37 +45,3 @@ async fn get_object() {
.await
.unwrap();
}
#[tokio::test(flavor = "multi_thread", worker_threads = 10)]
async fn get_object_old() {
let ctx = TestContext::new_from_env();
let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await;
let object_name = rand_object_name();
let data = "hello, world";
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
&bucket_name,
&object_name,
&mut BufReader::new(data.as_bytes()),
Some(data.len()),
None,
)
.unwrap(),
)
.await
.unwrap();
let resp = ctx
.client
.get_object_old(&GetObjectArgs::new(&bucket_name, &object_name).unwrap())
.await
.unwrap();
let got = resp.text().await.unwrap();
assert_eq!(got, data);
ctx.client
.remove_object(&bucket_name, object_name.as_str())
.send()
.await
.unwrap();
}

View File

@ -15,8 +15,7 @@
mod common;
use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name};
use minio::s3::args::PutObjectArgs;
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use minio::s3::builders::ObjectToDelete;
use minio::s3::types::ToStream;
use tokio_stream::StreamExt;
@ -30,18 +29,9 @@ async fn list_objects() {
let mut names: Vec<String> = Vec::new();
for _ in 1..=N_OBJECTS {
let object_name = rand_object_name();
let size = 0_usize;
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.put_object_content(&bucket_name, &object_name, "")
.send()
.await
.unwrap();
names.push(object_name);

View File

@ -15,10 +15,11 @@
mod common;
use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name};
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use async_std::task;
use common::RandSrc;
use minio::s3::Client;
use minio::s3::args::PutObjectArgs;
use minio::s3::builders::ObjectContent;
use minio::s3::creds::StaticProvider;
use minio::s3::types::{NotificationRecords, S3Api};
use tokio::sync::mpsc;
@ -81,18 +82,14 @@ async fn listen_bucket_notification() {
let spawned_task = task::spawn(listen_task());
task::sleep(std::time::Duration::from_millis(200)).await;
let size = 16_usize;
let size = 16_u64;
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
.put_object_content(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
ObjectContent::new_from_stream(RandSrc::new(size), Some(size)),
)
.send()
.await
.unwrap();

View File

@ -15,10 +15,10 @@
mod common;
use crate::common::{CleanupGuard, RandReader, TestContext, rand_bucket_name, rand_object_name};
use minio::s3::args::{
GetObjectRetentionArgs, MakeBucketArgs, PutObjectArgs, SetObjectRetentionArgs,
};
use crate::common::{CleanupGuard, TestContext, rand_bucket_name, rand_object_name};
use common::RandSrc;
use minio::s3::args::{GetObjectRetentionArgs, MakeBucketArgs, SetObjectRetentionArgs};
use minio::s3::builders::ObjectContent;
use minio::s3::types::{RetentionMode, S3Api};
use minio::s3::utils::{to_iso8601utc, utc_now};
@ -34,19 +34,16 @@ async fn object_retention() {
let object_name = rand_object_name();
let size = 16_usize;
let size = 16_u64;
let obj_resp = ctx
.client
.put_object_old(
&mut PutObjectArgs::new(
.put_object_content(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
ObjectContent::new_from_stream(RandSrc::new(size), Some(size)),
)
.send()
.await
.unwrap();

View File

@ -15,8 +15,10 @@
mod common;
use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name};
use minio::s3::args::{DeleteObjectTagsArgs, GetObjectTagsArgs, PutObjectArgs, SetObjectTagsArgs};
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use common::RandSrc;
use minio::s3::args::{DeleteObjectTagsArgs, GetObjectTagsArgs, SetObjectTagsArgs};
use minio::s3::builders::ObjectContent;
use minio::s3::types::S3Api;
use std::collections::HashMap;
@ -26,18 +28,14 @@ async fn object_tags() {
let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await;
let object_name = rand_object_name();
let size = 16_usize;
let size = 16_u64;
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
.put_object_content(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
ObjectContent::new_from_stream(RandSrc::new(size), Some(size)),
)
.send()
.await
.unwrap();

View File

@ -15,9 +15,9 @@
mod common;
use crate::common::{RandReader, RandSrc, TestContext, create_bucket_helper, rand_object_name};
use crate::common::{RandSrc, TestContext, create_bucket_helper, rand_object_name};
use http::header;
use minio::s3::args::{PutObjectArgs, StatObjectArgs};
use minio::s3::args::StatObjectArgs;
use minio::s3::builders::ObjectContent;
use minio::s3::error::Error;
use minio::s3::types::S3Api;
@ -29,18 +29,14 @@ async fn put_object() {
let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await;
let object_name = rand_object_name();
let size = 16_usize;
let size = 16_u64;
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
.put_object_content(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
ObjectContent::new_from_stream(RandSrc::new(size), Some(size)),
)
.send()
.await
.unwrap();
let resp = ctx
@ -50,7 +46,7 @@ async fn put_object() {
.unwrap();
assert_eq!(resp.bucket_name, bucket_name);
assert_eq!(resp.object_name, object_name);
assert_eq!(resp.size, size);
assert_eq!(resp.size as u64, size);
ctx.client
.remove_object(&bucket_name, object_name.as_str())
.send()
@ -75,18 +71,15 @@ async fn put_object_multipart() {
let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await;
let object_name = rand_object_name();
let size: usize = 16 + 5 * 1024 * 1024;
let size: u64 = 16 + 5 * 1024 * 1024;
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
.put_object_content(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
ObjectContent::new_from_stream(RandSrc::new(size), Some(size)),
)
.send()
.await
.unwrap();
let resp = ctx
@ -96,7 +89,7 @@ async fn put_object_multipart() {
.unwrap();
assert_eq!(resp.bucket_name, bucket_name);
assert_eq!(resp.object_name, object_name);
assert_eq!(resp.size, size);
assert_eq!(resp.size as u64, size);
ctx.client
.remove_object(&bucket_name, object_name.as_str())
.send()

View File

@ -15,8 +15,7 @@
mod common;
use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name};
use minio::s3::args::PutObjectArgs;
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use minio::s3::builders::ObjectToDelete;
use minio::s3::types::ToStream;
use tokio_stream::StreamExt;
@ -29,18 +28,9 @@ async fn remove_objects() {
let mut names: Vec<String> = Vec::new();
for _ in 1..=3 {
let object_name = rand_object_name();
let size = 0_usize;
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
&bucket_name,
&object_name,
&mut RandReader::new(size),
Some(size),
None,
)
.unwrap(),
)
.put_object_content(&bucket_name, &object_name, "")
.send()
.await
.unwrap();
names.push(object_name);

View File

@ -16,12 +16,11 @@
mod common;
use crate::common::{TestContext, create_bucket_helper, rand_object_name};
use minio::s3::args::{PutObjectArgs, SelectObjectContentArgs};
use minio::s3::args::SelectObjectContentArgs;
use minio::s3::types::{
CsvInputSerialization, CsvOutputSerialization, FileHeaderInfo, QuoteFields, S3Api,
SelectRequest,
};
use std::io::BufReader;
#[tokio::test(flavor = "multi_thread", worker_threads = 10)]
async fn select_object_content() {
@ -38,16 +37,8 @@ async fn select_object_content() {
let body = String::from("Year,Make,Model,Description,Price\n") + &data;
ctx.client
.put_object_old(
&mut PutObjectArgs::new(
&bucket_name,
&object_name,
&mut BufReader::new(body.as_bytes()),
Some(body.len()),
None,
)
.unwrap(),
)
.put_object_content(&bucket_name, &object_name, body)
.send()
.await
.unwrap();

View File

@ -16,9 +16,9 @@
mod common;
use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name};
use minio::s3::args::{DownloadObjectArgs, UploadObjectArgs};
use minio::s3::types::S3Api;
use sha2::{Digest, Sha256};
use std::path::PathBuf;
use std::{fs, io};
fn get_hash(filename: &String) -> String {
@ -38,14 +38,27 @@ async fn upload_download_object() {
let mut file = fs::File::create(&object_name).unwrap();
io::copy(&mut RandReader::new(size), &mut file).unwrap();
file.sync_all().unwrap();
ctx.client
.upload_object(&UploadObjectArgs::new(&bucket_name, &object_name, &object_name).unwrap())
.put_object_content(
&bucket_name,
&object_name,
PathBuf::from(&object_name).as_path(),
)
.send()
.await
.unwrap();
let filename = rand_object_name();
ctx.client
.download_object(&DownloadObjectArgs::new(&bucket_name, &object_name, &filename).unwrap())
let get_obj_rsp = ctx
.client
.get_object(&bucket_name, &object_name)
.send()
.await
.unwrap();
get_obj_rsp
.content
.to_file(PathBuf::from(&filename).as_path())
.await
.unwrap();
assert_eq!(get_hash(&object_name), get_hash(&filename));
@ -70,13 +83,25 @@ async fn upload_download_object() {
io::copy(&mut RandReader::new(size), &mut file).unwrap();
file.sync_all().unwrap();
ctx.client
.upload_object(&UploadObjectArgs::new(&bucket_name, &object_name, &object_name).unwrap())
.put_object_content(
&bucket_name,
&object_name,
PathBuf::from(&object_name).as_path(),
)
.send()
.await
.unwrap();
let filename = rand_object_name();
ctx.client
.download_object(&DownloadObjectArgs::new(&bucket_name, &object_name, &filename).unwrap())
let get_rsp = ctx
.client
.get_object(&bucket_name, &object_name)
.send()
.await
.unwrap();
get_rsp
.content
.to_file(PathBuf::from(&filename).as_path())
.await
.unwrap();
assert_eq!(get_hash(&object_name), get_hash(&filename));