diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4b4f61f..27a078d 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -7,6 +7,7 @@ on: branches: [ "master" ] env: + RUST_LOG: debug CARGO_TERM_COLOR: always jobs: diff --git a/benches/s3/bench_object_append.rs b/benches/s3/bench_object_append.rs index 2171168..0eb1802 100644 --- a/benches/s3/bench_object_append.rs +++ b/benches/s3/bench_object_append.rs @@ -43,7 +43,7 @@ pub(crate) async fn bench_object_append(criterion: &mut Criterion) { }) .unwrap(); - let offset_bytes: u64 = resp.size; + let offset_bytes: u64 = resp.size().unwrap(); AppendObject::new( ctx.client.clone(), ctx.bucket.clone(), diff --git a/examples/append_object.rs b/examples/append_object.rs index 2617799..d1d2f24 100644 --- a/examples/append_object.rs +++ b/examples/append_object.rs @@ -17,6 +17,7 @@ mod common; use crate::common::{create_bucket_if_not_exists, create_client_on_localhost}; use minio::s3::Client; +use minio::s3::response::a_response_traits::HasObjectSize; use minio::s3::response::{AppendObjectResponse, StatObjectResponse}; use minio::s3::segmented_bytes::SegmentedBytes; use minio::s3::types::S3Api; @@ -54,19 +55,21 @@ async fn main() -> Result<(), Box> { .await?; offset_bytes += data_size; - if resp.object_size != offset_bytes { + if resp.object_size() != offset_bytes { panic!( "from the append_object: size mismatch: expected {}, got {}", - resp.object_size, offset_bytes + resp.object_size(), + offset_bytes ) } //println!("Append response: {:#?}", resp); let resp: StatObjectResponse = client.stat_object(bucket_name, object_name).send().await?; - if resp.size != offset_bytes { + if resp.size()? != offset_bytes { panic!( "from the stat_Object: size mismatch: expected {}, got {}", - resp.size, offset_bytes + resp.size()?, + offset_bytes ) } println!("{}/{}", i, n_segments); diff --git a/examples/bucket_encryption.rs b/examples/bucket_encryption.rs index bca54e0..984e0a7 100644 --- a/examples/bucket_encryption.rs +++ b/examples/bucket_encryption.rs @@ -30,7 +30,7 @@ async fn main() -> Result<(), Box> { let resp: GetBucketEncryptionResponse = client.get_bucket_encryption(bucket_name).send().await?; - log::info!("encryption before: config={:?}", resp.config); + log::info!("encryption before: config={:?}", resp.config()); let config = SseConfig::default(); log::info!("going to set encryption config={:?}", config); @@ -43,7 +43,7 @@ async fn main() -> Result<(), Box> { let resp: GetBucketEncryptionResponse = client.get_bucket_encryption(bucket_name).send().await?; - log::info!("encryption after: config={:?}", resp.config); + log::info!("encryption after: config={:?}", resp.config()); Ok(()) } diff --git a/examples/bucket_versioning.rs b/examples/bucket_versioning.rs index 838cf75..d11e276 100644 --- a/examples/bucket_versioning.rs +++ b/examples/bucket_versioning.rs @@ -33,8 +33,8 @@ async fn main() -> Result<(), Box> { client.get_bucket_versioning(bucket_name).send().await?; log::info!( "versioning before: status={:?}, mfa_delete={:?}", - resp.status, - resp.mfa_delete + resp.status(), + resp.mfa_delete() ); let _resp: PutBucketVersioningResponse = client @@ -48,8 +48,8 @@ async fn main() -> Result<(), Box> { log::info!( "versioning after setting to Enabled: status={:?}, mfa_delete={:?}", - resp.status, - resp.mfa_delete + resp.status(), + resp.mfa_delete() ); let _resp: PutBucketVersioningResponse = client @@ -63,8 +63,8 @@ async fn main() -> Result<(), Box> { log::info!( "versioning after setting to Suspended: 
status={:?}, mfa_delete={:?}", - resp.status, - resp.mfa_delete + resp.status(), + resp.mfa_delete() ); let _resp: PutBucketVersioningResponse = client @@ -78,8 +78,8 @@ async fn main() -> Result<(), Box> { log::info!( "versioning after setting to None: status={:?}, mfa_delete={:?}", - resp.status, - resp.mfa_delete + resp.status(), + resp.mfa_delete() ); Ok(()) diff --git a/examples/common.rs b/examples/common.rs index ace1994..2b74cb5 100644 --- a/examples/common.rs +++ b/examples/common.rs @@ -42,7 +42,7 @@ pub async fn create_bucket_if_not_exists( let resp: BucketExistsResponse = client.bucket_exists(bucket_name).send().await?; // Make 'bucket_name' bucket if not exist. - if !resp.exists { + if !resp.exists() { client.create_bucket(bucket_name).send().await.unwrap(); }; Ok(()) diff --git a/examples/file_downloader.rs b/examples/file_downloader.rs index 03285ef..f26f0be 100644 --- a/examples/file_downloader.rs +++ b/examples/file_downloader.rs @@ -55,7 +55,10 @@ async fn main() -> Result<(), Box> { let get_object = client.get_object(bucket_name, object_name).send().await?; - get_object.content.to_file(Path::new(download_path)).await?; + get_object + .content()? + .to_file(Path::new(download_path)) + .await?; log::info!("Object '{object_name}' is successfully downloaded to file '{download_path}'."); diff --git a/examples/object_prompt.rs b/examples/object_prompt.rs index 85245fa..40625ce 100644 --- a/examples/object_prompt.rs +++ b/examples/object_prompt.rs @@ -72,7 +72,7 @@ async fn main() -> Result<(), Box> { .send() .await?; - log::info!("Object prompt result: '{}'", resp.prompt_response); + log::info!("Object prompt result: '{}'", resp.prompt_response()?); Ok(()) } diff --git a/examples/put_object.rs b/examples/put_object.rs index ec44391..56861f2 100644 --- a/examples/put_object.rs +++ b/examples/put_object.rs @@ -47,7 +47,7 @@ async fn main() -> Result<(), Box> { let resp: BucketExistsResponse = client.bucket_exists(&args.bucket).send().await.unwrap(); - if !resp.exists { + if !resp.exists() { client.create_bucket(&args.bucket).send().await.unwrap(); } diff --git a/src/lib.rs b/src/lib.rs index 84ff667..88952ee 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -40,7 +40,7 @@ //! .await //! .expect("request failed"); //! -//! println!("Bucket exists: {}", exists.exists); +//! println!("Bucket exists: {}", exists.exists()); //! } //! ``` //! diff --git a/src/s3/builders/append_object.rs b/src/s3/builders/append_object.rs index cebbc13..621b520 100644 --- a/src/s3/builders/append_object.rs +++ b/src/s3/builders/append_object.rs @@ -19,6 +19,7 @@ use crate::s3::builders::{ }; use crate::s3::error::Error; use crate::s3::multimap::{Multimap, MultimapExt}; +use crate::s3::response::a_response_traits::HasObjectSize; use crate::s3::response::{AppendObjectResponse, StatObjectResponse}; use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::sse::Sse; @@ -26,7 +27,6 @@ use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, check_object_name}; use http::Method; use std::sync::Arc; - // region: append-object /// Argument builder for the [`AppendObject`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-buckets-objects-append.html) S3 API operation. 
@@ -234,7 +234,7 @@ impl AppendObjectContent { .await?; //println!("statObjectResponse={:#?}", resp); - let current_file_size = resp.size; + let current_file_size = resp.size()?; // In the first part read, if: // @@ -322,7 +322,7 @@ impl AppendObjectContent { let resp: AppendObjectResponse = append_object.send().await?; //println!("AppendObjectResponse: object_size={:?}", resp.object_size); - next_offset_bytes = resp.object_size; + next_offset_bytes = resp.object_size(); // Finally check if we are done. if buffer_size < part_size { diff --git a/src/s3/builders/copy_object.rs b/src/s3/builders/copy_object.rs index e2cec60..47ff81c 100644 --- a/src/s3/builders/copy_object.rs +++ b/src/s3/builders/copy_object.rs @@ -17,9 +17,11 @@ use crate::s3::Client; use crate::s3::client::{MAX_MULTIPART_COUNT, MAX_PART_SIZE}; use crate::s3::error::Error; use crate::s3::multimap::{Multimap, MultimapExt}; +use crate::s3::response::a_response_traits::HasEtagFromBody; use crate::s3::response::{ - AbortMultipartUploadResponse, ComposeObjectResponse, CopyObjectInternalResponse, - CopyObjectResponse, CreateMultipartUploadResponse, StatObjectResponse, UploadPartCopyResponse, + AbortMultipartUploadResponse, CompleteMultipartUploadResponse, ComposeObjectResponse, + CopyObjectInternalResponse, CopyObjectResponse, CreateMultipartUploadResponse, + StatObjectResponse, UploadPartCopyResponse, }; use crate::s3::sse::{Sse, SseCustomerKey}; use crate::s3::types::{Directive, PartInfo, Retention, S3Api, S3Request, ToS3Request}; @@ -445,7 +447,7 @@ impl CopyObject { if self.source.offset.is_some() || self.source.length.is_some() - || stat_resp.size > MAX_PART_SIZE + || stat_resp.size()? > MAX_PART_SIZE { if let Some(v) = &self.metadata_directive { match v { @@ -499,14 +501,8 @@ impl CopyObject { .send() .await?; - Ok(CopyObjectResponse { - headers: resp.headers, - bucket: resp.bucket, - object: resp.object, - region: resp.region, - etag: resp.etag, - version_id: resp.version_id, - }) + let resp: CopyObjectResponse = resp; // retype to CopyObjectResponse + Ok(resp) } else { let resp: CopyObjectInternalResponse = self .client @@ -526,14 +522,8 @@ impl CopyObject { .send() .await?; - Ok(CopyObjectResponse { - headers: resp.headers, - bucket: resp.bucket, - object: resp.object, - region: resp.region, - etag: resp.etag, - version_id: resp.version_id, - }) + let resp: CopyObjectResponse = resp; // retype to CopyObjectResponse + Ok(resp) } } } @@ -652,17 +642,9 @@ impl ComposeObjectInternal { Err(e) => return (Err(e), upload_id), }; - ( - Ok(ComposeObjectResponse { - headers: resp.headers, - bucket: resp.bucket, - object: resp.object, - region: resp.region, - etag: resp.etag, - version_id: resp.version_id, - }), - upload_id, - ) + let resp: ComposeObjectResponse = resp; // retype to ComposeObjectResponse + + (Ok(resp), upload_id) } else { let headers: Multimap = into_headers_copy_object( self.extra_headers, @@ -687,7 +669,11 @@ impl ComposeObjectInternal { }; // the multipart upload was successful: update the upload_id - upload_id.push_str(&cmu.upload_id); + let upload_id_cmu: String = match cmu.upload_id().await { + Ok(v) => v, + Err(e) => return (Err(e), upload_id), + }; + upload_id.push_str(&upload_id_cmu); let mut part_number = 0_u16; let ssec_headers: Multimap = match self.sse { @@ -739,9 +725,14 @@ impl ComposeObjectInternal { Err(e) => return (Err(e), upload_id), }; + let etag = match resp.etag() { + Ok(v) => v, + Err(e) => return (Err(e), upload_id), + }; + parts.push(PartInfo { number: part_number, - etag: resp.etag, + 
etag, size, }); } else { @@ -773,9 +764,14 @@ impl ComposeObjectInternal { Err(e) => return (Err(e), upload_id), }; + let etag = match resp.etag() { + Ok(v) => v, + Err(e) => return (Err(e), upload_id), + }; + parts.push(PartInfo { number: part_number, - etag: resp.etag, + etag, size, }); @@ -785,7 +781,7 @@ impl ComposeObjectInternal { } } - let resp = self + let resp: Result = self .client .complete_multipart_upload(&self.bucket, &self.object, &upload_id, parts) .region(self.region) @@ -793,17 +789,14 @@ impl ComposeObjectInternal { .await; match resp { - Ok(v) => ( - Ok(ComposeObjectResponse { + Ok(v) => { + let resp = ComposeObjectResponse { + request: v.request, headers: v.headers, - bucket: v.bucket, - object: v.object, - region: v.region, - etag: v.etag, - version_id: v.version_id, - }), - upload_id, - ), + body: v.body, + }; + (Ok(resp), upload_id) + } Err(e) => (Err(e), upload_id), } } diff --git a/src/s3/builders/delete_objects.rs b/src/s3/builders/delete_objects.rs index 3410e1e..bebc86e 100644 --- a/src/s3/builders/delete_objects.rs +++ b/src/s3/builders/delete_objects.rs @@ -241,7 +241,7 @@ impl ToS3Request for DeleteObjects { fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; - let mut data = String::from(""); + let mut data: String = String::from(""); if !self.verbose_mode { data.push_str("true"); } diff --git a/src/s3/builders/put_object.rs b/src/s3/builders/put_object.rs index 54c709c..9a5b9b7 100644 --- a/src/s3/builders/put_object.rs +++ b/src/s3/builders/put_object.rs @@ -15,6 +15,7 @@ use super::ObjectContent; use crate::s3::multimap::{Multimap, MultimapExt}; +use crate::s3::response::a_response_traits::HasEtagFromHeaders; use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::utils::{check_object_name, insert}; use crate::s3::{ @@ -665,7 +666,7 @@ impl PutObjectContent { { let size = seg_bytes.len() as u64; - let res: PutObjectResponse = PutObject(UploadPart { + let resp: PutObjectResponse = PutObject(UploadPart { client: self.client.clone(), extra_headers: self.extra_headers.clone(), extra_query_params: self.extra_query_params.clone(), @@ -685,15 +686,7 @@ impl PutObjectContent { .send() .await?; - Ok(PutObjectContentResponse { - headers: res.headers, - bucket: res.bucket, - object: res.object, - region: res.region, - object_size: size, - etag: res.etag, - version_id: res.version_id, - }) + Ok(PutObjectContentResponse::new(resp, size)) } else if object_size.is_known() && (seg_bytes.len() as u64) < part_size { // Not enough data! let expected: u64 = object_size.as_u64().unwrap(); @@ -722,21 +715,17 @@ impl PutObjectContent { .await?; let client = self.client.clone(); + let upload_id: String = create_mpu_resp.upload_id().await?; + let mpu_res = self - .send_mpu( - part_size, - create_mpu_resp.upload_id.clone(), - object_size, - seg_bytes, - ) + .send_mpu(part_size, upload_id.clone(), object_size, seg_bytes) .await; if mpu_res.is_err() { // If we failed to complete the multipart upload, we should abort it. 
- let _ = - AbortMultipartUpload::new(client, bucket, object, create_mpu_resp.upload_id) - .send() - .await; + let _ = AbortMultipartUpload::new(client, bucket, object, upload_id) + .send() + .await; } mpu_res } @@ -815,7 +804,7 @@ impl PutObjectContent { parts.push(PartInfo { number: part_number, - etag: resp.etag, + etag: resp.etag()?, size: buffer_size, }); @@ -835,7 +824,7 @@ impl PutObjectContent { } } - let res: CompleteMultipartUploadResponse = CompleteMultipartUpload { + let resp: CompleteMultipartUploadResponse = CompleteMultipartUpload { client: self.client, extra_headers: self.extra_headers, extra_query_params: self.extra_query_params, @@ -848,15 +837,7 @@ impl PutObjectContent { .send() .await?; - Ok(PutObjectContentResponse { - headers: res.headers, - bucket: res.bucket, - object: res.object, - region: res.region, - object_size: size, - etag: res.etag, - version_id: res.version_id, - }) + Ok(PutObjectContentResponse::new(resp, size)) } } diff --git a/src/s3/client.rs b/src/s3/client.rs index 50cbe84..178c31b 100644 --- a/src/s3/client.rs +++ b/src/s3/client.rs @@ -21,16 +21,17 @@ use std::mem; use std::path::{Path, PathBuf}; use std::sync::{Arc, OnceLock}; +use crate::s3::builders::{BucketExists, ComposeSource}; use crate::s3::creds::Provider; use crate::s3::error::{Error, ErrorCode, ErrorResponse}; use crate::s3::http::BaseUrl; +use crate::s3::multimap::{Multimap, MultimapExt}; +use crate::s3::response::a_response_traits::{HasEtagFromHeaders, HasS3Fields}; use crate::s3::response::*; +use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::signer::sign_v4_s3; use crate::s3::utils::{EMPTY_SHA256, sha256_hash_sb, to_amz_date, utc_now}; -use crate::s3::builders::{BucketExists, ComposeSource}; -use crate::s3::multimap::{Multimap, MultimapExt}; -use crate::s3::segmented_bytes::SegmentedBytes; use bytes::Bytes; use dashmap::DashMap; use http::HeaderMap; @@ -280,7 +281,7 @@ impl Client { let express = match BucketExists::new(self.clone(), bucket_name).send().await { Ok(v) => { - if let Some(server) = v.headers.get("server") { + if let Some(server) = v.headers().get("server") { if let Ok(s) = server.to_str() { s.eq_ignore_ascii_case("MinIO Enterprise/S3Express") } else { @@ -300,7 +301,6 @@ impl Client { express } } - /// Add a bucket-region pair to the region cache if it does not exist. 
pub(crate) fn add_bucket_region(&mut self, bucket: &str, region: impl Into) { self.shared @@ -360,9 +360,9 @@ impl Client { .send() .await?; - source.build_headers(stat_resp.size, stat_resp.etag)?; + let mut size = stat_resp.size()?; + source.build_headers(size, stat_resp.etag()?)?; - let mut size = stat_resp.size; if let Some(l) = source.length { size = l; } else if let Some(o) = source.offset { @@ -493,15 +493,12 @@ impl Client { // Sort headers alphabetically by name header_strings.sort(); - let body_str: String = - String::from_utf8(body.unwrap_or(&SegmentedBytes::new()).to_bytes().to_vec())?; - println!( "S3 request: {} url={:?}; headers={:?}; body={}\n", method, url.path, header_strings.join("; "), - body_str + body.unwrap() ); } diff --git a/src/s3/client/append_object.rs b/src/s3/client/append_object.rs index be6eb59..92f9a02 100644 --- a/src/s3/client/append_object.rs +++ b/src/s3/client/append_object.rs @@ -36,6 +36,7 @@ impl Client { /// use minio::s3::response::{AppendObjectResponse, PutObjectResponse}; /// use minio::s3::segmented_bytes::SegmentedBytes; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasObjectSize; /// /// #[tokio::main] /// async fn main() { @@ -49,7 +50,7 @@ impl Client { /// let resp: AppendObjectResponse = client /// .append_object("bucket-name", "object-name", data2, offset_bytes) /// .send().await.unwrap(); - /// println!("size of the final object is {} bytes", resp.object_size); + /// println!("size of the final object is {} bytes", resp.object_size()); /// } /// ``` pub fn append_object, S2: Into>( @@ -85,10 +86,11 @@ impl Client { /// use minio::s3::builders::ObjectContent; /// use minio::s3::segmented_bytes::SegmentedBytes; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasObjectSize; /// /// #[tokio::main] /// async fn main() { - /// let client: Client = Default::default(); // configure your client here + /// let client: Client = Default::default(); // configure your client here /// let data1: SegmentedBytes = SegmentedBytes::from("aaaa".to_string()); /// let content2: String = "bbbb".to_string(); /// let resp: PutObjectResponse = client @@ -97,7 +99,7 @@ impl Client { /// let resp: AppendObjectResponse = client /// .append_object_content("bucket-name", "object-name", content2) /// .send().await.unwrap(); - /// println!("size of the final object is {} bytes", resp.object_size); + /// println!("size of the final object is {} bytes", resp.object_size()); /// } /// ``` pub fn append_object_content, S2: Into, C: Into>( diff --git a/src/s3/client/bucket_exists.rs b/src/s3/client/bucket_exists.rs index 9de6174..0927791 100644 --- a/src/s3/client/bucket_exists.rs +++ b/src/s3/client/bucket_exists.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::BucketExistsResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: BucketExistsResponse = client /// .bucket_exists("bucket-name") /// .send().await.unwrap(); - /// println!("bucket '{}' exists: {}", resp.bucket, resp.exists); + /// println!("bucket '{}' exists: {}", resp.bucket(), resp.exists()); /// } /// ``` pub fn bucket_exists>(&self, bucket: S) -> BucketExists { diff --git a/src/s3/client/copy_object.rs b/src/s3/client/copy_object.rs index 771f847..4b8d0f1 100644 --- a/src/s3/client/copy_object.rs +++ b/src/s3/client/copy_object.rs @@ -33,6 +33,7 @@ impl Client 
{ /// use minio::s3::response::{UploadPartCopyResponse}; /// use minio::s3::segmented_bytes::SegmentedBytes; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasObject; /// /// #[tokio::main] /// async fn main() { @@ -42,7 +43,7 @@ impl Client { /// let resp: UploadPartCopyResponse = client /// .upload_part_copy("bucket-name", "object-name", "TODO") /// .send().await.unwrap(); - /// println!("uploaded {}", resp.object); + /// println!("uploaded {}", resp.object()); /// } /// ``` pub fn upload_part_copy, S2: Into, S3: Into>( diff --git a/src/s3/client/create_bucket.rs b/src/s3/client/create_bucket.rs index 0a74f37..8740adf 100644 --- a/src/s3/client/create_bucket.rs +++ b/src/s3/client/create_bucket.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::CreateBucketResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: CreateBucketResponse = client /// .create_bucket("bucket-name") /// .send().await.unwrap(); - /// println!("Made bucket '{}' in region '{}'", resp.bucket, resp.region); + /// println!("Made bucket '{}' in region '{}'", resp.bucket(), resp.region()); /// } /// ``` pub fn create_bucket>(&self, bucket: S) -> CreateBucket { diff --git a/src/s3/client/delete_bucket.rs b/src/s3/client/delete_bucket.rs index 8c1e028..c83af3f 100644 --- a/src/s3/client/delete_bucket.rs +++ b/src/s3/client/delete_bucket.rs @@ -21,6 +21,7 @@ use crate::s3::response::{ DeleteBucketResponse, DeleteObjectResponse, DeleteObjectsResponse, PutObjectLegalHoldResponse, }; use crate::s3::types::{S3Api, ToStream}; +use bytes::Bytes; use futures::StreamExt; impl Client { @@ -35,13 +36,14 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::DeleteBucketResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; /// /// #[tokio::main] /// async fn main() { /// let client: Client = Default::default(); // configure your client here /// let resp: DeleteBucketResponse = /// client.delete_bucket("bucket-name").send().await.unwrap(); - /// println!("bucket '{}' in region '{}' is removed", resp.bucket, resp.region); + /// println!("bucket '{}' in region '{}' is removed", resp.bucket(), resp.region()); /// } /// ``` pub fn delete_bucket>(&self, bucket: S) -> DeleteBucket { @@ -90,7 +92,7 @@ impl Client { while let Some(item) = resp.next().await { let resp: DeleteObjectsResponse = item?; - for obj in resp.result.into_iter() { + for obj in resp.result()?.into_iter() { match obj { DeleteResult::Deleted(_) => {} DeleteResult::Error(v) => { @@ -115,14 +117,15 @@ impl Client { } } } - match self.delete_bucket(bucket).send().await { + let request: DeleteBucket = self.delete_bucket(bucket); + match request.send().await { Ok(resp) => Ok(resp), Err(Error::S3Error(e)) => { if e.code == ErrorCode::NoSuchBucket { Ok(DeleteBucketResponse { + request: Default::default(), //TODO consider how to handle this + body: Bytes::new(), headers: e.headers, - bucket: e.bucket_name, - region: String::new(), }) } else { Err(Error::S3Error(e)) diff --git a/src/s3/client/delete_bucket_encryption.rs b/src/s3/client/delete_bucket_encryption.rs index 44c0615..9c692cd 100644 --- a/src/s3/client/delete_bucket_encryption.rs +++ b/src/s3/client/delete_bucket_encryption.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use 
minio::s3::response::DeleteBucketEncryptionResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: DeleteBucketEncryptionResponse = client /// .delete_bucket_encryption("bucket-name") /// .send().await.unwrap(); - /// println!("bucket '{}' is deleted", resp.bucket); + /// println!("bucket '{}' is deleted", resp.bucket()); /// } /// ``` pub fn delete_bucket_encryption>(&self, bucket: S) -> DeleteBucketEncryption { diff --git a/src/s3/client/delete_bucket_lifecycle.rs b/src/s3/client/delete_bucket_lifecycle.rs index 9a3bcb0..bcfe64f 100644 --- a/src/s3/client/delete_bucket_lifecycle.rs +++ b/src/s3/client/delete_bucket_lifecycle.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::DeleteBucketLifecycleResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: DeleteBucketLifecycleResponse = client /// .delete_bucket_lifecycle("bucket-name") /// .send().await.unwrap(); - /// println!("lifecycle of bucket '{}' is deleted", resp.bucket); + /// println!("lifecycle of bucket '{}' is deleted", resp.bucket()); /// } /// ``` pub fn delete_bucket_lifecycle>(&self, bucket: S) -> DeleteBucketLifecycle { diff --git a/src/s3/client/delete_bucket_notification.rs b/src/s3/client/delete_bucket_notification.rs index cf3fa87..f311c59 100644 --- a/src/s3/client/delete_bucket_notification.rs +++ b/src/s3/client/delete_bucket_notification.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::DeleteBucketNotificationResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: DeleteBucketNotificationResponse = client /// .delete_bucket_notification("bucket-name") /// .send().await.unwrap(); - /// println!("notification of bucket '{}' is deleted", resp.bucket); + /// println!("notification of bucket '{}' is deleted", resp.bucket()); /// } /// ``` pub fn delete_bucket_notification>( diff --git a/src/s3/client/delete_bucket_policy.rs b/src/s3/client/delete_bucket_policy.rs index bd587d5..61ce6cf 100644 --- a/src/s3/client/delete_bucket_policy.rs +++ b/src/s3/client/delete_bucket_policy.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::DeleteBucketPolicyResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: DeleteBucketPolicyResponse = client /// .delete_bucket_policy("bucket-name") /// .send().await.unwrap(); - /// println!("policy of bucket '{}' is deleted", resp.bucket); + /// println!("policy of bucket '{}' is deleted", resp.bucket()); /// } /// ``` pub fn delete_bucket_policy>(&self, bucket: S) -> DeleteBucketPolicy { diff --git a/src/s3/client/delete_bucket_replication.rs b/src/s3/client/delete_bucket_replication.rs index 5d17d5d..d5f45c0 100644 --- a/src/s3/client/delete_bucket_replication.rs +++ b/src/s3/client/delete_bucket_replication.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::DeleteBucketReplicationResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// 
/// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: DeleteBucketReplicationResponse = client /// .delete_bucket_replication("bucket-name") /// .send().await.unwrap(); - /// println!("replication of bucket '{}' is deleted", resp.bucket); + /// println!("replication of bucket '{}' is deleted", resp.bucket()); /// } /// ``` pub fn delete_bucket_replication>(&self, bucket: S) -> DeleteBucketReplication { diff --git a/src/s3/client/delete_bucket_tagging.rs b/src/s3/client/delete_bucket_tagging.rs index 4f0a33c..30b74d2 100644 --- a/src/s3/client/delete_bucket_tagging.rs +++ b/src/s3/client/delete_bucket_tagging.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::DeleteBucketTaggingResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: DeleteBucketTaggingResponse = client /// .delete_bucket_tagging("bucket-name") /// .send().await.unwrap(); - /// println!("tags of bucket '{}' are deleted", resp.bucket); + /// println!("tags of bucket '{}' are deleted", resp.bucket()); /// } /// ``` pub fn delete_bucket_tagging>(&self, bucket: S) -> DeleteBucketTagging { diff --git a/src/s3/client/delete_object_lock_config.rs b/src/s3/client/delete_object_lock_config.rs index af1c028..4965712 100644 --- a/src/s3/client/delete_object_lock_config.rs +++ b/src/s3/client/delete_object_lock_config.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::{DeleteObjectLockConfigResponse, CreateBucketResponse, PutObjectLockConfigResponse}; /// use minio::s3::types::{S3Api, ObjectLockConfig, RetentionMode}; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -38,18 +39,19 @@ impl Client { /// /// let resp: CreateBucketResponse = /// client.create_bucket(bucket_name).object_lock(true).send().await.unwrap(); - /// println!("created bucket '{}' with object locking enabled", resp.bucket); + /// println!("created bucket '{}' with object locking enabled", resp.bucket()); /// - /// const DURATION_DAYS: i32 = 7; + /// + /// const DURATION_DAYS: i32 = 7; /// let config = ObjectLockConfig::new(RetentionMode::GOVERNANCE, Some(DURATION_DAYS), None).unwrap(); /// /// let resp: PutObjectLockConfigResponse = /// client.put_object_lock_config(bucket_name).config(config).send().await.unwrap(); - /// println!("configured object locking for bucket '{}'", resp.bucket); + /// println!("configured object locking for bucket '{}'", resp.bucket()); /// /// let resp: DeleteObjectLockConfigResponse = /// client.delete_object_lock_config(bucket_name).send().await.unwrap(); - /// println!("object locking of bucket '{}' is deleted", resp.bucket); + /// println!("object locking of bucket '{}' is deleted", resp.bucket()); /// } /// ``` pub fn delete_object_lock_config>(&self, bucket: S) -> DeleteObjectLockConfig { diff --git a/src/s3/client/delete_object_tagging.rs b/src/s3/client/delete_object_tagging.rs index 990a2d3..4a40b05 100644 --- a/src/s3/client/delete_object_tagging.rs +++ b/src/s3/client/delete_object_tagging.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::DeleteObjectTaggingResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::{HasBucket, HasObject}; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: 
DeleteObjectTaggingResponse = client /// .delete_object_tagging("bucket-name", "object_name") /// .send().await.unwrap(); - /// println!("legal hold of object '{}' in bucket '{}' is deleted", resp.object, resp.bucket); + /// println!("legal hold of object '{}' in bucket '{}' is deleted", resp.object(), resp.bucket()); /// } /// ``` pub fn delete_object_tagging, S2: Into>( diff --git a/src/s3/client/delete_objects.rs b/src/s3/client/delete_objects.rs index 2951616..51079ac 100644 --- a/src/s3/client/delete_objects.rs +++ b/src/s3/client/delete_objects.rs @@ -33,14 +33,15 @@ impl Client { /// use minio::s3::response::DeleteObjectResponse; /// use minio::s3::builders::ObjectToDelete; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasVersion; /// /// #[tokio::main] /// async fn main() { - /// let client: Client = Default::default(); // configure your client here + /// let client: Client = Default::default(); // configure your client here /// let resp: DeleteObjectResponse = client /// .delete_object("bucket-name", ObjectToDelete::from("object-name")) /// .send().await.unwrap(); - /// println!("the object is deleted. The delete marker has version '{:?}'", resp.version_id); + /// println!("the object is deleted. The delete marker has version '{:?}'", resp.version_id()); /// } /// ``` pub fn delete_object, D: Into>( diff --git a/src/s3/client/get_bucket_encryption.rs b/src/s3/client/get_bucket_encryption.rs index 5efd9e7..e1c9189 100644 --- a/src/s3/client/get_bucket_encryption.rs +++ b/src/s3/client/get_bucket_encryption.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetBucketEncryptionResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: GetBucketEncryptionResponse = client /// .get_bucket_encryption("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved SseConfig '{:?}' from bucket '{}'", resp.config, resp.bucket); + /// println!("retrieved SseConfig '{:?}' from bucket '{}'", resp.config(), resp.bucket()); /// } /// ``` pub fn get_bucket_encryption>(&self, bucket: S) -> GetBucketEncryption { diff --git a/src/s3/client/get_bucket_lifecycle.rs b/src/s3/client/get_bucket_lifecycle.rs index 6362287..39b3a47 100644 --- a/src/s3/client/get_bucket_lifecycle.rs +++ b/src/s3/client/get_bucket_lifecycle.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetBucketLifecycleResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: GetBucketLifecycleResponse = client /// .get_bucket_lifecycle("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved bucket lifecycle config '{:?}' from bucket '{}'", resp.config, resp.bucket); + /// println!("retrieved bucket lifecycle config '{:?}' from bucket '{}'", resp.config(), resp.bucket()); /// } /// ``` pub fn get_bucket_lifecycle>(&self, bucket: S) -> GetBucketLifecycle { diff --git a/src/s3/client/get_bucket_notification.rs b/src/s3/client/get_bucket_notification.rs index 8c1251b..66c0ffe 100644 --- a/src/s3/client/get_bucket_notification.rs +++ b/src/s3/client/get_bucket_notification.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetBucketNotificationResponse; /// use minio::s3::types::S3Api; + /// use 
minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: GetBucketNotificationResponse = client /// .get_bucket_notification("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved bucket notification config '{:?}' from bucket '{}'", resp.config, resp.bucket); + /// println!("retrieved bucket notification config '{:?}' from bucket '{}'", resp.config(), resp.bucket()); /// } /// ``` pub fn get_bucket_notification>(&self, bucket: S) -> GetBucketNotification { diff --git a/src/s3/client/get_bucket_policy.rs b/src/s3/client/get_bucket_policy.rs index 1bb91b1..4d7fb7e 100644 --- a/src/s3/client/get_bucket_policy.rs +++ b/src/s3/client/get_bucket_policy.rs @@ -28,6 +28,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetBucketPolicyResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -35,7 +36,7 @@ impl Client { /// let resp: GetBucketPolicyResponse = client /// .get_bucket_policy("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved bucket policy config '{:?}' from bucket '{}'", resp.config, resp.bucket); + /// println!("retrieved bucket policy config '{:?}' from bucket '{}'", resp.config(), resp.bucket()); /// } /// ``` pub fn get_bucket_policy>(&self, bucket: S) -> GetBucketPolicy { diff --git a/src/s3/client/get_bucket_replication.rs b/src/s3/client/get_bucket_replication.rs index 28348ba..6eddf41 100644 --- a/src/s3/client/get_bucket_replication.rs +++ b/src/s3/client/get_bucket_replication.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetBucketReplicationResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: GetBucketReplicationResponse = client /// .get_bucket_replication("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved bucket replication config '{:?}' from bucket '{}'", resp.config, resp.bucket); + /// println!("retrieved bucket replication config '{:?}' from bucket '{}'", resp.config(), resp.bucket()); /// } /// ``` pub fn get_bucket_replication>(&self, bucket: S) -> GetBucketReplication { diff --git a/src/s3/client/get_bucket_tagging.rs b/src/s3/client/get_bucket_tagging.rs index c7211c6..2bb0237 100644 --- a/src/s3/client/get_bucket_tagging.rs +++ b/src/s3/client/get_bucket_tagging.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetBucketTaggingResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::{HasBucket, HasTagging}; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: GetBucketTaggingResponse = client /// .get_bucket_tagging("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved bucket tags '{:?}' from bucket '{}'", resp.tags, resp.bucket); + /// println!("retrieved bucket tags '{:?}' from bucket '{}'", resp.tags(), resp.bucket()); /// } /// ``` pub fn get_bucket_tagging>(&self, bucket: S) -> GetBucketTagging { diff --git a/src/s3/client/get_bucket_versioning.rs b/src/s3/client/get_bucket_versioning.rs index db95531..b1de3e3 100644 --- a/src/s3/client/get_bucket_versioning.rs +++ b/src/s3/client/get_bucket_versioning.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use 
minio::s3::response::GetBucketVersioningResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: GetBucketVersioningResponse = client /// .get_bucket_versioning("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved versioning status '{:?}' from bucket '{}'", resp.status, resp.bucket); + /// println!("retrieved versioning status '{:?}' from bucket '{}'", resp.status(), resp.bucket()); /// } /// ``` pub fn get_bucket_versioning>(&self, bucket: S) -> GetBucketVersioning { diff --git a/src/s3/client/get_object.rs b/src/s3/client/get_object.rs index ddd7089..539cb5b 100644 --- a/src/s3/client/get_object.rs +++ b/src/s3/client/get_object.rs @@ -38,7 +38,7 @@ impl Client { /// let resp: GetObjectResponse = client /// .get_object("bucket-name", "object-name") /// .send().await.unwrap(); - /// let content_bytes = resp.content.to_segmented_bytes().await.unwrap().to_bytes(); + /// let content_bytes = resp.content().unwrap().to_segmented_bytes().await.unwrap().to_bytes(); /// let content_str = String::from_utf8(content_bytes.to_vec()).unwrap(); /// println!("retrieved content '{content_str}'"); /// } diff --git a/src/s3/client/get_object_legal_hold.rs b/src/s3/client/get_object_legal_hold.rs index 2c70219..13ab6fb 100644 --- a/src/s3/client/get_object_legal_hold.rs +++ b/src/s3/client/get_object_legal_hold.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetObjectLegalHoldResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::{HasBucket, HasObject}; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: GetObjectLegalHoldResponse = client /// .get_object_legal_hold("bucket-name", "object-name") /// .send().await.unwrap(); - /// println!("legal hold of object '{}' in bucket '{}' is enabled: {}", resp.object, resp.bucket, resp.enabled); + /// println!("legal hold of object '{}' in bucket '{}' is enabled: {:?}", resp.object(), resp.bucket(), resp.enabled()); /// } /// ``` pub fn get_object_legal_hold, S2: Into>( diff --git a/src/s3/client/get_object_lock_config.rs b/src/s3/client/get_object_lock_config.rs index d65f383..f97c64c 100644 --- a/src/s3/client/get_object_lock_config.rs +++ b/src/s3/client/get_object_lock_config.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetObjectLockConfigResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: GetObjectLockConfigResponse = client /// .get_object_lock_config("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved object lock config '{:?}' from bucket '{}' is enabled", resp.config, resp.bucket); + /// println!("retrieved object lock config '{:?}' from bucket '{}' is enabled", resp.config(), resp.bucket()); /// } /// ``` pub fn get_object_lock_config>(&self, bucket: S) -> GetObjectLockConfig { diff --git a/src/s3/client/get_object_prompt.rs b/src/s3/client/get_object_prompt.rs index dfdb4e5..7f64c56 100644 --- a/src/s3/client/get_object_prompt.rs +++ b/src/s3/client/get_object_prompt.rs @@ -36,7 +36,7 @@ impl Client { /// let resp: GetObjectPromptResponse = client /// .get_object_prompt("bucket-name", "object-name", "What is it about?") /// .send().await.unwrap(); - /// println!("the prompt response is: 
'{}'", resp.prompt_response); + /// println!("the prompt response is: '{:?}'", resp.prompt_response()); /// } /// ``` pub fn get_object_prompt, S2: Into, S3: Into>( diff --git a/src/s3/client/get_object_retention.rs b/src/s3/client/get_object_retention.rs index 6111b70..06288dc 100644 --- a/src/s3/client/get_object_retention.rs +++ b/src/s3/client/get_object_retention.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetObjectRetentionResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: GetObjectRetentionResponse = client /// .get_object_retention("bucket-name", "object-name") /// .send().await.unwrap(); - /// println!("retrieved retention mode '{:?}' until '{:?}' from bucket '{}' is enabled", resp.retention_mode, resp.retain_until_date, resp.bucket); + /// println!("retrieved retention mode '{:?}' until '{:?}' from bucket '{}' is enabled", resp.retention_mode(), resp.retain_until_date(), resp.bucket()); /// } /// ``` pub fn get_object_retention, S2: Into>( diff --git a/src/s3/client/get_object_tagging.rs b/src/s3/client/get_object_tagging.rs index 0a06729..ce57404 100644 --- a/src/s3/client/get_object_tagging.rs +++ b/src/s3/client/get_object_tagging.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetObjectTaggingResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::{HasBucket, HasObject, HasTagging}; /// /// #[tokio::main] /// async fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: GetObjectTaggingResponse = client /// .get_object_tagging("bucket-name", "object-name") /// .send().await.unwrap(); - /// println!("retrieved object tags '{:?}' from object '{}' in bucket '{}' is enabled", resp.tags, resp.object, resp.bucket); + /// println!("retrieved object tags '{:?}' from object '{}' in bucket '{}' is enabled", resp.tags(), resp.object(), resp.bucket()); /// } /// ``` pub fn get_object_tagging, S2: Into>( diff --git a/src/s3/client/get_region.rs b/src/s3/client/get_region.rs index 634379e..0d178d1 100644 --- a/src/s3/client/get_region.rs +++ b/src/s3/client/get_region.rs @@ -16,7 +16,6 @@ use super::{Client, DEFAULT_REGION}; use crate::s3::builders::GetRegion; use crate::s3::error::Error; -use crate::s3::response::GetRegionResponse; use crate::s3::types::S3Api; impl Client { @@ -31,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::GetRegionResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -38,7 +38,7 @@ impl Client { /// let resp: GetRegionResponse = client /// .get_region("bucket-name") /// .send().await.unwrap(); - /// println!("retrieved region '{:?}' for bucket '{}'", resp.region_response, resp.bucket); + /// println!("retrieved region '{:?}' for bucket '{}'", resp.region_response(), resp.bucket()); /// } /// ``` pub fn get_region>(&self, bucket: S) -> GetRegion { @@ -82,18 +82,20 @@ impl Client { return Ok(v.value().clone()); } - // Otherwise, fetch the region and cache it - let resp: GetRegionResponse = self.get_region(&bucket).send().await?; - - let resolved_region: String = if resp.region_response.is_empty() { - DEFAULT_REGION.to_owned() - } else { - resp.region_response + // Otherwise, fetch the region from the server and cache it + let resolved_region: String = { + let region = 
self.get_region(&bucket).send().await?.region_response()?; + if !region.is_empty() { + region + } else { + DEFAULT_REGION.to_owned() + } }; self.shared .region_map .insert(bucket, resolved_region.clone()); + Ok(resolved_region) } } diff --git a/src/s3/client/list_buckets.rs b/src/s3/client/list_buckets.rs index ada5ad7..5f8c3e9 100644 --- a/src/s3/client/list_buckets.rs +++ b/src/s3/client/list_buckets.rs @@ -37,7 +37,7 @@ impl Client { /// let resp: ListBucketsResponse = client /// .list_buckets() /// .send().await.unwrap(); - /// println!("retrieved buckets '{:?}'", resp.buckets); + /// println!("retrieved buckets '{:?}'", resp.buckets()); /// } /// ``` pub fn list_buckets(&self) -> ListBuckets { diff --git a/src/s3/client/put_bucket_encryption.rs b/src/s3/client/put_bucket_encryption.rs index edfada0..a78192c 100644 --- a/src/s3/client/put_bucket_encryption.rs +++ b/src/s3/client/put_bucket_encryption.rs @@ -31,6 +31,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::PutBucketEncryptionResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -40,7 +41,7 @@ impl Client { /// .put_bucket_encryption("bucket-name") /// .sse_config(config) /// .send().await.unwrap(); - /// println!("set encryption on bucket '{}'", resp.bucket); + /// println!("set encryption on bucket '{}'", resp.bucket()); /// } /// ``` pub fn put_bucket_encryption>(&self, bucket: S) -> PutBucketEncryption { diff --git a/src/s3/client/put_bucket_lifecycle.rs b/src/s3/client/put_bucket_lifecycle.rs index 19df3e8..58e8268 100644 --- a/src/s3/client/put_bucket_lifecycle.rs +++ b/src/s3/client/put_bucket_lifecycle.rs @@ -31,11 +31,11 @@ impl Client { /// use minio::s3::response::PutBucketLifecycleResponse; /// use minio::s3::types::{Filter, S3Api}; /// use minio::s3::lifecycle_config::{LifecycleRule, LifecycleConfig}; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { - /// let client: Client = Default::default(); // configure your client here - /// + /// let client: Client = Default::default(); // configure your client here /// let rules: Vec = vec![LifecycleRule { /// id: String::from("rule1"), /// filter: Filter {and_operator: None, prefix: Some(String::from("logs/")), tag: None}, @@ -48,7 +48,7 @@ impl Client { /// .put_bucket_lifecycle("bucket-name") /// .life_cycle_config(LifecycleConfig { rules }) /// .send().await.unwrap(); - /// println!("set bucket replication policy on bucket '{}'", resp.bucket); + /// println!("set bucket replication policy on bucket '{}'", resp.bucket()); /// } /// ``` pub fn put_bucket_lifecycle>(&self, bucket: S) -> PutBucketLifecycle { diff --git a/src/s3/client/put_bucket_notification.rs b/src/s3/client/put_bucket_notification.rs index fb67fd5..0086f59 100644 --- a/src/s3/client/put_bucket_notification.rs +++ b/src/s3/client/put_bucket_notification.rs @@ -28,11 +28,11 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::types::{NotificationConfig, PrefixFilterRule, QueueConfig, S3Api, SuffixFilterRule}; /// use minio::s3::response::PutBucketNotificationResponse; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { /// let client: Client = Default::default(); // configure your client here - /// /// let config = NotificationConfig { /// cloud_func_config_list: None, /// queue_config_list: Some(vec![QueueConfig { @@ -56,7 +56,7 @@ impl Client { /// 
.put_bucket_notification("bucket-name") /// .notification_config(config) /// .send().await.unwrap(); - /// println!("set bucket notification for bucket '{:?}'", resp.bucket); + /// println!("set bucket notification for bucket '{:?}'", resp.bucket()); /// } /// ``` pub fn put_bucket_notification>(&self, bucket: S) -> PutBucketNotification { diff --git a/src/s3/client/put_bucket_policy.rs b/src/s3/client/put_bucket_policy.rs index 3ff4799..ced7e48 100644 --- a/src/s3/client/put_bucket_policy.rs +++ b/src/s3/client/put_bucket_policy.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::builders::VersioningStatus; /// use minio::s3::response::PutBucketPolicyResponse; /// use minio::s3::types::{S3Api, AndOperator, Destination, Filter, ReplicationConfig, ReplicationRule}; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -63,7 +64,7 @@ impl Client { /// .put_bucket_policy("bucket-name") /// .config(config.to_owned()) /// .send().await.unwrap(); - /// println!("set bucket replication policy on bucket '{}'", resp.bucket); + /// println!("set bucket replication policy on bucket '{}'", resp.bucket()); /// } /// ``` pub fn put_bucket_policy>(&self, bucket: S) -> PutBucketPolicy { diff --git a/src/s3/client/put_bucket_replication.rs b/src/s3/client/put_bucket_replication.rs index 69737db..3dcce23 100644 --- a/src/s3/client/put_bucket_replication.rs +++ b/src/s3/client/put_bucket_replication.rs @@ -31,12 +31,13 @@ impl Client { /// use minio::s3::builders::VersioningStatus; /// use minio::s3::response::PutBucketReplicationResponse; /// use minio::s3::types::{S3Api, AndOperator, Destination, Filter, ReplicationConfig, ReplicationRule}; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// use std::collections::HashMap; /// /// #[tokio::main] /// async fn main() { - /// let client: Client = Default::default(); // configure your client here + /// let client: Client = Default::default(); // configure your client here /// /// let mut tags: HashMap = HashMap::new(); /// tags.insert(String::from("key1"), String::from("value1")); @@ -75,7 +76,7 @@ impl Client { /// .put_bucket_replication("bucket-name") /// .replication_config(ReplicationConfig {role: None, rules}) /// .send().await.unwrap(); - /// println!("enabled versioning on bucket '{}'", resp.bucket); + /// println!("enabled versioning on bucket '{}'", resp.bucket()); /// } /// ``` pub fn put_bucket_replication>(&self, bucket: S) -> PutBucketReplication { diff --git a/src/s3/client/put_bucket_tagging.rs b/src/s3/client/put_bucket_tagging.rs index 97e1f4b..de02b9b 100644 --- a/src/s3/client/put_bucket_tagging.rs +++ b/src/s3/client/put_bucket_tagging.rs @@ -31,12 +31,13 @@ impl Client { /// use minio::s3::builders::VersioningStatus; /// use minio::s3::response::PutBucketTaggingResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// use std::collections::HashMap; /// /// #[tokio::main] /// async fn main() { - /// let client: Client = Default::default(); // configure your client here + /// let client: Client = Default::default(); // configure your client here /// /// let mut tags: HashMap = HashMap::new(); /// tags.insert(String::from("Project"), String::from("Project One")); @@ -46,7 +47,7 @@ impl Client { /// .put_bucket_tagging("bucket-name") /// .tags(tags) /// .send().await.unwrap(); - /// println!("set tags on bucket '{}'", resp.bucket); + /// println!("set tags on bucket '{}'", resp.bucket()); /// } /// ``` pub fn 
put_bucket_tagging<S: Into<String>>(&self, bucket: S) -> PutBucketTagging { diff --git a/src/s3/client/put_bucket_versioning.rs b/src/s3/client/put_bucket_versioning.rs index 15fe27c..12ce014 100644 --- a/src/s3/client/put_bucket_versioning.rs +++ b/src/s3/client/put_bucket_versioning.rs @@ -31,6 +31,7 @@ impl Client { /// use minio::s3::builders::VersioningStatus; /// use minio::s3::response::PutBucketVersioningResponse; /// use minio::s3::types::{S3Api, ObjectLockConfig, RetentionMode}; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -40,7 +41,7 @@ impl Client { /// .put_bucket_versioning("bucket-name") /// .versioning_status(VersioningStatus::Enabled) /// .send().await.unwrap(); - /// println!("enabled versioning on bucket '{}'", resp.bucket); + /// println!("enabled versioning on bucket '{}'", resp.bucket()); /// } /// ``` pub fn put_bucket_versioning<S: Into<String>>(&self, bucket: S) -> PutBucketVersioning { diff --git a/src/s3/client/put_object.rs b/src/s3/client/put_object.rs index 41afe2a..aec33f0 100644 --- a/src/s3/client/put_object.rs +++ b/src/s3/client/put_object.rs
@@ -41,6 +45,7 @@ impl Client { /// use minio::s3::response::PutObjectResponse; /// use minio::s3::types::S3Api; /// use minio::s3::segmented_bytes::SegmentedBytes; + /// use minio::s3::response::a_response_traits::HasObject; /// /// #[tokio::main] /// async fn main() { @@ -48,7 +53,7 @@ impl Client { /// let data = SegmentedBytes::from("Hello world".to_string()); /// let resp: PutObjectResponse = /// client.put_object("bucket-name", "object-name", data).send().await.unwrap(); - /// println!("successfully put object '{}'", resp.object); + /// println!("successfully put object '{}'", resp.object()); /// } /// ``` pub fn put_object, S2: Into>( @@ -80,7 +85,7 @@ impl Client { /// let resp: CreateMultipartUploadResponse = client /// .create_multipart_upload("bucket-name", "large-object") /// .send().await.unwrap(); - /// println!("Initiated multipart upload with UploadId '{}'", resp.upload_id); + /// println!("Initiated multipart upload with UploadId '{:?}'", resp.upload_id().await); /// } /// ``` pub fn create_multipart_upload, S2: Into>( @@ -135,8 +140,8 @@ impl Client { /// ```no_run /// use minio::s3::Client; /// use minio::s3::response::CompleteMultipartUploadResponse; - /// use minio::s3::types::S3Api; - /// use minio::s3::types::PartInfo; + /// use minio::s3::types::{S3Api, PartInfo}; + /// use minio::s3::response::a_response_traits::HasObject; /// /// #[tokio::main] /// async fn main() { @@ -145,7 +150,7 @@ impl Client { /// let resp: CompleteMultipartUploadResponse = client /// .complete_multipart_upload("bucket-name", "object-name", "upload-id-123", parts) /// .send().await.unwrap(); - /// println!("Completed multipart upload for '{}'", resp.object); + /// println!("Completed multipart upload for '{}'", resp.object()); /// } /// ``` pub fn complete_multipart_upload, S2: Into, S3: Into>( @@ -178,6 +183,7 @@ impl Client { /// use minio::s3::response::UploadPartResponse; /// use minio::s3::types::S3Api; /// use minio::s3::segmented_bytes::SegmentedBytes; + /// use minio::s3::response::a_response_traits::HasObject; /// /// #[tokio::main] /// async fn main() { @@ -186,7 +192,7 @@ impl Client { /// let resp: UploadPartResponse = client /// .upload_part("bucket-name", "object-name", "upload-id", 1, data) /// .send().await.unwrap(); - /// println!("Uploaded object: {}", resp.object); + /// println!("Uploaded object: {}", resp.object()); /// } /// ``` pub fn upload_part, S2: Into, S3: Into>( @@ -220,6 +226,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::PutObjectContentResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::{HasObject, HasEtagFromHeaders}; /// /// #[tokio::main] /// async fn main() { @@ -228,7 +235,7 @@ impl Client { /// let resp: PutObjectContentResponse = client /// .put_object_content("bucket", "object", content) /// .send().await.unwrap(); - /// println!("Uploaded object '{}' with ETag '{}'", resp.object, resp.etag); + /// println!("Uploaded object '{}' with ETag '{:?}'", resp.object(), resp.etag()); /// } /// ``` pub fn put_object_content, S2: Into, C: Into>( diff --git a/src/s3/client/put_object_legal_hold.rs b/src/s3/client/put_object_legal_hold.rs index 38a57dd..22be7a0 100644 --- a/src/s3/client/put_object_legal_hold.rs +++ b/src/s3/client/put_object_legal_hold.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::PutObjectLegalHoldResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async 
fn main() { @@ -37,7 +38,7 @@ impl Client { /// let resp: PutObjectLegalHoldResponse = client /// .put_object_legal_hold("bucket-name", "object-name", true) /// .send().await.unwrap(); - /// println!("legal hold of bucket '{}' is enabled", resp.bucket); + /// println!("legal hold of bucket '{}' is enabled", resp.bucket()); /// } /// ``` pub fn put_object_legal_hold, S2: Into>( diff --git a/src/s3/client/put_object_lock_config.rs b/src/s3/client/put_object_lock_config.rs index 5a2655a..e8597f0 100644 --- a/src/s3/client/put_object_lock_config.rs +++ b/src/s3/client/put_object_lock_config.rs @@ -30,6 +30,7 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::{CreateBucketResponse, PutObjectLockConfigResponse}; /// use minio::s3::types::{S3Api, ObjectLockConfig, RetentionMode}; + /// use minio::s3::response::a_response_traits::HasBucket; /// /// #[tokio::main] /// async fn main() { @@ -38,14 +39,14 @@ impl Client { /// /// let resp: CreateBucketResponse = /// client.create_bucket(bucket_name).object_lock(true).send().await.unwrap(); - /// println!("created bucket '{}' with object locking enabled", resp.bucket); + /// println!("created bucket '{}' with object locking enabled", resp.bucket()); /// /// const DURATION_DAYS: i32 = 7; /// let config = ObjectLockConfig::new(RetentionMode::GOVERNANCE, Some(DURATION_DAYS), None).unwrap(); /// /// let resp: PutObjectLockConfigResponse = /// client.put_object_lock_config(bucket_name).config(config).send().await.unwrap(); - /// println!("configured object locking for bucket '{}'", resp.bucket); + /// println!("configured object locking for bucket '{}'", resp.bucket()); /// } /// ``` pub fn put_object_lock_config>(&self, bucket: S) -> PutObjectLockConfig { diff --git a/src/s3/client/put_object_retention.rs b/src/s3/client/put_object_retention.rs index 635775b..e97b3c5 100644 --- a/src/s3/client/put_object_retention.rs +++ b/src/s3/client/put_object_retention.rs @@ -32,17 +32,18 @@ impl Client { /// use minio::s3::builders::ObjectToDelete; /// use minio::s3::types::{S3Api, RetentionMode}; /// use minio::s3::utils::utc_now; + /// use minio::s3::response::a_response_traits::HasObject; /// /// #[tokio::main] /// async fn main() { - /// let client: Client = Default::default(); // configure your client here + /// let client: Client = Default::default(); // configure your client here /// let retain_until_date = utc_now() + chrono::Duration::days(1); /// let resp: PutObjectRetentionResponse = client /// .put_object_retention("bucket-name", "object-name") /// .retention_mode(Some(RetentionMode::GOVERNANCE)) /// .retain_until_date(Some(retain_until_date)) /// .send().await.unwrap(); - /// println!("set the object retention for object '{}'", resp.object); + /// println!("set the object retention for object '{}'", resp.object()); /// } /// ``` pub fn put_object_retention, S2: Into>( diff --git a/src/s3/client/put_object_tagging.rs b/src/s3/client/put_object_tagging.rs index 543e463..3189ef5 100644 --- a/src/s3/client/put_object_tagging.rs +++ b/src/s3/client/put_object_tagging.rs @@ -31,10 +31,11 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::PutObjectTaggingResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasObject; /// /// #[tokio::main] /// async fn main() { - /// let client: Client = Default::default(); // configure your client here + /// let client: Client = Default::default(); // configure your client here /// let tags = HashMap::from([ /// (String::from("Project"), 
String::from("Project One")), /// (String::from("User"), String::from("jsmith")), @@ -43,7 +44,7 @@ impl Client { /// .put_object_tagging("bucket-name", "object-name") /// .tags(tags) /// .send().await.unwrap(); - /// println!("set the object tags for object '{}'", resp.object); + /// println!("set the object tags for object '{}'", resp.object()); /// } /// ``` pub fn put_object_tagging>(&self, bucket: S, object: S) -> PutObjectTagging { diff --git a/src/s3/client/stat_object.rs b/src/s3/client/stat_object.rs index 00cbbb2..0575eec 100644 --- a/src/s3/client/stat_object.rs +++ b/src/s3/client/stat_object.rs @@ -28,13 +28,14 @@ impl Client { /// use minio::s3::Client; /// use minio::s3::response::StatObjectResponse; /// use minio::s3::types::S3Api; + /// use minio::s3::response::a_response_traits::HasObject; /// /// #[tokio::main] /// async fn main() { /// let client: Client = Default::default(); // configure your client here /// let resp: StatObjectResponse = /// client.stat_object("bucket-name", "object-name").send().await.unwrap(); - /// println!("stat of object '{}' are {:#?}", resp.object, resp); + /// println!("stat of object '{}' are {:#?}", resp.object(), resp); /// } /// ``` pub fn stat_object, S2: Into>( diff --git a/src/s3/error.rs b/src/s3/error.rs index 64a9622..aedfaf6 100644 --- a/src/s3/error.rs +++ b/src/s3/error.rs @@ -129,12 +129,7 @@ pub enum Error { StrError(reqwest::header::ToStrError), IntError(std::num::ParseIntError), BoolError(std::str::ParseBoolError), - Utf8Error(Box), - /// Occurs when converting Vec to String (e.g. String::from_utf8) - //FromUtf8Error(alloc::string::FromUtf8Error), - /// Occurs when converting &[u8] to &str (e.g. std::str::from_utf8) - //Utf8Error(std::str::Utf8Error), JsonError(serde_json::Error), XmlError(String), InvalidBaseUrl(String), @@ -203,7 +198,6 @@ impl fmt::Display for Error { Error::IntError(e) => write!(f, "{e}"), Error::BoolError(e) => write!(f, "{e}"), Error::Utf8Error(e) => write!(f, "{e}"), - //Error::FromUtf8Error(e) => write!(f, "{e}"), Error::JsonError(e) => write!(f, "{e}"), Error::XmlError(m) => write!(f, "{m}"), Error::InvalidBucketName(m) => write!(f, "{m}"), diff --git a/src/s3/response.rs b/src/s3/response.rs index 793ef66..cc5dba3 100644 --- a/src/s3/response.rs +++ b/src/s3/response.rs @@ -60,6 +60,9 @@ mod put_object_tagging; mod select_object_content; mod stat_object; +#[macro_use] +pub mod a_response_traits; + pub use append_object::AppendObjectResponse; pub use bucket_exists::BucketExistsResponse; pub use copy_object::*; diff --git a/src/s3/response/a_response_traits.rs b/src/s3/response/a_response_traits.rs new file mode 100644 index 0000000..2daf26a --- /dev/null +++ b/src/s3/response/a_response_traits.rs @@ -0,0 +1,219 @@ +use crate::s3::error::Error; +use crate::s3::types::S3Request; +use crate::s3::utils::{get_text, trim_quotes}; +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use std::collections::HashMap; +use xmltree::Element; + +#[macro_export] +/// Implements the `FromS3Response` trait for the specified types. +macro_rules! impl_from_s3response { + ($($ty:ty),* $(,)?) => { + $( + #[async_trait::async_trait] + impl FromS3Response for $ty { + async fn from_s3response( + request: S3Request, + response: Result, + ) -> Result { + let mut resp: reqwest::Response = response?; + Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await? 
+ }) + } + } + )* + }; +} + +#[macro_export] +/// Implements the `FromS3Response` trait for the specified types with an additional `object_size` field. +macro_rules! impl_from_s3response_with_size { + ($($ty:ty),* $(,)?) => { + $( + #[async_trait::async_trait] + impl FromS3Response for $ty { + async fn from_s3response( + request: S3Request, + response: Result, + ) -> Result { + let mut resp: reqwest::Response = response?; + Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, + object_size: 0, // Default value, can be set later + }) + } + } + )* + }; +} + +#[macro_export] +/// Implements the `HasS3Fields` trait for the specified types. +macro_rules! impl_has_s3fields { + ($($ty:ty),* $(,)?) => { + $( + impl HasS3Fields for $ty { + /// The request that was sent to the S3 API. + fn request(&self) -> &S3Request { + &self.request + } + + /// The response of the S3 API. + fn headers(&self) -> &HeaderMap { + &self.headers + } + + /// The response of the S3 API. + fn body(&self) -> &Bytes { + &self.body + } + } + )* + }; +} + +pub trait HasS3Fields { + /// The request that was sent to the S3 API. + fn request(&self) -> &S3Request; + /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. + fn headers(&self) -> &HeaderMap; + /// The response body returned by the server, which may contain the object data or other information. + fn body(&self) -> &Bytes; +} +/// Returns the name of the S3 bucket. +pub trait HasBucket: HasS3Fields { + /// Returns the name of the S3 bucket. + #[inline] + fn bucket(&self) -> &str { + self.request().bucket.as_deref().unwrap_or_default() + } +} +/// Returns the object key (name) of the S3 object. +pub trait HasObject: HasS3Fields { + /// Returns the object key (name) of the S3 object. + #[inline] + fn object(&self) -> &str { + self.request().object.as_deref().unwrap_or_default() + } +} +/// Returns the region of the S3 bucket. +pub trait HasRegion: HasS3Fields { + /// Returns the region of the S3 bucket. + #[inline] + fn region(&self) -> &str { + &self.request().inner_region + } +} + +/// Returns the version ID of the object (`x-amz-version-id`), if versioning is enabled for the bucket. +pub trait HasVersion: HasS3Fields { + /// Returns the version ID of the object (`x-amz-version-id`), if versioning is enabled for the bucket. + #[inline] + fn version_id(&self) -> Option<&str> { + self.headers() + .get("x-amz-version-id") + .and_then(|v| v.to_str().ok()) + } +} + +/// Returns the value of the `ETag` header from response headers (for operations that return ETag in headers). +/// The ETag is typically a hash of the object content, but it may vary based on the storage backend. +pub trait HasEtagFromHeaders: HasS3Fields { + /// Returns the value of the `ETag` header from response headers (for operations that return ETag in headers). + /// The ETag is typically a hash of the object content, but it may vary based on the storage backend. + #[inline] + fn etag(&self) -> Result { + // Retrieve the ETag from the response headers. + let etag = self + .headers() + .get("etag") + .and_then(|v| v.to_str().ok()) + .map(|s| s.trim_matches('"')) + .unwrap_or_default() + .to_string(); + Ok(etag) + } +} + +/// Returns the value of the `ETag` from the response body, which is a unique identifier for +/// the object version. The ETag is typically a hash of the object content, but it may vary +/// based on the storage backend. 
+pub trait HasEtagFromBody: HasS3Fields { + /// Returns the value of the `ETag` from the response body, which is a unique identifier for + /// the object version. The ETag is typically a hash of the object content, but it may vary + /// based on the storage backend. + fn etag(&self) -> Result { + // Retrieve the ETag from the response body. + let root = xmltree::Element::parse(self.body().clone().reader())?; + let etag: String = get_text(&root, "ETag")?; + Ok(trim_quotes(etag)) + } +} + +/// Returns the size of the object in bytes, as specified by the `x-amz-object-size` header. +pub trait HasObjectSize: HasS3Fields { + /// Returns the size of the object in bytes, as specified by the `x-amz-object-size` header. + #[inline] + fn object_size(&self) -> u64 { + self.headers() + .get("x-amz-object-size") + .and_then(|v| v.to_str().ok()) + .and_then(|s| s.parse::().ok()) + .unwrap_or(0) + } +} + +/// Value of the `x-amz-delete-marker` header. +/// Indicates whether the specified object version that was permanently deleted was (true) or +/// was not (false) a delete marker before deletion. In a simple DELETE, this header indicates +/// whether (true) or not (false) the current version of the object is a delete marker. +pub trait HasIsDeleteMarker: HasS3Fields { + /// Returns `true` if the object is a delete marker, `false` otherwise. + /// + /// Value of the `x-amz-delete-marker` header. + /// Indicates whether the specified object version that was permanently deleted was (true) or + /// was not (false) a delete marker before deletion. In a simple DELETE, this header indicates + /// whether (true) or not (false) the current version of the object is a delete marker. + #[inline] + fn is_delete_marker(&self) -> Result, Error> { + Ok(Some( + self.headers() + .get("x-amz-delete-marker") + .map(|v| v == "true") + .unwrap_or(false), + )) + + //Ok(match self.headers().get("x-amz-delete-marker") { + // Some(v) => Some(v.to_str()?.parse::()?), + // None => None, + //}) + } +} + +pub trait HasTagging: HasS3Fields { + /// Returns the tags associated with the bucket. + /// + /// If the bucket has no tags, this will return an empty `HashMap`. + #[inline] + fn tags(&self) -> Result, Error> { + let mut tags = HashMap::new(); + if self.body().is_empty() { + // Note: body is empty when server responses with NoSuchTagSet + return Ok(tags); + } + let mut root = Element::parse(self.body().clone().reader())?; + let element = root + .get_mut_child("TagSet") + .ok_or(Error::XmlError(" tag not found".to_string()))?; + while let Some(v) = element.take_child("Tag") { + tags.insert(get_text(&v, "Key")?, get_text(&v, "Value")?); + } + Ok(tags) + } +} diff --git a/src/s3/response/append_object.rs b/src/s3/response/append_object.rs index b00e577..fa21b6a 100644 --- a/src/s3/response/append_object.rs +++ b/src/s3/response/append_object.rs @@ -14,78 +14,30 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{ + HasBucket, HasEtagFromHeaders, HasObject, HasObjectSize, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{take_bucket, take_object}; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the `append_object` API call. /// This struct contains metadata and information about the object being appended. 
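The macros and marker traits in a_response_traits.rs above establish the shape every refactored response (starting with AppendObjectResponse here) now follows: keep the raw request, headers, and body, and derive everything else lazily. As a rough illustration of that wiring, a new crate-internal response type would look roughly like the sketch below; the name `ExampleStubResponse` is hypothetical and not part of this change.

```rust
// Hypothetical crate-internal response type, shown only to illustrate the macro wiring.
use crate::s3::error::Error;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};
use bytes::Bytes;
use http::HeaderMap;
use std::mem;

#[derive(Clone, Debug)]
pub struct ExampleStubResponse {
    request: S3Request, // original request: source of the bucket/object/region accessors
    headers: HeaderMap, // raw response headers: source of etag/version-id/size accessors
    body: Bytes,        // raw response body: parsed lazily by accessors that need it
}

// Generic constructor: capture request, headers, and body from the reqwest response.
impl_from_s3response!(ExampleStubResponse);
// Expose the three captured fields through HasS3Fields.
impl_has_s3fields!(ExampleStubResponse);

// Opt in to exactly the accessors this response supports.
impl HasBucket for ExampleStubResponse {}
impl HasRegion for ExampleStubResponse {}
```

The marker impls stay empty because every accessor ships a default method body on the trait.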
-/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket containing the object. -/// * `object` - Key (path) identifying the object within the bucket. -/// * `etag` - Entity tag representing a specific version of the object. -/// * `version_id` - Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. -/// * `object_size` - Value of the `x-amz-object-size` header. -#[derive(Debug, Clone)] +#[derive(Clone, Debug)] pub struct AppendObjectResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Entity tag representing a specific version of the object. - pub etag: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, - - /// Value of the `x-amz-object-size` header. - pub object_size: u64, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for AppendObjectResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - let headers: HeaderMap = mem::take(resp.headers_mut()); +impl_from_s3response!(AppendObjectResponse); +impl_has_s3fields!(AppendObjectResponse); - let etag: String = match headers.get("etag") { - Some(v) => v.to_str()?.to_string().trim_matches('"').to_string(), - _ => String::new(), - }; - let version_id: Option = match headers.get("x-amz-version-id") { - Some(v) => Some(v.to_str()?.to_string()), - None => None, - }; - let object_size: u64 = match headers.get("x-amz-object-size") { - Some(v) => v.to_str()?.parse::()?, - None => 0, - }; - - Ok(Self { - headers, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - region: req.inner_region, - etag, - version_id, - object_size, - }) - } -} +impl HasBucket for AppendObjectResponse {} +impl HasObject for AppendObjectResponse {} +impl HasRegion for AppendObjectResponse {} +impl HasVersion for AppendObjectResponse {} +impl HasEtagFromHeaders for AppendObjectResponse {} +impl HasObjectSize for AppendObjectResponse {} diff --git a/src/s3/response/bucket_exists.rs b/src/s3/response/bucket_exists.rs index ff71cbe..829a86f 100644 --- a/src/s3/response/bucket_exists.rs +++ b/src/s3/response/bucket_exists.rs @@ -13,59 +13,57 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::{Error, ErrorCode}; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; use async_trait::async_trait; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the [bucket_exists()](crate::s3::client::Client::bucket_exists) API call. /// This struct contains metadata and information about the existence of a bucket. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. 
If the bucket does not exist, this will be an empty string. -/// * `bucket` - The name of the bucket being checked. -/// * `exists` - A boolean indicating whether the bucket exists or not. #[derive(Clone, Debug)] pub struct BucketExistsResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, - /// The AWS region where the bucket resides. - pub region: String, - - /// The name of the bucket being checked. - pub bucket: String, - - /// Whether the bucket exists or not. - pub exists: bool, + pub(crate) exists: bool, } +impl_has_s3fields!(BucketExistsResponse); + +impl HasBucket for BucketExistsResponse {} +impl HasRegion for BucketExistsResponse {} #[async_trait] impl FromS3Response for BucketExistsResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - match resp { - Ok(mut r) => Ok(Self { - headers: mem::take(r.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, + match response { + Ok(mut resp) => Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, exists: true, }), - Err(Error::S3Error(e)) if e.code == ErrorCode::NoSuchBucket => { - Ok(Self { - headers: e.headers, - region: String::new(), // NOTE the bucket does not exist and the region is not provided - bucket: take_bucket(req.bucket)?, - exists: false, - }) - } + Err(Error::S3Error(e)) if e.code == ErrorCode::NoSuchBucket => Ok(Self { + request, + headers: e.headers, + body: Bytes::new(), + exists: false, + }), Err(e) => Err(e), } } } + +impl BucketExistsResponse { + /// Returns `true` if the bucket exists, `false` otherwise. + pub fn exists(&self) -> bool { + self.exists + } +} diff --git a/src/s3/response/copy_object.rs b/src/s3/response/copy_object.rs index ab3e66c..31ef39c 100644 --- a/src/s3/response/copy_object.rs +++ b/src/s3/response/copy_object.rs @@ -14,186 +14,43 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{ + HasBucket, HasEtagFromBody, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{get_text, take_bucket, take_object}; -use async_trait::async_trait; -use bytes::{Buf, Bytes}; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; -use xmltree::Element; + +/// Base response struct that contains common functionality for S3 operations +#[derive(Clone, Debug)] +pub struct S3Response2 { + pub(crate) request: S3Request, + pub(crate) headers: HeaderMap, + pub(crate) body: Bytes, +} + +impl_from_s3response!(S3Response2); +impl_has_s3fields!(S3Response2); + +impl HasBucket for S3Response2 {} +impl HasObject for S3Response2 {} +impl HasRegion for S3Response2 {} +impl HasVersion for S3Response2 {} +impl HasEtagFromBody for S3Response2 {} /// Represents the response of the `upload_part_copy` API call. /// This struct contains metadata and information about the part being copied during a multipart upload. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket containing the object. -/// * `object` - Key (path) identifying the object within the bucket. 
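Consumer code changes accordingly: `exists` is read through a method now, and because the response keeps the original request, `bucket()` still reports the requested name even on a NoSuchBucket answer. A minimal sketch against a client configured elsewhere:

```rust
use minio::s3::Client;
use minio::s3::response::BucketExistsResponse;
use minio::s3::response::a_response_traits::HasBucket;
use minio::s3::types::S3Api;

#[tokio::main]
async fn main() {
    let client: Client = Default::default(); // configure your client here
    let resp: BucketExistsResponse = client
        .bucket_exists("possibly-missing-bucket")
        .send()
        .await
        .unwrap();
    // exists() is an accessor now; the requested bucket name is available either way.
    println!("bucket '{}' exists: {}", resp.bucket(), resp.exists());
}
```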
-/// * `etag` - Entity tag representing a specific version of the object. -/// * `version_id` - Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. -#[derive(Clone, Debug)] -pub struct UploadPartCopyResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, +pub type UploadPartCopyResponse = S3Response2; - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Entity tag representing a specific version of the object. - pub etag: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, -} - -#[async_trait] -impl FromS3Response for UploadPartCopyResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - - let headers: HeaderMap = mem::take(resp.headers_mut()); - - let etag: String = { - let body: Bytes = resp.bytes().await?; - let root = Element::parse(body.reader())?; - get_text(&root, "ETag")?.trim_matches('"').to_string() - }; - - let version_id: Option = headers - .get("x-amz-version-id") - .and_then(|v| v.to_str().ok().map(String::from)); - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - etag, - version_id, - }) - } -} - -#[derive(Clone, Debug)] -pub struct CopyObjectInternalResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - pub region: String, - pub bucket: String, - - pub object: String, - pub etag: String, - pub version_id: Option, -} - -#[async_trait] -impl FromS3Response for CopyObjectInternalResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let bucket = req - .bucket - .ok_or_else(|| Error::InvalidBucketName("no bucket specified".into()))?; - let object = req - .object - .ok_or_else(|| Error::InvalidObjectName("no object specified".into()))?; - let mut resp = resp?; - - let headers: HeaderMap = mem::take(resp.headers_mut()); - - let etag: String = { - let body: Bytes = resp.bytes().await?; - let root = Element::parse(body.reader())?; - get_text(&root, "ETag")?.trim_matches('"').to_string() - }; - - let version_id: Option = headers - .get("x-amz-version-id") - .and_then(|v| v.to_str().ok().map(String::from)); - - Ok(CopyObjectInternalResponse { - headers, - region: req.inner_region, - bucket, - object, - etag, - version_id, - }) - } -} +/// Internal response type for copy operations +pub type CopyObjectInternalResponse = S3Response2; /// Represents the response of the `copy_object` API call. /// This struct contains metadata and information about the object being copied. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket containing the object. -/// * `object` - Key (path) identifying the object within the bucket. -/// * `etag` - Entity tag representing a specific version of the object. -/// * `version_id` - Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. 
-#[derive(Clone, Debug)] -pub struct CopyObjectResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Entity tag representing a specific version of the object. - pub etag: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, -} +pub type CopyObjectResponse = S3Response2; /// Represents the response of the `[compose_object()](crate::s3::client::Client::compose_object) API call. /// This struct contains metadata and information about the composed object. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `bucket` - Name of the bucket containing the composed object. -/// * `object` - Key (path) identifying the composed object within the bucket. -/// * `region` - The AWS region where the bucket resides. -/// * `etag` - Entity tag representing a specific version of the composed object. -/// * `version_id` - Version ID of the composed object, if versioning is enabled. -#[derive(Debug, Clone)] -pub struct ComposeObjectResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// Name of the bucket containing the composed object. - pub bucket: String, - - /// Key (path) identifying the composed object within the bucket. - pub object: String, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Entity tag representing a specific version of the composed object. - pub etag: String, - - /// Version ID of the composed object, if versioning is enabled. - pub version_id: Option, -} +pub type ComposeObjectResponse = S3Response2; diff --git a/src/s3/response/create_bucket.rs b/src/s3/response/create_bucket.rs index 9b5c33a..30525b7 100644 --- a/src/s3/response/create_bucket.rs +++ b/src/s3/response/create_bucket.rs @@ -13,10 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; use async_trait::async_trait; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -25,31 +27,36 @@ use std::mem; /// API #[derive(Clone, Debug)] pub struct CreateBucketResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. 
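Since `UploadPartCopyResponse`, `CopyObjectResponse`, and `ComposeObjectResponse` are now all aliases of `S3Response2`, shared helpers can be written once against the trait bounds instead of per response type. A sketch (the helper itself is illustrative):

```rust
use minio::s3::error::Error;
use minio::s3::response::CopyObjectResponse;
use minio::s3::response::a_response_traits::{HasBucket, HasEtagFromBody, HasObject};

/// Works for any response whose ETag lives in the XML body,
/// e.g. the S3Response2 aliases introduced above.
fn copy_summary<R: HasBucket + HasObject + HasEtagFromBody>(resp: &R) -> Result<String, Error> {
    Ok(format!(
        "{}/{} etag={}",
        resp.bucket(),
        resp.object(),
        resp.etag()? // parsed from the XML body on demand
    ))
}

fn log_copy(resp: &CopyObjectResponse) -> Result<(), Error> {
    println!("{}", copy_summary(resp)?);
    Ok(())
}
```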
- pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } +impl_has_s3fields!(CreateBucketResponse); + +impl HasBucket for CreateBucketResponse {} +impl HasRegion for CreateBucketResponse {} + #[async_trait] impl FromS3Response for CreateBucketResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let mut req = req; - let bucket: String = take_bucket(req.bucket)?; - req.client.add_bucket_region(&bucket, &req.inner_region); - let mut resp = resp?; + let mut resp: reqwest::Response = response?; + + let mut request = request; + let bucket: &str = request + .bucket + .as_deref() + .ok_or_else(|| Error::InvalidBucketName("no bucket specified".into()))?; + let region: &str = &request.inner_region; + request.client.add_bucket_region(bucket, region); Ok(Self { + request, headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket, + body: resp.bytes().await?, }) } } diff --git a/src/s3/response/delete_bucket.rs b/src/s3/response/delete_bucket.rs index 24d16de..f70ee8c 100644 --- a/src/s3/response/delete_bucket.rs +++ b/src/s3/response/delete_bucket.rs @@ -13,10 +13,11 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -25,31 +26,34 @@ use std::mem; /// API #[derive(Clone, Debug)] pub struct DeleteBucketResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + pub(crate) request: S3Request, + pub(crate) headers: HeaderMap, + pub(crate) body: Bytes, } +impl_has_s3fields!(DeleteBucketResponse); -#[async_trait] +impl HasBucket for DeleteBucketResponse {} +impl HasRegion for DeleteBucketResponse {} + +#[async_trait::async_trait] impl FromS3Response for DeleteBucketResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let mut req = req; - let bucket: String = take_bucket(req.bucket)?; - req.client.remove_bucket_region(&bucket); - let mut resp = resp?; + let mut resp: reqwest::Response = response?; + let mut request = request; + let bucket: &str = request + .bucket + .as_deref() + .ok_or_else(|| Error::InvalidBucketName("no bucket specified".into()))?; + + request.client.remove_bucket_region(bucket); Ok(Self { + request, headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket, + body: resp.bytes().await?, }) } } diff --git a/src/s3/response/delete_bucket_encryption.rs b/src/s3/response/delete_bucket_encryption.rs index ce5acad..a2aaf29 100644 --- a/src/s3/response/delete_bucket_encryption.rs +++ b/src/s3/response/delete_bucket_encryption.rs @@ -14,44 +14,24 @@ // limitations under the License. 
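`CreateBucketResponse` and `DeleteBucketResponse` keep their side effect of updating the client's bucket-region cache, while the bucket and region are now read through the accessor traits. A sketch (the `delete_bucket` builder name and the re-export of both response types are assumed here):

```rust
use minio::s3::Client;
use minio::s3::response::{CreateBucketResponse, DeleteBucketResponse};
use minio::s3::response::a_response_traits::{HasBucket, HasRegion};
use minio::s3::types::S3Api;

#[tokio::main]
async fn main() {
    let client: Client = Default::default(); // configure your client here

    // from_s3response() also records the bucket's region in the client cache.
    let resp: CreateBucketResponse = client.create_bucket("demo-bucket").send().await.unwrap();
    println!("created '{}' in region '{}'", resp.bucket(), resp.region());

    // ...and removing the bucket evicts that cache entry again.
    let resp: DeleteBucketResponse = client.delete_bucket("demo-bucket").send().await.unwrap();
    println!("deleted '{}'", resp.bucket());
}
```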
use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the [delete_bucket_encryption()](crate::s3::client::Client::delete_bucket_encryption) API call. /// This struct contains metadata and information about the bucket whose encryption configuration was removed. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket from which the encryption configuration was removed. #[derive(Clone, Debug)] pub struct DeleteBucketEncryptionResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket from which the Encryption configuration was removed. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for DeleteBucketEncryptionResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(DeleteBucketEncryptionResponse); +impl_has_s3fields!(DeleteBucketEncryptionResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for DeleteBucketEncryptionResponse {} +impl HasRegion for DeleteBucketEncryptionResponse {} diff --git a/src/s3/response/delete_bucket_lifecycle.rs b/src/s3/response/delete_bucket_lifecycle.rs index dbc691e..69f5121 100644 --- a/src/s3/response/delete_bucket_lifecycle.rs +++ b/src/s3/response/delete_bucket_lifecycle.rs @@ -14,44 +14,24 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the [delete_bucket_lifecycle()](crate::s3::client::Client::delete_bucket_lifecycle) API call. /// This struct contains metadata and information about the bucket whose lifecycle configuration was removed. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket from which the Bucket Lifecycle configuration was removed. #[derive(Clone, Debug)] pub struct DeleteBucketLifecycleResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket from which the Bucket Lifecycle configuration was removed. 
- pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for DeleteBucketLifecycleResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(DeleteBucketLifecycleResponse); +impl_has_s3fields!(DeleteBucketLifecycleResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for DeleteBucketLifecycleResponse {} +impl HasRegion for DeleteBucketLifecycleResponse {} diff --git a/src/s3/response/delete_bucket_notification.rs b/src/s3/response/delete_bucket_notification.rs index e7b1e4c..15e31fb 100644 --- a/src/s3/response/delete_bucket_notification.rs +++ b/src/s3/response/delete_bucket_notification.rs @@ -14,44 +14,24 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the [delete_bucket_notification()](crate::s3::client::Client::delete_bucket_notification) API call. /// This struct contains metadata and information about the bucket whose notifications were removed. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket from which the Bucket Notifications were removed. #[derive(Clone, Debug)] pub struct DeleteBucketNotificationResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket from which the Bucket Notifications were removed. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for DeleteBucketNotificationResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(DeleteBucketNotificationResponse); +impl_has_s3fields!(DeleteBucketNotificationResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for DeleteBucketNotificationResponse {} +impl HasRegion for DeleteBucketNotificationResponse {} diff --git a/src/s3/response/delete_bucket_policy.rs b/src/s3/response/delete_bucket_policy.rs index 9448f38..9c20844 100644 --- a/src/s3/response/delete_bucket_policy.rs +++ b/src/s3/response/delete_bucket_policy.rs @@ -13,49 +13,45 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::{Error, ErrorCode}; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; use async_trait::async_trait; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the [delete_bucket_policy()](crate::s3::client::Client::delete_bucket_policy) API call. 
/// This struct contains metadata and information about the bucket whose policy was removed. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket from which the Bucket Policy was removed. #[derive(Clone, Debug)] pub struct DeleteBucketPolicyResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket from which the Bucket Policy was removed. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } +impl_has_s3fields!(DeleteBucketPolicyResponse); + +impl HasBucket for DeleteBucketPolicyResponse {} +impl HasRegion for DeleteBucketPolicyResponse {} + #[async_trait] impl FromS3Response for DeleteBucketPolicyResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - match resp { - Ok(mut r) => Ok(Self { - headers: mem::take(r.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, + match response { + Ok(mut resp) => Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, }), Err(Error::S3Error(e)) if e.code == ErrorCode::NoSuchBucketPolicy => Ok(Self { + request, headers: e.headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, + body: Bytes::new(), }), Err(e) => Err(e), } diff --git a/src/s3/response/delete_bucket_replication.rs b/src/s3/response/delete_bucket_replication.rs index 5bb05cd..31ea8d9 100644 --- a/src/s3/response/delete_bucket_replication.rs +++ b/src/s3/response/delete_bucket_replication.rs @@ -13,52 +13,48 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::{Error, ErrorCode}; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; use async_trait::async_trait; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the `[delete_bucket_replication()](crate::s3::client::Client::delete_bucket_replication) API call. /// This struct contains metadata and information about the bucket whose replication configuration was removed. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket from which the Replication configuration was removed. #[derive(Clone, Debug)] pub struct DeleteBucketReplicationResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket from which the Replication configuration was removed. 
- pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } +impl_has_s3fields!(DeleteBucketReplicationResponse); + +impl HasBucket for DeleteBucketReplicationResponse {} +impl HasRegion for DeleteBucketReplicationResponse {} + #[async_trait] impl FromS3Response for DeleteBucketReplicationResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - match resp { - Ok(mut r) => Ok(Self { - headers: mem::take(r.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, + match response { + Ok(mut resp) => Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, }), Err(Error::S3Error(e)) if e.code == ErrorCode::ReplicationConfigurationNotFoundError => { Ok(Self { + request, headers: e.headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, + body: Bytes::new(), }) } Err(e) => Err(e), diff --git a/src/s3/response/delete_bucket_tagging.rs b/src/s3/response/delete_bucket_tagging.rs index db68055..309c24c 100644 --- a/src/s3/response/delete_bucket_tagging.rs +++ b/src/s3/response/delete_bucket_tagging.rs @@ -14,44 +14,24 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Represents the response of the [delete_bucket_tagging()](crate::s3::client::Client::delete_bucket_tagging) API call. /// This struct contains metadata and information about the bucket whose tags were removed. -/// -/// # Fields -/// -/// * `headers` - HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. -/// * `region` - The AWS region where the bucket resides. -/// * `bucket` - Name of the bucket from which the tags were removed. #[derive(Clone, Debug)] pub struct DeleteBucketTaggingResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket from which the tags were removed. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for DeleteBucketTaggingResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(DeleteBucketTaggingResponse); +impl_has_s3fields!(DeleteBucketTaggingResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for DeleteBucketTaggingResponse {} +impl HasRegion for DeleteBucketTaggingResponse {} diff --git a/src/s3/response/delete_object.rs b/src/s3/response/delete_object.rs index c75b56d..2b0621c 100644 --- a/src/s3/response/delete_object.rs +++ b/src/s3/response/delete_object.rs @@ -13,58 +13,32 @@ // See the License for the specific language governing permissions and // limitations under the License. 
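The delete_object.rs changes just below move bulk-delete parsing into a lazily evaluated `result()` accessor on `DeleteObjectsResponse`. A small helper sketch built on it (assuming the response type is re-exported from minio::s3::response like the others):

```rust
use minio::s3::error::Error;
use minio::s3::response::DeleteObjectsResponse;

/// Splits a bulk-delete response into (deleted, failed) counts.
fn delete_counts(resp: &DeleteObjectsResponse) -> Result<(usize, usize), Error> {
    // result() parses the XML body on demand instead of eagerly in from_s3response().
    let results = resp.result()?;
    let deleted = results.iter().filter(|r| r.is_deleted()).count();
    Ok((deleted, results.len() - deleted))
}
```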
-use async_trait::async_trait; -use bytes::Buf; +use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{ + HasBucket, HasIsDeleteMarker, HasRegion, HasS3Fields, HasVersion, +}; +use crate::s3::types::{FromS3Response, S3Request}; +use crate::s3::utils::{get_default_text, get_option_text, get_text}; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; -use crate::s3::{ - error::Error, - types::{FromS3Response, S3Request}, - utils::{get_default_text, get_option_text, get_text}, -}; - -#[derive(Debug, Clone)] +#[derive(Clone, Debug)] pub struct DeleteObjectResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// Value of the `x-amz-delete-marker` header. - /// Indicates whether the specified object version that was permanently deleted was (true) or - /// was not (false) a delete marker before deletion. In a simple DELETE, this header indicates - /// whether (true) or not (false) the current version of the object is a delete marker. - pub is_delete_marker: bool, - - /// Value of the `x-amz-version-id` header. - /// If a delete marker was created, this field will contain the version_id of the delete marker. - pub version_id: Option, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for DeleteObjectResponse { - async fn from_s3response( - _req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - let headers: HeaderMap = mem::take(resp.headers_mut()); - let is_delete_marker = headers - .get("x-amz-delete-marker") - .map(|v| v == "true") - .unwrap_or(false); +impl_from_s3response!(DeleteObjectResponse); +impl_has_s3fields!(DeleteObjectResponse); - let version_id: Option = headers - .get("x-amz-version-id") - .and_then(|v| v.to_str().ok().map(String::from)); - - Ok(DeleteObjectResponse { - headers, - is_delete_marker, - version_id, - }) - } -} +impl HasBucket for DeleteObjectResponse {} +impl HasRegion for DeleteObjectResponse {} +impl HasVersion for DeleteObjectResponse {} +impl HasIsDeleteMarker for DeleteObjectResponse {} /// Error info returned by the S3 API when an object could not be deleted. #[derive(Clone, Debug)] @@ -84,18 +58,6 @@ pub struct DeletedObject { pub delete_marker_version_id: Option, } -/// Response of -/// [delete_objects()](crate::s3::client::Client::delete_objects) -/// S3 API. It is also returned by the -/// [remove_objects()](crate::s3::client::Client::delete_objects_streaming) API in the -/// form of a stream. -#[derive(Clone, Debug)] -pub struct DeleteObjectsResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - pub result: Vec, -} - /// Result of deleting an object. #[derive(Clone, Debug)] pub enum DeleteResult { @@ -116,24 +78,30 @@ impl DeleteResult { pub fn is_deleted(&self) -> bool { matches!(self, DeleteResult::Deleted(_)) } - pub fn is_error(&self) -> bool { matches!(self, DeleteResult::Error(_)) } } -#[async_trait] -impl FromS3Response for DeleteObjectsResponse { - async fn from_s3response( - _req: S3Request, - resp: Result, - ) -> Result { - let resp = resp?; - let headers = resp.headers().clone(); +/// Response of +/// [delete_objects()](crate::s3::client::Client::delete_objects) +/// S3 API. 
It is also returned by the +/// [remove_objects()](crate::s3::client::Client::delete_objects_streaming) API in the +/// form of a stream. +#[derive(Clone, Debug)] +pub struct DeleteObjectsResponse { + request: S3Request, + pub(crate) headers: HeaderMap, + body: Bytes, +} - let body = resp.bytes().await?; +impl_from_s3response!(DeleteObjectsResponse); +impl_has_s3fields!(DeleteObjectsResponse); - let root = Element::parse(body.reader())?; +impl DeleteObjectsResponse { + /// Returns the per-object results of the delete operation, parsed from the response body. + pub fn result(&self) -> Result<Vec<DeleteResult>, Error> { + let root = Element::parse(self.body.clone().reader())?; let result = root .children .iter() @@ -158,7 +126,6 @@ impl FromS3Response for DeleteObjectsResponse { } }) .collect::<Result<Vec<DeleteResult>, Error>>()?; - - Ok(Self { headers, result }) + Ok(result) } } diff --git a/src/s3/response/delete_object_lock_config.rs b/src/s3/response/delete_object_lock_config.rs index c83724c..f99274d 100644 --- a/src/s3/response/delete_object_lock_config.rs +++ b/src/s3/response/delete_object_lock_config.rs @@ -14,9 +14,10 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -29,28 +30,13 @@ use std::mem; /// For more information, refer to the [AWS S3 DeleteObjectLockConfiguration API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectLockConfiguration.html). #[derive(Clone, Debug)] pub struct DeleteObjectLockConfigResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket from which the Object Lock configuration was removed. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for DeleteObjectLockConfigResponse { - async fn from_s3response( - req: S3Request, - resp: Result<reqwest::Response, Error>, - ) -> Result<Self, Error> { - let mut resp = resp?; +impl_from_s3response!(DeleteObjectLockConfigResponse); +impl_has_s3fields!(DeleteObjectLockConfigResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for DeleteObjectLockConfigResponse {} +impl HasRegion for DeleteObjectLockConfigResponse {} diff --git a/src/s3/response/delete_object_tagging.rs b/src/s3/response/delete_object_tagging.rs index 67472d6..ba55e95 100644 --- a/src/s3/response/delete_object_tagging.rs +++ b/src/s3/response/delete_object_tagging.rs @@ -14,10 +14,12 @@ // limitations under the License. use crate::s3::error::Error; -use crate::s3::multimap::MultimapExt; +use crate::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{take_bucket, take_object}; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -30,38 +32,15 @@ use std::mem; /// For more information, refer to the [AWS S3 DeleteObjectTagging API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html).
#[derive(Clone, Debug)] pub struct DeleteObjectTaggingResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (name) identifying the object within the bucket. - pub object: String, - - /// The version ID of the object from which the tags were removed. - /// - /// If versioning is not enabled on the bucket, this field may be `None`. - pub version_id: Option, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for DeleteObjectTaggingResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(DeleteObjectTaggingResponse); +impl_has_s3fields!(DeleteObjectTaggingResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - }) - } -} +impl HasBucket for DeleteObjectTaggingResponse {} +impl HasRegion for DeleteObjectTaggingResponse {} +impl HasObject for DeleteObjectTaggingResponse {} +impl HasVersion for DeleteObjectTaggingResponse {} diff --git a/src/s3/response/get_bucket_encryption.rs b/src/s3/response/get_bucket_encryption.rs index 3c9602b..bff2853 100644 --- a/src/s3/response/get_bucket_encryption.rs +++ b/src/s3/response/get_bucket_encryption.rs @@ -13,11 +13,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::{Error, ErrorCode}; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request, SseConfig}; -use crate::s3::utils::{get_option_text, get_text, take_bucket}; +use crate::s3::utils::{get_option_text, get_text}; use async_trait::async_trait; -use bytes::Buf; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -32,64 +34,63 @@ use xmltree::Element; /// For more information, refer to the [AWS S3 GetBucketEncryption API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html). #[derive(Clone, Debug)] pub struct GetBucketEncryptionResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, +} - /// The AWS region where the bucket resides. - pub region: String, +impl_has_s3fields!(GetBucketEncryptionResponse); - /// Name of the bucket whose encryption configuration is retrieved. - pub bucket: String, +impl HasBucket for GetBucketEncryptionResponse {} +impl HasRegion for GetBucketEncryptionResponse {} - /// The default server-side encryption configuration of the bucket. +impl GetBucketEncryptionResponse { + /// Returns the default server-side encryption configuration of the bucket. /// /// This includes the encryption algorithm and, if applicable, the AWS KMS key ID used for encrypting objects. - /// - /// If the bucket has no default encryption configuration, the `get_bucket_encryption` API call may return an error - /// with the code `ServerSideEncryptionConfigurationNotFoundError`. It's advisable to handle this case appropriately in your application. 
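The encryption configuration becomes a fallible `config()` accessor just below; callers parse it on demand, and a bucket without SSE settings yields the default `SseConfig` because the not-found error is mapped to an empty body. A minimal consumer-side sketch:

```rust
use minio::s3::Client;
use minio::s3::response::GetBucketEncryptionResponse;
use minio::s3::types::S3Api;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client: Client = Default::default(); // configure your client here

    let resp: GetBucketEncryptionResponse =
        client.get_bucket_encryption("bucket-name").send().await?;

    // config() parses the body lazily; an unconfigured bucket returns SseConfig::default().
    let config = resp.config()?;
    println!("encryption config: {config:?}");
    Ok(())
}
```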
- pub config: SseConfig, + /// If the bucket has no default encryption configuration, this method returns a default `SseConfig` with empty fields. + pub fn config(&self) -> Result { + if self.body.is_empty() { + return Ok(SseConfig::default()); + } + let mut root = Element::parse(self.body.clone().reader())?; // clone of Bytes is inexpensive + + let rule = root + .get_mut_child("Rule") + .ok_or(Error::XmlError(" tag not found".into()))?; + + let sse_by_default = rule + .get_mut_child("ApplyServerSideEncryptionByDefault") + .ok_or(Error::XmlError( + " tag not found".into(), + ))?; + + Ok(SseConfig { + sse_algorithm: get_text(sse_by_default, "SSEAlgorithm")?, + kms_master_key_id: get_option_text(sse_by_default, "KMSMasterKeyID"), + }) + } } #[async_trait] impl FromS3Response for GetBucketEncryptionResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - match resp { - Ok(mut r) => { - let headers: HeaderMap = mem::take(r.headers_mut()); - let body = r.bytes().await?; - let mut root = Element::parse(body.reader())?; - - let rule = root - .get_mut_child("Rule") - .ok_or(Error::XmlError(" tag not found".into()))?; - - let sse_by_default = rule - .get_mut_child("ApplyServerSideEncryptionByDefault") - .ok_or(Error::XmlError( - " tag not found".into(), - ))?; - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config: SseConfig { - sse_algorithm: get_text(sse_by_default, "SSEAlgorithm")?, - kms_master_key_id: get_option_text(sse_by_default, "KMSMasterKeyID"), - }, - }) - } + match response { + Ok(mut resp) => Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, + }), Err(Error::S3Error(e)) if e.code == ErrorCode::ServerSideEncryptionConfigurationNotFoundError => { Ok(Self { + request, headers: e.headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config: Default::default(), + body: Bytes::new(), }) } Err(e) => Err(e), diff --git a/src/s3/response/get_bucket_lifecycle.rs b/src/s3/response/get_bucket_lifecycle.rs index e1e858a..d37b01a 100644 --- a/src/s3/response/get_bucket_lifecycle.rs +++ b/src/s3/response/get_bucket_lifecycle.rs @@ -15,10 +15,10 @@ use crate::s3::error::Error; use crate::s3::lifecycle_config::LifecycleConfig; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{UtcTime, take_bucket}; -use async_trait::async_trait; -use bytes::Buf; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use chrono::{DateTime, NaiveDateTime, Utc}; use http::HeaderMap; use std::mem; @@ -33,56 +33,36 @@ use xmltree::Element; /// For more information, refer to the [AWS S3 GetBucketLifecycleConfiguration API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html). #[derive(Clone, Debug)] pub struct GetBucketLifecycleResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket whose lifecycle configuration is retrieved. - pub bucket: String, - - /// The lifecycle configuration of the bucket. 
- /// - /// This includes a set of rules that define actions applied to objects, such as transitioning - /// them to different storage classes, expiring them, or aborting incomplete multipart uploads. - /// - /// If the bucket has no lifecycle configuration, this field may contain an empty configuration. - pub config: LifecycleConfig, - - /// Optional value of `X-Minio-LifecycleConfig-UpdatedAt` header, indicating the last update - /// time of the lifecycle configuration. - pub updated_at: Option, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetBucketLifecycleResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - let headers: HeaderMap = mem::take(resp.headers_mut()); - let config: LifecycleConfig = { - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; - LifecycleConfig::from_xml(&root)? - }; - let updated_at: Option> = headers +impl_from_s3response!(GetBucketLifecycleResponse); +impl_has_s3fields!(GetBucketLifecycleResponse); + +impl HasBucket for GetBucketLifecycleResponse {} +impl HasRegion for GetBucketLifecycleResponse {} + +impl GetBucketLifecycleResponse { + /// Returns the lifecycle configuration of the bucket. + /// + /// This configuration includes rules for managing the lifecycle of objects in the bucket, + /// such as transitioning them to different storage classes or expiring them after a specified period. + pub fn config(&self) -> Result { + LifecycleConfig::from_xml(&Element::parse(self.body.clone().reader())?) + } + + /// Returns the last update time of the lifecycle configuration + /// (`X-Minio-LifecycleConfig-UpdatedAt`), if available. + pub fn updated_at(&self) -> Option> { + self.headers .get("x-minio-lifecycleconfig-updatedat") .and_then(|v| v.to_str().ok()) .and_then(|v| { NaiveDateTime::parse_from_str(v, "%Y%m%dT%H%M%SZ") .ok() .map(|naive| DateTime::from_naive_utc_and_offset(naive, Utc)) - }); - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config, - updated_at, - }) + }) } } diff --git a/src/s3/response/get_bucket_notification.rs b/src/s3/response/get_bucket_notification.rs index 6bc9325..0b3153d 100644 --- a/src/s3/response/get_bucket_notification.rs +++ b/src/s3/response/get_bucket_notification.rs @@ -14,10 +14,10 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, NotificationConfig, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; -use bytes::Buf; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -31,42 +31,23 @@ use xmltree::Element; /// For more information, refer to the [AWS S3 GetBucketNotificationConfiguration API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html). #[derive(Clone, Debug)] pub struct GetBucketNotificationResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket whose notification configuration is retrieved. - pub bucket: String, - - /// The notification configuration of the bucket. 
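For the lifecycle response above, both the configuration and the MinIO-specific update timestamp are now computed from the stored response rather than precomputed fields. A sketch, assuming the builder is named `get_bucket_lifecycle`, the response type is re-exported, and `LifecycleConfig` derives `Debug`:

```rust
use minio::s3::Client;
use minio::s3::response::GetBucketLifecycleResponse;
use minio::s3::types::S3Api;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client: Client = Default::default(); // configure your client here

    let resp: GetBucketLifecycleResponse =
        client.get_bucket_lifecycle("bucket-name").send().await?;

    // config() re-parses the XML body on each call; updated_at() reads the
    // X-Minio-LifecycleConfig-UpdatedAt header when the server provides it.
    println!("lifecycle rules: {:?}", resp.config()?);
    println!("last updated:    {:?}", resp.updated_at());
    Ok(())
}
```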
- /// - /// This includes the event types and the destinations (e.g., SNS topics, SQS queues, Lambda functions) - /// configured to receive notifications for those events. - /// - /// If the bucket has no notification configuration, this field may contain an empty configuration. - pub config: NotificationConfig, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetBucketNotificationResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(GetBucketNotificationResponse); +impl_has_s3fields!(GetBucketNotificationResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; - let mut root = Element::parse(body.reader())?; - let config = NotificationConfig::from_xml(&mut root)?; +impl HasBucket for GetBucketNotificationResponse {} +impl HasRegion for GetBucketNotificationResponse {} - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config, - }) +impl GetBucketNotificationResponse { + /// Returns the notification configuration of the bucket. + /// + /// This configuration includes the event types and the destinations (e.g., SNS topics, SQS queues, Lambda functions) + /// configured to receive notifications for those events. + pub fn config(&self) -> Result { + NotificationConfig::from_xml(&mut Element::parse(self.body.clone().reader())?) } } diff --git a/src/s3/response/get_bucket_policy.rs b/src/s3/response/get_bucket_policy.rs index 3d5ddcc..6f9bf34 100644 --- a/src/s3/response/get_bucket_policy.rs +++ b/src/s3/response/get_bucket_policy.rs @@ -13,10 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::{Error, ErrorCode}; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; use async_trait::async_trait; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -29,45 +31,44 @@ use std::mem; /// For more information, refer to the [AWS S3 GetBucketPolicy API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicy.html). #[derive(Clone, Debug)] pub struct GetBucketPolicyResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, +} - /// The AWS region where the bucket resides. - pub region: String, +impl_has_s3fields!(GetBucketPolicyResponse); - /// Name of the bucket whose policy is retrieved. - pub bucket: String, +impl HasBucket for GetBucketPolicyResponse {} +impl HasRegion for GetBucketPolicyResponse {} - /// The bucket policy as a JSON-formatted string. +impl GetBucketPolicyResponse { + /// Returns the bucket policy as a JSON-formatted string. /// - /// This policy defines access permissions for the bucket. It specifies who can access the bucket, - /// what actions they can perform, and under what conditions. - /// - /// For example, a policy might grant read-only access to anonymous users or restrict access to specific IP addresses. - /// - /// Note: If the bucket has no policy, the `get_bucket_policy` API call may return an error - /// with the code `NoSuchBucketPolicy`. It's advisable to handle this case appropriately in your application. 
- pub config: String, + /// This method retrieves the policy associated with the bucket, which defines permissions + /// for accessing the bucket and its contents. + pub fn config(&self) -> Result<&str, Error> { + std::str::from_utf8(&self.body).map_err(|e| { + Error::Utf8Error(format!("Failed to parse bucket policy as UTF-8: {}", e).into()) + }) + } } #[async_trait] impl FromS3Response for GetBucketPolicyResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - match resp { - Ok(mut r) => Ok(Self { - headers: mem::take(r.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config: r.text().await?, + match response { + Ok(mut resp) => Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, }), Err(Error::S3Error(e)) if e.code == ErrorCode::NoSuchBucketPolicy => Ok(Self { + request, headers: e.headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config: String::from("{}"), + body: Bytes::from_static("{}".as_ref()), }), Err(e) => Err(e), } diff --git a/src/s3/response/get_bucket_replication.rs b/src/s3/response/get_bucket_replication.rs index b5af7a4..fa99f88 100644 --- a/src/s3/response/get_bucket_replication.rs +++ b/src/s3/response/get_bucket_replication.rs @@ -14,10 +14,10 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, ReplicationConfig, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; -use bytes::Buf; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -30,42 +30,26 @@ use xmltree::Element; /// For more information, refer to the [AWS S3 GetBucketReplication API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html). #[derive(Clone, Debug)] pub struct GetBucketReplicationResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, +} - /// The AWS region where the bucket resides. - pub region: String, +impl_from_s3response!(GetBucketReplicationResponse); +impl_has_s3fields!(GetBucketReplicationResponse); - /// Name of the bucket whose replication configuration is retrieved. - pub bucket: String, +impl HasBucket for GetBucketReplicationResponse {} +impl HasRegion for GetBucketReplicationResponse {} - /// The replication configuration of the bucket. +impl GetBucketReplicationResponse { + /// Returns the replication configuration of the bucket. /// /// This includes the IAM role that Amazon S3 assumes to replicate objects on your behalf, /// and one or more replication rules that specify the conditions under which objects are replicated. /// /// For more details on replication configuration elements, see the [AWS S3 Replication Configuration documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-add-config.html). 
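A hedged sketch of reading a bucket policy through the new `GetBucketPolicyResponse::config()` accessor, assuming a configured `Client` and a hypothetical bucket name:

use minio::s3::Client;
use minio::s3::error::Error;
use minio::s3::response::GetBucketPolicyResponse;
use minio::s3::types::S3Api;

async fn print_bucket_policy(client: &Client, bucket: &str) -> Result<(), Error> {
    let resp: GetBucketPolicyResponse = client.get_bucket_policy(bucket).send().await?;
    // config() borrows the raw body as UTF-8; "{}" is returned when the bucket
    // has no policy (the NoSuchBucketPolicy case stores that literal body).
    println!("policy JSON: {}", resp.config()?);
    Ok(())
}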
- pub config: ReplicationConfig, -} - -#[async_trait] -impl FromS3Response for GetBucketReplicationResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - - let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; - let config = ReplicationConfig::from_xml(&root)?; - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config, - }) + pub fn config(&self) -> Result { + let root = Element::parse(self.body.clone().reader())?; + ReplicationConfig::from_xml(&root) } } diff --git a/src/s3/response/get_bucket_tagging.rs b/src/s3/response/get_bucket_tagging.rs index a102666..10ec3c5 100644 --- a/src/s3/response/get_bucket_tagging.rs +++ b/src/s3/response/get_bucket_tagging.rs @@ -13,15 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::{Error, ErrorCode}; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields, HasTagging}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{get_text, take_bucket}; use async_trait::async_trait; -use bytes::Buf; +use bytes::Bytes; use http::HeaderMap; -use std::collections::HashMap; use std::mem; -use xmltree::Element; /// Response from the [`get_bucket_tagging`](crate::s3::client::Client::get_bucket_tagging) API call, /// providing the set of tags associated with an S3 bucket. @@ -32,57 +31,33 @@ use xmltree::Element; /// For more information, refer to the [AWS S3 GetBucketTagging API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html). #[derive(Clone, Debug)] pub struct GetBucketTaggingResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket whose tags are retrieved. - pub bucket: String, - - /// A collection of tags assigned to the bucket. - /// - /// Each tag is a key-value pair represented as a `HashMap`. - /// If the bucket has no tags, this map will be empty. - /// - /// Note: If the bucket has no tags, the `get_bucket_tags` API call may return an error - /// with the code `NoSuchTagSet`. It's advisable to handle this case appropriately in your application. 
- pub tags: HashMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, } +impl_has_s3fields!(GetBucketTaggingResponse); + +impl HasBucket for GetBucketTaggingResponse {} +impl HasRegion for GetBucketTaggingResponse {} +impl HasTagging for GetBucketTaggingResponse {} + #[async_trait] impl FromS3Response for GetBucketTaggingResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - match resp { - Ok(mut r) => { - let headers: HeaderMap = mem::take(r.headers_mut()); - let body = r.bytes().await?; - let mut root = Element::parse(body.reader())?; - - let element = root - .get_mut_child("TagSet") - .ok_or(Error::XmlError(" tag not found".to_string()))?; - let mut tags = HashMap::new(); - while let Some(v) = element.take_child("Tag") { - tags.insert(get_text(&v, "Key")?, get_text(&v, "Value")?); - } - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - tags, - }) - } + match response { + Ok(mut resp) => Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, + }), Err(Error::S3Error(e)) if e.code == ErrorCode::NoSuchTagSet => Ok(Self { + request, headers: e.headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - tags: HashMap::new(), + body: Bytes::new(), }), Err(e) => Err(e), } diff --git a/src/s3/response/get_bucket_versioning.rs b/src/s3/response/get_bucket_versioning.rs index 3bcc9d5..81caae0 100644 --- a/src/s3/response/get_bucket_versioning.rs +++ b/src/s3/response/get_bucket_versioning.rs @@ -15,10 +15,11 @@ use crate::s3::builders::VersioningStatus; use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{get_option_text, take_bucket}; -use async_trait::async_trait; -use bytes::Buf; +use crate::s3::utils::get_option_text; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -32,59 +33,40 @@ use xmltree::Element; /// For more information, refer to the [AWS S3 GetBucketVersioning API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html). #[derive(Clone, Debug)] pub struct GetBucketVersioningResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket whose versioning configuration is retrieved. - pub bucket: String, - - /// The versioning status of the bucket. - /// - /// - `Some(VersioningStatus::Enabled)`: Versioning is enabled. - /// - `Some(VersioningStatus::Suspended)`: Versioning is suspended. - /// - `None`: Versioning has never been configured for this bucket. - pub status: Option, - - /// Indicates whether MFA delete is enabled for the bucket. - /// - /// - `Some(true)`: MFA delete is enabled. - /// - `Some(false)`: MFA delete is disabled. - /// - `None`: MFA delete has never been configured for this bucket. - /// - /// Note: MFA delete adds an extra layer of security by requiring additional authentication - /// for certain operations. For more details, see the [AWS S3 MFA Delete documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/MultiFactorAuthenticationDelete.html). 
- pub mfa_delete: Option, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetBucketVersioningResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(GetBucketVersioningResponse); +impl_has_s3fields!(GetBucketVersioningResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); +impl HasBucket for GetBucketVersioningResponse {} +impl HasRegion for GetBucketVersioningResponse {} - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; - let status: Option = - get_option_text(&root, "Status").map(|v| match v.as_str() { - "Enabled" => VersioningStatus::Enabled, - _ => VersioningStatus::Suspended, // Default case - }); - let mfa_delete: Option = - get_option_text(&root, "MFADelete").map(|v| v.eq_ignore_ascii_case("Enabled")); +impl GetBucketVersioningResponse { + /// Returns the versioning status of the bucket. + /// + /// This method retrieves the current versioning status, which can be: + /// - `Some(VersioningStatus::Enabled)` if versioning is enabled. + /// - `Some(VersioningStatus::Suspended)` if versioning is suspended. + /// - `None` if versioning has never been configured for this bucket. + pub fn status(&self) -> Result, Error> { + let root = Element::parse(self.body.clone().reader())?; + Ok(get_option_text(&root, "Status").map(|v| match v.as_str() { + "Enabled" => VersioningStatus::Enabled, + _ => VersioningStatus::Suspended, // Default case + })) + } - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - status, - mfa_delete, - }) + /// Returns whether MFA delete is enabled for the bucket. + /// + /// This method retrieves the MFA delete setting, which can be: + /// - `Some(true)` if MFA delete is enabled. + /// - `Some(false)` if MFA delete is disabled. + /// - `None` if MFA delete has never been configured for this bucket. + pub fn mfa_delete(&self) -> Result, Error> { + let root = Element::parse(self.body.clone().reader())?; + Ok(get_option_text(&root, "MFADelete").map(|v| v.eq_ignore_ascii_case("Enabled"))) } } diff --git a/src/s3/response/get_object.rs b/src/s3/response/get_object.rs index 7d098dc..7eb97b0 100644 --- a/src/s3/response/get_object.rs +++ b/src/s3/response/get_object.rs @@ -13,75 +13,64 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::utils::{take_bucket, take_object}; +use crate::impl_has_s3fields; +use crate::s3::response::a_response_traits::{ + HasBucket, HasEtagFromHeaders, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::{ builders::ObjectContent, error::Error, types::{FromS3Response, S3Request}, }; use async_trait::async_trait; +use bytes::Bytes; use futures_util::TryStreamExt; use http::HeaderMap; use std::mem; pub struct GetObjectResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, // Note: not used + resp: reqwest::Response, +} - /// The AWS region where the bucket resides. - pub region: String, +impl_has_s3fields!(GetObjectResponse); - /// Name of the bucket containing the object. 
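A usage sketch for the new `GetBucketVersioningResponse::status()` and `mfa_delete()` accessors, mirroring the call pattern in examples/bucket_versioning.rs; the helper function is illustrative only:

use minio::s3::Client;
use minio::s3::builders::VersioningStatus;
use minio::s3::error::Error;
use minio::s3::response::GetBucketVersioningResponse;
use minio::s3::types::S3Api;

async fn print_versioning(client: &Client, bucket: &str) -> Result<(), Error> {
    let resp: GetBucketVersioningResponse = client.get_bucket_versioning(bucket).send().await?;
    // Both accessors re-parse the stored XML body; None means the setting
    // has never been configured for this bucket.
    let enabled = matches!(resp.status()?, Some(VersioningStatus::Enabled));
    println!("versioning enabled: {}, mfa_delete: {:?}", enabled, resp.mfa_delete()?);
    Ok(())
}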
- pub bucket: String, +impl HasBucket for GetObjectResponse {} +impl HasRegion for GetObjectResponse {} +impl HasObject for GetObjectResponse {} +impl HasVersion for GetObjectResponse {} +impl HasEtagFromHeaders for GetObjectResponse {} - /// Key (path) identifying the object within the bucket. - pub object: String, +impl GetObjectResponse { + /// Returns the content of the object as a (streaming) byte buffer. Note: consumes the response. + pub fn content(self) -> Result { + let content_length: u64 = self.object_size()?; + let body = self.resp.bytes_stream().map_err(std::io::Error::other); + Ok(ObjectContent::new_from_stream(body, Some(content_length))) + } - /// Entity tag representing a specific version of the object. - pub etag: Option, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, - - /// The content of the object as a stream or byte buffer. - pub content: ObjectContent, - - /// Size of the object in bytes. - pub object_size: u64, + /// Returns the content size (in Bytes) of the object. + pub fn object_size(&self) -> Result { + self.resp + .content_length() + .ok_or(Error::ContentLengthUnknown) + } } #[async_trait] impl FromS3Response for GetObjectResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let mut resp = resp?; - - let headers: HeaderMap = mem::take(resp.headers_mut()); - - let etag: Option = headers - .get("etag") - .and_then(|v| v.to_str().ok()) - .map(|s| s.trim_matches('"').to_string()); - - let version_id: Option = headers - .get("x-amz-version-id") - .and_then(|v| v.to_str().ok().map(String::from)); - - let content_length: u64 = resp.content_length().ok_or(Error::ContentLengthUnknown)?; - let body = resp.bytes_stream().map_err(std::io::Error::other); - let content = ObjectContent::new_from_stream(body, Some(content_length)); - + let mut resp = response?; Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id, - content, - object_size: content_length, - etag, + request, + headers: mem::take(resp.headers_mut()), + body: Bytes::new(), + resp, }) } } diff --git a/src/s3/response/get_object_legal_hold.rs b/src/s3/response/get_object_legal_hold.rs index 295e427..5903cba 100644 --- a/src/s3/response/get_object_legal_hold.rs +++ b/src/s3/response/get_object_legal_hold.rs @@ -13,12 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::{Error, ErrorCode}; -use crate::s3::multimap::MultimapExt; +use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{get_default_text, take_bucket, take_object}; -use async_trait::async_trait; -use bytes::Buf; +use crate::s3::utils::get_default_text; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -28,57 +30,28 @@ use xmltree::Element; /// API #[derive(Clone, Debug)] pub struct GetObjectLegalHoldResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. 
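A sketch of downloading an object with the reworked `GetObjectResponse`: `object_size()` is read first because `content()` consumes the response. The boxed-error return type is an assumption borrowed from the examples in this repository:

use std::path::Path;

use minio::s3::Client;
use minio::s3::types::S3Api;

async fn download(
    client: &Client,
    bucket: &str,
    object: &str,
    path: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let resp = client.get_object(bucket, object).send().await?;
    // object_size() reads Content-Length from the underlying response.
    let size = resp.object_size()?;
    // content() takes ownership and returns a streaming ObjectContent.
    resp.content()?.to_file(Path::new(path)).await?;
    println!("downloaded {} bytes to {}", size, path);
    Ok(())
}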
- pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, - - /// Indicates whether the object legal hold is enabled. - pub enabled: bool, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetObjectLegalHoldResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - match resp { - Ok(mut r) => { - let headers: HeaderMap = mem::take(r.headers_mut()); - let body = r.bytes().await?; - let root = Element::parse(body.reader())?; +impl_from_s3response!(GetObjectLegalHoldResponse); +impl_has_s3fields!(GetObjectLegalHoldResponse); - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - enabled: get_default_text(&root, "Status") == "ON", - }) - } - Err(Error::S3Error(e)) if e.code == ErrorCode::NoSuchObjectLockConfiguration => { - Ok(Self { - headers: e.headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - enabled: false, - }) - } - Err(e) => Err(e), +impl HasBucket for GetObjectLegalHoldResponse {} +impl HasRegion for GetObjectLegalHoldResponse {} +impl HasObject for GetObjectLegalHoldResponse {} +impl HasVersion for GetObjectLegalHoldResponse {} + +impl GetObjectLegalHoldResponse { + /// Returns the legal hold status of the object. + /// + /// This method retrieves whether the legal hold is enabled for the specified object. + pub fn enabled(&self) -> Result { + if self.body.is_empty() { + return Ok(false); // No legal hold configuration present due to NoSuchObjectLockConfiguration } + let root = Element::parse(self.body.clone().reader())?; + Ok(get_default_text(&root, "Status") == "ON") } } diff --git a/src/s3/response/get_object_lock_config.rs b/src/s3/response/get_object_lock_config.rs index 0da2b74..1be630c 100644 --- a/src/s3/response/get_object_lock_config.rs +++ b/src/s3/response/get_object_lock_config.rs @@ -14,10 +14,10 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, ObjectLockConfig, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; -use bytes::Buf; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -29,36 +29,24 @@ use xmltree::Element; /// helping to enforce write-once-read-many (WORM) protection. #[derive(Clone, Debug)] pub struct GetObjectLockConfigResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket for which the Object Lock configuration is retrieved. - pub bucket: String, - - /// The Object Lock configuration of the bucket, including retention settings and legal hold status. 
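An illustrative check of the new `GetObjectLegalHoldResponse::enabled()` accessor; the `get_object_legal_hold(bucket, object)` call shape is an assumption, not taken from this diff:

use minio::s3::Client;
use minio::s3::error::Error;
use minio::s3::response::GetObjectLegalHoldResponse;
use minio::s3::types::S3Api;

async fn is_on_legal_hold(client: &Client, bucket: &str, object: &str) -> Result<bool, Error> {
    let resp: GetObjectLegalHoldResponse =
        client.get_object_legal_hold(bucket, object).send().await?;
    // enabled() returns Ok(false) when the server reported
    // NoSuchObjectLockConfiguration, in which case the stored body is empty.
    resp.enabled()
}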
- pub config: ObjectLockConfig, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetObjectLockConfigResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(GetObjectLockConfigResponse); +impl_has_s3fields!(GetObjectLockConfigResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; +impl HasBucket for GetObjectLockConfigResponse {} +impl HasRegion for GetObjectLockConfigResponse {} +impl HasObject for GetObjectLockConfigResponse {} - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config: ObjectLockConfig::from_xml(&root)?, - }) +impl GetObjectLockConfigResponse { + /// Returns the Object Lock configuration of the bucket. + /// + /// This method retrieves the Object Lock settings, which include retention mode and period, + /// as well as legal hold status for the bucket. + pub fn config(&self) -> Result { + ObjectLockConfig::from_xml(&Element::parse(self.body.clone().reader())?) } } diff --git a/src/s3/response/get_object_prompt.rs b/src/s3/response/get_object_prompt.rs index 668968e..9b7a370 100644 --- a/src/s3/response/get_object_prompt.rs +++ b/src/s3/response/get_object_prompt.rs @@ -14,47 +14,33 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{take_bucket, take_object}; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; pub struct GetObjectPromptResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// The prompt response for the object. - pub prompt_response: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetObjectPromptResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(GetObjectPromptResponse); +impl_has_s3fields!(GetObjectPromptResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; - let prompt_response: String = String::from_utf8(body.to_vec())?; +impl HasBucket for GetObjectPromptResponse {} +impl HasRegion for GetObjectPromptResponse {} +impl HasObject for GetObjectPromptResponse {} - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - prompt_response, +impl GetObjectPromptResponse { + /// Returns the prompt response for the object. + /// + /// This method retrieves the content of the object as a UTF-8 encoded string. 
+ pub fn prompt_response(&self) -> Result<&str, Error> { + std::str::from_utf8(&self.body).map_err(|e| { + Error::Utf8Error(format!("Failed to parse prompt_response as UTF-8: {}", e).into()) }) } } diff --git a/src/s3/response/get_object_retention.rs b/src/s3/response/get_object_retention.rs index 929b537..5fd2714 100644 --- a/src/s3/response/get_object_retention.rs +++ b/src/s3/response/get_object_retention.rs @@ -13,12 +13,15 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::{Error, ErrorCode}; -use crate::s3::multimap::MultimapExt; +use crate::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, RetentionMode, S3Request}; -use crate::s3::utils::{UtcTime, from_iso8601utc, get_option_text, take_bucket, take_object}; +use crate::s3::utils::{UtcTime, from_iso8601utc, get_option_text}; use async_trait::async_trait; -use bytes::Buf; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -26,67 +29,65 @@ use xmltree::Element; /// Response of [get_object_retention()](crate::s3::client::Client::get_object_retention) API #[derive(Clone, Debug)] pub struct GetObjectRetentionResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, +} - /// The AWS region where the bucket resides. - pub region: String, +impl_has_s3fields!(GetObjectRetentionResponse); - /// Name of the bucket containing the object. - pub bucket: String, +impl HasBucket for GetObjectRetentionResponse {} +impl HasRegion for GetObjectRetentionResponse {} +impl HasObject for GetObjectRetentionResponse {} +impl HasVersion for GetObjectRetentionResponse {} - /// Key (path) identifying the object within the bucket. - pub object: String, +impl GetObjectRetentionResponse { + /// Returns the retention mode of the object. + /// + /// This method retrieves the retention mode, which can be either `Governance` or `Compliance`. + pub fn retention_mode(&self) -> Result, Error> { + if self.body.is_empty() { + return Ok(None); + } + let root = Element::parse(self.body.clone().reader())?; + Ok(match get_option_text(&root, "Mode") { + Some(v) => Some(RetentionMode::parse(&v)?), + _ => None, + }) + } - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, - - /// The retention mode of the object. - pub retention_mode: Option, - - /// The date until which the object is retained. - pub retain_until_date: Option, + /// Returns the date until which the object is retained. + /// + /// This method retrieves the retention date, which indicates when the object will no longer be retained. 
+ pub fn retain_until_date(&self) -> Result, Error> { + if self.body.is_empty() { + return Ok(None); + } + let root = Element::parse(self.body.clone().reader())?; + Ok(match get_option_text(&root, "RetainUntilDate") { + Some(v) => Some(from_iso8601utc(&v)?), + _ => None, + }) + } } #[async_trait] impl FromS3Response for GetObjectRetentionResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - match resp { - Ok(mut r) => { - let headers = mem::take(r.headers_mut()); - let body = r.bytes().await?; - let root = Element::parse(body.reader())?; - let retention_mode = match get_option_text(&root, "Mode") { - Some(v) => Some(RetentionMode::parse(&v)?), - _ => None, - }; - let retain_until_date = match get_option_text(&root, "RetainUntilDate") { - Some(v) => Some(from_iso8601utc(&v)?), - _ => None, - }; - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - retention_mode, - retain_until_date, - }) - } + match response { + Ok(mut resp) => Ok(Self { + request, + headers: mem::take(resp.headers_mut()), + body: resp.bytes().await?, + }), Err(Error::S3Error(e)) if e.code == ErrorCode::NoSuchObjectLockConfiguration => { Ok(Self { + request, headers: e.headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - retention_mode: None, - retain_until_date: None, + body: Bytes::new(), }) } Err(e) => Err(e), diff --git a/src/s3/response/get_object_tagging.rs b/src/s3/response/get_object_tagging.rs index 6d55a1c..fd26bbd 100644 --- a/src/s3/response/get_object_tagging.rs +++ b/src/s3/response/get_object_tagging.rs @@ -14,67 +14,30 @@ // limitations under the License. use crate::s3::error::Error; -use crate::s3::multimap::MultimapExt; +use crate::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasS3Fields, HasTagging, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{get_text, take_bucket, take_object}; -use async_trait::async_trait; -use bytes::Buf; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; -use std::collections::HashMap; use std::mem; -use xmltree::Element; /// Response of /// [get_object_tags()](crate::s3::client::Client::get_object_tagging) /// API #[derive(Clone, Debug)] pub struct GetObjectTaggingResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, - - /// Tags associated with the object. 
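A sketch of the new `GetObjectRetentionResponse` accessors; the `get_object_retention(bucket, object)` argument shape is assumed from the other object APIs:

use minio::s3::Client;
use minio::s3::error::Error;
use minio::s3::response::GetObjectRetentionResponse;
use minio::s3::types::S3Api;

async fn print_retention(client: &Client, bucket: &str, object: &str) -> Result<(), Error> {
    let resp: GetObjectRetentionResponse =
        client.get_object_retention(bucket, object).send().await?;
    // Both accessors return Ok(None) when the object has no retention
    // configuration (empty stored body).
    println!("mode: {:?}", resp.retention_mode()?);
    println!("retain until: {:?}", resp.retain_until_date()?);
    Ok(())
}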
- pub tags: HashMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetObjectTaggingResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(GetObjectTaggingResponse); +impl_has_s3fields!(GetObjectTaggingResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); - - let body = resp.bytes().await?; - let mut root = Element::parse(body.reader())?; - let element = root - .get_mut_child("TagSet") - .ok_or(Error::XmlError(" tag not found".to_string()))?; - let mut tags = HashMap::new(); - while let Some(v) = element.take_child("Tag") { - tags.insert(get_text(&v, "Key")?, get_text(&v, "Value")?); - } - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - tags, - }) - } -} +impl HasBucket for GetObjectTaggingResponse {} +impl HasRegion for GetObjectTaggingResponse {} +impl HasObject for GetObjectTaggingResponse {} +impl HasVersion for GetObjectTaggingResponse {} +impl HasTagging for GetObjectTaggingResponse {} diff --git a/src/s3/response/get_region.rs b/src/s3/response/get_region.rs index 1435570..946c82c 100644 --- a/src/s3/response/get_region.rs +++ b/src/s3/response/get_region.rs @@ -15,10 +15,10 @@ use crate::s3::client::DEFAULT_REGION; use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; -use bytes::Buf; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -28,44 +28,28 @@ use xmltree::Element; /// API #[derive(Clone, Debug)] pub struct GetRegionResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// The region response for the bucket. - pub region_response: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for GetRegionResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(GetRegionResponse); +impl_has_s3fields!(GetRegionResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); - let region_response: String = { - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; +impl HasBucket for GetRegionResponse {} +impl HasRegion for GetRegionResponse {} - let mut location = root.get_text().unwrap_or_default().to_string(); - if location.is_empty() { - location = String::from(DEFAULT_REGION); - } - location - }; +impl GetRegionResponse { + /// Returns the region response for the bucket. + /// + /// This method retrieves the region where the bucket is located. 
+ pub fn region_response(&self) -> Result { + let root = Element::parse(self.body.clone().reader())?; - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - region_response, - }) + let mut location = root.get_text().unwrap_or_default().to_string(); + if location.is_empty() { + location = String::from(DEFAULT_REGION); + } + Ok(location) } } diff --git a/src/s3/response/list_buckets.rs b/src/s3/response/list_buckets.rs index 29ea2dc..e31dede 100644 --- a/src/s3/response/list_buckets.rs +++ b/src/s3/response/list_buckets.rs @@ -14,10 +14,11 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::HasS3Fields; use crate::s3::types::{Bucket, FromS3Response, S3Request}; use crate::s3::utils::{from_iso8601utc, get_text}; -use async_trait::async_trait; -use bytes::Buf; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -25,24 +26,18 @@ use xmltree::Element; /// Response of [list_buckets()](crate::s3::client::Client::list_buckets) API #[derive(Debug, Clone)] pub struct ListBucketsResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// the list of buckets that are present in the account. - pub buckets: Vec, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for ListBucketsResponse { - async fn from_s3response( - _req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - let headers: HeaderMap = mem::take(resp.headers_mut()); +impl_from_s3response!(ListBucketsResponse); +impl_has_s3fields!(ListBucketsResponse); - let body = resp.bytes().await?; - let mut root = Element::parse(body.reader())?; +impl ListBucketsResponse { + /// Returns the list of buckets in the account. + pub fn buckets(&self) -> Result, Error> { + let mut root = Element::parse(self.body().clone().reader())?; let buckets_xml = root .get_mut_child("Buckets") .ok_or(Error::XmlError(" tag not found".into()))?; @@ -55,7 +50,6 @@ impl FromS3Response for ListBucketsResponse { creation_date: from_iso8601utc(&get_text(&bucket, "CreationDate")?)?, }) } - - Ok(Self { headers, buckets }) + Ok(buckets) } } diff --git a/src/s3/response/list_objects.rs b/src/s3/response/list_objects.rs index 6c354b4..4978919 100644 --- a/src/s3/response/list_objects.rs +++ b/src/s3/response/list_objects.rs @@ -12,12 +12,8 @@ //! Response types for ListObjects APIs -use async_trait::async_trait; -use bytes::Buf; -use reqwest::header::HeaderMap; -use std::collections::HashMap; -use std::mem; - +use crate::impl_has_s3fields; +use crate::s3::response::a_response_traits::HasS3Fields; use crate::s3::{ error::Error, types::{FromS3Response, ListEntry, S3Request}, @@ -26,6 +22,11 @@ use crate::s3::{ xml::{Element, MergeXmlElements}, }, }; +use async_trait::async_trait; +use bytes::{Buf, Bytes}; +use reqwest::header::HeaderMap; +use std::collections::HashMap; +use std::mem; fn url_decode( encoding_type: &Option, @@ -188,8 +189,10 @@ fn parse_list_objects_common_prefixes( /// Response of [list_objects_v1()](crate::s3::client::Client::list_objects_v1) S3 API #[derive(Clone, Debug)] pub struct ListObjectsV1Response { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. 
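A sketch of iterating buckets through the new `ListBucketsResponse::buckets()` accessor; `list_buckets()` as a builder ending in `.send()` is assumed from the pattern used by the other APIs in this change:

use minio::s3::Client;
use minio::s3::error::Error;
use minio::s3::response::ListBucketsResponse;
use minio::s3::types::S3Api;

async fn print_buckets(client: &Client) -> Result<(), Error> {
    let resp: ListBucketsResponse = client.list_buckets().send().await?;
    // buckets() re-parses the stored ListAllMyBuckets XML body on each call.
    for bucket in resp.buckets()? {
        println!("{} (created {:?})", bucket.name, bucket.creation_date);
    }
    Ok(())
}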
- pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, + pub name: String, pub encoding_type: Option, pub prefix: Option, @@ -201,18 +204,20 @@ pub struct ListObjectsV1Response { pub next_marker: Option, } +impl_has_s3fields!(ListObjectsV1Response); + #[async_trait] impl FromS3Response for ListObjectsV1Response { async fn from_s3response( - _req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let mut resp = resp?; + let mut resp = response?; let headers: HeaderMap = mem::take(resp.headers_mut()); let body = resp.bytes().await?; - let xmltree_root = xmltree::Element::parse(body.reader())?; - let root = Element::from(&xmltree_root); + let xmltree_root = xmltree::Element::parse(body.clone().reader())?; + let root = Element::from(&xmltree_root); let (name, encoding_type, prefix, delimiter, is_truncated, max_keys) = parse_common_list_objects_response(&root)?; let marker = url_decode(&encoding_type, root.get_child_text("Marker"))?; @@ -224,8 +229,11 @@ impl FromS3Response for ListObjectsV1Response { } parse_list_objects_common_prefixes(&mut contents, &root, &encoding_type)?; - Ok(ListObjectsV1Response { + Ok(Self { + request, headers, + body, + name, encoding_type, prefix, @@ -242,8 +250,10 @@ impl FromS3Response for ListObjectsV1Response { /// Response of [list_objects_v2()](crate::s3::client::Client::list_objects_v2) S3 API #[derive(Clone, Debug)] pub struct ListObjectsV2Response { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, + pub name: String, pub encoding_type: Option, pub prefix: Option, @@ -257,19 +267,20 @@ pub struct ListObjectsV2Response { pub next_continuation_token: Option, } +impl_has_s3fields!(ListObjectsV2Response); + #[async_trait] impl FromS3Response for ListObjectsV2Response { async fn from_s3response( - _req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let mut resp = resp?; + let mut resp = response?; let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; - let xmltree_root = xmltree::Element::parse(body.reader())?; - let root = Element::from(&xmltree_root); + let xmltree_root = xmltree::Element::parse(body.clone().reader())?; + let root = Element::from(&xmltree_root); let (name, encoding_type, prefix, delimiter, is_truncated, max_keys) = parse_common_list_objects_response(&root)?; let key_count = root @@ -283,8 +294,11 @@ impl FromS3Response for ListObjectsV2Response { parse_list_objects_contents(&mut contents, &root, "Contents", &encoding_type, false)?; parse_list_objects_common_prefixes(&mut contents, &root, &encoding_type)?; - Ok(ListObjectsV2Response { + Ok(Self { + request, headers, + body, + name, encoding_type, prefix, @@ -303,8 +317,10 @@ impl FromS3Response for ListObjectsV2Response { /// Response of [list_object_versions()](crate::s3::client::Client::list_object_versions) S3 API #[derive(Clone, Debug)] pub struct ListObjectVersionsResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. 
- pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, + pub name: String, pub encoding_type: Option, pub prefix: Option, @@ -318,18 +334,20 @@ pub struct ListObjectVersionsResponse { pub next_version_id_marker: Option, } +impl_has_s3fields!(ListObjectVersionsResponse); + #[async_trait] impl FromS3Response for ListObjectVersionsResponse { async fn from_s3response( - _req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let resp = resp?; - let headers = resp.headers().clone(); + let mut resp = response?; + let headers: HeaderMap = mem::take(resp.headers_mut()); let body = resp.bytes().await?; - let xmltree_root = xmltree::Element::parse(body.reader())?; - let root = Element::from(&xmltree_root); + let xmltree_root = xmltree::Element::parse(body.clone().reader())?; + let root = Element::from(&xmltree_root); let (name, encoding_type, prefix, delimiter, is_truncated, max_keys) = parse_common_list_objects_response(&root)?; let key_marker = url_decode(&encoding_type, root.get_child_text("KeyMarker"))?; @@ -340,8 +358,11 @@ impl FromS3Response for ListObjectVersionsResponse { parse_list_objects_contents(&mut contents, &root, "Version", &encoding_type, true)?; parse_list_objects_common_prefixes(&mut contents, &root, &encoding_type)?; - Ok(ListObjectVersionsResponse { + Ok(Self { + request, headers, + body, + name, encoding_type, prefix, @@ -360,8 +381,10 @@ impl FromS3Response for ListObjectVersionsResponse { /// Response of [list_objects()](crate::s3::client::Client::list_objects) API #[derive(Clone, Debug, Default)] pub struct ListObjectsResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, + pub name: String, pub encoding_type: Option, pub prefix: Option, @@ -387,10 +410,15 @@ pub struct ListObjectsResponse { pub next_version_id_marker: Option, } +impl_has_s3fields!(ListObjectsResponse); + impl From for ListObjectsResponse { fn from(value: ListObjectVersionsResponse) -> Self { - ListObjectsResponse { + Self { + request: value.request, headers: value.headers, + body: value.body, + name: value.name, encoding_type: value.encoding_type, prefix: value.prefix, @@ -409,8 +437,11 @@ impl From for ListObjectsResponse { impl From for ListObjectsResponse { fn from(value: ListObjectsV2Response) -> Self { - ListObjectsResponse { + Self { + request: value.request, headers: value.headers, + body: value.body, + name: value.name, encoding_type: value.encoding_type, prefix: value.prefix, @@ -430,7 +461,10 @@ impl From for ListObjectsResponse { impl From for ListObjectsResponse { fn from(value: ListObjectsV1Response) -> Self { Self { + request: value.request, headers: value.headers, + body: value.body, + name: value.name, encoding_type: value.encoding_type, prefix: value.prefix, diff --git a/src/s3/response/listen_bucket_notification.rs b/src/s3/response/listen_bucket_notification.rs index e135565..8c7ab6e 100644 --- a/src/s3/response/listen_bucket_notification.rs +++ b/src/s3/response/listen_bucket_notification.rs @@ -13,28 +13,31 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use crate::impl_has_s3fields; use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, NotificationRecords, S3Request}; -use crate::s3::utils::take_bucket; -use futures_util::{Stream, StreamExt, TryStreamExt}; +use async_std::stream::Stream; +use bytes::Bytes; +use futures_util::{StreamExt, TryStreamExt}; use http::HeaderMap; use std::mem; /// Response of -/// [listen _bucket_notification()](crate::s3::client::Client::listen_bucket_notification) +/// [listen_bucket_notification()](crate::s3::client::Client::listen_bucket_notification) /// API -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct ListenBucketNotificationResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, // Note: not used } +impl_has_s3fields!(ListenBucketNotificationResponse); + +impl HasBucket for ListenBucketNotificationResponse {} +impl HasRegion for ListenBucketNotificationResponse {} + #[async_trait::async_trait] impl FromS3Response for ( @@ -43,14 +46,13 @@ impl FromS3Response ) { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let mut resp = resp?; - let headers: HeaderMap = mem::take(resp.headers_mut()); + let mut resp = response?; - // A simple stateful decoder that buffers bytes and yields complete lines - let byte_stream = resp.bytes_stream(); // This is a futures::Stream> + let headers: HeaderMap = mem::take(resp.headers_mut()); + let byte_stream = resp.bytes_stream(); let line_stream = Box::pin(async_stream::try_stream! { let mut buf = Vec::new(); @@ -94,9 +96,9 @@ impl FromS3Response Ok(( ListenBucketNotificationResponse { + request, headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, + body: Bytes::new(), }, Box::new(line_stream), )) diff --git a/src/s3/response/put_bucket_encryption.rs b/src/s3/response/put_bucket_encryption.rs index 1e8186d..9ad784d 100644 --- a/src/s3/response/put_bucket_encryption.rs +++ b/src/s3/response/put_bucket_encryption.rs @@ -14,10 +14,11 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request, SseConfig}; -use crate::s3::utils::{get_option_text, get_text, take_bucket}; -use async_trait::async_trait; -use bytes::Buf; +use crate::s3::utils::{get_option_text, get_text}; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; @@ -27,30 +28,21 @@ use xmltree::Element; /// API #[derive(Clone, Debug)] pub struct PutBucketEncryptionResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Server-side encryption configuration. 
- pub config: SseConfig, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutBucketEncryptionResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutBucketEncryptionResponse); +impl_has_s3fields!(PutBucketEncryptionResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; - let mut root = Element::parse(body.reader())?; +impl HasBucket for PutBucketEncryptionResponse {} +impl HasRegion for PutBucketEncryptionResponse {} + +impl PutBucketEncryptionResponse { + /// Returns the server-side encryption configuration. + pub fn config(&self) -> Result { + let mut root = Element::parse(self.body().clone().reader())?; let rule = root .get_mut_child("Rule") @@ -62,14 +54,9 @@ impl FromS3Response for PutBucketEncryptionResponse { " tag not found", )))?; - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - config: SseConfig { - sse_algorithm: get_text(sse_by_default, "SSEAlgorithm")?, - kms_master_key_id: get_option_text(sse_by_default, "KMSMasterKeyID"), - }, + Ok(SseConfig { + sse_algorithm: get_text(sse_by_default, "SSEAlgorithm")?, + kms_master_key_id: get_option_text(sse_by_default, "KMSMasterKeyID"), }) } } diff --git a/src/s3/response/put_bucket_lifecycle.rs b/src/s3/response/put_bucket_lifecycle.rs index 135f109..287094c 100644 --- a/src/s3/response/put_bucket_lifecycle.rs +++ b/src/s3/response/put_bucket_lifecycle.rs @@ -14,37 +14,23 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Response of [put_bucket_lifecycle()](crate::s3::client::Client::put_bucket_lifecycle) API -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct PutBucketLifecycleResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutBucketLifecycleResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutBucketLifecycleResponse); +impl_has_s3fields!(PutBucketLifecycleResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for PutBucketLifecycleResponse {} +impl HasRegion for PutBucketLifecycleResponse {} diff --git a/src/s3/response/put_bucket_notification.rs b/src/s3/response/put_bucket_notification.rs index 564e2f0..93b7743 100644 --- a/src/s3/response/put_bucket_notification.rs +++ b/src/s3/response/put_bucket_notification.rs @@ -14,37 +14,23 @@ // limitations under the License. 
use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Response of [put_bucket_notification()](crate::s3::client::Client::put_bucket_notification) API -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct PutBucketNotificationResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutBucketNotificationResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutBucketNotificationResponse); +impl_has_s3fields!(PutBucketNotificationResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for PutBucketNotificationResponse {} +impl HasRegion for PutBucketNotificationResponse {} diff --git a/src/s3/response/put_bucket_policy.rs b/src/s3/response/put_bucket_policy.rs index 09af3c3..d502633 100644 --- a/src/s3/response/put_bucket_policy.rs +++ b/src/s3/response/put_bucket_policy.rs @@ -14,37 +14,23 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Response of [put_bucket_policy()](crate::s3::client::Client::put_bucket_policy) API -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct PutBucketPolicyResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutBucketPolicyResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutBucketPolicyResponse); +impl_has_s3fields!(PutBucketPolicyResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for PutBucketPolicyResponse {} +impl HasRegion for PutBucketPolicyResponse {} diff --git a/src/s3/response/put_bucket_replication.rs b/src/s3/response/put_bucket_replication.rs index fdf800b..2cccda7 100644 --- a/src/s3/response/put_bucket_replication.rs +++ b/src/s3/response/put_bucket_replication.rs @@ -14,37 +14,23 @@ // limitations under the License. 
use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Response of [put_bucket_replication()](crate::s3::client::Client::put_bucket_replication) API -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct PutBucketReplicationResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutBucketReplicationResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutBucketReplicationResponse); +impl_has_s3fields!(PutBucketReplicationResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for PutBucketReplicationResponse {} +impl HasRegion for PutBucketReplicationResponse {} diff --git a/src/s3/response/put_bucket_tagging.rs b/src/s3/response/put_bucket_tagging.rs index b8f1431..71d8b89 100644 --- a/src/s3/response/put_bucket_tagging.rs +++ b/src/s3/response/put_bucket_tagging.rs @@ -14,9 +14,10 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -25,28 +26,13 @@ use std::mem; /// API #[derive(Clone, Debug)] pub struct PutBucketTaggingResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutBucketTaggingResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutBucketTaggingResponse); +impl_has_s3fields!(PutBucketTaggingResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for PutBucketTaggingResponse {} +impl HasRegion for PutBucketTaggingResponse {} diff --git a/src/s3/response/put_bucket_versioning.rs b/src/s3/response/put_bucket_versioning.rs index dc9051a..45ec913 100644 --- a/src/s3/response/put_bucket_versioning.rs +++ b/src/s3/response/put_bucket_versioning.rs @@ -14,37 +14,23 @@ // limitations under the License. 
use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; /// Response of [put_bucket_versioning()](crate::s3::client::Client::put_bucket_versioning) API -#[derive(Debug)] +#[derive(Clone, Debug)] pub struct PutBucketVersioningResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutBucketVersioningResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutBucketVersioningResponse); +impl_has_s3fields!(PutBucketVersioningResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for PutBucketVersioningResponse {} +impl HasRegion for PutBucketVersioningResponse {} diff --git a/src/s3/response/put_object.rs b/src/s3/response/put_object.rs index b2c9f6e..2f6386f 100644 --- a/src/s3/response/put_object.rs +++ b/src/s3/response/put_object.rs @@ -1,5 +1,5 @@ // MinIO Rust Library for Amazon S3 Compatible Cloud Storage -// Copyright 2023 MinIO, Inc. +// Copyright 2025 MinIO, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -13,126 +13,112 @@ // See the License for the specific language governing permissions and // limitations under the License. 
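
The same pattern repeats for every response type touched by this patch: the struct keeps only `request`, `headers`, and `body`, the two macros supply the `FromS3Response` and `HasS3Fields` plumbing, and empty marker-trait impls opt into the accessors that response needs. A sketch of adding a new response type under this scheme (`PutBucketFooResponse` is a made-up name; the macro definitions live elsewhere in the crate and their exact expansion is not shown here):

```rust
use crate::s3::error::Error;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};
use bytes::Bytes;
use http::HeaderMap;

// Hypothetical new response type following the pattern used throughout this patch.
#[derive(Clone, Debug)]
pub struct PutBucketFooResponse {
    request: S3Request,
    headers: HeaderMap,
    body: Bytes,
}

impl_from_s3response!(PutBucketFooResponse); // builds the response from request, headers, body
impl_has_s3fields!(PutBucketFooResponse);    // shared field access that the Has* traits build on

impl HasBucket for PutBucketFooResponse {}   // opt in to bucket()
impl HasRegion for PutBucketFooResponse {}   // opt in to region()
```
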
-use async_trait::async_trait; -use bytes::Buf; +use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{ + HasBucket, HasEtagFromHeaders, HasObject, HasRegion, HasS3Fields, HasVersion, +}; +use crate::s3::types::{FromS3Response, S3Request}; +use crate::s3::utils::get_text; +use crate::{impl_from_s3response, impl_from_s3response_with_size, impl_has_s3fields}; +use bytes::{Buf, Bytes}; use http::HeaderMap; use std::mem; use xmltree::Element; -use crate::s3::utils::{take_bucket, take_object}; -use crate::s3::{ - error::Error, - types::{FromS3Response, S3Request}, - utils::get_text, -}; +// region + +/// Base response struct that contains common functionality for S3 operations +#[derive(Clone, Debug)] +pub struct S3Response1 { + pub(crate) request: S3Request, + pub(crate) headers: HeaderMap, + pub(crate) body: Bytes, +} + +impl_from_s3response!(S3Response1); +impl_has_s3fields!(S3Response1); + +impl HasBucket for S3Response1 {} +impl HasObject for S3Response1 {} +impl HasRegion for S3Response1 {} +impl HasVersion for S3Response1 {} +impl HasEtagFromHeaders for S3Response1 {} + +/// Extended response struct for operations that need additional data like object size +#[derive(Clone, Debug)] +pub struct S3Response1WithSize { + request: S3Request, + headers: HeaderMap, + body: Bytes, + + /// Additional object size information + pub(crate) object_size: u64, +} + +impl_from_s3response_with_size!(S3Response1WithSize); +impl_has_s3fields!(S3Response1WithSize); + +impl HasBucket for S3Response1WithSize {} +impl HasObject for S3Response1WithSize {} +impl HasRegion for S3Response1WithSize {} +impl HasVersion for S3Response1WithSize {} +impl HasEtagFromHeaders for S3Response1WithSize {} + +impl S3Response1WithSize { + pub fn new(response: S3Response1, object_size: u64) -> Self { + Self { + request: response.request, + headers: response.headers, + body: response.body, + object_size, + } + } + + /// Returns the object size for the response + pub fn object_size(&self) -> u64 { + self.object_size + } +} + +/// Extended response struct for multipart operations that need upload_id +#[derive(Clone, Debug)] +pub struct S3MultipartResponse { + request: S3Request, + headers: HeaderMap, + body: Bytes, +} + +impl_from_s3response!(S3MultipartResponse); +impl_has_s3fields!(S3MultipartResponse); + +impl HasBucket for S3MultipartResponse {} +impl HasObject for S3MultipartResponse {} +impl HasRegion for S3MultipartResponse {} +impl HasVersion for S3MultipartResponse {} +impl HasEtagFromHeaders for S3MultipartResponse {} + +impl S3MultipartResponse { + /// Returns the upload ID for the multipart upload, while consuming the response. + pub async fn upload_id(&self) -> Result { + let root = Element::parse(self.body.clone().reader())?; + get_text(&root, "UploadId").map_err(|e| Error::InvalidUploadId(e.to_string())) + } +} /// Response of [put_object_api()](crate::s3::client::Client::put_object) API -#[derive(Debug, Clone)] -pub struct PutObjectResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. 
- pub headers: HeaderMap, - pub bucket: String, - pub object: String, - pub region: String, - pub etag: String, - pub version_id: Option, -} - -#[async_trait] -impl FromS3Response for PutObjectResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - - let headers: HeaderMap = mem::take(resp.headers_mut()); - - let etag: String = headers - .get("etag") - .and_then(|v| v.to_str().ok()) // Convert to &str safely - .map(|s| s.trim_matches('"').to_string()) // Trim and convert to String - .unwrap_or_default(); - - let version_id: Option = headers - .get("x-amz-version-id") - .and_then(|v| v.to_str().ok().map(String::from)); - - Ok(Self { - headers, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - region: req.inner_region, - etag, - version_id, - }) - } -} +pub type PutObjectResponse = S3Response1; /// Response of [create_multipart_upload()](crate::s3::client::Client::create_multipart_upload) API -#[derive(Debug, Clone)] -pub struct CreateMultipartUploadResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - pub region: String, - pub bucket: String, - pub object: String, - pub upload_id: String, -} - -#[async_trait] -impl FromS3Response for CreateMultipartUploadResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; - - let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; - let upload_id: String = - get_text(&root, "UploadId").map_err(|e| Error::InvalidUploadId(e.to_string()))?; - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - upload_id, - }) - } -} +pub type CreateMultipartUploadResponse = S3MultipartResponse; /// Response of [abort_multipart_upload()](crate::s3::client::Client::abort_multipart_upload) API -pub type AbortMultipartUploadResponse = CreateMultipartUploadResponse; +pub type AbortMultipartUploadResponse = S3MultipartResponse; /// Response of [complete_multipart_upload()](crate::s3::client::Client::complete_multipart_upload) API -pub type CompleteMultipartUploadResponse = PutObjectResponse; +pub type CompleteMultipartUploadResponse = S3Response1; /// Response of [upload_part()](crate::s3::client::Client::upload_part) API -pub type UploadPartResponse = PutObjectResponse; +pub type UploadPartResponse = S3Response1; -#[derive(Debug, Clone)] -pub struct PutObjectContentResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Size of the object in bytes. - pub object_size: u64, - - /// Entity tag representing a specific version of the object. - pub etag: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. 
- pub version_id: Option, -} +/// Response for put_object operations that include object size information +pub type PutObjectContentResponse = S3Response1WithSize; diff --git a/src/s3/response/put_object_legal_hold.rs b/src/s3/response/put_object_legal_hold.rs index 6f06c83..1a7864a 100644 --- a/src/s3/response/put_object_legal_hold.rs +++ b/src/s3/response/put_object_legal_hold.rs @@ -14,10 +14,12 @@ // limitations under the License. use crate::s3::error::Error; -use crate::s3::multimap::MultimapExt; +use crate::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{take_bucket, take_object}; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -30,38 +32,15 @@ use std::mem; /// For more information, refer to the [AWS S3 PutObjectLegalHold API documentation](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectLegalHold.html). #[derive(Clone, Debug)] pub struct PutObjectLegalHoldResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (name) identifying the object within the bucket. - pub object: String, - - /// The version ID of the object from which the legal hold was removed. - /// - /// If versioning is not enabled on the bucket, this field may be `None`. - pub version_id: Option, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutObjectLegalHoldResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutObjectLegalHoldResponse); +impl_has_s3fields!(PutObjectLegalHoldResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - }) - } -} +impl HasBucket for PutObjectLegalHoldResponse {} +impl HasRegion for PutObjectLegalHoldResponse {} +impl HasObject for PutObjectLegalHoldResponse {} +impl HasVersion for PutObjectLegalHoldResponse {} diff --git a/src/s3/response/put_object_lock_config.rs b/src/s3/response/put_object_lock_config.rs index 30d7550..20331bc 100644 --- a/src/s3/response/put_object_lock_config.rs +++ b/src/s3/response/put_object_lock_config.rs @@ -14,9 +14,10 @@ // limitations under the License. use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::take_bucket; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -25,28 +26,13 @@ use std::mem; /// API #[derive(Clone, Debug)] pub struct PutObjectLockConfigResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. 
- pub bucket: String, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutObjectLockConfigResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutObjectLockConfigResponse); +impl_has_s3fields!(PutObjectLockConfigResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - }) - } -} +impl HasBucket for PutObjectLockConfigResponse {} +impl HasRegion for PutObjectLockConfigResponse {} diff --git a/src/s3/response/put_object_retention.rs b/src/s3/response/put_object_retention.rs index c04bb36..375ae10 100644 --- a/src/s3/response/put_object_retention.rs +++ b/src/s3/response/put_object_retention.rs @@ -14,10 +14,12 @@ // limitations under the License. use crate::s3::error::Error; -use crate::s3::multimap::MultimapExt; +use crate::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{take_bucket, take_object}; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -26,36 +28,15 @@ use std::mem; /// API #[derive(Clone, Debug)] pub struct PutObjectRetentionResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutObjectRetentionResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutObjectRetentionResponse); +impl_has_s3fields!(PutObjectRetentionResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - }) - } -} +impl HasBucket for PutObjectRetentionResponse {} +impl HasRegion for PutObjectRetentionResponse {} +impl HasObject for PutObjectRetentionResponse {} +impl HasVersion for PutObjectRetentionResponse {} diff --git a/src/s3/response/put_object_tagging.rs b/src/s3/response/put_object_tagging.rs index 51e333a..c845f32 100644 --- a/src/s3/response/put_object_tagging.rs +++ b/src/s3/response/put_object_tagging.rs @@ -14,10 +14,12 @@ // limitations under the License. use crate::s3::error::Error; -use crate::s3::multimap::MultimapExt; +use crate::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, +}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{take_bucket, take_object}; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::mem; @@ -26,36 +28,15 @@ use std::mem; /// API #[derive(Clone, Debug)] pub struct PutObjectTaggingResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. 
- pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Version ID of the object, if versioning is enabled. Value of the `x-amz-version-id` header. - pub version_id: Option, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for PutObjectTaggingResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(PutObjectTaggingResponse); +impl_has_s3fields!(PutObjectTaggingResponse); - Ok(Self { - headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - version_id: req.query_params.take_version(), - }) - } -} +impl HasBucket for PutObjectTaggingResponse {} +impl HasRegion for PutObjectTaggingResponse {} +impl HasObject for PutObjectTaggingResponse {} +impl HasVersion for PutObjectTaggingResponse {} diff --git a/src/s3/response/select_object_content.rs b/src/s3/response/select_object_content.rs index a88fcd7..fa03ce1 100644 --- a/src/s3/response/select_object_content.rs +++ b/src/s3/response/select_object_content.rs @@ -13,10 +13,13 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::impl_has_s3fields; use crate::s3::error::Error; +use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request, SelectProgress}; -use crate::s3::utils::{copy_slice, crc32, get_text, take_bucket, take_object, uint32}; +use crate::s3::utils::{copy_slice, crc32, get_text, uint32}; use async_trait::async_trait; +use bytes::Bytes; use http::HeaderMap; use std::collections::{HashMap, VecDeque}; use std::io::BufReader; @@ -29,21 +32,12 @@ use xmltree::Element; /// Response of [select_object_content()](crate::s3::client::Client::select_object_content) API #[derive(Debug)] pub struct SelectObjectContentResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. 
- pub object: String, - - pub progress: SelectProgress, + request: S3Request, + headers: HeaderMap, + body: Bytes, resp: reqwest::Response, + pub progress: SelectProgress, done: bool, buf: VecDeque, @@ -66,6 +60,12 @@ pub struct SelectObjectContentResponse { payload_index: usize, } +impl_has_s3fields!(SelectObjectContentResponse); + +impl HasBucket for SelectObjectContentResponse {} +impl HasRegion for SelectObjectContentResponse {} +impl HasObject for SelectObjectContentResponse {} + impl SelectObjectContentResponse { fn reset(&mut self) { self.buf.clear(); @@ -331,22 +331,22 @@ impl SelectObjectContentResponse { #[async_trait] impl FromS3Response for SelectObjectContentResponse { async fn from_s3response( - req: S3Request, - resp: Result, + request: S3Request, + response: Result, ) -> Result { - let mut resp = resp?; + let mut resp = response?; Ok(Self { + request, headers: mem::take(resp.headers_mut()), - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, + body: Bytes::new(), // NOTE: note used + resp, + progress: SelectProgress { bytes_scanned: 0, bytes_progressed: 0, bytes_returned: 0, }, - resp, done: false, buf: VecDeque::::new(), prelude: [0_u8; 8], diff --git a/src/s3/response/stat_object.rs b/src/s3/response/stat_object.rs index a9a2864..f301212 100644 --- a/src/s3/response/stat_object.rs +++ b/src/s3/response/stat_object.rs @@ -13,134 +13,89 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::types::{RetentionMode, parse_legal_hold}; -use crate::s3::utils::{ - UtcTime, from_http_header_value, from_iso8601utc, take_bucket, take_object, +use crate::s3::response::a_response_traits::{ + HasBucket, HasEtagFromHeaders, HasIsDeleteMarker, HasObject, HasRegion, HasS3Fields, }; +use crate::s3::types::{RetentionMode, parse_legal_hold}; +use crate::s3::utils::{UtcTime, from_http_header_value, from_iso8601utc}; use crate::s3::{ error::Error, types::{FromS3Response, S3Request}, }; -use async_trait::async_trait; +use crate::{impl_from_s3response, impl_has_s3fields}; +use bytes::Bytes; use http::HeaderMap; use std::collections::HashMap; use std::mem; -#[derive(Debug)] +#[derive(Clone, Debug)] /// Response from the [`stat_object`](crate::s3::client::Client::stat_object) API call, /// providing metadata about an object stored in S3 or a compatible service. pub struct StatObjectResponse { - /// HTTP headers returned by the server, containing metadata such as `Content-Type`, `ETag`, etc. - pub headers: HeaderMap, - - /// The AWS region where the bucket resides. - pub region: String, - - /// Name of the bucket containing the object. - pub bucket: String, - - /// Key (path) identifying the object within the bucket. - pub object: String, - - /// Size of the object in bytes. - pub size: u64, - - /// Entity tag representing a specific version of the object. - pub etag: String, - - /// Version ID of the object, if versioning is enabled. - pub version_id: Option, - - /// Timestamp indicating when the object was last modified. - pub last_modified: Option, - - /// Retention mode applied to the object (e.g., Governance, Compliance). - pub retention_mode: Option, - - /// Date until which the object is retained under the specified retention mode. - pub retention_retain_until_date: Option, - - /// Indicates if a legal hold is in place for the object. - pub legal_hold: Option, - - /// Indicates if the object is a delete marker (in versioned buckets). 
- pub delete_marker: Option, - - /// Custom user-defined metadata associated with the object. - pub user_metadata: HashMap, + request: S3Request, + headers: HeaderMap, + body: Bytes, } -#[async_trait] -impl FromS3Response for StatObjectResponse { - async fn from_s3response( - req: S3Request, - resp: Result, - ) -> Result { - let mut resp = resp?; +impl_from_s3response!(StatObjectResponse); +impl_has_s3fields!(StatObjectResponse); - let headers: HeaderMap = mem::take(resp.headers_mut()); +impl HasBucket for StatObjectResponse {} +impl HasRegion for StatObjectResponse {} +impl HasObject for StatObjectResponse {} +impl HasEtagFromHeaders for StatObjectResponse {} +impl HasIsDeleteMarker for StatObjectResponse {} - let size: u64 = match headers.get("Content-Length") { +impl StatObjectResponse { + /// Returns the size of the object (header-value of `Content-Length`). + pub fn size(&self) -> Result { + let size: u64 = match self.headers().get("Content-Length") { Some(v) => v.to_str()?.parse::()?, None => 0_u64, }; + Ok(size) + } - let etag: String = match headers.get("ETag") { - Some(v) => v.to_str()?.trim_matches('"').to_string(), - None => "".to_string(), - }; + /// Return the last modified time of the object (header-value of `Last-Modified`). + pub fn last_modified(&self) -> Result, Error> { + match self.headers().get("Last-Modified") { + Some(v) => Ok(Some(from_http_header_value(v.to_str()?)?)), + None => Ok(None), + } + } - let version_id: Option = headers - .get("x-amz-version-id") - .and_then(|v| v.to_str().ok().map(String::from)); + /// Return the retention mode of the object (header-value of `x-amz-object-lock-mode`). + pub fn retention_mode(&self) -> Result, Error> { + match self.headers().get("x-amz-object-lock-mode") { + Some(v) => Ok(Some(RetentionMode::parse(v.to_str()?)?)), + None => Ok(None), + } + } - let last_modified: Option = match headers.get("Last-Modified") { - Some(v) => Some(from_http_header_value(v.to_str()?)?), - None => None, - }; + /// Return the retention date of the object (header-value of `x-amz-object-lock-retain-until-date`). + pub fn retention_retain_until_date(&self) -> Result, Error> { + match self.headers().get("x-amz-object-lock-retain-until-date") { + Some(v) => Ok(Some(from_iso8601utc(v.to_str()?)?)), + None => Ok(None), + } + } - let retention_mode: Option = match headers.get("x-amz-object-lock-mode") { - Some(v) => Some(RetentionMode::parse(v.to_str()?)?), - None => None, - }; - - let retention_retain_until_date: Option = - match headers.get("x-amz-object-lock-retain-until-date") { - Some(v) => Some(from_iso8601utc(v.to_str()?)?), - None => None, - }; - - let legal_hold: Option = match headers.get("x-amz-object-lock-legal-hold") { - Some(v) => Some(parse_legal_hold(v.to_str()?)?), - None => None, - }; - - let delete_marker: Option = match headers.get("x-amz-delete-marker") { - Some(v) => Some(v.to_str()?.parse::()?), - None => None, - }; + /// Return the legal hold status of the object (header-value of `x-amz-object-lock-legal-hold`). + pub fn legal_hold(&self) -> Result, Error> { + match self.headers().get("x-amz-object-lock-legal-hold") { + Some(v) => Ok(Some(parse_legal_hold(v.to_str()?)?)), + None => Ok(None), + } + } + /// Returns the user-defined metadata of the object (header-value of `x-amz-meta-*`). 
+ pub fn user_metadata(&self) -> Result, Error> { let mut user_metadata: HashMap = HashMap::new(); - for (key, value) in headers.iter() { + for (key, value) in self.headers().iter() { if let Some(v) = key.as_str().strip_prefix("x-amz-meta-") { user_metadata.insert(v.to_string(), value.to_str()?.to_string()); } } - - Ok(Self { - headers, - region: req.inner_region, - bucket: take_bucket(req.bucket)?, - object: take_object(req.object)?, - size, - etag, - version_id, - last_modified, - retention_mode, - retention_retain_until_date, - legal_hold, - delete_marker, - user_metadata, - }) + Ok(user_metadata) } } diff --git a/src/s3/segmented_bytes.rs b/src/s3/segmented_bytes.rs index 1e0069c..1d548ee 100644 --- a/src/s3/segmented_bytes.rs +++ b/src/s3/segmented_bytes.rs @@ -14,6 +14,7 @@ // limitations under the License. use bytes::{Bytes, BytesMut}; +use std::fmt; /// An aggregated collection of `Bytes` objects. #[derive(Debug, Clone)] @@ -76,6 +77,15 @@ impl SegmentedBytes { } } +impl fmt::Display for SegmentedBytes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match std::str::from_utf8(self.to_bytes().as_ref()) { + Ok(s) => write!(f, "{}", s), + Err(_) => Ok(()), // or: write!(f, "") + } + } +} + pub struct SegmentedBytesIntoIterator { sb: SegmentedBytes, current_segment: usize, diff --git a/src/s3/utils.rs b/src/s3/utils.rs index 9aa52f2..a25e17b 100644 --- a/src/s3/utils.rs +++ b/src/s3/utils.rs @@ -324,6 +324,15 @@ pub fn get_option_text(element: &Element, tag: &str) -> Option { None } +/// Trim leading and trailing quotes from a string. It consumes the +pub fn trim_quotes(mut s: String) -> String { + if s.len() >= 2 && s.starts_with('"') && s.ends_with('"') { + s.drain(0..1); // remove the leading quote + s.pop(); // remove the trailing quote + } + s +} + /// Gets default text value of given XML element for given tag. 
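
With the `StatObjectResponse` changes above, metadata that used to be eagerly parsed into public fields is now derived on demand from the stored headers, so most accessors return a `Result`. A usage sketch under that assumption (the `describe` helper is hypothetical):

```rust
use minio::s3::error::Error;
use minio::s3::response::StatObjectResponse;
use minio::s3::response::a_response_traits::{HasBucket, HasEtagFromHeaders, HasObject};

// Hypothetical helper: reads the lazily parsed metadata accessors.
fn describe(resp: &StatObjectResponse) -> Result<(), Error> {
    println!(
        "{}/{}: {} bytes, etag={}",
        resp.bucket(),
        resp.object(),
        resp.size()?,
        resp.etag()?
    );
    if let Some(modified) = resp.last_modified()? {
        println!("last modified: {:?}", modified);
    }
    for (key, value) in resp.user_metadata()? {
        println!("x-amz-meta-{key}: {value}");
    }
    Ok(())
}
```
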
pub fn get_default_text(element: &Element, tag: &str) -> String { element.get_child(tag).map_or(String::new(), |v| { @@ -423,14 +432,6 @@ pub fn insert(data: Option, key: impl Into) -> Multimap { result } -pub fn take_bucket(opt_bucket: Option) -> Result { - opt_bucket.ok_or_else(|| Error::InvalidBucketName("no bucket specified".into())) -} - -pub fn take_object(opt_object: Option) -> Result { - opt_object.ok_or_else(|| Error::InvalidObjectName("no object specified".into())) -} - pub mod xml { use std::collections::HashMap; diff --git a/tests/test_append_object.rs b/tests/test_append_object.rs index 5dc82b5..9caf689 100644 --- a/tests/test_append_object.rs +++ b/tests/test_append_object.rs @@ -15,6 +15,9 @@ use minio::s3::builders::ObjectContent; use minio::s3::error::{Error, ErrorCode}; +use minio::s3::response::a_response_traits::{ + HasBucket, HasEtagFromHeaders, HasObject, HasObjectSize, +}; use minio::s3::response::{ AppendObjectResponse, GetObjectResponse, PutObjectContentResponse, PutObjectResponse, StatObjectResponse, @@ -42,8 +45,8 @@ async fn create_object_helper( .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); let resp: GetObjectResponse = ctx .client @@ -51,13 +54,14 @@ async fn create_object_helper( .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size().unwrap(), size); // double check that the content we just have put is "aaaa" let content1: String = String::from_utf8( - resp.content + resp.content() + .unwrap() .to_segmented_bytes() .await .unwrap() @@ -95,9 +99,9 @@ async fn append_object_0() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size * 2); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size * 2); let resp: GetObjectResponse = ctx .client @@ -106,9 +110,14 @@ async fn append_object_0() { .await .unwrap(); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size().unwrap(), size * 2); + // retrieve the content of the object and check that it is "aaaabbbb" let content: String = String::from_utf8( - resp.content + resp.content() + .unwrap() .to_segmented_bytes() .await .unwrap() @@ -117,10 +126,6 @@ async fn append_object_0() { ) .unwrap(); assert_eq!(content, format!("{}{}", content1, content2)); - - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size * 2); } /// Append to the beginning of an existing object (happy flow) @@ -150,9 +155,9 @@ async fn append_object_1() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size); let resp: GetObjectResponse = ctx .client @@ -161,9 +166,14 @@ async fn append_object_1() { .await .unwrap(); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size().unwrap(), size); + // retrieve the content of the object and check that it is "bbbb" let content: 
String = String::from_utf8( - resp.content + resp.content() + .unwrap() .to_segmented_bytes() .await .unwrap() @@ -172,9 +182,6 @@ async fn append_object_1() { ) .unwrap(); assert_eq!(content, content2); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); } /// Append to the middle of an existing object (error InvalidWriteOffset) @@ -273,9 +280,9 @@ async fn append_object_4() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size); let resp: GetObjectResponse = ctx .client @@ -283,10 +290,14 @@ async fn append_object_4() { .send() .await .unwrap(); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size().unwrap(), size); // retrieve the content of the object and check that it is "aaaa" let content: String = String::from_utf8( - resp.content + resp.content() + .unwrap() .to_segmented_bytes() .await .unwrap() @@ -295,9 +306,6 @@ async fn append_object_4() { ) .unwrap(); assert_eq!(content, content1); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); } /// Append beyond the size of a non-existing object (error NoSuchKey) @@ -354,9 +362,9 @@ async fn append_object_content_0() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size * 2); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size * 2); let resp: GetObjectResponse = ctx .client @@ -364,9 +372,13 @@ async fn append_object_content_0() { .send() .await .unwrap(); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size().unwrap(), size * 2); let content: String = String::from_utf8( - resp.content + resp.content() + .unwrap() .to_segmented_bytes() .await .unwrap() @@ -375,10 +387,6 @@ async fn append_object_content_0() { ) .unwrap(); assert_eq!(content, format!("{}{}", content1, content2)); - - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size * 2); } #[tokio::test(flavor = "multi_thread")] @@ -403,9 +411,9 @@ async fn append_object_content_1() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, part_size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), part_size); for i in 1..n_parts { let expected_size: u64 = (i + 1) * part_size; @@ -417,9 +425,9 @@ async fn append_object_content_1() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, expected_size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), expected_size); let resp: StatObjectResponse = ctx .client @@ -427,9 +435,9 @@ async fn append_object_content_1() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.size, expected_size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + 
assert_eq!(resp.size().unwrap(), expected_size); } } @@ -455,9 +463,9 @@ async fn append_object_content_2() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, *size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), *size); let expected_size: u64 = 2 * (*size); let data2: ObjectContent = ObjectContent::new_from_stream(RandSrc::new(*size), Some(*size)); @@ -467,9 +475,9 @@ async fn append_object_content_2() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, expected_size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), expected_size); let resp: StatObjectResponse = ctx .client @@ -477,9 +485,9 @@ async fn append_object_content_2() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.size, expected_size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.size().unwrap(), expected_size); } } @@ -528,23 +536,23 @@ async fn append_object_content_3() { .send() .await .unwrap(); - assert_eq!(resp.object_size, initial_size); + assert_eq!(resp.object_size(), initial_size); let resp: AppendObjectResponse = client .append_object_content(&test_bucket, &object_name, item) .send() .await .unwrap(); - assert_eq!(resp.object_size, sizes[idx] + initial_size); - let etag = resp.etag; + assert_eq!(resp.object_size(), sizes[idx] + initial_size); + let etag: String = resp.etag().unwrap(); let resp: StatObjectResponse = client .stat_object(&test_bucket, &object_name) .send() .await .unwrap(); - assert_eq!(resp.size, sizes[idx] + initial_size); - assert_eq!(resp.etag, etag); + assert_eq!(resp.size().unwrap(), sizes[idx] + initial_size); + assert_eq!(resp.etag().unwrap(), etag); client .delete_object(&test_bucket, &object_name) .send() diff --git a/tests/test_bucket_create_delete.rs b/tests/test_bucket_create_delete.rs index f3ef589..2af45dd 100644 --- a/tests/test_bucket_create_delete.rs +++ b/tests/test_bucket_create_delete.rs @@ -15,6 +15,7 @@ use minio::s3::client::DEFAULT_REGION; use minio::s3::error::{Error, ErrorCode}; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{BucketExistsResponse, CreateBucketResponse, DeleteBucketResponse}; use minio::s3::types::S3Api; use minio_common::test_context::TestContext; @@ -27,14 +28,14 @@ async fn bucket_create() { // try to create a bucket that does not exist let resp: CreateBucketResponse = ctx.client.create_bucket(&bucket_name).send().await.unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); // check that the bucket exists let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); - assert!(resp.exists); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert!(resp.exists()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); // try to create a bucket that already exists let resp: Result = @@ -66,23 +67,23 @@ async fn bucket_delete() { // create a new bucket let resp: CreateBucketResponse = ctx.client.create_bucket(&bucket_name).send().await.unwrap(); - 
assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); // check that the bucket exists let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); - assert!(resp.exists); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert!(resp.exists()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); // try to remove a bucket that exists let resp: DeleteBucketResponse = ctx.client.delete_bucket(&bucket_name).send().await.unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); // check that the bucket does not exist anymore let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); - assert!(!resp.exists); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, ""); + assert!(!resp.exists()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), ""); } diff --git a/tests/test_bucket_encryption.rs b/tests/test_bucket_encryption.rs index 6951d46..b82d574 100644 --- a/tests/test_bucket_encryption.rs +++ b/tests/test_bucket_encryption.rs @@ -14,6 +14,7 @@ // limitations under the License. use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteBucketEncryptionResponse, GetBucketEncryptionResponse, PutBucketEncryptionResponse, }; @@ -36,9 +37,9 @@ async fn bucket_encryption() { .send() .await .unwrap(); - assert_eq!(resp.config, config); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.config().unwrap(), config); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); } let resp: GetBucketEncryptionResponse = ctx @@ -47,9 +48,9 @@ async fn bucket_encryption() { .send() .await .unwrap(); - assert_eq!(resp.config, config); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.config().unwrap(), config); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: DeleteBucketEncryptionResponse = ctx .client @@ -57,8 +58,8 @@ async fn bucket_encryption() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketEncryptionResponse = ctx .client @@ -66,8 +67,8 @@ async fn bucket_encryption() { .send() .await .unwrap(); - assert_eq!(resp.config, SseConfig::default()); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.config().unwrap(), SseConfig::default()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); //println!("response of getting encryption config: resp.sse_config={:?}", resp.config); } diff --git a/tests/test_bucket_exists.rs b/tests/test_bucket_exists.rs index e63872f..1b8e2e6 100644 --- a/tests/test_bucket_exists.rs +++ b/tests/test_bucket_exists.rs @@ -14,6 +14,7 @@ // limitations under the License. 
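
The test updates in this patch repeat the same `bucket()`/`region()` assertions for every response type; with the marker traits, that boilerplate could be factored once, for example (hypothetical helper, not part of this patch):

```rust
use minio::s3::client::DEFAULT_REGION;
use minio::s3::response::a_response_traits::{HasBucket, HasRegion};

// Hypothetical test helper over the marker traits.
fn assert_bucket_and_region<R: HasBucket + HasRegion>(resp: &R, bucket: &str) {
    assert_eq!(resp.bucket(), bucket);
    assert_eq!(resp.region(), DEFAULT_REGION);
}
```
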
use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{BucketExistsResponse, DeleteBucketResponse}; use minio::s3::types::S3Api; use minio_common::test_context::TestContext; @@ -24,16 +25,16 @@ async fn bucket_exists() { let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); - assert!(resp.exists); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert!(resp.exists()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: DeleteBucketResponse = ctx.client.delete_bucket(&bucket_name).send().await.unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); - assert!(!resp.exists); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, ""); + assert!(!resp.exists()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), ""); } diff --git a/tests/test_bucket_lifecycle.rs b/tests/test_bucket_lifecycle.rs index 844351e..66ffedf 100644 --- a/tests/test_bucket_lifecycle.rs +++ b/tests/test_bucket_lifecycle.rs @@ -16,6 +16,7 @@ use minio::s3::client::DEFAULT_REGION; use minio::s3::error::{Error, ErrorCode}; use minio::s3::lifecycle_config::LifecycleConfig; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteBucketLifecycleResponse, GetBucketLifecycleResponse, PutBucketLifecycleResponse, }; @@ -37,8 +38,8 @@ async fn bucket_lifecycle() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketLifecycleResponse = ctx .client @@ -47,10 +48,10 @@ async fn bucket_lifecycle() { .send() .await .unwrap(); - assert_eq!(resp.config, config); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); - assert!(resp.updated_at.is_none()); + assert_eq!(resp.config().unwrap(), config); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); + assert!(resp.updated_at().is_none()); let resp: GetBucketLifecycleResponse = ctx .client @@ -59,10 +60,10 @@ async fn bucket_lifecycle() { .send() .await .unwrap(); - assert_eq!(resp.config, config); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); - assert!(resp.updated_at.is_some()); + assert_eq!(resp.config().unwrap(), config); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); + assert!(resp.updated_at().is_some()); let resp: DeleteBucketLifecycleResponse = ctx .client @@ -70,8 +71,8 @@ async fn bucket_lifecycle() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: Result = ctx.client.get_bucket_lifecycle(&bucket_name).send().await; diff --git a/tests/test_bucket_notification.rs b/tests/test_bucket_notification.rs index 8298417..10203b4 100644 --- a/tests/test_bucket_notification.rs +++ b/tests/test_bucket_notification.rs @@ -14,6 +14,7 @@ // limitations under the License. 
use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteBucketNotificationResponse, GetBucketNotificationResponse, PutBucketNotificationResponse, }; @@ -42,8 +43,8 @@ async fn test_bucket_notification() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); //println!("response of setting notification: resp={:?}", resp); let resp: GetBucketNotificationResponse = ctx @@ -52,24 +53,25 @@ async fn test_bucket_notification() { .send() .await .unwrap(); - assert_eq!(resp.config, config); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + let config2 = resp.config().unwrap(); + assert_eq!(config2, config); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); //println!("response of getting notification: resp={:?}", resp); - assert_eq!(resp.config.queue_config_list.as_ref().unwrap().len(), 1); + assert_eq!(config2.queue_config_list.as_ref().unwrap().len(), 1); assert!( - resp.config.queue_config_list.as_ref().unwrap()[0] + config2.queue_config_list.as_ref().unwrap()[0] .events .contains(&String::from("s3:ObjectCreated:Put")) ); assert!( - resp.config.queue_config_list.as_ref().unwrap()[0] + config2.queue_config_list.as_ref().unwrap()[0] .events .contains(&String::from("s3:ObjectCreated:Copy")) ); assert_eq!( - resp.config.queue_config_list.as_ref().unwrap()[0] + config2.queue_config_list.as_ref().unwrap()[0] .prefix_filter_rule .as_ref() .unwrap() @@ -77,7 +79,7 @@ async fn test_bucket_notification() { "images" ); assert_eq!( - resp.config.queue_config_list.as_ref().unwrap()[0] + config2.queue_config_list.as_ref().unwrap()[0] .suffix_filter_rule .as_ref() .unwrap() @@ -85,7 +87,7 @@ async fn test_bucket_notification() { "pg" ); assert_eq!( - resp.config.queue_config_list.as_ref().unwrap()[0].queue, + config2.queue_config_list.as_ref().unwrap()[0].queue, SQS_ARN ); @@ -95,8 +97,8 @@ async fn test_bucket_notification() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); //println!("response of deleting notification: resp={:?}", resp); let resp: GetBucketNotificationResponse = ctx @@ -105,7 +107,7 @@ async fn test_bucket_notification() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); - assert_eq!(resp.config, NotificationConfig::default()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); + assert_eq!(resp.config().unwrap(), NotificationConfig::default()); } diff --git a/tests/test_bucket_policy.rs b/tests/test_bucket_policy.rs index 1f675f5..d1de72f 100644 --- a/tests/test_bucket_policy.rs +++ b/tests/test_bucket_policy.rs @@ -14,6 +14,7 @@ // limitations under the License. 
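
In the notification test above, the parsed configuration is bound once (`config2`) and reused, presumably because `config()` now deserializes the stored response body on each call instead of exposing a pre-parsed field. A sketch of that idiom, assuming `config()` returns a `Result` as the `unwrap()` calls suggest:

```rust
use minio::s3::response::GetBucketNotificationResponse;

// Bind the parsed config once instead of re-parsing it per assertion.
fn queue_count(resp: &GetBucketNotificationResponse) -> usize {
    let config = resp.config().unwrap();
    config.queue_config_list.as_ref().map_or(0, |queues| queues.len())
}
```
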
use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteBucketPolicyResponse, GetBucketPolicyResponse, PutBucketPolicyResponse, }; @@ -35,8 +36,8 @@ async fn bucket_policy() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketPolicyResponse = ctx .client @@ -47,9 +48,9 @@ async fn bucket_policy() { // TODO create a proper comparison of the retrieved config and the provided config // println!("response of getting policy: resp.config={:?}", resp.config); // assert_eq!(&resp.config, &config); - assert!(!resp.config.is_empty()); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert!(!resp.config().unwrap().is_empty()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: DeleteBucketPolicyResponse = ctx .client @@ -57,8 +58,8 @@ async fn bucket_policy() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketPolicyResponse = ctx .client @@ -66,7 +67,7 @@ async fn bucket_policy() { .send() .await .unwrap(); - assert_eq!(resp.config, "{}"); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.config().unwrap(), "{}"); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); } diff --git a/tests/test_bucket_replication.rs b/tests/test_bucket_replication.rs index 0654e22..4cdf6a3 100644 --- a/tests/test_bucket_replication.rs +++ b/tests/test_bucket_replication.rs @@ -16,6 +16,7 @@ use minio::s3::builders::VersioningStatus; use minio::s3::client::DEFAULT_REGION; use minio::s3::error::{Error, ErrorCode}; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteBucketReplicationResponse, GetBucketReplicationResponse, GetBucketVersioningResponse, PutBucketPolicyResponse, PutBucketReplicationResponse, PutBucketVersioningResponse, @@ -46,8 +47,8 @@ async fn bucket_replication_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: PutBucketVersioningResponse = ctx .client @@ -56,8 +57,8 @@ async fn bucket_replication_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name2); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name2); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketVersioningResponse = ctx .client @@ -65,9 +66,9 @@ async fn bucket_replication_s3() { .send() .await .unwrap(); - assert_eq!(resp.status, Some(VersioningStatus::Enabled)); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.status().unwrap(), Some(VersioningStatus::Enabled)); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); if false { //TODO: to allow replication policy needs to be applied, but this fails @@ -103,8 +104,8 @@ async fn bucket_replication_s3() { .await .unwrap(); //println!("response of setting replication: resp={:?}", resp); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + 
assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketReplicationResponse = ctx .client @@ -113,8 +114,8 @@ async fn bucket_replication_s3() { .await .unwrap(); //assert_eq!(resp.config, config); //TODO - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); // TODO called `Result::unwrap()` on an `Err` value: S3Error(ErrorResponse { code: "XMinioAdminRemoteTargetNotFoundError", message: "The remote target does not exist", let resp: DeleteBucketReplicationResponse = ctx diff --git a/tests/test_bucket_tags.rs b/tests/test_bucket_tagging.rs similarity index 86% rename from tests/test_bucket_tags.rs rename to tests/test_bucket_tagging.rs index 54fa4ae..657a02e 100644 --- a/tests/test_bucket_tags.rs +++ b/tests/test_bucket_tagging.rs @@ -15,6 +15,7 @@ use minio::s3::client::DEFAULT_REGION; use minio::s3::error::{Error, ErrorCode}; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion, HasTagging}; use minio::s3::response::{ DeleteBucketTaggingResponse, GetBucketTaggingResponse, PutBucketTaggingResponse, }; @@ -40,8 +41,8 @@ async fn bucket_tags_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketTaggingResponse = ctx .client @@ -49,9 +50,9 @@ async fn bucket_tags_s3() { .send() .await .unwrap(); - assert_eq!(resp.tags, tags); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.tags().unwrap(), tags); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: DeleteBucketTaggingResponse = ctx .client @@ -59,8 +60,8 @@ async fn bucket_tags_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketTaggingResponse = ctx .client @@ -68,9 +69,9 @@ async fn bucket_tags_s3() { .send() .await .unwrap(); - assert!(resp.tags.is_empty()); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert!(resp.tags().unwrap().is_empty()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); } #[tokio::test(flavor = "multi_thread")] diff --git a/tests/test_bucket_versioning.rs b/tests/test_bucket_versioning.rs index 84b11a6..de39ce4 100644 --- a/tests/test_bucket_versioning.rs +++ b/tests/test_bucket_versioning.rs @@ -16,6 +16,7 @@ use minio::s3::builders::VersioningStatus; use minio::s3::client::DEFAULT_REGION; use minio::s3::error::{Error, ErrorCode}; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{GetBucketVersioningResponse, PutBucketVersioningResponse}; use minio::s3::types::S3Api; use minio_common::test_context::TestContext; @@ -36,8 +37,8 @@ async fn bucket_versioning_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketVersioningResponse = ctx .client @@ -45,9 +46,9 @@ async fn bucket_versioning_s3() { .send() .await .unwrap(); - assert_eq!(resp.status, Some(VersioningStatus::Enabled)); - assert_eq!(resp.bucket, bucket_name); - 
assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.status().unwrap(), Some(VersioningStatus::Enabled)); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: PutBucketVersioningResponse = ctx .client @@ -56,8 +57,8 @@ async fn bucket_versioning_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetBucketVersioningResponse = ctx .client @@ -65,9 +66,9 @@ async fn bucket_versioning_s3() { .send() .await .unwrap(); - assert_eq!(resp.status, Some(VersioningStatus::Suspended)); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.status().unwrap(), Some(VersioningStatus::Suspended)); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); } #[tokio::test(flavor = "multi_thread")] diff --git a/tests/test_get_object.rs b/tests/test_get_object.rs index 93c05c2..04203be 100644 --- a/tests/test_get_object.rs +++ b/tests/test_get_object.rs @@ -14,6 +14,7 @@ // limitations under the License. use bytes::Bytes; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::response::{GetObjectResponse, PutObjectContentResponse}; use minio::s3::types::S3Api; use minio_common::test_context::TestContext; @@ -32,9 +33,9 @@ async fn get_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, data.len() as u64); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), data.len() as u64); let resp: GetObjectResponse = ctx .client @@ -42,10 +43,16 @@ async fn get_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, data.len() as u64); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size().unwrap(), data.len() as u64); - let got = resp.content.to_segmented_bytes().await.unwrap().to_bytes(); + let got = resp + .content() + .unwrap() + .to_segmented_bytes() + .await + .unwrap() + .to_bytes(); assert_eq!(got, data); } diff --git a/tests/test_list_buckets.rs b/tests/test_list_buckets.rs index b90f084..96108be 100644 --- a/tests/test_list_buckets.rs +++ b/tests/test_list_buckets.rs @@ -36,10 +36,15 @@ async fn list_buckets() { let mut count = 0; let resp: ListBucketsResponse = ctx.client.list_buckets().send().await.unwrap(); - for bucket in resp.buckets.iter() { + for bucket in resp.buckets().unwrap().iter() { if names.contains(&bucket.name) { count += 1; - } + } // else if bucket.name.len() == 8 { + // match ctx.client.delete_and_purge_bucket(&bucket.name).await { + // Ok(_) => println!("Deleted bucket: {}", bucket.name), + // Err(e) => println!("Failed to delete bucket {}: {}", bucket.name, e) + // } + //} } assert_eq!(guards.len(), N_BUCKETS); assert_eq!(count, N_BUCKETS); diff --git a/tests/test_list_objects.rs b/tests/test_list_objects.rs index deeab65..6bacfab 100644 --- a/tests/test_list_objects.rs +++ b/tests/test_list_objects.rs @@ -15,6 +15,7 @@ use async_std::stream::StreamExt; use minio::s3::response::PutObjectContentResponse; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::types::ToStream; use minio_common::test_context::TestContext; use 
minio_common::utils::rand_object_name; @@ -61,8 +62,8 @@ async fn list_objects( .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); names_set_before.insert(object_name); } } diff --git a/tests/test_listen_bucket_notification.rs b/tests/test_listen_bucket_notification.rs index b8c838b..33a28da 100644 --- a/tests/test_listen_bucket_notification.rs +++ b/tests/test_listen_bucket_notification.rs @@ -17,6 +17,7 @@ use async_std::stream::StreamExt; use async_std::task; use minio::s3::builders::ObjectContent; use minio::s3::response::PutObjectContentResponse; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::types::{NotificationRecord, NotificationRecords, S3Api}; use minio_common::rand_src::RandSrc; use minio_common::test_context::TestContext; @@ -85,7 +86,8 @@ async fn listen_bucket_notification() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); spawned_listen_task.await; diff --git a/tests/test_object_compose.rs b/tests/test_object_compose.rs index 5604e4e..5c5ffa8 100644 --- a/tests/test_object_compose.rs +++ b/tests/test_object_compose.rs @@ -14,6 +14,7 @@ // limitations under the License. use minio::s3::builders::{ComposeSource, ObjectContent}; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::response::{ComposeObjectResponse, PutObjectContentResponse, StatObjectResponse}; use minio::s3::types::S3Api; use minio_common::rand_src::RandSrc; @@ -36,7 +37,7 @@ async fn compose_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.bucket(), bucket_name); let sources: Vec = { let mut sources = Vec::new(); @@ -53,8 +54,8 @@ async fn compose_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name_dst); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name_dst); let resp: StatObjectResponse = ctx .client @@ -62,6 +63,6 @@ async fn compose_object() { .send() .await .unwrap(); - assert_eq!(resp.size, 5); - assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.size().unwrap(), 5); + assert_eq!(resp.bucket(), bucket_name); } diff --git a/tests/test_object_copy.rs b/tests/test_object_copy.rs index b2757b9..f4edad8 100644 --- a/tests/test_object_copy.rs +++ b/tests/test_object_copy.rs @@ -14,6 +14,7 @@ // limitations under the License. 
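The hunks above switch test assertions from public response fields to the accessor traits under minio::s3::response::a_response_traits. A minimal sketch of leaning on those traits generically; the helper name and the &str comparisons are illustrative assumptions, not part of this patch:

use minio::s3::response::a_response_traits::{HasBucket, HasObject};

// Hypothetical helper: works for any response exposing HasBucket + HasObject,
// e.g. PutObjectContentResponse or CopyObjectResponse as used in these tests.
// Assumes the accessors yield string-like values comparable to &str.
fn assert_bucket_and_object<R: HasBucket + HasObject>(resp: &R, bucket: &str, object: &str) {
    assert_eq!(resp.bucket(), bucket);
    assert_eq!(resp.object(), object);
}
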
use minio::s3::builders::{CopySource, ObjectContent}; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::response::{CopyObjectResponse, PutObjectContentResponse, StatObjectResponse}; use minio::s3::types::S3Api; use minio_common::rand_src::RandSrc; @@ -41,7 +42,8 @@ async fn copy_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name_src); let resp: CopyObjectResponse = ctx .client @@ -50,8 +52,8 @@ async fn copy_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name_dst); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name_dst); let resp: StatObjectResponse = ctx .client @@ -59,6 +61,6 @@ async fn copy_object() { .send() .await .unwrap(); - assert_eq!(resp.size, size); - assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.size().unwrap(), size); + assert_eq!(resp.bucket(), bucket_name); } diff --git a/tests/test_object_legal_hold.rs b/tests/test_object_legal_hold.rs index 2fe0862..68105b9 100644 --- a/tests/test_object_legal_hold.rs +++ b/tests/test_object_legal_hold.rs @@ -16,6 +16,7 @@ use bytes::Bytes; use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasVersion}; use minio::s3::response::{ CreateBucketResponse, GetObjectLegalHoldResponse, PutObjectContentResponse, PutObjectLegalHoldResponse, @@ -50,9 +51,9 @@ async fn object_legal_hold_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, data.len() as u64); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), data.len() as u64); let resp: PutObjectLegalHoldResponse = ctx .client @@ -60,10 +61,10 @@ async fn object_legal_hold_s3() { .send() .await .unwrap(); - assert_eq!(resp.object, object_name); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); - assert_eq!(resp.version_id, None); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); + assert_eq!(resp.version_id(), None); let resp: GetObjectLegalHoldResponse = ctx .client @@ -71,11 +72,11 @@ async fn object_legal_hold_s3() { .send() .await .unwrap(); - assert!(resp.enabled); - assert_eq!(resp.object, object_name); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); - assert_eq!(resp.version_id, None); + assert!(resp.enabled().unwrap()); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); + assert_eq!(resp.version_id(), None); let resp: PutObjectLegalHoldResponse = ctx .client @@ -83,10 +84,10 @@ async fn object_legal_hold_s3() { .send() .await .unwrap(); - assert_eq!(resp.object, object_name); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); - assert_eq!(resp.version_id, None); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); + assert_eq!(resp.version_id(), None); let resp: GetObjectLegalHoldResponse = ctx .client @@ -94,9 +95,9 @@ async fn object_legal_hold_s3() { .send() .await .unwrap(); - assert!(resp.enabled); - assert_eq!(resp.object, object_name); - assert_eq!(resp.bucket, bucket_name); - 
assert_eq!(resp.region, DEFAULT_REGION); - assert_eq!(resp.version_id, None); + assert!(resp.enabled().unwrap()); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); + assert_eq!(resp.version_id(), None); } diff --git a/tests/test_object_lock_config.rs b/tests/test_object_lock_config.rs index b42b324..8a2ac62 100644 --- a/tests/test_object_lock_config.rs +++ b/tests/test_object_lock_config.rs @@ -14,6 +14,7 @@ // limitations under the License. use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteObjectLockConfigResponse, GetObjectLockConfigResponse, PutObjectLockConfigResponse, }; @@ -50,8 +51,8 @@ async fn object_lock_config() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetObjectLockConfigResponse = ctx .client @@ -59,14 +60,13 @@ async fn object_lock_config() { .send() .await .unwrap(); - assert_eq!( - resp.config.retention_mode.unwrap(), - RetentionMode::GOVERNANCE - ); - assert_eq!(resp.config.retention_duration_days, Some(DURATION_DAYS)); - assert!(resp.config.retention_duration_years.is_none()); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + + let config = resp.config().unwrap(); + assert_eq!(config.retention_mode.unwrap(), RetentionMode::GOVERNANCE); + assert_eq!(config.retention_duration_days, Some(DURATION_DAYS)); + assert!(config.retention_duration_years.is_none()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: DeleteObjectLockConfigResponse = ctx .client @@ -74,8 +74,8 @@ async fn object_lock_config() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetObjectLockConfigResponse = ctx .client @@ -83,7 +83,8 @@ async fn object_lock_config() { .send() .await .unwrap(); - assert!(resp.config.retention_mode.is_none()); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + let config = resp.config().unwrap(); + assert!(config.retention_mode.is_none()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.region(), DEFAULT_REGION); } diff --git a/tests/test_object_put.rs b/tests/test_object_put.rs index e24ebf1..87dc5e9 100644 --- a/tests/test_object_put.rs +++ b/tests/test_object_put.rs @@ -14,8 +14,12 @@ // limitations under the License. 
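In the legal-hold and lock-config hunks above, accessors such as enabled() and config() return a Result that the tests unwrap. A sketch of how non-test code might propagate the error instead; the function is hypothetical and only reuses calls visible in this diff, and it assumes the crate's Error implements std::error::Error:

use minio::s3::response::GetObjectLockConfigResponse;
use minio::s3::response::a_response_traits::{HasBucket, HasRegion};

// Hypothetical caller: propagate parse failures instead of unwrapping.
fn describe_lock(resp: &GetObjectLockConfigResponse) -> Result<String, Box<dyn std::error::Error>> {
    let config = resp.config()?; // may fail if the response body cannot be parsed
    Ok(format!(
        "bucket={} region={} retention_mode={:?}",
        resp.bucket(),
        resp.region(),
        config.retention_mode
    ))
}
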
use http::header; -use minio::s3::builders::ObjectContent; -use minio::s3::response::{PutObjectContentResponse, StatObjectResponse}; +use minio::s3::builders::{MIN_PART_SIZE, ObjectContent}; +use minio::s3::error::{Error, ErrorCode}; +use minio::s3::response::a_response_traits::{ + HasBucket, HasEtagFromHeaders, HasIsDeleteMarker, HasObject, HasS3Fields, HasVersion, +}; +use minio::s3::response::{DeleteObjectResponse, PutObjectContentResponse, StatObjectResponse}; use minio::s3::types::S3Api; use minio_common::rand_src::RandSrc; use minio_common::test_context::TestContext; @@ -39,8 +43,10 @@ async fn put_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); + + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size); let resp: StatObjectResponse = ctx .client @@ -48,20 +54,43 @@ async fn put_object() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.size, size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.size().unwrap(), size); + + let resp: DeleteObjectResponse = ctx + .client + .delete_object(&bucket_name, &object_name) + .send() + .await + .unwrap(); + assert!(!resp.version_id().is_some()); + + // Validate delete succeeded. + let resp: Result = ctx + .client + .stat_object(&bucket_name, &object_name) + .send() + .await; + + match resp.err().unwrap() { + Error::S3Error(er) => { + assert_eq!(er.code, ErrorCode::NoSuchKey) + } + e => panic!("Unexpected error {:?}", e), + } } #[tokio::test(flavor = "multi_thread")] async fn put_object_multipart() { let ctx = TestContext::new_from_env(); let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let object_name: String = rand_object_name(); + let object_name = rand_object_name(); - let size: u64 = 16 + 5 * 1024 * 1024; + let size: u64 = 16 + MIN_PART_SIZE; - ctx.client + let resp: PutObjectContentResponse = ctx + .client .put_object_content( &bucket_name, &object_name, @@ -70,88 +99,125 @@ async fn put_object_multipart() { .send() .await .unwrap(); - let resp = ctx + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size); + + let resp: StatObjectResponse = ctx .client .stat_object(&bucket_name, &object_name) .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.size as u64, size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.size().unwrap(), size); + + let resp: DeleteObjectResponse = ctx + .client + .delete_object(&bucket_name, &object_name) + .send() + .await + .unwrap(); + assert_eq!(resp.version_id(), None); } #[tokio::test(flavor = "multi_thread")] -async fn put_object_content() { +async fn put_object_content_1() { let ctx = TestContext::new_from_env(); let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); - - let sizes = [16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; + let sizes = [16_u64, MIN_PART_SIZE, 16 + MIN_PART_SIZE]; for size in sizes.iter() { - let data_src = RandSrc::new(*size); - let rsp = ctx + let resp: PutObjectContentResponse = ctx .client .put_object_content( &bucket_name, &object_name, - ObjectContent::new_from_stream(data_src, Some(*size)), + ObjectContent::new_from_stream(RandSrc::new(*size), Some(*size)), ) 
.content_type(String::from("image/jpeg")) .send() .await .unwrap(); - assert_eq!(rsp.object_size, *size); - let etag = rsp.etag; - let resp = ctx + assert_eq!(resp.object_size(), *size); + + let etag = resp.etag().unwrap(); + let resp: StatObjectResponse = ctx .client .stat_object(&bucket_name, &object_name) .send() .await .unwrap(); - assert_eq!(resp.size, *size); - assert_eq!(resp.etag, etag); + assert_eq!(resp.size().unwrap(), *size); + assert_eq!(resp.etag().unwrap(), etag); assert_eq!( - resp.headers.get(header::CONTENT_TYPE).unwrap(), + resp.headers().get(header::CONTENT_TYPE).unwrap(), "image/jpeg" ); + + let resp: DeleteObjectResponse = ctx + .client + .delete_object(&bucket_name, &object_name) + .send() + .await + .unwrap(); + + assert!(!resp.is_delete_marker().unwrap().unwrap()); } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 10)] +async fn put_object_content_2() { + let ctx = TestContext::new_from_env(); + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; + let object_name = rand_object_name(); + let sizes = [16_u64, MIN_PART_SIZE, 16 + MIN_PART_SIZE]; // Repeat test with no size specified in ObjectContent for size in sizes.iter() { let data_src = RandSrc::new(*size); - let rsp = ctx + let resp: PutObjectContentResponse = ctx .client .put_object_content( &bucket_name, &object_name, ObjectContent::new_from_stream(data_src, None), ) - .part_size(Some(5 * 1024 * 1024)) // Set part size to 5MB + .part_size(Some(MIN_PART_SIZE)) .send() .await .unwrap(); - assert_eq!(rsp.object_size, *size); - let etag = rsp.etag; - let resp = ctx + assert_eq!(resp.object_size(), *size); + let etag = resp.etag().unwrap(); + + let resp: StatObjectResponse = ctx .client .stat_object(&bucket_name, &object_name) .send() .await .unwrap(); - assert_eq!(resp.size, *size); - assert_eq!(resp.etag, etag); + assert_eq!(resp.size().unwrap(), *size); + assert_eq!(resp.etag().unwrap(), etag); + + let resp: DeleteObjectResponse = ctx + .client + .delete_object(&bucket_name, &object_name) + .send() + .await + .unwrap(); + assert_eq!(resp.version_id(), None); } } -/// Test sending ObjectContent across async tasks. +/// Test sending PutObject across async tasks. 
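The renamed put_object_content_* tests above replace the hard-coded 5 * 1024 * 1024 with the MIN_PART_SIZE constant. A small sketch of the size set they rely on; treating MIN_PART_SIZE as the 5 MiB minimum multipart part size is an assumption drawn from the code this diff deletes:

use minio::s3::builders::MIN_PART_SIZE;

// Sizes exercised by the tests: one small single-part upload, one exactly at
// the minimum part size, and one that should spill into a second part.
fn test_sizes() -> [u64; 3] {
    [16, MIN_PART_SIZE, 16 + MIN_PART_SIZE]
}
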
#[tokio::test(flavor = "multi_thread")] -async fn put_object_content_2() { +async fn put_object_content_3() { let ctx = TestContext::new_from_env(); let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let object_name: String = rand_object_name(); - let sizes = vec![16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; + let object_name = rand_object_name(); + let sizes = vec![16_u64, MIN_PART_SIZE, 16 + MIN_PART_SIZE]; let (sender, mut receiver): (mpsc::Sender, mpsc::Receiver) = mpsc::channel(2); @@ -177,20 +243,20 @@ async fn put_object_content_2() { tokio::spawn(async move { let mut idx = 0; while let Some(item) = receiver.recv().await { - let rsp = client + let resp: PutObjectContentResponse = client .put_object_content(&test_bucket, &object_name, item) .send() .await .unwrap(); - assert_eq!(rsp.object_size, sizes[idx]); - let etag = rsp.etag; - let resp = client + assert_eq!(resp.object_size(), sizes[idx]); + let etag = resp.etag().unwrap(); + let resp: StatObjectResponse = client .stat_object(&test_bucket, &object_name) .send() .await .unwrap(); - assert_eq!(resp.size, sizes[idx]); - assert_eq!(resp.etag, etag); + assert_eq!(resp.size().unwrap(), sizes[idx]); + assert_eq!(resp.etag().unwrap(), etag); client .delete_object(&test_bucket, &object_name) .send() diff --git a/tests/test_object_remove.rs b/tests/test_object_remove.rs index 8efb3ba..d8a65bb 100644 --- a/tests/test_object_remove.rs +++ b/tests/test_object_remove.rs @@ -16,6 +16,7 @@ use async_std::stream::StreamExt; use minio::s3::builders::ObjectToDelete; use minio::s3::response::PutObjectContentResponse; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::types::ToStream; use minio_common::test_context::TestContext; use minio_common::utils::rand_object_name; @@ -34,8 +35,8 @@ async fn remove_objects() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); names.push(object_name); } let del_items: Vec = names @@ -53,10 +54,12 @@ async fn remove_objects() { let mut del_count = 0; while let Some(item) = resp.next().await { let res = item.unwrap(); - for obj in res.result.iter() { + let del_result = res.result().unwrap(); + del_count += del_result.len(); + + for obj in del_result.into_iter() { assert!(obj.is_deleted()); } - del_count += res.result.len(); } assert_eq!(del_count, 3); } diff --git a/tests/test_object_retention.rs b/tests/test_object_retention.rs index a551d03..f7ace16 100644 --- a/tests/test_object_retention.rs +++ b/tests/test_object_retention.rs @@ -15,6 +15,7 @@ use minio::s3::builders::ObjectContent; use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasVersion}; use minio::s3::response::{ CreateBucketResponse, GetObjectRetentionResponse, PutObjectContentResponse, PutObjectRetentionResponse, @@ -43,7 +44,7 @@ async fn object_retention() { .await .unwrap(); let _cleanup = CleanupGuard::new(ctx.client.clone(), &bucket_name); - assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.bucket(), bucket_name); let object_name = rand_object_name(); let size = 16_u64; @@ -58,11 +59,11 @@ async fn object_retention() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); - assert_ne!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + 
assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size); + assert_ne!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); //assert_eq!(resp.etag, ""); let retain_until_date = utc_now() + chrono::Duration::days(1); @@ -74,10 +75,10 @@ async fn object_retention() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetObjectRetentionResponse = ctx .client @@ -85,9 +86,12 @@ async fn object_retention() { .send() .await .unwrap(); - assert_eq!(resp.retention_mode.unwrap(), RetentionMode::GOVERNANCE); assert_eq!( - to_iso8601utc(resp.retain_until_date.unwrap()), + resp.retention_mode().unwrap().unwrap(), + RetentionMode::GOVERNANCE + ); + assert_eq!( + to_iso8601utc(resp.retain_until_date().unwrap().unwrap()), to_iso8601utc(retain_until_date) ); @@ -98,10 +102,10 @@ async fn object_retention() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetObjectRetentionResponse = ctx .client @@ -109,10 +113,10 @@ async fn object_retention() { .send() .await .unwrap(); - assert!(resp.retention_mode.is_none()); - assert!(resp.retain_until_date.is_none()); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert!(resp.retention_mode().unwrap().is_none()); + assert!(resp.retain_until_date().unwrap().is_none()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); } diff --git a/tests/test_object_tags.rs b/tests/test_object_tagging.rs similarity index 69% rename from tests/test_object_tags.rs rename to tests/test_object_tagging.rs index 9dfb778..0d0251a 100644 --- a/tests/test_object_tags.rs +++ b/tests/test_object_tagging.rs @@ -15,6 +15,9 @@ use minio::s3::builders::ObjectContent; use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::a_response_traits::{ + HasBucket, HasObject, HasRegion, HasTagging, HasVersion, +}; use minio::s3::response::{ DeleteObjectTaggingResponse, GetObjectTaggingResponse, PutObjectContentResponse, PutObjectTaggingResponse, @@ -47,11 +50,11 @@ async fn object_tags() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); let tags = HashMap::from([ (String::from("Project"), String::from("Project One")), @@ -65,10 +68,10 @@ async fn object_tags() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, 
DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetObjectTaggingResponse = ctx .client @@ -76,11 +79,11 @@ async fn object_tags() { .send() .await .unwrap(); - assert_eq!(resp.tags, tags); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.tags().unwrap(), tags); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: DeleteObjectTaggingResponse = ctx .client @@ -88,10 +91,10 @@ async fn object_tags() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); let resp: GetObjectTaggingResponse = ctx .client @@ -99,9 +102,9 @@ async fn object_tags() { .send() .await .unwrap(); - assert!(resp.tags.is_empty()); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.version_id, None); - assert_eq!(resp.region, DEFAULT_REGION); + assert!(resp.tags().unwrap().is_empty()); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.version_id(), None); + assert_eq!(resp.region(), DEFAULT_REGION); } diff --git a/tests/test_put_object.rs b/tests/test_put_object.rs deleted file mode 100644 index ed48377..0000000 --- a/tests/test_put_object.rs +++ /dev/null @@ -1,263 +0,0 @@ -// MinIO Rust Library for Amazon S3 Compatible Cloud Storage -// Copyright 2025 MinIO, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
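The object-tagging hunks above move from the resp.tags field to a tags() accessor that returns a Result over the tag map. A sketch of a tolerant caller; the helper is hypothetical and assumes the Ok type is a HashMap<String, String>, as the assertions suggest:

use std::collections::HashMap;
use minio::s3::response::GetObjectTaggingResponse;
use minio::s3::response::a_response_traits::HasTagging;

// Hypothetical helper: fall back to an empty map when the tagging body fails to parse.
fn tags_or_empty(resp: &GetObjectTaggingResponse) -> HashMap<String, String> {
    resp.tags().unwrap_or_default()
}
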
- -use http::header; -use minio::s3::builders::ObjectContent; -use minio::s3::client; -use minio::s3::error::{Error, ErrorCode}; -use minio::s3::response::{DeleteObjectResponse, PutObjectContentResponse, StatObjectResponse}; -use minio::s3::types::S3Api; -use minio_common::rand_src::RandSrc; -use minio_common::test_context::TestContext; -use minio_common::utils::rand_object_name; -use tokio::sync::mpsc; - -#[tokio::test(flavor = "multi_thread")] -async fn put_object() { - let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let object_name = rand_object_name(); - - let size = 16_u64; - let resp: PutObjectContentResponse = ctx - .client - .put_object_content( - &bucket_name, - &object_name, - ObjectContent::new_from_stream(RandSrc::new(size), Some(size)), - ) - .send() - .await - .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); - - let resp: StatObjectResponse = ctx - .client - .stat_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.size, size); - - let resp: DeleteObjectResponse = ctx - .client - .delete_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - assert!(!resp.version_id.is_some()); - - // Validate delete succeeded. - let resp: Result = ctx - .client - .stat_object(&bucket_name, &object_name) - .send() - .await; - - match resp.err().unwrap() { - Error::S3Error(er) => { - assert_eq!(er.code, ErrorCode::NoSuchKey) - } - e => panic!("Unexpected error {:?}", e), - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn put_object_multipart() { - let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let object_name = rand_object_name(); - - let size: u64 = 16 + client::MIN_PART_SIZE; - - let resp: PutObjectContentResponse = ctx - .client - .put_object_content( - &bucket_name, - &object_name, - ObjectContent::new_from_stream(RandSrc::new(size), Some(size)), - ) - .send() - .await - .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); - - let resp: StatObjectResponse = ctx - .client - .stat_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.size, size); - - let resp: DeleteObjectResponse = ctx - .client - .delete_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - assert_eq!(resp.version_id, None); -} - -#[tokio::test(flavor = "multi_thread")] -async fn put_object_content_1() { - let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let object_name = rand_object_name(); - let sizes = [16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; - - for size in sizes.iter() { - let resp: PutObjectContentResponse = ctx - .client - .put_object_content( - &bucket_name, - &object_name, - ObjectContent::new_from_stream(RandSrc::new(*size), Some(*size)), - ) - .content_type(String::from("image/jpeg")) - .send() - .await - .unwrap(); - assert_eq!(resp.object_size, *size); - let etag = resp.etag; - let resp: StatObjectResponse = ctx - .client - .stat_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - assert_eq!(resp.size, *size); - assert_eq!(resp.etag, etag); - assert_eq!( - 
resp.headers.get(header::CONTENT_TYPE).unwrap(), - "image/jpeg" - ); - let resp: DeleteObjectResponse = ctx - .client - .delete_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - assert!(!resp.is_delete_marker); - } -} - -#[tokio::test(flavor = "multi_thread")] -async fn put_object_content_2() { - let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let object_name = rand_object_name(); - let sizes = [16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; - - // Repeat test with no size specified in ObjectContent - for size in sizes.iter() { - let data_src = RandSrc::new(*size); - let rsp = ctx - .client - .put_object_content( - &bucket_name, - &object_name, - ObjectContent::new_from_stream(data_src, None), - ) - .part_size(Some(5 * 1024 * 1024)) // Set part size to 5MB - .send() - .await - .unwrap(); - assert_eq!(rsp.object_size, *size); - let etag = rsp.etag; - let resp = ctx - .client - .stat_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - assert_eq!(resp.size, *size); - assert_eq!(resp.etag, etag); - ctx.client - .delete_object(&bucket_name, &object_name) - .send() - .await - .unwrap(); - } -} - -/// Test sending PutObject across async tasks. -#[tokio::test(flavor = "multi_thread")] -async fn put_object_content_3() { - let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let object_name = rand_object_name(); - let sizes = vec![16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; - - let (sender, mut receiver): (mpsc::Sender, mpsc::Receiver) = - mpsc::channel(2); - - let sender_handle = { - let sizes = sizes.clone(); - tokio::spawn(async move { - for size in sizes.iter() { - let data_src = RandSrc::new(*size); - sender - .send(ObjectContent::new_from_stream(data_src, Some(*size))) - .await - .unwrap(); - } - }) - }; - - let uploader_handler = { - let sizes = sizes.clone(); - let object_name = object_name.clone(); - let client = ctx.client.clone(); - let test_bucket = bucket_name.clone(); - tokio::spawn(async move { - let mut idx = 0; - while let Some(item) = receiver.recv().await { - let resp: PutObjectContentResponse = client - .put_object_content(&test_bucket, &object_name, item) - .send() - .await - .unwrap(); - assert_eq!(resp.object_size, sizes[idx]); - let etag = resp.etag; - let resp: StatObjectResponse = client - .stat_object(&test_bucket, &object_name) - .send() - .await - .unwrap(); - assert_eq!(resp.size, sizes[idx]); - assert_eq!(resp.etag, etag); - client - .delete_object(&test_bucket, &object_name) - .send() - .await - .unwrap(); - - idx += 1; - } - }) - }; - - sender_handle.await.unwrap(); - uploader_handler.await.unwrap(); -} diff --git a/tests/test_select_object_content.rs b/tests/test_select_object_content.rs index af9039f..95dee4a 100644 --- a/tests/test_select_object_content.rs +++ b/tests/test_select_object_content.rs @@ -14,6 +14,7 @@ // limitations under the License. 
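Both the relocated put_object test and the deleted file it replaces verify a delete by expecting ErrorCode::NoSuchKey from a follow-up stat_object. A compact sketch of that check; the helper name is illustrative and the match arms mirror the ones in the tests:

use minio::s3::error::{Error, ErrorCode};

// Hypothetical assertion helper used after deleting an object.
fn assert_no_such_key(err: Error) {
    match err {
        Error::S3Error(er) => assert_eq!(er.code, ErrorCode::NoSuchKey),
        e => panic!("unexpected error: {:?}", e),
    }
}
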
use minio::s3::error::{Error, ErrorCode}; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::response::{PutObjectContentResponse, SelectObjectContentResponse}; use minio::s3::types::{S3Api, SelectRequest}; use minio_common::example::{create_select_content_data, create_select_content_request}; @@ -38,8 +39,8 @@ async fn select_object_content_s3() { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); let select_request: SelectRequest = create_select_content_request(); diff --git a/tests/test_upload_download_object.rs b/tests/test_upload_download_object.rs index 9422d3e..0feaf1a 100644 --- a/tests/test_upload_download_object.rs +++ b/tests/test_upload_download_object.rs @@ -15,6 +15,7 @@ use hex::ToHex; use minio::s3::builders::ObjectContent; +use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::response::{GetObjectResponse, PutObjectContentResponse}; use minio::s3::types::S3Api; use minio_common::rand_reader::RandReader; @@ -65,9 +66,9 @@ async fn upload_download_object(size: u64) { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); - assert_eq!(resp.object_size, size); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); + assert_eq!(resp.object_size(), size); let filename: String = rand_object_name(); let resp: GetObjectResponse = ctx @@ -76,11 +77,12 @@ async fn upload_download_object(size: u64) { .send() .await .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.object, object_name); + assert_eq!(resp.bucket(), bucket_name); + assert_eq!(resp.object(), object_name); // save the object to a file - resp.content + resp.content() + .unwrap() .to_file(PathBuf::from(&filename).as_path()) .await .unwrap();
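
The get/download hunks move body access behind a content() accessor that returns a Result, which the tests then stream either into memory (to_segmented_bytes) or to a file (to_file). A sketch of the in-memory path; the async helper is hypothetical and uses only calls that appear in these tests:

use bytes::Bytes;
use minio::s3::response::GetObjectResponse;

// Hypothetical helper: collect the streamed object body into a single Bytes buffer.
async fn collect_body(resp: GetObjectResponse) -> Result<Bytes, Box<dyn std::error::Error>> {
    let body = resp.content()?; // taking the content may fail, hence the Result
    Ok(body.to_segmented_bytes().await?.to_bytes())
}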