diff --git a/.rustfmt.toml b/.rustfmt.toml
index 751c817..3ece47b 100644
--- a/.rustfmt.toml
+++ b/.rustfmt.toml
@@ -7,3 +7,5 @@ max_width = 100
 array_width = 60 # default is 60
 attr_fn_like_width = 70 # default is 70
 chain_width = 60 # default is 60
+
+reorder_imports = true
\ No newline at end of file
diff --git a/Cargo.toml b/Cargo.toml
index db3f178..615722a 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -53,6 +53,7 @@ tokio-stream = "0.1.17"
 tokio-util = { version = "0.7.13", features = ["io"] }
 urlencoding = "2.1.3"
 xmltree = "0.11.0"
+futures = "0.3.31"
 
 [dev-dependencies]
 async-std = { version = "1.13.0", features = ["attributes", "tokio1"] }
diff --git a/examples/common.rs b/examples/common.rs
index 8a393d1..8648c47 100644
--- a/examples/common.rs
+++ b/examples/common.rs
@@ -1,6 +1,7 @@
-use minio::s3::args::{BucketExistsArgs, MakeBucketArgs};
 use minio::s3::creds::StaticProvider;
 use minio::s3::http::BaseUrl;
+use minio::s3::response::BucketExistsResponse;
+use minio::s3::types::S3Api;
 use minio::s3::{Client, ClientBuilder};
 
 #[allow(dead_code)]
@@ -25,16 +26,11 @@ pub async fn create_bucket_if_not_exists(
     client: &Client,
 ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
     // Check 'bucket_name' bucket exist or not.
-    let exists: bool = client
-        .bucket_exists(&BucketExistsArgs::new(bucket_name).unwrap())
-        .await?;
+    let resp: BucketExistsResponse = client.bucket_exists(bucket_name).send().await?;
 
     // Make 'bucket_name' bucket if not exist.
-    if !exists {
-        client
-            .make_bucket(&MakeBucketArgs::new(bucket_name).unwrap())
-            .await
-            .unwrap();
+    if !resp.exists {
+        client.make_bucket(bucket_name).send().await.unwrap();
     };
     Ok(())
 }
diff --git a/examples/put_object.rs b/examples/put_object.rs
index 9ad0dc2..8ee306e 100644
--- a/examples/put_object.rs
+++ b/examples/put_object.rs
@@ -13,16 +13,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::path::PathBuf;
-
 use clap::Parser;
 use log::info;
-use minio::s3::{
-    args::{BucketExistsArgs, MakeBucketArgs},
-    builders::ObjectContent,
-    client::ClientBuilder,
-    creds::StaticProvider,
-};
+use minio::s3::response::BucketExistsResponse;
+use minio::s3::types::S3Api;
+use minio::s3::{builders::ObjectContent, client::ClientBuilder, creds::StaticProvider};
+use std::path::PathBuf;
 
 /// Upload a file to the given bucket and object path on the MinIO Play server.
#[derive(Parser)] @@ -49,16 +45,10 @@ async fn main() -> Result<(), Box> { .provider(Some(Box::new(static_provider))) .build()?; - let exists: bool = client - .bucket_exists(&BucketExistsArgs::new(&args.bucket).unwrap()) - .await - .unwrap(); + let resp: BucketExistsResponse = client.bucket_exists(&args.bucket).send().await.unwrap(); - if !exists { - client - .make_bucket(&MakeBucketArgs::new(&args.bucket).unwrap()) - .await - .unwrap(); + if !resp.exists { + client.make_bucket(&args.bucket).send().await.unwrap(); } let content = ObjectContent::from(args.file.as_path()); diff --git a/src/s3/args.rs b/src/s3/args.rs index e9a6610..3b3b305 100644 --- a/src/s3/args.rs +++ b/src/s3/args.rs @@ -18,9 +18,7 @@ use crate::s3::error::Error; use crate::s3::signer::post_presign_v4; use crate::s3::sse::{Sse, SseCustomerKey}; -use crate::s3::types::{ - Directive, ObjectLockConfig, Part, Retention, RetentionMode, SelectRequest, -}; +use crate::s3::types::{Directive, Retention, SelectRequest}; use crate::s3::utils::{ Multimap, UtcTime, b64encode, check_bucket_name, merge, to_amz_date, to_http_header_value, to_iso8601utc, to_signer_date, urlencode, utc_now, @@ -31,9 +29,9 @@ use serde_json::Value; use serde_json::json; use std::collections::HashMap; -pub const MIN_PART_SIZE: usize = 5_242_880; // 5 MiB -pub const MAX_PART_SIZE: usize = 5_368_709_120; // 5 GiB -pub const MAX_OBJECT_SIZE: usize = 5_497_558_138_880; // 5 TiB +pub const MIN_PART_SIZE: u64 = 5_242_880; // 5 MiB +pub const MAX_PART_SIZE: u64 = 5_368_709_120; // 5 GiB +pub const MAX_OBJECT_SIZE: u64 = 5_497_558_138_880; // 5 TiB pub const MAX_MULTIPART_COUNT: u16 = 10_000; pub const DEFAULT_EXPIRY_SECONDS: u32 = 604_800; // 7 days @@ -98,58 +96,6 @@ fn object_write_args_headers( map } -fn calc_part_info( - object_size: Option, - part_size: Option, -) -> Result<(usize, i16), Error> { - if let Some(v) = part_size { - if v < MIN_PART_SIZE { - return Err(Error::InvalidMinPartSize(v as u64)); - } - - if v > MAX_PART_SIZE { - return Err(Error::InvalidMaxPartSize(v as u64)); - } - } - - if let Some(v) = object_size { - if v > MAX_OBJECT_SIZE { - return Err(Error::InvalidObjectSize(v as u64)); - } - } else { - if part_size.is_none() { - return Err(Error::MissingPartSize); - } - - return Ok((part_size.unwrap(), -1)); - } - - let mut psize = 0_usize; - if part_size.is_none() { - psize = (object_size.unwrap() as f64 / MAX_MULTIPART_COUNT as f64).ceil() as usize; - psize = MIN_PART_SIZE * (psize as f64 / MIN_PART_SIZE as f64).ceil() as usize; - } - - if psize > object_size.unwrap() { - psize = object_size.unwrap(); - } - - let mut part_count = 1_i16; - if psize > 0 { - part_count = (object_size.unwrap() as f64 / psize as f64).ceil() as i16; - } - - if part_count as u16 > MAX_MULTIPART_COUNT { - return Err(Error::InvalidPartCount( - object_size.unwrap() as u64, - psize as u64, - MAX_MULTIPART_COUNT, - )); - } - - Ok((psize, part_count)) -} - #[derive(Clone, Debug, Default)] /// Base bucket argument pub struct BucketArgs<'a> { @@ -180,50 +126,6 @@ impl<'a> BucketArgs<'a> { } } -/// Argument for [bucket_exists()](crate::s3::client::Client::bucket_exists) API -pub type BucketExistsArgs<'a> = BucketArgs<'a>; - -/// Argument for [remove_bucket()](crate::s3::client::Client::remove_bucket) API -pub type RemoveBucketArgs<'a> = BucketArgs<'a>; - -#[derive(Clone, Debug, Default)] -/// Base object argument -pub struct ObjectArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: 
&'a str, - pub object: &'a str, -} - -impl<'a> ObjectArgs<'a> { - /// Returns a object argument with given bucket name and object name - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// let args = ObjectArgs::new("my-bucket", "my-object").unwrap(); - /// ``` - pub fn new(bucket_name: &'a str, object_name: &'a str) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - if object_name.is_empty() { - return Err(Error::InvalidObjectName(String::from( - "object name cannot be empty", - ))); - } - - Ok(ObjectArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object: object_name, - }) - } -} - #[derive(Clone, Debug, Default)] /// Base object argument with optional version ID pub struct ObjectVersionArgs<'a> { @@ -231,6 +133,7 @@ pub struct ObjectVersionArgs<'a> { pub extra_query_params: Option<&'a Multimap>, pub region: Option<&'a str>, pub bucket: &'a str, + pub object: &'a str, pub version_id: Option<&'a str>, } @@ -265,204 +168,6 @@ impl<'a> ObjectVersionArgs<'a> { } } -/// Argument for [remove_object()](crate::s3::client::Client::remove_object) API -pub type RemoveObjectArgs<'a> = ObjectVersionArgs<'a>; - -#[derive(Clone, Debug, Default)] -/// Argument for [make_bucket()](crate::s3::client::Client::make_bucket) API -pub struct MakeBucketArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub object_lock: bool, -} - -impl<'a> MakeBucketArgs<'a> { - /// Returns argument for [make_bucket()](crate::s3::client::Client::make_bucket) API with given bucket name - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// let args = MakeBucketArgs::new("my-bucket").unwrap(); - /// ``` - pub fn new(bucket_name: &'a str) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - Ok(MakeBucketArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object_lock: false, - }) - } -} - -#[derive(Clone, Debug, Default)] -/// Argument for [abort_multipart_upload()](crate::s3::client::Client::abort_multipart_upload) API -pub struct AbortMultipartUploadArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub object: &'a str, - pub upload_id: &'a str, -} - -impl<'a> AbortMultipartUploadArgs<'a> { - /// Returns argument for [abort_multipart_upload()](crate::s3::client::Client::abort_multipart_upload) API with given bucket name, object name and upload ID - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// let args = AbortMultipartUploadArgs::new( - /// "my-bucket", - /// "my-object", - /// "c53a2b73-f5e6-484a-9bc0-09cce13e8fd0", - /// ).unwrap(); - /// ``` - pub fn new( - bucket_name: &'a str, - object_name: &'a str, - upload_id: &'a str, - ) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - if object_name.is_empty() { - return Err(Error::InvalidObjectName(String::from( - "object name cannot be empty", - ))); - } - - if upload_id.is_empty() { - return Err(Error::InvalidUploadId(String::from( - "upload ID cannot be empty", - ))); - } - - Ok(AbortMultipartUploadArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object: object_name, - upload_id, - }) - } -} - -#[derive(Clone, Debug)] -/// Argument for [complete_multipart_upload()](crate::s3::client::Client::complete_multipart_upload) 
API -pub struct CompleteMultipartUploadArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub object: &'a str, - pub upload_id: &'a str, - pub parts: &'a Vec, -} - -impl<'a> CompleteMultipartUploadArgs<'a> { - /// Returns argument for [complete_multipart_upload()](crate::s3::client::Client::complete_multipart_upload) API with given bucket name, object name, upload ID and parts information - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// use minio::s3::types::Part; - /// let mut parts: Vec = Vec::new(); - /// parts.push(Part {number: 1, etag: String::from("0b2daaba1d0b52a15a98c7ab6927347a")}); - /// parts.push(Part {number: 2, etag: String::from("acc0485d88ec53f47b599e4e8998706d")}); - /// let args = CompleteMultipartUploadArgs::new( - /// "my-bucket", - /// "my-object", - /// "c53a2b73-f5e6-484a-9bc0-09cce13e8fd0", - /// &parts, - /// ).unwrap(); - /// ``` - pub fn new( - bucket_name: &'a str, - object_name: &'a str, - upload_id: &'a str, - parts: &'a Vec, - ) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - if object_name.is_empty() { - return Err(Error::InvalidObjectName(String::from( - "object name cannot be empty", - ))); - } - - if upload_id.is_empty() { - return Err(Error::InvalidUploadId(String::from( - "upload ID cannot be empty", - ))); - } - - if parts.is_empty() { - return Err(Error::EmptyParts(String::from("parts cannot be empty"))); - } - - Ok(CompleteMultipartUploadArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object: object_name, - upload_id, - parts, - }) - } -} - -#[derive(Clone, Debug, Default)] -/// Argument for [create_multipart_upload()](crate::s3::client::Client::create_multipart_upload) API -pub struct CreateMultipartUploadArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub object: &'a str, - pub headers: Option<&'a Multimap>, -} - -impl<'a> CreateMultipartUploadArgs<'a> { - /// Returns argument for [create_multipart_upload()](crate::s3::client::Client::create_multipart_upload) API with given bucket name and object name - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// let args = CreateMultipartUploadArgs::new("my-bucket", "my-object").unwrap(); - /// ``` - pub fn new( - bucket_name: &'a str, - object_name: &'a str, - ) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - if object_name.is_empty() { - return Err(Error::InvalidObjectName(String::from( - "object name cannot be empty", - ))); - } - - Ok(CreateMultipartUploadArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object: object_name, - headers: None, - }) - } -} - #[derive(Clone, Debug, Default)] /// Argument for [put_object_api()](crate::s3::client::Client::put_object_api) S3 API pub struct PutObjectApiArgs<'a> { @@ -627,93 +332,6 @@ impl<'a> UploadPartArgs<'a> { } } -/// Argument for [put_object()](crate::s3::client::Client::put_object) API -pub struct PutObjectArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub object: &'a str, - pub headers: Option<&'a Multimap>, - pub user_metadata: Option<&'a Multimap>, - pub sse: Option<&'a (dyn Sse + Send + Sync)>, - pub tags: Option<&'a HashMap>, - pub retention: 
Option<&'a Retention>, - pub legal_hold: bool, - pub object_size: Option, - pub part_size: usize, - pub part_count: i16, - pub content_type: &'a str, - pub stream: &'a mut dyn std::io::Read, -} - -impl<'a> PutObjectArgs<'a> { - /// Returns argument for [put_object()](crate::s3::client::Client::put_object) API with given bucket name, object name, stream, optional object size and optional part size - /// - /// * If stream size is known and wanted to create object with entire stream data, pass stream size as object size. - /// * If part size is omitted, this API calculates optimal part size for given object size. - /// - /// # Examples - /// - /// ```no_run - /// use minio::s3::args::*; - /// use std::fs::File; - /// let filename = "asiaphotos-2015.zip"; - /// let meta = std::fs::metadata(filename).unwrap(); - /// let object_size = Some(meta.len() as usize); - /// let mut file = File::open(filename).unwrap(); - /// let args = PutObjectArgs::new("my-bucket", "my-object", &mut file, object_size, None).unwrap(); - /// ``` - pub fn new( - bucket_name: &'a str, - object_name: &'a str, - stream: &'a mut dyn std::io::Read, - object_size: Option, - part_size: Option, - ) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - if object_name.is_empty() { - return Err(Error::InvalidObjectName(String::from( - "object name cannot be empty", - ))); - } - - let (psize, part_count) = calc_part_info(object_size, part_size)?; - - Ok(PutObjectArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object: object_name, - headers: None, - user_metadata: None, - sse: None, - tags: None, - retention: None, - legal_hold: false, - object_size, - part_size: psize, - part_count, - content_type: "application/octet-stream", - stream, - }) - } - - pub fn get_headers(&self) -> Multimap { - object_write_args_headers( - self.extra_headers, - self.headers, - self.user_metadata, - self.sse, - self.tags, - self.retention, - self.legal_hold, - ) - } -} - #[derive(Clone, Debug, Default)] /// Base argument for object conditional read APIs pub struct ObjectConditionalReadArgs<'a> { @@ -724,8 +342,8 @@ pub struct ObjectConditionalReadArgs<'a> { pub object: &'a str, pub version_id: Option<&'a str>, pub ssec: Option<&'a SseCustomerKey>, - pub offset: Option, - pub length: Option, + pub offset: Option, + pub length: Option, pub match_etag: Option<&'a str>, pub not_match_etag: Option<&'a str>, pub modified_since: Option, @@ -772,7 +390,7 @@ impl<'a> ObjectConditionalReadArgs<'a> { fn get_range_value(&self) -> String { let (offset, length) = match self.length { - Some(_) => (Some(self.offset.unwrap_or(0_usize)), self.length), + Some(_) => (Some(self.offset.unwrap_or(0_u64)), self.length), None => (self.offset, None), }; @@ -871,9 +489,6 @@ impl<'a> ObjectConditionalReadArgs<'a> { } } -/// Argument for [get_object()](crate::s3::client::Client::get_object) API -pub type GetObjectArgs<'a> = ObjectConditionalReadArgs<'a>; - /// Argument for [stat_object()](crate::s3::client::Client::stat_object) API pub type StatObjectArgs<'a> = ObjectConditionalReadArgs<'a>; @@ -1100,15 +715,15 @@ pub struct ComposeSource<'a> { pub object: &'a str, pub version_id: Option<&'a str>, pub ssec: Option<&'a SseCustomerKey>, - pub offset: Option, - pub length: Option, + pub offset: Option, + pub length: Option, pub match_etag: Option<&'a str>, pub not_match_etag: Option<&'a str>, pub modified_since: Option, pub unmodified_since: Option, - object_size: Option, // populated by build_headers() - headers: Option, // 
populated by build_headers() + object_size: Option, // populated by build_headers() + headers: Option, // populated by build_headers() } impl<'a> ComposeSource<'a> { @@ -1148,7 +763,7 @@ impl<'a> ComposeSource<'a> { }) } - pub fn get_object_size(&self) -> usize { + pub fn get_object_size(&self) -> u64 { self.object_size.expect("ABORT: ComposeSource::build_headers() must be called prior to this method invocation. This shoud not happen.") } @@ -1156,7 +771,7 @@ impl<'a> ComposeSource<'a> { self.headers.as_ref().expect("ABORT: ComposeSource::build_headers() must be called prior to this method invocation. This shoud not happen.").clone() } - pub fn build_headers(&mut self, object_size: usize, etag: String) -> Result<(), Error> { + pub fn build_headers(&mut self, object_size: u64, etag: String) -> Result<(), Error> { if let Some(v) = self.offset { if v >= object_size { return Err(Error::InvalidComposeSourceOffset( @@ -1314,179 +929,6 @@ impl<'a> ComposeObjectArgs<'a> { } } -/// Argument for [enable_object_legal_hold()](crate::s3::client::Client::enable_object_legal_hold) API -pub type EnableObjectLegalHoldArgs<'a> = ObjectVersionArgs<'a>; - -/// Argument for [disable_object_legal_hold()](crate::s3::client::Client::disable_object_legal_hold) API -pub type DisableObjectLegalHoldArgs<'a> = ObjectVersionArgs<'a>; - -/// Argument for [is_object_legal_hold_enabled()](crate::s3::client::Client::is_object_legal_hold_enabled) API -pub type IsObjectLegalHoldEnabledArgs<'a> = ObjectVersionArgs<'a>; - -/// Argument for [delete_object_lock_config()](crate::s3::client::Client::delete_object_lock_config) API -pub type DeleteObjectLockConfigArgs<'a> = BucketArgs<'a>; - -/// Argument for [get_object_lock_config()](crate::s3::client::Client::get_object_lock_config) API -pub type GetObjectLockConfigArgs<'a> = BucketArgs<'a>; - -/// Argument for [set_object_lock_config()](crate::s3::client::Client::set_object_lock_config) API -pub struct SetObjectLockConfigArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub config: &'a ObjectLockConfig, -} - -impl<'a> SetObjectLockConfigArgs<'a> { - /// Returns argument for [set_object_lock_config()](crate::s3::client::Client::set_object_lock_config) API with given bucket name and configuration - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// use minio::s3::types::*; - /// let config = ObjectLockConfig::new(RetentionMode::GOVERNANCE, Some(100), None).unwrap(); - /// let args = SetObjectLockConfigArgs::new("my-bucket", &config).unwrap(); - /// ``` - pub fn new( - bucket_name: &'a str, - config: &'a ObjectLockConfig, - ) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - Ok(SetObjectLockConfigArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - config, - }) - } -} - -/// Argument for [get_object_retention()](crate::s3::client::Client::get_object_retention) API -pub type GetObjectRetentionArgs<'a> = ObjectVersionArgs<'a>; - -/// Argument for [set_object_retention()](crate::s3::client::Client::set_object_retention) API -pub struct SetObjectRetentionArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub object: &'a str, - pub version_id: Option<&'a str>, - pub bypass_governance_mode: bool, - pub retention_mode: Option, - pub retain_until_date: Option, -} - -impl<'a> 
SetObjectRetentionArgs<'a> { - /// Returns argument for [set_object_retention()](crate::s3::client::Client::set_object_retention) API with given bucket name, object name, retention mode and retain-until date. - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// use minio::s3::types::RetentionMode; - /// use minio::s3::utils::*; - /// use chrono::Timelike; - /// let args = SetObjectRetentionArgs::new( - /// "my-bucket", - /// "my-object", - /// Some(RetentionMode::COMPLIANCE), - /// Some(utc_now().with_nanosecond(0).unwrap()), - /// ).unwrap(); - /// ``` - pub fn new( - bucket_name: &'a str, - object_name: &'a str, - retention_mode: Option, - retain_until_date: Option, - ) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - if object_name.is_empty() { - return Err(Error::InvalidObjectName(String::from( - "object name cannot be empty", - ))); - } - - if retention_mode.is_some() ^ retain_until_date.is_some() { - return Err(Error::InvalidRetentionConfig(String::from( - "both mode and retain_until_date must be set or unset", - ))); - } - - Ok(SetObjectRetentionArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object: object_name, - version_id: None, - bypass_governance_mode: false, - retention_mode, - retain_until_date, - }) - } -} - -/// Argument for [delete_object_tags()](crate::s3::client::Client::delete_object_tags) API -pub type DeleteObjectTagsArgs<'a> = ObjectVersionArgs<'a>; - -/// Argument for [get_object_tags()](crate::s3::client::Client::get_object_tags) API -pub type GetObjectTagsArgs<'a> = ObjectVersionArgs<'a>; - -/// Argument for [set_object_tags()](crate::s3::client::Client::set_object_tags) API -pub struct SetObjectTagsArgs<'a> { - pub extra_headers: Option<&'a Multimap>, - pub extra_query_params: Option<&'a Multimap>, - pub region: Option<&'a str>, - pub bucket: &'a str, - pub object: &'a str, - pub version_id: Option<&'a str>, - pub tags: &'a HashMap, -} - -impl<'a> SetObjectTagsArgs<'a> { - /// Returns argument for [set_object_tags()](crate::s3::client::Client::set_object_tags) API with given bucket name, object name and tags - /// - /// # Examples - /// - /// ``` - /// use minio::s3::args::*; - /// use std::collections::HashMap; - /// let mut tags: HashMap = HashMap::new(); - /// tags.insert(String::from("Project"), String::from("Project One")); - /// tags.insert(String::from("User"), String::from("jsmith")); - /// let args = SetObjectTagsArgs::new("my-bucket", "my-object", &tags).unwrap(); - /// ``` - pub fn new( - bucket_name: &'a str, - object_name: &'a str, - tags: &'a HashMap, - ) -> Result, Error> { - check_bucket_name(bucket_name, true)?; - - if object_name.is_empty() { - return Err(Error::InvalidObjectName(String::from( - "object name cannot be empty", - ))); - } - - Ok(SetObjectTagsArgs { - extra_headers: None, - extra_query_params: None, - region: None, - bucket: bucket_name, - object: object_name, - version_id: None, - tags, - }) - } -} - /// Argument for [get_presigned_object_url()](crate::s3::client::Client::get_presigned_object_url) API pub struct GetPresignedObjectUrlArgs<'a> { pub extra_query_params: Option<&'a Multimap>, diff --git a/src/s3/builders.rs b/src/s3/builders.rs index 8bc025d..f8dc8fc 100644 --- a/src/s3/builders.rs +++ b/src/s3/builders.rs @@ -16,12 +16,17 @@ //! 
Argument builders for [minio::s3::client::Client](crate::s3::client::Client) APIs mod bucket_common; +mod bucket_exists; mod delete_bucket_encryption; mod delete_bucket_lifecycle; mod delete_bucket_notification; mod delete_bucket_policy; mod delete_bucket_replication; mod delete_bucket_tags; +mod delete_object_lock_config; +mod delete_object_tags; +mod disable_object_legal_hold; +mod enable_object_legal_hold; mod get_bucket_encryption; mod get_bucket_lifecycle; mod get_bucket_notification; @@ -30,12 +35,18 @@ mod get_bucket_replication; mod get_bucket_tags; mod get_bucket_versioning; mod get_object; +mod get_object_lock_config; +mod get_object_retention; +mod get_object_tags; +mod is_object_legal_hold_enabled; mod list_buckets; mod list_objects; mod listen_bucket_notification; +mod make_bucket; mod object_content; mod object_prompt; mod put_object; +mod remove_bucket; mod remove_objects; mod set_bucket_encryption; mod set_bucket_lifecycle; @@ -44,14 +55,22 @@ mod set_bucket_policy; mod set_bucket_replication; mod set_bucket_tags; mod set_bucket_versioning; +mod set_object_lock_config; +mod set_object_retention; +mod set_object_tags; pub use bucket_common::*; +pub use bucket_exists::*; pub use delete_bucket_encryption::*; pub use delete_bucket_lifecycle::*; pub use delete_bucket_notification::*; pub use delete_bucket_policy::*; pub use delete_bucket_replication::*; pub use delete_bucket_tags::*; +pub use delete_object_lock_config::*; +pub use delete_object_tags::*; +pub use disable_object_legal_hold::*; +pub use enable_object_legal_hold::*; pub use get_bucket_encryption::*; pub use get_bucket_lifecycle::*; pub use get_bucket_notification::*; @@ -60,12 +79,18 @@ pub use get_bucket_replication::*; pub use get_bucket_tags::*; pub use get_bucket_versioning::*; pub use get_object::*; +pub use get_object_lock_config::*; +pub use get_object_retention::*; +pub use get_object_tags::*; +pub use is_object_legal_hold_enabled::*; pub use list_buckets::*; pub use list_objects::*; pub use listen_bucket_notification::*; +pub use make_bucket::*; pub use object_content::*; pub use object_prompt::*; pub use put_object::*; +pub use remove_bucket::*; pub use remove_objects::*; pub use set_bucket_encryption::*; pub use set_bucket_lifecycle::*; @@ -74,3 +99,6 @@ pub use set_bucket_policy::*; pub use set_bucket_replication::*; pub use set_bucket_tags::*; pub use set_bucket_versioning::*; +pub use set_object_lock_config::*; +pub use set_object_retention::*; +pub use set_object_tags::*; diff --git a/src/s3/builders/bucket_exists.rs b/src/s3/builders/bucket_exists.rs new file mode 100644 index 0000000..2847279 --- /dev/null +++ b/src/s3/builders/bucket_exists.rs @@ -0,0 +1,61 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+use crate::s3::Client;
+use crate::s3::builders::BucketCommon;
+use crate::s3::error::Error;
+use crate::s3::response::BucketExistsResponse;
+use crate::s3::types::{S3Api, S3Request, ToS3Request};
+use crate::s3::utils::check_bucket_name;
+use http::Method;
+
+/// Argument builder for [bucket_exists()](Client::bucket_exists) API
+pub type BucketExists = BucketCommon<BucketExistsPhantomData>;
+
+#[derive(Default, Debug)]
+pub struct BucketExistsPhantomData;
+
+impl S3Api for BucketExists {
+    type S3Response = BucketExistsResponse;
+}
+
+impl ToS3Request for BucketExists {
+    fn to_s3request(&self) -> Result<S3Request, Error> {
+        check_bucket_name(&self.bucket, true)?;
+
+        let headers = self
+            .extra_headers
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+        let query_params = self
+            .extra_query_params
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?;
+
+        let req = S3Request::new(client, Method::HEAD)
+            .region(self.region.as_deref())
+            .bucket(Some(&self.bucket))
+            .query_params(query_params)
+            .headers(headers);
+
+        Ok(req)
+    }
+}
diff --git a/src/s3/builders/delete_object_lock_config.rs b/src/s3/builders/delete_object_lock_config.rs
new file mode 100644
index 0000000..97c668a
--- /dev/null
+++ b/src/s3/builders/delete_object_lock_config.rs
@@ -0,0 +1,74 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +use crate::s3::Client; +use crate::s3::builders::{BucketCommon, SegmentedBytes}; +use crate::s3::error::Error; +use crate::s3::response::DeleteObjectLockConfigResponse; +use crate::s3::types::{ObjectLockConfig, S3Api, S3Request, ToS3Request}; +use crate::s3::utils::check_bucket_name; +use bytes::Bytes; +use http::Method; + +/// Argument builder for [delete_object_lock_config()](Client::delete_object_lock_config) API +pub type DeleteObjectLockConfig = BucketCommon; + +#[derive(Default, Debug)] +pub struct DeleteObjectLockConfigPhantomData; + +impl S3Api for DeleteObjectLockConfig { + type S3Response = DeleteObjectLockConfigResponse; +} + +impl ToS3Request for DeleteObjectLockConfig { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + query_params.insert(String::from("object-lock"), String::new()); + + let config = ObjectLockConfig { + retention_mode: None, + retention_duration_days: None, + retention_duration_years: None, + }; + let bytes: Bytes = config.to_xml().into(); + let body: Option = Some(SegmentedBytes::from(bytes)); + //TODO consider const body + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::PUT) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .headers(headers) + .body(body); + + Ok(req) + } +} diff --git a/src/s3/builders/delete_object_tags.rs b/src/s3/builders/delete_object_tags.rs new file mode 100644 index 0000000..0d154f6 --- /dev/null +++ b/src/s3/builders/delete_object_tags.rs @@ -0,0 +1,112 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::Client; +use crate::s3::error::Error; +use crate::s3::response::DeleteObjectTagsResponse; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{Multimap, check_bucket_name}; +use http::Method; + +/// Argument builder for [delete_object_tags()](Client::delete_object_tags) API +#[derive(Clone, Debug, Default)] +pub struct DeleteObjectTags { + pub client: Option, + + pub extra_headers: Option, + pub extra_query_params: Option, + pub region: Option, + pub bucket: String, + + pub object: String, + pub version_id: Option, +} + +impl DeleteObjectTags { + pub fn new(bucket: &str) -> Self { + Self { + bucket: bucket.to_owned(), + ..Default::default() + } + } + pub fn client(mut self, client: &Client) -> Self { + self.client = Some(client.clone()); + self + } + + pub fn extra_headers(mut self, extra_headers: Option) -> Self { + self.extra_headers = extra_headers; + self + } + + pub fn extra_query_params(mut self, extra_query_params: Option) -> Self { + self.extra_query_params = extra_query_params; + self + } + + pub fn region(mut self, region: Option) -> Self { + self.region = region; + self + } + + pub fn object(mut self, object: String) -> Self { + self.object = object; + self + } + + pub fn version_id(mut self, version_id: Option) -> Self { + self.version_id = version_id; + self + } +} + +impl S3Api for DeleteObjectTags { + type S3Response = DeleteObjectTagsResponse; +} + +impl ToS3Request for DeleteObjectTags { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + if let Some(v) = &self.version_id { + query_params.insert(String::from("versionId"), v.to_string()); + } + query_params.insert("tagging".into(), String::new()); + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::DELETE) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .object(Some(&self.object)) + .headers(headers); + + Ok(req) + } +} diff --git a/src/s3/builders/disable_object_legal_hold.rs b/src/s3/builders/disable_object_legal_hold.rs new file mode 100644 index 0000000..ddf65be --- /dev/null +++ b/src/s3/builders/disable_object_legal_hold.rs @@ -0,0 +1,116 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::Client; +use crate::s3::builders::SegmentedBytes; +use crate::s3::error::Error; +use crate::s3::response::DisableObjectLegalHoldResponse; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{Multimap, check_bucket_name, md5sum_hash}; +use bytes::Bytes; +use http::Method; + +/// Argument builder for [disable_object_legal_hold()](Client::disable_object_legal_hold) API +#[derive(Clone, Debug, Default)] +pub struct DisableObjectLegalHold { + pub(crate) client: Option, + + pub(crate) extra_headers: Option, + pub(crate) extra_query_params: Option, + pub(crate) region: Option, + pub(crate) bucket: String, + + pub(crate) object: String, + pub(crate) version_id: Option, +} + +impl DisableObjectLegalHold { + pub fn new(bucket: &str) -> Self { + Self { + bucket: bucket.to_owned(), + ..Default::default() + } + } + + pub fn client(mut self, client: &Client) -> Self { + self.client = Some(client.clone()); + self + } + + pub fn extra_headers(mut self, extra_headers: Option) -> Self { + self.extra_headers = extra_headers; + self + } + + pub fn extra_query_params(mut self, extra_query_params: Option) -> Self { + self.extra_query_params = extra_query_params; + self + } + + pub fn object(mut self, object: String) -> Self { + self.object = object; + self + } + + pub fn version_id(mut self, version_id: Option) -> Self { + self.version_id = version_id; + self + } +} + +impl S3Api for DisableObjectLegalHold { + type S3Response = DisableObjectLegalHoldResponse; +} + +impl ToS3Request for DisableObjectLegalHold { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let mut headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + if let Some(v) = &self.version_id { + query_params.insert(String::from("versionId"), v.to_string()); + } + query_params.insert(String::from("legal-hold"), String::new()); + + const PAYLOAD: &str = "OFF"; + headers.insert(String::from("Content-MD5"), md5sum_hash(PAYLOAD.as_ref())); + let body: Option = Some(SegmentedBytes::from(Bytes::from(PAYLOAD))); + //TODO consider const body + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::PUT) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .headers(headers) + .object(Some(&self.object)) + .body(body); + + Ok(req) + } +} diff --git a/src/s3/builders/enable_object_legal_hold.rs b/src/s3/builders/enable_object_legal_hold.rs new file mode 100644 index 0000000..6f7ab4c --- /dev/null +++ b/src/s3/builders/enable_object_legal_hold.rs @@ -0,0 +1,116 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::Client; +use crate::s3::builders::SegmentedBytes; +use crate::s3::error::Error; +use crate::s3::response::EnableObjectLegalHoldResponse; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{Multimap, check_bucket_name, md5sum_hash}; +use bytes::Bytes; +use http::Method; + +/// Argument builder for [enable_object_legal_hold()](Client::enable_object_legal_hold) API +#[derive(Clone, Debug, Default)] +pub struct EnableObjectLegalHold { + pub(crate) client: Option, + + pub(crate) extra_headers: Option, + pub(crate) extra_query_params: Option, + pub(crate) region: Option, + pub(crate) bucket: String, + + pub(crate) object: String, + pub(crate) version_id: Option, +} + +impl EnableObjectLegalHold { + pub fn new(bucket: &str) -> Self { + Self { + bucket: bucket.to_owned(), + ..Default::default() + } + } + + pub fn client(mut self, client: &Client) -> Self { + self.client = Some(client.clone()); + self + } + + pub fn extra_headers(mut self, extra_headers: Option) -> Self { + self.extra_headers = extra_headers; + self + } + + pub fn extra_query_params(mut self, extra_query_params: Option) -> Self { + self.extra_query_params = extra_query_params; + self + } + + pub fn object(mut self, object: String) -> Self { + self.object = object; + self + } + + pub fn version_id(mut self, version_id: Option) -> Self { + self.version_id = version_id; + self + } +} + +impl S3Api for EnableObjectLegalHold { + type S3Response = EnableObjectLegalHoldResponse; +} + +impl ToS3Request for EnableObjectLegalHold { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let mut headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + if let Some(v) = &self.version_id { + query_params.insert(String::from("versionId"), v.to_string()); + } + query_params.insert(String::from("legal-hold"), String::new()); + + const PAYLOAD: &str = "ON"; + headers.insert(String::from("Content-MD5"), md5sum_hash(PAYLOAD.as_ref())); + let body: Option = Some(SegmentedBytes::from(Bytes::from(PAYLOAD))); + //TODO consider const body + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::PUT) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .headers(headers) + .object(Some(&self.object)) + .body(body); + + Ok(req) + } +} diff --git a/src/s3/builders/get_object_lock_config.rs b/src/s3/builders/get_object_lock_config.rs new file mode 100644 index 0000000..8ef81c6 --- /dev/null +++ b/src/s3/builders/get_object_lock_config.rs @@ -0,0 +1,63 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::Client; +use crate::s3::builders::BucketCommon; +use crate::s3::error::Error; +use crate::s3::response::GetObjectLockConfigResponse; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::check_bucket_name; +use http::Method; + +/// Argument builder for [get_object_lock_config()](Client::get_object_lock_config) API +pub type GetObjectLockConfig = BucketCommon; + +#[derive(Default, Debug)] +pub struct GetObjectLockConfigPhantomData; + +impl S3Api for GetObjectLockConfig { + type S3Response = GetObjectLockConfigResponse; +} + +impl ToS3Request for GetObjectLockConfig { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + query_params.insert(String::from("object-lock"), String::new()); + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::GET) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .headers(headers); + + Ok(req) + } +} diff --git a/src/s3/builders/get_object_retention.rs b/src/s3/builders/get_object_retention.rs new file mode 100644 index 0000000..f4c8596 --- /dev/null +++ b/src/s3/builders/get_object_retention.rs @@ -0,0 +1,112 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::Client; +use crate::s3::error::Error; +use crate::s3::response::GetObjectRetentionResponse; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{Multimap, check_bucket_name}; +use http::Method; + +/// Argument builder for [get_object_retention()](Client::get_object_retention) API +#[derive(Clone, Debug, Default)] +pub struct GetObjectRetention { + pub client: Option, + + pub extra_headers: Option, + pub extra_query_params: Option, + pub region: Option, + pub bucket: String, + + pub object: String, + pub version_id: Option, +} + +impl GetObjectRetention { + pub fn new(bucket: &str) -> Self { + Self { + bucket: bucket.to_owned(), + ..Default::default() + } + } + pub fn client(mut self, client: &Client) -> Self { + self.client = Some(client.clone()); + self + } + + pub fn extra_headers(mut self, extra_headers: Option) -> Self { + self.extra_headers = extra_headers; + self + } + + pub fn extra_query_params(mut self, extra_query_params: Option) -> Self { + self.extra_query_params = extra_query_params; + self + } + + pub fn region(mut self, region: Option) -> Self { + self.region = region; + self + } + + pub fn object(mut self, object: String) -> Self { + self.object = object; + self + } + + pub fn version_id(mut self, version_id: Option) -> Self { + self.version_id = version_id; + self + } +} + +impl S3Api for GetObjectRetention { + type S3Response = GetObjectRetentionResponse; +} + +impl ToS3Request for GetObjectRetention { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + if let Some(v) = &self.version_id { + query_params.insert(String::from("versionId"), v.to_string()); + } + query_params.insert(String::from("retention"), String::new()); + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::GET) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .headers(headers) + .object(Some(&self.object)); + + Ok(req) + } +} diff --git a/src/s3/builders/get_object_tags.rs b/src/s3/builders/get_object_tags.rs new file mode 100644 index 0000000..a935fd0 --- /dev/null +++ b/src/s3/builders/get_object_tags.rs @@ -0,0 +1,112 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::Client; +use crate::s3::error::Error; +use crate::s3::response::GetObjectTagsResponse; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{Multimap, check_bucket_name}; +use http::Method; + +/// Argument builder for [get_object_tags()](Client::get_object_tags) API +#[derive(Clone, Debug, Default)] +pub struct GetObjectTags { + pub client: Option, + + pub extra_headers: Option, + pub extra_query_params: Option, + pub region: Option, + pub bucket: String, + + pub object: String, + pub version_id: Option, +} + +impl GetObjectTags { + pub fn new(bucket: &str) -> Self { + Self { + bucket: bucket.to_owned(), + ..Default::default() + } + } + pub fn client(mut self, client: &Client) -> Self { + self.client = Some(client.clone()); + self + } + + pub fn extra_headers(mut self, extra_headers: Option) -> Self { + self.extra_headers = extra_headers; + self + } + + pub fn extra_query_params(mut self, extra_query_params: Option) -> Self { + self.extra_query_params = extra_query_params; + self + } + + pub fn region(mut self, region: Option) -> Self { + self.region = region; + self + } + + pub fn object(mut self, object: String) -> Self { + self.object = object; + self + } + + pub fn version_id(mut self, version_id: Option) -> Self { + self.version_id = version_id; + self + } +} + +impl S3Api for GetObjectTags { + type S3Response = GetObjectTagsResponse; +} + +impl ToS3Request for GetObjectTags { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + if let Some(v) = &self.version_id { + query_params.insert(String::from("versionId"), v.to_string()); + } + query_params.insert("tagging".into(), String::new()); + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::GET) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .object(Some(&self.object)) + .headers(headers); + + Ok(req) + } +} diff --git a/src/s3/builders/is_object_legal_hold_enabled.rs b/src/s3/builders/is_object_legal_hold_enabled.rs new file mode 100644 index 0000000..737ac6f --- /dev/null +++ b/src/s3/builders/is_object_legal_hold_enabled.rs @@ -0,0 +1,108 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::Client; +use crate::s3::error::Error; +use crate::s3::response::IsObjectLegalHoldEnabledResponse; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{Multimap, check_bucket_name}; +use http::Method; + +/// Argument builder for [is_object_legal_hold_enabled()](Client::is_object_legal_hold_enabled) API +#[derive(Clone, Debug, Default)] +pub struct IsObjectLegalHoldEnabled { + pub(crate) client: Option, + + pub(crate) extra_headers: Option, + pub(crate) extra_query_params: Option, + pub(crate) region: Option, + pub(crate) bucket: String, + + pub(crate) object: String, + pub(crate) version_id: Option, +} + +impl IsObjectLegalHoldEnabled { + pub fn new(bucket: &str) -> Self { + Self { + bucket: bucket.to_owned(), + ..Default::default() + } + } + + pub fn client(mut self, client: &Client) -> Self { + self.client = Some(client.clone()); + self + } + + pub fn extra_headers(mut self, extra_headers: Option) -> Self { + self.extra_headers = extra_headers; + self + } + + pub fn extra_query_params(mut self, extra_query_params: Option) -> Self { + self.extra_query_params = extra_query_params; + self + } + + pub fn object(mut self, object: String) -> Self { + self.object = object; + self + } + + pub fn version_id(mut self, version_id: Option) -> Self { + self.version_id = version_id; + self + } +} + +impl S3Api for IsObjectLegalHoldEnabled { + type S3Response = IsObjectLegalHoldEnabledResponse; +} + +impl ToS3Request for IsObjectLegalHoldEnabled { + fn to_s3request(&self) -> Result { + check_bucket_name(&self.bucket, true)?; + + let headers = self + .extra_headers + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + let mut query_params = self + .extra_query_params + .as_ref() + .filter(|v| !v.is_empty()) + .cloned() + .unwrap_or_default(); + + if let Some(v) = &self.version_id { + query_params.insert(String::from("versionId"), v.to_string()); + } + query_params.insert(String::from("legal-hold"), String::new()); + + let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?; + + let req = S3Request::new(client, Method::GET) + .region(self.region.as_deref()) + .bucket(Some(&self.bucket)) + .query_params(query_params) + .headers(headers) + .object(Some(&self.object)); + + Ok(req) + } +} diff --git a/src/s3/builders/make_bucket.rs b/src/s3/builders/make_bucket.rs new file mode 100644 index 0000000..136c854 --- /dev/null +++ b/src/s3/builders/make_bucket.rs @@ -0,0 +1,150 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+use crate::s3::Client;
+use crate::s3::builders::SegmentedBytes;
+use crate::s3::client::DEFAULT_REGION;
+use crate::s3::error::Error;
+use crate::s3::http::BaseUrl;
+use crate::s3::response::MakeBucketResponse;
+use crate::s3::types::{S3Api, S3Request, ToS3Request};
+use crate::s3::utils::{Multimap, check_bucket_name};
+use http::Method;
+
+/// Argument builder for [make_bucket()](Client::make_bucket) API
+#[derive(Clone, Debug, Default)]
+pub struct MakeBucket {
+    pub client: Option<Client>,
+
+    pub extra_headers: Option<Multimap>,
+    pub extra_query_params: Option<Multimap>,
+    pub region: Option<String>,
+    pub bucket: String,
+
+    pub object_lock: bool,
+}
+
+impl MakeBucket {
+    pub fn new(bucket: &str) -> Self {
+        Self {
+            bucket: bucket.to_owned(),
+            ..Default::default()
+        }
+    }
+
+    pub fn client(mut self, client: &Client) -> Self {
+        self.client = Some(client.clone());
+        self
+    }
+
+    pub fn extra_headers(mut self, extra_headers: Option<Multimap>) -> Self {
+        self.extra_headers = extra_headers;
+        self
+    }
+
+    pub fn extra_query_params(mut self, extra_query_params: Option<Multimap>) -> Self {
+        self.extra_query_params = extra_query_params;
+        self
+    }
+
+    pub fn region(mut self, region: Option<String>) -> Self {
+        self.region = region;
+        self
+    }
+
+    pub fn object_lock(mut self, object_lock: bool) -> Self {
+        self.object_lock = object_lock;
+        self
+    }
+}
+
+#[derive(Default, Debug)]
+pub struct MakeBucketPhantomData;
+
+impl S3Api for MakeBucket {
+    type S3Response = MakeBucketResponse;
+}
+
+impl ToS3Request for MakeBucket {
+    fn to_s3request(&self) -> Result<S3Request, Error> {
+        check_bucket_name(&self.bucket, true)?;
+
+        let base_url: &BaseUrl = match &self.client {
+            None => return Err(Error::NoClientProvided),
+            Some(c) => &c.base_url,
+        };
+
+        let region1: Option<&str> = self.region.as_deref();
+        let region2: Option<&str> = if base_url.region.is_empty() {
+            None
+        } else {
+            Some(base_url.region.as_str())
+        };
+
+        let region: &str = match (region1, region2) {
+            (None, None) => DEFAULT_REGION,
+            (Some(r), None) | (None, Some(r)) => r, // Take the non-None value
+            (Some(r1), Some(r2)) if r1 == r2 => r1, // Both are Some and equal
+            (Some(r1), Some(r2)) => {
+                return Err(Error::RegionMismatch(r1.to_string(), r2.to_string()));
+            }
+        };
+
+        let mut headers: Multimap = self
+            .extra_headers
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        if self.object_lock {
+            headers.insert(
+                String::from("x-amz-bucket-object-lock-enabled"),
+                String::from("true"),
+            );
+        }
+
+        let query_params: Multimap = self
+            .extra_query_params
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        let data: String = match region {
+            DEFAULT_REGION => String::new(),
+            _ => format!(
+                "<CreateBucketConfiguration><LocationConstraint>{}</LocationConstraint></CreateBucketConfiguration>",
+                region
+            ),
+        };
+
+        let body: Option<SegmentedBytes> = match data.is_empty() {
+            true => None,
+            false => Some(SegmentedBytes::from(data)),
+        };
+
+        let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?;
+
+        let req = S3Request::new(client, Method::PUT)
+            .region(Some(region))
+            .bucket(Some(&self.bucket))
+            .query_params(query_params)
+            .headers(headers)
+            .body(body);
+
+        Ok(req)
+    }
+}
diff --git a/src/s3/builders/object_content.rs b/src/s3/builders/object_content.rs
index bbe3dca..78fb4bf 100644
--- a/src/s3/builders/object_content.rs
+++ b/src/s3/builders/object_content.rs
@@ -353,7 +353,7 @@ impl SegmentedBytes {
         }
     }
 
-    // Copy all the content into a single `Bytes` object.
+    /// Copy all the content into a single `Bytes` object.
     pub fn to_bytes(&self) -> Bytes {
         let mut buf = BytesMut::with_capacity(self.total_size);
         for segment in &self.segments {
@@ -449,3 +449,9 @@ impl From<Bytes> for SegmentedBytes {
         sb
     }
 }
+
+impl From<String> for SegmentedBytes {
+    fn from(s: String) -> Self {
+        SegmentedBytes::from(Bytes::from(s))
+    }
+}
diff --git a/src/s3/builders/put_object.rs b/src/s3/builders/put_object.rs
index 5512f0b..00f0b8a 100644
--- a/src/s3/builders/put_object.rs
+++ b/src/s3/builders/put_object.rs
@@ -835,8 +835,8 @@ impl PutObjectContent {
             let res = po.send().await?;
             return Ok(PutObjectContentResponse {
                 headers: res.headers,
-                bucket_name: self.bucket,
-                object_name: self.object,
+                bucket: self.bucket,
+                object: self.object,
                 location: res.location,
                 object_size: size,
                 etag: res.etag,
@@ -951,8 +951,8 @@ impl PutObjectContent {
         let res = complete_mpu.send().await?;
         Ok(PutObjectContentResponse {
             headers: res.headers,
-            bucket_name: self.bucket.clone(),
-            object_name: self.object.clone(),
+            bucket: self.bucket.clone(),
+            object: self.object.clone(),
             location: res.location,
             object_size: size,
             etag: res.etag,
diff --git a/src/s3/builders/remove_bucket.rs b/src/s3/builders/remove_bucket.rs
new file mode 100644
index 0000000..fcebd41
--- /dev/null
+++ b/src/s3/builders/remove_bucket.rs
@@ -0,0 +1,61 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
diff --git a/src/s3/builders/remove_bucket.rs b/src/s3/builders/remove_bucket.rs
new file mode 100644
index 0000000..fcebd41
--- /dev/null
+++ b/src/s3/builders/remove_bucket.rs
@@ -0,0 +1,61 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::s3::Client;
+use crate::s3::builders::BucketCommon;
+use crate::s3::error::Error;
+use crate::s3::response::RemoveBucketResponse;
+use crate::s3::types::{S3Api, S3Request, ToS3Request};
+use crate::s3::utils::check_bucket_name;
+use http::Method;
+
+/// Argument builder for [remove_bucket()](Client::remove_bucket) API
+pub type RemoveBucket = BucketCommon<RemoveBucketPhantomData>;
+
+#[derive(Default, Debug)]
+pub struct RemoveBucketPhantomData;
+
+impl S3Api for RemoveBucket {
+    type S3Response = RemoveBucketResponse;
+}
+
+impl ToS3Request for RemoveBucket {
+    fn to_s3request(&self) -> Result<S3Request, Error> {
+        check_bucket_name(&self.bucket, true)?;
+
+        let headers = self
+            .extra_headers
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+        let query_params = self
+            .extra_query_params
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?;
+
+        let req = S3Request::new(client, Method::DELETE)
+            .region(self.region.as_deref())
+            .bucket(Some(&self.bucket))
+            .query_params(query_params)
+            .headers(headers);
+
+        Ok(req)
+    }
+}
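Usage sketch for the new `RemoveBucket` alias (assumes a configured `client`; bucket name illustrative):

```rust
use minio::s3::types::S3Api;

// Deleting a bucket is now a builder as well; extra headers/query params and a
// region override are available through the shared BucketCommon fields.
let _resp = client.remove_bucket("example-bucket").send().await?;
```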
diff --git a/src/s3/builders/set_bucket_policy.rs b/src/s3/builders/set_bucket_policy.rs
index 7e26fd2..81e4555 100644
--- a/src/s3/builders/set_bucket_policy.rs
+++ b/src/s3/builders/set_bucket_policy.rs
@@ -16,7 +16,7 @@
 use crate::s3::Client;
 use crate::s3::builders::SegmentedBytes;
 use crate::s3::error::Error;
-use crate::s3::response::SetBucketLifecycleResponse;
+use crate::s3::response::SetBucketPolicyResponse;
 use crate::s3::types::{S3Api, S3Request, ToS3Request};
 use crate::s3::utils::{Multimap, check_bucket_name};
 use bytes::Bytes;
@@ -69,7 +69,7 @@ impl SetBucketPolicy {
 }
 
 impl S3Api for SetBucketPolicy {
-    type S3Response = SetBucketLifecycleResponse;
+    type S3Response = SetBucketPolicyResponse;
 }
 
 impl ToS3Request for SetBucketPolicy {
diff --git a/src/s3/builders/set_object_lock_config.rs b/src/s3/builders/set_object_lock_config.rs
new file mode 100644
index 0000000..c9c9894
--- /dev/null
+++ b/src/s3/builders/set_object_lock_config.rs
@@ -0,0 +1,110 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::s3::Client;
+use crate::s3::builders::SegmentedBytes;
+use crate::s3::error::Error;
+use crate::s3::response::SetObjectLockConfigResponse;
+use crate::s3::types::{ObjectLockConfig, S3Api, S3Request, ToS3Request};
+use crate::s3::utils::{Multimap, check_bucket_name};
+use bytes::Bytes;
+use http::Method;
+
+/// Argument builder for [set_object_lock_config()](Client::set_object_lock_config) API
+
+#[derive(Clone, Debug, Default)]
+pub struct SetObjectLockConfig {
+    pub(crate) client: Option<Client>,
+
+    pub(crate) extra_headers: Option<Multimap>,
+    pub(crate) extra_query_params: Option<Multimap>,
+    pub(crate) region: Option<String>,
+    pub(crate) bucket: String,
+
+    pub(crate) config: ObjectLockConfig,
+}
+
+impl SetObjectLockConfig {
+    pub fn new(bucket: &str) -> Self {
+        Self {
+            bucket: bucket.to_owned(),
+            ..Default::default()
+        }
+    }
+
+    pub fn client(mut self, client: &Client) -> Self {
+        self.client = Some(client.clone());
+        self
+    }
+
+    pub fn extra_headers(mut self, extra_headers: Option<Multimap>) -> Self {
+        self.extra_headers = extra_headers;
+        self
+    }
+
+    pub fn extra_query_params(mut self, extra_query_params: Option<Multimap>) -> Self {
+        self.extra_query_params = extra_query_params;
+        self
+    }
+
+    pub fn region(mut self, region: Option<String>) -> Self {
+        self.region = region;
+        self
+    }
+
+    pub fn config(mut self, config: ObjectLockConfig) -> Self {
+        self.config = config;
+        self
+    }
+}
+
+impl S3Api for SetObjectLockConfig {
+    type S3Response = SetObjectLockConfigResponse;
+}
+
+impl ToS3Request for SetObjectLockConfig {
+    fn to_s3request(&self) -> Result<S3Request, Error> {
+        check_bucket_name(&self.bucket, true)?;
+
+        let headers = self
+            .extra_headers
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+        let mut query_params = self
+            .extra_query_params
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        query_params.insert(String::from("object-lock"), String::new());
+
+        let bytes: Bytes = self.config.to_xml().into();
+        let body: Option<SegmentedBytes> = Some(SegmentedBytes::from(bytes));
+
+        let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?;
+
+        let req = S3Request::new(client, Method::PUT)
+            .region(self.region.as_deref())
+            .bucket(Some(&self.bucket))
+            .query_params(query_params)
+            .headers(headers)
+            .body(body);
+
+        Ok(req)
+    }
+}
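A hedged sketch combining the object-lock builders, assuming the new `GetObjectLockConfigResponse` still exposes a `config` field like the response type it replaces (bucket names illustrative):

```rust
use minio::s3::types::S3Api;

// Copy the object-lock configuration from one bucket to another.
let src = client.get_object_lock_config("source-bucket").send().await?;
let _resp = client
    .set_object_lock_config("target-bucket")
    .config(src.config)
    .send()
    .await?;
```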
diff --git a/src/s3/builders/set_object_retention.rs b/src/s3/builders/set_object_retention.rs
new file mode 100644
index 0000000..edef5b3
--- /dev/null
+++ b/src/s3/builders/set_object_retention.rs
@@ -0,0 +1,172 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::s3::Client;
+use crate::s3::builders::SegmentedBytes;
+use crate::s3::error::Error;
+use crate::s3::response::SetObjectRetentionResponse;
+use crate::s3::types::{RetentionMode, S3Api, S3Request, ToS3Request};
+use crate::s3::utils::{Multimap, UtcTime, check_bucket_name, md5sum_hash, to_iso8601utc};
+use bytes::Bytes;
+use http::Method;
+
+/// Argument builder for [set_object_retention()](Client::set_object_retention) API
+#[derive(Clone, Debug, Default)]
+pub struct SetObjectRetention {
+    pub client: Option<Client>,
+
+    pub extra_headers: Option<Multimap>,
+    pub extra_query_params: Option<Multimap>,
+    pub region: Option<String>,
+    pub bucket: String,
+
+    pub object: String,
+    pub version_id: Option<String>,
+    pub bypass_governance_mode: bool,
+    pub retention_mode: Option<RetentionMode>,
+    pub retain_until_date: Option<UtcTime>,
+}
+
+impl SetObjectRetention {
+    pub fn new(bucket: &str) -> Self {
+        Self {
+            bucket: bucket.to_owned(),
+            bypass_governance_mode: false,
+            ..Default::default()
+        }
+    }
+
+    pub fn client(mut self, client: &Client) -> Self {
+        self.client = Some(client.clone());
+        self
+    }
+
+    pub fn extra_headers(mut self, extra_headers: Option<Multimap>) -> Self {
+        self.extra_headers = extra_headers;
+        self
+    }
+
+    pub fn extra_query_params(mut self, extra_query_params: Option<Multimap>) -> Self {
+        self.extra_query_params = extra_query_params;
+        self
+    }
+
+    pub fn region(mut self, region: Option<String>) -> Self {
+        self.region = region;
+        self
+    }
+
+    pub fn object(mut self, object: String) -> Self {
+        self.object = object;
+        self
+    }
+
+    pub fn version_id(mut self, version_id: Option<String>) -> Self {
+        self.version_id = version_id;
+        self
+    }
+
+    pub fn bypass_governance_mode(mut self, bypass_governance_mode: bool) -> Self {
+        self.bypass_governance_mode = bypass_governance_mode;
+        self
+    }
+
+    pub fn retention_mode(mut self, retention_mode: Option<RetentionMode>) -> Self {
+        self.retention_mode = retention_mode;
+        self
+    }
+
+    pub fn retain_until_date(mut self, retain_until_date: Option<UtcTime>) -> Self {
+        self.retain_until_date = retain_until_date;
+        self
+    }
+}
+
+impl S3Api for SetObjectRetention {
+    type S3Response = SetObjectRetentionResponse;
+}
+
+impl ToS3Request for SetObjectRetention {
+    fn to_s3request(&self) -> Result<S3Request, Error> {
+        // TODO: move the following checks into a validate fn
+        check_bucket_name(&self.bucket, true)?;
+
+        if self.object.is_empty() {
+            return Err(Error::InvalidObjectName(String::from(
+                "object name cannot be empty",
+            )));
+        }
+
+        if self.retention_mode.is_some() ^ self.retain_until_date.is_some() {
+            return Err(Error::InvalidRetentionConfig(String::from(
+                "both mode and retain_until_date must be set or unset",
+            )));
+        }
+
+        let mut headers = self
+            .extra_headers
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        if self.bypass_governance_mode {
+            headers.insert(
+                String::from("x-amz-bypass-governance-retention"),
+                String::from("true"),
+            );
+        }
+
+        let mut query_params = self
+            .extra_query_params
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        if let Some(v) = &self.version_id {
+            query_params.insert(String::from("versionId"), v.to_string());
+        }
+        query_params.insert(String::from("retention"), String::new());
+
+        let mut data: String = String::from("<Retention>");
+        if let Some(v) = &self.retention_mode {
+            data.push_str("<Mode>");
+            data.push_str(&v.to_string());
+            data.push_str("</Mode>");
+        }
+        if let Some(v) = &self.retain_until_date {
+            data.push_str("<RetainUntilDate>");
+            data.push_str(&to_iso8601utc(*v));
+            data.push_str("</RetainUntilDate>");
+        }
+        data.push_str("</Retention>");
+
+        headers.insert(String::from("Content-MD5"), md5sum_hash(data.as_ref()));
+
+        let body: Option<SegmentedBytes> = Some(SegmentedBytes::from(Bytes::from(data)));
+        let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?;
+
+        let req = S3Request::new(client, Method::PUT)
+            .region(self.region.as_deref())
+            .bucket(Some(&self.bucket))
+            .query_params(query_params)
+            .headers(headers)
+            .object(Some(&self.object))
+            .body(body);
+
+        Ok(req)
+    }
+}
diff --git a/src/s3/builders/set_object_tags.rs b/src/s3/builders/set_object_tags.rs
new file mode 100644
index 0000000..2780236
--- /dev/null
+++ b/src/s3/builders/set_object_tags.rs
@@ -0,0 +1,148 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::s3::Client;
+use crate::s3::builders::SegmentedBytes;
+use crate::s3::error::Error;
+use crate::s3::response::SetObjectTagsResponse;
+use crate::s3::types::{S3Api, S3Request, ToS3Request};
+use crate::s3::utils::{Multimap, check_bucket_name};
+use bytes::Bytes;
+use http::Method;
+use std::collections::HashMap;
+
+/// Argument builder for [set_object_tags()](Client::set_object_tags) API
+#[derive(Clone, Debug, Default)]
+pub struct SetObjectTags {
+    pub client: Option<Client>,
+
+    pub extra_headers: Option<Multimap>,
+    pub extra_query_params: Option<Multimap>,
+    pub region: Option<String>,
+    pub bucket: String,
+
+    pub object: String,
+    pub version_id: Option<String>,
+    pub tags: HashMap<String, String>,
+}
+
+impl SetObjectTags {
+    pub fn new(bucket: &str) -> Self {
+        Self {
+            bucket: bucket.to_owned(),
+            ..Default::default()
+        }
+    }
+    pub fn client(mut self, client: &Client) -> Self {
+        self.client = Some(client.clone());
+        self
+    }
+
+    pub fn extra_headers(mut self, extra_headers: Option<Multimap>) -> Self {
+        self.extra_headers = extra_headers;
+        self
+    }
+
+    pub fn extra_query_params(mut self, extra_query_params: Option<Multimap>) -> Self {
+        self.extra_query_params = extra_query_params;
+        self
+    }
+
+    pub fn region(mut self, region: Option<String>) -> Self {
+        self.region = region;
+        self
+    }
+
+    pub fn object(mut self, object: String) -> Self {
+        self.object = object;
+        self
+    }
+
+    pub fn version_id(mut self, version_id: Option<String>) -> Self {
+        self.version_id = version_id;
+        self
+    }
+
+    pub fn tags(mut self, tags: HashMap<String, String>) -> Self {
+        self.tags = tags;
+        self
+    }
+}
+
+impl S3Api for SetObjectTags {
+    type S3Response = SetObjectTagsResponse;
+}
+
+impl ToS3Request for SetObjectTags {
+    fn to_s3request(&self) -> Result<S3Request, Error> {
+        check_bucket_name(&self.bucket, true)?;
+
+        // TODO: add the following check to all other builders that take an object name.
+        // TODO: should it move into the object setter, or into a validate fn as in put_object?
+        if self.object.is_empty() {
+            return Err(Error::InvalidObjectName(String::from(
+                "object name cannot be empty",
+            )));
+        }
+
+        let headers = self
+            .extra_headers
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+        let mut query_params = self
+            .extra_query_params
+            .as_ref()
+            .filter(|v| !v.is_empty())
+            .cloned()
+            .unwrap_or_default();
+
+        if let Some(v) = &self.version_id {
+            query_params.insert(String::from("versionId"), v.to_string());
+        }
+        query_params.insert("tagging".into(), String::new());
+
+        let mut data = String::from("<Tagging>");
+        if !self.tags.is_empty() {
+            data.push_str("<TagSet>");
+            for (key, value) in self.tags.iter() {
+                data.push_str("<Tag>");
+                data.push_str("<Key>");
+                data.push_str(key);
+                data.push_str("</Key>");
+                data.push_str("<Value>");
+                data.push_str(value);
+                data.push_str("</Value>");
+                data.push_str("</Tag>");
+            }
+            data.push_str("</TagSet>");
+        }
+        data.push_str("</Tagging>");
+
+        let body: Option<SegmentedBytes> = Some(SegmentedBytes::from(Bytes::from(data)));
+        let client: &Client = self.client.as_ref().ok_or(Error::NoClientProvided)?;
+
+        let req = S3Request::new(client, Method::PUT)
+            .region(self.region.as_deref())
+            .bucket(Some(&self.bucket))
+            .query_params(query_params)
+            .object(Some(&self.object))
+            .headers(headers)
+            .body(body);
+
+        Ok(req)
+    }
+}
diff --git a/src/s3/client.rs b/src/s3/client.rs
index ed47221..7671011 100644
--- a/src/s3/client.rs
+++ b/src/s3/client.rs
@@ -28,10 +28,9 @@ use crate::s3::http::{BaseUrl, Url};
 use crate::s3::response::*;
 use crate::s3::signer::{presign_v4, sign_v4_s3};
 use crate::s3::sse::SseCustomerKey;
-use crate::s3::types::{Directive, ObjectLockConfig, RetentionMode};
+use crate::s3::types::Directive;
 use crate::s3::utils::{
-    Multimap, from_iso8601utc, get_default_text, get_option_text, get_text, md5sum_hash,
-    md5sum_hash_sb, merge, sha256_hash_sb, to_amz_date, to_iso8601utc, utc_now,
+    Multimap, get_text, md5sum_hash, md5sum_hash_sb, merge, sha256_hash_sb, to_amz_date, utc_now,
 };
 
 use async_recursion::async_recursion;
@@ -39,16 +38,20 @@
 use bytes::{Buf, Bytes};
 use dashmap::DashMap;
 use hyper::http::Method;
 use reqwest::Body;
-use reqwest::header::HeaderMap;
 use xmltree::Element;
 
+mod bucket_exists;
 mod delete_bucket_encryption;
 mod delete_bucket_lifecycle;
 mod delete_bucket_notification;
 mod delete_bucket_policy;
 mod delete_bucket_replication;
 mod delete_bucket_tags;
+mod delete_object_lock_config;
+mod delete_object_tags;
+mod disable_object_legal_hold;
+mod enable_object_legal_hold;
 mod get_bucket_encryption;
 mod get_bucket_lifecycle;
 mod get_bucket_notification;
@@ -57,10 +60,16 @@ mod get_bucket_replication;
 mod get_bucket_tags;
 mod get_bucket_versioning;
 mod get_object;
+mod get_object_lock_config;
+mod get_object_retention;
+mod get_object_tags;
+mod is_object_legal_hold_enabled;
 mod list_objects;
 mod listen_bucket_notification;
+mod make_bucket;
 mod object_prompt;
 mod put_object;
+mod remove_bucket;
 mod remove_objects;
 mod set_bucket_encryption;
 mod set_bucket_lifecycle;
@@ -69,10 +78,15 @@ mod set_bucket_policy;
 mod set_bucket_replication;
 mod set_bucket_tags;
 mod set_bucket_versioning;
+mod set_object_lock_config;
+mod set_object_retention;
+mod set_object_tags;
 
 use super::builders::{ListBuckets, SegmentedBytes};
 use super::types::{PartInfo, S3Api};
 
+pub const DEFAULT_REGION: &str = "us-east-1";
+
 /// Client Builder manufactures a Client using given parameters.
#[derive(Debug, Default)] pub struct ClientBuilder { @@ -180,9 +194,9 @@ impl ClientBuilder { #[derive(Clone, Debug, Default)] pub struct Client { client: reqwest::Client, - base_url: BaseUrl, + pub(crate) base_url: BaseUrl, provider: Option>>, - region_map: Arc>, + pub(crate) region_map: Arc>, } impl Client { @@ -597,7 +611,7 @@ impl Client { } if bucket_name.is_empty() || self.provider.is_none() { - return Ok(String::from("us-east-1")); + return Ok(String::from(DEFAULT_REGION)); } if let Some(v) = self.region_map.get(bucket_name) { @@ -611,7 +625,7 @@ impl Client { let resp = self .execute( Method::GET, - &String::from("us-east-1"), + &String::from(DEFAULT_REGION), &mut headers, &query_params, Some(bucket_name), @@ -624,7 +638,7 @@ impl Client { let mut location = root.get_text().unwrap_or_default().to_string(); if location.is_empty() { - location = String::from("us-east-1"); + location = String::from(DEFAULT_REGION); } self.region_map @@ -632,57 +646,8 @@ impl Client { Ok(location) } - pub async fn bucket_exists(&self, args: &BucketExistsArgs<'_>) -> Result { - let region; - match self.get_region(args.bucket, args.region).await { - Ok(r) => region = r, - Err(e) => match e { - Error::S3Error(ref er) => { - if er.code == "NoSuchBucket" { - return Ok(false); - } - return Err(e); - } - _ => return Err(e), - }, - }; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - let mut query_params = &Multimap::new(); - if let Some(v) = &args.extra_query_params { - query_params = v; - } - - match self - .execute( - Method::HEAD, - ®ion, - &mut headers, - query_params, - Some(args.bucket), - None, - None, - ) - .await - { - Ok(_) => Ok(true), - Err(e) => match e { - Error::S3Error(ref er) => { - if er.code == "NoSuchBucket" { - return Ok(false); - } - Err(e) - } - _ => Err(e), - }, - } - } - async fn calculate_part_count(&self, sources: &mut [ComposeSource<'_>]) -> Result { - let mut object_size = 0_usize; + let mut object_size = 0_u64; let mut i = 0; let mut part_count = 0_u16; @@ -735,7 +700,7 @@ impl Client { object_size += size; if object_size > MAX_OBJECT_SIZE { - return Err(Error::InvalidObjectSize(object_size as u64)); + return Err(Error::InvalidObjectSize(object_size)); } if size > MAX_PART_SIZE { @@ -866,7 +831,7 @@ impl Client { parts.push(PartInfo { number: part_number, etag: resp.etag, - size: size as u64, + size, }); } else { while size > 0 { @@ -897,7 +862,7 @@ impl Client { parts.push(PartInfo { number: part_number, etag: resp.etag, - size: size as u64, + size, }); offset += length; @@ -1051,316 +1016,6 @@ impl Client { }) } - pub async fn disable_object_legal_hold( - &self, - args: &DisableObjectLegalHoldArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("legal-hold"), String::new()); - - let resp = self - .execute( - Method::PUT, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - Some(Bytes::from( - &b"OFF"[..], - )), - ) - .await?; - - Ok(DisableObjectLegalHoldResponse { - headers: resp.headers().clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: 
args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - }) - } - - pub async fn delete_object_lock_config( - &self, - args: &DeleteObjectLockConfigArgs<'_>, - ) -> Result { - self.set_object_lock_config(&SetObjectLockConfigArgs { - extra_headers: args.extra_headers, - extra_query_params: args.extra_query_params, - region: args.region, - bucket: args.bucket, - config: &ObjectLockConfig { - retention_mode: None, - retention_duration_days: None, - retention_duration_years: None, - }, - }) - .await - } - - pub async fn delete_object_tags( - &self, - args: &DeleteObjectTagsArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("tagging"), String::new()); - - let resp = self - .execute( - Method::DELETE, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - None, - ) - .await?; - - Ok(DeleteObjectTagsResponse { - headers: resp.headers().clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - }) - } - - pub async fn enable_object_legal_hold( - &self, - args: &EnableObjectLegalHoldArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("legal-hold"), String::new()); - - let resp = self - .execute( - Method::PUT, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - Some(Bytes::from( - &b"ON"[..], - )), - ) - .await?; - - Ok(EnableObjectLegalHoldResponse { - headers: resp.headers().clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - }) - } - - pub async fn get_object_lock_config( - &self, - args: &GetObjectLockConfigArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - query_params.insert(String::from("object-lock"), String::new()); - - let resp = self - .execute( - Method::GET, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - None, - None, - ) - .await?; - - let header_map = resp.headers().clone(); - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; - - Ok(GetObjectLockConfigResponse { - headers: header_map.clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - config: ObjectLockConfig::from_xml(&root)?, - }) - } - - pub async fn get_object_retention( - &self, - args: &GetObjectRetentionArgs<'_>, - ) -> Result { - let 
region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("retention"), String::new()); - - match self - .execute( - Method::GET, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - None, - ) - .await - { - Ok(resp) => { - let header_map = resp.headers().clone(); - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; - - Ok(GetObjectRetentionResponse { - headers: header_map.clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - retention_mode: match get_option_text(&root, "Mode") { - Some(v) => Some(RetentionMode::parse(&v)?), - _ => None, - }, - retain_until_date: match get_option_text(&root, "RetainUntilDate") { - Some(v) => Some(from_iso8601utc(&v)?), - _ => None, - }, - }) - } - Err(Error::S3Error(ref err)) - if err.code == Error::NoSuchObjectLockConfiguration.as_str() => - { - Ok(GetObjectRetentionResponse { - headers: HeaderMap::new(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - retention_mode: None, - retain_until_date: None, - }) - } - Err(e) => Err(e), - } - } - - pub async fn get_object_tags( - &self, - args: &GetObjectTagsArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("tagging"), String::new()); - - let resp = self - .execute( - Method::GET, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - None, - ) - .await?; - - let header_map = resp.headers().clone(); - let body = resp.bytes().await?; - let mut root = Element::parse(body.reader())?; - - let element = root - .get_mut_child("TagSet") - .ok_or(Error::XmlError(" tag not found".to_string()))?; - let mut tags = std::collections::HashMap::new(); - while let Some(v) = element.take_child("Tag") { - tags.insert(get_text(&v, "Key")?, get_text(&v, "Value")?); - } - - Ok(GetObjectTagsResponse { - headers: header_map.clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - tags, - }) - } - pub async fn get_presigned_object_url( &self, args: &GetPresignedObjectUrlArgs<'_>, @@ -1411,8 +1066,8 @@ impl Client { Ok(GetPresignedObjectUrlResponse { region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), + bucket: args.bucket.to_string(), + object: args.object.to_string(), version_id: args.version_id.as_ref().map(|v| v.to_string()), url: url.to_string(), }) @@ -1438,139 +1093,10 @@ impl Client { ) } - pub async fn is_object_legal_hold_enabled( - &self, - args: 
&IsObjectLegalHoldEnabledArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("legal-hold"), String::new()); - - match self - .execute( - Method::GET, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - None, - ) - .await - { - Ok(resp) => { - let header_map = resp.headers().clone(); - let body = resp.bytes().await?; - let root = Element::parse(body.reader())?; - Ok(IsObjectLegalHoldEnabledResponse { - headers: header_map.clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - enabled: get_default_text(&root, "Status") == "ON", - }) - } - Err(Error::S3Error(ref err)) - if err.code == Error::NoSuchObjectLockConfiguration.as_str() => - { - Ok(IsObjectLegalHoldEnabledResponse { - headers: HeaderMap::new(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - enabled: false, - }) - } - Err(e) => Err(e), - } - } - pub fn list_buckets(&self) -> ListBuckets { ListBuckets::new().client(self) } - pub async fn make_bucket( - &self, - args: &MakeBucketArgs<'_>, - ) -> Result { - let mut region = "us-east-1"; - if let Some(r) = &args.region { - if !self.base_url.region.is_empty() { - if self.base_url.region != *r { - return Err(Error::RegionMismatch( - self.base_url.region.clone(), - r.to_string(), - )); - } - region = r; - } - } - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - }; - - if args.object_lock { - headers.insert( - String::from("x-amz-bucket-object-lock-enabled"), - String::from("true"), - ); - } - - let mut query_params = &Multimap::new(); - if let Some(v) = &args.extra_query_params { - query_params = v; - } - - let data = match region { - "us-east-1" => String::new(), - _ => format!( - "{}", - region - ), - }; - - let body = match data.is_empty() { - true => None, - false => Some(data.into()), - }; - - let resp = self - .execute( - Method::PUT, - region, - &mut headers, - query_params, - Some(args.bucket), - None, - body, - ) - .await?; - self.region_map - .insert(args.bucket.to_string(), region.to_string()); - - Ok(MakeBucketResponse { - headers: resp.headers().clone(), - region: region.to_string(), - bucket: args.bucket.to_string(), - }) - } - /// Executes [PutObject](https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html) S3 API pub async fn put_object_api( &self, @@ -1617,197 +1143,6 @@ impl Client { }) } - pub async fn remove_bucket( - &self, - args: &RemoveBucketArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - let mut query_params = &Multimap::new(); - if let Some(v) = &args.extra_query_params { - query_params = v; - } - - let resp = self - .execute( - Method::DELETE, - ®ion, - &mut headers, - query_params, - Some(args.bucket), - None, - None, - ) - .await?; - 
self.region_map.remove(&args.bucket.to_string()); - - Ok(RemoveBucketResponse { - headers: resp.headers().clone(), - region: region.to_string(), - bucket: args.bucket.to_string(), - }) - } - - pub async fn set_object_lock_config( - &self, - args: &SetObjectLockConfigArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - query_params.insert(String::from("object-lock"), String::new()); - - let resp = self - .execute( - Method::PUT, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - None, - Some(args.config.to_xml().into()), - ) - .await?; - - Ok(SetObjectLockConfigResponse { - headers: resp.headers().clone(), - region: region.clone(), - bucket: args.bucket.to_string(), - }) - } - - pub async fn set_object_retention( - &self, - args: &SetObjectRetentionArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - if args.bypass_governance_mode { - headers.insert( - String::from("x-amz-bypass-governance-retention"), - String::from("true"), - ); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("retention"), String::new()); - - let mut data = String::from(""); - if let Some(v) = &args.retention_mode { - data.push_str(""); - data.push_str(&v.to_string()); - data.push_str(""); - } - if let Some(v) = &args.retain_until_date { - data.push_str(""); - data.push_str(&to_iso8601utc(*v)); - data.push_str(""); - } - data.push_str(""); - - headers.insert(String::from("Content-MD5"), md5sum_hash(data.as_ref())); - - let resp = self - .execute( - Method::PUT, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - Some(data.into()), - ) - .await?; - - Ok(SetObjectRetentionResponse { - headers: resp.headers().clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - }) - } - - pub async fn set_object_tags( - &self, - args: &SetObjectTagsArgs<'_>, - ) -> Result { - let region = self.get_region(args.bucket, args.region).await?; - - let mut headers = Multimap::new(); - if let Some(v) = &args.extra_headers { - merge(&mut headers, v); - } - - let mut query_params = Multimap::new(); - if let Some(v) = &args.extra_query_params { - merge(&mut query_params, v); - } - if let Some(v) = args.version_id { - query_params.insert(String::from("versionId"), v.to_string()); - } - query_params.insert(String::from("tagging"), String::new()); - - let mut data = String::from(""); - if !args.tags.is_empty() { - data.push_str(""); - for (key, value) in args.tags.iter() { - data.push_str(""); - data.push_str(""); - data.push_str(key); - data.push_str(""); - data.push_str(""); - data.push_str(value); - data.push_str(""); - data.push_str(""); - } - data.push_str(""); - } - data.push_str(""); - - let resp = self - .execute( - Method::PUT, - ®ion, - &mut headers, - &query_params, - Some(args.bucket), - Some(args.object), - Some(data.into()), - ) - 
.await?; - - Ok(SetObjectTagsResponse { - headers: resp.headers().clone(), - region: region.clone(), - bucket_name: args.bucket.to_string(), - object_name: args.object.to_string(), - version_id: args.version_id.as_ref().map(|v| v.to_string()), - }) - } - pub async fn select_object_content( &self, args: &SelectObjectContentArgs<'_>, diff --git a/src/s3/client/bucket_exists.rs b/src/s3/client/bucket_exists.rs new file mode 100644 index 0000000..e5e3c71 --- /dev/null +++ b/src/s3/client/bucket_exists.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::BucketExists; + +impl Client { + /// Create a BucketExists request builder. + pub fn bucket_exists(&self, bucket: &str) -> BucketExists { + BucketExists::new(bucket).client(self) + } +} diff --git a/src/s3/client/delete_object_lock_config.rs b/src/s3/client/delete_object_lock_config.rs new file mode 100644 index 0000000..189e3b6 --- /dev/null +++ b/src/s3/client/delete_object_lock_config.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::DeleteObjectLockConfig; + +impl Client { + /// Create a DeleteObjectLockConfig request builder. + pub fn delete_object_lock_config(&self, bucket: &str) -> DeleteObjectLockConfig { + DeleteObjectLockConfig::new(bucket).client(self) + } +} diff --git a/src/s3/client/delete_object_tags.rs b/src/s3/client/delete_object_tags.rs new file mode 100644 index 0000000..2e41e94 --- /dev/null +++ b/src/s3/client/delete_object_tags.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. 
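Usage sketch for the `SetObjectTags` builder introduced above (bucket, object, and tag values are illustrative; assumes a configured `client`):

```rust
use minio::s3::types::S3Api;
use std::collections::HashMap;

// Replace the tag set of a single object.
let mut tags: HashMap<String, String> = HashMap::new();
tags.insert("project".to_string(), "demo".to_string());
tags.insert("owner".to_string(), "data-team".to_string());

let _resp = client
    .set_object_tags("example-bucket")
    .object("example-object".to_string())
    .tags(tags)
    .send()
    .await?;
```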
+ +use super::Client; +use crate::s3::builders::DeleteObjectTags; + +impl Client { + /// Create a DeleteObjectTags request builder. + pub fn delete_object_tags(&self, bucket: &str) -> DeleteObjectTags { + DeleteObjectTags::new(bucket).client(self) + } +} diff --git a/src/s3/client/disable_object_legal_hold.rs b/src/s3/client/disable_object_legal_hold.rs new file mode 100644 index 0000000..943bafa --- /dev/null +++ b/src/s3/client/disable_object_legal_hold.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::DisableObjectLegalHold; + +impl Client { + /// Create a DisableObjectLegalHold request builder. + pub fn disable_object_legal_hold(&self, bucket: &str) -> DisableObjectLegalHold { + DisableObjectLegalHold::new(bucket).client(self) + } +} diff --git a/src/s3/client/enable_object_legal_hold.rs b/src/s3/client/enable_object_legal_hold.rs new file mode 100644 index 0000000..de43585 --- /dev/null +++ b/src/s3/client/enable_object_legal_hold.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::EnableObjectLegalHold; + +impl Client { + /// Create a EnableObjectLegalHold request builder. + pub fn enable_object_legal_hold(&self, bucket: &str) -> EnableObjectLegalHold { + EnableObjectLegalHold::new(bucket).client(self) + } +} diff --git a/src/s3/client/get_object_lock_config.rs b/src/s3/client/get_object_lock_config.rs new file mode 100644 index 0000000..a84ee43 --- /dev/null +++ b/src/s3/client/get_object_lock_config.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::GetObjectLockConfig; + +impl Client { + /// Create a GetObjectLockConfig request builder. + pub fn get_object_lock_config(&self, bucket: &str) -> GetObjectLockConfig { + GetObjectLockConfig::new(bucket).client(self) + } +} diff --git a/src/s3/client/get_object_retention.rs b/src/s3/client/get_object_retention.rs new file mode 100644 index 0000000..7a25f05 --- /dev/null +++ b/src/s3/client/get_object_retention.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::GetObjectRetention; + +impl Client { + /// Create a GetObjectRetention request builder. + pub fn get_object_retention(&self, bucket: &str) -> GetObjectRetention { + GetObjectRetention::new(bucket).client(self) + } +} diff --git a/src/s3/client/get_object_tags.rs b/src/s3/client/get_object_tags.rs new file mode 100644 index 0000000..8ed2640 --- /dev/null +++ b/src/s3/client/get_object_tags.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::GetObjectTags; + +impl Client { + /// Create a GetObjectTags request builder. + pub fn get_object_tags(&self, bucket: &str) -> GetObjectTags { + GetObjectTags::new(bucket).client(self) + } +} diff --git a/src/s3/client/is_object_legal_hold_enabled.rs b/src/s3/client/is_object_legal_hold_enabled.rs new file mode 100644 index 0000000..091b0bb --- /dev/null +++ b/src/s3/client/is_object_legal_hold_enabled.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. 
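Usage sketch for the `SetObjectRetention` builder introduced above (names are illustrative; assumes `utc_now()` is exported by `minio::s3::utils` and that chrono's `Duration` is available to the caller):

```rust
use minio::s3::types::{RetentionMode, S3Api};
use minio::s3::utils::utc_now;

// Place a 30-day GOVERNANCE retention on one object version; mode and
// retain-until date must be set together, matching the builder's validation.
let until = utc_now() + chrono::Duration::days(30);
let _resp = client
    .set_object_retention("example-bucket")
    .object("example-object".to_string())
    .version_id(Some("example-version-id".to_string()))
    .retention_mode(Some(RetentionMode::GOVERNANCE))
    .retain_until_date(Some(until))
    .send()
    .await?;
```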
+ +use super::Client; +use crate::s3::builders::IsObjectLegalHoldEnabled; + +impl Client { + /// Create a IsObjectLegalHoldEnabled request builder. + pub fn is_object_legal_hold_enabled(&self, bucket: &str) -> IsObjectLegalHoldEnabled { + IsObjectLegalHoldEnabled::new(bucket).client(self) + } +} diff --git a/src/s3/client/make_bucket.rs b/src/s3/client/make_bucket.rs new file mode 100644 index 0000000..ea15a8c --- /dev/null +++ b/src/s3/client/make_bucket.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::MakeBucket; + +impl Client { + /// Create a MakeBucket request builder. + pub fn make_bucket(&self, bucket: &str) -> MakeBucket { + MakeBucket::new(bucket).client(self) + } +} diff --git a/src/s3/client/remove_bucket.rs b/src/s3/client/remove_bucket.rs new file mode 100644 index 0000000..126bc3e --- /dev/null +++ b/src/s3/client/remove_bucket.rs @@ -0,0 +1,91 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::{ObjectToDelete, RemoveBucket}; +use crate::s3::error::Error; +use crate::s3::response::DeleteResult; +use crate::s3::response::{ + DisableObjectLegalHoldResponse, RemoveBucketResponse, RemoveObjectResponse, + RemoveObjectsResponse, +}; +use crate::s3::types::{ListEntry, S3Api, ToStream}; +use futures::StreamExt; + +impl Client { + /// Create a RemoveBucket request builder. + pub fn remove_bucket(&self, bucket: &str) -> RemoveBucket { + RemoveBucket::new(bucket).client(self) + } + + /// Removes a bucket and also removes non-empty buckets by first removing all objects before + /// deleting the bucket. Bypasses governance mode and legal hold. 
+    pub async fn remove_and_purge_bucket(
+        &self,
+        bucket_name: &str,
+    ) -> Result<RemoveBucketResponse, Error> {
+        let mut stream = self
+            .list_objects(bucket_name)
+            .include_versions(true)
+            .to_stream()
+            .await;
+
+        while let Some(items) = stream.next().await {
+            let items: Vec<ListEntry> = items?.contents;
+            let mut to_delete: Vec<ObjectToDelete> = Vec::with_capacity(items.len());
+            for item in items {
+                to_delete.push(ObjectToDelete::from((
+                    item.name.as_ref(),
+                    item.version_id.as_deref(),
+                )))
+            }
+            let mut resp = self
+                .remove_objects(bucket_name, to_delete.into_iter())
+                .bypass_governance_mode(true)
+                .to_stream()
+                .await;
+
+            while let Some(item) = resp.next().await {
+                let res: RemoveObjectsResponse = item?;
+                for obj in res.result.iter() {
+                    match obj {
+                        DeleteResult::Deleted(_) => {}
+                        DeleteResult::Error(v) => {
+                            // the object was not deleted: try to disable legal hold and retry.
+                            let _resp: DisableObjectLegalHoldResponse = self
+                                .disable_object_legal_hold(bucket_name)
+                                .object(v.object_name.clone())
+                                .version_id(v.version_id.clone())
+                                .send()
+                                .await?;
+
+                            let key: &str = &v.object_name;
+                            let version: Option<&str> = v.version_id.as_deref();
+                            let otd: ObjectToDelete = ObjectToDelete::from((key, version));
+                            let _resp: RemoveObjectResponse = self
+                                .remove_object(bucket_name, otd)
+                                .bypass_governance_mode(true)
+                                .send()
+                                .await?;
+                        }
+                    }
+                }
+            }
+        }
+        self.remove_bucket(bucket_name).send().await
+    }
+}
diff --git a/src/s3/client/set_bucket_tags.rs b/src/s3/client/set_bucket_tags.rs
index 62f782b..0a9cbf7 100644
--- a/src/s3/client/set_bucket_tags.rs
+++ b/src/s3/client/set_bucket_tags.rs
@@ -41,7 +41,6 @@ impl Client {
     ///         .send().await;
     ///     }
     /// ```
-
     pub fn set_bucket_tags(&self, bucket: &str) -> SetBucketTags {
         SetBucketTags::new(bucket).client(self)
     }
diff --git a/src/s3/client/set_object_lock_config.rs b/src/s3/client/set_object_lock_config.rs
new file mode 100644
index 0000000..cd81442
--- /dev/null
+++ b/src/s3/client/set_object_lock_config.rs
@@ -0,0 +1,26 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! S3 APIs for bucket objects.
+
+use super::Client;
+use crate::s3::builders::SetObjectLockConfig;
+
+impl Client {
+    /// Create a SetObjectLockConfig request builder.
+    pub fn set_object_lock_config(&self, bucket: &str) -> SetObjectLockConfig {
+        SetObjectLockConfig::new(bucket).client(self)
+    }
+}
diff --git a/src/s3/client/set_object_retention.rs b/src/s3/client/set_object_retention.rs
new file mode 100644
index 0000000..705d9a9
--- /dev/null
+++ b/src/s3/client/set_object_retention.rs
@@ -0,0 +1,26 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::SetObjectRetention; + +impl Client { + /// Create a SetObjectRetention request builder. + pub fn set_object_retention(&self, bucket: &str) -> SetObjectRetention { + SetObjectRetention::new(bucket).client(self) + } +} diff --git a/src/s3/client/set_object_tags.rs b/src/s3/client/set_object_tags.rs new file mode 100644 index 0000000..09fcc7f --- /dev/null +++ b/src/s3/client/set_object_tags.rs @@ -0,0 +1,26 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! S3 APIs for bucket objects. + +use super::Client; +use crate::s3::builders::SetObjectTags; + +impl Client { + /// Create a SetObjectTags request builder. + pub fn set_object_tags(&self, bucket: &str) -> SetObjectTags { + SetObjectTags::new(bucket).client(self) + } +} diff --git a/src/s3/error.rs b/src/s3/error.rs index f5cf771..0359545 100644 --- a/src/s3/error.rs +++ b/src/s3/error.rs @@ -97,11 +97,11 @@ pub enum Error { SelectError(String, String), UnsupportedApi(String), InvalidComposeSource(String), - InvalidComposeSourceOffset(String, String, Option, usize, usize), - InvalidComposeSourceLength(String, String, Option, usize, usize), - InvalidComposeSourceSize(String, String, Option, usize, usize), - InvalidComposeSourcePartSize(String, String, Option, usize, usize), - InvalidComposeSourceMultipart(String, String, Option, usize, usize), + InvalidComposeSourceOffset(String, String, Option, u64, u64), + InvalidComposeSourceLength(String, String, Option, u64, u64), + InvalidComposeSourceSize(String, String, Option, u64, u64), + InvalidComposeSourcePartSize(String, String, Option, u64, u64), + InvalidComposeSourceMultipart(String, String, Option, u64, u64), InvalidDirective(String), InvalidCopyDirective(String), InvalidMultipartCount(u16), @@ -121,6 +121,7 @@ pub enum Error { ReplicationConfigurationNotFoundError, NoSuchObjectLockConfiguration, NoSuchBucketPolicy, + NoSuchBucket, } impl std::error::Error for Error {} @@ -296,6 +297,7 @@ impl fmt::Display for Error { } Error::NoSuchObjectLockConfiguration => write!(f, "no such object lock"), Error::NoSuchBucketPolicy => write!(f, "no such bucket policy"), + Error::NoSuchBucket => write!(f, "no such bucket"), } } } @@ -307,6 +309,7 @@ impl Error { Error::ReplicationConfigurationNotFoundError => "ReplicationConfigurationNotFoundError", Error::NoSuchObjectLockConfiguration => "NoSuchObjectLockConfiguration", Error::NoSuchBucketPolicy => "NoSuchBucketPolicy", + Error::NoSuchBucket => 
"NoSuchBucket", _ => "TODO", } } diff --git a/src/s3/http.rs b/src/s3/http.rs index 488f72c..694327d 100644 --- a/src/s3/http.rs +++ b/src/s3/http.rs @@ -15,6 +15,8 @@ //! HTTP URL definitions +use super::utils::urlencode_object_key; +use crate::s3::client::DEFAULT_REGION; use crate::s3::error::Error; use crate::s3::utils::match_hostname; use crate::s3::utils::{Multimap, to_query_string}; @@ -26,8 +28,6 @@ use regex::Regex; use std::fmt; use std::str::FromStr; -use super::utils::urlencode_object_key; - const AWS_S3_PREFIX: &str = r"^(((bucket\.|accesspoint\.)vpce(-[a-z_\d]+)+\.s3\.)|([a-z_\d-]{1,63}\.)s3-control(-[a-z_\d]+)*\.|(s3(-[a-z_\d]+)*\.))"; lazy_static! { @@ -185,7 +185,7 @@ fn get_aws_info( let domain_suffix = tokens.join("."); if host == "s3-external-1.amazonaws.com" { - region_in_host = "us-east-1".to_string(); + region_in_host = DEFAULT_REGION.to_string(); } if host == "s3-us-gov-west-1.amazonaws.com" || host == "s3-fips-us-gov-west-1.amazonaws.com" { region_in_host = "us-gov-west-1".to_string(); diff --git a/src/s3/response.rs b/src/s3/response.rs index 3712af2..6b975e7 100644 --- a/src/s3/response.rs +++ b/src/s3/response.rs @@ -23,17 +23,22 @@ use std::io::BufReader; use xmltree::Element; use crate::s3::error::Error; -use crate::s3::types::{ObjectLockConfig, RetentionMode, SelectProgress, parse_legal_hold}; +use crate::s3::types::{RetentionMode, SelectProgress, parse_legal_hold}; use crate::s3::utils::{ UtcTime, copy_slice, crc32, from_http_header_value, from_iso8601utc, get_text, uint32, }; +mod bucket_exists; mod delete_bucket_encryption; mod delete_bucket_lifecycle; mod delete_bucket_notification; mod delete_bucket_policy; mod delete_bucket_replication; mod delete_bucket_tags; +mod delete_object_lock_config; +mod delete_object_tags; +mod disable_object_legal_hold; +mod enable_object_legal_hold; mod get_bucket_encryption; mod get_bucket_lifecycle; mod get_bucket_notification; @@ -42,11 +47,17 @@ mod get_bucket_replication; mod get_bucket_tags; mod get_bucket_versioning; mod get_object; +mod get_object_lock_config; +mod get_object_retention; +mod get_object_tags; +mod is_object_legal_hold_enabled; mod list_buckets; pub(crate) mod list_objects; mod listen_bucket_notification; +mod make_bucket; mod object_prompt; mod put_object; +mod remove_bucket; mod remove_objects; mod set_bucket_encryption; mod set_bucket_lifecycle; @@ -55,13 +66,21 @@ mod set_bucket_policy; mod set_bucket_replication; mod set_bucket_tags; mod set_bucket_versioning; +mod set_object_lock_config; +mod set_object_retention; +mod set_object_tags; +pub use bucket_exists::BucketExistsResponse; pub use delete_bucket_encryption::DeleteBucketEncryptionResponse; pub use delete_bucket_lifecycle::DeleteBucketLifecycleResponse; pub use delete_bucket_notification::DeleteBucketNotificationResponse; pub use delete_bucket_policy::DeleteBucketPolicyResponse; pub use delete_bucket_replication::DeleteBucketReplicationResponse; pub use delete_bucket_tags::DeleteBucketTagsResponse; +pub use delete_object_lock_config::DeleteObjectLockConfigResponse; +pub use delete_object_tags::DeleteObjectTagsResponse; +pub use disable_object_legal_hold::DisableObjectLegalHoldResponse; +pub use enable_object_legal_hold::EnableObjectLegalHoldResponse; pub use get_bucket_encryption::GetBucketEncryptionResponse; pub use get_bucket_lifecycle::GetBucketLifecycleResponse; pub use get_bucket_notification::GetBucketNotificationResponse; @@ -70,16 +89,24 @@ pub use get_bucket_replication::GetBucketReplicationResponse; pub use 
get_bucket_tags::GetBucketTagsResponse; pub use get_bucket_versioning::GetBucketVersioningResponse; pub use get_object::GetObjectResponse; +pub use get_object_lock_config::GetObjectLockConfigResponse; +pub use get_object_retention::GetObjectRetentionResponse; +pub use get_object_tags::GetObjectTagsResponse; +pub use is_object_legal_hold_enabled::IsObjectLegalHoldEnabledResponse; pub use list_buckets::ListBucketsResponse; pub use list_objects::ListObjectsResponse; pub use listen_bucket_notification::ListenBucketNotificationResponse; +pub use make_bucket::MakeBucketResponse; pub use object_prompt::ObjectPromptResponse; pub use put_object::{ AbortMultipartUploadResponse2, CompleteMultipartUploadResponse2, CreateMultipartUploadResponse2, PutObjectContentResponse, PutObjectResponse, UploadPartResponse2, }; -pub use remove_objects::{DeleteError, DeletedObject, RemoveObjectResponse, RemoveObjectsResponse}; +pub use remove_bucket::RemoveBucketResponse; +pub use remove_objects::{ + DeleteError, DeleteResult, DeletedObject, RemoveObjectResponse, RemoveObjectsResponse, +}; pub use set_bucket_encryption::SetBucketEncryptionResponse; pub use set_bucket_lifecycle::SetBucketLifecycleResponse; pub use set_bucket_notification::SetBucketNotificationResponse; @@ -87,6 +114,9 @@ pub use set_bucket_policy::SetBucketPolicyResponse; pub use set_bucket_replication::SetBucketReplicationResponse; pub use set_bucket_tags::SetBucketTagsResponse; pub use set_bucket_versioning::SetBucketVersioningResponse; +pub use set_object_lock_config::SetObjectLockConfigResponse; +pub use set_object_retention::SetObjectRetentionResponse; +pub use set_object_tags::SetObjectTagsResponse; #[derive(Debug)] /// Base response for bucket operation @@ -96,19 +126,13 @@ pub struct BucketResponse { pub bucket: String, } -/// Response of [make_bucket()](crate::s3::client::Client::make_bucket) API -pub type MakeBucketResponse = BucketResponse; - -/// Response of [remove_bucket()](crate::s3::client::Client::remove_bucket) API -pub type RemoveBucketResponse = BucketResponse; - #[derive(Debug)] /// Base response for object operation pub struct ObjectResponse { pub headers: HeaderMap, pub region: String, - pub bucket_name: String, - pub object_name: String, + pub bucket: String, + pub object: String, pub version_id: Option, } @@ -183,7 +207,7 @@ pub struct StatObjectResponse { pub region: String, pub bucket_name: String, pub object_name: String, - pub size: usize, + pub size: u64, pub etag: String, pub version_id: Option, pub last_modified: Option, @@ -202,8 +226,8 @@ impl StatObjectResponse { object_name: &str, ) -> Result { let size = match headers.get("Content-Length") { - Some(v) => v.to_str()?.parse::()?, - None => 0_usize, + Some(v) => v.to_str()?.parse::()?, + None => 0_u64, }; let etag = match headers.get("ETag") { @@ -581,10 +605,7 @@ impl SelectObjectContentResponse { match self.do_read().await { Err(e) => { self.done = true; - return Err(std::io::Error::new( - std::io::ErrorKind::Other, - e.to_string(), - )); + return Err(std::io::Error::other(e.to_string())); } Ok(_) => { if self.payload.is_empty() { @@ -596,77 +617,12 @@ impl SelectObjectContentResponse { } } } - -/// Response of [enable_object_legal_hold()](crate::s3::client::Client::enable_object_legal_hold) API -pub type EnableObjectLegalHoldResponse = ObjectResponse; - -/// Response of [disable_object_legal_hold()](crate::s3::client::Client::disable_object_legal_hold) API -pub type DisableObjectLegalHoldResponse = ObjectResponse; - -#[derive(Clone, Debug)] -/// Response of 
[is_object_legal_hold_enabled()](crate::s3::client::Client::is_object_legal_hold_enabled) API -pub struct IsObjectLegalHoldEnabledResponse { - pub headers: HeaderMap, - pub region: String, - pub bucket_name: String, - pub object_name: String, - pub version_id: Option, - pub enabled: bool, -} - -/// Response of [delete_object_lock_config()](crate::s3::client::Client::delete_object_lock_config) API -pub type DeleteObjectLockConfigResponse = BucketResponse; - -#[derive(Clone, Debug)] -/// Response of [get_object_lock_config()](crate::s3::client::Client::get_object_lock_config) API -pub struct GetObjectLockConfigResponse { - pub headers: HeaderMap, - pub region: String, - pub bucket_name: String, - pub config: ObjectLockConfig, -} - -/// Response of [set_object_lock_config()](crate::s3::client::Client::set_object_lock_config) API -pub type SetObjectLockConfigResponse = BucketResponse; - -#[derive(Clone, Debug)] -/// Response of [get_object_retention()](crate::s3::client::Client::get_object_retention) API -pub struct GetObjectRetentionResponse { - pub headers: HeaderMap, - pub region: String, - pub bucket_name: String, - pub object_name: String, - pub version_id: Option, - pub retention_mode: Option, - pub retain_until_date: Option, -} - -/// Response of [set_object_retention()](crate::s3::client::Client::set_object_retention) API -pub type SetObjectRetentionResponse = ObjectResponse; - -/// Response of [delete_object_tags()](crate::s3::client::Client::delete_object_tags) API -pub type DeleteObjectTagsResponse = ObjectResponse; - -#[derive(Clone, Debug)] -/// Response of [get_object_tags()](crate::s3::client::Client::get_object_tags) API -pub struct GetObjectTagsResponse { - pub headers: HeaderMap, - pub region: String, - pub bucket_name: String, - pub object_name: String, - pub version_id: Option, - pub tags: std::collections::HashMap, -} - -/// Response of [set_object_tags()](crate::s3::client::Client::set_object_tags) API -pub type SetObjectTagsResponse = ObjectResponse; - #[derive(Clone, Debug)] /// Response of [get_presigned_object_url()](crate::s3::client::Client::get_presigned_object_url) API pub struct GetPresignedObjectUrlResponse { pub region: String, - pub bucket_name: String, - pub object_name: String, + pub bucket: String, + pub object: String, pub version_id: Option, pub url: String, } diff --git a/src/s3/response/bucket_exists.rs b/src/s3/response/bucket_exists.rs new file mode 100644 index 0000000..904a8ed --- /dev/null +++ b/src/s3/response/bucket_exists.rs @@ -0,0 +1,60 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [bucket_exists()](crate::s3::client::Client::bucket_exists) +/// API +#[derive(Clone, Debug)] +pub struct BucketExistsResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + pub exists: bool, +} + +#[async_trait] +impl FromS3Response for BucketExistsResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result<reqwest::Response, Error>, + ) -> Result<Self, Error> { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + match resp { + Ok(r) => Ok(BucketExistsResponse { + headers: r.headers().clone(), + region: req.get_computed_region(), + bucket, + exists: true, + }), + Err(Error::S3Error(ref err)) if err.code == Error::NoSuchBucket.as_str() => { + Ok(BucketExistsResponse { + headers: HeaderMap::new(), + region: String::new(), // NOTE the bucket does not exist and the region is not provided + bucket, + exists: false, + }) + } + Err(e) => Err(e), + } + } +} diff --git a/src/s3/response/delete_object_lock_config.rs b/src/s3/response/delete_object_lock_config.rs new file mode 100644 index 0000000..35c7262 --- /dev/null +++ b/src/s3/response/delete_object_lock_config.rs @@ -0,0 +1,49 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [delete_object_lock_config()](crate::s3::client::Client::delete_object_lock_config) +/// API +#[derive(Clone, Debug)] +pub struct DeleteObjectLockConfigResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, +} + +#[async_trait] +impl FromS3Response for DeleteObjectLockConfigResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result<reqwest::Response, Error>, + ) -> Result<Self, Error> { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + Ok(DeleteObjectLockConfigResponse { + headers: resp.headers().clone(), + region: req.get_computed_region(), + bucket, + }) + } +} diff --git a/src/s3/response/delete_object_tags.rs b/src/s3/response/delete_object_tags.rs new file mode 100644 index 0000000..626545c --- /dev/null +++ b/src/s3/response/delete_object_tags.rs @@ -0,0 +1,57 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [delete_object_tags()](crate::s3::client::Client::delete_object_tags) +/// API +#[derive(Clone, Debug)] +pub struct DeleteObjectTagsResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + pub object: String, + pub version_id: Option, +} + +#[async_trait] +impl FromS3Response for DeleteObjectTagsResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + let region: String = req.get_computed_region(); + let object: String = req.object.unwrap().into(); + let version_id: Option = req.query_params.get("versionId").cloned(); + + Ok(DeleteObjectTagsResponse { + headers: resp.headers().clone(), + region, + bucket, + object, + version_id, + }) + } +} diff --git a/src/s3/response/disable_object_legal_hold.rs b/src/s3/response/disable_object_legal_hold.rs new file mode 100644 index 0000000..055693e --- /dev/null +++ b/src/s3/response/disable_object_legal_hold.rs @@ -0,0 +1,57 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [disable_object_legal_hold()](crate::s3::client::Client::disable_object_legal_hold) +/// API +#[derive(Clone, Debug)] +pub struct DisableObjectLegalHoldResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + pub object_name: String, + pub version_id: Option, +} + +#[async_trait] +impl FromS3Response for DisableObjectLegalHoldResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + let region: String = req.get_computed_region(); + let object_name: String = req.object.unwrap().into(); + let version_id: Option = req.query_params.get("versionId").cloned(); + + Ok(DisableObjectLegalHoldResponse { + headers: resp.headers().clone(), + region, + bucket, + object_name, + version_id, + }) + } +} diff --git a/src/s3/response/enable_object_legal_hold.rs b/src/s3/response/enable_object_legal_hold.rs new file mode 100644 index 0000000..479f541 --- /dev/null +++ b/src/s3/response/enable_object_legal_hold.rs @@ -0,0 +1,57 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [enable_object_legal_hold()](crate::s3::client::Client::enable_object_legal_hold) +/// API +#[derive(Clone, Debug)] +pub struct EnableObjectLegalHoldResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + pub object_name: String, + pub version_id: Option, +} + +#[async_trait] +impl FromS3Response for EnableObjectLegalHoldResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + let region: String = req.get_computed_region(); + let object_name: String = req.object.unwrap().into(); + let version_id: Option = req.query_params.get("versionId").cloned(); + + Ok(EnableObjectLegalHoldResponse { + headers: resp.headers().clone(), + region, + bucket, + object_name, + version_id, + }) + } +} diff --git a/src/s3/response/get_object.rs b/src/s3/response/get_object.rs index 610884a..b14b53b 100644 --- a/src/s3/response/get_object.rs +++ b/src/s3/response/get_object.rs @@ -13,14 +13,13 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use async_trait::async_trait; -use tokio_stream::StreamExt; - use crate::s3::{ builders::ObjectContent, error::Error, types::{FromS3Response, S3Request}, }; +use async_trait::async_trait; +use futures_util::TryStreamExt; pub struct GetObjectResponse { pub headers: http::HeaderMap, @@ -52,9 +51,7 @@ impl FromS3Response for GetObjectResponse { let content_length = response .content_length() .ok_or(Error::ContentLengthUnknown)?; - let body = response.bytes_stream().map(|result| { - result.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err)) - }); + let body = response.bytes_stream().map_err(std::io::Error::other); let content = ObjectContent::new_from_stream(body, Some(content_length)); diff --git a/src/s3/response/get_object_lock_config.rs b/src/s3/response/get_object_lock_config.rs new file mode 100644 index 0000000..eea2fdd --- /dev/null +++ b/src/s3/response/get_object_lock_config.rs @@ -0,0 +1,57 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, ObjectLockConfig, S3Request}; +use async_trait::async_trait; +use bytes::Buf; +use http::HeaderMap; +use xmltree::Element; + +/// Response of +/// [get_object_lock_config()](crate::s3::client::Client::get_object_lock_config) +/// API +#[derive(Clone, Debug)] +pub struct GetObjectLockConfigResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + pub config: ObjectLockConfig, +} + +#[async_trait] +impl FromS3Response for GetObjectLockConfigResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result<reqwest::Response, Error>, + ) -> Result<Self, Error> { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + let headers = resp.headers().clone(); + let body = resp.bytes().await?; + let root = Element::parse(body.reader())?; + + Ok(GetObjectLockConfigResponse { + headers, + region: req.get_computed_region(), + bucket, + config: ObjectLockConfig::from_xml(&root)?, + }) + } +} diff --git a/src/s3/response/get_object_retention.rs b/src/s3/response/get_object_retention.rs new file mode 100644 index 0000000..dfbc9a7 --- /dev/null +++ b/src/s3/response/get_object_retention.rs @@ -0,0 +1,94 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, RetentionMode, S3Request}; +use crate::s3::utils::{UtcTime, from_iso8601utc, get_option_text}; +use async_trait::async_trait; +use bytes::Buf; +use http::HeaderMap; +use xmltree::Element; + +/// Response of +/// [get_object_retention()](crate::s3::client::Client::get_object_retention) +/// API +#[derive(Clone, Debug)] +pub struct GetObjectRetentionResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + + pub object: String, + pub version_id: Option<String>, + pub retention_mode: Option<RetentionMode>, + pub retain_until_date: Option<UtcTime>, +} + +#[async_trait] +impl FromS3Response for GetObjectRetentionResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result<reqwest::Response, Error>, + ) -> Result<Self, Error> { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + + let region: String = req.get_computed_region(); + let object_name: String = req.object.unwrap().into(); + let version_id: Option<String> = req.query_params.get("versionId").cloned(); + + match resp { + Ok(r) => { + let headers = r.headers().clone(); + let body = r.bytes().await?; + let root = Element::parse(body.reader())?; + let retention_mode = match get_option_text(&root, "Mode") { + Some(v) => Some(RetentionMode::parse(&v)?), + _ => None, + }; + let retain_until_date = match get_option_text(&root, "RetainUntilDate") { + Some(v) => Some(from_iso8601utc(&v)?), + _ => None, + }; + + Ok(GetObjectRetentionResponse { + headers, + region, + bucket, + object: object_name.clone(), + version_id, + retention_mode, + retain_until_date, + }) + } + Err(Error::S3Error(ref err)) + if err.code == Error::NoSuchObjectLockConfiguration.as_str() => + { + Ok(GetObjectRetentionResponse { + headers: HeaderMap::new(), + region, + bucket, + object: object_name.clone(), + version_id, + retention_mode: None, + retain_until_date: None, + }) + } + Err(e) => Err(e), + } + } +} diff --git a/src/s3/response/get_object_tags.rs b/src/s3/response/get_object_tags.rs new file mode 100644 index 0000000..1b31235 --- /dev/null +++ b/src/s3/response/get_object_tags.rs @@ -0,0 +1,75 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use crate::s3::utils::get_text; +use async_trait::async_trait; +use bytes::Buf; +use http::HeaderMap; +use std::collections::HashMap; +use xmltree::Element; + +/// Response of +/// [get_object_tags()](crate::s3::client::Client::get_object_tags) +/// API +#[derive(Clone, Debug)] +pub struct GetObjectTagsResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + pub object: String, + + pub version_id: Option, + pub tags: HashMap, +} + +#[async_trait] +impl FromS3Response for GetObjectTagsResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + let headers: HeaderMap = resp.headers().clone(); + let region: String = req.get_computed_region(); + let object: String = req.object.unwrap().into(); + let version_id: Option = req.query_params.get("versionId").cloned(); //TODO consider taking the version_id + + let body = resp.bytes().await?; + let mut root = Element::parse(body.reader())?; + let element = root + .get_mut_child("TagSet") + .ok_or(Error::XmlError(" tag not found".to_string()))?; + let mut tags = HashMap::new(); + while let Some(v) = element.take_child("Tag") { + tags.insert(get_text(&v, "Key")?, get_text(&v, "Value")?); + } + + Ok(GetObjectTagsResponse { + headers, + region, + bucket, + object, + version_id, + tags, + }) + } +} diff --git a/src/s3/response/is_object_legal_hold_enabled.rs b/src/s3/response/is_object_legal_hold_enabled.rs new file mode 100644 index 0000000..89c5f1c --- /dev/null +++ b/src/s3/response/is_object_legal_hold_enabled.rs @@ -0,0 +1,82 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use crate::s3::utils::get_default_text; +use async_trait::async_trait; +use bytes::Buf; +use http::HeaderMap; +use xmltree::Element; + +/// Response of +/// [is_object_legal_hold_enabled()](crate::s3::client::Client::is_object_legal_hold_enabled) +/// API +#[derive(Clone, Debug)] +pub struct IsObjectLegalHoldEnabledResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + pub object_name: String, + pub version_id: Option, + pub enabled: bool, +} + +#[async_trait] +impl FromS3Response for IsObjectLegalHoldEnabledResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + + let region: String = req.get_computed_region(); + let object_name: String = req.object.unwrap().into(); + let version_id: Option = req.query_params.get("versionId").cloned(); + + match resp { + Ok(r) => { + let headers = r.headers().clone(); + let body = r.bytes().await?; + let root = Element::parse(body.reader())?; + + Ok(IsObjectLegalHoldEnabledResponse { + headers, + region, + bucket, + object_name, + version_id, + enabled: get_default_text(&root, "Status") == "ON", + }) + } + Err(Error::S3Error(ref err)) + if err.code == Error::NoSuchObjectLockConfiguration.as_str() => + { + Ok(IsObjectLegalHoldEnabledResponse { + headers: HeaderMap::new(), + region, + bucket, + object_name, + version_id, + enabled: false, + }) + } + Err(e) => Err(e), + } + } +} diff --git a/src/s3/response/listen_bucket_notification.rs b/src/s3/response/listen_bucket_notification.rs index 5c71db2..8b69dcf 100644 --- a/src/s3/response/listen_bucket_notification.rs +++ b/src/s3/response/listen_bucket_notification.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use futures_util::{Stream, StreamExt, stream}; +use futures_util::{Stream, TryStreamExt, stream}; use http::HeaderMap; use tokio::io::AsyncBufReadExt; use tokio_util::io::StreamReader; @@ -47,10 +47,7 @@ impl FromS3Response let resp = resp?; let headers = resp.headers().clone(); - let body_stream = resp.bytes_stream(); - let body_stream = body_stream - .map(|r| r.map_err(|err| std::io::Error::new(std::io::ErrorKind::Other, err))); - let stream_reader = StreamReader::new(body_stream); + let stream_reader = StreamReader::new(resp.bytes_stream().map_err(std::io::Error::other)); let record_stream = Box::pin(stream::unfold( stream_reader, diff --git a/src/s3/response/make_bucket.rs b/src/s3/response/make_bucket.rs new file mode 100644 index 0000000..0f8c294 --- /dev/null +++ b/src/s3/response/make_bucket.rs @@ -0,0 +1,53 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [make_bucket()](crate::s3::client::Client::make_bucket) +/// API +#[derive(Clone, Debug)] +pub struct MakeBucketResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, +} + +#[async_trait] +impl FromS3Response for MakeBucketResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + let region: String = req.get_computed_region(); + if !req.client.region_map.contains_key(&bucket) { + req.client.region_map.insert(bucket.clone(), region.clone()); + } + + Ok(MakeBucketResponse { + headers: resp.headers().clone(), + region, + bucket, + }) + } +} diff --git a/src/s3/response/put_object.rs b/src/s3/response/put_object.rs index aa476e3..eb733d3 100644 --- a/src/s3/response/put_object.rs +++ b/src/s3/response/put_object.rs @@ -118,8 +118,8 @@ pub type UploadPartResponse2 = PutObjectResponse; #[derive(Debug, Clone)] pub struct PutObjectContentResponse { pub headers: HeaderMap, - pub bucket_name: String, - pub object_name: String, + pub bucket: String, + pub object: String, pub location: String, pub object_size: u64, pub etag: String, diff --git a/src/s3/response/remove_bucket.rs b/src/s3/response/remove_bucket.rs new file mode 100644 index 0000000..f852210 --- /dev/null +++ b/src/s3/response/remove_bucket.rs @@ -0,0 +1,50 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [remove_bucket()](crate::s3::client::Client::remove_bucket) +/// API +#[derive(Clone, Debug)] +pub struct RemoveBucketResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, +} + +#[async_trait] +impl FromS3Response for RemoveBucketResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + req.client.region_map.remove(&bucket); + + Ok(RemoveBucketResponse { + headers: resp.headers().clone(), + region: req.get_computed_region(), + bucket, + }) + } +} diff --git a/src/s3/response/set_bucket_tags.rs b/src/s3/response/set_bucket_tags.rs index 0d1d269..06a210d 100644 --- a/src/s3/response/set_bucket_tags.rs +++ b/src/s3/response/set_bucket_tags.rs @@ -39,7 +39,6 @@ impl FromS3Response for SetBucketTagsResponse { Some(v) => v.to_string(), }; let resp = resp?; - Ok(SetBucketTagsResponse { headers: resp.headers().clone(), region: req.get_computed_region(), diff --git a/src/s3/response/set_object_lock_config.rs b/src/s3/response/set_object_lock_config.rs new file mode 100644 index 0000000..4bb91ea --- /dev/null +++ b/src/s3/response/set_object_lock_config.rs @@ -0,0 +1,48 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [set_object_lock_config_response()](crate::s3::client::Client::set_object_lock_config_response) +/// API +#[derive(Clone, Debug)] +pub struct SetObjectLockConfigResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, +} + +#[async_trait] +impl FromS3Response for SetObjectLockConfigResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + Ok(SetObjectLockConfigResponse { + headers: resp.headers().clone(), + region: req.get_computed_region(), + bucket, + }) + } +} diff --git a/src/s3/response/set_object_retention.rs b/src/s3/response/set_object_retention.rs new file mode 100644 index 0000000..0262fe2 --- /dev/null +++ b/src/s3/response/set_object_retention.rs @@ -0,0 +1,59 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [set_object_retention_response()](crate::s3::client::Client::set_object_retention_response) +/// API +#[derive(Clone, Debug)] +pub struct SetObjectRetentionResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + + pub object: String, + pub version_id: Option, +} + +#[async_trait] +impl FromS3Response for SetObjectRetentionResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + let headers: HeaderMap = resp.headers().clone(); + let region: String = req.get_computed_region(); + let object: String = req.object.unwrap().into(); + let version_id: Option = req.query_params.get("versionId").cloned(); //TODO consider taking the version_id + + Ok(SetObjectRetentionResponse { + headers, + region, + bucket, + object, + version_id, + }) + } +} diff --git a/src/s3/response/set_object_tags.rs b/src/s3/response/set_object_tags.rs new file mode 100644 index 0000000..f7991be --- /dev/null +++ b/src/s3/response/set_object_tags.rs @@ -0,0 +1,59 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::error::Error; +use crate::s3::types::{FromS3Response, S3Request}; +use async_trait::async_trait; +use http::HeaderMap; + +/// Response of +/// [set_object_tags()](crate::s3::client::Client::set_object_tags) +/// API +#[derive(Clone, Debug)] +pub struct SetObjectTagsResponse { + pub headers: HeaderMap, + pub region: String, + pub bucket: String, + + pub object: String, + pub version_id: Option, +} + +#[async_trait] +impl FromS3Response for SetObjectTagsResponse { + async fn from_s3response<'a>( + req: S3Request<'a>, + resp: Result, + ) -> Result { + let bucket: String = match req.bucket { + None => return Err(Error::InvalidBucketName("no bucket specified".to_string())), + Some(v) => v.to_string(), + }; + let resp = resp?; + + let headers: HeaderMap = resp.headers().clone(); + let region: String = req.get_computed_region(); + let object: String = req.object.unwrap().into(); + let version_id: Option = req.query_params.get("versionId").cloned(); //TODO consider taking the version_id + + Ok(SetObjectTagsResponse { + headers, + region, + bucket, + object, + version_id, + }) + } +} diff --git a/src/s3/types.rs b/src/s3/types.rs index a0f3913..d4db9cc 100644 --- a/src/s3/types.rs +++ b/src/s3/types.rs @@ -16,7 +16,7 @@ //! Various types for S3 API requests and responses use super::builders::SegmentedBytes; -use super::client::Client; +use super::client::{Client, DEFAULT_REGION}; use crate::s3::error::Error; use crate::s3::utils::{ Multimap, UtcTime, from_iso8601utc, get_default_text, get_option_text, get_text, to_iso8601utc, @@ -32,7 +32,7 @@ use std::collections::HashMap; use std::fmt; pub struct S3Request<'a> { - client: &'a Client, + pub(crate) client: &'a Client, pub method: Method, pub region: Option<&'a str>, @@ -100,7 +100,7 @@ impl<'a> S3Request<'a> { self.inner_region = if let Some(bucket) = self.bucket { self.client.get_region(bucket, self.region).await? } else { - "us-east-1".to_string() + DEFAULT_REGION.to_string() }; // Execute the API request. 
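For context on the S3Request change above: every call migrated in this patch follows the same flow, where a Client method returns a typed request builder, S3Api::send() executes it as an S3Request, and the caller receives a typed response struct. Below is a minimal usage sketch of that flow, mirroring the examples and tests later in the patch; the helper function name and bucket argument are illustrative, not part of the change.

use minio::s3::Client;
use minio::s3::response::{BucketExistsResponse, MakeBucketResponse};
use minio::s3::types::S3Api;

// Hypothetical helper: ensure a bucket exists using the builder-style API.
async fn ensure_bucket(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error>> {
    // bucket_exists() returns a request builder; send() performs the call and yields a typed response.
    let resp: BucketExistsResponse = client.bucket_exists(bucket).send().await?;
    if !resp.exists {
        // make_bucket() follows the same pattern; on success the client also records the bucket's region.
        let _created: MakeBucketResponse = client.make_bucket(bucket).send().await?;
    }
    Ok(())
}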
@@ -188,7 +188,7 @@ pub struct PartInfo { pub size: u64, } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Contains retention mode information pub enum RetentionMode { GOVERNANCE, @@ -831,21 +831,21 @@ impl SseConfig { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Contains key and value pub struct Tag { pub key: String, pub value: String, } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// And operator contains prefix and tags pub struct AndOperator { pub prefix: Option, pub tags: Option>, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq)] /// Filter information pub struct Filter { pub and_operator: Option, @@ -961,7 +961,7 @@ impl Filter { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Lifecycle rule information pub struct LifecycleRule { pub abort_incomplete_multipart_upload_days_after_initiation: Option, @@ -1099,7 +1099,7 @@ impl LifecycleRule { } } -#[derive(Clone, Debug, Default)] +#[derive(PartialEq, Clone, Debug, Default)] /// Lifecycle configuration pub struct LifecycleConfig { pub rules: Vec, @@ -1357,7 +1357,7 @@ fn to_xml_common_notification_config( data } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Prefix filter rule pub struct PrefixFilterRule { pub value: String, @@ -1367,7 +1367,7 @@ impl PrefixFilterRule { pub const NAME: &'static str = "prefix"; } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Suffix filter rule pub struct SuffixFilterRule { pub value: String, @@ -1377,7 +1377,7 @@ impl SuffixFilterRule { pub const NAME: &'static str = "suffix"; } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Cloud function configuration information pub struct CloudFuncConfig { pub events: Vec, @@ -1428,7 +1428,7 @@ impl CloudFuncConfig { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Queue configuration information pub struct QueueConfig { pub events: Vec, @@ -1479,7 +1479,7 @@ impl QueueConfig { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Topic configuration information pub struct TopicConfig { pub events: Vec, @@ -1530,7 +1530,7 @@ impl TopicConfig { } } -#[derive(Clone, Debug, Default)] +#[derive(PartialEq, Clone, Debug, Default)] /// Notification configuration information pub struct NotificationConfig { pub cloud_func_config_list: Option>, @@ -1621,7 +1621,7 @@ impl NotificationConfig { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Access control translation information pub struct AccessControlTranslation { pub owner: String, @@ -1641,13 +1641,13 @@ impl Default for AccessControlTranslation { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Encryption configuration information pub struct EncryptionConfig { pub replica_kms_key_id: Option, } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Metrics information pub struct Metrics { pub event_threshold_minutes: Option, @@ -1663,7 +1663,7 @@ impl Metrics { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Replication time information pub struct ReplicationTime { pub time_minutes: Option, @@ -1679,7 +1679,7 @@ impl ReplicationTime { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Destination information pub struct Destination { pub bucket_arn: String, @@ -1814,13 +1814,13 @@ impl Destination { } } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Source selection criteria information pub struct SourceSelectionCriteria { pub 
sse_kms_encrypted_objects_status: Option, } -#[derive(Clone, Debug)] +#[derive(PartialEq, Clone, Debug)] /// Replication rule information pub struct ReplicationRule { pub destination: Destination, @@ -1965,7 +1965,7 @@ impl ReplicationRule { } } -#[derive(Clone, Debug, Default)] +#[derive(PartialEq, Clone, Debug, Default)] /// Replication configuration information pub struct ReplicationConfig { pub role: Option, @@ -2010,7 +2010,7 @@ impl ReplicationConfig { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] /// Object lock configuration information pub struct ObjectLockConfig { pub retention_mode: Option, diff --git a/tests/common.rs b/tests/common.rs index 630d013..2b569b6 100644 --- a/tests/common.rs +++ b/tests/common.rs @@ -24,27 +24,27 @@ use tokio::io::AsyncRead; use tokio::time::timeout; use tokio_stream::Stream; -use minio::s3::args::*; use minio::s3::client::Client; use minio::s3::creds::StaticProvider; use minio::s3::http::BaseUrl; +use minio::s3::types::S3Api; pub struct RandReader { - size: usize, + size: u64, } impl RandReader { #[allow(dead_code)] - pub fn new(size: usize) -> RandReader { + pub fn new(size: u64) -> RandReader { RandReader { size } } } impl io::Read for RandReader { fn read(&mut self, buf: &mut [u8]) -> Result { - let bytes_read = match self.size > buf.len() { + let bytes_read: usize = match (self.size as usize) > buf.len() { true => buf.len(), - false => self.size, + false => self.size as usize, }; if bytes_read > 0 { @@ -52,7 +52,7 @@ impl io::Read for RandReader { random.fill_bytes(&mut buf[0..bytes_read]); } - self.size -= bytes_read; + self.size -= bytes_read as u64; Ok(bytes_read) } @@ -254,10 +254,7 @@ impl TestContext { #[allow(dead_code)] pub async fn create_bucket_helper(ctx: &TestContext) -> (String, CleanupGuard) { let bucket_name = rand_bucket_name(); - ctx.client - .make_bucket(&MakeBucketArgs::new(&bucket_name).unwrap()) - .await - .unwrap(); + let _resp = ctx.client.make_bucket(&bucket_name).send().await.unwrap(); let guard = CleanupGuard::new(ctx, &bucket_name); (bucket_name, guard) } @@ -294,8 +291,7 @@ impl Drop for CleanupGuard { // do the actual removal of the bucket match timeout( std::time::Duration::from_secs(60), - ctx.client - .remove_bucket(&RemoveBucketArgs::new(&bucket_name).unwrap()), + ctx.client.remove_and_purge_bucket(&bucket_name), ) .await { @@ -305,7 +301,7 @@ impl Drop for CleanupGuard { } Err(e) => println!("Error removing bucket {}: {:?}", bucket_name, e), }, - Err(_) => println!("Timeout after 15s while removing bucket {}", bucket_name), + Err(_) => println!("Timeout after 60s while removing bucket {}", bucket_name), } }); }) diff --git a/tests/test_bucket_encryption.rs b/tests/test_bucket_encryption.rs index 4615999..a26a0ea 100644 --- a/tests/test_bucket_encryption.rs +++ b/tests/test_bucket_encryption.rs @@ -16,7 +16,10 @@ mod common; use crate::common::{TestContext, create_bucket_helper}; -use minio::s3::response::{DeleteBucketEncryptionResponse, GetBucketEncryptionResponse}; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::{ + DeleteBucketEncryptionResponse, GetBucketEncryptionResponse, SetBucketEncryptionResponse, +}; use minio::s3::types::{S3Api, SseConfig}; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] @@ -28,13 +31,16 @@ async fn set_get_delete_bucket_encryption() { if false { // TODO this gives a runtime error - let _resp = ctx + let resp: SetBucketEncryptionResponse = ctx .client .set_bucket_encryption(&bucket_name) .sse_config(config.clone()) .send() .await 
.unwrap(); + assert_eq!(resp.config, config); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); } let resp: GetBucketEncryptionResponse = ctx @@ -43,15 +49,18 @@ async fn set_get_delete_bucket_encryption() { .send() .await .unwrap(); - assert_eq!(resp.config, config); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let _resp: DeleteBucketEncryptionResponse = ctx + let resp: DeleteBucketEncryptionResponse = ctx .client .delete_bucket_encryption(&bucket_name) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); let resp: GetBucketEncryptionResponse = ctx .client @@ -59,10 +68,8 @@ async fn set_get_delete_bucket_encryption() { .send() .await .unwrap(); - - println!( - "response of getting encryption config: resp.sse_config={:?}", - resp.config - ); assert_eq!(resp.config, SseConfig::default()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + //println!("response of getting encryption config: resp.sse_config={:?}", resp.config); } diff --git a/tests/test_bucket_exists.rs b/tests/test_bucket_exists.rs index 4407f78..d9b49f2 100644 --- a/tests/test_bucket_exists.rs +++ b/tests/test_bucket_exists.rs @@ -16,18 +16,18 @@ mod common; use crate::common::{TestContext, create_bucket_helper}; -use minio::s3::args::BucketExistsArgs; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::BucketExistsResponse; +use minio::s3::types::S3Api; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn bucket_exists() { let ctx = TestContext::new_from_env(); let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; - let exists = ctx - .client - .bucket_exists(&BucketExistsArgs::new(&bucket_name).unwrap()) - .await - .unwrap(); + let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); - assert!(exists); + assert!(resp.exists); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); } diff --git a/tests/test_bucket_lifecycle.rs b/tests/test_bucket_lifecycle.rs index 5461f9f..2c00807 100644 --- a/tests/test_bucket_lifecycle.rs +++ b/tests/test_bucket_lifecycle.rs @@ -16,6 +16,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper}; +use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteBucketLifecycleResponse, GetBucketLifecycleResponse, SetBucketLifecycleResponse, }; @@ -26,33 +27,37 @@ async fn set_get_delete_bucket_lifecycle() { let ctx = TestContext::new_from_env(); let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; - let rules: Vec = vec![LifecycleRule { - abort_incomplete_multipart_upload_days_after_initiation: None, - expiration_date: None, - expiration_days: Some(365), - expiration_expired_object_delete_marker: None, - filter: Filter { - and_operator: None, - prefix: Some(String::from("logs/")), - tag: None, - }, - id: String::from("rule1"), - noncurrent_version_expiration_noncurrent_days: None, - noncurrent_version_transition_noncurrent_days: None, - noncurrent_version_transition_storage_class: None, - status: true, - transition_date: None, - transition_days: None, - transition_storage_class: None, - }]; + let config: LifecycleConfig = LifecycleConfig { + rules: vec![LifecycleRule { + abort_incomplete_multipart_upload_days_after_initiation: None, + expiration_date: None, + expiration_days: Some(365), + expiration_expired_object_delete_marker: None, + filter: Filter { + and_operator: None, + prefix: 
Some(String::from("logs/")), + tag: None, + }, + id: String::from("rule1"), + noncurrent_version_expiration_noncurrent_days: None, + noncurrent_version_transition_noncurrent_days: None, + noncurrent_version_transition_storage_class: None, + status: true, + transition_date: None, + transition_days: None, + transition_storage_class: None, + }], + }; - let _resp: SetBucketLifecycleResponse = ctx + let resp: SetBucketLifecycleResponse = ctx .client .set_bucket_lifecycle(&bucket_name) - .life_cycle_config(LifecycleConfig { rules }) + .life_cycle_config(config.clone()) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); //println!("response of setting lifecycle: resp={:?}", resp); if false { @@ -63,8 +68,10 @@ async fn set_get_delete_bucket_lifecycle() { .send() .await .unwrap(); + assert_eq!(resp.config, config); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); println!("response of getting lifecycle: resp={:?}", resp); - //assert_eq!(resp.config, rules.to_string()); } let _resp: DeleteBucketLifecycleResponse = ctx diff --git a/tests/test_bucket_notification.rs b/tests/test_bucket_notification.rs index d0a2c86..06a918a 100644 --- a/tests/test_bucket_notification.rs +++ b/tests/test_bucket_notification.rs @@ -1,6 +1,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper}; +use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteBucketNotificationResponse, GetBucketNotificationResponse, SetBucketNotificationResponse, }; @@ -22,7 +23,7 @@ async fn set_get_delete_bucket_notification() { String::from("s3:ObjectCreated:Put"), String::from("s3:ObjectCreated:Copy"), ], - id: None, + id: Some("".to_string()), //TODO or should this be NONE?? prefix_filter_rule: Some(PrefixFilterRule { value: String::from("images"), }), @@ -37,11 +38,13 @@ async fn set_get_delete_bucket_notification() { let resp: SetBucketNotificationResponse = ctx .client .set_bucket_notification(&bucket_name) - .notification_config(config) + .notification_config(config.clone()) .send() .await .unwrap(); - println!("response of setting notification: resp={:?}", resp); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + //println!("response of setting notification: resp={:?}", resp); let resp: GetBucketNotificationResponse = ctx .client @@ -49,7 +52,10 @@ async fn set_get_delete_bucket_notification() { .send() .await .unwrap(); - println!("response of getting notification: resp={:?}", resp); + assert_eq!(resp.config, config); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + //println!("response of getting notification: resp={:?}", resp); assert_eq!(resp.config.queue_config_list.as_ref().unwrap().len(), 1); assert!( @@ -89,7 +95,9 @@ async fn set_get_delete_bucket_notification() { .send() .await .unwrap(); - println!("response of deleting notification: resp={:?}", resp); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + //println!("response of deleting notification: resp={:?}", resp); let resp: GetBucketNotificationResponse = ctx .client @@ -97,5 +105,7 @@ async fn set_get_delete_bucket_notification() { .send() .await .unwrap(); - assert!(resp.config.queue_config_list.is_none()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.config, NotificationConfig::default()); } diff --git a/tests/test_bucket_policy.rs b/tests/test_bucket_policy.rs index ce0fde3..4bfbafb 
100644 --- a/tests/test_bucket_policy.rs +++ b/tests/test_bucket_policy.rs @@ -1,4 +1,8 @@ use crate::common::{TestContext, create_bucket_helper}; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::{ + DeleteBucketPolicyResponse, GetBucketPolicyResponse, SetBucketPolicyResponse, +}; use minio::s3::types::S3Api; mod common; @@ -32,15 +36,17 @@ async fn set_get_delete_bucket_policy() { "# .replace("", &bucket_name); - let _resp = ctx + let resp: SetBucketPolicyResponse = ctx .client .set_bucket_policy(&bucket_name) .config(config.clone()) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let resp = ctx + let resp: GetBucketPolicyResponse = ctx .client .get_bucket_policy(&bucket_name) .send() @@ -50,19 +56,25 @@ async fn set_get_delete_bucket_policy() { // println!("response of getting policy: resp.config={:?}", resp.config); // assert_eq!(&resp.config, &config); assert!(!resp.config.is_empty()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let _resp = ctx + let resp: DeleteBucketPolicyResponse = ctx .client .delete_bucket_policy(&bucket_name) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let resp = ctx + let resp: GetBucketPolicyResponse = ctx .client .get_bucket_policy(&bucket_name) .send() .await .unwrap(); assert_eq!(resp.config, "{}"); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); } diff --git a/tests/test_bucket_replication.rs b/tests/test_bucket_replication.rs index f490850..8520f96 100644 --- a/tests/test_bucket_replication.rs +++ b/tests/test_bucket_replication.rs @@ -17,9 +17,10 @@ mod common; use crate::common::{TestContext, create_bucket_helper}; use minio::s3::builders::VersioningStatus; +use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ - DeleteBucketReplicationResponse, GetBucketVersioningResponse, SetBucketReplicationResponse, - SetBucketVersioningResponse, + DeleteBucketReplicationResponse, GetBucketReplicationResponse, GetBucketVersioningResponse, + SetBucketReplicationResponse, SetBucketVersioningResponse, }; use minio::s3::types::{ AndOperator, Destination, Filter, ReplicationConfig, ReplicationRule, S3Api, @@ -66,34 +67,49 @@ async fn set_get_delete_bucket_replication() { }], }; - let _resp: SetBucketVersioningResponse = ctx + let resp: SetBucketVersioningResponse = ctx .client .set_bucket_versioning(&bucket_name) .versioning_status(VersioningStatus::Enabled) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - if false { - // TODO panic: called `Result::unwrap()` on an `Err` value: S3Error(ErrorResponse { code: "XMinioAdminRemoteTargetNotFoundError", message: "The remote target does not exist", - let resp: SetBucketReplicationResponse = ctx - .client - .set_bucket_replication(&bucket_name) - .replication_config(config) - .send() - .await - .unwrap(); - println!("response of setting replication: resp={:?}", resp); - } let resp: GetBucketVersioningResponse = ctx .client .get_bucket_versioning(&bucket_name) .send() .await .unwrap(); - println!("response of getting replication: resp={:?}", resp); + assert_eq!(resp.status, Some(VersioningStatus::Enabled)); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); if false { + // TODO panic: called `Result::unwrap()` on an `Err` value: S3Error(ErrorResponse { code: "XMinioAdminRemoteTargetNotFoundError", message: 
"The remote target does not exist", + let resp: SetBucketReplicationResponse = ctx + .client + .set_bucket_replication(&bucket_name) + .replication_config(config.clone()) + .send() + .await + .unwrap(); + println!("response of setting replication: resp={:?}", resp); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + + let resp: GetBucketReplicationResponse = ctx + .client + .get_bucket_replication(&bucket_name) + .send() + .await + .unwrap(); + //assert_eq!(resp.config, config); //TODO + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + // TODO called `Result::unwrap()` on an `Err` value: S3Error(ErrorResponse { code: "XMinioAdminRemoteTargetNotFoundError", message: "The remote target does not exist", let resp: DeleteBucketReplicationResponse = ctx .client diff --git a/tests/test_bucket_tags.rs b/tests/test_bucket_tags.rs index 8c4efc7..4c81e43 100644 --- a/tests/test_bucket_tags.rs +++ b/tests/test_bucket_tags.rs @@ -16,6 +16,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper}; +use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{DeleteBucketTagsResponse, GetBucketTagsResponse, SetBucketTagsResponse}; use minio::s3::types::S3Api; use std::collections::HashMap; @@ -30,13 +31,15 @@ async fn set_get_delete_bucket_tags() { (String::from("User"), String::from("jsmith")), ]); - let _resp: SetBucketTagsResponse = ctx + let resp: SetBucketTagsResponse = ctx .client .set_bucket_tags(&bucket_name) .tags(tags.clone()) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); let resp: GetBucketTagsResponse = ctx .client @@ -44,20 +47,26 @@ async fn set_get_delete_bucket_tags() { .send() .await .unwrap(); - assert_eq!(resp.tags, resp.tags); + assert_eq!(resp.tags, tags); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let _resp: DeleteBucketTagsResponse = ctx + let resp: DeleteBucketTagsResponse = ctx .client .delete_bucket_tags(&bucket_name) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let resp = ctx + let resp: GetBucketTagsResponse = ctx .client .get_bucket_tags(&bucket_name) .send() .await .unwrap(); assert!(resp.tags.is_empty()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); } diff --git a/tests/test_bucket_versioning.rs b/tests/test_bucket_versioning.rs index f3dfd48..8b53b2b 100644 --- a/tests/test_bucket_versioning.rs +++ b/tests/test_bucket_versioning.rs @@ -17,6 +17,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper}; use minio::s3::builders::VersioningStatus; +use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{GetBucketVersioningResponse, SetBucketVersioningResponse}; use minio::s3::types::S3Api; @@ -25,13 +26,15 @@ async fn set_get_delete_bucket_versioning() { let ctx = TestContext::new_from_env(); let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; - let _resp: SetBucketVersioningResponse = ctx + let resp: SetBucketVersioningResponse = ctx .client .set_bucket_versioning(&bucket_name) .versioning_status(VersioningStatus::Enabled) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); let resp: GetBucketVersioningResponse = ctx .client @@ -40,14 +43,18 @@ async fn set_get_delete_bucket_versioning() { .await .unwrap(); assert_eq!(resp.status, Some(VersioningStatus::Enabled)); + assert_eq!(resp.bucket, 
bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let _resp: SetBucketVersioningResponse = ctx + let resp: SetBucketVersioningResponse = ctx .client .set_bucket_versioning(&bucket_name) .versioning_status(VersioningStatus::Suspended) .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); let resp: GetBucketVersioningResponse = ctx .client @@ -56,4 +63,6 @@ async fn set_get_delete_bucket_versioning() { .await .unwrap(); assert_eq!(resp.status, Some(VersioningStatus::Suspended)); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); } diff --git a/tests/test_create_delete_bucket.rs b/tests/test_create_delete_bucket.rs index 3a2d96a..d7d9850 100644 --- a/tests/test_create_delete_bucket.rs +++ b/tests/test_create_delete_bucket.rs @@ -16,34 +16,28 @@ mod common; use crate::common::{TestContext, rand_bucket_name}; -use minio::s3::args::{BucketExistsArgs, MakeBucketArgs, RemoveBucketArgs}; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::{BucketExistsResponse, RemoveBucketResponse}; +use minio::s3::types::S3Api; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn create_delete_bucket() { let ctx = TestContext::new_from_env(); let bucket_name = rand_bucket_name(); - ctx.client - .make_bucket(&MakeBucketArgs::new(&bucket_name).unwrap()) - .await - .unwrap(); + ctx.client.make_bucket(&bucket_name).send().await.unwrap(); - let exists = ctx - .client - .bucket_exists(&BucketExistsArgs::new(&bucket_name).unwrap()) - .await - .unwrap(); - assert!(exists); + let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); + assert!(resp.exists); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - ctx.client - .remove_bucket(&RemoveBucketArgs::new(&bucket_name).unwrap()) - .await - .unwrap(); + let resp: RemoveBucketResponse = ctx.client.remove_bucket(&bucket_name).send().await.unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let exists = ctx - .client - .bucket_exists(&BucketExistsArgs::new(&bucket_name).unwrap()) - .await - .unwrap(); - assert!(!exists); + let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); + assert!(!resp.exists); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, ""); } diff --git a/tests/test_get_presigned_object_url.rs b/tests/test_get_presigned_object_url.rs index 2b51e33..198fa75 100644 --- a/tests/test_get_presigned_object_url.rs +++ b/tests/test_get_presigned_object_url.rs @@ -18,6 +18,8 @@ mod common; use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use http::Method; use minio::s3::args::GetPresignedObjectUrlArgs; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::GetPresignedObjectUrlResponse; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn get_presigned_object_url() { @@ -25,7 +27,7 @@ async fn get_presigned_object_url() { let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; let object_name = rand_object_name(); - let resp = ctx + let resp: GetPresignedObjectUrlResponse = ctx .client .get_presigned_object_url( &GetPresignedObjectUrlArgs::new(&bucket_name, &object_name, Method::GET).unwrap(), @@ -33,4 +35,7 @@ async fn get_presigned_object_url() { .await .unwrap(); assert!(resp.url.contains("X-Amz-Signature=")); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.region, 
DEFAULT_REGION); } diff --git a/tests/test_get_presigned_post_form_data.rs b/tests/test_get_presigned_post_form_data.rs index 90f019d..a2e208b 100644 --- a/tests/test_get_presigned_post_form_data.rs +++ b/tests/test_get_presigned_post_form_data.rs @@ -18,6 +18,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use minio::s3::args::PostPolicy; use minio::s3::utils::utc_now; +use std::collections::HashMap; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn get_presigned_post_form_data() { @@ -33,11 +34,15 @@ .add_content_length_range_condition(1024 * 1024, 4 * 1024 * 1024) .unwrap(); - let form_data = ctx + let form_data: HashMap<String, String> = ctx .client .get_presigned_post_form_data(&policy) .await .unwrap(); + //println!("form_data={:?}", &form_data); assert!(form_data.contains_key("x-amz-signature")); assert!(form_data.contains_key("policy")); + assert!(form_data.contains_key("x-amz-date")); + assert!(form_data.contains_key("x-amz-algorithm")); + assert!(form_data.contains_key("x-amz-credential")); } diff --git a/tests/test_list_buckets.rs b/tests/test_list_buckets.rs index a9b5ac9..eacf40b 100644 --- a/tests/test_list_buckets.rs +++ b/tests/test_list_buckets.rs @@ -1,4 +1,5 @@ use crate::common::{CleanupGuard, TestContext, create_bucket_helper}; +use minio::s3::response::ListBucketsResponse; // MinIO Rust Library for Amazon S3 Compatible Cloud Storage // Copyright 2025 MinIO, Inc. // @@ -34,7 +35,7 @@ async fn list_buckets() { assert_eq!(names.len(), N_BUCKETS); let mut count = 0; - let resp = ctx.client.list_buckets().send().await.unwrap(); + let resp: ListBucketsResponse = ctx.client.list_buckets().send().await.unwrap(); for bucket in resp.buckets.iter() { if names.contains(&bucket.name) { diff --git a/tests/test_list_objects.rs b/tests/test_list_objects.rs index 808e82b..1339979 100644 --- a/tests/test_list_objects.rs +++ b/tests/test_list_objects.rs @@ -17,6 +17,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use minio::s3::builders::ObjectToDelete; +use minio::s3::response::PutObjectContentResponse; use minio::s3::types::ToStream; use tokio_stream::StreamExt; @@ -29,11 +30,13 @@ async fn list_objects() { let mut names: Vec<String> = Vec::new(); for _ in 1..=N_OBJECTS { let object_name = rand_object_name(); - ctx.client - .put_object_content(&bucket_name, &object_name, "") + let resp: PutObjectContentResponse = ctx + .client + .put_object_content(&bucket_name, &object_name, "hello world") .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); names.push(object_name); } @@ -59,6 +62,7 @@ async fn list_objects() { .verbose_mode(true) .to_stream() .await; + while let Some(item) = resp.next().await { let res = item.unwrap(); for obj in res.result.iter() { diff --git a/tests/test_object_legal_hold.rs b/tests/test_object_legal_hold.rs new file mode 100644 index 0000000..c4d6521 --- /dev/null +++ b/tests/test_object_legal_hold.rs @@ -0,0 +1,106 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod common; + +use crate::common::{CleanupGuard, TestContext, rand_bucket_name, rand_object_name}; +use bytes::Bytes; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::{ + DisableObjectLegalHoldResponse, EnableObjectLegalHoldResponse, + IsObjectLegalHoldEnabledResponse, MakeBucketResponse, PutObjectContentResponse, +}; +use minio::s3::types::S3Api; + +#[tokio::test(flavor = "multi_thread", worker_threads = 10)] +async fn object_legal_hold() { + let ctx = TestContext::new_from_env(); + let bucket_name: String = rand_bucket_name(); + let _resp: MakeBucketResponse = ctx + .client + .make_bucket(&bucket_name) + .object_lock(true) + .send() + .await + .unwrap(); + let _cleanup = CleanupGuard::new(&ctx, &bucket_name); + let object_name = rand_object_name(); + + let data = Bytes::from("hello, world".to_string().into_bytes()); + let resp: PutObjectContentResponse = ctx + .client + .put_object_content(&bucket_name, &object_name, data.clone()) + .send() + .await + .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.object_size, data.len() as u64); + //println!("response of put object content: resp={:?}", resp); + + let resp: DisableObjectLegalHoldResponse = ctx + .client + .disable_object_legal_hold(&bucket_name) + .object(object_name.clone()) + .send() + .await + .unwrap(); + //println!("response of setting object legal hold: resp={:?}", resp); + assert_eq!(resp.object_name, object_name); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.version_id, None); + + let resp: IsObjectLegalHoldEnabledResponse = ctx + .client + .is_object_legal_hold_enabled(&bucket_name) + .object(object_name.clone()) + .send() + .await + .unwrap(); + //println!("response of getting object legal hold: resp={:?}", resp); + assert!(!resp.enabled); + assert_eq!(resp.object_name, object_name); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.version_id, None); + + let resp: EnableObjectLegalHoldResponse = ctx + .client + .enable_object_legal_hold(&bucket_name) + .object(object_name.clone()) + .send() + .await + .unwrap(); + //println!("response of setting object legal hold: resp={:?}", resp); + assert_eq!(resp.object_name, object_name); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.version_id, None); + + let resp: IsObjectLegalHoldEnabledResponse = ctx + .client + .is_object_legal_hold_enabled(&bucket_name) + .object(object_name.clone()) + .send() + .await + .unwrap(); + //println!("response of getting object legal hold: resp={:?}", resp); + assert!(resp.enabled); + assert_eq!(resp.object_name, object_name); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.version_id, None); +} diff --git a/tests/test_object_lock_config.rs b/tests/test_object_lock_config.rs index dfdc036..968991f 100644 --- a/tests/test_object_lock_config.rs +++ b/tests/test_object_lock_config.rs @@ -16,54 +16,69 @@ mod common; use crate::common::{CleanupGuard, 
TestContext, rand_bucket_name}; -use minio::s3::args::{ - DeleteObjectLockConfigArgs, GetObjectLockConfigArgs, MakeBucketArgs, SetObjectLockConfigArgs, +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::{ + DeleteObjectLockConfigResponse, GetObjectLockConfigResponse, SetObjectLockConfigResponse, }; -use minio::s3::types::{ObjectLockConfig, RetentionMode}; +use minio::s3::types::{ObjectLockConfig, RetentionMode, S3Api}; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_object_lock_config() { let ctx = TestContext::new_from_env(); - let bucket_name = rand_bucket_name(); - - let mut args = MakeBucketArgs::new(&bucket_name).unwrap(); - args.object_lock = true; - ctx.client.make_bucket(&args).await.unwrap(); + let bucket_name: String = rand_bucket_name(); + ctx.client + .make_bucket(&bucket_name) + .object_lock(true) + .send() + .await + .unwrap(); let _cleanup = CleanupGuard::new(&ctx, &bucket_name); - ctx.client - .set_object_lock_config( - &SetObjectLockConfigArgs::new( - &bucket_name, - &ObjectLockConfig::new(RetentionMode::GOVERNANCE, Some(7), None).unwrap(), - ) - .unwrap(), - ) - .await - .unwrap(); + const DURATION_DAYS: i32 = 7; + let config = + ObjectLockConfig::new(RetentionMode::GOVERNANCE, Some(DURATION_DAYS), None).unwrap(); - let resp = ctx + let resp: SetObjectLockConfigResponse = ctx .client - .get_object_lock_config(&GetObjectLockConfigArgs::new(&bucket_name).unwrap()) + .set_object_lock_config(&bucket_name) + .config(config) + .send() .await .unwrap(); - assert!(match resp.config.retention_mode { - Some(r) => matches!(r, RetentionMode::GOVERNANCE), - _ => false, - }); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - assert_eq!(resp.config.retention_duration_days, Some(7)); + let resp: GetObjectLockConfigResponse = ctx + .client + .get_object_lock_config(&bucket_name) + .send() + .await + .unwrap(); + assert_eq!( + resp.config.retention_mode.unwrap(), + RetentionMode::GOVERNANCE + ); + assert_eq!(resp.config.retention_duration_days, Some(DURATION_DAYS)); assert!(resp.config.retention_duration_years.is_none()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - ctx.client - .delete_object_lock_config(&DeleteObjectLockConfigArgs::new(&bucket_name).unwrap()) + let resp: DeleteObjectLockConfigResponse = ctx + .client + .delete_object_lock_config(&bucket_name) + .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let resp = ctx + let resp: GetObjectLockConfigResponse = ctx .client - .get_object_lock_config(&GetObjectLockConfigArgs::new(&bucket_name).unwrap()) + .get_object_lock_config(&bucket_name) + .send() .await .unwrap(); assert!(resp.config.retention_mode.is_none()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); } diff --git a/tests/test_object_retention.rs b/tests/test_object_retention.rs index 5180deb..27daa0a 100644 --- a/tests/test_object_retention.rs +++ b/tests/test_object_retention.rs @@ -17,26 +17,33 @@ mod common; use crate::common::{CleanupGuard, TestContext, rand_bucket_name, rand_object_name}; use common::RandSrc; -use minio::s3::args::{GetObjectRetentionArgs, MakeBucketArgs, SetObjectRetentionArgs}; use minio::s3::builders::ObjectContent; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::{ + GetObjectRetentionResponse, MakeBucketResponse, PutObjectContentResponse, RemoveObjectResponse, + SetObjectRetentionResponse, +}; use 
minio::s3::types::{RetentionMode, S3Api}; use minio::s3::utils::{to_iso8601utc, utc_now}; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn object_retention() { let ctx = TestContext::new_from_env(); - let bucket_name = rand_bucket_name(); - - let mut args = MakeBucketArgs::new(&bucket_name).unwrap(); - args.object_lock = true; - ctx.client.make_bucket(&args).await.unwrap(); + let bucket_name: String = rand_bucket_name(); + let resp: MakeBucketResponse = ctx + .client + .make_bucket(&bucket_name) + .object_lock(true) + .send() + .await + .unwrap(); let _cleanup = CleanupGuard::new(&ctx, &bucket_name); - + assert_eq!(resp.bucket, bucket_name); let object_name = rand_object_name(); let size = 16_u64; - let obj_resp = ctx + let resp: PutObjectContentResponse = ctx .client .put_object_content( &bucket_name, @@ -46,45 +53,70 @@ async fn object_retention() { .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.object_size, size); + //assert_eq!(resp.version_id, None); + assert_eq!(resp.location, ""); + //assert_eq!(resp.etag, ""); let retain_until_date = utc_now() + chrono::Duration::days(1); - let args = SetObjectRetentionArgs::new( - &bucket_name, - &object_name, - Some(RetentionMode::GOVERNANCE), - Some(retain_until_date), - ) - .unwrap(); - - ctx.client.set_object_retention(&args).await.unwrap(); - - let resp = ctx + let obj_resp: SetObjectRetentionResponse = ctx .client - .get_object_retention(&GetObjectRetentionArgs::new(&bucket_name, &object_name).unwrap()) + .set_object_retention(&bucket_name) + .object(object_name.clone()) + .retention_mode(Some(RetentionMode::GOVERNANCE)) + .retain_until_date(Some(retain_until_date)) + .send() .await .unwrap(); - assert!(match resp.retention_mode { - Some(v) => matches!(v, RetentionMode::GOVERNANCE), - _ => false, - }); - assert!(match resp.retain_until_date { - Some(v) => to_iso8601utc(v) == to_iso8601utc(retain_until_date), - _ => false, - },); + assert_eq!(obj_resp.bucket, bucket_name); + assert_eq!(obj_resp.version_id, None); + assert_eq!(obj_resp.region, DEFAULT_REGION); + assert_eq!(obj_resp.object, object_name); - let mut args = SetObjectRetentionArgs::new(&bucket_name, &object_name, None, None).unwrap(); - args.bypass_governance_mode = true; - ctx.client.set_object_retention(&args).await.unwrap(); - - let resp = ctx + let resp: GetObjectRetentionResponse = ctx .client - .get_object_retention(&GetObjectRetentionArgs::new(&bucket_name, &object_name).unwrap()) + .get_object_retention(&bucket_name) + .object(object_name.clone()) + .send() + .await + .unwrap(); + assert_eq!(resp.retention_mode.unwrap(), RetentionMode::GOVERNANCE); + assert_eq!( + to_iso8601utc(resp.retain_until_date.unwrap()), + to_iso8601utc(retain_until_date) + ); + + let resp: SetObjectRetentionResponse = ctx + .client + .set_object_retention(&bucket_name) + .object(object_name.clone()) + .bypass_governance_mode(true) + .send() + .await + .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.version_id, None); + assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.object, object_name); + + let resp: GetObjectRetentionResponse = ctx + .client + .get_object_retention(&bucket_name) + .object(object_name.clone()) + .send() .await .unwrap(); assert!(resp.retention_mode.is_none()); assert!(resp.retain_until_date.is_none()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.version_id, None); + assert_eq!(resp.region, DEFAULT_REGION); + assert_eq!(resp.object, 
object_name); - ctx.client + let resp: RemoveObjectResponse = ctx + .client .remove_object( &bucket_name, (object_name.as_str(), obj_resp.version_id.as_deref()), @@ -92,4 +124,6 @@ async fn object_retention() { .send() .await .unwrap(); + //assert_eq!(resp.version_id, None); + assert!(resp.is_delete_marker); } diff --git a/tests/test_object_tags.rs b/tests/test_object_tags.rs index f65845d..9c7f988 100644 --- a/tests/test_object_tags.rs +++ b/tests/test_object_tags.rs @@ -17,8 +17,12 @@ mod common; use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use common::RandSrc; -use minio::s3::args::{DeleteObjectTagsArgs, GetObjectTagsArgs, SetObjectTagsArgs}; use minio::s3::builders::ObjectContent; +use minio::s3::client::DEFAULT_REGION; +use minio::s3::response::{ + DeleteObjectTagsResponse, GetObjectTagsResponse, PutObjectContentResponse, + RemoveObjectResponse, SetObjectTagsResponse, +}; use minio::s3::types::S3Api; use std::collections::HashMap; @@ -29,7 +33,8 @@ async fn object_tags() { let object_name = rand_object_name(); let size = 16_u64; - ctx.client + let resp: PutObjectContentResponse = ctx + .client .put_object_content( &bucket_name, &object_name, @@ -38,39 +43,74 @@ async fn object_tags() { .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.object_size, size); + assert_eq!(resp.version_id, None); + assert_eq!(&resp.location, ""); let tags = HashMap::from([ (String::from("Project"), String::from("Project One")), (String::from("User"), String::from("jsmith")), ]); - ctx.client - .set_object_tags(&SetObjectTagsArgs::new(&bucket_name, &object_name, &tags).unwrap()) - .await - .unwrap(); - - let resp = ctx + let resp: SetObjectTagsResponse = ctx .client - .get_object_tags(&GetObjectTagsArgs::new(&bucket_name, &object_name).unwrap()) + .set_object_tags(&bucket_name) + .object(object_name.clone()) + .tags(tags.clone()) + .send() .await .unwrap(); - assert!(resp.tags.len() == tags.len() && resp.tags.keys().all(|k| tags.contains_key(k))); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.version_id, None); + assert_eq!(resp.region, DEFAULT_REGION); - ctx.client - .delete_object_tags(&DeleteObjectTagsArgs::new(&bucket_name, &object_name).unwrap()) - .await - .unwrap(); - - let resp = ctx + let resp: GetObjectTagsResponse = ctx .client - .get_object_tags(&GetObjectTagsArgs::new(&bucket_name, &object_name).unwrap()) + .get_object_tags(&bucket_name) + .object(object_name.clone()) + .send() + .await + .unwrap(); + assert_eq!(resp.tags, tags); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.version_id, None); + assert_eq!(resp.region, DEFAULT_REGION); + + let resp: DeleteObjectTagsResponse = ctx + .client + .delete_object_tags(&bucket_name) + .object(object_name.clone()) + .send() + .await + .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.version_id, None); + assert_eq!(resp.region, DEFAULT_REGION); + + let resp: GetObjectTagsResponse = ctx + .client + .get_object_tags(&bucket_name.clone()) + .object(object_name.clone()) + .send() .await .unwrap(); assert!(resp.tags.is_empty()); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.version_id, None); + assert_eq!(resp.region, DEFAULT_REGION); - ctx.client + let resp: RemoveObjectResponse = ctx + .client .remove_object(&bucket_name, object_name.as_str()) 
.send() .await .unwrap(); + assert_eq!(resp.version_id, None); + assert!(!resp.is_delete_marker) } diff --git a/tests/test_put_object_content.rs b/tests/test_put_object_content.rs index 70d8d6c..9b496b8 100644 --- a/tests/test_put_object_content.rs +++ b/tests/test_put_object_content.rs @@ -125,7 +125,7 @@ async fn put_object_content() { .stat_object(&StatObjectArgs::new(&bucket_name, &object_name).unwrap()) .await .unwrap(); - assert_eq!(resp.size, *size as usize); + assert_eq!(resp.size, *size); assert_eq!(resp.etag, etag); assert_eq!( resp.headers.get(header::CONTENT_TYPE).unwrap(), @@ -159,7 +159,7 @@ async fn put_object_content() { .stat_object(&StatObjectArgs::new(&bucket_name, &object_name).unwrap()) .await .unwrap(); - assert_eq!(resp.size, *size as usize); + assert_eq!(resp.size, *size); assert_eq!(resp.etag, etag); ctx.client .remove_object(&bucket_name, object_name.as_str()) @@ -212,7 +212,7 @@ async fn put_object_content_2() { .stat_object(&StatObjectArgs::new(&test_bucket, &object_name).unwrap()) .await .unwrap(); - assert_eq!(resp.size, sizes[idx] as usize); + assert_eq!(resp.size, sizes[idx]); assert_eq!(resp.etag, etag); client .remove_object(&test_bucket, object_name.as_str()) diff --git a/tests/test_remove_objects.rs b/tests/test_remove_objects.rs index 520edf3..0990696 100644 --- a/tests/test_remove_objects.rs +++ b/tests/test_remove_objects.rs @@ -17,6 +17,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use minio::s3::builders::ObjectToDelete; +use minio::s3::response::PutObjectContentResponse; use minio::s3::types::ToStream; use tokio_stream::StreamExt; @@ -28,11 +29,14 @@ async fn remove_objects() { let mut names: Vec<String> = Vec::new(); for _ in 1..=3 { let object_name = rand_object_name(); - ctx.client + let resp: PutObjectContentResponse = ctx + .client .put_object_content(&bucket_name, &object_name, "") .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); names.push(object_name); } let del_items: Vec<ObjectToDelete> = names diff --git a/tests/test_select_object_content.rs b/tests/test_select_object_content.rs index 977da27..75d517d 100644 --- a/tests/test_select_object_content.rs +++ b/tests/test_select_object_content.rs @@ -17,6 +17,7 @@ mod common; use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use minio::s3::args::SelectObjectContentArgs; +use minio::s3::response::RemoveObjectResponse; use minio::s3::types::{ CsvInputSerialization, CsvOutputSerialization, FileHeaderInfo, QuoteFields, S3Api, SelectRequest, @@ -80,9 +81,11 @@ async fn select_object_content() { got += core::str::from_utf8(&buf[..size]).unwrap(); } assert_eq!(got, data); - ctx.client + let resp: RemoveObjectResponse = ctx + .client .remove_object(&bucket_name, object_name.as_str()) .send() .await .unwrap(); + assert!(!resp.is_delete_marker); } diff --git a/tests/test_upload_download_object.rs b/tests/test_upload_download_object.rs index 2482d17..1c0f54b 100644 --- a/tests/test_upload_download_object.rs +++ b/tests/test_upload_download_object.rs @@ -16,6 +16,7 @@ mod common; use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name}; +use minio::s3::response::PutObjectContentResponse; use minio::s3::types::S3Api; use sha2::{Digest, Sha256}; use std::path::PathBuf; @@ -34,12 +35,13 @@ async fn upload_download_object() { let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; let object_name = rand_object_name(); - let size = 16_usize; + let size = 16_u64; let mut file 
= fs::File::create(&object_name).unwrap(); io::copy(&mut RandReader::new(size), &mut file).unwrap(); file.sync_all().unwrap(); - ctx.client + let resp: PutObjectContentResponse = ctx + .client .put_object_content( &bucket_name, &object_name, @@ -48,6 +50,9 @@ async fn upload_download_object() { .send() .await .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); + assert_eq!(resp.object_size, size); let filename = rand_object_name(); let get_obj_rsp = ctx @@ -78,7 +83,7 @@ async fn upload_download_object() { .unwrap(); let object_name = rand_object_name(); - let size: usize = 16 + 5 * 1024 * 1024; + let size: u64 = 16 + 5 * 1024 * 1024; let mut file = fs::File::create(&object_name).unwrap(); io::copy(&mut RandReader::new(size), &mut file).unwrap(); file.sync_all().unwrap();
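
Taken together, the test changes above all follow the same builder-style request flow: each operation is started on the client, optionally refined with builder setters, finished with .send(), and awaited, and the typed response structs (BucketExistsResponse, PutObjectContentResponse, RemoveObjectResponse, ...) expose the bucket, region, exists, object_size and similar fields that the new assertions read. The snippet below is a minimal sketch of that flow, not part of the patch: it assumes an already-configured minio::s3::Client (the tests obtain one via TestContext::new_from_env()), and the roundtrip helper and the bucket/object names are illustrative placeholders only.

use minio::s3::Client;
use minio::s3::error::Error;
use minio::s3::response::{BucketExistsResponse, PutObjectContentResponse};
use minio::s3::types::S3Api; // brings send() into scope for the request builders

// Hypothetical helper, for illustration only: create the bucket if needed,
// upload a small object, verify the reported size, then clean up.
async fn roundtrip(client: &Client) -> Result<(), Error> {
    let bucket = "example-bucket"; // placeholder name
    let object = "example-object"; // placeholder name

    // bucket_exists() now yields a response struct instead of a bare bool.
    let resp: BucketExistsResponse = client.bucket_exists(bucket).send().await?;
    if !resp.exists {
        client.make_bucket(bucket).send().await?;
    }

    // put_object_content() reports the uploaded size on the response.
    let resp: PutObjectContentResponse = client
        .put_object_content(bucket, object, "hello world")
        .send()
        .await?;
    assert_eq!(resp.object_size, "hello world".len() as u64);

    // Remove the object, then the bucket.
    client.remove_object(bucket, object).send().await?;
    client.remove_bucket(bucket).send().await?;
    Ok(())
}

The practical consequence for the tests is visible throughout the diff: instead of constructing *Args values and asserting on loose return values, each test binds the concrete *Response type and checks bucket, region, exists, and version_id directly.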