diff --git a/Cargo.toml b/Cargo.toml index 8a9fa67..624262c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,7 +11,7 @@ keywords = ["object-storage", "minio", "s3"] categories = ["api-bindings", "web-programming::http-client"] [dependencies.reqwest] -version = "0.12.18" +version = "0.12.22" default-features = false features = ["stream"] @@ -51,19 +51,20 @@ regex = "1.11.1" ring = { version = "0.17.14", optional = true, default-features = false, features = ["alloc"] } serde = { version = "1.0.219", features = ["derive"] } serde_json = "1.0.140" -sha2 = { version = "0.10.8", optional = true } +sha2 = { version = "0.10.9", optional = true } urlencoding = "2.1.3" xmltree = "0.11.0" futures = "0.3.31" http = "1.3.1" +thiserror = "2.0.12" [dev-dependencies] -tokio = { version = "1.45.1", features = ["full"] } +tokio = { version = "1.47.1", features = ["full"] } minio_common = { path = "./common" } async-std = { version = "1.13.1", features = ["attributes", "tokio1"] } -clap = { version = "4.5.40", features = ["derive"] } +clap = { version = "4.5.44", features = ["derive"] } quickcheck = "1.0.3" -criterion = "0.6.0" +criterion = "0.7.0" minio-macros = { path = "./macros" } [lib] diff --git a/benches/s3/bench_object_append.rs b/benches/s3/bench_object_append.rs index 0eb1802..309333d 100644 --- a/benches/s3/bench_object_append.rs +++ b/benches/s3/bench_object_append.rs @@ -17,6 +17,7 @@ use crate::common_benches::{Ctx2, benchmark_s3_api}; use criterion::Criterion; use minio::s3::builders::AppendObject; +use minio::s3::error::Error; use minio::s3::response::StatObjectResponse; use minio::s3::segmented_bytes::SegmentedBytes; use minio::s3::types::S3Api; @@ -38,8 +39,9 @@ pub(crate) async fn bench_object_append(criterion: &mut Criterion) { let data1: SegmentedBytes = SegmentedBytes::from(content1.to_string()); let resp: StatObjectResponse = task::block_in_place(|| { - tokio::runtime::Runtime::new()? 
- .block_on(ctx.client.stat_object(&ctx.bucket, &ctx.object).send()) + let runtime = + tokio::runtime::Runtime::new().map_err(|e| Error::DriveIo(e.into()))?; + runtime.block_on(ctx.client.stat_object(&ctx.bucket, &ctx.object).send()) }) .unwrap(); diff --git a/benches/s3/common_benches.rs b/benches/s3/common_benches.rs index dc0662f..89ad455 100644 --- a/benches/s3/common_benches.rs +++ b/benches/s3/common_benches.rs @@ -24,7 +24,6 @@ use minio_common::utils::{ get_bytes_from_response, get_response_from_bytes, rand_bucket_name, rand_object_name, }; use std::env; - use tokio::runtime::Runtime; pub(crate) struct Ctx2 { @@ -170,7 +169,7 @@ pub(crate) fn benchmark_s3_api( pub(crate) async fn skip_express_mode(bench_name: &str) -> bool { let skip = TestContext::new_from_env().client.is_minio_express().await; if skip { - println!("Skipping benchmark '{}' (MinIO Express mode)", bench_name); + println!("Skipping benchmark '{bench_name}' (MinIO Express mode)"); } skip } diff --git a/common/Cargo.toml b/common/Cargo.toml index e5298a7..805523c 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -5,16 +5,16 @@ edition = "2024" [dependencies] minio = {path = ".." 
} -tokio = { version = "1.45.1", features = ["full"] } +tokio = { version = "1.47.1", features = ["full"] } async-std = "1.13.1" rand = { version = "0.8.5", features = ["small_rng"] } bytes = "1.10.1" log = "0.4.27" chrono = "0.4.41" -reqwest = "0.12.20" +reqwest = "0.12.22" http = "1.3.1" futures = "0.3.31" -uuid = { version = "1.17.0", features = ["v4"] } +uuid = { version = "1.18.0", features = ["v4"] } [lib] name = "minio_common" diff --git a/common/src/cleanup_guard.rs b/common/src/cleanup_guard.rs index 93105e3..cf9d82b 100644 --- a/common/src/cleanup_guard.rs +++ b/common/src/cleanup_guard.rs @@ -46,7 +46,7 @@ pub async fn cleanup(client: Client, bucket_name: &str) { //eprintln!("Bucket {} removed successfully", bucket_name); } Err(e) => { - eprintln!("Error removing bucket {}: {:?}", bucket_name, e); + eprintln!("Error removing bucket '{}':\n{}", bucket_name, e); } } } diff --git a/common/src/example.rs b/common/src/example.rs index a36cbfc..e045dd9 100644 --- a/common/src/example.rs +++ b/common/src/example.rs @@ -45,7 +45,7 @@ pub fn create_bucket_notification_config_example() -> NotificationConfig { String::from("s3:ObjectCreated:Put"), String::from("s3:ObjectCreated:Copy"), ], - id: Some("".to_string()), //TODO or should this be NONE?? + id: None, //Some("".to_string()), //TODO or should this be NONE?? 
prefix_filter_rule: Some(PrefixFilterRule { value: String::from("images"), }), diff --git a/examples/append_object.rs b/examples/append_object.rs index d1d2f24..132229b 100644 --- a/examples/append_object.rs +++ b/examples/append_object.rs @@ -57,23 +57,21 @@ async fn main() -> Result<(), Box> { offset_bytes += data_size; if resp.object_size() != offset_bytes { panic!( - "from the append_object: size mismatch: expected {}, got {}", + "from the append_object: size mismatch: expected {}, got {offset_bytes}", resp.object_size(), - offset_bytes ) } - //println!("Append response: {:#?}", resp); + //println!("Append response: {resp:#?}"); let resp: StatObjectResponse = client.stat_object(bucket_name, object_name).send().await?; if resp.size()? != offset_bytes { panic!( - "from the stat_Object: size mismatch: expected {}, got {}", + "from the stat_Object: size mismatch: expected {}, got {offset_bytes}", resp.size()?, - offset_bytes ) } - println!("{}/{}", i, n_segments); - //println!("Stat response: {:#?}", resp); + println!("{i}/{n_segments}"); + //println!("Stat response: {resp:#?}"); } Ok(()) diff --git a/examples/bucket_encryption.rs b/examples/bucket_encryption.rs index 984e0a7..c36769d 100644 --- a/examples/bucket_encryption.rs +++ b/examples/bucket_encryption.rs @@ -33,7 +33,7 @@ async fn main() -> Result<(), Box> { log::info!("encryption before: config={:?}", resp.config()); let config = SseConfig::default(); - log::info!("going to set encryption config={:?}", config); + log::info!("going to set encryption config={config:?}"); let _resp: PutBucketEncryptionResponse = client .put_bucket_encryption(bucket_name) diff --git a/examples/bucket_lifecycle.rs b/examples/bucket_lifecycle.rs index e264beb..6c69400 100644 --- a/examples/bucket_lifecycle.rs +++ b/examples/bucket_lifecycle.rs @@ -35,7 +35,7 @@ async fn main() -> Result<(), Box> { // TODO let resp: GetBucketLifecycleResponse = client.get_bucket_lifecycle(bucket_name).send().await?; - log::info!("life cycle 
settings before setting: resp={:?}", resp); + log::info!("life cycle settings before setting: resp={resp:?}"); } let rules: Vec = vec![LifecycleRule { @@ -54,20 +54,20 @@ async fn main() -> Result<(), Box> { .life_cycle_config(LifecycleConfig { rules }) .send() .await?; - log::info!("response of setting life cycle config: resp={:?}", resp); + log::info!("response of setting life cycle config: resp={resp:?}"); if false { // TODO let resp: GetBucketLifecycleResponse = client.get_bucket_lifecycle(bucket_name).send().await?; - log::info!("life cycle settings after setting: resp={:?}", resp); + log::info!("life cycle settings after setting: resp={resp:?}"); } if false { // TODO let resp: DeleteBucketLifecycleResponse = client.delete_bucket_lifecycle(bucket_name).send().await?; - log::info!("response of deleting lifecycle config: resp={:?}", resp); + log::info!("response of deleting lifecycle config: resp={resp:?}"); } Ok(()) } diff --git a/examples/common.rs b/examples/common.rs index 2b74cb5..d3624e0 100644 --- a/examples/common.rs +++ b/examples/common.rs @@ -7,7 +7,7 @@ use minio::s3::{Client, ClientBuilder}; #[allow(dead_code)] pub fn create_client_on_play() -> Result> { let base_url = "https://play.min.io".parse::()?; - log::info!("Trying to connect to MinIO at: `{:?}`", base_url); + log::info!("Trying to connect to MinIO at: `{base_url:?}`"); let static_provider = StaticProvider::new( "Q3AM3UQ867SPQQA43P2F", @@ -24,7 +24,7 @@ pub fn create_client_on_play() -> Result Result> { let base_url = "http://localhost:9000/".parse::()?; - log::info!("Trying to connect to MinIO at: `{:?}`", base_url); + log::info!("Trying to connect to MinIO at: `{base_url:?}`"); let static_provider = StaticProvider::new("minioadmin", "minioadmin", None); diff --git a/examples/object_prompt.rs b/examples/object_prompt.rs index 40625ce..ff84193 100644 --- a/examples/object_prompt.rs +++ b/examples/object_prompt.rs @@ -30,7 +30,7 @@ async fn main() -> Result<(), Box> { //Note: object prompt is 
not supported on play.min.io, you will need point to AIStor let base_url = "http://localhost:9000".parse::()?; - log::info!("Trying to connect to MinIO at: `{:?}`", base_url); + log::info!("Trying to connect to MinIO at: `{base_url:?}`"); let static_provider = StaticProvider::new("admin", "admin", None); diff --git a/macros/Cargo.toml b/macros/Cargo.toml index fb5ca76..85d0d5a 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -9,11 +9,11 @@ proc-macro = true [dependencies] -syn = "2.0.53" -proc-macro2 = "1.0.37" -quote = "1.0.18" -darling = "0.20.8" -darling_core = "0.20.8" +syn = "2.0.104" +proc-macro2 = "1.0.95" +quote = "1.0.40" +darling = "0.21.0" +darling_core = "0.21.0" uuid = { version = "1.17.0", features = ["v4"] } [dev-dependencies] diff --git a/rust-toolchain.toml b/rust-toolchain.toml index c579cf1..e3ff522 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.86.0" +channel = "1.88.0" components = ["clippy", "rustfmt"] #targets = ["x86_64-unknown-linux-musl"] \ No newline at end of file diff --git a/src/s3/builders/append_object.rs b/src/s3/builders/append_object.rs index 621b520..2e64702 100644 --- a/src/s3/builders/append_object.rs +++ b/src/s3/builders/append_object.rs @@ -17,14 +17,16 @@ use crate::s3::Client; use crate::s3::builders::{ ContentStream, MAX_MULTIPART_COUNT, ObjectContent, Size, calc_part_info, }; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; +use crate::s3::error::{Error, IoError}; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::a_response_traits::HasObjectSize; use crate::s3::response::{AppendObjectResponse, StatObjectResponse}; use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::sse::Sse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; -use crate::s3::utils::{check_bucket_name, check_object_name}; +use crate::s3::utils::{check_bucket_name, check_object_name, check_sse}; use 
http::Method; use std::sync::Arc; // region: append-object @@ -83,20 +85,13 @@ impl S3Api for AppendObject { } impl ToS3Request for AppendObject { - fn to_s3request(self) -> Result { - { - check_bucket_name(&self.bucket, true)?; - check_object_name(&self.object)?; - - if let Some(v) = &self.sse { - if v.tls_required() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } - } + fn to_s3request(self) -> Result { + check_bucket_name(&self.bucket, true)?; + check_object_name(&self.object)?; + check_sse(&self.sse, &self.client)?; let mut headers: Multimap = self.extra_headers.unwrap_or_default(); - headers.add("x-amz-write-offset-bytes", self.offset_bytes.to_string()); + headers.add(X_AMZ_WRITE_OFFSET_BYTES, self.offset_bytes.to_string()); Ok(S3Request::new(self.client, Method::PUT) .region(self.region) @@ -191,29 +186,23 @@ impl AppendObjectContent { } pub async fn send(mut self) -> Result { - { - check_bucket_name(&self.bucket, true)?; - check_object_name(&self.object)?; - if let Some(v) = &self.sse { - if v.tls_required() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } - } + check_bucket_name(&self.bucket, true)?; + check_object_name(&self.object)?; + check_sse(&self.sse, &self.client)?; { let mut headers: Multimap = match self.extra_headers { Some(ref headers) => headers.clone(), None => Multimap::new(), }; - headers.add("x-amz-write-offset-bytes", self.offset_bytes.to_string()); + headers.add(X_AMZ_WRITE_OFFSET_BYTES, self.offset_bytes.to_string()); self.extra_query_params = Some(headers); } self.content_stream = std::mem::take(&mut self.input_content) .to_content_stream() .await - .map_err(Error::IOError)?; + .map_err(IoError::from)?; // object_size may be Size::Unknown. let object_size = self.content_stream.get_size(); @@ -224,7 +213,11 @@ impl AppendObjectContent { self.part_count = n_expected_parts; // Read the first part. 
- let seg_bytes = self.content_stream.read_upto(part_size as usize).await?; + let seg_bytes = self + .content_stream + .read_upto(part_size as usize) + .await + .map_err(IoError::from)?; // get the length (if any) of the current file let resp: StatObjectResponse = self @@ -261,7 +254,7 @@ impl AppendObjectContent { // Not enough data! let expected = object_size.as_u64().unwrap(); let got = seg_bytes.len() as u64; - Err(Error::InsufficientData(expected, got)) + Err(ValidationErr::InsufficientData { expected, got })? } else { // Otherwise, we start a multipart append. self.send_mpa(part_size, current_file_size, seg_bytes).await @@ -288,7 +281,10 @@ impl AppendObjectContent { if let Some(v) = first_part.take() { v } else { - self.content_stream.read_upto(part_size as usize).await? + self.content_stream + .read_upto(part_size as usize) + .await + .map_err(IoError::from)? } }; part_number += 1; @@ -304,7 +300,7 @@ impl AppendObjectContent { // Check if we have too many parts to upload. if self.part_count.is_none() && part_number > MAX_MULTIPART_COUNT { - return Err(Error::TooManyParts); + return Err(ValidationErr::TooManyParts(part_number as u64).into()); } // Append the part now. diff --git a/src/s3/builders/bucket_common.rs b/src/s3/builders/bucket_common.rs index 95af12d..54128e9 100644 --- a/src/s3/builders/bucket_common.rs +++ b/src/s3/builders/bucket_common.rs @@ -13,10 +13,9 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::marker::PhantomData; - use crate::s3::client::Client; use crate::s3::multimap::Multimap; +use std::marker::PhantomData; /// Common parameters for bucket operations /// diff --git a/src/s3/builders/bucket_exists.rs b/src/s3/builders/bucket_exists.rs index d15732f..9df5415 100644 --- a/src/s3/builders/bucket_exists.rs +++ b/src/s3/builders/bucket_exists.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::BucketExistsResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::check_bucket_name; @@ -35,7 +35,7 @@ impl S3Api for BucketExists { } impl ToS3Request for BucketExists { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::HEAD) diff --git a/src/s3/builders/copy_object.rs b/src/s3/builders/copy_object.rs index 8f8962b..7bef7bc 100644 --- a/src/s3/builders/copy_object.rs +++ b/src/s3/builders/copy_object.rs @@ -15,7 +15,8 @@ use crate::s3::Client; use crate::s3::client::{MAX_MULTIPART_COUNT, MAX_PART_SIZE}; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::a_response_traits::HasEtagFromBody; use crate::s3::response::{ @@ -26,7 +27,8 @@ use crate::s3::response::{ use crate::s3::sse::{Sse, SseCustomerKey}; use crate::s3::types::{Directive, PartInfo, Retention, S3Api, S3Request, ToS3Request}; use crate::s3::utils::{ - UtcTime, check_bucket_name, check_object_name, to_http_header_value, to_iso8601utc, url_encode, + UtcTime, check_bucket_name, check_object_name, check_sse, check_ssec, to_http_header_value, + to_iso8601utc, url_encode, }; use async_recursion::async_recursion; use http::Method; @@ -94,15 +96,17 @@ impl S3Api for UploadPartCopy { } impl ToS3Request for UploadPartCopy { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; if self.upload_id.is_empty() { - return Err(Error::InvalidUploadId("upload ID cannot be empty".into())); + return Err(ValidationErr::InvalidUploadId( + "upload ID cannot be empty".into(), + )); } if 
!(1..=MAX_MULTIPART_COUNT).contains(&self.part_number) { - return Err(Error::InvalidPartNumber(format!( + return Err(ValidationErr::InvalidPartNumber(format!( "part number must be between 1 and {MAX_MULTIPART_COUNT}" ))); } @@ -225,17 +229,9 @@ impl S3Api for CopyObjectInternal { } impl ToS3Request for CopyObjectInternal { - fn to_s3request(self) -> Result { - { - if let Some(v) = &self.sse { - if v.tls_required() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } - if self.source.ssec.is_some() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } + fn to_s3request(self) -> Result { + check_sse(&self.sse, &self.client)?; + check_ssec(&self.source.ssec, &self.client)?; let mut headers = self.headers; { @@ -259,24 +255,24 @@ impl ToS3Request for CopyObjectInternal { tagging.push_str(&url_encode(value)); } if !tagging.is_empty() { - headers.add("x-amz-tagging", tagging); + headers.add(X_AMZ_TAGGING, tagging); } } if let Some(v) = self.retention { - headers.add("x-amz-object-lock-mode", v.mode.to_string()); + headers.add(X_AMZ_OBJECT_LOCK_MODE, v.mode.to_string()); headers.add( - "x-amz-object-lock-retain-until-date", + X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE, to_iso8601utc(v.retain_until_date), ); } if self.legal_hold { - headers.add("x-amz-object-lock-legal-hold", "ON"); + headers.add(X_AMZ_OBJECT_LOCK_LEGAL_HOLD, "ON"); } if let Some(v) = self.metadata_directive { - headers.add("x-amz-metadata-directive", v.to_string()); + headers.add(X_AMZ_METADATA_DIRECTIVE, v.to_string()); } if let Some(v) = self.tagging_directive { - headers.add("x-amz-tagging-directive", v.to_string()); + headers.add(X_AMZ_TAGGING_DIRECTIVE, v.to_string()); } let mut copy_source = String::from("/"); @@ -287,31 +283,28 @@ impl ToS3Request for CopyObjectInternal { copy_source.push_str("?versionId="); copy_source.push_str(&url_encode(v)); } - headers.add("x-amz-copy-source", copy_source); + headers.add(X_AMZ_COPY_SOURCE, copy_source); let range 
= self.source.get_range_value(); if !range.is_empty() { - headers.add("x-amz-copy-source-range", range); + headers.add(X_AMZ_COPY_SOURCE_RANGE, range); } if let Some(v) = self.source.match_etag { - headers.add("x-amz-copy-source-if-match", v); + headers.add(X_AMZ_COPY_SOURCE_IF_MATCH, v); } if let Some(v) = self.source.not_match_etag { - headers.add("x-amz-copy-source-if-none-match", v); + headers.add(X_AMZ_COPY_SOURCE_IF_NONE_MATCH, v); } if let Some(v) = self.source.modified_since { - headers.add( - "x-amz-copy-source-if-modified-since", - to_http_header_value(v), - ); + headers.add(X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE, to_http_header_value(v)); } if let Some(v) = self.source.unmodified_since { headers.add( - "x-amz-copy-source-if-unmodified-since", + X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE, to_http_header_value(v), ); } @@ -425,16 +418,9 @@ impl CopyObject { /// Functionally related to the [S3Api::send()](crate::s3::types::S3Api::send) method, but /// specifically tailored for the `CopyObject` operation. pub async fn send(self) -> Result { - { - if let Some(v) = &self.sse { - if v.tls_required() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } - if self.source.ssec.is_some() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } + check_sse(&self.sse, &self.client)?; + check_ssec(&self.source.ssec, &self.client)?; + let source = self.source.clone(); let stat_resp: StatObjectResponse = self @@ -458,9 +444,9 @@ impl CopyObject { if let Some(v) = &self.metadata_directive { match v { Directive::Copy => { - return Err(Error::InvalidCopyDirective( + return Err(ValidationErr::InvalidCopyDirective( "COPY metadata directive is not applicable to source object size greater than 5 GiB".into() - )); + ).into()); } _ => todo!(), // Nothing to do. 
} @@ -468,9 +454,9 @@ impl CopyObject { if let Some(v) = &self.tagging_directive { match v { Directive::Copy => { - return Err(Error::InvalidCopyDirective( + return Err(ValidationErr::InvalidCopyDirective( "COPY tagging directive is not applicable to source object size greater than 5 GiB".into() - )); + ).into()); } _ => todo!(), // Nothing to do. } @@ -677,7 +663,7 @@ impl ComposeObjectInternal { // the multipart upload was successful: update the upload_id let upload_id_cmu: String = match cmu.upload_id().await { Ok(v) => v, - Err(e) => return (Err(e), upload_id), + Err(e) => return (Err(e.into()), upload_id), }; upload_id.push_str(&upload_id_cmu); @@ -708,12 +694,12 @@ impl ComposeObjectInternal { part_number += 1; if let Some(l) = source.length { headers.add( - "x-amz-copy-source-range", + X_AMZ_COPY_SOURCE_RANGE, format!("bytes={}-{}", offset, offset + l - 1), ); } else if source.offset.is_some() { headers.add( - "x-amz-copy-source-range", + X_AMZ_COPY_SOURCE_RANGE, format!("bytes={}-{}", offset, offset + size - 1), ); } @@ -733,7 +719,7 @@ impl ComposeObjectInternal { let etag = match resp.etag() { Ok(v) => v, - Err(e) => return (Err(e), upload_id), + Err(e) => return (Err(e.into()), upload_id), }; parts.push(PartInfo { @@ -753,7 +739,7 @@ impl ComposeObjectInternal { let mut headers_copy = headers.clone(); headers_copy.add( - "x-amz-copy-source-range", + X_AMZ_COPY_SOURCE_RANGE, format!("bytes={offset}-{end_bytes}"), ); @@ -772,7 +758,7 @@ impl ComposeObjectInternal { let etag = match resp.etag() { Ok(v) => v, - Err(e) => return (Err(e), upload_id), + Err(e) => return (Err(e.into()), upload_id), }; parts.push(PartInfo { @@ -894,13 +880,8 @@ impl ComposeObject { } pub async fn send(self) -> Result { - { - if let Some(v) = &self.sse { - if v.tls_required() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } - } + check_sse(&self.sse, &self.client)?; + let object: String = self.object.clone(); let bucket: String = 
self.bucket.clone(); @@ -968,7 +949,7 @@ impl ComposeSource { /// use minio::s3::builders::ComposeSource; /// let src = ComposeSource::new("my-src-bucket", "my-src-object").unwrap(); /// ``` - pub fn new(bucket_name: &str, object_name: &str) -> Result { + pub fn new(bucket_name: &str, object_name: &str) -> Result { check_bucket_name(bucket_name, true)?; check_object_name(object_name)?; @@ -987,38 +968,38 @@ impl ComposeSource { self.headers.as_ref().expect("B: ABORT: ComposeSource::build_headers() must be called prior to this method invocation. This should not happen.").clone() } - pub fn build_headers(&mut self, object_size: u64, etag: String) -> Result<(), Error> { - if let Some(v) = self.offset { - if v >= object_size { - return Err(Error::InvalidComposeSourceOffset( - self.bucket.to_string(), - self.object.to_string(), - self.version_id.clone(), - v, - object_size, - )); - } + pub fn build_headers(&mut self, object_size: u64, etag: String) -> Result<(), ValidationErr> { + if let Some(v) = self.offset + && v >= object_size + { + return Err(ValidationErr::InvalidComposeSourceOffset { + bucket: self.bucket.to_string(), + object: self.object.to_string(), + version: self.version_id.clone(), + offset: v, + object_size, + }); } if let Some(v) = self.length { if v > object_size { - return Err(Error::InvalidComposeSourceLength( - self.bucket.to_string(), - self.object.to_string(), - self.version_id.clone(), - v, + return Err(ValidationErr::InvalidComposeSourceLength { + bucket: self.bucket.to_string(), + object: self.object.to_string(), + version: self.version_id.clone(), + length: v, object_size, - )); + }); } if (self.offset.unwrap_or_default() + v) > object_size { - return Err(Error::InvalidComposeSourceSize( - self.bucket.to_string(), - self.object.to_string(), - self.version_id.clone(), - self.offset.unwrap_or_default() + v, + return Err(ValidationErr::InvalidComposeSourceSize { + bucket: self.bucket.to_string(), + object: self.object.to_string(), + version: 
self.version_id.clone(), + compose_size: self.offset.unwrap_or_default() + v, object_size, - )); + }); } } @@ -1034,26 +1015,23 @@ impl ComposeSource { copy_source.push_str("?versionId="); copy_source.push_str(&url_encode(v)); } - headers.add("x-amz-copy-source", copy_source); + headers.add(X_AMZ_COPY_SOURCE, copy_source); if let Some(v) = &self.match_etag { - headers.add("x-amz-copy-source-if-match", v); + headers.add(X_AMZ_COPY_SOURCE_IF_MATCH, v); } if let Some(v) = &self.not_match_etag { - headers.add("x-amz-copy-source-if-none-match", v); + headers.add(X_AMZ_COPY_SOURCE_IF_NONE_MATCH, v); } if let Some(v) = self.modified_since { - headers.add( - "x-amz-copy-source-if-modified-since", - to_http_header_value(v), - ); + headers.add(X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE, to_http_header_value(v)); } if let Some(v) = self.unmodified_since { headers.add( - "x-amz-copy-source-if-unmodified-since", + X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE, to_http_header_value(v), ); } @@ -1062,8 +1040,8 @@ impl ComposeSource { headers.add_multimap(v.copy_headers()); } - if !headers.contains_key("x-amz-copy-source-if-match") { - headers.add("x-amz-copy-source-if-match", etag); + if !headers.contains_key(X_AMZ_COPY_SOURCE_IF_MATCH) { + headers.add(X_AMZ_COPY_SOURCE_IF_MATCH, etag); } self.headers = Some(headers); @@ -1091,7 +1069,7 @@ pub struct CopySource { } impl CopySource { - pub fn new(bucket_name: &str, object_name: &str) -> Result { + pub fn new(bucket_name: &str, object_name: &str) -> Result { check_bucket_name(bucket_name, true)?; check_object_name(object_name)?; @@ -1161,20 +1139,20 @@ fn into_headers_copy_object( } if !tagging.is_empty() { - map.add("x-amz-tagging", tagging); + map.add(X_AMZ_TAGGING, tagging); } } if let Some(v) = retention { - map.add("x-amz-object-lock-mode", v.mode.to_string()); + map.add(X_AMZ_OBJECT_LOCK_MODE, v.mode.to_string()); map.add( - "x-amz-object-lock-retain-until-date", + X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE, to_iso8601utc(v.retain_until_date), ); 
} if legal_hold { - map.add("x-amz-object-lock-legal-hold", "ON"); + map.add(X_AMZ_OBJECT_LOCK_LEGAL_HOLD, "ON"); } map diff --git a/src/s3/builders/create_bucket.rs b/src/s3/builders/create_bucket.rs index ee76a91..f61b258 100644 --- a/src/s3/builders/create_bucket.rs +++ b/src/s3/builders/create_bucket.rs @@ -15,7 +15,8 @@ use crate::s3::Client; use crate::s3::client::DEFAULT_REGION; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::CreateBucketResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -74,7 +75,7 @@ impl S3Api for CreateBucket { } impl ToS3Request for CreateBucket { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let region1: Option<&str> = self.region.as_deref(); @@ -86,13 +87,16 @@ impl ToS3Request for CreateBucket { (None, Some(v)) => v.to_string(), (Some(r1), Some(r2)) if r1 == r2 => self.region.unwrap(), // Both are Some and equal (Some(r1), Some(r2)) => { - return Err(Error::RegionMismatch(r1.to_string(), r2.to_string())); + return Err(ValidationErr::RegionMismatch { + bucket_region: r1.to_string(), + region: r2.to_string(), + }); } }; let mut headers: Multimap = self.extra_headers.unwrap_or_default(); if self.object_lock { - headers.add("x-amz-bucket-object-lock-enabled", "true"); + headers.add(X_AMZ_BUCKET_OBJECT_LOCK_ENABLED, "true"); } let data: String = match region_str.as_str() { diff --git a/src/s3/builders/delete_bucket.rs b/src/s3/builders/delete_bucket.rs index d5b48c6..6c67e91 100644 --- a/src/s3/builders/delete_bucket.rs +++ b/src/s3/builders/delete_bucket.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteBucketResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::check_bucket_name; @@ -35,7 +35,7 @@ impl S3Api for DeleteBucket { } impl ToS3Request for DeleteBucket { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::DELETE) diff --git a/src/s3/builders/delete_bucket_encryption.rs b/src/s3/builders/delete_bucket_encryption.rs index 4a64153..7a85106 100644 --- a/src/s3/builders/delete_bucket_encryption.rs +++ b/src/s3/builders/delete_bucket_encryption.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteBucketEncryptionResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for DeleteBucketEncryption { } impl ToS3Request for DeleteBucketEncryption { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::DELETE) diff --git a/src/s3/builders/delete_bucket_lifecycle.rs b/src/s3/builders/delete_bucket_lifecycle.rs index da851b6..339458d 100644 --- a/src/s3/builders/delete_bucket_lifecycle.rs +++ b/src/s3/builders/delete_bucket_lifecycle.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteBucketLifecycleResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for DeleteBucketLifecycle { } impl ToS3Request for DeleteBucketLifecycle { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::DELETE) diff --git a/src/s3/builders/delete_bucket_notification.rs b/src/s3/builders/delete_bucket_notification.rs index 2151b1c..7235ec0 100644 --- a/src/s3/builders/delete_bucket_notification.rs +++ b/src/s3/builders/delete_bucket_notification.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteBucketNotificationResponse; use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::types::{NotificationConfig, S3Api, S3Request, ToS3Request}; @@ -38,7 +38,7 @@ impl S3Api for DeleteBucketNotification { } impl ToS3Request for DeleteBucketNotification { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; const CONFIG: NotificationConfig = NotificationConfig { diff --git a/src/s3/builders/delete_bucket_policy.rs b/src/s3/builders/delete_bucket_policy.rs index 7491070..7f70fce 100644 --- a/src/s3/builders/delete_bucket_policy.rs +++ b/src/s3/builders/delete_bucket_policy.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteBucketPolicyResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for DeleteBucketPolicy { } impl ToS3Request for DeleteBucketPolicy { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::DELETE) diff --git a/src/s3/builders/delete_bucket_replication.rs b/src/s3/builders/delete_bucket_replication.rs index 59a3849..9ef5524 100644 --- a/src/s3/builders/delete_bucket_replication.rs +++ b/src/s3/builders/delete_bucket_replication.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteBucketReplicationResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for DeleteBucketReplication { } impl ToS3Request for DeleteBucketReplication { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::DELETE) diff --git a/src/s3/builders/delete_bucket_tagging.rs b/src/s3/builders/delete_bucket_tagging.rs index 81fa066..fd32752 100644 --- a/src/s3/builders/delete_bucket_tagging.rs +++ b/src/s3/builders/delete_bucket_tagging.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteBucketTaggingResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for DeleteBucketTagging { } impl ToS3Request for DeleteBucketTagging { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::DELETE) diff --git a/src/s3/builders/delete_object_lock_config.rs b/src/s3/builders/delete_object_lock_config.rs index 077145f..2e0f1e6 100644 --- a/src/s3/builders/delete_object_lock_config.rs +++ b/src/s3/builders/delete_object_lock_config.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::DeleteObjectLockConfigResponse; use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::types::{ObjectLockConfig, S3Api, S3Request, ToS3Request}; @@ -34,7 +34,7 @@ impl S3Api for DeleteObjectLockConfig { } impl ToS3Request for DeleteObjectLockConfig { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let config = ObjectLockConfig { diff --git a/src/s3/builders/delete_object_tagging.rs b/src/s3/builders/delete_object_tagging.rs index 1fe0471..9064f97 100644 --- a/src/s3/builders/delete_object_tagging.rs +++ b/src/s3/builders/delete_object_tagging.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::DeleteObjectTaggingResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -74,7 +74,7 @@ impl S3Api for DeleteObjectTagging { } impl ToS3Request for DeleteObjectTagging { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; diff --git a/src/s3/builders/delete_objects.rs b/src/s3/builders/delete_objects.rs index dd662b7..f947169 100644 --- a/src/s3/builders/delete_objects.rs +++ b/src/s3/builders/delete_objects.rs @@ -13,27 +13,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Builders for RemoveObject APIs. - +use crate::s3::Client; use crate::s3::client::MAX_MULTIPART_COUNT; +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; -use crate::s3::response::DeleteError; -use crate::s3::types::ListEntry; -use crate::s3::utils::{check_object_name, insert}; -use crate::s3::{ - Client, - error::Error, - response::{DeleteObjectResponse, DeleteObjectsResponse}, - types::{S3Api, S3Request, ToS3Request, ToStream}, - utils::{check_bucket_name, md5sum_hash}, -}; +use crate::s3::response::{DeleteError, DeleteObjectResponse, DeleteObjectsResponse}; +use crate::s3::types::{ListEntry, S3Api, S3Request, ToS3Request, ToStream}; +use crate::s3::utils::{check_bucket_name, check_object_name, insert, md5sum_hash}; use async_trait::async_trait; use bytes::Bytes; use futures_util::stream::iter; use futures_util::{Stream, StreamExt, stream as futures_stream}; use http::Method; use std::pin::Pin; - // region: object-to-delete pub trait ValidKey: Into {} @@ -154,7 +147,7 @@ impl S3Api for DeleteObject { } impl ToS3Request for DeleteObject { - fn to_s3request(self) -> Result { + fn 
to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object.key)?; @@ -163,7 +156,7 @@ impl ToS3Request for DeleteObject { let mut headers: Multimap = self.extra_headers.unwrap_or_default(); if self.bypass_governance_mode { - headers.add("x-amz-bypass-governance-retention", "true"); + headers.add(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true"); } Ok(S3Request::new(self.client, Method::DELETE) @@ -238,7 +231,7 @@ impl S3Api for DeleteObjects { } impl ToS3Request for DeleteObjects { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let mut data: String = String::from(""); @@ -263,10 +256,10 @@ impl ToS3Request for DeleteObjects { let mut headers: Multimap = self.extra_headers.unwrap_or_default(); { if self.bypass_governance_mode { - headers.add("x-amz-bypass-governance-retention", "true"); + headers.add(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true"); } - headers.add("Content-Type", "application/xml"); - headers.add("Content-MD5", md5sum_hash(bytes.as_ref())); + headers.add(CONTENT_TYPE, "application/xml"); + headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref())); } Ok(S3Request::new(self.client, Method::POST) @@ -374,7 +367,7 @@ impl DeleteObjectsStreaming { self } - async fn next_request(&mut self) -> Result, Error> { + async fn next_request(&mut self) -> Result, ValidationErr> { let mut objects = Vec::new(); while let Some(object) = self.objects.items.next().await { objects.push(object); @@ -413,7 +406,7 @@ impl ToStream for DeleteObjectsStreaming { Some((response, this)) } Ok(None) => None, - Err(e) => Some((Err(e), this)), + Err(e) => Some((Err(e.into()), this)), } }, ))) diff --git a/src/s3/builders/get_bucket_encryption.rs b/src/s3/builders/get_bucket_encryption.rs index 44ef149..fa97834 100644 --- a/src/s3/builders/get_bucket_encryption.rs +++ b/src/s3/builders/get_bucket_encryption.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::GetBucketEncryptionResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for GetBucketEncryption { } impl ToS3Request for GetBucketEncryption { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/get_bucket_lifecycle.rs b/src/s3/builders/get_bucket_lifecycle.rs index c48b646..095a64b 100644 --- a/src/s3/builders/get_bucket_lifecycle.rs +++ b/src/s3/builders/get_bucket_lifecycle.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::GetBucketLifecycleResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -66,7 +66,7 @@ impl S3Api for GetBucketLifecycle { } impl ToS3Request for GetBucketLifecycle { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let mut query_params: Multimap = insert(self.extra_query_params, "lifecycle"); diff --git a/src/s3/builders/get_bucket_notification.rs b/src/s3/builders/get_bucket_notification.rs index f77a3da..7cb5921 100644 --- a/src/s3/builders/get_bucket_notification.rs +++ b/src/s3/builders/get_bucket_notification.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::GetBucketNotificationResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for GetBucketNotification { } impl ToS3Request for GetBucketNotification { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/get_bucket_policy.rs b/src/s3/builders/get_bucket_policy.rs index f5f2323..18d6f10 100644 --- a/src/s3/builders/get_bucket_policy.rs +++ b/src/s3/builders/get_bucket_policy.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::GetBucketPolicyResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for GetBucketPolicy { } impl ToS3Request for GetBucketPolicy { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/get_bucket_replication.rs b/src/s3/builders/get_bucket_replication.rs index 97834d6..2c4d295 100644 --- a/src/s3/builders/get_bucket_replication.rs +++ b/src/s3/builders/get_bucket_replication.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::GetBucketReplicationResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for GetBucketReplication { } impl ToS3Request for GetBucketReplication { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/get_bucket_tagging.rs b/src/s3/builders/get_bucket_tagging.rs index 49eb663..436ab84 100644 --- a/src/s3/builders/get_bucket_tagging.rs +++ b/src/s3/builders/get_bucket_tagging.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::GetBucketTaggingResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -73,7 +73,7 @@ impl S3Api for GetBucketTagging { } impl ToS3Request for GetBucketTagging { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/get_bucket_versioning.rs b/src/s3/builders/get_bucket_versioning.rs index 54b8087..60ae014 100644 --- a/src/s3/builders/get_bucket_versioning.rs +++ b/src/s3/builders/get_bucket_versioning.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::GetBucketVersioningResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for GetBucketVersioning { } impl ToS3Request for GetBucketVersioning { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/get_object.rs b/src/s3/builders/get_object.rs index a64e22a..6dd8d83 100644 --- a/src/s3/builders/get_object.rs +++ b/src/s3/builders/get_object.rs @@ -13,18 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -use http::Method; - +use crate::s3::client::Client; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; -use crate::s3::utils::check_object_name; -use crate::s3::{ - client::Client, - error::Error, - response::GetObjectResponse, - sse::{Sse, SseCustomerKey}, - types::{S3Api, S3Request, ToS3Request}, - utils::{UtcTime, check_bucket_name, to_http_header_value}, +use crate::s3::response::GetObjectResponse; +use crate::s3::sse::{Sse, SseCustomerKey}; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{ + UtcTime, check_bucket_name, check_object_name, check_ssec, to_http_header_value, }; +use http::Method; /// Argument builder for the [`GetObject`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) S3 API operation. 
/// @@ -122,14 +121,10 @@ impl S3Api for GetObject { } impl ToS3Request for GetObject { - fn to_s3request(self) -> Result { - { - check_bucket_name(&self.bucket, true)?; - check_object_name(&self.object)?; - if self.ssec.is_some() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } + fn to_s3request(self) -> Result { + check_bucket_name(&self.bucket, true)?; + check_object_name(&self.object)?; + check_ssec(&self.ssec, &self.client)?; let mut headers: Multimap = self.extra_headers.unwrap_or_default(); { @@ -147,24 +142,24 @@ impl ToS3Request for GetObject { if let Some(l) = length { range.push_str(&(o + l - 1).to_string()); } - headers.add("Range", range); + headers.add(RANGE, range); } } if let Some(v) = self.match_etag { - headers.add("if-match", v); + headers.add(IF_MATCH, v); } if let Some(v) = self.not_match_etag { - headers.add("if-none-match", v); + headers.add(IF_NONE_MATCH, v); } if let Some(v) = self.modified_since { - headers.add("if-modified-since", to_http_header_value(v)); + headers.add(IF_MODIFIED_SINCE, to_http_header_value(v)); } if let Some(v) = self.unmodified_since { - headers.add("if-unmodified-since", to_http_header_value(v)); + headers.add(IF_UNMODIFIED_SINCE, to_http_header_value(v)); } if let Some(v) = &self.ssec { diff --git a/src/s3/builders/get_object_legal_hold.rs b/src/s3/builders/get_object_legal_hold.rs index dcd074b..685485d 100644 --- a/src/s3/builders/get_object_legal_hold.rs +++ b/src/s3/builders/get_object_legal_hold.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::GetObjectLegalHoldResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -68,7 +68,7 @@ impl S3Api for GetObjectLegalHold { } impl ToS3Request for GetObjectLegalHold { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; diff --git a/src/s3/builders/get_object_lock_config.rs b/src/s3/builders/get_object_lock_config.rs index 1d98cd8..d56a3de 100644 --- a/src/s3/builders/get_object_lock_config.rs +++ b/src/s3/builders/get_object_lock_config.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::builders::BucketCommon; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::response::GetObjectLockConfigResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::{check_bucket_name, insert}; @@ -34,7 +34,7 @@ impl S3Api for GetObjectLockConfig { } impl ToS3Request for GetObjectLockConfig { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/get_object_prompt.rs b/src/s3/builders/get_object_prompt.rs index 5a1bf4e..c6c6b1f 100644 --- a/src/s3/builders/get_object_prompt.rs +++ b/src/s3/builders/get_object_prompt.rs @@ -13,16 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use crate::s3::client::Client; +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; +use crate::s3::response::GetObjectPromptResponse; use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::sse::SseCustomerKey; -use crate::s3::utils::{check_bucket_name, check_object_name}; -use crate::s3::{ - client::Client, - error::Error, - response::GetObjectPromptResponse, - types::{S3Api, S3Request, ToS3Request}, -}; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{check_bucket_name, check_object_name, check_ssec}; use bytes::Bytes; use http::Method; use serde_json::json; @@ -94,16 +92,13 @@ impl S3Api for GetObjectPrompt { } impl ToS3Request for GetObjectPrompt { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; - + check_ssec(&self.ssec, &self.client)?; if self.client.is_aws_host() { - return Err(Error::UnsupportedApi("ObjectPrompt".into())); - } - if self.ssec.is_some() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); + return Err(ValidationErr::UnsupportedAwsApi("ObjectPrompt".into())); } } let mut query_params: Multimap = self.extra_query_params.unwrap_or_default(); diff --git a/src/s3/builders/get_object_retention.rs b/src/s3/builders/get_object_retention.rs index c6df748..47c446b 100644 --- a/src/s3/builders/get_object_retention.rs +++ b/src/s3/builders/get_object_retention.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::GetObjectRetentionResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -74,7 +74,7 @@ impl S3Api for GetObjectRetention { } impl ToS3Request for GetObjectRetention { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; diff --git a/src/s3/builders/get_object_tagging.rs b/src/s3/builders/get_object_tagging.rs index f866d77..25419cb 100644 --- a/src/s3/builders/get_object_tagging.rs +++ b/src/s3/builders/get_object_tagging.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::GetObjectTaggingResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -74,7 +74,7 @@ impl S3Api for GetObjectTagging { } impl ToS3Request for GetObjectTagging { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; diff --git a/src/s3/builders/get_presigned_object_url.rs b/src/s3/builders/get_presigned_object_url.rs index 527fa60..0f01b04 100644 --- a/src/s3/builders/get_presigned_object_url.rs +++ b/src/s3/builders/get_presigned_object_url.rs @@ -16,6 +16,7 @@ use crate::s3::Client; use crate::s3::creds::Credentials; use crate::s3::error::Error; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::GetPresignedObjectUrlResponse; use crate::s3::signer::presign_v4; @@ -68,7 +69,6 @@ impl GetPresignedObjectUrl { /// Sends the request to generate a presigned URL for an S3 object. 
pub async fn send(self) -> Result { - // NOTE: this send function is async and because of that, not comparable with other send functions... check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; @@ -91,7 +91,7 @@ impl GetPresignedObjectUrl { if let Some(p) = &self.client.shared.provider { let creds: Credentials = p.fetch(); if let Some(t) = creds.session_token { - query_params.add("X-Amz-Security-Token", t); + query_params.add(X_AMZ_SECURITY_TOKEN, t); } let date = match self.request_time { diff --git a/src/s3/builders/get_presigned_policy_form_data.rs b/src/s3/builders/get_presigned_policy_form_data.rs index 0ee6a1b..ec04cdf 100644 --- a/src/s3/builders/get_presigned_policy_form_data.rs +++ b/src/s3/builders/get_presigned_policy_form_data.rs @@ -15,7 +15,8 @@ use crate::s3::Client; use crate::s3::creds::Credentials; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::header_constants::*; use crate::s3::signer::post_presign_v4; use crate::s3::utils::{ UtcTime, b64encode, check_bucket_name, to_amz_date, to_iso8601utc, to_signer_date, utc_now, @@ -41,12 +42,14 @@ impl GetPresignedPolicyFormData { .await?; let creds: Credentials = self.client.shared.provider.as_ref().unwrap().fetch(); - self.policy.form_data( - creds.access_key, - creds.secret_key, - creds.session_token, - region, - ) + self.policy + .form_data( + creds.access_key, + creds.secret_key, + creds.session_token, + region, + ) + .map_err(Error::Validation) } } @@ -82,7 +85,7 @@ impl PostPolicy { /// let expiration = utc_now() + Duration::days(7); /// let policy = PostPolicy::new("bucket-name", expiration).unwrap(); /// ``` - pub fn new(bucket_name: &str, expiration: UtcTime) -> Result { + pub fn new(bucket_name: &str, expiration: UtcTime) -> Result { check_bucket_name(bucket_name, true)?; Ok(Self { @@ -102,11 +105,11 @@ impl PostPolicy { fn is_reserved_element(element: &str) -> bool { element.eq_ignore_ascii_case("bucket") - || 
element.eq_ignore_ascii_case("x-amz-algorithm") - || element.eq_ignore_ascii_case("x-amz-credential") - || element.eq_ignore_ascii_case("x-amz-date") - || element.eq_ignore_ascii_case("policy") - || element.eq_ignore_ascii_case("x-amz-signature") + || element.eq_ignore_ascii_case(X_AMZ_ALGORITHM) + || element.eq_ignore_ascii_case(X_AMZ_CREDENTIAL) + || element.eq_ignore_ascii_case(X_AMZ_DATE) + || element.eq_ignore_ascii_case(POLICY) + || element.eq_ignore_ascii_case(X_AMZ_SIGNATURE) } fn get_credential_string(access_key: &String, date: &UtcTime, region: &String) -> String { @@ -131,10 +134,14 @@ impl PostPolicy { /// // Add condition that 'key' (object name) equals to 'bucket-name' /// policy.add_equals_condition("key", "bucket-name").unwrap(); /// ``` - pub fn add_equals_condition(&mut self, element: &str, value: &str) -> Result<(), Error> { + pub fn add_equals_condition( + &mut self, + element: &str, + value: &str, + ) -> Result<(), ValidationErr> { if element.is_empty() { - return Err(Error::PostPolicyError( - "condition element cannot be empty".to_string(), + return Err(ValidationErr::PostPolicyError( + "condition element cannot be empty".into(), )); } @@ -143,13 +150,15 @@ impl PostPolicy { || v.eq_ignore_ascii_case("redirect") || v.eq_ignore_ascii_case("content-length-range") { - return Err(Error::PostPolicyError(format!( + return Err(ValidationErr::PostPolicyError(format!( "{element} is unsupported for equals condition", ))); } if PostPolicy::is_reserved_element(v.as_str()) { - return Err(Error::PostPolicyError(format!("{element} cannot set"))); + return Err(ValidationErr::PostPolicyError(format!( + "{element} cannot set" + ))); } self.eq_conditions.insert(v, value.to_string()); @@ -186,10 +195,14 @@ impl PostPolicy { /// // Add condition that 'Content-Type' starts with 'image/' /// policy.add_starts_with_condition("Content-Type", "image/").unwrap(); /// ``` - pub fn add_starts_with_condition(&mut self, element: &str, value: &str) -> Result<(), Error> { + 
pub fn add_starts_with_condition( + &mut self, + element: &str, + value: &str, + ) -> Result<(), ValidationErr> { if element.is_empty() { - return Err(Error::PostPolicyError( - "condition element cannot be empty".to_string(), + return Err(ValidationErr::PostPolicyError( + "condition element cannot be empty".into(), )); } @@ -198,13 +211,15 @@ impl PostPolicy { || v.eq_ignore_ascii_case("content-length-range") || (v.starts_with("x-amz-") && v.starts_with("x-amz-meta-")) { - return Err(Error::PostPolicyError(format!( + return Err(ValidationErr::PostPolicyError(format!( "{element} is unsupported for starts-with condition", ))); } if PostPolicy::is_reserved_element(v.as_str()) { - return Err(Error::PostPolicyError(format!("{element} cannot set"))); + return Err(ValidationErr::PostPolicyError(format!( + "{element} cannot set" + ))); } self.starts_with_conditions.insert(v, value.to_string()); @@ -246,10 +261,10 @@ impl PostPolicy { &mut self, lower_limit: usize, upper_limit: usize, - ) -> Result<(), Error> { + ) -> Result<(), ValidationErr> { if lower_limit > upper_limit { - return Err(Error::PostPolicyError( - "lower limit cannot be greater than upper limit".to_string(), + return Err(ValidationErr::PostPolicyError( + "lower limit cannot be greater than upper limit".into(), )); } @@ -272,16 +287,18 @@ impl PostPolicy { secret_key: String, session_token: Option, region: String, - ) -> Result, Error> { + ) -> Result, ValidationErr> { if region.is_empty() { - return Err(Error::PostPolicyError("region cannot be empty".to_string())); + return Err(ValidationErr::PostPolicyError( + "region cannot be empty".into(), + )); } if !self.eq_conditions.contains_key("key") && !self.starts_with_conditions.contains_key("key") { - return Err(Error::PostPolicyError( - "key condition must be set".to_string(), + return Err(ValidationErr::PostPolicyError( + "key condition must be set".into(), )); } @@ -328,13 +345,13 @@ impl PostPolicy { let signature = post_presign_v4(&encoded_policy, 
&secret_key, date, ®ion); let mut data: HashMap = HashMap::new(); - data.insert("x-amz-algorithm".into(), PostPolicy::ALGORITHM.to_string()); - data.insert("x-amz-credential".into(), credential); - data.insert("x-amz-date".into(), amz_date); - data.insert("policy".into(), encoded_policy); - data.insert("x-amz-signature".into(), signature); + data.insert(X_AMZ_ALGORITHM.into(), PostPolicy::ALGORITHM.to_string()); + data.insert(X_AMZ_CREDENTIAL.into(), credential); + data.insert(X_AMZ_DATE.into(), amz_date); + data.insert(POLICY.into(), encoded_policy); + data.insert(X_AMZ_SIGNATURE.into(), signature); if let Some(v) = session_token { - data.insert("x-amz-security-token".into(), v); + data.insert(X_AMZ_SECURITY_TOKEN.into(), v); } Ok(data) diff --git a/src/s3/builders/get_region.rs b/src/s3/builders/get_region.rs index 96e373d..75634fa 100644 --- a/src/s3/builders/get_region.rs +++ b/src/s3/builders/get_region.rs @@ -15,7 +15,7 @@ use crate::s3::Client; use crate::s3::client::DEFAULT_REGION; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::GetRegionResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -63,7 +63,7 @@ impl S3Api for GetRegion { } impl ToS3Request for GetRegion { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; Ok(S3Request::new(self.client, Method::GET) diff --git a/src/s3/builders/list_buckets.rs b/src/s3/builders/list_buckets.rs index de9733c..c80575e 100644 --- a/src/s3/builders/list_buckets.rs +++ b/src/s3/builders/list_buckets.rs @@ -13,15 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use http::Method; - +use crate::s3::Client; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::ListBucketsResponse; -use crate::s3::{ - Client, - error::Error, - types::{S3Api, S3Request, ToS3Request}, -}; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use http::Method; /// Argument builder for the [`ListBuckets`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) S3 API operation. /// @@ -58,7 +55,7 @@ impl S3Api for ListBuckets { } impl ToS3Request for ListBuckets { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { Ok(S3Request::new(self.client, Method::GET) .query_params(self.extra_query_params.unwrap_or_default()) .headers(self.extra_headers.unwrap_or_default())) diff --git a/src/s3/builders/list_objects.rs b/src/s3/builders/list_objects.rs index 3542fe3..bf7ec09 100644 --- a/src/s3/builders/list_objects.rs +++ b/src/s3/builders/list_objects.rs @@ -12,23 +12,19 @@ //! Argument builders for ListObject APIs. 
+use crate::s3::client::Client; +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::multimap::{Multimap, MultimapExt}; +use crate::s3::response::ListObjectsResponse; +use crate::s3::response::list_objects::{ + ListObjectVersionsResponse, ListObjectsV1Response, ListObjectsV2Response, +}; +use crate::s3::types::{S3Api, S3Request, ToS3Request, ToStream}; +use crate::s3::utils::{check_bucket_name, insert}; use async_trait::async_trait; use futures_util::{Stream, StreamExt, stream as futures_stream}; use http::Method; -use crate::s3::multimap::{Multimap, MultimapExt}; -use crate::s3::utils::insert; -use crate::s3::{ - client::Client, - error::Error, - response::ListObjectsResponse, - response::list_objects::{ - ListObjectVersionsResponse, ListObjectsV1Response, ListObjectsV2Response, - }, - types::{S3Api, S3Request, ToS3Request, ToStream}, - utils::check_bucket_name, -}; - fn add_common_list_objects_query_params( query_params: &mut Multimap, delimiter: Option, @@ -114,7 +110,7 @@ impl S3Api for ListObjectsV1 { } impl ToS3Request for ListObjectsV1 { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let mut query_params: Multimap = self.extra_query_params.unwrap_or_default(); @@ -219,7 +215,7 @@ impl S3Api for ListObjectsV2 { } impl ToS3Request for ListObjectsV2 { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let mut query_params: Multimap = self.extra_query_params.unwrap_or_default(); @@ -340,7 +336,7 @@ impl S3Api for ListObjectVersions { } impl ToS3Request for ListObjectVersions { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let mut query_params: Multimap = insert(self.extra_query_params, "versions"); diff --git a/src/s3/builders/listen_bucket_notification.rs b/src/s3/builders/listen_bucket_notification.rs index 117d3d9..c4f4c2f 100644 --- 
a/src/s3/builders/listen_bucket_notification.rs +++ b/src/s3/builders/listen_bucket_notification.rs @@ -13,19 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. +use crate::s3::client::Client; +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::multimap::{Multimap, MultimapExt}; +use crate::s3::response::ListenBucketNotificationResponse; +use crate::s3::types::{NotificationRecords, S3Api, S3Request, ToS3Request}; +use crate::s3::utils::check_bucket_name; use async_trait::async_trait; use futures_util::Stream; use http::Method; -use crate::s3::multimap::{Multimap, MultimapExt}; -use crate::s3::{ - client::Client, - error::Error, - response::ListenBucketNotificationResponse, - types::{NotificationRecords, S3Api, S3Request, ToS3Request}, - utils::check_bucket_name, -}; - /// Argument builder for the [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification) /// /// This struct constructs the parameters required for the [`Client::listen_bucket_notification`](crate::s3::client::Client::listen_bucket_notification) method. @@ -92,11 +89,13 @@ impl S3Api for ListenBucketNotification { } impl ToS3Request for ListenBucketNotification { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { { check_bucket_name(&self.bucket, true)?; if self.client.is_aws_host() { - return Err(Error::UnsupportedApi("ListenBucketNotification".into())); + return Err(ValidationErr::UnsupportedAwsApi( + "ListenBucketNotification".into(), + )); } } diff --git a/src/s3/builders/put_bucket_encryption.rs b/src/s3/builders/put_bucket_encryption.rs index 982f5a5..2d27bab 100644 --- a/src/s3/builders/put_bucket_encryption.rs +++ b/src/s3/builders/put_bucket_encryption.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::PutBucketEncryptionResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -75,7 +75,7 @@ impl S3Api for PutBucketEncryption { } impl ToS3Request for PutBucketEncryption { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let bytes: Bytes = self.config.to_xml().into(); diff --git a/src/s3/builders/put_bucket_lifecycle.rs b/src/s3/builders/put_bucket_lifecycle.rs index 76793f4..8887590 100644 --- a/src/s3/builders/put_bucket_lifecycle.rs +++ b/src/s3/builders/put_bucket_lifecycle.rs @@ -14,7 +14,8 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::lifecycle_config::LifecycleConfig; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::PutBucketLifecycleResponse; @@ -74,13 +75,13 @@ impl S3Api for PutBucketLifecycle { } impl ToS3Request for PutBucketLifecycle { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let mut headers: Multimap = self.extra_headers.unwrap_or_default(); let bytes: Bytes = self.config.to_xml().into(); - headers.add("Content-MD5", md5sum_hash(bytes.as_ref())); + headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref())); Ok(S3Request::new(self.client, Method::PUT) .region(self.region) diff --git a/src/s3/builders/put_bucket_notification.rs b/src/s3/builders/put_bucket_notification.rs index 1458778..b017dcc 100644 --- a/src/s3/builders/put_bucket_notification.rs +++ b/src/s3/builders/put_bucket_notification.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::PutBucketNotificationResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -74,7 +74,7 @@ impl S3Api for PutBucketNotification { } impl ToS3Request for PutBucketNotification { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let bytes: Bytes = self.config.to_xml().into(); diff --git a/src/s3/builders/put_bucket_policy.rs b/src/s3/builders/put_bucket_policy.rs index 7f0695e..888ef1a 100644 --- a/src/s3/builders/put_bucket_policy.rs +++ b/src/s3/builders/put_bucket_policy.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::PutBucketPolicyResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -74,7 +74,7 @@ impl S3Api for PutBucketPolicy { } impl ToS3Request for PutBucketPolicy { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let bytes: Bytes = self.config.into(); diff --git a/src/s3/builders/put_bucket_replication.rs b/src/s3/builders/put_bucket_replication.rs index ce737eb..1f408cf 100644 --- a/src/s3/builders/put_bucket_replication.rs +++ b/src/s3/builders/put_bucket_replication.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::PutBucketReplicationResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -74,7 +74,7 @@ impl S3Api for PutBucketReplication { } impl ToS3Request for PutBucketReplication { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let bytes: Bytes = self.config.to_xml().into(); diff --git a/src/s3/builders/put_bucket_tagging.rs b/src/s3/builders/put_bucket_tagging.rs index 9eafba8..141d51c 100644 --- a/src/s3/builders/put_bucket_tagging.rs +++ b/src/s3/builders/put_bucket_tagging.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::PutBucketTaggingResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -75,7 +75,7 @@ impl S3Api for PutBucketTagging { } impl ToS3Request for PutBucketTagging { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let data: String = { diff --git a/src/s3/builders/put_bucket_versioning.rs b/src/s3/builders/put_bucket_versioning.rs index b4ac168..f67a533 100644 --- a/src/s3/builders/put_bucket_versioning.rs +++ b/src/s3/builders/put_bucket_versioning.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::PutBucketVersioningResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -126,7 +126,7 @@ impl S3Api for PutBucketVersioning { } impl ToS3Request for PutBucketVersioning { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let data: String = { @@ -142,7 +142,7 @@ impl ToS3Request for PutBucketVersioning { Some(VersioningStatus::Enabled) => data.push_str("Enabled"), Some(VersioningStatus::Suspended) => data.push_str("Suspended"), None => { - return Err(Error::InvalidVersioningStatus( + return Err(ValidationErr::InvalidVersioningStatus( "Missing VersioningStatus".into(), )); } diff --git a/src/s3/builders/put_object.rs b/src/s3/builders/put_object.rs index e7a3b05..648c05b 100644 --- a/src/s3/builders/put_object.rs +++ b/src/s3/builders/put_object.rs @@ -14,23 +14,21 @@ // limitations under the License. 
use super::ObjectContent; +use crate::s3::builders::{ContentStream, Size}; +use crate::s3::client::Client; +use crate::s3::error::{Error, IoError, ValidationErr}; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::a_response_traits::HasEtagFromHeaders; -use crate::s3::segmented_bytes::SegmentedBytes; -use crate::s3::utils::{check_object_name, insert}; -use crate::s3::{ - builders::{ContentStream, Size}, - client::Client, - error::Error, - response::{ - AbortMultipartUploadResponse, CompleteMultipartUploadResponse, - CreateMultipartUploadResponse, PutObjectContentResponse, PutObjectResponse, - UploadPartResponse, - }, - sse::Sse, - types::{PartInfo, Retention, S3Api, S3Request, ToS3Request}, - utils::{check_bucket_name, md5sum_hash, to_iso8601utc, url_encode}, +use crate::s3::response::{ + AbortMultipartUploadResponse, CompleteMultipartUploadResponse, CreateMultipartUploadResponse, + PutObjectContentResponse, PutObjectResponse, UploadPartResponse, }; +use crate::s3::segmented_bytes::SegmentedBytes; +use crate::s3::sse::Sse; +use crate::s3::types::{PartInfo, Retention, S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{check_bucket_name, md5sum_hash, to_iso8601utc, url_encode}; +use crate::s3::utils::{check_object_name, check_sse, insert}; use bytes::{Bytes, BytesMut}; use http::Method; use std::{collections::HashMap, sync::Arc}; @@ -119,7 +117,7 @@ impl S3Api for CreateMultipartUpload { } impl ToS3Request for CreateMultipartUpload { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; @@ -195,7 +193,7 @@ impl S3Api for AbortMultipartUpload { } impl ToS3Request for AbortMultipartUpload { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; @@ -272,15 +270,17 @@ impl CompleteMultipartUpload { } impl ToS3Request for 
CompleteMultipartUpload { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; if self.upload_id.is_empty() { - return Err(Error::InvalidUploadId("upload ID cannot be empty".into())); + return Err(ValidationErr::InvalidUploadId( + "upload ID cannot be empty".into(), + )); } if self.parts.is_empty() { - return Err(Error::EmptyParts("parts cannot be empty".into())); + return Err(ValidationErr::EmptyParts("parts cannot be empty".into())); } } @@ -302,8 +302,8 @@ impl ToS3Request for CompleteMultipartUpload { let mut headers: Multimap = self.extra_headers.unwrap_or_default(); { - headers.add("Content-Type", "application/xml"); - headers.add("Content-MD5", md5sum_hash(bytes.as_ref())); + headers.add(CONTENT_TYPE, "application/xml"); + headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref())); } let mut query_params: Multimap = self.extra_query_params.unwrap_or_default(); query_params.add("uploadId", self.upload_id); @@ -409,22 +409,24 @@ impl S3Api for UploadPart { } impl ToS3Request for UploadPart { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; - if let Some(upload_id) = &self.upload_id { - if upload_id.is_empty() { - return Err(Error::InvalidUploadId("upload ID cannot be empty".into())); - } + if let Some(upload_id) = &self.upload_id + && upload_id.is_empty() + { + return Err(ValidationErr::InvalidUploadId( + "upload ID cannot be empty".into(), + )); } - if let Some(part_number) = self.part_number { - if !(1..=MAX_MULTIPART_COUNT).contains(&part_number) { - return Err(Error::InvalidPartNumber(format!( - "part number must be between 1 and {MAX_MULTIPART_COUNT}" - ))); - } + if let Some(part_number) = self.part_number + && !(1..=MAX_MULTIPART_COUNT).contains(&part_number) + { + return Err(ValidationErr::InvalidPartNumber(format!( + "part number must be between 1 and 
{MAX_MULTIPART_COUNT}" + ))); } } @@ -523,7 +525,7 @@ impl S3Api for PutObject { } impl ToS3Request for PutObject { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { self.0.to_s3request() } } @@ -532,7 +534,7 @@ impl ToS3Request for PutObject { // region: put-object-content -/// PutObjectContent takes a `ObjectContent` stream and uploads it to MinIO/S3. +/// PutObjectContent takes an `ObjectContent` stream and uploads it to MinIO/S3. /// /// It is a higher level API and handles multipart uploads transparently. #[derive(Default)] @@ -631,12 +633,13 @@ impl PutObjectContent { pub async fn send(mut self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; + check_sse(&self.sse, &self.client)?; let input_content = std::mem::take(&mut self.input_content); self.content_stream = input_content .to_content_stream() .await - .map_err(Error::IOError)?; + .map_err(IoError::from)?; // object_size may be Size::Unknown. let object_size = self.content_stream.get_size(); @@ -646,14 +649,12 @@ impl PutObjectContent { self.part_size = Size::Known(part_size); self.part_count = expected_parts; - if let Some(v) = &self.sse { - if v.tls_required() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } - // Read the first part. - let seg_bytes = self.content_stream.read_upto(part_size as usize).await?; + let seg_bytes = self + .content_stream + .read_upto(part_size as usize) + .await + .map_err(IoError::from)?; // In the first part read, if: // @@ -691,7 +692,7 @@ impl PutObjectContent { // Not enough data! 
let expected: u64 = object_size.as_u64().unwrap(); let got: u64 = seg_bytes.len() as u64; - Err(Error::InsufficientData(expected, got)) + Err(ValidationErr::InsufficientData { expected, got }.into()) } else { let bucket: String = self.bucket.clone(); let object: String = self.object.clone(); @@ -754,7 +755,10 @@ impl PutObjectContent { if let Some(v) = first_part.take() { v } else { - self.content_stream.read_upto(part_size as usize).await? + self.content_stream + .read_upto(part_size as usize) + .await + .map_err(IoError::from)? } }; part_number += 1; @@ -764,19 +768,19 @@ impl PutObjectContent { assert!(buffer_size <= part_size, "{buffer_size} <= {part_size}",); if (buffer_size == 0) && (part_number > 1) { - // We are done as we uploaded at least 1 part and we have reached the end of the stream. + // We are done as we uploaded at least 1 part, and we have reached the end of the stream. break; } // Check if we have too many parts to upload. if self.part_count.is_none() && (part_number > MAX_MULTIPART_COUNT) { - return Err(Error::TooManyParts); + return Err(ValidationErr::TooManyParts(part_number as u64).into()); } if object_size.is_known() { let exp = object_size.as_u64().unwrap(); if exp < total_read { - return Err(Error::TooMuchData(exp)); + return Err(ValidationErr::TooMuchData(exp).into()); } } @@ -808,7 +812,7 @@ impl PutObjectContent { size: buffer_size, }); - // Finally check if we are done. + // Finally, check if we are done. 
if buffer_size < part_size { done = true; } @@ -820,7 +824,11 @@ impl PutObjectContent { if object_size.is_known() { let expected = object_size.as_u64().unwrap(); if expected != size { - return Err(Error::InsufficientData(expected, size)); + return Err(ValidationErr::InsufficientData { + expected, + got: size, + } + .into()); } } @@ -851,7 +859,7 @@ fn into_headers_put_object( retention: Option, legal_hold: bool, content_type: Option, -) -> Result { +) -> Result { let mut map = Multimap::new(); if let Some(v) = extra_headers { @@ -862,12 +870,12 @@ fn into_headers_put_object( // Validate it. for (k, _) in v.iter() { if k.is_empty() { - return Err(Error::InvalidUserMetadata( + return Err(ValidationErr::InvalidUserMetadata( "user metadata key cannot be empty".into(), )); } if !k.starts_with("x-amz-meta-") { - return Err(Error::InvalidUserMetadata(format!( + return Err(ValidationErr::InvalidUserMetadata(format!( "user metadata key '{k}' does not start with 'x-amz-meta-'", ))); } @@ -891,27 +899,27 @@ fn into_headers_put_object( } if !tagging.is_empty() { - map.insert("x-amz-tagging".into(), tagging); + map.insert(X_AMZ_TAGGING.into(), tagging); } } if let Some(v) = retention { - map.insert("x-amz-object-lock-mode".into(), v.mode.to_string()); + map.insert(X_AMZ_OBJECT_LOCK_MODE.into(), v.mode.to_string()); map.insert( - "x-amz-object-lock-retain-until-date".into(), + X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE.into(), to_iso8601utc(v.retain_until_date), ); } if legal_hold { - map.insert("x-amz-object-lock-legal-hold".into(), "ON".into()); + map.insert(X_AMZ_OBJECT_LOCK_LEGAL_HOLD.into(), "ON".into()); } // Set the Content-Type header if not already set. 
- if !map.contains_key("Content-Type") { + if !map.contains_key(CONTENT_TYPE) { map.insert( - "Content-Type".into(), - content_type.unwrap_or_else(|| "application/octet-stream".into()), + CONTENT_TYPE.into(), + content_type.unwrap_or("application/octet-stream".into()), ); } @@ -925,27 +933,30 @@ pub const MAX_MULTIPART_COUNT: u16 = 10_000; /// Returns the size of each part to upload and the total number of parts. The /// number of parts is `None` when the object size is unknown. -pub fn calc_part_info(object_size: Size, part_size: Size) -> Result<(u64, Option), Error> { +pub fn calc_part_info( + object_size: Size, + part_size: Size, +) -> Result<(u64, Option), ValidationErr> { // Validate arguments against limits. if let Size::Known(v) = part_size { if v < MIN_PART_SIZE { - return Err(Error::InvalidMinPartSize(v)); + return Err(ValidationErr::InvalidMinPartSize(v)); } if v > MAX_PART_SIZE { - return Err(Error::InvalidMaxPartSize(v)); + return Err(ValidationErr::InvalidMaxPartSize(v)); } } - if let Size::Known(v) = object_size { - if v > MAX_OBJECT_SIZE { - return Err(Error::InvalidObjectSize(v)); - } + if let Size::Known(v) = object_size + && v > MAX_OBJECT_SIZE + { + return Err(ValidationErr::InvalidObjectSize(v)); } match (object_size, part_size) { - // If object size is unknown, part size must be provided. - (Size::Unknown, Size::Unknown) => Err(Error::MissingPartSize), + // If the object size is unknown, the part size must be provided. + (Size::Unknown, Size::Unknown) => Err(ValidationErr::MissingPartSize), // If object size is unknown, and part size is known, the number of // parts will be unknown, so return None for that. @@ -954,8 +965,7 @@ pub fn calc_part_info(object_size: Size, part_size: Size) -> Result<(u64, Option // If object size is known, and part size is unknown, calculate part // size. (Size::Known(object_size), Size::Unknown) => { - // 1. Calculate the minimum part size (i.e. assuming part count is - // maximum). + // 1. 
Calculate the minimum part size (i.e., assuming part count is the maximum). let mut psize: u64 = (object_size as f64 / MAX_MULTIPART_COUNT as f64).ceil() as u64; // 2. Round up to the nearest multiple of MIN_PART_SIZE. @@ -979,11 +989,11 @@ pub fn calc_part_info(object_size: Size, part_size: Size) -> Result<(u64, Option (Size::Known(object_size), Size::Known(part_size)) => { let part_count = (object_size as f64 / part_size as f64).ceil() as u16; if part_count == 0 || part_count > MAX_MULTIPART_COUNT { - return Err(Error::InvalidPartCount( + return Err(ValidationErr::InvalidPartCount { object_size, part_size, - MAX_MULTIPART_COUNT, - )); + part_count: MAX_MULTIPART_COUNT, + }); } Ok((part_size, Some(part_count))) @@ -1002,29 +1012,29 @@ mod tests { if let Size::Known(v) = part_size { if v < MIN_PART_SIZE { return match res { - Err(Error::InvalidMinPartSize(v_err)) => v == v_err, + Err(ValidationErr::InvalidMinPartSize(v_err)) => v == v_err, _ => false, } } if v > MAX_PART_SIZE { return match res { - Err(Error::InvalidMaxPartSize(v_err)) => v == v_err, + Err(ValidationErr::InvalidMaxPartSize(v_err)) => v == v_err, _ => false, } } } - if let Size::Known(v) = object_size { - if v > MAX_OBJECT_SIZE { + if let Size::Known(v) = object_size + && v > MAX_OBJECT_SIZE { return match res { - Err(Error::InvalidObjectSize(v_err)) => v == v_err, + Err(ValidationErr::InvalidObjectSize(v_err)) => v == v_err, _ => false, } } - } + // Validate the calculation of part size and part count. 
match (object_size, part_size, res) { - (Size::Unknown, Size::Unknown, Err(Error::MissingPartSize)) => true, + (Size::Unknown, Size::Unknown, Err(ValidationErr::MissingPartSize)) => true, (Size::Unknown, Size::Unknown, _) => false, (Size::Unknown, Size::Known(part_size), Ok((psize, None))) => { @@ -1049,7 +1059,7 @@ mod tests { (Size::Known(object_size), Size::Known(part_size), res) => { if (part_size > object_size) || ((part_size * (MAX_MULTIPART_COUNT as u64)) < object_size) { return match res { - Err(Error::InvalidPartCount(v1, v2, v3)) => { + Err(ValidationErr::InvalidPartCount{object_size:v1, part_size:v2, part_count:v3}) => { (v1 == object_size) && (v2 == part_size) && (v3 == MAX_MULTIPART_COUNT) } _ => false, diff --git a/src/s3/builders/put_object_legal_hold.rs b/src/s3/builders/put_object_legal_hold.rs index ccdd4b0..b610886 100644 --- a/src/s3/builders/put_object_legal_hold.rs +++ b/src/s3/builders/put_object_legal_hold.rs @@ -14,7 +14,8 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::PutObjectLegalHoldResponse; use crate::s3::types::{S3Api, S3Request, ToS3Request}; @@ -75,7 +76,7 @@ impl S3Api for PutObjectLegalHold { } impl ToS3Request for PutObjectLegalHold { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; @@ -90,7 +91,7 @@ impl ToS3Request for PutObjectLegalHold { let bytes: Bytes = Bytes::from(payload); // TODO consider const payload with precalculated md5 - headers.add("Content-MD5", md5sum_hash(bytes.as_ref())); + headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref())); Ok(S3Request::new(self.client, Method::PUT) .region(self.region) diff --git a/src/s3/builders/put_object_lock_config.rs b/src/s3/builders/put_object_lock_config.rs index d823258..e703187 100644 --- 
a/src/s3/builders/put_object_lock_config.rs +++ b/src/s3/builders/put_object_lock_config.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::Multimap; use crate::s3::response::PutObjectLockConfigResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -74,7 +74,7 @@ impl S3Api for PutObjectLockConfig { } impl ToS3Request for PutObjectLockConfig { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; let bytes: Bytes = self.config.to_xml().into(); diff --git a/src/s3/builders/put_object_retention.rs b/src/s3/builders/put_object_retention.rs index 9db2764..9da2e1b 100644 --- a/src/s3/builders/put_object_retention.rs +++ b/src/s3/builders/put_object_retention.rs @@ -14,7 +14,8 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::PutObjectRetentionResponse; use crate::s3::types::{RetentionMode, S3Api, S3Request, ToS3Request}; @@ -95,15 +96,15 @@ impl S3Api for PutObjectRetention { } impl ToS3Request for PutObjectRetention { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; if self.retention_mode.is_some() ^ self.retain_until_date.is_some() { - return Err(Error::InvalidRetentionConfig(String::from( - "both mode and retain_until_date must be set or unset", - ))); + return Err(ValidationErr::InvalidRetentionConfig( + "both mode and retain_until_date must be set or unset".into(), + )); } } @@ -125,9 +126,9 @@ impl ToS3Request for PutObjectRetention { let mut headers: Multimap = self.extra_headers.unwrap_or_default(); if self.bypass_governance_mode { - headers.add("x-amz-bypass-governance-retention", "true"); + 
headers.add(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true"); } - headers.add("Content-MD5", md5sum_hash(bytes.as_ref())); + headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref())); let mut query_params: Multimap = insert(self.extra_query_params, "retention"); query_params.add_version(self.version_id); diff --git a/src/s3/builders/put_object_tagging.rs b/src/s3/builders/put_object_tagging.rs index 83fae15..9d80691 100644 --- a/src/s3/builders/put_object_tagging.rs +++ b/src/s3/builders/put_object_tagging.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::PutObjectTaggingResponse; use crate::s3::segmented_bytes::SegmentedBytes; @@ -83,7 +83,7 @@ impl S3Api for PutObjectTagging { } impl ToS3Request for PutObjectTagging { - fn to_s3request(self) -> Result { + fn to_s3request(self) -> Result { check_bucket_name(&self.bucket, true)?; check_object_name(&self.object)?; diff --git a/src/s3/builders/select_object_content.rs b/src/s3/builders/select_object_content.rs index 0c5463d..6b66277 100644 --- a/src/s3/builders/select_object_content.rs +++ b/src/s3/builders/select_object_content.rs @@ -14,12 +14,13 @@ // limitations under the License. 
use crate::s3::Client; -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::SelectObjectContentResponse; use crate::s3::sse::SseCustomerKey; use crate::s3::types::{S3Api, S3Request, SelectRequest, ToS3Request}; -use crate::s3::utils::{check_bucket_name, check_object_name, insert, md5sum_hash}; +use crate::s3::utils::{check_bucket_name, check_object_name, check_ssec, insert, md5sum_hash}; use async_trait::async_trait; use bytes::Bytes; use http::Method; @@ -90,19 +91,15 @@ impl S3Api for SelectObjectContent { #[async_trait] impl ToS3Request for SelectObjectContent { - fn to_s3request(self) -> Result { - { - check_bucket_name(&self.bucket, true)?; - check_object_name(&self.object)?; + fn to_s3request(self) -> Result { + check_bucket_name(&self.bucket, true)?; + check_object_name(&self.object)?; + check_ssec(&self.ssec, &self.client)?; - if self.ssec.is_some() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } let bytes: Bytes = self.request.to_xml().into(); let mut headers: Multimap = self.extra_headers.unwrap_or_default(); - headers.add("Content-MD5", md5sum_hash(bytes.as_ref())); + headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref())); let mut query_params: Multimap = insert(self.extra_query_params, "select"); query_params.add("select-type", "2"); diff --git a/src/s3/builders/stat_object.rs b/src/s3/builders/stat_object.rs index da33950..3759de4 100644 --- a/src/s3/builders/stat_object.rs +++ b/src/s3/builders/stat_object.rs @@ -13,19 +13,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use async_trait::async_trait; -use http::Method; - +use crate::s3::client::Client; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::StatObjectResponse; -use crate::s3::utils::check_object_name; -use crate::s3::{ - client::Client, - error::Error, - sse::{Sse, SseCustomerKey}, - types::{S3Api, S3Request, ToS3Request}, - utils::{UtcTime, check_bucket_name, to_http_header_value}, +use crate::s3::sse::{Sse, SseCustomerKey}; +use crate::s3::types::{S3Api, S3Request, ToS3Request}; +use crate::s3::utils::{ + UtcTime, check_bucket_name, check_object_name, check_ssec, to_http_header_value, }; +use async_trait::async_trait; +use http::Method; /// Argument builder for the [`StatObject`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) S3 API operation. /// Retrieves all of the metadata from an object without returning the object itself. @@ -125,28 +124,24 @@ impl S3Api for StatObject { #[async_trait] impl ToS3Request for StatObject { - fn to_s3request(self) -> Result { - { - check_bucket_name(&self.bucket, true)?; - check_object_name(&self.object)?; - if self.ssec.is_some() && !self.client.is_secure() { - return Err(Error::SseTlsRequired(None)); - } - } + fn to_s3request(self) -> Result { + check_bucket_name(&self.bucket, true)?; + check_object_name(&self.object)?; + check_ssec(&self.ssec, &self.client)?; let mut headers: Multimap = self.extra_headers.unwrap_or_default(); { if let Some(v) = self.match_etag { - headers.add("if-match", v); + headers.add(IF_MATCH, v); } if let Some(v) = self.not_match_etag { - headers.add("if-none-match", v); + headers.add(IF_NONE_MATCH, v); } if let Some(v) = self.modified_since { - headers.add("if-modified-since", to_http_header_value(v)); + headers.add(IF_MODIFIED_SINCE, to_http_header_value(v)); } if let Some(v) = self.unmodified_since { - headers.add("if-unmodified-since", to_http_header_value(v)); + 
headers.add(IF_UNMODIFIED_SINCE, to_http_header_value(v)); } if let Some(v) = self.ssec { headers.add_multimap(v.headers()); diff --git a/src/s3/client.rs b/src/s3/client.rs index 79cc1ed..7a41ea3 100644 --- a/src/s3/client.rs +++ b/src/s3/client.rs @@ -23,21 +23,23 @@ use std::sync::{Arc, OnceLock}; use crate::s3::builders::{BucketExists, ComposeSource}; use crate::s3::creds::Provider; -use crate::s3::error::{Error, ErrorCode, ErrorResponse}; +use crate::s3::header_constants::*; use crate::s3::http::BaseUrl; +use crate::s3::minio_error_response::{MinioErrorCode, MinioErrorResponse}; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::a_response_traits::{HasEtagFromHeaders, HasS3Fields}; use crate::s3::response::*; use crate::s3::segmented_bytes::SegmentedBytes; use crate::s3::signer::sign_v4_s3; -use crate::s3::utils::{EMPTY_SHA256, sha256_hash_sb, to_amz_date, utc_now}; +use crate::s3::utils::{EMPTY_SHA256, check_ssec_with_log, sha256_hash_sb, to_amz_date, utc_now}; +use crate::s3::error::{Error, IoError, NetworkError, S3ServerError, ValidationErr}; use bytes::Bytes; use dashmap::DashMap; use http::HeaderMap; use hyper::http::Method; use rand::Rng; -use reqwest::Body; +use reqwest::{Body, Response}; mod append_object; mod bucket_exists; @@ -197,15 +199,17 @@ impl ClientBuilder { ))] if let Some(v) = self.ssl_cert_file { let mut buf = Vec::new(); - File::open(v)?.read_to_end(&mut buf)?; - let certs = reqwest::Certificate::from_pem_bundle(&buf)?; + let mut file = File::open(v).map_err(IoError::IOError)?; + file.read_to_end(&mut buf).map_err(IoError::IOError)?; + + let certs = reqwest::Certificate::from_pem_bundle(&buf).map_err(ValidationErr::from)?; for cert in certs { builder = builder.add_root_certificate(cert); } } Ok(Client { - http_client: builder.build()?, + http_client: builder.build().map_err(ValidationErr::from)?, shared: Arc::new(SharedClientItems { base_url: self.base_url, provider: self.provider, @@ -333,17 +337,13 @@ impl Client 
{ let sources_len = sources.len(); for source in sources.iter_mut() { - if source.ssec.is_some() && !self.is_secure() { - return Err(Error::SseTlsRequired(Some(format!( - "source {}/{}{}: ", - source.bucket, - source.object, - source - .version_id - .as_ref() - .map_or(String::new(), |v| String::from("?versionId=") + v) - )))); - } + check_ssec_with_log( + &source.ssec, + self, + &source.bucket, + &source.object, + &source.version_id, + )?; i += 1; @@ -370,18 +370,19 @@ impl Client { } if (size < MIN_PART_SIZE) && (sources_len != 1) && (i != sources_len) { - return Err(Error::InvalidComposeSourcePartSize( - source.bucket.clone(), - source.object.clone(), - source.version_id.clone(), + return Err(ValidationErr::InvalidComposeSourcePartSize { + bucket: source.bucket.clone(), + object: source.object.clone(), + version: source.version_id.clone(), size, - MIN_PART_SIZE, - )); + expected_size: MIN_PART_SIZE, + } + .into()); } object_size += size; if object_size > MAX_OBJECT_SIZE { - return Err(Error::InvalidObjectSize(object_size)); + return Err(ValidationErr::InvalidObjectSize(object_size).into()); } if size > MAX_PART_SIZE { @@ -394,13 +395,14 @@ impl Client { } if last_part_size < MIN_PART_SIZE && sources_len != 1 && i != sources_len { - return Err(Error::InvalidComposeSourceMultipart( - source.bucket.to_string(), - source.object.to_string(), - source.version_id.clone(), + return Err(ValidationErr::InvalidComposeSourceMultipart { + bucket: source.bucket.to_string(), + object: source.object.to_string(), + version: source.version_id.clone(), size, - MIN_PART_SIZE, - )); + expected_size: MIN_PART_SIZE, + } + .into()); } part_count += count as u16; @@ -409,7 +411,9 @@ impl Client { } if part_count > MAX_MULTIPART_COUNT { - return Err(Error::InvalidMultipartCount(MAX_MULTIPART_COUNT)); + return Err( + ValidationErr::InvalidMultipartCount(MAX_MULTIPART_COUNT as u64).into(), + ); } } @@ -436,14 +440,14 @@ impl Client { )?; { - headers.add("Host", url.host_header_value()); + 
headers.add(HOST, url.host_header_value()); let sha256: String = match *method { Method::PUT | Method::POST => { - if !headers.contains_key("Content-Type") { - headers.add("Content-Type", "application/octet-stream"); + if !headers.contains_key(CONTENT_TYPE) { + headers.add(CONTENT_TYPE, "application/octet-stream"); } let len: usize = body.as_ref().map_or(0, |b| b.len()); - headers.add("Content-Length", len.to_string()); + headers.add(CONTENT_LENGTH, len.to_string()); match body { None => EMPTY_SHA256.into(), Some(ref v) => { @@ -454,14 +458,14 @@ impl Client { } _ => EMPTY_SHA256.into(), }; - headers.add("x-amz-content-sha256", sha256.clone()); + headers.add(X_AMZ_CONTENT_SHA256, sha256.clone()); let date = utc_now(); - headers.add("x-amz-date", to_amz_date(date)); + headers.add(X_AMZ_DATE, to_amz_date(date)); if let Some(p) = &self.shared.provider { let creds = p.fetch(); if creds.session_token.is_some() { - headers.add("X-Amz-Security-Token", creds.session_token.unwrap()); + headers.add(X_AMZ_SECURITY_TOKEN, creds.session_token.unwrap()); } sign_v4_s3( method, @@ -509,15 +513,12 @@ impl Client { None => Vec::new(), }; let stream = futures_util::stream::iter( - bytes_vec - .into_iter() - .map(|b| -> Result<_, std::io::Error> { Ok(b) }), + bytes_vec.into_iter().map(|b| -> Result<_, Error> { Ok(b) }), ); req = req.body(Body::wrap_stream(stream)); } - let resp: reqwest::Response = req.send().await?; - + let resp: Response = req.send().await.map_err(ValidationErr::from)?; //TODO request error handled by network error layer if resp.status().is_success() { return Ok(resp); } @@ -525,9 +526,9 @@ impl Client { let mut resp = resp; let status_code = resp.status().as_u16(); let headers: HeaderMap = mem::take(resp.headers_mut()); - let body: Bytes = resp.bytes().await?; + let body: Bytes = resp.bytes().await.map_err(ValidationErr::from)?; - let e: Error = self.shared.get_error_response( + let e: MinioErrorResponse = self.shared.create_minio_error_response( body, status_code, 
headers, @@ -536,17 +537,17 @@ impl Client { bucket_name, object_name, retry, - ); + )?; - if let Error::S3Error(ref err) = e { - if (err.code == ErrorCode::NoSuchBucket) || (err.code == ErrorCode::RetryHead) { - if let Some(v) = bucket_name { - self.shared.region_map.remove(v); - } - } + // If the error is a NoSuchBucket or RetryHead, remove the bucket from the region map. + if (matches!(e.code(), MinioErrorCode::NoSuchBucket) + || matches!(e.code(), MinioErrorCode::RetryHead)) + && let Some(v) = bucket_name + { + self.shared.region_map.remove(v); }; - Err(e) + Err(Error::S3Server(S3ServerError::S3Error(Box::new(e)))) } pub(crate) async fn execute( @@ -574,8 +575,8 @@ impl Client { match resp { Ok(r) => return Ok(r), Err(e) => match e { - Error::S3Error(ref er) => { - if er.code != ErrorCode::RetryHead { + Error::S3Server(S3ServerError::S3Error(ref er)) => { + if !matches!(er.code(), MinioErrorCode::RetryHead) { return Err(e); } } @@ -614,16 +615,19 @@ impl SharedClientItems { header_map: &reqwest::header::HeaderMap, bucket_name: Option<&str>, retry: bool, - ) -> Result<(ErrorCode, String), Error> { + ) -> Result<(MinioErrorCode, String), Error> { let (mut code, mut message) = match status_code { - 301 => (ErrorCode::PermanentRedirect, "Moved Permanently".into()), - 307 => (ErrorCode::Redirect, "Temporary redirect".into()), - 400 => (ErrorCode::BadRequest, "Bad request".into()), - _ => (ErrorCode::NoError, String::new()), + 301 => ( + MinioErrorCode::PermanentRedirect, + "Moved Permanently".into(), + ), + 307 => (MinioErrorCode::Redirect, "Temporary redirect".into()), + 400 => (MinioErrorCode::BadRequest, "Bad request".into()), + _ => (MinioErrorCode::NoError, String::new()), }; - let region: &str = match header_map.get("x-amz-bucket-region") { - Some(v) => v.to_str()?, + let region: &str = match header_map.get(X_AMZ_BUCKET_REGION) { + Some(v) => v.to_str().map_err(ValidationErr::from)?, _ => "", }; @@ -632,19 +636,20 @@ impl SharedClientItems { 
message.push_str(region); } - if retry && !region.is_empty() && (method == Method::HEAD) { - if let Some(v) = bucket_name { - if self.region_map.contains_key(v) { - code = ErrorCode::RetryHead; - message = String::new(); - } - } + if retry + && !region.is_empty() + && (method == Method::HEAD) + && let Some(v) = bucket_name + && self.region_map.contains_key(v) + { + code = MinioErrorCode::RetryHead; + message = String::new(); } Ok((code, message)) } - fn get_error_response( + fn create_minio_error_response( &self, body: Bytes, http_status_code: u16, @@ -654,88 +659,98 @@ impl SharedClientItems { bucket_name: Option<&str>, object_name: Option<&str>, retry: bool, - ) -> Error { + ) -> Result { + // if body is present, try to parse it as XML error response if !body.is_empty() { - return match headers.get("Content-Type") { - Some(v) => match v.to_str() { - Ok(s) => match s.to_lowercase().contains("application/xml") { - true => match ErrorResponse::parse(body, headers) { - Ok(v) => Error::S3Error(v), - Err(e) => e, - }, - false => Error::InvalidResponse(http_status_code, s.to_string()), - }, - Err(e) => return Error::StrError(e), - }, - _ => Error::InvalidResponse(http_status_code, String::new()), + let content_type = headers + .get(CONTENT_TYPE) + .ok_or_else(|| { + Error::S3Server(S3ServerError::InvalidServerResponse { + message: "missing Content-Type header".into(), + http_status_code, + content_type: String::new(), + }) + })? 
+ .to_str() + .map_err(Into::into) // ToStrError -> ValidationErr + .map_err(Error::Validation)?; // ValidationErr -> Error + + return if content_type.to_lowercase().contains("application/xml") { + MinioErrorResponse::new_from_body(body, headers) + } else { + Err(Error::S3Server(S3ServerError::InvalidServerResponse { + message: format!( + "expected content-type 'application/xml', but got {content_type}" + ), + http_status_code, + content_type: content_type.into(), + })) }; } + // Decide code and message by status let (code, message) = match http_status_code { - 301 | 307 | 400 => match self.handle_redirect_response( + 301 | 307 | 400 => self.handle_redirect_response( http_status_code, method, &headers, bucket_name, retry, - ) { - Ok(v) => v, - Err(e) => return e, - }, - 403 => (ErrorCode::AccessDenied, "Access denied".into()), + )?, + 403 => (MinioErrorCode::AccessDenied, "Access denied".into()), 404 => match object_name { - Some(_) => (ErrorCode::NoSuchKey, "Object does not exist".into()), - _ => match bucket_name { - Some(_) => (ErrorCode::NoSuchBucket, "Bucket does not exist".into()), - _ => ( - ErrorCode::ResourceNotFound, + Some(_) => (MinioErrorCode::NoSuchKey, "Object does not exist".into()), + None => match bucket_name { + Some(_) => (MinioErrorCode::NoSuchBucket, "Bucket does not exist".into()), + None => ( + MinioErrorCode::ResourceNotFound, "Request resource not found".into(), ), }, }, - 405 => ( - ErrorCode::MethodNotAllowed, + 405 | 501 => ( + MinioErrorCode::MethodNotAllowed, "The specified method is not allowed against this resource".into(), ), 409 => match bucket_name { - Some(_) => (ErrorCode::NoSuchBucket, "Bucket does not exist".into()), - _ => ( - ErrorCode::ResourceConflict, + Some(_) => (MinioErrorCode::NoSuchBucket, "Bucket does not exist".into()), + None => ( + MinioErrorCode::ResourceConflict, "Request resource conflicts".into(), ), }, - 501 => ( - ErrorCode::MethodNotAllowed, - "The specified method is not allowed against this 
resource".into(), - ), - _ => return Error::ServerError(http_status_code), + _ => { + return Err(Error::Network(NetworkError::ServerError(http_status_code))); + } }; - let request_id: String = match headers.get("x-amz-request-id") { - Some(v) => match v.to_str() { - Ok(s) => s.to_string(), - Err(e) => return Error::StrError(e), - }, - _ => String::new(), + let request_id = match headers.get(X_AMZ_REQUEST_ID) { + Some(v) => v + .to_str() + .map_err(Into::into) + .map_err(Error::Validation)? // ValidationErr -> Error + .to_string(), + None => String::new(), }; - let host_id: String = match headers.get("x-amz-id-2") { - Some(v) => match v.to_str() { - Ok(s) => s.to_string(), - Err(e) => return Error::StrError(e), - }, - _ => String::new(), + let host_id = match headers.get(X_AMZ_ID_2) { + Some(v) => v + .to_str() + .map_err(Into::into) + .map_err(Error::Validation)? // ValidationErr -> Error + .to_string(), + None => String::new(), }; - Error::S3Error(ErrorResponse { + Ok(MinioErrorResponse::new( headers, code, - message, - resource: resource.to_string(), + (!message.is_empty()).then_some(message), + resource.to_string(), request_id, host_id, - bucket_name: bucket_name.unwrap_or_default().to_string(), - object_name: object_name.unwrap_or_default().to_string(), - }) + bucket_name.map(String::from), + object_name.map(String::from), + )) } } diff --git a/src/s3/client/delete_bucket.rs b/src/s3/client/delete_bucket.rs index a6b4e1d..8135524 100644 --- a/src/s3/client/delete_bucket.rs +++ b/src/s3/client/delete_bucket.rs @@ -15,7 +15,9 @@ use super::Client; use crate::s3::builders::{DeleteBucket, DeleteObject, ObjectToDelete}; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::Error; +use crate::s3::error::S3ServerError::S3Error; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::{BucketExistsResponse, DeleteResult}; use crate::s3::response::{ DeleteBucketResponse, DeleteObjectResponse, DeleteObjectsResponse, 
PutObjectLegalHoldResponse, @@ -130,14 +132,14 @@ impl Client { let request: DeleteBucket = self.delete_bucket(&bucket); match request.send().await { Ok(resp) => Ok(resp), - Err(Error::S3Error(mut e)) => { - if matches!(e.code, ErrorCode::NoSuchBucket) { + Err(Error::S3Server(S3Error(mut e))) => { + if matches!(e.code(), MinioErrorCode::NoSuchBucket) { Ok(DeleteBucketResponse { request: Default::default(), //TODO consider how to handle this body: Bytes::new(), - headers: e.headers, + headers: e.take_headers(), }) - } else if let ErrorCode::BucketNotEmpty(reason) = &e.code { + } else if matches!(e.code(), MinioErrorCode::BucketNotEmpty) { // for convenience, add the first 5 documents that are still in the bucket // to the error message let mut stream = self @@ -158,11 +160,14 @@ // else: silently ignore the error and keep looping } - let new_reason = format!("{reason}: found content: {objs:?}"); - e.code = ErrorCode::BucketNotEmpty(new_reason); - Err(Error::S3Error(e)) + let new_msg = match e.message() { + None => format!("found content: {objs:?}"), + Some(msg) => format!("{msg}, found content: {objs:?}"), + }; + e.set_message(new_msg); + Err(Error::S3Server(S3Error(e))) } else { - Err(Error::S3Error(e)) + Err(Error::S3Server(S3Error(e))) } } Err(e) => Err(e), diff --git a/src/s3/client/get_region.rs b/src/s3/client/get_region.rs index 0d178d1..3e84a9f 100644 --- a/src/s3/client/get_region.rs +++ b/src/s3/client/get_region.rs @@ -15,7 +15,8 @@ use super::{Client, DEFAULT_REGION}; use crate::s3::builders::GetRegion; -use crate::s3::error::Error; + +use crate::s3::error::{Error, ValidationErr}; use crate::s3::types::S3Api; impl Client { @@ -58,10 +59,11 @@ impl Client { if !self.shared.base_url.region.is_empty() && (self.shared.base_url.region != *requested_region) { - return Err(Error::RegionMismatch( - self.shared.base_url.region.clone(), - requested_region.clone(), - )); + return Err(ValidationErr::RegionMismatch { + bucket_region: 
self.shared.base_url.region.clone(), + region: requested_region.clone(), + } + .into()); } return Ok(requested_region.clone()); } diff --git a/src/s3/error.rs b/src/s3/error.rs index 6fc2339..d9f6cb7 100644 --- a/src/s3/error.rs +++ b/src/s3/error.rs @@ -1,414 +1,342 @@ -// MinIO Rust Library for Amazon S3 Compatible Cloud Storage -// Copyright 2022 MinIO, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. +use crate::s3::minio_error_response::MinioErrorResponse; +use thiserror::Error; -//! 
Error definitions for S3 operations +// Client side validation issues like invalid url or bucket name +#[derive(Error, Debug)] +pub enum ValidationErr { + /// The specified bucket is not valid + #[error("Invalid bucket name: '{name}' - {reason}")] + InvalidBucketName { name: String, reason: String }, -extern crate alloc; -use crate::s3::utils::get_default_text; -use bytes::{Buf, Bytes}; -use http::HeaderMap; -use std::fmt; -use xmltree::Element; + /// No Bucket name was provided + #[error("No bucket name provided")] + MissingBucketName, -#[derive(Clone, Debug, Default, PartialEq)] -pub enum ErrorCode { - #[default] - NoError, + /// Error while parsing time from string + #[error("Time parse error: {0}")] + TimeParseError(#[from] chrono::ParseError), - PermanentRedirect, - Redirect, - BadRequest, - RetryHead, - NoSuchBucket, - NoSuchBucketPolicy, - ReplicationConfigurationNotFoundError, - ServerSideEncryptionConfigurationNotFoundError, - NoSuchTagSet, - NoSuchObjectLockConfiguration, - NoSuchLifecycleConfiguration, - NoSuchKey, - ResourceNotFound, - MethodNotAllowed, - ResourceConflict, - AccessDenied, - NotSupported, - BucketNotEmpty(String), // String contains optional reason msg - BucketAlreadyOwnedByYou, - InvalidWriteOffset, + /// Error while parsing a URL from string + #[error("Invalid URL: {0}")] + InvalidUrl(#[from] http::uri::InvalidUri), - OtherError(String), -} + /// Error while performing IO operations + #[error("IO error: {0}")] + IOError(#[from] std::io::Error), -impl ErrorCode { - pub fn parse(s: &str) -> Self { - match s.to_lowercase().as_str() { - "permanentredirect" => ErrorCode::PermanentRedirect, - "redirect" => ErrorCode::Redirect, - "badrequest" => ErrorCode::BadRequest, - "retryhead" => ErrorCode::RetryHead, - "nosuchbucket" => ErrorCode::NoSuchBucket, - "nosuchbucketpolicy" => ErrorCode::NoSuchBucketPolicy, - "replicationconfigurationnotfounderror" => { - ErrorCode::ReplicationConfigurationNotFoundError - } - 
"serversideencryptionconfigurationnotfounderror" => { - ErrorCode::ServerSideEncryptionConfigurationNotFoundError - } - "nosuchtagset" => ErrorCode::NoSuchTagSet, - "nosuchobjectlockconfiguration" => ErrorCode::NoSuchObjectLockConfiguration, - "nosuchlifecycleconfiguration" => ErrorCode::NoSuchLifecycleConfiguration, - "nosuchkey" => ErrorCode::NoSuchKey, - "resourcenotfound" => ErrorCode::ResourceNotFound, - "methodnotallowed" => ErrorCode::MethodNotAllowed, - "resourceconflict" => ErrorCode::ResourceConflict, - "accessdenied" => ErrorCode::AccessDenied, - "notsupported" => ErrorCode::NotSupported, - "bucketnotempty" => ErrorCode::BucketNotEmpty("".to_string()), - "bucketalreadyownedbyyou" => ErrorCode::BucketAlreadyOwnedByYou, - "invalidwriteoffset" => ErrorCode::InvalidWriteOffset, + #[error("XML parse error: {0}")] + XmlParseError(#[from] xmltree::ParseError), - v => ErrorCode::OtherError(v.to_owned()), - } - } -} + #[error("HTTP error: {0}")] + HttpError(#[from] reqwest::Error), -#[derive(Clone, Debug, Default)] -/// Error response for S3 operations -pub struct ErrorResponse { - /// Headers as returned by the server. 
- pub(crate) headers: HeaderMap, - pub code: ErrorCode, - pub message: String, - pub resource: String, - pub request_id: String, - pub host_id: String, - pub bucket_name: String, - pub object_name: String, -} + #[error("String error: {message}")] + StrError { + message: String, + #[source] + source: Option>, + }, -impl ErrorResponse { - pub fn parse(body: Bytes, headers: HeaderMap) -> Result { - let root = match Element::parse(body.reader()) { - Ok(v) => v, - Err(e) => return Err(Error::XmlParseError(e)), - }; + #[error("Integer parsing error: {0}")] + IntError(#[from] std::num::ParseIntError), - Ok(Self { - headers, - code: ErrorCode::parse(&get_default_text(&root, "Code")), - message: get_default_text(&root, "Message"), - resource: get_default_text(&root, "Resource"), - request_id: get_default_text(&root, "RequestId"), - host_id: get_default_text(&root, "HostId"), - bucket_name: get_default_text(&root, "BucketName"), - object_name: get_default_text(&root, "Key"), - }) - } -} + #[error("Boolean parsing error: {0}")] + BoolError(#[from] std::str::ParseBoolError), -/// Error definitions -#[derive(Debug)] -pub enum Error { - TimeParseError(chrono::ParseError), - InvalidUrl(http::uri::InvalidUri), - IOError(std::io::Error), - XmlParseError(xmltree::ParseError), - HttpError(reqwest::Error), - StrError(reqwest::header::ToStrError), - IntError(std::num::ParseIntError), - BoolError(std::str::ParseBoolError), - Utf8Error(Box), - JsonError(serde_json::Error), - XmlError(String), - InvalidBaseUrl(String), - InvalidBucketName(String), - UrlBuildError(String), - RegionMismatch(String, String), - S3Error(ErrorResponse), - InvalidResponse(u16, String), - ServerError(u16), + #[error("Failed to parse as UTF-8: {0}")] + Utf8Error(#[from] std::str::Utf8Error), + + #[error("JSON error: {0}")] + JsonError(#[from] serde_json::Error), + + #[error("XML error: {message}")] + XmlError { + message: String, + #[source] + source: Option>, + }, + + #[error("Invalid object name: {0}")] 
InvalidObjectName(String), + + #[error("Invalid upload ID: {0}")] InvalidUploadId(String), + + #[error("Invalid part number: {0}")] InvalidPartNumber(String), + + #[error("Invalid user metadata: {0}")] InvalidUserMetadata(String), + + #[error("Invalid boolean value: {0}")] + InvalidBooleanValue(String), + + #[error("Invalid integer value: {message}")] + InvalidIntegerValue { + message: String, + #[source] + source: Box, + }, + + #[error("Empty parts: {0}")] EmptyParts(String), + + #[error("Invalid retention mode: {0}")] InvalidRetentionMode(String), + + #[error("Invalid retention configuration: {0}")] InvalidRetentionConfig(String), + + #[error("Part size {0} is not supported; minimum allowed 5MiB")] InvalidMinPartSize(u64), + + #[error("Part size {0} is not supported; maximum allowed 5GiB")] InvalidMaxPartSize(u64), + + #[error("Object size {0} is not supported; maximum allowed 5TiB")] InvalidObjectSize(u64), + + #[error("Valid part size must be provided when object size is unknown")] MissingPartSize, - InvalidPartCount(u64, u64, u16), - TooManyParts, + + #[error( + "Object size {object_size} and part size {part_size} make more than {part_count} parts for upload" + )] + InvalidPartCount { + object_size: u64, + part_size: u64, + part_count: u16, + }, + + #[error("Too many parts for upload: {0} parts; maximum allowed is MAX_MULTIPART_COUNT parts")] + TooManyParts(u64), + + #[error("{}", sse_tls_required_message(.0))] SseTlsRequired(Option), + + #[error("Too much data in the stream - exceeds {0} bytes")] TooMuchData(u64), - InsufficientData(u64, u64), + + #[error("Not enough data in the stream; expected: {expected}, got: {got} bytes")] + InsufficientData { expected: u64, got: u64 }, + + #[error("Invalid legal hold: {0}")] InvalidLegalHold(String), + + #[error("Invalid select expression: {0}")] InvalidSelectExpression(String), + + #[error("Invalid header value type: {0}")] InvalidHeaderValueType(u8), - CrcMismatch(String, u32, u32), + + #[error("Invalid base URL: 
{0}")] + InvalidBaseUrl(String), + + #[error("URL build error: {0}")] + UrlBuildError(String), + + #[error("Region must be {bucket_region}, but passed {region}")] + RegionMismatch { + bucket_region: String, + region: String, + }, + + #[error("{crc_type} CRC mismatch; expected: {expected}, got: {got}")] + CrcMismatch { + crc_type: String, + expected: u32, + got: u32, + }, + + #[error("Unknown event type: {0}")] UnknownEventType(String), - SelectError(String, String), - UnsupportedApi(String), - InvalidComposeSource(String), - InvalidComposeSourceOffset(String, String, Option, u64, u64), - InvalidComposeSourceLength(String, String, Option, u64, u64), - InvalidComposeSourceSize(String, String, Option, u64, u64), - InvalidComposeSourcePartSize(String, String, Option, u64, u64), - InvalidComposeSourceMultipart(String, String, Option, u64, u64), + + /// Error returned by the S3 Select API + #[error("Error code: {error_code}, error message: {error_message}")] + SelectError { + error_code: String, + error_message: String, + }, + + /// Error returned when the S3 API is not supported by AWS S3 + #[error("{0} API is not supported in Amazon AWS S3")] + UnsupportedAwsApi(String), + + #[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceOffset", &format!("offset {offset} is beyond object size {object_size}")))] + InvalidComposeSourceOffset { + bucket: String, + object: String, + version: Option, + offset: u64, + object_size: u64, + }, + + #[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceLength", &format!("length {length} is beyond object size {object_size}")))] + InvalidComposeSourceLength { + bucket: String, + object: String, + version: Option, + length: u64, + object_size: u64, + }, + + #[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceSize", &format!("compose size {compose_size} is beyond object size {object_size}")))] + 
InvalidComposeSourceSize { + bucket: String, + object: String, + version: Option, + compose_size: u64, + object_size: u64, + }, + + #[error("Invalid directive: {0}")] InvalidDirective(String), + + #[error("Invalid copy directive: {0}")] InvalidCopyDirective(String), - InvalidMultipartCount(u16), - MissingLifecycleAction, - InvalidExpiredObjectDeleteMarker, - InvalidDateAndDays(String), - InvalidLifecycleRuleId, - InvalidFilter, + + #[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourcePartSize", &format!("compose size {size} must be greater than {expected_size}")))] + InvalidComposeSourcePartSize { + bucket: String, + object: String, + version: Option, + size: u64, + expected_size: u64, + }, + + #[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceMultipart", &format!("size {size} for multipart split upload of {size}, last part size is less than {expected_size}")))] + InvalidComposeSourceMultipart { + bucket: String, + object: String, + version: Option, + size: u64, + expected_size: u64, + }, + + #[error("Compose sources create more than allowed multipart count {0}")] + InvalidMultipartCount(u64), + + #[error("Only one of And, Prefix or Tag must be provided: {0}")] + InvalidFilter(String), + + #[error("Invalid versioning status: {0}")] InvalidVersioningStatus(String), + + #[error("Post policy error: {0}")] PostPolicyError(String), + + #[error("Invalid object lock config: {0}")] InvalidObjectLockConfig(String), - NoClientProvided, - TagDecodingError(String, String), + + #[error("Tag decoding failed: {error_message} on input '{input}'")] + TagDecodingError { + input: String, + error_message: String, + }, + + #[error("Content length is unknown")] ContentLengthUnknown, } -impl std::error::Error for Error {} - -impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Error::TimeParseError(e) => write!(f, "{e}"), - Error::InvalidUrl(e) 
=> write!(f, "{e}"), - Error::IOError(e) => write!(f, "{e}"), - Error::XmlParseError(e) => write!(f, "{e}"), - Error::HttpError(e) => write!(f, "{e}"), - Error::StrError(e) => write!(f, "{e}"), - Error::IntError(e) => write!(f, "{e}"), - Error::BoolError(e) => write!(f, "{e}"), - Error::Utf8Error(e) => write!(f, "{e}"), - Error::JsonError(e) => write!(f, "{e}"), - Error::XmlError(m) => write!(f, "{m}"), - Error::InvalidBucketName(m) => write!(f, "{m}"), - Error::InvalidObjectName(m) => write!(f, "{m}"), - Error::InvalidUploadId(m) => write!(f, "{m}"), - Error::InvalidPartNumber(m) => write!(f, "{m}"), - Error::InvalidUserMetadata(m) => write!(f, "{m}"), - Error::EmptyParts(m) => write!(f, "{m}"), - Error::InvalidRetentionMode(m) => write!(f, "invalid retention mode {m}"), - Error::InvalidRetentionConfig(m) => write!(f, "invalid retention configuration; {m}"), - Error::InvalidMinPartSize(s) => { - write!(f, "part size {s} is not supported; minimum allowed 5MiB") - } - Error::InvalidMaxPartSize(s) => { - write!(f, "part size {s} is not supported; maximum allowed 5GiB") - } - Error::InvalidObjectSize(s) => { - write!(f, "object size {s} is not supported; maximum allowed 5TiB",) - } - Error::MissingPartSize => write!( - f, - "valid part size must be provided when object size is unknown" - ), - Error::InvalidPartCount(os, ps, pc) => write!( - f, - "object size {os} and part size {ps} make more than {pc} parts for upload" - ), - Error::TooManyParts => write!(f, "too many parts for upload"), - Error::SseTlsRequired(m) => write!( - f, - "{}SSE operation must be performed over a secure connection", - m.as_ref().map_or(String::new(), |v| v.clone()) - ), - Error::TooMuchData(s) => write!(f, "too much data in the stream - exceeds {s} bytes"), - Error::InsufficientData(expected, got) => write!( - f, - "not enough data in the stream; expected: {expected}, got: {got} bytes", - ), - Error::InvalidBaseUrl(m) => write!(f, "{m}"), - Error::UrlBuildError(m) => write!(f, "{m}"), - 
Error::InvalidLegalHold(s) => write!(f, "invalid legal hold {s}"), - Error::RegionMismatch(br, r) => write!(f, "region must be {br}, but passed {r}"), - Error::S3Error(er) => write!( - f, - "s3 operation failed; code: {:?}, message: {}, resource: {}, request_id: {}, host_id: {}, bucket_name: {}, object_name: {}", - er.code, - er.message, - er.resource, - er.request_id, - er.host_id, - er.bucket_name, - er.object_name, - ), - Error::InvalidResponse(sc, ct) => write!( - f, - "invalid response received; status code: {sc}; content-type: {ct}" - ), - Error::ServerError(sc) => write!(f, "server failed with HTTP status code {sc}"), - Error::InvalidSelectExpression(m) => write!(f, "{m}"), - Error::InvalidHeaderValueType(v) => write!(f, "invalid header value type {v}"), - Error::CrcMismatch(t, e, g) => { - write!(f, "{t} CRC mismatch; expected: {e}, got: {g}") - } - Error::UnknownEventType(et) => write!(f, "unknown event type {et}"), - Error::SelectError(ec, em) => write!(f, "error code: {ec}, error message: {em}"), - Error::UnsupportedApi(a) => write!(f, "{a} API is not supported in Amazon AWS S3"), - Error::InvalidComposeSource(m) => write!(f, "{m}"), - Error::InvalidComposeSourceOffset(b, o, v, of, os) => write!( - f, - "source {}/{}{}: offset {} is beyond object size {}", - b, - o, - v.as_ref() - .map_or(String::new(), |v| String::from("?versionId=") + v), - of, - os - ), - Error::InvalidComposeSourceLength(b, o, v, l, os) => write!( - f, - "source {}/{}{}: length {} is beyond object size {}", - b, - o, - v.as_ref() - .map_or(String::new(), |v| String::from("?versionId=") + v), - l, - os - ), - Error::InvalidComposeSourceSize(b, o, v, cs, os) => write!( - f, - "source {}/{}{}: compose size {} is beyond object size {}", - b, - o, - v.as_ref() - .map_or(String::new(), |v| String::from("?versionId=") + v), - cs, - os - ), - Error::InvalidDirective(m) => write!(f, "{m}"), - Error::InvalidCopyDirective(m) => write!(f, "{m}"), - Error::InvalidComposeSourcePartSize(b, o, v, s, 
es) => write!( - f, - "source {}/{}{}: size {} must be greater than {}", - b, - o, - v.as_ref() - .map_or(String::new(), |v| String::from("?versionId=") + v), - s, - es - ), - Error::InvalidComposeSourceMultipart(b, o, v, s, es) => write!( - f, - "source {}/{}{}: size {} for multipart split upload of {}, last part size is less than {}", - b, - o, - v.as_ref() - .map_or(String::new(), |v| String::from("?versionId=") + v), - s, - s, - es - ), - Error::InvalidMultipartCount(c) => write!( - f, - "Compose sources create more than allowed multipart count {c}", - ), - Error::MissingLifecycleAction => write!( - f, - "at least one of action (AbortIncompleteMultipartUpload, Expiration, NoncurrentVersionExpiration, NoncurrentVersionTransition or Transition) must be specified in a rule" - ), - Error::InvalidExpiredObjectDeleteMarker => write!( - f, - "ExpiredObjectDeleteMarker must not be provided along with Date and Days" - ), - Error::InvalidDateAndDays(m) => { - write!(f, "Only one of date or days of {m} must be set") - } - Error::InvalidLifecycleRuleId => write!(f, "id must be exceed 255 characters"), - Error::InvalidFilter => write!(f, "only one of And, Prefix or Tag must be provided"), - Error::InvalidVersioningStatus(m) => write!(f, "{m}"), - Error::PostPolicyError(m) => write!(f, "{m}"), - Error::InvalidObjectLockConfig(m) => write!(f, "{m}"), - Error::NoClientProvided => write!(f, "no client provided"), - Error::TagDecodingError(input, error_message) => { - write!(f, "tag decoding failed: {error_message} on input '{input}'") - } - Error::ContentLengthUnknown => write!(f, "content length is unknown"), +impl From for ValidationErr { + fn from(err: reqwest::header::ToStrError) -> Self { + ValidationErr::StrError { + message: "The provided value has an invalid encoding".into(), + source: Some(Box::new(err)), } } } -impl From for Error { - fn from(err: chrono::ParseError) -> Self { - Error::TimeParseError(err) +// Some convenience methods for creating ValidationErr 
instances +impl ValidationErr { + pub fn xml_error(message: impl Into) -> Self { + Self::XmlError { + message: message.into(), + source: None, + } + } + pub fn xml_error_with_source( + message: impl Into, + source: impl Into>, + ) -> Self { + Self::XmlError { + message: message.into(), + source: Some(source.into()), + } } } -impl From for Error { - fn from(err: http::uri::InvalidUri) -> Self { - Error::InvalidUrl(err) +// IO errors from accessing local files +#[derive(Error, Debug)] +pub enum IoError { + /// Error while performing IO operations + #[error("IO error: {0}")] + IOError(#[from] std::io::Error), +} + +// IO errors on the network like network time out +#[derive(Error, Debug)] +pub enum NetworkError { + #[error("Server failed with HTTP status code {0}")] + ServerError(u16), +} + +// Server response errors like bucket does not exist, etc. +// This would include any server sent validation errors. +#[derive(Error, Debug)] +pub enum S3ServerError { + /// S3 Errors as returned by the S3 server + #[error("S3 error: {0}")] + S3Error(#[from] Box), // NOTE: Boxing to prevent: "warning: large size difference between variants" + + #[error( + "Invalid server response received; {message}; HTTP status code: {http_status_code}; content-type: {content_type}" + )] + InvalidServerResponse { + message: String, + http_status_code: u16, + content_type: String, + }, +} + +// Top-level Minio client error +#[derive(Error, Debug)] +pub enum Error { + #[error("S3 server error occurred")] + S3Server(#[from] S3ServerError), + + #[error("Drive IO error occurred")] + DriveIo(#[from] IoError), + + #[error("Network error occurred")] + Network(#[from] NetworkError), + + #[error("Validation error occurred")] + Validation(#[from] ValidationErr), +} + +// region message helpers + +// Helper functions for formatting error messages with Option +fn sse_tls_required_message(prefix: &Option) -> String { + match prefix { + Some(p) => format!("{p} SSE operation must be performed over a secure 
connection",), + None => "SSE operation must be performed over a secure connection".to_string(), } } -impl From for Error { - fn from(err: std::io::Error) -> Self { - Error::IOError(err) - } +fn format_s3_object_error( + bucket: &str, + object: &str, + version: Option<&str>, + error_type: &str, + details: &str, +) -> String { + let version_str = match &version.map(String::from) { + Some(v) => format!("?versionId={v}"), + None => String::new(), + }; + format!("source {bucket}/{object}{version_str}: {error_type} {details}") } -impl From for Error { - fn from(err: xmltree::ParseError) -> Self { - Error::XmlParseError(err) - } -} - -impl From for Error { - fn from(err: reqwest::Error) -> Self { - Error::HttpError(err) - } -} - -impl From for Error { - fn from(err: reqwest::header::ToStrError) -> Self { - Error::StrError(err) - } -} - -impl From for Error { - fn from(err: std::num::ParseIntError) -> Self { - Error::IntError(err) - } -} - -impl From for Error { - fn from(err: std::str::ParseBoolError) -> Self { - Error::BoolError(err) - } -} - -impl From for Error { - fn from(err: alloc::string::FromUtf8Error) -> Self { - Error::Utf8Error(err.into()) - } -} - -impl From for Error { - fn from(err: std::str::Utf8Error) -> Self { - Error::Utf8Error(err.into()) - } -} - -impl From for Error { - fn from(err: serde_json::Error) -> Self { - Error::JsonError(err) - } -} +// endregion message helpers diff --git a/src/s3/header_constants.rs b/src/s3/header_constants.rs new file mode 100644 index 0000000..7574c9f --- /dev/null +++ b/src/s3/header_constants.rs @@ -0,0 +1,107 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub const IF_MATCH: &str = "if-match"; +pub const IF_NONE_MATCH: &str = "if-none-match"; +pub const IF_MODIFIED_SINCE: &str = "if-modified-since"; +pub const IF_UNMODIFIED_SINCE: &str = "if-unmodified-since"; +pub const CONTENT_MD5: &str = "Content-MD5"; +pub const CONTENT_TYPE: &str = "Content-Type"; +pub const AUTHORIZATION: &str = "Authorization"; +pub const RANGE: &str = "Range"; +pub const HOST: &str = "Host"; +pub const CONTENT_LENGTH: &str = "Content-Length"; + +pub const POLICY: &str = "policy"; + +pub const X_MINIO_DEPLOYMENT_ID: &str = "x-minio-deployment-id"; + +pub const X_AMZ_VERSION_ID: &str = "x-amz-version-id"; +pub const X_AMZ_ID_2: &str = "x-amz-id-2"; +pub const X_AMZ_WRITE_OFFSET_BYTES: &str = "x-amz-write-offset-bytes"; + +pub const X_AMZ_OBJECT_SIZE: &str = "x-amz-object-size"; +pub const X_AMZ_TAGGING: &str = "x-amz-tagging"; + +pub const X_AMZ_BUCKET_REGION: &str = "x-amz-bucket-region"; + +pub const X_AMZ_OBJECT_LOCK_MODE: &str = "x-amz-object-lock-mode"; + +pub const X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE: &str = "x-amz-object-lock-retain-until-date"; + +pub const X_AMZ_OBJECT_LOCK_LEGAL_HOLD: &str = "x-amz-object-lock-legal-hold"; + +pub const X_AMZ_METADATA_DIRECTIVE: &str = "x-amz-metadata-directive"; + +pub const X_AMZ_TAGGING_DIRECTIVE: &str = "x-amz-tagging-directive"; + +pub const X_AMZ_COPY_SOURCE: &str = "x-amz-copy-source"; + +pub const X_AMZ_COPY_SOURCE_RANGE: &str = "x-amz-copy-source-range"; + +pub const X_AMZ_COPY_SOURCE_IF_MATCH: &str = "x-amz-copy-source-if-match"; + +pub const 
X_AMZ_COPY_SOURCE_IF_NONE_MATCH: &str = "x-amz-copy-source-if-none-match"; + +pub const X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE: &str = "x-amz-copy-source-if-unmodified-since"; + +pub const X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: &str = "x-amz-copy-source-if-modified-since"; + +pub const X_AMZ_BUCKET_OBJECT_LOCK_ENABLED: &str = "x-amz-bucket-object-lock-enabled"; + +pub const X_AMZ_BYPASS_GOVERNANCE_RETENTION: &str = "x-amz-bypass-governance-retention"; + +pub const X_AMZ_DATE: &str = "x-amz-date"; + +pub const X_AMZ_DELETE_MARKER: &str = "x-amz-delete-marker"; +pub const X_AMZ_ALGORITHM: &str = "x-amz-algorithm"; + +pub const X_AMZ_CREDENTIAL: &str = "x-amz-credential"; + +pub const X_AMZ_SIGNATURE: &str = "x-amz-signature"; + +pub const X_AMZ_REQUEST_ID: &str = "x-amz-request-id"; + +pub const X_AMZ_EXPIRES: &str = "x-amz-expires"; + +pub const X_AMZ_SIGNED_HEADERS: &str = "x-amz-signedheaders"; + +pub const X_AMZ_CONTENT_SHA256: &str = "x-amz-content-sha256"; + +pub const X_AMZ_SECURITY_TOKEN: &str = "x-amz-security-token"; + +pub const X_AMZ_SERVER_SIDE_ENCRYPTION: &str = "X-Amz-Server-Side-Encryption"; + +pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CONTEXT: &str = "X-Amz-Server-Side-Encryption-Context"; + +pub const X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID: &str = + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id"; + +pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: &str = + "X-Amz-Server-Side-Encryption-Customer-Algorithm"; + +pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: &str = + "X-Amz-Server-Side-Encryption-Customer-Key"; +pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: &str = + "X-Amz-Server-Side-Encryption-Customer-Key-MD5"; + +pub const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: &str = + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm"; + +pub const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: &str = + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key"; + +pub const 
X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: &str = + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5"; diff --git a/src/s3/http.rs b/src/s3/http.rs index 5ccda4e..0a9ea45 100644 --- a/src/s3/http.rs +++ b/src/s3/http.rs @@ -17,7 +17,8 @@ use super::utils::urlencode_object_key; use crate::s3::client::DEFAULT_REGION; -use crate::s3::error::Error; + +use crate::s3::error::ValidationErr; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::utils::match_hostname; use derivative::Derivative; @@ -135,7 +136,7 @@ fn get_aws_info( aws_s3_prefix: &mut String, aws_domain_suffix: &mut String, dualstack: &mut bool, -) -> Result<(), Error> { +) -> Result<(), ValidationErr> { if !match_hostname(host) { return Ok(()); } @@ -156,18 +157,18 @@ fn get_aws_info( } if !match_aws_s3_endpoint(host) { - return Err(Error::UrlBuildError( - String::from("invalid Amazon AWS host ") + host, - )); + return Err(ValidationErr::UrlBuildError(format!( + "invalid Amazon AWS host {host}" + ))); } let matcher = AWS_S3_PREFIX_REGEX.find(host).unwrap(); let s3_prefix = host.get(..matcher.end()).unwrap(); if s3_prefix.contains("s3-accesspoint") && !https { - return Err(Error::UrlBuildError( - String::from("use HTTPS scheme for host ") + host, - )); + return Err(ValidationErr::UrlBuildError(format!( + "use HTTPS scheme for host {host}" + ))); } let mut tokens: Vec<_> = host.get(matcher.len()..).unwrap().split('.').collect(); @@ -195,9 +196,9 @@ fn get_aws_info( if domain_suffix.ends_with(".cn") && !s3_prefix.ends_with("s3-accelerate.") && region.is_empty() { - return Err(Error::UrlBuildError( - String::from("region missing in Amazon S3 China endpoint ") + host, - )); + return Err(ValidationErr::UrlBuildError(format!( + "region missing in Amazon S3 China endpoint {host}" + ))); } *region = region_in_host; @@ -223,7 +224,7 @@ pub struct BaseUrl { } impl FromStr for BaseUrl { - type Err = Error; + type Err = ValidationErr; /// Convert a string to a BaseUrl. 
/// @@ -245,7 +246,7 @@ impl FromStr for BaseUrl { /// // Get base URL from IPv6 address /// let base_url: BaseUrl = "[0:0:0:0:0:ffff:c0a8:7c3f]:9000".parse().unwrap(); /// ``` - fn from_str(s: &str) -> Result { + fn from_str(s: &str) -> Result { let url = s.parse::()?; let https = match url.scheme() { @@ -254,9 +255,9 @@ impl FromStr for BaseUrl { "http" => false, "https" => true, _ => { - return Err(Error::InvalidBaseUrl(String::from( - "scheme must be http or https", - ))); + return Err(ValidationErr::InvalidBaseUrl( + "scheme must be http or https".into(), + )); } }, }; @@ -264,9 +265,9 @@ impl FromStr for BaseUrl { let mut host = match url.host() { Some(h) => h, _ => { - return Err(Error::InvalidBaseUrl(String::from( - "valid host must be provided", - ))); + return Err(ValidationErr::InvalidBaseUrl( + "valid host must be provided".into(), + )); } }; @@ -285,15 +286,15 @@ impl FromStr for BaseUrl { } if url.path() != "/" && url.path() != "" { - return Err(Error::InvalidBaseUrl(String::from( - "path must be empty for base URL", - ))); + return Err(ValidationErr::InvalidBaseUrl( + "path must be empty for base URL".into(), + )); } if url.query().is_some() { - return Err(Error::InvalidBaseUrl(String::from( - "query must be none for base URL", - ))); + return Err(ValidationErr::InvalidBaseUrl( + "query must be none for base URL".into(), + )); } let mut region = String::new(); @@ -335,7 +336,7 @@ impl BaseUrl { bucket_name: &str, enforce_path_style: bool, region: &str, - ) -> Result<(), Error> { + ) -> Result<(), ValidationErr> { let mut host = String::from(&self.aws_s3_prefix); host.push_str(&self.aws_domain_suffix); if host.eq_ignore_ascii_case("s3-external-1.amazonaws.com") @@ -349,9 +350,9 @@ impl BaseUrl { host = String::from(&self.aws_s3_prefix); if self.aws_s3_prefix.contains("s3-accelerate") { if bucket_name.contains('.') { - return Err(Error::UrlBuildError(String::from( - "bucket name with '.' 
is not allowed for accelerate endpoint", - ))); + return Err(ValidationErr::UrlBuildError( + "bucket name with '.' is not allowed for accelerate endpoint".into(), + )); } if enforce_path_style { @@ -408,7 +409,7 @@ impl BaseUrl { query: &Multimap, bucket_name: Option<&str>, object_name: Option<&str>, - ) -> Result { + ) -> Result { let mut url = Url { https: self.https, host: self.host.clone(), diff --git a/src/s3/lifecycle_config.rs b/src/s3/lifecycle_config.rs index 7443b7b..ef8732f 100644 --- a/src/s3/lifecycle_config.rs +++ b/src/s3/lifecycle_config.rs @@ -1,4 +1,19 @@ -use crate::s3::error::Error; +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::s3::error::ValidationErr; use crate::s3::types::Filter; use crate::s3::utils::to_iso8601utc; use xmltree::Element; @@ -10,7 +25,7 @@ pub struct LifecycleConfig { } impl LifecycleConfig { - pub fn from_xml(root: &Element) -> Result { + pub fn from_xml(root: &Element) -> Result { let mut config = LifecycleConfig { rules: Vec::new() }; // Process all Rule elements in the XML @@ -23,7 +38,7 @@ impl LifecycleConfig { Ok(config) } - pub fn validate(&self) -> Result<(), Error> { + pub fn validate(&self) -> Result<(), ValidationErr> { // Skip validation if empty if self.rules.is_empty() { return Ok(()); @@ -90,17 +105,15 @@ impl LifecycleConfig { data.push_str(&days.to_string()); data.push_str(""); } - if let Some(delete_marker) = rule.expiration_expired_object_delete_marker { - if delete_marker { - data.push_str( - "true", - ); - } + if let Some(delete_marker) = rule.expiration_expired_object_delete_marker + && delete_marker + { + data.push_str("true"); } - if let Some(delete_all) = rule.expiration_expired_object_all_versions { - if delete_all { - data.push_str("true"); - } + if let Some(delete_all) = rule.expiration_expired_object_all_versions + && delete_all + { + data.push_str("true"); } data.push_str(""); } @@ -118,10 +131,10 @@ impl LifecycleConfig { data.push_str(&days.to_string()); data.push_str(""); - if let Some(delete_marker) = rule.all_versions_expiration_delete_marker { - if delete_marker { - data.push_str("true"); - } + if let Some(delete_marker) = rule.all_versions_expiration_delete_marker + && delete_marker + { + data.push_str("true"); } data.push_str(""); @@ -248,14 +261,14 @@ pub struct LifecycleRule { } impl LifecycleRule { - pub fn from_xml(rule_elem: &Element) -> Result { + pub fn from_xml(rule_elem: &Element) -> Result { let mut rule = LifecycleRule::default(); // Parse ID - if let Some(id_elem) = rule_elem.get_child("ID") { - if let Some(id_text) = id_elem.get_text() { - rule.id = id_text.to_string(); - } + if let Some(id_elem) = 
rule_elem.get_child("ID") + && let Some(id_text) = id_elem.get_text() + { + rule.id = id_text.to_string(); } // Parse Status @@ -264,7 +277,7 @@ impl LifecycleRule { rule.status = status_text == "Enabled"; } } else { - return Err(Error::XmlError("Missing element".to_string())); + return Err(ValidationErr::xml_error("Missing element")); } // Parse Filter @@ -273,202 +286,205 @@ impl LifecycleRule { } // Parse AbortIncompleteMultipartUpload - if let Some(abort_elem) = rule_elem.get_child("AbortIncompleteMultipartUpload") { - if let Some(days_elem) = abort_elem.get_child("DaysAfterInitiation") { - if let Some(days_text) = days_elem.get_text() { - rule.abort_incomplete_multipart_upload_days_after_initiation = - Some(days_text.parse().map_err(|_| { - Error::XmlError("Invalid DaysAfterInitiation value".to_string()) - })?); - } - } - } + if let Some(abort_elem) = rule_elem.get_child("AbortIncompleteMultipartUpload") + && let Some(days_elem) = abort_elem.get_child("DaysAfterInitiation") + && let Some(days_text) = days_elem.get_text() + { + rule.abort_incomplete_multipart_upload_days_after_initiation = + Some(days_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source("Invalid DaysAfterInitiation value", e) + })?) 
+ }; // Parse Expiration if let Some(expiration_elem) = rule_elem.get_child("Expiration") { // Date - if let Some(date_elem) = expiration_elem.get_child("Date") { - if let Some(date_text) = date_elem.get_text() { - // Assume a function that parses ISO8601 to DateTime - rule.expiration_date = Some(parse_iso8601(&date_text)?); - } + if let Some(date_elem) = expiration_elem.get_child("Date") + && let Some(date_text) = date_elem.get_text() + { + // Assume a function that parses ISO8601 to DateTime + rule.expiration_date = Some(parse_iso8601(&date_text)?); } // Days - if let Some(days_elem) = expiration_elem.get_child("Days") { - if let Some(days_text) = days_elem.get_text() { - rule.expiration_days = Some(days_text.parse().map_err(|_| { - Error::XmlError("Invalid Expiration Days value".to_string()) - })?); - } + if let Some(days_elem) = expiration_elem.get_child("Days") + && let Some(days_text) = days_elem.get_text() + { + rule.expiration_days = Some(days_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source("Invalid Expiration Days value", e) + })?); } // ExpiredObjectDeleteMarker if let Some(delete_marker_elem) = expiration_elem.get_child("ExpiredObjectDeleteMarker") + && let Some(delete_marker_text) = delete_marker_elem.get_text() { - if let Some(delete_marker_text) = delete_marker_elem.get_text() { - rule.expiration_expired_object_delete_marker = - Some(delete_marker_text == "true"); - } + rule.expiration_expired_object_delete_marker = Some(delete_marker_text == "true"); } // ExpiredObjectAllVersions - if let Some(all_versions_elem) = expiration_elem.get_child("ExpiredObjectAllVersions") { - if let Some(all_versions_text) = all_versions_elem.get_text() { - rule.expiration_expired_object_all_versions = Some(all_versions_text == "true"); - } + if let Some(all_versions_elem) = expiration_elem.get_child("ExpiredObjectAllVersions") + && let Some(all_versions_text) = all_versions_elem.get_text() + { + rule.expiration_expired_object_all_versions = 
Some(all_versions_text == "true"); } } // Parse DelMarkerExpiration - if let Some(del_marker_elem) = rule_elem.get_child("DelMarkerExpiration") { - if let Some(days_elem) = del_marker_elem.get_child("Days") { - if let Some(days_text) = days_elem.get_text() { - rule.del_marker_expiration_days = Some(days_text.parse().map_err(|_| { - Error::XmlError("Invalid DelMarkerExpiration Days value".to_string()) - })?); - } - } + if let Some(del_marker_elem) = rule_elem.get_child("DelMarkerExpiration") + && let Some(days_elem) = del_marker_elem.get_child("Days") + && let Some(days_text) = days_elem.get_text() + { + rule.del_marker_expiration_days = Some(days_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source("Invalid DelMarkerExpiration Days value", e) + })?); } // Parse AllVersionsExpiration if let Some(all_versions_elem) = rule_elem.get_child("AllVersionsExpiration") { - if let Some(days_elem) = all_versions_elem.get_child("Days") { - if let Some(days_text) = days_elem.get_text() { - rule.all_versions_expiration_days = Some(days_text.parse().map_err(|_| { - Error::XmlError("Invalid AllVersionsExpiration Days value".to_string()) - })?); - } + if let Some(days_elem) = all_versions_elem.get_child("Days") + && let Some(days_text) = days_elem.get_text() + { + rule.all_versions_expiration_days = Some(days_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source( + "Invalid AllVersionsExpiration Days value", + e, + ) + })?); } - if let Some(delete_marker_elem) = all_versions_elem.get_child("DeleteMarker") { - if let Some(delete_marker_text) = delete_marker_elem.get_text() { - rule.all_versions_expiration_delete_marker = Some(delete_marker_text == "true"); - } + if let Some(delete_marker_elem) = all_versions_elem.get_child("DeleteMarker") + && let Some(delete_marker_text) = delete_marker_elem.get_text() + { + rule.all_versions_expiration_delete_marker = Some(delete_marker_text == "true"); } } // Parse NoncurrentVersionExpiration if let 
Some(noncurrent_exp_elem) = rule_elem.get_child("NoncurrentVersionExpiration") { - if let Some(days_elem) = noncurrent_exp_elem.get_child("NoncurrentDays") { - if let Some(days_text) = days_elem.get_text() { - rule.noncurrent_version_expiration_noncurrent_days = - Some(days_text.parse().map_err(|_| { - Error::XmlError( - "Invalid NoncurrentVersionExpiration NoncurrentDays value" - .to_string(), - ) - })?); - } + if let Some(days_elem) = noncurrent_exp_elem.get_child("NoncurrentDays") + && let Some(days_text) = days_elem.get_text() + { + rule.noncurrent_version_expiration_noncurrent_days = + Some(days_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source( + "Invalid NoncurrentVersionExpiration NoncurrentDays value", + e, + ) + })?); } - if let Some(versions_elem) = noncurrent_exp_elem.get_child("NewerNoncurrentVersions") { - if let Some(versions_text) = versions_elem.get_text() { - rule.noncurrent_version_expiration_newer_versions = - Some(versions_text.parse().map_err(|_| { - Error::XmlError("Invalid NewerNoncurrentVersions value".to_string()) - })?); - } + if let Some(versions_elem) = noncurrent_exp_elem.get_child("NewerNoncurrentVersions") + && let Some(versions_text) = versions_elem.get_text() + { + rule.noncurrent_version_expiration_newer_versions = + Some(versions_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source( + "Invalid NewerNoncurrentVersions value", + e, + ) + })?); } } // Parse NoncurrentVersionTransition if let Some(noncurrent_trans_elem) = rule_elem.get_child("NoncurrentVersionTransition") { - if let Some(days_elem) = noncurrent_trans_elem.get_child("NoncurrentDays") { - if let Some(days_text) = days_elem.get_text() { - rule.noncurrent_version_transition_noncurrent_days = - Some(days_text.parse().map_err(|_| { - Error::XmlError( - "Invalid NoncurrentVersionTransition NoncurrentDays value" - .to_string(), - ) - })?); - } + if let Some(days_elem) = noncurrent_trans_elem.get_child("NoncurrentDays") + && let Some(days_text) = 
days_elem.get_text() + { + rule.noncurrent_version_transition_noncurrent_days = + Some(days_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source( + "Invalid NoncurrentVersionTransition NoncurrentDays value", + e, + ) + })?); } - if let Some(storage_elem) = noncurrent_trans_elem.get_child("StorageClass") { - if let Some(storage_text) = storage_elem.get_text() { - rule.noncurrent_version_transition_storage_class = - Some(storage_text.to_string()); - } + if let Some(storage_elem) = noncurrent_trans_elem.get_child("StorageClass") + && let Some(storage_text) = storage_elem.get_text() + { + rule.noncurrent_version_transition_storage_class = Some(storage_text.to_string()); } if let Some(versions_elem) = noncurrent_trans_elem.get_child("NewerNoncurrentVersions") + && let Some(versions_text) = versions_elem.get_text() { - if let Some(versions_text) = versions_elem.get_text() { - rule.noncurrent_version_transition_newer_versions = - Some(versions_text.parse().map_err(|_| { - Error::XmlError("Invalid NewerNoncurrentVersions value".to_string()) - })?); - } + rule.noncurrent_version_transition_newer_versions = + Some(versions_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source( + "Invalid NewerNoncurrentVersions value", + e, + ) + })?); } } // Parse Transition if let Some(transition_elem) = rule_elem.get_child("Transition") { // Date - if let Some(date_elem) = transition_elem.get_child("Date") { - if let Some(date_text) = date_elem.get_text() { - rule.transition_date = Some(parse_iso8601(&date_text)?); - } + if let Some(date_elem) = transition_elem.get_child("Date") + && let Some(date_text) = date_elem.get_text() + { + rule.transition_date = Some(parse_iso8601(&date_text)?); } // Days - if let Some(days_elem) = transition_elem.get_child("Days") { - if let Some(days_text) = days_elem.get_text() { - rule.transition_days = Some(days_text.parse().map_err(|_| { - Error::XmlError("Invalid Transition Days value".to_string()) - })?); - } + if let 
Some(days_elem) = transition_elem.get_child("Days") + && let Some(days_text) = days_elem.get_text() + { + rule.transition_days = Some(days_text.parse().map_err(|e| { + ValidationErr::xml_error_with_source("Invalid Transition Days value", e) + })?); } // StorageClass - if let Some(storage_elem) = transition_elem.get_child("StorageClass") { - if let Some(storage_text) = storage_elem.get_text() { - rule.transition_storage_class = Some(storage_text.to_string()); - } + if let Some(storage_elem) = transition_elem.get_child("StorageClass") + && let Some(storage_text) = storage_elem.get_text() + { + rule.transition_storage_class = Some(storage_text.to_string()); } } Ok(rule) } - pub fn validate(&self) -> Result<(), Error> { + pub fn validate(&self) -> Result<(), ValidationErr> { // Basic validation requirements // Ensure ID is present if self.id.is_empty() { - return Err(Error::XmlError("Rule ID cannot be empty".to_string())); + return Err(ValidationErr::xml_error("Rule ID cannot be empty")); } // Validate storage classes in transitions - if let Some(storage_class) = &self.transition_storage_class { - if storage_class.is_empty() { - return Err(Error::XmlError( - "Transition StorageClass cannot be empty".to_string(), - )); - } + if let Some(storage_class) = &self.transition_storage_class + && storage_class.is_empty() + { + return Err(ValidationErr::xml_error( + "Transition StorageClass cannot be empty", + )); } - if let Some(storage_class) = &self.noncurrent_version_transition_storage_class { - if storage_class.is_empty() { - return Err(Error::XmlError( - "NoncurrentVersionTransition StorageClass cannot be empty".to_string(), - )); - } + if let Some(storage_class) = &self.noncurrent_version_transition_storage_class + && storage_class.is_empty() + { + return Err(ValidationErr::xml_error( + "NoncurrentVersionTransition StorageClass cannot be empty", + )); } // Check that expiration has either days or date, not both if self.expiration_days.is_some() && 
self.expiration_date.is_some() { - return Err(Error::XmlError( - "Expiration cannot specify both Days and Date".to_string(), + return Err(ValidationErr::xml_error( + "Expiration cannot specify both Days and Date", )); } // Check that transition has either days or date, not both if self.transition_days.is_some() && self.transition_date.is_some() { - return Err(Error::XmlError( - "Transition cannot specify both Days and Date".to_string(), + return Err(ValidationErr::xml_error( + "Transition cannot specify both Days and Date", )); } @@ -477,8 +493,10 @@ impl LifecycleRule { } // Helper function to parse ISO8601 dates -fn parse_iso8601(date_str: &str) -> Result, Error> { +fn parse_iso8601(date_str: &str) -> Result, ValidationErr> { chrono::DateTime::parse_from_rfc3339(date_str) .map(|dt| dt.with_timezone(&chrono::Utc)) - .map_err(|_| Error::XmlError(format!("Invalid date format: {date_str}"))) + .map_err(|e| { + ValidationErr::xml_error_with_source(format!("Invalid date format: {date_str}"), e) + }) } diff --git a/src/s3/minio_error_response.rs b/src/s3/minio_error_response.rs new file mode 100644 index 0000000..030fb02 --- /dev/null +++ b/src/s3/minio_error_response.rs @@ -0,0 +1,320 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2022 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +extern crate alloc; + +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::utils::{get_text_default, get_text_option}; +use bytes::{Buf, Bytes}; +use http::HeaderMap; +use std::str::FromStr; +use thiserror::Error; +use xmltree::Element; + +/// Error codes for Minio operations as returned by the server. +#[derive(Clone, Debug, Error, Default, PartialEq)] +pub enum MinioErrorCode { + // region errors codes equal to the minio-go SDK in s3-error.go + // quoted lines are from the minio-go SDK but not used in the minio-rs SDK (yet) + + //BadDigest: "The Content-Md5 you specified did not match what we received.", + //EntityTooSmall: "Your proposed upload is smaller than the minimum allowed object size.", + //EntityTooLarge: "Your proposed upload exceeds the maximum allowed object size.", + //IncompleteBody: "You did not provide the number of bytes specified by the Content-Length HTTP header.", + //InternalError: "We encountered an internal error, please try again.", + //InvalidAccessKeyID: "The access key ID you provided does not exist in our records.", + //InvalidBucketName: "The specified bucket is not valid.", + //InvalidDigest: "The Content-Md5 you specified is not valid.", + //InvalidRange: "The requested range is not satisfiable.", + //MalformedXML: "The XML you provided was not well-formed or did not validate against our published schema.", + //MissingContentLength: "You must provide the Content-Length HTTP header.", + //MissingContentMD5: "Missing required header for this request: Content-Md5.", + //MissingRequestBodyError: "Request body is empty.", + /// The specified key does not exist + NoSuchBucket, + /// The bucket policy does not exist + NoSuchBucketPolicy, + ///The specified key does not exist + NoSuchKey, + //NoSuchUpload: "The specified multipart upload does not exist. 
The upload ID may be invalid, or the upload may have been aborted or completed.", + //NotImplemented: "A header you provided implies functionality that is not implemented.", + //PreconditionFailed: "At least one of the pre-conditions you specified did not hold.", + //RequestTimeTooSkewed: "The difference between the request time and the server's time is too large.", + //SignatureDoesNotMatch: "The request signature we calculated does not match the signature you provided. Check your key and signing method.", + /// The specified method is not allowed against this resource + MethodNotAllowed, + //InvalidPart: "One or more of the specified parts could not be found.", + //InvalidPartOrder: "The list of parts was not in ascending order. The parts list must be specified in order by part number.", + //InvalidObjectState: "The operation is not valid for the current state of the object.", + //AuthorizationHeaderMalformed: "The authorization header is malformed; the region is wrong.", + //MalformedPOSTRequest: "The body of your POST request is not well-formed multipart/form-data.", + /// The bucket you tried to delete is not empty + BucketNotEmpty, + //AllAccessDisabled: "All access to this bucket has been disabled.", + //MalformedPolicy: "Policy has invalid resource.", + //MissingFields: "Missing fields in request.", + //AuthorizationQueryParametersError: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"/YYYYMMDD/REGION/SERVICE/aws4_request\".", + //MalformedDate: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.", + ///Your previous request to create the named bucket succeeded and you already own it + BucketAlreadyOwnedByYou, + //InvalidDuration: "Duration provided in the request is invalid.", + //XAmzContentSHA256Mismatch: "The provided 'x-amz-content-sha256' header does not match what was computed.", + //NoSuchCORSConfiguration: "The specified bucket does not have a CORS configuration.", + 
//Conflict: "Bucket not empty.", + /// endregion + + #[default] + NoError, + InvalidMinioErrorCode, + PermanentRedirect, + Redirect, + BadRequest, + RetryHead, + ReplicationConfigurationNotFoundError, + ServerSideEncryptionConfigurationNotFoundError, + NoSuchTagSet, + NoSuchObjectLockConfiguration, + NoSuchLifecycleConfiguration, + ResourceNotFound, + ResourceConflict, + AccessDenied, + NotSupported, + InvalidWriteOffset, + + OtherError(String), // This is a catch-all for any error code not explicitly defined +} + +#[allow(dead_code)] +const ALL_MINIO_ERROR_CODE: &[MinioErrorCode] = &[ + MinioErrorCode::NoError, + MinioErrorCode::InvalidMinioErrorCode, + MinioErrorCode::PermanentRedirect, + MinioErrorCode::Redirect, + MinioErrorCode::BadRequest, + MinioErrorCode::RetryHead, + MinioErrorCode::NoSuchBucket, + MinioErrorCode::NoSuchBucketPolicy, + MinioErrorCode::ReplicationConfigurationNotFoundError, + MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError, + MinioErrorCode::NoSuchTagSet, + MinioErrorCode::NoSuchObjectLockConfiguration, + MinioErrorCode::NoSuchLifecycleConfiguration, + MinioErrorCode::NoSuchKey, + MinioErrorCode::ResourceNotFound, + MinioErrorCode::MethodNotAllowed, + MinioErrorCode::ResourceConflict, + MinioErrorCode::AccessDenied, + MinioErrorCode::NotSupported, + MinioErrorCode::BucketNotEmpty, + MinioErrorCode::BucketAlreadyOwnedByYou, + MinioErrorCode::InvalidWriteOffset, + //MinioErrorCode::OtherError("".to_string()), +]; + +impl FromStr for MinioErrorCode { + type Err = Error; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "noerror" => Ok(MinioErrorCode::NoError), + "invalidminioerrorcode" => Ok(MinioErrorCode::InvalidMinioErrorCode), + "permanentredirect" => Ok(MinioErrorCode::PermanentRedirect), + "redirect" => Ok(MinioErrorCode::Redirect), + "badrequest" => Ok(MinioErrorCode::BadRequest), + "retryhead" => Ok(MinioErrorCode::RetryHead), + "nosuchbucket" => Ok(MinioErrorCode::NoSuchBucket), + 
"nosuchbucketpolicy" => Ok(MinioErrorCode::NoSuchBucketPolicy), + "replicationconfigurationnotfounderror" => { + Ok(MinioErrorCode::ReplicationConfigurationNotFoundError) + } + "serversideencryptionconfigurationnotfounderror" => { + Ok(MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError) + } + "nosuchtagset" => Ok(MinioErrorCode::NoSuchTagSet), + "nosuchobjectlockconfiguration" => Ok(MinioErrorCode::NoSuchObjectLockConfiguration), + "nosuchlifecycleconfiguration" => Ok(MinioErrorCode::NoSuchLifecycleConfiguration), + "nosuchkey" => Ok(MinioErrorCode::NoSuchKey), + "resourcenotfound" => Ok(MinioErrorCode::ResourceNotFound), + "methodnotallowed" => Ok(MinioErrorCode::MethodNotAllowed), + "resourceconflict" => Ok(MinioErrorCode::ResourceConflict), + "accessdenied" => Ok(MinioErrorCode::AccessDenied), + "notsupported" => Ok(MinioErrorCode::NotSupported), + "bucketnotempty" => Ok(MinioErrorCode::BucketNotEmpty), + "bucketalreadyownedbyyou" => Ok(MinioErrorCode::BucketAlreadyOwnedByYou), + "invalidwriteoffset" => Ok(MinioErrorCode::InvalidWriteOffset), + + v => Ok(MinioErrorCode::OtherError(v.to_owned())), + } + } +} + +impl std::fmt::Display for MinioErrorCode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MinioErrorCode::NoError => write!(f, "NoError"), + MinioErrorCode::InvalidMinioErrorCode => write!(f, "InvalidMinioErrorCode"), + + MinioErrorCode::PermanentRedirect => write!(f, "PermanentRedirect"), + MinioErrorCode::Redirect => write!(f, "Redirect"), + MinioErrorCode::BadRequest => write!(f, "BadRequest"), + MinioErrorCode::RetryHead => write!(f, "RetryHead"), + MinioErrorCode::NoSuchBucket => write!(f, "NoSuchBucket"), + MinioErrorCode::NoSuchBucketPolicy => write!(f, "NoSuchBucketPolicy"), + MinioErrorCode::ReplicationConfigurationNotFoundError => { + write!(f, "ReplicationConfigurationNotFoundError") + } + MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError => { + write!(f, 
"ServerSideEncryptionConfigurationNotFoundError") + } + MinioErrorCode::NoSuchTagSet => write!(f, "NoSuchTagSet"), + MinioErrorCode::NoSuchObjectLockConfiguration => { + write!(f, "NoSuchObjectLockConfiguration") + } + MinioErrorCode::NoSuchLifecycleConfiguration => { + write!(f, "NoSuchLifecycleConfiguration") + } + MinioErrorCode::NoSuchKey => write!(f, "NoSuchKey"), + MinioErrorCode::ResourceNotFound => write!(f, "ResourceNotFound"), + MinioErrorCode::MethodNotAllowed => write!(f, "MethodNotAllowed"), + MinioErrorCode::ResourceConflict => write!(f, "ResourceConflict"), + MinioErrorCode::AccessDenied => write!(f, "AccessDenied"), + MinioErrorCode::NotSupported => write!(f, "NotSupported"), + MinioErrorCode::BucketNotEmpty => write!(f, "BucketNotEmpty"), + MinioErrorCode::BucketAlreadyOwnedByYou => write!(f, "BucketAlreadyOwnedByYou"), + MinioErrorCode::InvalidWriteOffset => write!(f, "InvalidWriteOffset"), + MinioErrorCode::OtherError(msg) => write!(f, "{msg}"), + } + } +} + +#[cfg(test)] +mod test_error_code { + use super::*; + + /// Test that all MinioErrorCode values can be converted to and from strings + #[test] + fn test_minio_error_code_roundtrip() { + for code in ALL_MINIO_ERROR_CODE { + let str = code.to_string(); + let code_obs: MinioErrorCode = str.parse().unwrap(); + assert_eq!( + code_obs, *code, + "Failed MinioErrorCode round-trip: code {code} -> str '{str}' -> code {code_obs}" + ); + } + } +} + +/// MinioErrorResponse Is the typed error returned by all API operations. 
+/// equivalent of ErrorResponse in the minio-go SDK +#[derive(Clone, Debug, Default)] +pub struct MinioErrorResponse { + code: MinioErrorCode, + message: Option, + headers: HeaderMap, + resource: String, + request_id: String, + host_id: String, + bucket_name: Option, + object_name: Option, +} + +impl MinioErrorResponse { + pub fn new( + headers: HeaderMap, + code: MinioErrorCode, + message: Option, + resource: String, + request_id: String, + host_id: String, + bucket_name: Option, + object_name: Option, + ) -> Self { + Self { + headers, + code, + message, + resource, + request_id, + host_id, + bucket_name, + object_name, + } + } + + pub fn new_from_body(body: Bytes, headers: HeaderMap) -> Result { + let root = Element::parse(body.reader()).map_err(ValidationErr::from)?; + Ok(Self { + headers, + code: MinioErrorCode::from_str(&get_text_default(&root, "Code"))?, + message: get_text_option(&root, "Message"), + resource: get_text_default(&root, "Resource"), + request_id: get_text_default(&root, "RequestId"), + host_id: get_text_default(&root, "HostId"), + bucket_name: get_text_option(&root, "BucketName"), + object_name: get_text_option(&root, "Key"), + }) + } + + pub fn headers(&self) -> &HeaderMap { + &self.headers + } + /// Take ownership of the headers as returned by the server. 
+ pub fn take_headers(&mut self) -> HeaderMap { + std::mem::take(&mut self.headers) + } + pub fn code(&self) -> MinioErrorCode { + self.code.clone() + } + pub fn message(&self) -> &Option { + &self.message + } + pub fn set_message(&mut self, message: String) { + self.message = Some(message); + } + pub fn resource(&self) -> &str { + &self.resource + } + pub fn request_id(&self) -> &str { + &self.request_id + } + pub fn host_id(&self) -> &str { + &self.host_id + } + pub fn bucket_name(&self) -> &Option { + &self.bucket_name + } + pub fn object_name(&self) -> &Option { + &self.object_name + } +} + +impl std::fmt::Display for MinioErrorResponse { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "S3 operation failed: \n\tcode: {:?}\n\tmessage: {:?}\n\tresource: {}\n\trequest_id: {}\n\thost_id: {}\n\tbucket_name: {:?}\n\tobject_name: {:?}", + self.code, + self.message, + self.resource, + self.request_id, + self.host_id, + self.bucket_name, + self.object_name, + ) + } +} + +impl std::error::Error for MinioErrorResponse {} diff --git a/src/s3/mod.rs b/src/s3/mod.rs index 77017e0..6d347a7 100644 --- a/src/s3/mod.rs +++ b/src/s3/mod.rs @@ -19,8 +19,10 @@ pub mod builders; pub mod client; pub mod creds; pub mod error; +pub mod header_constants; pub mod http; pub mod lifecycle_config; +pub mod minio_error_response; pub mod multimap; mod object_content; pub mod response; diff --git a/src/s3/multimap.rs b/src/s3/multimap.rs index 7bb1f76..88709b3 100644 --- a/src/s3/multimap.rs +++ b/src/s3/multimap.rs @@ -15,12 +15,11 @@ use crate::s3::utils::url_encode; use lazy_static::lazy_static; -use multimap::MultiMap; use regex::Regex; use std::collections::BTreeMap; /// Multimap for string key and string value -pub type Multimap = MultiMap; +pub type Multimap = multimap::MultiMap; pub trait MultimapExt { /// Adds a key-value pair to the multimap diff --git a/src/s3/object_content.rs b/src/s3/object_content.rs index 6bd5a37..2ac86ec 100644 --- 
a/src/s3/object_content.rs +++ b/src/s3/object_content.rs @@ -24,7 +24,7 @@ use crate::s3::segmented_bytes::SegmentedBytes; #[cfg(test)] use quickcheck::Arbitrary; -type IoResult = Result; +type IoResult = core::result::Result; // region: Size @@ -209,11 +209,9 @@ impl ObjectContent { if file_path.is_dir() { return Err(std::io::Error::other("path is a directory")); } - let parent_dir = file_path.parent().ok_or_else(|| { - std::io::Error::other(format!( - "path {file_path:?} does not have a parent directory" - )) - })?; + let parent_dir = file_path.parent().ok_or(std::io::Error::other(format!( + "path {file_path:?} does not have a parent directory" + )))?; if !parent_dir.is_dir() { async_std::fs::create_dir_all(parent_dir).await?; } diff --git a/src/s3/response/a_response_traits.rs b/src/s3/response/a_response_traits.rs index 2daf26a..bc3eea7 100644 --- a/src/s3/response/a_response_traits.rs +++ b/src/s3/response/a_response_traits.rs @@ -1,6 +1,7 @@ -use crate::s3::error::Error; +use crate::s3::error::ValidationErr; +use crate::s3::header_constants::*; use crate::s3::types::S3Request; -use crate::s3::utils::{get_text, trim_quotes}; +use crate::s3::utils::{get_text_result, parse_bool, trim_quotes}; use bytes::{Buf, Bytes}; use http::HeaderMap; use std::collections::HashMap; @@ -21,7 +22,7 @@ macro_rules! impl_from_s3response { Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await? + body: resp.bytes().await.map_err(ValidationErr::from)?, }) } } @@ -44,7 +45,7 @@ macro_rules! 
impl_from_s3response_with_size { Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, + body: resp.bytes().await.map_err(ValidationErr::from)?, object_size: 0, // Default value, can be set later }) } @@ -117,7 +118,7 @@ pub trait HasVersion: HasS3Fields { #[inline] fn version_id(&self) -> Option<&str> { self.headers() - .get("x-amz-version-id") + .get(X_AMZ_VERSION_ID) .and_then(|v| v.to_str().ok()) } } @@ -128,7 +129,7 @@ pub trait HasEtagFromHeaders: HasS3Fields { /// Returns the value of the `ETag` header from response headers (for operations that return ETag in headers). /// The ETag is typically a hash of the object content, but it may vary based on the storage backend. #[inline] - fn etag(&self) -> Result { + fn etag(&self) -> Result { // Retrieve the ETag from the response headers. let etag = self .headers() @@ -148,10 +149,10 @@ pub trait HasEtagFromBody: HasS3Fields { /// Returns the value of the `ETag` from the response body, which is a unique identifier for /// the object version. The ETag is typically a hash of the object content, but it may vary /// based on the storage backend. - fn etag(&self) -> Result { + fn etag(&self) -> Result { // Retrieve the ETag from the response body. let root = xmltree::Element::parse(self.body().clone().reader())?; - let etag: String = get_text(&root, "ETag")?; + let etag: String = get_text_result(&root, "ETag")?; Ok(trim_quotes(etag)) } } @@ -162,7 +163,7 @@ pub trait HasObjectSize: HasS3Fields { #[inline] fn object_size(&self) -> u64 { self.headers() - .get("x-amz-object-size") + .get(X_AMZ_OBJECT_SIZE) .and_then(|v| v.to_str().ok()) .and_then(|s| s.parse::().ok()) .unwrap_or(0) @@ -181,18 +182,10 @@ pub trait HasIsDeleteMarker: HasS3Fields { /// was not (false) a delete marker before deletion. In a simple DELETE, this header indicates /// whether (true) or not (false) the current version of the object is a delete marker. 
#[inline] - fn is_delete_marker(&self) -> Result, Error> { - Ok(Some( - self.headers() - .get("x-amz-delete-marker") - .map(|v| v == "true") - .unwrap_or(false), - )) - - //Ok(match self.headers().get("x-amz-delete-marker") { - // Some(v) => Some(v.to_str()?.parse::()?), - // None => None, - //}) + fn is_delete_marker(&self) -> Result { + self.headers() + .get(X_AMZ_DELETE_MARKER) + .map_or(Ok(false), |v| parse_bool(v.to_str()?)) } } @@ -201,7 +194,7 @@ pub trait HasTagging: HasS3Fields { /// /// If the bucket has no tags, this will return an empty `HashMap`. #[inline] - fn tags(&self) -> Result, Error> { + fn tags(&self) -> Result, ValidationErr> { let mut tags = HashMap::new(); if self.body().is_empty() { // Note: body is empty when server responses with NoSuchTagSet @@ -210,9 +203,9 @@ pub trait HasTagging: HasS3Fields { let mut root = Element::parse(self.body().clone().reader())?; let element = root .get_mut_child("TagSet") - .ok_or(Error::XmlError(" tag not found".to_string()))?; + .ok_or(ValidationErr::xml_error(" tag not found"))?; while let Some(v) = element.take_child("Tag") { - tags.insert(get_text(&v, "Key")?, get_text(&v, "Value")?); + tags.insert(get_text_result(&v, "Key")?, get_text_result(&v, "Value")?); } Ok(tags) } diff --git a/src/s3/response/append_object.rs b/src/s3/response/append_object.rs index fa21b6a..c41cc82 100644 --- a/src/s3/response/append_object.rs +++ b/src/s3/response/append_object.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasEtagFromHeaders, HasObject, HasObjectSize, HasRegion, HasS3Fields, HasVersion, }; diff --git a/src/s3/response/bucket_exists.rs b/src/s3/response/bucket_exists.rs index 8bc1e18..b106534 100644 --- a/src/s3/response/bucket_exists.rs +++ b/src/s3/response/bucket_exists.rs @@ -14,7 +14,9 @@ // limitations under the License. use crate::impl_has_s3fields; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::S3ServerError::S3Error; +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use async_trait::async_trait; @@ -47,15 +49,19 @@ impl FromS3Response for BucketExistsResponse { Ok(mut resp) => Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, + body: resp.bytes().await.map_err(ValidationErr::from)?, exists: true, }), - Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchBucket) => Ok(Self { - request, - headers: e.headers, - body: Bytes::new(), - exists: false, - }), + Err(Error::S3Server(S3Error(mut e))) + if matches!(e.code(), MinioErrorCode::NoSuchBucket) => + { + Ok(Self { + request, + headers: e.take_headers(), + body: Bytes::new(), + exists: false, + }) + } Err(e) => Err(e), } } diff --git a/src/s3/response/copy_object.rs b/src/s3/response/copy_object.rs index e885172..aaf565d 100644 --- a/src/s3/response/copy_object.rs +++ b/src/s3/response/copy_object.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasEtagFromBody, HasObject, HasRegion, HasS3Fields, HasVersion, }; diff --git a/src/s3/response/create_bucket.rs b/src/s3/response/create_bucket.rs index 30525b7..af7d1f0 100644 --- a/src/s3/response/create_bucket.rs +++ b/src/s3/response/create_bucket.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::impl_has_s3fields; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use async_trait::async_trait; @@ -46,17 +46,17 @@ impl FromS3Response for CreateBucketResponse { let mut resp: reqwest::Response = response?; let mut request = request; - let bucket: &str = request + let bucket = request .bucket .as_deref() - .ok_or_else(|| Error::InvalidBucketName("no bucket specified".into()))?; + .ok_or(ValidationErr::MissingBucketName)?; let region: &str = &request.inner_region; request.client.add_bucket_region(bucket, region); Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, + body: resp.bytes().await.map_err(ValidationErr::from)?, }) } } diff --git a/src/s3/response/delete_bucket.rs b/src/s3/response/delete_bucket.rs index f70ee8c..8b5d23e 100644 --- a/src/s3/response/delete_bucket.rs +++ b/src/s3/response/delete_bucket.rs @@ -14,7 +14,8 @@ // limitations under the License. 
use crate::impl_has_s3fields; -use crate::s3::error::Error; + +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use bytes::Bytes; @@ -44,16 +45,16 @@ impl FromS3Response for DeleteBucketResponse { let mut resp: reqwest::Response = response?; let mut request = request; - let bucket: &str = request + let bucket = request .bucket .as_deref() - .ok_or_else(|| Error::InvalidBucketName("no bucket specified".into()))?; + .ok_or(Error::Validation(ValidationErr::MissingBucketName))?; request.client.remove_bucket_region(bucket); Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, + body: resp.bytes().await.map_err(ValidationErr::from)?, }) } } diff --git a/src/s3/response/delete_bucket_encryption.rs b/src/s3/response/delete_bucket_encryption.rs index a2aaf29..fe4864f 100644 --- a/src/s3/response/delete_bucket_encryption.rs +++ b/src/s3/response/delete_bucket_encryption.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/delete_bucket_lifecycle.rs b/src/s3/response/delete_bucket_lifecycle.rs index 69f5121..9a1ba1c 100644 --- a/src/s3/response/delete_bucket_lifecycle.rs +++ b/src/s3/response/delete_bucket_lifecycle.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/delete_bucket_notification.rs b/src/s3/response/delete_bucket_notification.rs index 15e31fb..b327012 100644 --- a/src/s3/response/delete_bucket_notification.rs +++ b/src/s3/response/delete_bucket_notification.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/delete_bucket_policy.rs b/src/s3/response/delete_bucket_policy.rs index 2cdc71c..cc2b893 100644 --- a/src/s3/response/delete_bucket_policy.rs +++ b/src/s3/response/delete_bucket_policy.rs @@ -14,7 +14,8 @@ // limitations under the License. 
use crate::impl_has_s3fields; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::{Error, S3ServerError, ValidationErr}; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use async_trait::async_trait; @@ -46,13 +47,17 @@ impl FromS3Response for DeleteBucketPolicyResponse { Ok(mut resp) => Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, - }), - Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchBucketPolicy) => Ok(Self { - request, - headers: e.headers, - body: Bytes::new(), + body: resp.bytes().await.map_err(ValidationErr::from)?, }), + Err(Error::S3Server(S3ServerError::S3Error(mut e))) + if matches!(e.code(), MinioErrorCode::NoSuchBucketPolicy) => + { + Ok(Self { + request, + headers: e.take_headers(), + body: Bytes::new(), + }) + } Err(e) => Err(e), } } diff --git a/src/s3/response/delete_bucket_replication.rs b/src/s3/response/delete_bucket_replication.rs index ad530ca..3bc34ee 100644 --- a/src/s3/response/delete_bucket_replication.rs +++ b/src/s3/response/delete_bucket_replication.rs @@ -14,7 +14,8 @@ // limitations under the License. 
use crate::impl_has_s3fields; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::{Error, S3ServerError, ValidationErr}; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use async_trait::async_trait; @@ -46,14 +47,17 @@ impl FromS3Response for DeleteBucketReplicationResponse { Ok(mut resp) => Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, + body: resp.bytes().await.map_err(ValidationErr::from)?, }), - Err(Error::S3Error(e)) - if matches!(e.code, ErrorCode::ReplicationConfigurationNotFoundError) => + Err(Error::S3Server(S3ServerError::S3Error(mut e))) + if matches!( + e.code(), + MinioErrorCode::ReplicationConfigurationNotFoundError + ) => { Ok(Self { request, - headers: e.headers, + headers: e.take_headers(), body: Bytes::new(), }) } diff --git a/src/s3/response/delete_bucket_tagging.rs b/src/s3/response/delete_bucket_tagging.rs index 309c24c..cb90088 100644 --- a/src/s3/response/delete_bucket_tagging.rs +++ b/src/s3/response/delete_bucket_tagging.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/delete_object.rs b/src/s3/response/delete_object.rs index 2b0621c..bbf2026 100644 --- a/src/s3/response/delete_object.rs +++ b/src/s3/response/delete_object.rs @@ -13,12 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasIsDeleteMarker, HasRegion, HasS3Fields, HasVersion, }; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::{get_default_text, get_option_text, get_text}; +use crate::s3::utils::{get_text_default, get_text_option, get_text_result}; use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -101,7 +101,7 @@ impl_has_s3fields!(DeleteObjectsResponse); impl DeleteObjectsResponse { /// Returns the bucket name for which the delete operation was performed. pub fn result(&self) -> Result, Error> { - let root = Element::parse(self.body.clone().reader())?; + let root = Element::parse(self.body.clone().reader()).map_err(ValidationErr::from)?; let result = root .children .iter() @@ -109,19 +109,19 @@ impl DeleteObjectsResponse { .map(|elem| { if elem.name == "Deleted" { Ok(DeleteResult::Deleted(DeletedObject { - name: get_text(elem, "Key")?, - version_id: get_option_text(elem, "VersionId"), - delete_marker: get_default_text(elem, "DeleteMarker").to_lowercase() + name: get_text_result(elem, "Key")?, + version_id: get_text_option(elem, "VersionId"), + delete_marker: get_text_default(elem, "DeleteMarker").to_lowercase() == "true", - delete_marker_version_id: get_option_text(elem, "DeleteMarkerVersionId"), + delete_marker_version_id: get_text_option(elem, "DeleteMarkerVersionId"), })) } else { assert_eq!(elem.name, "Error"); Ok(DeleteResult::Error(DeleteError { - code: get_text(elem, "Code")?, - message: get_text(elem, "Message")?, - object_name: get_text(elem, "Key")?, - version_id: get_option_text(elem, "VersionId"), + code: get_text_result(elem, "Code")?, + message: get_text_result(elem, "Message")?, + object_name: get_text_result(elem, "Key")?, + version_id: get_text_option(elem, "VersionId"), })) } }) diff --git a/src/s3/response/delete_object_lock_config.rs 
b/src/s3/response/delete_object_lock_config.rs index f99274d..75cfa47 100644 --- a/src/s3/response/delete_object_lock_config.rs +++ b/src/s3/response/delete_object_lock_config.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/delete_object_tagging.rs b/src/s3/response/delete_object_tagging.rs index ba55e95..4f6407a 100644 --- a/src/s3/response/delete_object_tagging.rs +++ b/src/s3/response/delete_object_tagging.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, }; diff --git a/src/s3/response/get_bucket_encryption.rs b/src/s3/response/get_bucket_encryption.rs index 2c776a7..dcda29a 100644 --- a/src/s3/response/get_bucket_encryption.rs +++ b/src/s3/response/get_bucket_encryption.rs @@ -14,10 +14,11 @@ // limitations under the License. 
use crate::impl_has_s3fields; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::{Error, S3ServerError, ValidationErr}; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request, SseConfig}; -use crate::s3::utils::{get_option_text, get_text}; +use crate::s3::utils::{get_text_option, get_text_result}; use async_trait::async_trait; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -49,25 +50,25 @@ impl GetBucketEncryptionResponse { /// /// This includes the encryption algorithm and, if applicable, the AWS KMS key ID used for encrypting objects. /// If the bucket has no default encryption configuration, this method returns a default `SseConfig` with empty fields. - pub fn config(&self) -> Result { + pub fn config(&self) -> Result { if self.body.is_empty() { return Ok(SseConfig::default()); } - let mut root = Element::parse(self.body.clone().reader())?; // clone of Bytes is inexpensive + let mut root = Element::parse(self.body.clone().reader()).map_err(ValidationErr::from)?; // clone of Bytes is inexpensive let rule = root .get_mut_child("Rule") - .ok_or(Error::XmlError(" tag not found".into()))?; + .ok_or(ValidationErr::xml_error(" tag not found"))?; let sse_by_default = rule .get_mut_child("ApplyServerSideEncryptionByDefault") - .ok_or(Error::XmlError( - " tag not found".into(), + .ok_or(ValidationErr::xml_error( + " tag not found", ))?; Ok(SseConfig { - sse_algorithm: get_text(sse_by_default, "SSEAlgorithm")?, - kms_master_key_id: get_option_text(sse_by_default, "KMSMasterKeyID"), + sse_algorithm: get_text_result(sse_by_default, "SSEAlgorithm")?, + kms_master_key_id: get_text_option(sse_by_default, "KMSMasterKeyID"), }) } } @@ -82,17 +83,17 @@ impl FromS3Response for GetBucketEncryptionResponse { Ok(mut resp) => Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, + body: 
resp.bytes().await.map_err(ValidationErr::from)?, }), - Err(Error::S3Error(e)) + Err(Error::S3Server(S3ServerError::S3Error(mut e))) if matches!( - e.code, - ErrorCode::ServerSideEncryptionConfigurationNotFoundError + e.code(), + MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError ) => { Ok(Self { request, - headers: e.headers, + headers: e.take_headers(), body: Bytes::new(), }) } diff --git a/src/s3/response/get_bucket_lifecycle.rs b/src/s3/response/get_bucket_lifecycle.rs index d37b01a..03df099 100644 --- a/src/s3/response/get_bucket_lifecycle.rs +++ b/src/s3/response/get_bucket_lifecycle.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::lifecycle_config::LifecycleConfig; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; @@ -49,7 +49,7 @@ impl GetBucketLifecycleResponse { /// /// This configuration includes rules for managing the lifecycle of objects in the bucket, /// such as transitioning them to different storage classes or expiring them after a specified period. - pub fn config(&self) -> Result { + pub fn config(&self) -> Result { LifecycleConfig::from_xml(&Element::parse(self.body.clone().reader())?) } diff --git a/src/s3/response/get_bucket_notification.rs b/src/s3/response/get_bucket_notification.rs index 0b3153d..840efa8 100644 --- a/src/s3/response/get_bucket_notification.rs +++ b/src/s3/response/get_bucket_notification.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, NotificationConfig, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; @@ -47,7 +47,7 @@ impl GetBucketNotificationResponse { /// /// This configuration includes the event types and the destinations (e.g., SNS topics, SQS queues, Lambda functions) /// configured to receive notifications for those events. - pub fn config(&self) -> Result { + pub fn config(&self) -> Result { NotificationConfig::from_xml(&mut Element::parse(self.body.clone().reader())?) } } diff --git a/src/s3/response/get_bucket_policy.rs b/src/s3/response/get_bucket_policy.rs index cd77b88..d8cf797 100644 --- a/src/s3/response/get_bucket_policy.rs +++ b/src/s3/response/get_bucket_policy.rs @@ -14,7 +14,8 @@ // limitations under the License. use crate::impl_has_s3fields; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::{Error, S3ServerError, ValidationErr}; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use async_trait::async_trait; @@ -46,10 +47,8 @@ impl GetBucketPolicyResponse { /// /// This method retrieves the policy associated with the bucket, which defines permissions /// for accessing the bucket and its contents. - pub fn config(&self) -> Result<&str, Error> { - std::str::from_utf8(&self.body).map_err(|e| { - Error::Utf8Error(format!("Failed to parse bucket policy as UTF-8: {e}").into()) - }) + pub fn config(&self) -> Result<&str, ValidationErr> { + Ok(std::str::from_utf8(&self.body)?) 
} } @@ -63,13 +62,17 @@ impl FromS3Response for GetBucketPolicyResponse { Ok(mut resp) => Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, - }), - Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchBucketPolicy) => Ok(Self { - request, - headers: e.headers, - body: Bytes::from_static("{}".as_ref()), + body: resp.bytes().await.map_err(ValidationErr::from)?, }), + Err(Error::S3Server(S3ServerError::S3Error(mut e))) + if matches!(e.code(), MinioErrorCode::NoSuchBucketPolicy) => + { + Ok(Self { + request, + headers: e.take_headers(), + body: Bytes::from_static("{}".as_ref()), + }) + } Err(e) => Err(e), } } diff --git a/src/s3/response/get_bucket_replication.rs b/src/s3/response/get_bucket_replication.rs index fa99f88..324bb17 100644 --- a/src/s3/response/get_bucket_replication.rs +++ b/src/s3/response/get_bucket_replication.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, ReplicationConfig, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; @@ -48,7 +48,7 @@ impl GetBucketReplicationResponse { /// and one or more replication rules that specify the conditions under which objects are replicated. /// /// For more details on replication configuration elements, see the [AWS S3 Replication Configuration documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-add-config.html). 
- pub fn config(&self) -> Result { + pub fn config(&self) -> Result { let root = Element::parse(self.body.clone().reader())?; ReplicationConfig::from_xml(&root) } diff --git a/src/s3/response/get_bucket_tagging.rs b/src/s3/response/get_bucket_tagging.rs index 625c874..500d361 100644 --- a/src/s3/response/get_bucket_tagging.rs +++ b/src/s3/response/get_bucket_tagging.rs @@ -14,7 +14,8 @@ // limitations under the License. use crate::impl_has_s3fields; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::{Error, S3ServerError, ValidationErr}; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields, HasTagging}; use crate::s3::types::{FromS3Response, S3Request}; use async_trait::async_trait; @@ -52,13 +53,17 @@ impl FromS3Response for GetBucketTaggingResponse { Ok(mut resp) => Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, - }), - Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchTagSet) => Ok(Self { - request, - headers: e.headers, - body: Bytes::new(), + body: resp.bytes().await.map_err(ValidationErr::from)?, }), + Err(Error::S3Server(S3ServerError::S3Error(mut e))) + if matches!(e.code(), MinioErrorCode::NoSuchTagSet) => + { + Ok(Self { + request, + headers: e.take_headers(), + body: Bytes::new(), + }) + } Err(e) => Err(e), } } diff --git a/src/s3/response/get_bucket_versioning.rs b/src/s3/response/get_bucket_versioning.rs index 81caae0..62ad588 100644 --- a/src/s3/response/get_bucket_versioning.rs +++ b/src/s3/response/get_bucket_versioning.rs @@ -14,10 +14,10 @@ // limitations under the License. 
use crate::s3::builders::VersioningStatus; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::get_option_text; +use crate::s3::utils::get_text_option; use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -51,9 +51,9 @@ impl GetBucketVersioningResponse { /// - `Some(VersioningStatus::Enabled)` if versioning is enabled. /// - `Some(VersioningStatus::Suspended)` if versioning is suspended. /// - `None` if versioning has never been configured for this bucket. - pub fn status(&self) -> Result, Error> { + pub fn status(&self) -> Result, ValidationErr> { let root = Element::parse(self.body.clone().reader())?; - Ok(get_option_text(&root, "Status").map(|v| match v.as_str() { + Ok(get_text_option(&root, "Status").map(|v| match v.as_str() { "Enabled" => VersioningStatus::Enabled, _ => VersioningStatus::Suspended, // Default case })) @@ -65,8 +65,8 @@ impl GetBucketVersioningResponse { /// - `Some(true)` if MFA delete is enabled. /// - `Some(false)` if MFA delete is disabled. /// - `None` if MFA delete has never been configured for this bucket. - pub fn mfa_delete(&self) -> Result, Error> { + pub fn mfa_delete(&self) -> Result, ValidationErr> { let root = Element::parse(self.body.clone().reader())?; - Ok(get_option_text(&root, "MFADelete").map(|v| v.eq_ignore_ascii_case("Enabled"))) + Ok(get_text_option(&root, "MFADelete").map(|v| v.eq_ignore_ascii_case("Enabled"))) } } diff --git a/src/s3/response/get_object.rs b/src/s3/response/get_object.rs index 7eb97b0..b926b4e 100644 --- a/src/s3/response/get_object.rs +++ b/src/s3/response/get_object.rs @@ -14,14 +14,12 @@ // limitations under the License. 
use crate::impl_has_s3fields; +use crate::s3::builders::ObjectContent; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasEtagFromHeaders, HasObject, HasRegion, HasS3Fields, HasVersion, }; -use crate::s3::{ - builders::ObjectContent, - error::Error, - types::{FromS3Response, S3Request}, -}; +use crate::s3::types::{FromS3Response, S3Request}; use async_trait::async_trait; use bytes::Bytes; use futures_util::TryStreamExt; @@ -52,10 +50,10 @@ impl GetObjectResponse { } /// Returns the content size (in Bytes) of the object. - pub fn object_size(&self) -> Result { + pub fn object_size(&self) -> Result { self.resp .content_length() - .ok_or(Error::ContentLengthUnknown) + .ok_or(ValidationErr::ContentLengthUnknown) } } diff --git a/src/s3/response/get_object_legal_hold.rs b/src/s3/response/get_object_legal_hold.rs index 5903cba..e2f6891 100644 --- a/src/s3/response/get_object_legal_hold.rs +++ b/src/s3/response/get_object_legal_hold.rs @@ -13,12 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, }; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::get_default_text; +use crate::s3::utils::get_text_default; use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -47,11 +47,11 @@ impl GetObjectLegalHoldResponse { /// Returns the legal hold status of the object. /// /// This method retrieves whether the legal hold is enabled for the specified object. 
- pub fn enabled(&self) -> Result { + pub fn enabled(&self) -> Result { if self.body.is_empty() { return Ok(false); // No legal hold configuration present due to NoSuchObjectLockConfiguration } let root = Element::parse(self.body.clone().reader())?; - Ok(get_default_text(&root, "Status") == "ON") + Ok(get_text_default(&root, "Status") == "ON") } } diff --git a/src/s3/response/get_object_lock_config.rs b/src/s3/response/get_object_lock_config.rs index 1be630c..fa5cadb 100644 --- a/src/s3/response/get_object_lock_config.rs +++ b/src/s3/response/get_object_lock_config.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, ObjectLockConfig, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; @@ -46,7 +46,7 @@ impl GetObjectLockConfigResponse { /// /// This method retrieves the Object Lock settings, which include retention mode and period, /// as well as legal hold status for the bucket. - pub fn config(&self) -> Result { + pub fn config(&self) -> Result { ObjectLockConfig::from_xml(&Element::parse(self.body.clone().reader())?) } } diff --git a/src/s3/response/get_object_prompt.rs b/src/s3/response/get_object_prompt.rs index 387d54f..4dd9c01 100644 --- a/src/s3/response/get_object_prompt.rs +++ b/src/s3/response/get_object_prompt.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; @@ -38,9 +38,7 @@ impl GetObjectPromptResponse { /// Returns the prompt response for the object. /// /// This method retrieves the content of the object as a UTF-8 encoded string. - pub fn prompt_response(&self) -> Result<&str, Error> { - std::str::from_utf8(&self.body).map_err(|e| { - Error::Utf8Error(format!("Failed to parse prompt_response as UTF-8: {e}").into()) - }) + pub fn prompt_response(&self) -> Result<&str, ValidationErr> { + Ok(std::str::from_utf8(&self.body)?) } } diff --git a/src/s3/response/get_object_retention.rs b/src/s3/response/get_object_retention.rs index edde337..5cbe417 100644 --- a/src/s3/response/get_object_retention.rs +++ b/src/s3/response/get_object_retention.rs @@ -14,12 +14,13 @@ // limitations under the License. use crate::impl_has_s3fields; -use crate::s3::error::{Error, ErrorCode}; +use crate::s3::error::{Error, S3ServerError, ValidationErr}; +use crate::s3::minio_error_response::MinioErrorCode; use crate::s3::response::a_response_traits::{ HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, }; use crate::s3::types::{FromS3Response, RetentionMode, S3Request}; -use crate::s3::utils::{UtcTime, from_iso8601utc, get_option_text}; +use crate::s3::utils::{UtcTime, from_iso8601utc, get_text_option}; use async_trait::async_trait; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -45,12 +46,12 @@ impl GetObjectRetentionResponse { /// Returns the retention mode of the object. /// /// This method retrieves the retention mode, which can be either `Governance` or `Compliance`. 
- pub fn retention_mode(&self) -> Result, Error> { + pub fn retention_mode(&self) -> Result, ValidationErr> { if self.body.is_empty() { return Ok(None); } let root = Element::parse(self.body.clone().reader())?; - Ok(match get_option_text(&root, "Mode") { + Ok(match get_text_option(&root, "Mode") { Some(v) => Some(RetentionMode::parse(&v)?), _ => None, }) @@ -59,12 +60,12 @@ impl GetObjectRetentionResponse { /// Returns the date until which the object is retained. /// /// This method retrieves the retention date, which indicates when the object will no longer be retained. - pub fn retain_until_date(&self) -> Result, Error> { + pub fn retain_until_date(&self) -> Result, ValidationErr> { if self.body.is_empty() { return Ok(None); } let root = Element::parse(self.body.clone().reader())?; - Ok(match get_option_text(&root, "RetainUntilDate") { + Ok(match get_text_option(&root, "RetainUntilDate") { Some(v) => Some(from_iso8601utc(&v)?), _ => None, }) @@ -81,14 +82,14 @@ impl FromS3Response for GetObjectRetentionResponse { Ok(mut resp) => Ok(Self { request, headers: mem::take(resp.headers_mut()), - body: resp.bytes().await?, + body: resp.bytes().await.map_err(ValidationErr::from)?, }), - Err(Error::S3Error(e)) - if matches!(e.code, ErrorCode::NoSuchObjectLockConfiguration) => + Err(Error::S3Server(S3ServerError::S3Error(mut e))) + if matches!(e.code(), MinioErrorCode::NoSuchObjectLockConfiguration) => { Ok(Self { request, - headers: e.headers, + headers: e.take_headers(), body: Bytes::new(), }) } diff --git a/src/s3/response/get_object_tagging.rs b/src/s3/response/get_object_tagging.rs index fd26bbd..a71124b 100644 --- a/src/s3/response/get_object_tagging.rs +++ b/src/s3/response/get_object_tagging.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasObject, HasRegion, HasS3Fields, HasTagging, HasVersion, }; diff --git a/src/s3/response/get_region.rs b/src/s3/response/get_region.rs index 946c82c..033b53c 100644 --- a/src/s3/response/get_region.rs +++ b/src/s3/response/get_region.rs @@ -14,7 +14,7 @@ // limitations under the License. use crate::s3::client::DEFAULT_REGION; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; @@ -43,7 +43,7 @@ impl GetRegionResponse { /// Returns the region response for the bucket. /// /// This method retrieves the region where the bucket is located. - pub fn region_response(&self) -> Result { + pub fn region_response(&self) -> Result { let root = Element::parse(self.body.clone().reader())?; let mut location = root.get_text().unwrap_or_default().to_string(); diff --git a/src/s3/response/list_buckets.rs b/src/s3/response/list_buckets.rs index e31dede..04b9228 100644 --- a/src/s3/response/list_buckets.rs +++ b/src/s3/response/list_buckets.rs @@ -13,10 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::HasS3Fields; use crate::s3::types::{Bucket, FromS3Response, S3Request}; -use crate::s3::utils::{from_iso8601utc, get_text}; +use crate::s3::utils::{from_iso8601utc, get_text_result}; use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -36,18 +36,18 @@ impl_has_s3fields!(ListBucketsResponse); impl ListBucketsResponse { /// Returns the list of buckets in the account. 
- pub fn buckets(&self) -> Result, Error> { + pub fn buckets(&self) -> Result, ValidationErr> { let mut root = Element::parse(self.body().clone().reader())?; let buckets_xml = root .get_mut_child("Buckets") - .ok_or(Error::XmlError(" tag not found".into()))?; + .ok_or(ValidationErr::xml_error(" tag not found"))?; let mut buckets: Vec = Vec::new(); while let Some(b) = buckets_xml.take_child("Bucket") { let bucket = b; buckets.push(Bucket { - name: get_text(&bucket, "Name")?, - creation_date: from_iso8601utc(&get_text(&bucket, "CreationDate")?)?, + name: get_text_result(&bucket, "Name")?, + creation_date: from_iso8601utc(&get_text_result(&bucket, "CreationDate")?)?, }) } Ok(buckets) diff --git a/src/s3/response/list_objects.rs b/src/s3/response/list_objects.rs index 28b55f9..899b1a3 100644 --- a/src/s3/response/list_objects.rs +++ b/src/s3/response/list_objects.rs @@ -11,7 +11,7 @@ // limitations under the License. use crate::impl_has_s3fields; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::HasS3Fields; use crate::s3::types::{FromS3Response, ListEntry, S3Request}; use crate::s3::utils::xml::{Element, MergeXmlElements}; @@ -25,13 +25,12 @@ use std::mem; fn url_decode_w_enc( encoding_type: &Option, s: Option, -) -> Result, Error> { - if let Some(v) = encoding_type.as_ref() { - if v == "url" { - if let Some(raw) = s { - return Ok(Some(url_decode(&raw).to_string())); - } - } +) -> Result, ValidationErr> { + if let Some(v) = encoding_type.as_ref() + && v == "url" + && let Some(raw) = s + { + return Ok(Some(url_decode(&raw).to_string())); } if let Some(v) = s.as_ref() { @@ -53,7 +52,7 @@ fn parse_common_list_objects_response( bool, Option, ), - Error, + ValidationErr, > { let encoding_type = root.get_child_text("EncodingType"); let prefix = url_decode_w_enc( @@ -70,7 +69,8 @@ fn parse_common_list_objects_response( .unwrap_or(false), root.get_child_text("MaxKeys") .map(|x| x.parse::()) - 
.transpose()?, + .transpose() + .map_err(ValidationErr::from)?, )) } @@ -80,7 +80,7 @@ fn parse_list_objects_contents( main_tag: &str, encoding_type: &Option, with_delete_marker: bool, -) -> Result<(), Error> { +) -> Result<(), ValidationErr> { let children1 = root.get_matching_children(main_tag); let children2 = if with_delete_marker { root.get_matching_children("DeleteMarker") @@ -98,7 +98,8 @@ fn parse_list_objects_contents( let size: Option = content .get_child_text("Size") .map(|x| x.parse::()) - .transpose()?; + .transpose() + .map_err(ValidationErr::from)?; let storage_class = content.get_child_text("StorageClass"); let is_latest = content .get_child_text("IsLatest") @@ -153,7 +154,7 @@ fn parse_list_objects_common_prefixes( contents: &mut Vec, root: &Element, encoding_type: &Option, -) -> Result<(), Error> { +) -> Result<(), ValidationErr> { for (_, common_prefix) in root.get_matching_children("CommonPrefixes") { contents.push(ListEntry { name: url_decode_w_enc( @@ -208,9 +209,10 @@ impl FromS3Response for ListObjectsV1Response { ) -> Result { let mut resp = response?; let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; + let body = resp.bytes().await.map_err(ValidationErr::from)?; - let xmltree_root = xmltree::Element::parse(body.clone().reader())?; + let xmltree_root = + xmltree::Element::parse(body.clone().reader()).map_err(ValidationErr::from)?; let root = Element::from(&xmltree_root); let (name, encoding_type, prefix, delimiter, is_truncated, max_keys) = parse_common_list_objects_response(&root)?; @@ -271,16 +273,18 @@ impl FromS3Response for ListObjectsV2Response { ) -> Result { let mut resp = response?; let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; + let body = resp.bytes().await.map_err(ValidationErr::from)?; - let xmltree_root = xmltree::Element::parse(body.clone().reader())?; + let xmltree_root = + 
xmltree::Element::parse(body.clone().reader()).map_err(ValidationErr::from)?; let root = Element::from(&xmltree_root); let (name, encoding_type, prefix, delimiter, is_truncated, max_keys) = parse_common_list_objects_response(&root)?; let key_count = root .get_child_text("KeyCount") .map(|x| x.parse::()) - .transpose()?; + .transpose() + .map_err(ValidationErr::from)?; let start_after = url_decode_w_enc(&encoding_type, root.get_child_text("StartAfter"))?; let continuation_token = root.get_child_text("ContinuationToken"); let next_continuation_token = root.get_child_text("NextContinuationToken"); @@ -338,9 +342,10 @@ impl FromS3Response for ListObjectVersionsResponse { ) -> Result { let mut resp = response?; let headers: HeaderMap = mem::take(resp.headers_mut()); - let body = resp.bytes().await?; + let body = resp.bytes().await.map_err(ValidationErr::from)?; - let xmltree_root = xmltree::Element::parse(body.clone().reader())?; + let xmltree_root = + xmltree::Element::parse(body.clone().reader()).map_err(ValidationErr::from)?; let root = Element::from(&xmltree_root); let (name, encoding_type, prefix, delimiter, is_truncated, max_keys) = parse_common_list_objects_response(&root)?; diff --git a/src/s3/response/listen_bucket_notification.rs b/src/s3/response/listen_bucket_notification.rs index 8c7ab6e..ee2e778 100644 --- a/src/s3/response/listen_bucket_notification.rs +++ b/src/s3/response/listen_bucket_notification.rs @@ -14,7 +14,7 @@ // limitations under the License. 
use crate::impl_has_s3fields; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, NotificationRecords, S3Request}; use async_std::stream::Stream; @@ -58,7 +58,7 @@ impl FromS3Response let mut buf = Vec::new(); let mut cursor = 0; - let mut stream = byte_stream.map_err(Error::from).boxed(); + let mut stream = byte_stream.map_err(ValidationErr::from).boxed(); while let Some(chunk) = stream.next().await { let chunk = chunk?; @@ -67,10 +67,10 @@ impl FromS3Response while let Some(pos) = buf[cursor..].iter().position(|&b| b == b'\n') { let end = cursor + pos; let line_bytes = &buf[..end]; - let line = std::str::from_utf8(line_bytes)?.trim(); + let line = std::str::from_utf8(line_bytes).map_err(ValidationErr::from)?.trim(); if !line.is_empty() { - let parsed: NotificationRecords = serde_json::from_str(line)?; + let parsed: NotificationRecords = serde_json::from_str(line).map_err(ValidationErr::from)?; yield parsed; } @@ -86,12 +86,13 @@ impl FromS3Response // Drain the remaining buffer if not empty if !buf.is_empty() { - let line = std::str::from_utf8(&buf)?.trim(); + let line = std::str::from_utf8(&buf).map_err(ValidationErr::from)?.trim(); if !line.is_empty() { - let parsed: NotificationRecords = serde_json::from_str(line)?; + let parsed: NotificationRecords = serde_json::from_str(line).map_err(ValidationErr::from)?; yield parsed; } } + }); Ok(( diff --git a/src/s3/response/put_bucket_encryption.rs b/src/s3/response/put_bucket_encryption.rs index 9ad784d..e3a8242 100644 --- a/src/s3/response/put_bucket_encryption.rs +++ b/src/s3/response/put_bucket_encryption.rs @@ -13,10 +13,10 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request, SseConfig}; -use crate::s3::utils::{get_option_text, get_text}; +use crate::s3::utils::{get_text_option, get_text_result}; use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -41,22 +41,22 @@ impl HasRegion for PutBucketEncryptionResponse {} impl PutBucketEncryptionResponse { /// Returns the server-side encryption configuration. - pub fn config(&self) -> Result { + pub fn config(&self) -> Result { let mut root = Element::parse(self.body().clone().reader())?; let rule = root .get_mut_child("Rule") - .ok_or(Error::XmlError(String::from(" tag not found")))?; + .ok_or(ValidationErr::xml_error(" tag not found"))?; let sse_by_default = rule .get_mut_child("ApplyServerSideEncryptionByDefault") - .ok_or(Error::XmlError(String::from( + .ok_or(ValidationErr::xml_error( " tag not found", - )))?; + ))?; Ok(SseConfig { - sse_algorithm: get_text(sse_by_default, "SSEAlgorithm")?, - kms_master_key_id: get_option_text(sse_by_default, "KMSMasterKeyID"), + sse_algorithm: get_text_result(sse_by_default, "SSEAlgorithm")?, + kms_master_key_id: get_text_option(sse_by_default, "KMSMasterKeyID"), }) } } diff --git a/src/s3/response/put_bucket_lifecycle.rs b/src/s3/response/put_bucket_lifecycle.rs index 287094c..7f9de2b 100644 --- a/src/s3/response/put_bucket_lifecycle.rs +++ b/src/s3/response/put_bucket_lifecycle.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/put_bucket_notification.rs b/src/s3/response/put_bucket_notification.rs index 93b7743..39f4325 100644 --- a/src/s3/response/put_bucket_notification.rs +++ b/src/s3/response/put_bucket_notification.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/put_bucket_policy.rs b/src/s3/response/put_bucket_policy.rs index d502633..9909db8 100644 --- a/src/s3/response/put_bucket_policy.rs +++ b/src/s3/response/put_bucket_policy.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/put_bucket_replication.rs b/src/s3/response/put_bucket_replication.rs index 2cccda7..3c5e3fe 100644 --- a/src/s3/response/put_bucket_replication.rs +++ b/src/s3/response/put_bucket_replication.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/put_bucket_tagging.rs b/src/s3/response/put_bucket_tagging.rs index 71d8b89..327876e 100644 --- a/src/s3/response/put_bucket_tagging.rs +++ b/src/s3/response/put_bucket_tagging.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/put_bucket_versioning.rs b/src/s3/response/put_bucket_versioning.rs index 45ec913..a9205ec 100644 --- a/src/s3/response/put_bucket_versioning.rs +++ b/src/s3/response/put_bucket_versioning.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/put_object.rs b/src/s3/response/put_object.rs index 2f6386f..53ab279 100644 --- a/src/s3/response/put_object.rs +++ b/src/s3/response/put_object.rs @@ -13,12 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasEtagFromHeaders, HasObject, HasRegion, HasS3Fields, HasVersion, }; use crate::s3::types::{FromS3Response, S3Request}; -use crate::s3::utils::get_text; +use crate::s3::utils::get_text_result; use crate::{impl_from_s3response, impl_from_s3response_with_size, impl_has_s3fields}; use bytes::{Buf, Bytes}; use http::HeaderMap; @@ -99,9 +99,10 @@ impl HasEtagFromHeaders for S3MultipartResponse {} impl S3MultipartResponse { /// Returns the upload ID for the multipart upload, while consuming the response. - pub async fn upload_id(&self) -> Result { + pub async fn upload_id(&self) -> Result { let root = Element::parse(self.body.clone().reader())?; - get_text(&root, "UploadId").map_err(|e| Error::InvalidUploadId(e.to_string())) + get_text_result(&root, "UploadId") + .map_err(|e| ValidationErr::InvalidUploadId(e.to_string())) } } diff --git a/src/s3/response/put_object_legal_hold.rs b/src/s3/response/put_object_legal_hold.rs index 1a7864a..a5ea4a6 100644 --- a/src/s3/response/put_object_legal_hold.rs +++ b/src/s3/response/put_object_legal_hold.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, }; diff --git a/src/s3/response/put_object_lock_config.rs b/src/s3/response/put_object_lock_config.rs index 20331bc..9309ea6 100644 --- a/src/s3/response/put_object_lock_config.rs +++ b/src/s3/response/put_object_lock_config.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request}; use crate::{impl_from_s3response, impl_has_s3fields}; diff --git a/src/s3/response/put_object_retention.rs b/src/s3/response/put_object_retention.rs index 375ae10..763073b 100644 --- a/src/s3/response/put_object_retention.rs +++ b/src/s3/response/put_object_retention.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, }; diff --git a/src/s3/response/put_object_tagging.rs b/src/s3/response/put_object_tagging.rs index c845f32..124f0fa 100644 --- a/src/s3/response/put_object_tagging.rs +++ b/src/s3/response/put_object_tagging.rs @@ -13,7 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; use crate::s3::response::a_response_traits::{ HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion, }; diff --git a/src/s3/response/select_object_content.rs b/src/s3/response/select_object_content.rs index fa03ce1..81aa88a 100644 --- a/src/s3/response/select_object_content.rs +++ b/src/s3/response/select_object_content.rs @@ -14,14 +14,15 @@ // limitations under the License. 
use crate::impl_has_s3fields; -use crate::s3::error::Error; +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields}; use crate::s3::types::{FromS3Response, S3Request, SelectProgress}; -use crate::s3::utils::{copy_slice, crc32, get_text, uint32}; +use crate::s3::utils::{copy_slice, crc32, get_text_result, uint32}; use async_trait::async_trait; use bytes::Bytes; use http::HeaderMap; -use std::collections::{HashMap, VecDeque}; +use std::collections::VecDeque; use std::io::BufReader; use std::mem; use xmltree::Element; @@ -78,7 +79,7 @@ impl SelectObjectContentResponse { self.message_crc_read = false; } - fn read_prelude(&mut self) -> Result { + fn read_prelude(&mut self) -> Result { if self.buf.len() < 8 { return Ok(false); } @@ -88,13 +89,16 @@ impl SelectObjectContentResponse { self.prelude[i] = self .buf .pop_front() - .ok_or(Error::InsufficientData(8, i as u64))?; + .ok_or(ValidationErr::InsufficientData { + expected: 8, + got: i as u64, + })?; } Ok(true) } - fn read_prelude_crc(&mut self) -> Result { + fn read_prelude_crc(&mut self) -> Result { if self.buf.len() < 4 { return Ok(false); } @@ -104,13 +108,16 @@ impl SelectObjectContentResponse { self.prelude_crc[i] = self .buf .pop_front() - .ok_or(Error::InsufficientData(4, i as u64))?; + .ok_or(ValidationErr::InsufficientData { + expected: 4, + got: i as u64, + })?; } Ok(true) } - fn read_data(&mut self) -> Result { + fn read_data(&mut self) -> Result { let data_length = self.total_length - 8 - 4 - 4; if self.buf.len() < data_length { return Ok(false); @@ -123,14 +130,17 @@ impl SelectObjectContentResponse { self.data.push( self.buf .pop_front() - .ok_or(Error::InsufficientData(data_length as u64, i as u64))?, + .ok_or(ValidationErr::InsufficientData { + expected: data_length as u64, + got: i as u64, + })?, ); } Ok(true) } - fn read_message_crc(&mut self) -> Result { + fn 
read_message_crc(&mut self) -> Result { if self.buf.len() < 4 { return Ok(false); } @@ -140,14 +150,17 @@ impl SelectObjectContentResponse { self.message_crc[i] = self .buf .pop_front() - .ok_or(Error::InsufficientData(4, i as u64))?; + .ok_or(ValidationErr::InsufficientData { + expected: 4, + got: i as u64, + })?; } Ok(true) } - fn decode_header(&mut self, header_length: usize) -> Result, Error> { - let mut headers: HashMap = HashMap::new(); + fn decode_header(&mut self, header_length: usize) -> Result { + let mut headers = Multimap::new(); let mut offset = 0_usize; while offset < header_length { let mut length = self.data[offset] as usize; @@ -156,10 +169,10 @@ impl SelectObjectContentResponse { break; } - let name = String::from_utf8(self.data[offset..offset + length].to_vec())?; + let name: &str = std::str::from_utf8(&self.data[offset..offset + length])?; offset += length; if self.data[offset] != 7 { - return Err(Error::InvalidHeaderValueType(self.data[offset])); + return Err(ValidationErr::InvalidHeaderValueType(self.data[offset])); } offset += 1; @@ -169,16 +182,16 @@ impl SelectObjectContentResponse { offset += 1; length = ((b0 << 8) | b1) as usize; - let value = String::from_utf8(self.data[offset..offset + length].to_vec())?; + let value = std::str::from_utf8(&self.data[offset..offset + length])?; offset += length; - headers.insert(name, value); + headers.add(name, value); } Ok(headers) } - async fn do_read(&mut self) -> Result<(), Error> { + async fn do_read(&mut self) -> Result<(), ValidationErr> { if self.done { return Ok(()); } @@ -204,7 +217,11 @@ impl SelectObjectContentResponse { let expected = uint32(&self.prelude_crc)?; if got != expected { self.done = true; - return Err(Error::CrcMismatch("prelude".into(), expected, got)); + return Err(ValidationErr::CrcMismatch { + crc_type: "prelude".into(), + expected, + got, + }); } self.total_length = uint32(&self.prelude[0..4])? 
as usize; @@ -228,7 +245,11 @@ impl SelectObjectContentResponse { let expected = uint32(&self.message_crc)?; if got != expected { self.done = true; - return Err(Error::CrcMismatch("message".into(), expected, got)); + return Err(ValidationErr::CrcMismatch { + crc_type: "message".into(), + expected, + got, + }); } } @@ -240,16 +261,16 @@ impl SelectObjectContentResponse { }; if value == "error" { self.done = true; - return Err(Error::SelectError( - match headers.get(":error-code") { + return Err(ValidationErr::SelectError { + error_code: match headers.get(":error-code") { Some(v) => v.clone(), None => String::new(), }, - match headers.get(":error-message") { + error_message: match headers.get(":error-message") { Some(v) => v.clone(), None => String::new(), }, - )); + }); } let event_type = match headers.get(":event-type") { @@ -273,9 +294,9 @@ impl SelectObjectContentResponse { let root = Element::parse(&mut BufReader::new(payload))?; self.reset(); self.progress = SelectProgress { - bytes_scanned: get_text(&root, "BytesScanned")?.parse::()?, - bytes_progressed: get_text(&root, "BytesProcessed")?.parse::()?, - bytes_returned: get_text(&root, "BytesReturned")?.parse::()?, + bytes_scanned: get_text_result(&root, "BytesScanned")?.parse::()?, + bytes_progressed: get_text_result(&root, "BytesProcessed")?.parse::()?, + bytes_returned: get_text_result(&root, "BytesReturned")?.parse::()?, }; continue; } @@ -288,7 +309,7 @@ impl SelectObjectContentResponse { } self.done = true; - return Err(Error::UnknownEventType(event_type.to_string())); + return Err(ValidationErr::UnknownEventType(event_type.into())); } } diff --git a/src/s3/response/stat_object.rs b/src/s3/response/stat_object.rs index f301212..2889a1c 100644 --- a/src/s3/response/stat_object.rs +++ b/src/s3/response/stat_object.rs @@ -13,18 +13,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+use crate::s3::error::{Error, ValidationErr}; +use crate::s3::header_constants::*; use crate::s3::response::a_response_traits::{ HasBucket, HasEtagFromHeaders, HasIsDeleteMarker, HasObject, HasRegion, HasS3Fields, }; +use crate::s3::types::{FromS3Response, S3Request}; use crate::s3::types::{RetentionMode, parse_legal_hold}; use crate::s3::utils::{UtcTime, from_http_header_value, from_iso8601utc}; -use crate::s3::{ - error::Error, - types::{FromS3Response, S3Request}, -}; use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::Bytes; use http::HeaderMap; +use http::header::LAST_MODIFIED; use std::collections::HashMap; use std::mem; @@ -48,8 +48,8 @@ impl HasIsDeleteMarker for StatObjectResponse {} impl StatObjectResponse { /// Returns the size of the object (header-value of `Content-Length`). - pub fn size(&self) -> Result { - let size: u64 = match self.headers().get("Content-Length") { + pub fn size(&self) -> Result { + let size: u64 = match self.headers().get(CONTENT_LENGTH) { Some(v) => v.to_str()?.parse::()?, None => 0_u64, }; @@ -57,39 +57,39 @@ impl StatObjectResponse { } /// Return the last modified time of the object (header-value of `Last-Modified`). - pub fn last_modified(&self) -> Result, Error> { - match self.headers().get("Last-Modified") { + pub fn last_modified(&self) -> Result, ValidationErr> { + match self.headers().get(LAST_MODIFIED) { Some(v) => Ok(Some(from_http_header_value(v.to_str()?)?)), None => Ok(None), } } /// Return the retention mode of the object (header-value of `x-amz-object-lock-mode`). - pub fn retention_mode(&self) -> Result, Error> { - match self.headers().get("x-amz-object-lock-mode") { + pub fn retention_mode(&self) -> Result, ValidationErr> { + match self.headers().get(X_AMZ_OBJECT_LOCK_MODE) { Some(v) => Ok(Some(RetentionMode::parse(v.to_str()?)?)), None => Ok(None), } } /// Return the retention date of the object (header-value of `x-amz-object-lock-retain-until-date`). 
- pub fn retention_retain_until_date(&self) -> Result<Option<UtcTime>, Error> { - match self.headers().get("x-amz-object-lock-retain-until-date") { + pub fn retention_retain_until_date(&self) -> Result<Option<UtcTime>, ValidationErr> { + match self.headers().get(X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE) { Some(v) => Ok(Some(from_iso8601utc(v.to_str()?)?)), None => Ok(None), } } /// Return the legal hold status of the object (header-value of `x-amz-object-lock-legal-hold`). - pub fn legal_hold(&self) -> Result<Option<bool>, Error> { - match self.headers().get("x-amz-object-lock-legal-hold") { + pub fn legal_hold(&self) -> Result<Option<bool>, ValidationErr> { + match self.headers().get(X_AMZ_OBJECT_LOCK_LEGAL_HOLD) { Some(v) => Ok(Some(parse_legal_hold(v.to_str()?)?)), None => Ok(None), } } /// Returns the user-defined metadata of the object (header-value of `x-amz-meta-*`). - pub fn user_metadata(&self) -> Result<HashMap<String, String>, Error> { + pub fn user_metadata(&self) -> Result<HashMap<String, String>, ValidationErr> { let mut user_metadata: HashMap<String, String> = HashMap::new(); for (key, value) in self.headers().iter() { if let Some(v) = key.as_str().strip_prefix("x-amz-meta-") { diff --git a/src/s3/segmented_bytes.rs b/src/s3/segmented_bytes.rs index 0f0dfc1..ba9c104 100644 --- a/src/s3/segmented_bytes.rs +++ b/src/s3/segmented_bytes.rs @@ -54,7 +54,7 @@ impl SegmentedBytes { } } - pub fn iter(&self) -> SegmentedBytesIterator { + pub fn iter(&self) -> SegmentedBytesIterator<'_> { SegmentedBytesIterator { sb: self, current_segment: 0, diff --git a/src/s3/signer.rs b/src/s3/signer.rs index 348e87d..6226d11 100644 --- a/src/s3/signer.rs +++ b/src/s3/signer.rs @@ -15,6 +15,7 @@ //! 
Signature V4 for S3 API +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::utils::{UtcTime, sha256_hash, to_amz_date, to_signer_date}; use hex::encode as hexencode; @@ -139,7 +140,7 @@ fn sign_v4( let signature = get_signature(signing_key.as_slice(), string_to_sign.as_bytes()); let authorization = get_authorization(access_key, &scope, &signed_headers, &signature); - headers.add("Authorization", authorization); + headers.add(AUTHORIZATION, authorization); } /// Signs and updates headers for given parameters for S3 request @@ -184,11 +185,11 @@ pub(crate) fn presign_v4( let canonical_headers = "host:".to_string() + host; let signed_headers = "host"; - query_params.add("X-Amz-Algorithm", "AWS4-HMAC-SHA256"); - query_params.add("X-Amz-Credential", access_key.to_string() + "/" + &scope); - query_params.add("X-Amz-Date", to_amz_date(date)); - query_params.add("X-Amz-Expires", expires.to_string()); - query_params.add("X-Amz-SignedHeaders", signed_headers.to_string()); + query_params.add(X_AMZ_ALGORITHM, "AWS4-HMAC-SHA256"); + query_params.add(X_AMZ_CREDENTIAL, access_key.to_string() + "/" + &scope); + query_params.add(X_AMZ_DATE, to_amz_date(date)); + query_params.add(X_AMZ_EXPIRES, expires.to_string()); + query_params.add(X_AMZ_SIGNED_HEADERS, signed_headers.to_string()); let canonical_query_string = query_params.get_canonical_query_string(); let canonical_request_hash = get_canonical_request_hash( @@ -203,7 +204,7 @@ pub(crate) fn presign_v4( let signing_key = get_signing_key(secret_key, date, region, "s3"); let signature = get_signature(signing_key.as_slice(), string_to_sign.as_bytes()); - query_params.add("X-Amz-Signature", signature); + query_params.add(X_AMZ_SIGNATURE, signature); } /// Signs and updates headers for given parameters for pre-sign POST request diff --git a/src/s3/sse.rs b/src/s3/sse.rs index 1174843..95a8652 100644 --- a/src/s3/sse.rs +++ b/src/s3/sse.rs @@ -15,6 +15,7 @@ //! 
Server side encryption definitions +use crate::s3::header_constants::*; use crate::s3::multimap::{Multimap, MultimapExt}; use crate::s3::utils::{b64encode, md5sum_hash}; use std::any::Any; @@ -42,24 +43,24 @@ impl SseCustomerKey { let md5key: String = md5sum_hash(key.as_bytes()); let mut headers = Multimap::with_capacity(3); - headers.add("X-Amz-Server-Side-Encryption-Customer-Algorithm", "AES256"); - headers.add("X-Amz-Server-Side-Encryption-Customer-Key", b64key.clone()); + headers.add(X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, "AES256"); + headers.add(X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, b64key.clone()); headers.add( - "X-Amz-Server-Side-Encryption-Customer-Key-MD5", + X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, md5key.clone(), ); let mut copy_headers = Multimap::with_capacity(3); copy_headers.add( - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm", + X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, "AES256", ); copy_headers.add( - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key", + X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, b64key, ); copy_headers.add( - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5", + X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, md5key, ); @@ -98,10 +99,10 @@ impl SseKms { pub fn new(key: &str, context: Option<&str>) -> SseKms { let mut headers = Multimap::with_capacity(3); - headers.add("X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id", key); - headers.add("X-Amz-Server-Side-Encryption", "aws:kms"); + headers.add(X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID, key); + headers.add(X_AMZ_SERVER_SIDE_ENCRYPTION, "aws:kms"); if let Some(v) = context { - headers.add("X-Amz-Server-Side-Encryption-Context", b64encode(v)); + headers.add(X_AMZ_SERVER_SIDE_ENCRYPTION_CONTEXT, b64encode(v)); } SseKms { headers } @@ -135,7 +136,7 @@ pub struct SseS3 { impl SseS3 { pub fn new() -> Self { let mut headers = Multimap::new(); - headers.add("X-Amz-Server-Side-Encryption", 
"AES256"); + headers.add(X_AMZ_SERVER_SIDE_ENCRYPTION, "AES256"); Self { headers } } diff --git a/src/s3/types.rs b/src/s3/types.rs index d6c2dd7..f384b97 100644 --- a/src/s3/types.rs +++ b/src/s3/types.rs @@ -16,11 +16,11 @@ //! Various types for S3 API requests and responses use super::client::{Client, DEFAULT_REGION}; -use crate::s3::error::Error; -use crate::s3::utils::{UtcTime, get_option_text, get_text}; - +use crate::s3::error::{Error, ValidationErr}; +use crate::s3::header_constants::*; use crate::s3::multimap::Multimap; use crate::s3::segmented_bytes::SegmentedBytes; +use crate::s3::utils::{UtcTime, get_text_option, get_text_result}; use async_trait::async_trait; use futures_util::Stream; use http::Method; @@ -141,10 +141,10 @@ pub trait ToS3Request: Sized { /// /// # Returns /// - /// * `Result` - The executable S3 request on success, + /// * `Result` - The executable S3 request on success, /// or an error if the request cannot be built correctly. /// - fn to_s3request(self) -> Result; + fn to_s3request(self) -> Result; } /// Trait for converting HTTP responses into strongly-typed S3 response objects. 
@@ -184,7 +184,7 @@ pub trait FromS3Response: Sized { /// async fn from_s3response( s3req: S3Request, - resp: Result, + response: Result, ) -> Result; } @@ -283,11 +283,13 @@ pub enum RetentionMode { } impl RetentionMode { - pub fn parse(s: &str) -> Result { - match s.to_uppercase().as_str() { - "GOVERNANCE" => Ok(RetentionMode::GOVERNANCE), - "COMPLIANCE" => Ok(RetentionMode::COMPLIANCE), - _ => Err(Error::InvalidRetentionMode(s.to_string())), + pub fn parse(s: &str) -> Result { + if s.eq_ignore_ascii_case("GOVERNANCE") { + Ok(RetentionMode::GOVERNANCE) + } else if s.eq_ignore_ascii_case("COMPLIANCE") { + Ok(RetentionMode::COMPLIANCE) + } else { + Err(ValidationErr::InvalidRetentionMode(s.to_string())) } } } @@ -309,11 +311,13 @@ pub struct Retention { } /// Parses legal hold string value -pub fn parse_legal_hold(s: &str) -> Result { - match s.to_uppercase().as_str() { - "ON" => Ok(true), - "OFF" => Ok(false), - _ => Err(Error::InvalidLegalHold(s.to_string())), +pub fn parse_legal_hold(s: &str) -> Result { + if s.eq_ignore_ascii_case("ON") { + Ok(true) + } else if s.eq_ignore_ascii_case("OFF") { + Ok(false) + } else { + Err(ValidationErr::InvalidLegalHold(s.to_string())) } } @@ -444,11 +448,11 @@ impl SelectRequest { expr: &str, csv_input: CsvInputSerialization, csv_output: CsvOutputSerialization, - ) -> Result { + ) -> Result { if expr.is_empty() { - return Err(Error::InvalidSelectExpression(String::from( - "select expression cannot be empty", - ))); + return Err(ValidationErr::InvalidSelectExpression( + "select expression cannot be empty".into(), + )); } Ok(SelectRequest { @@ -468,11 +472,11 @@ impl SelectRequest { expr: String, csv_input: CsvInputSerialization, json_output: JsonOutputSerialization, - ) -> Result { + ) -> Result { if expr.is_empty() { - return Err(Error::InvalidSelectExpression(String::from( - "select expression cannot be empty", - ))); + return Err(ValidationErr::InvalidSelectExpression( + "select expression cannot be empty".into(), + )); } 
Ok(SelectRequest { @@ -492,11 +496,11 @@ impl SelectRequest { expr: String, json_input: JsonInputSerialization, json_output: JsonOutputSerialization, - ) -> Result { + ) -> Result { if expr.is_empty() { - return Err(Error::InvalidSelectExpression(String::from( - "select expression cannot be empty", - ))); + return Err(ValidationErr::InvalidSelectExpression( + "select expression cannot be empty".into(), + )); } Ok(SelectRequest { @@ -516,11 +520,11 @@ impl SelectRequest { expr: String, parquet_input: ParquetInputSerialization, csv_output: CsvOutputSerialization, - ) -> Result { + ) -> Result { if expr.is_empty() { - return Err(Error::InvalidSelectExpression(String::from( - "select expression cannot be empty", - ))); + return Err(ValidationErr::InvalidSelectExpression( + "select expression cannot be empty".into(), + )); } Ok(SelectRequest { @@ -540,11 +544,11 @@ impl SelectRequest { expr: String, parquet_input: ParquetInputSerialization, json_output: JsonOutputSerialization, - ) -> Result { + ) -> Result { if expr.is_empty() { - return Err(Error::InvalidSelectExpression(String::from( - "select expression cannot be empty", - ))); + return Err(ValidationErr::InvalidSelectExpression( + "select expression cannot be empty".into(), + )); } Ok(SelectRequest { @@ -668,17 +672,17 @@ impl SelectRequest { data.push_str("true"); } - if let Some(s) = self.scan_start_range { - if let Some(e) = self.scan_end_range { - data.push_str(""); - data.push_str(""); - data.push_str(&s.to_string()); - data.push_str(""); - data.push_str(""); - data.push_str(&e.to_string()); - data.push_str(""); - data.push_str(""); - } + if let Some(s) = self.scan_start_range + && let Some(e) = self.scan_end_range + { + data.push_str(""); + data.push_str(""); + data.push_str(&s.to_string()); + data.push_str(""); + data.push_str(""); + data.push_str(&e.to_string()); + data.push_str(""); + data.push_str(""); } data.push_str(""); @@ -738,19 +742,19 @@ pub struct ResponseElements(HashMap); impl ResponseElements { 
pub fn content_length(&self) -> Option<&String> { - self.0.get("content-length") + self.0.get(CONTENT_LENGTH) } pub fn x_amz_request_id(&self) -> Option<&String> { - self.0.get("x-amz-request-id") + self.0.get(X_AMZ_REQUEST_ID) } pub fn x_minio_deployment_id(&self) -> Option<&String> { - self.0.get("x-minio-deployment-id") + self.0.get(X_MINIO_DEPLOYMENT_ID) } pub fn x_amz_id_2(&self) -> Option<&String> { - self.0.get("x-amz-id-2") + self.0.get(X_AMZ_ID_2) } pub fn x_minio_origin_endpoint(&self) -> Option<&String> { @@ -860,11 +864,13 @@ pub enum Directive { } impl Directive { - pub fn parse(s: &str) -> Result { - match s { - "COPY" => Ok(Directive::Copy), - "REPLACE" => Ok(Directive::Replace), - _ => Err(Error::InvalidDirective(s.to_string())), + pub fn parse(s: &str) -> Result { + if s.eq_ignore_ascii_case("COPY") { + Ok(Directive::Copy) + } else if s.eq_ignore_ascii_case("REPLACE") { + Ok(Directive::Replace) + } else { + Err(ValidationErr::InvalidDirective(s.into())) } } } @@ -943,15 +949,13 @@ pub struct Filter { } impl Filter { - pub fn from_xml(element: &Element) -> Result { + pub fn from_xml(element: &Element) -> Result { let and_operator = match element.get_child("And") { Some(v) => Some(AndOperator { prefix: match v.get_child("Prefix") { Some(p) => Some( p.get_text() - .ok_or(Error::XmlError( - "text of tag not found".to_string(), - ))? + .ok_or(ValidationErr::xml_error("text of tag not found"))? 
.to_string(), ), None => None, @@ -962,8 +966,11 @@ impl Filter { for xml_node in &tags.children { let tag = xml_node .as_element() - .ok_or(Error::XmlError(" element not found".to_string()))?; - map.insert(get_text(tag, "Key")?, get_text(tag, "Value")?); + .ok_or(ValidationErr::xml_error(" element not found"))?; + map.insert( + get_text_result(tag, "Key")?, + get_text_result(tag, "Value")?, + ); } Some(map) } @@ -976,9 +983,7 @@ impl Filter { let prefix = match element.get_child("Prefix") { Some(v) => Some( v.get_text() - .ok_or(Error::XmlError( - "text of tag not found".to_string(), - ))? + .ok_or(ValidationErr::xml_error("text of tag not found"))? .to_string(), ), None => None, @@ -986,8 +991,8 @@ impl Filter { let tag = match element.get_child("Tag") { Some(v) => Some(Tag { - key: get_text(v, "Key")?, - value: get_text(v, "Value")?, + key: get_text_result(v, "Key")?, + value: get_text_result(v, "Value")?, }), None => None, }; @@ -999,11 +1004,11 @@ impl Filter { }) } - pub fn validate(&self) -> Result<(), Error> { + pub fn validate(&self) -> Result<(), ValidationErr> { if self.and_operator.is_some() ^ self.prefix.is_some() ^ self.tag.is_some() { return Ok(()); } - Err(Error::InvalidFilter) + Err(ValidationErr::InvalidFilter(self.to_xml())) } pub fn to_xml(&self) -> String { @@ -1060,18 +1065,18 @@ fn parse_common_notification_config( Option, Option, ), - Error, + ValidationErr, > { let mut events = Vec::new(); while let Some(v) = element.take_child("Event") { events.push( v.get_text() - .ok_or(Error::XmlError("text of tag not found".to_string()))? + .ok_or(ValidationErr::xml_error("text of tag not found"))? 
.to_string(), ); } - let id = get_option_text(element, "Id"); + let id = get_text_option(element, "Id"); let (prefix_filter_rule, suffix_filter_rule) = match element.get_child("Filter") { Some(filter) => { @@ -1079,13 +1084,13 @@ fn parse_common_notification_config( let mut suffix = None; let rules = filter .get_child("S3Key") - .ok_or(Error::XmlError(" tag not found".to_string()))?; + .ok_or(ValidationErr::xml_error(" tag not found"))?; for rule in &rules.children { let v = rule .as_element() - .ok_or(Error::XmlError(" tag not found".to_string()))?; - let name = get_text(v, "Name")?; - let value = get_text(v, "Value")?; + .ok_or(ValidationErr::xml_error(" tag not found"))?; + let name = get_text_result(v, "Name")?; + let value = get_text_result(v, "Value")?; if PrefixFilterRule::NAME == name { prefix = Some(PrefixFilterRule { value }); } else { @@ -1174,7 +1179,7 @@ pub struct CloudFuncConfig { } impl CloudFuncConfig { - pub fn from_xml(element: &mut Element) -> Result { + pub fn from_xml(element: &mut Element) -> Result { let (events, id, prefix_filter_rule, suffix_filter_rule) = parse_common_notification_config(element)?; Ok(CloudFuncConfig { @@ -1182,16 +1187,16 @@ impl CloudFuncConfig { id, prefix_filter_rule, suffix_filter_rule, - cloud_func: get_text(element, "CloudFunction")?, + cloud_func: get_text_result(element, "CloudFunction")?, }) } - pub fn validate(&self) -> Result<(), Error> { + pub fn validate(&self) -> Result<(), ValidationErr> { if !self.events.is_empty() && !self.cloud_func.is_empty() { return Ok(()); } - Err(Error::InvalidFilter) + Err(ValidationErr::InvalidFilter(self.to_xml())) } pub fn to_xml(&self) -> String { @@ -1225,7 +1230,7 @@ pub struct QueueConfig { } impl QueueConfig { - pub fn from_xml(element: &mut Element) -> Result { + pub fn from_xml(element: &mut Element) -> Result { let (events, id, prefix_filter_rule, suffix_filter_rule) = parse_common_notification_config(element)?; Ok(QueueConfig { @@ -1233,16 +1238,16 @@ impl QueueConfig 
{ id, prefix_filter_rule, suffix_filter_rule, - queue: get_text(element, "Queue")?, + queue: get_text_result(element, "Queue")?, }) } - pub fn validate(&self) -> Result<(), Error> { + pub fn validate(&self) -> Result<(), ValidationErr> { if !self.events.is_empty() && !self.queue.is_empty() { return Ok(()); } - Err(Error::InvalidFilter) + Err(ValidationErr::InvalidFilter(self.to_xml())) } pub fn to_xml(&self) -> String { @@ -1276,7 +1281,7 @@ pub struct TopicConfig { } impl TopicConfig { - pub fn from_xml(element: &mut Element) -> Result { + pub fn from_xml(element: &mut Element) -> Result { let (events, id, prefix_filter_rule, suffix_filter_rule) = parse_common_notification_config(element)?; Ok(TopicConfig { @@ -1284,16 +1289,16 @@ impl TopicConfig { id, prefix_filter_rule, suffix_filter_rule, - topic: get_text(element, "Topic")?, + topic: get_text_result(element, "Topic")?, }) } - pub fn validate(&self) -> Result<(), Error> { + pub fn validate(&self) -> Result<(), ValidationErr> { if !self.events.is_empty() && !self.topic.is_empty() { return Ok(()); } - Err(Error::InvalidFilter) + Err(ValidationErr::InvalidFilter(self.to_xml())) } pub fn to_xml(&self) -> String { @@ -1325,7 +1330,7 @@ pub struct NotificationConfig { } impl NotificationConfig { - pub fn from_xml(root: &mut Element) -> Result { + pub fn from_xml(root: &mut Element) -> Result { let mut config = NotificationConfig { cloud_func_config_list: None, queue_config_list: None, @@ -1359,7 +1364,7 @@ impl NotificationConfig { Ok(config) } - pub fn validate(&self) -> Result<(), Error> { + pub fn validate(&self) -> Result<(), ValidationErr> { if let Some(v) = &self.cloud_func_config_list { for rule in v { rule.validate()?; @@ -1478,46 +1483,46 @@ pub struct Destination { } impl Destination { - pub fn from_xml(element: &Element) -> Result { + pub fn from_xml(element: &Element) -> Result { Ok(Destination { - bucket_arn: get_text(element, "Bucket")?, + bucket_arn: get_text_result(element, "Bucket")?, 
access_control_translation: match element.get_child("AccessControlTranslation") { Some(v) => Some(AccessControlTranslation { - owner: get_text(v, "Owner")?, + owner: get_text_result(v, "Owner")?, }), _ => None, }, - account: get_option_text(element, "Account"), + account: get_text_option(element, "Account"), encryption_config: element.get_child("EncryptionConfiguration").map(|v| { EncryptionConfig { - replica_kms_key_id: get_option_text(v, "ReplicaKmsKeyID"), + replica_kms_key_id: get_text_option(v, "ReplicaKmsKeyID"), } }), metrics: match element.get_child("Metrics") { Some(v) => Some(Metrics { - event_threshold_minutes: match get_option_text( + event_threshold_minutes: match get_text_option( v.get_child("EventThreshold") - .ok_or(Error::XmlError(" tag not found".to_string()))?, + .ok_or(ValidationErr::xml_error(" tag not found"))?, "Minutes", ) { Some(v) => Some(v.parse::()?), _ => None, }, - status: get_text(v, "Status")? == "Enabled", + status: get_text_result(v, "Status")? == "Enabled", }), _ => None, }, replication_time: match element.get_child("ReplicationTime") { Some(v) => Some(ReplicationTime { - time_minutes: match get_option_text(v, "Time") { + time_minutes: match get_text_option(v, "Time") { Some(v) => Some(v.parse::()?), _ => None, }, - status: get_text(v, "Status")? == "Enabled", + status: get_text_result(v, "Status")? 
== "Enabled", }), _ => None, }, - storage_class: get_option_text(element, "StorageClass"), + storage_class: get_text_option(element, "StorageClass"), }) } @@ -1622,36 +1627,38 @@ pub struct ReplicationRule { } impl ReplicationRule { - pub fn from_xml(element: &Element) -> Result { + pub fn from_xml(element: &Element) -> Result { Ok(ReplicationRule { destination: Destination::from_xml( element .get_child("Destination") - .ok_or(Error::XmlError(" tag not found".to_string()))?, + .ok_or(ValidationErr::xml_error(" tag not found"))?, )?, delete_marker_replication_status: match element.get_child("DeleteMarkerReplication") { - Some(v) => Some(get_text(v, "Status")? == "Enabled"), + Some(v) => Some(get_text_result(v, "Status")? == "Enabled"), _ => None, }, existing_object_replication_status: match element.get_child("ExistingObjectReplication") { - Some(v) => Some(get_text(v, "Status")? == "Enabled"), + Some(v) => Some(get_text_result(v, "Status")? == "Enabled"), _ => None, }, filter: match element.get_child("Filter") { Some(v) => Some(Filter::from_xml(v)?), _ => None, }, - id: get_option_text(element, "ID"), - prefix: get_option_text(element, "Prefix"), - priority: match get_option_text(element, "Priority") { + id: get_text_option(element, "ID"), + prefix: get_text_option(element, "Prefix"), + priority: match get_text_option(element, "Priority") { Some(v) => Some(v.parse::()?), _ => None, }, source_selection_criteria: match element.get_child("SourceSelectionCriteria") { Some(v) => match v.get_child("SseKmsEncryptedObjects") { Some(v) => Some(SourceSelectionCriteria { - sse_kms_encrypted_objects_status: Some(get_text(v, "Status")? == "Enabled"), + sse_kms_encrypted_objects_status: Some( + get_text_result(v, "Status")? 
== "Enabled", + ), }), _ => Some(SourceSelectionCriteria { sse_kms_encrypted_objects_status: None, @@ -1660,10 +1667,10 @@ impl ReplicationRule { _ => None, }, delete_replication_status: match element.get_child("DeleteReplication") { - Some(v) => Some(get_text(v, "Status")? == "Enabled"), + Some(v) => Some(get_text_result(v, "Status")? == "Enabled"), _ => None, }, - status: get_text(element, "Status")? == "Enabled", + status: get_text_result(element, "Status")? == "Enabled", }) } @@ -1759,19 +1766,18 @@ pub struct ReplicationConfig { } impl ReplicationConfig { - pub fn from_xml(root: &Element) -> Result { + pub fn from_xml(root: &Element) -> Result { let mut config = ReplicationConfig { - role: get_option_text(root, "Role"), + role: get_text_option(root, "Role"), rules: Vec::new(), }; if let Some(v) = root.get_child("Rule") { for rule in &v.children { - config - .rules - .push(ReplicationRule::from_xml(rule.as_element().ok_or( - Error::XmlError(" tag not found".to_string()), - )?)?); + config.rules.push(ReplicationRule::from_xml( + rule.as_element() + .ok_or(ValidationErr::xml_error(" tag not found"))?, + )?); } } @@ -1805,7 +1811,11 @@ pub struct ObjectLockConfig { } impl ObjectLockConfig { - pub fn new(mode: RetentionMode, days: Option, years: Option) -> Result { + pub fn new( + mode: RetentionMode, + days: Option, + years: Option, + ) -> Result { if days.is_some() ^ years.is_some() { return Ok(Self { retention_mode: Some(mode), @@ -1814,12 +1824,12 @@ impl ObjectLockConfig { }); } - Err(Error::InvalidObjectLockConfig( - "only one days or years must be set".to_string(), + Err(ValidationErr::InvalidObjectLockConfig( + "only one days or years must be set".into(), )) } - pub fn from_xml(root: &Element) -> Result { + pub fn from_xml(root: &Element) -> Result { let mut config = ObjectLockConfig { retention_mode: None, retention_duration_days: None, @@ -1827,17 +1837,19 @@ impl ObjectLockConfig { }; if let Some(r) = root.get_child("Rule") { - let default_retention = 
r.get_child("DefaultRetention").ok_or(Error::XmlError( - " tag not found".to_string(), - ))?; - config.retention_mode = - Some(RetentionMode::parse(&get_text(default_retention, "Mode")?)?); + let default_retention = r + .get_child("DefaultRetention") + .ok_or(ValidationErr::xml_error(" tag not found"))?; + config.retention_mode = Some(RetentionMode::parse(&get_text_result( + default_retention, + "Mode", + )?)?); - if let Some(v) = get_option_text(default_retention, "Days") { + if let Some(v) = get_text_option(default_retention, "Days") { config.retention_duration_days = Some(v.parse::()?); } - if let Some(v) = get_option_text(default_retention, "Years") { + if let Some(v) = get_text_option(default_retention, "Years") { config.retention_duration_years = Some(v.parse::()?); } } diff --git a/src/s3/utils.rs b/src/s3/utils.rs index acac48a..f8793fa 100644 --- a/src/s3/utils.rs +++ b/src/s3/utils.rs @@ -15,13 +15,12 @@ //! Various utility and helper functions -use crate::s3::error::Error; use crate::s3::multimap::Multimap; use crate::s3::segmented_bytes::SegmentedBytes; use base64::engine::Engine as _; use base64::engine::general_purpose::STANDARD as BASE64; use byteorder::{BigEndian, ReadBytesExt}; -use chrono::{DateTime, Datelike, NaiveDateTime, ParseError, Utc}; +use chrono::{DateTime, Datelike, NaiveDateTime, Utc}; use crc::{CRC_32_ISO_HDLC, Crc}; use hex::ToHex; use lazy_static::lazy_static; @@ -35,10 +34,11 @@ use sha2::{Digest, Sha256}; use std::collections::HashMap; use std::sync::Arc; use xmltree::Element; - /// Date and time with UTC timezone pub type UtcTime = DateTime; - +use crate::s3::Client; +use crate::s3::error::ValidationErr; +use crate::s3::sse::{Sse, SseCustomerKey}; use url::form_urlencoded; // Great stuff to get confused about. 
@@ -72,9 +72,13 @@ pub fn crc32(data: &[u8]) -> u32 { Crc::::new(&CRC_32_ISO_HDLC).checksum(data) } -/// Converts data array into 32 bit unsigned int -pub fn uint32(mut data: &[u8]) -> Result { +/// Converts data array into 32 bit BigEndian unsigned int +pub fn uint32(mut data: &[u8]) -> Result { data.read_u32::() + .map_err(|e| ValidationErr::InvalidIntegerValue { + message: "data is not a valid 32-bit BigEndian unsigned integer".into(), + source: Box::new(e), + }) } /// sha256 hash of empty data @@ -179,14 +183,10 @@ pub fn to_iso8601utc(time: UtcTime) -> String { } /// Parses ISO8601 UTC formatted value to time -pub fn from_iso8601utc(s: &str) -> Result { - Ok(DateTime::::from_naive_utc_and_offset( - match NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%3fZ") { - Ok(d) => d, - _ => NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%SZ")?, - }, - Utc, - )) +pub fn from_iso8601utc(s: &str) -> Result { + let dt = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%3fZ") + .or_else(|_| NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%SZ"))?; + Ok(DateTime::::from_naive_utc_and_offset(dt, Utc)) } const OBJECT_KEY_ENCODE_SET: &AsciiSet = &NON_ALPHANUMERIC @@ -220,12 +220,20 @@ pub mod aws_date_format { } } +pub fn parse_bool(value: &str) -> Result { + if value.eq_ignore_ascii_case("true") { + Ok(true) + } else if value.eq_ignore_ascii_case("false") { + Ok(false) + } else { + Err(ValidationErr::InvalidBooleanValue(value.to_string())) + } +} + /// Parses HTTP header value to time -pub fn from_http_header_value(s: &str) -> Result { - Ok(DateTime::::from_naive_utc_and_offset( - NaiveDateTime::parse_from_str(s, "%a, %d %b %Y %H:%M:%S GMT")?, - Utc, - )) +pub fn from_http_header_value(s: &str) -> Result { + let dt = NaiveDateTime::parse_from_str(s, "%a, %d %b %Y %H:%M:%S GMT")?; + Ok(DateTime::::from_naive_utc_and_offset(dt, Utc)) } /// Checks if given hostname is valid or not @@ -266,23 +274,26 @@ pub fn match_region(value: &str) -> bool { } /// Validates given 
bucket name. TODO S3Express has slightly different rules for bucket names -pub fn check_bucket_name(bucket_name: impl AsRef, strict: bool) -> Result<(), Error> { +pub fn check_bucket_name(bucket_name: impl AsRef, strict: bool) -> Result<(), ValidationErr> { let bucket_name: &str = bucket_name.as_ref().trim(); let bucket_name_len = bucket_name.len(); if bucket_name_len == 0 { - return Err(Error::InvalidBucketName( - "bucket name cannot be empty".into(), - )); + return Err(ValidationErr::InvalidBucketName { + name: "".into(), + reason: "bucket name cannot be empty".into(), + }); } if bucket_name_len < 3 { - return Err(Error::InvalidBucketName(format!( - "bucket name ('{bucket_name}') cannot be less than 3 characters" - ))); + return Err(ValidationErr::InvalidBucketName { + name: bucket_name.into(), + reason: "bucket name cannot be less than 3 characters".into(), + }); } if bucket_name_len > 63 { - return Err(Error::InvalidBucketName(format!( - "Bucket name ('{bucket_name}') cannot be greater than 63 characters" - ))); + return Err(ValidationErr::InvalidBucketName { + name: bucket_name.into(), + reason: "bucket name cannot be greater than 63 characters".into(), + }); } lazy_static! 
{ @@ -294,69 +305,118 @@ pub fn check_bucket_name(bucket_name: impl AsRef, strict: bool) -> Result<( } if IPV4_REGEX.is_match(bucket_name) { - return Err(Error::InvalidBucketName(format!( - "bucket name ('{bucket_name}') cannot be an IP address" - ))); + return Err(ValidationErr::InvalidBucketName { + name: bucket_name.into(), + reason: "bucket name cannot be an IP address".into(), + }); } if bucket_name.contains("..") || bucket_name.contains(".-") || bucket_name.contains("-.") { - return Err(Error::InvalidBucketName(format!( - "bucket name ('{bucket_name}') contains invalid successive characters '..', '.-' or '-.'", - ))); + return Err(ValidationErr::InvalidBucketName { + name: bucket_name.into(), + reason: "bucket name contains invalid successive characters '..', '.-' or '-.'".into(), + }); } if strict { if !VALID_BUCKET_NAME_STRICT_REGEX.is_match(bucket_name) { - return Err(Error::InvalidBucketName(format!( - "bucket name ('{bucket_name}') does not follow S3 standards strictly, according to {}", - *VALID_BUCKET_NAME_STRICT_REGEX - ))); + return Err(ValidationErr::InvalidBucketName { + name: bucket_name.into(), + reason: format!( + "bucket name does not follow S3 standards strictly, according to {}", + *VALID_BUCKET_NAME_STRICT_REGEX + ), + }); } } else if !VALID_BUCKET_NAME_REGEX.is_match(bucket_name) { - return Err(Error::InvalidBucketName(format!( - "bucket name ('{bucket_name}') does not follow S3 standards, according to {}", - *VALID_BUCKET_NAME_REGEX - ))); + return Err(ValidationErr::InvalidBucketName { + name: bucket_name.into(), + reason: format!( + "bucket name does not follow S3 standards, according to {}", + *VALID_BUCKET_NAME_REGEX + ), + }); } Ok(()) } /// Validates given object name. 
TODO S3Express has slightly different rules for object names -pub fn check_object_name(object_name: impl AsRef) -> Result<(), Error> { - let object_name: &str = object_name.as_ref(); - let object_name_n_bytes = object_name.len(); - if object_name_n_bytes == 0 { - return Err(Error::InvalidObjectName( +pub fn check_object_name(object_name: impl AsRef) -> Result<(), ValidationErr> { + let name = object_name.as_ref(); + match name.len() { + 0 => Err(ValidationErr::InvalidObjectName( "object name cannot be empty".into(), - )); - } - if object_name_n_bytes > 1024 { - return Err(Error::InvalidObjectName(format!( - "Object name ('{object_name}') cannot be greater than 1024 bytes" - ))); + )), + n if n > 1024 => Err(ValidationErr::InvalidObjectName(format!( + "Object name ('{name}') cannot be greater than 1024 bytes" + ))), + _ => Ok(()), } +} +/// Validates SSE (Server-Side Encryption) settings. +pub fn check_sse(sse: &Option>, client: &Client) -> Result<(), ValidationErr> { + if let Some(v) = &sse + && v.tls_required() + && !client.is_secure() + { + return Err(ValidationErr::SseTlsRequired(None)); + } Ok(()) } +/// Validates SSE-C (Server-Side Encryption with Customer-Provided Keys) settings. +pub fn check_ssec(ssec: &Option, client: &Client) -> Result<(), ValidationErr> { + if ssec.is_some() && !client.is_secure() { + return Err(ValidationErr::SseTlsRequired(None)); + } + Ok(()) +} + +/// Validates SSE-C (Server-Side Encryption with Customer-Provided Keys) settings and logs an error +pub fn check_ssec_with_log( + ssec: &Option, + client: &Client, + bucket: &str, + object: &str, + version: &Option, +) -> Result<(), ValidationErr> { + if ssec.is_some() && !client.is_secure() { + return Err(ValidationErr::SseTlsRequired(Some(format!( + "source {bucket}/{object}{}: ", + version + .as_ref() + .map_or(String::new(), |v| String::from("?versionId=") + v) + )))); + } + Ok(()) +} + +/// Gets default text value of given XML element for given tag. 
+pub fn get_text_default(element: &Element, tag: &str) -> String { + element.get_child(tag).map_or(String::new(), |v| { + v.get_text().unwrap_or_default().to_string() + }) +} + /// Gets text value of given XML element for given tag. -pub fn get_text(element: &Element, tag: &str) -> Result { +pub fn get_text_result(element: &Element, tag: &str) -> Result { Ok(element .get_child(tag) - .ok_or(Error::XmlError(format!("<{tag}> tag not found")))? + .ok_or(ValidationErr::xml_error(format!("<{tag}> tag not found")))? .get_text() - .ok_or(Error::XmlError(format!("text of <{tag}> tag not found")))? + .ok_or(ValidationErr::xml_error(format!( + "text of <{tag}> tag not found" + )))? .to_string()) } /// Gets optional text value of given XML element for given tag. -pub fn get_option_text(element: &Element, tag: &str) -> Option { - if let Some(v) = element.get_child(tag) { - return Some(v.get_text().unwrap_or_default().to_string()); - } - - None +pub fn get_text_option(element: &Element, tag: &str) -> Option { + element + .get_child(tag) + .and_then(|v| v.get_text().map(|s| s.to_string())) } /// Trim leading and trailing quotes from a string. It consumes the @@ -368,13 +428,6 @@ pub fn trim_quotes(mut s: String) -> String { s } -/// Gets default text value of given XML element for given tag. 
-pub fn get_default_text(element: &Element, tag: &str) -> String { - element.get_child(tag).map_or(String::new(), |v| { - v.get_text().unwrap_or_default().to_string() - }) -} - /// Copies source byte slice into destination byte slice pub fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize { let mut c = 0; @@ -409,10 +462,13 @@ const QUERY_ESCAPE: &AsciiSet = &NON_ALPHANUMERIC .remove(b'.') .remove(b'~'); -fn unescape(s: &str) -> Result { +fn unescape(s: &str) -> Result { percent_decode_str(s) .decode_utf8() - .map_err(|e| Error::TagDecodingError(s.to_string(), e.to_string())) + .map_err(|e| ValidationErr::TagDecodingError { + input: s.to_string(), + error_message: e.to_string(), + }) .map(|s| s.to_string()) } @@ -431,17 +487,17 @@ pub fn encode_tags(h: &HashMap) -> String { tags.join("&") } -pub fn parse_tags(s: &str) -> Result, Error> { +pub fn parse_tags(s: &str) -> Result, ValidationErr> { let mut tags = HashMap::new(); for tag in s.split('&') { let mut kv = tag.split('='); let k = match kv.next() { Some(v) => unescape(v)?, None => { - return Err(Error::TagDecodingError( - s.into(), - "tag key was empty".into(), - )); + return Err(ValidationErr::TagDecodingError { + input: s.into(), + error_message: "tag key was empty".into(), + }); } }; let v = match kv.next() { @@ -449,10 +505,10 @@ pub fn parse_tags(s: &str) -> Result, Error> { None => "".to_owned(), }; if kv.next().is_some() { - return Err(Error::TagDecodingError( - s.into(), - "tag had too many values for a key".into(), - )); + return Err(ValidationErr::TagDecodingError { + input: s.into(), + error_message: "tag had too many values for a key".into(), + }); } tags.insert(k, v); } @@ -468,10 +524,9 @@ pub fn insert(data: Option, key: impl Into) -> Multimap { } pub mod xml { + use crate::s3::error::ValidationErr; use std::collections::HashMap; - use crate::s3::error::Error; - #[derive(Debug, Clone)] struct XmlElementIndex { children: HashMap>, @@ -537,21 +592,23 @@ pub mod xml { .map(|v| v.to_string()) } - pub 
fn get_child_text_or_error(&self, tag: &str) -> Result { + pub fn get_child_text_or_error(&self, tag: &str) -> Result { let i = self .child_element_index .get_first(tag) - .ok_or(Error::XmlError(format!("<{tag}> tag not found")))?; + .ok_or(ValidationErr::xml_error(format!("<{tag}> tag not found")))?; self.inner.children[i] .as_element() .unwrap() .get_text() .map(|x| x.to_string()) - .ok_or(Error::XmlError(format!("text of <{tag}> tag not found"))) + .ok_or(ValidationErr::xml_error(format!( + "text of <{tag}> tag not found" + ))) } // Returns all children with given tag along with their index. - pub fn get_matching_children(&self, tag: &str) -> Vec<(usize, Element)> { + pub fn get_matching_children(&self, tag: &str) -> Vec<(usize, Element<'_>)> { self.child_element_index .get(tag) .unwrap_or(&vec![]) @@ -560,7 +617,7 @@ pub mod xml { .collect() } - pub fn get_child(&self, tag: &str) -> Option { + pub fn get_child(&self, tag: &str) -> Option> { let index = self.child_element_index.get_first(tag)?; Some(self.inner.children[index].as_element()?.into()) } diff --git a/tests/test_append_object.rs b/tests/test_append_object.rs index 5a57eae..aa60ef1 100644 --- a/tests/test_append_object.rs +++ b/tests/test_append_object.rs @@ -14,7 +14,8 @@ // limitations under the License. 
use minio::s3::builders::ObjectContent; -use minio::s3::error::{Error, ErrorCode}; +use minio::s3::error::{Error, S3ServerError}; +use minio::s3::minio_error_response::MinioErrorCode; use minio::s3::response::a_response_traits::{ HasBucket, HasEtagFromHeaders, HasObject, HasObjectSize, }; @@ -118,7 +119,7 @@ async fn append_object_0(ctx: TestContext, bucket_name: String) { .to_vec(), ) .unwrap(); - assert_eq!(content, format!("{}{}", content1, content2)); + assert_eq!(content, format!("{content1}{content2}")); } /// Append to the beginning of an existing object (happy flow) @@ -191,11 +192,11 @@ async fn append_object_2(ctx: TestContext, bucket_name: String) { .await; match resp { - Ok(v) => panic!("append object should have failed; got value: {:?}", v), - Err(Error::S3Error(e)) => { - assert_eq!(e.code, ErrorCode::InvalidWriteOffset); + Ok(v) => panic!("append object should have failed; got value: {v:?}"), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::InvalidWriteOffset); } - Err(e) => panic!("append object should have failed; got error: {:?}", e), + Err(e) => panic!("append object should have failed; got error: {e:?}"), } } @@ -220,11 +221,11 @@ async fn append_object_3(ctx: TestContext, bucket_name: String) { .await; match resp { - Ok(v) => panic!("append object should have failed; got value: {:?}", v), - Err(Error::S3Error(e)) => { - assert_eq!(e.code, ErrorCode::InvalidWriteOffset); + Ok(v) => panic!("append object should have failed; got value: {v:?}"), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::InvalidWriteOffset); } - Err(e) => panic!("append object should have failed; got error: {:?}", e), + Err(e) => panic!("append object should have failed; got error: {e:?}"), } } @@ -289,11 +290,11 @@ async fn append_object_5(ctx: TestContext, bucket_name: String) { .await; match resp { - Ok(v) => panic!("append object should have failed; got value: {:?}", v), - 
Err(Error::S3Error(e)) => { - assert_eq!(e.code, ErrorCode::NoSuchKey); + Ok(v) => panic!("append object should have failed; got value: {v:?}"), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NoSuchKey); } - Err(e) => panic!("append object should have failed; got error: {:?}", e), + Err(e) => panic!("append object should have failed; got error: {e:?}"), } } @@ -337,7 +338,7 @@ async fn append_object_content_0(ctx: TestContext, bucket_name: String) { .to_vec(), ) .unwrap(); - assert_eq!(content, format!("{}{}", content1, content2)); + assert_eq!(content, format!("{content1}{content2}")); } #[minio_macros::test(skip_if_not_express)] diff --git a/tests/test_bucket_create_delete.rs b/tests/test_bucket_create_delete.rs index e18efc2..50fced6 100644 --- a/tests/test_bucket_create_delete.rs +++ b/tests/test_bucket_create_delete.rs @@ -14,7 +14,8 @@ // limitations under the License. use minio::s3::client::DEFAULT_REGION; -use minio::s3::error::{Error, ErrorCode}; +use minio::s3::error::{Error, S3ServerError}; +use minio::s3::minio_error_response::MinioErrorCode; use minio::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion}; use minio::s3::response::{ BucketExistsResponse, CreateBucketResponse, DeleteBucketResponse, PutObjectContentResponse, @@ -43,10 +44,12 @@ async fn bucket_create(ctx: TestContext) { ctx.client.create_bucket(&bucket_name).send().await; match resp { Ok(_) => panic!("Bucket already exists, but was created again"), - Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::BucketAlreadyOwnedByYou) => { + Err(Error::S3Server(S3ServerError::S3Error(e))) + if matches!(e.code(), MinioErrorCode::BucketAlreadyOwnedByYou) => + { // this is expected, as the bucket already exists } - Err(e) => panic!("Unexpected error: {:?}", e), + Err(e) => panic!("Unexpected error: {e:?}"), } } @@ -59,10 +62,12 @@ async fn bucket_delete(ctx: TestContext) { ctx.client.delete_bucket(&bucket_name).send().await; match resp { 
Ok(_) => panic!("Bucket does not exist, but was removed"), - Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchBucket) => { + Err(Error::S3Server(S3ServerError::S3Error(e))) + if matches!(e.code(), MinioErrorCode::NoSuchBucket) => + { // this is expected, as the bucket does not exist } - Err(e) => panic!("Unexpected error: {:?}", e), + Err(e) => panic!("Unexpected error: {e:?}"), } // create a new bucket diff --git a/tests/test_bucket_lifecycle.rs b/tests/test_bucket_lifecycle.rs index 5b0a9e7..d613509 100644 --- a/tests/test_bucket_lifecycle.rs +++ b/tests/test_bucket_lifecycle.rs @@ -14,8 +14,9 @@ // limitations under the License. use minio::s3::client::DEFAULT_REGION; -use minio::s3::error::{Error, ErrorCode}; +use minio::s3::error::{Error, S3ServerError}; use minio::s3::lifecycle_config::LifecycleConfig; +use minio::s3::minio_error_response::MinioErrorCode; use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteBucketLifecycleResponse, GetBucketLifecycleResponse, PutBucketLifecycleResponse, @@ -74,10 +75,9 @@ async fn bucket_lifecycle(ctx: TestContext, bucket_name: String) { let resp: Result = ctx.client.get_bucket_lifecycle(&bucket_name).send().await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NoSuchLifecycleConfiguration), - v => panic!( - "Expected error S3Error(NoSuchLifecycleConfiguration): but got {:?}", - v - ), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NoSuchLifecycleConfiguration) + } + v => panic!("Expected error S3Error(NoSuchLifecycleConfiguration): but got {v:?}"), } } diff --git a/tests/test_bucket_replication.rs b/tests/test_bucket_replication.rs index e7b3d1e..d95c009 100644 --- a/tests/test_bucket_replication.rs +++ b/tests/test_bucket_replication.rs @@ -15,7 +15,8 @@ use minio::s3::builders::VersioningStatus; use minio::s3::client::DEFAULT_REGION; -use minio::s3::error::{Error, ErrorCode}; +use 
minio::s3::error::{Error, S3ServerError}; +use minio::s3::minio_error_response::MinioErrorCode; use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{ DeleteBucketReplicationResponse, GetBucketReplicationResponse, GetBucketVersioningResponse, @@ -117,7 +118,7 @@ async fn bucket_replication_s3(ctx: TestContext, bucket_name: String) { .send() .await .unwrap(); - println!("response of deleting replication: resp={:?}", resp); + println!("response of deleting replication: resp={resp:?}"); } let _resp: GetBucketVersioningResponse = ctx .client @@ -140,15 +141,19 @@ async fn bucket_replication_s3express(ctx: TestContext, bucket_name: String) { .send() .await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } let resp: Result = ctx.client.get_bucket_replication(&bucket_name).send().await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } let resp: Result = ctx @@ -157,7 +162,9 @@ async fn bucket_replication_s3express(ctx: TestContext, bucket_name: String) { .send() .await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } } diff --git a/tests/test_bucket_tagging.rs 
b/tests/test_bucket_tagging.rs index 99e7f54..ca775de 100644 --- a/tests/test_bucket_tagging.rs +++ b/tests/test_bucket_tagging.rs @@ -14,7 +14,8 @@ // limitations under the License. use minio::s3::client::DEFAULT_REGION; -use minio::s3::error::{Error, ErrorCode}; +use minio::s3::error::{Error, S3ServerError}; +use minio::s3::minio_error_response::MinioErrorCode; use minio::s3::response::a_response_traits::{HasBucket, HasRegion, HasTagging}; use minio::s3::response::{ DeleteBucketTaggingResponse, GetBucketTaggingResponse, PutBucketTaggingResponse, @@ -78,21 +79,27 @@ async fn bucket_tags_s3express(ctx: TestContext, bucket_name: String) { .send() .await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } let resp: Result = ctx.client.get_bucket_tagging(&bucket_name).send().await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } let resp: Result = ctx.client.delete_bucket_tagging(&bucket_name).send().await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } } diff --git a/tests/test_bucket_versioning.rs b/tests/test_bucket_versioning.rs index 96e4868..837b5f3 100644 --- a/tests/test_bucket_versioning.rs +++ 
b/tests/test_bucket_versioning.rs @@ -15,7 +15,8 @@ use minio::s3::builders::VersioningStatus; use minio::s3::client::DEFAULT_REGION; -use minio::s3::error::{Error, ErrorCode}; +use minio::s3::error::{Error, S3ServerError}; +use minio::s3::minio_error_response::MinioErrorCode; use minio::s3::response::a_response_traits::{HasBucket, HasRegion}; use minio::s3::response::{GetBucketVersioningResponse, PutBucketVersioningResponse}; use minio::s3::types::S3Api; @@ -73,14 +74,18 @@ async fn bucket_versioning_s3express(ctx: TestContext, bucket_name: String) { .send() .await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } let resp: Result = ctx.client.get_bucket_versioning(&bucket_name).send().await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } } diff --git a/tests/test_get_presigned_object_url.rs b/tests/test_get_presigned_object_url.rs index e02953e..1e911be 100644 --- a/tests/test_get_presigned_object_url.rs +++ b/tests/test_get_presigned_object_url.rs @@ -15,6 +15,7 @@ use http::Method; use minio::s3::client::DEFAULT_REGION; +use minio::s3::header_constants::*; use minio::s3::response::GetPresignedObjectUrlResponse; use minio_common::test_context::TestContext; use minio_common::utils::rand_object_name; @@ -28,7 +29,7 @@ async fn get_presigned_object_url(ctx: TestContext, bucket_name: String) { .send() .await .unwrap(); - assert!(resp.url.contains("X-Amz-Signature=")); + 
assert!(resp.url.contains(X_AMZ_SIGNATURE)); assert_eq!(resp.bucket, bucket_name); assert_eq!(resp.object, object_name); assert_eq!(resp.region, DEFAULT_REGION); diff --git a/tests/test_get_presigned_post_form_data.rs b/tests/test_get_presigned_post_form_data.rs index 122bf5a..2b1defd 100644 --- a/tests/test_get_presigned_post_form_data.rs +++ b/tests/test_get_presigned_post_form_data.rs @@ -14,6 +14,7 @@ // limitations under the License. use minio::s3::builders::PostPolicy; +use minio::s3::header_constants::*; use minio_common::example::create_post_policy_example; use minio_common::test_context::TestContext; use minio_common::utils::rand_object_name; @@ -32,9 +33,9 @@ async fn get_presigned_post_form_data(ctx: TestContext, bucket_name: String) { .await .unwrap(); //println!("form_data={:?}", &form_data); - assert!(form_data.contains_key("x-amz-signature")); - assert!(form_data.contains_key("policy")); - assert!(form_data.contains_key("x-amz-date")); - assert!(form_data.contains_key("x-amz-algorithm")); - assert!(form_data.contains_key("x-amz-credential")); + assert!(form_data.contains_key(X_AMZ_SIGNATURE)); + assert!(form_data.contains_key(POLICY)); + assert!(form_data.contains_key(X_AMZ_DATE)); + assert!(form_data.contains_key(X_AMZ_ALGORITHM)); + assert!(form_data.contains_key(X_AMZ_CREDENTIAL)); } diff --git a/tests/test_object_put.rs b/tests/test_object_put.rs index a0d36d2..d2b6afd 100644 --- a/tests/test_object_put.rs +++ b/tests/test_object_put.rs @@ -137,7 +137,7 @@ async fn put_object_content_1(ctx: TestContext, bucket_name: String) { .await .unwrap(); - assert!(!resp.is_delete_marker().unwrap().unwrap()); + assert!(!resp.is_delete_marker().unwrap()); } } diff --git a/tests/test_select_object_content.rs b/tests/test_select_object_content.rs index 59b42aa..843b345 100644 --- a/tests/test_select_object_content.rs +++ b/tests/test_select_object_content.rs @@ -13,7 +13,8 @@ // See the License for the specific language governing permissions and // limitations 
under the License. -use minio::s3::error::{Error, ErrorCode}; +use minio::s3::error::{Error, S3ServerError}; +use minio::s3::minio_error_response::MinioErrorCode; use minio::s3::response::a_response_traits::{HasBucket, HasObject}; use minio::s3::response::{PutObjectContentResponse, SelectObjectContentResponse}; use minio::s3::types::{S3Api, SelectRequest}; @@ -75,7 +76,9 @@ async fn select_object_content_express(ctx: TestContext, bucket_name: String) { .send() .await; match resp { - Err(Error::S3Error(e)) => assert_eq!(e.code, ErrorCode::NotSupported), - v => panic!("Expected error S3Error(NotSupported): but got {:?}", v), + Err(Error::S3Server(S3ServerError::S3Error(e))) => { + assert_eq!(e.code(), MinioErrorCode::NotSupported) + } + v => panic!("Expected error S3Error(NotSupported): but got {v:?}"), } }