diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 4ee3af4..37a6b9c 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -36,6 +36,7 @@ jobs: export ACCESS_KEY=minioadmin export SECRET_KEY=minioadmin export ENABLE_HTTPS=1 + export SERVER_REGION=us-east-1 export MINIO_SSL_CERT_FILE=./tests/public.crt MINIO_TEST_TOKIO_RUNTIME_FLAVOR="multi_thread" cargo test -- --nocapture test-current-thread: @@ -49,6 +50,7 @@ jobs: export ACCESS_KEY=minioadmin export SECRET_KEY=minioadmin export ENABLE_HTTPS=1 + export SERVER_REGION=us-east-1 export MINIO_SSL_CERT_FILE=./tests/public.crt MINIO_TEST_TOKIO_RUNTIME_FLAVOR="current_thread" cargo test -- --nocapture diff --git a/Cargo.toml b/Cargo.toml index 7173088..c05564d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -22,9 +22,15 @@ localhost = [] [workspace.dependencies] uuid = "1.18" futures-util = "0.3" -reqwest = { version = "0.12", default-features = false } -bytes = "1.10" +futures-io = "0.3" +reqwest = { version = "0.12", default-features = false } +bytes = "1.11" async-std = "1.13" +tokio = "1.48" +rand = "0.9" +log = "0.4" +chrono = "0.4" +http = "1.4" [dependencies] @@ -38,14 +44,14 @@ async-recursion = "1.1" async-stream = "0.3" async-trait = "0.1" base64 = "0.22" -chrono = { version = "0.4", features = ["serde"] } -crc = "3.3" +chrono = { workspace = true, features = ["serde"] } +crc = "3.4" dashmap = "6.1.0" env_logger = "0.11" hmac = { version = "0.12", optional = true } -hyper = { version = "1.7", features = ["full"] } +hyper = { version = "1.8", features = ["full"] } lazy_static = "1.5" -log = "0.4" +log = { workspace = true } md5 = "0.8" multimap = "0.10" percent-encoding = "2.3" @@ -58,19 +64,19 @@ serde_yaml = "0.9" sha2 = { version = "0.10", optional = true } urlencoding = "2.1" xmltree = "0.12" -http = "1.3" +http = { workspace = true } thiserror = "2.0" typed-builder = "0.23" [dev-dependencies] minio-common = { path = "./common" } minio-macros = { path = "./macros" } 
-tokio = { version = "1.48", features = ["full"] } -async-std = { version = "1.13", features = ["attributes", "tokio1"] } +tokio = { workspace = true, features = ["full"] } +async-std = { workspace = true, features = ["attributes", "tokio1"] } clap = { version = "4.5", features = ["derive"] } -rand = { version = "0.9", features = ["small_rng"] } +rand = { workspace = true, features = ["small_rng"] } quickcheck = "1.0" -criterion = "0.7" +criterion = "0.8" [lib] name = "minio" diff --git a/common/Cargo.toml b/common/Cargo.toml index 72e8954..2b52c1c 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -4,20 +4,17 @@ version = "0.1.0" edition = "2024" [dependencies] -minio = {path = ".." } +minio = { path = ".." } uuid = { workspace = true, features = ["v4"] } reqwest = { workspace = true } bytes = { workspace = true } async-std = { workspace = true } - -futures-io = "0.3.31" -tokio = { version = "1.47.1", features = ["full"] } -rand = { version = "0.9.2", features = ["small_rng"] } - -log = "0.4.27" -chrono = "0.4.41" - -http = "1.3.1" +futures-io = { workspace = true } +tokio = { workspace = true, features = ["full"] } +rand = { workspace = true, features = ["small_rng"] } +log = { workspace = true } +chrono = { workspace = true } +http = { workspace = true } [lib] name = "minio_common" diff --git a/common/src/example.rs b/common/src/example.rs index 2100943..20e486d 100644 --- a/common/src/example.rs +++ b/common/src/example.rs @@ -52,7 +52,7 @@ pub fn create_bucket_notification_config_example() -> NotificationConfig { suffix_filter_rule: Some(SuffixFilterRule { value: String::from("pg"), }), - queue: String::from("arn:minio:sqs::miniojavatest:webhook"), + queue: String::from("arn:minio:sqs:us-east-1:miniojavatest:webhook"), }]), ..Default::default() } diff --git a/macros/Cargo.toml b/macros/Cargo.toml index 9887102..1c4dc43 100644 --- a/macros/Cargo.toml +++ b/macros/Cargo.toml @@ -7,11 +7,11 @@ edition = "2024" uuid = { workspace = true, features = ["v4"] } 
futures-util = { workspace = true } -syn = "2.0.104" -proc-macro2 = "1.0.97" -quote = "1.0.40" -darling = "0.21.0" -darling_core = "0.21.0" +syn = "2.0" +proc-macro2 = "1.0" +quote = "1.0" +darling = "0.21" +darling_core = "0.21" [dev-dependencies] minio-common = { path = "../common" } diff --git a/src/s3/builders/append_object.rs b/src/s3/builders/append_object.rs index f2a3c57..60494e4 100644 --- a/src/s3/builders/append_object.rs +++ b/src/s3/builders/append_object.rs @@ -61,8 +61,8 @@ pub struct AppendObject { #[builder(!default)] // force required data: Arc, + /// Value of `x-amz-write-offset-bytes`. #[builder(!default)] // force required - /// value of x-amz-write-offset-bytes offset_bytes: u64, } @@ -141,7 +141,7 @@ pub struct AppendObjectContent { content_stream: ContentStream, #[builder(default)] part_count: Option, - /// Value of x-amz-write-offset-bytes + /// Value of `x-amz-write-offset-bytes`. #[builder(default)] offset_bytes: u64, } @@ -243,7 +243,7 @@ impl AppendObjectContent { } } - /// multipart append + /// Performs multipart append. async fn send_mpa( &mut self, part_size: u64, diff --git a/src/s3/builders/bucket_common.rs b/src/s3/builders/bucket_common.rs index 9fa7f7f..67638ed 100644 --- a/src/s3/builders/bucket_common.rs +++ b/src/s3/builders/bucket_common.rs @@ -18,7 +18,7 @@ use crate::s3::multimap_ext::Multimap; use std::marker::PhantomData; use typed_builder::TypedBuilder; -/// Common parameters for bucket operations +/// Common parameters for bucket operations. 
#[derive(Clone, Debug, TypedBuilder)] pub struct BucketCommon { #[builder(!default)] // force required diff --git a/src/s3/builders/bucket_exists.rs b/src/s3/builders/bucket_exists.rs index cc47ee9..e4ed035 100644 --- a/src/s3/builders/bucket_exists.rs +++ b/src/s3/builders/bucket_exists.rs @@ -21,10 +21,10 @@ use crate::s3::types::{S3Api, S3Request, ToS3Request}; use crate::s3::utils::check_bucket_name; use http::Method; -/// This struct constructs the parameters required for the [`Client::bucket_exists`](crate::s3::client::MinioClient::bucket_exists) method. +/// Constructs the parameters for the [`Client::bucket_exists`](crate::s3::client::MinioClient::bucket_exists) method. /// /// See [Amazon S3: Working with Buckets](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingBucket.html) -/// for more information about checking if a bucket exists. +/// for more information. pub type BucketExists = BucketCommon; #[doc(hidden)] diff --git a/src/s3/builders/copy_object.rs b/src/s3/builders/copy_object.rs index 65fc386..52ae22e 100644 --- a/src/s3/builders/copy_object.rs +++ b/src/s3/builders/copy_object.rs @@ -587,7 +587,7 @@ impl ComposeObjectInternal { size -= o; } - let mut offset = source.offset.unwrap_or_default(); + let offset = source.offset.unwrap_or_default(); let mut headers = source.get_headers(); headers.add_multimap(ssec_headers.clone()); @@ -631,19 +631,15 @@ impl ComposeObjectInternal { size, }); } else { - while size > 0 { + let part_ranges = calculate_part_ranges(offset, size, MAX_PART_SIZE); + for (part_offset, length) in part_ranges { part_number += 1; - - let mut length = size; - if length > MAX_PART_SIZE { - length = MAX_PART_SIZE; - } - let end_bytes = offset + length - 1; + let end_bytes = part_offset + length - 1; let mut headers_copy = headers.clone(); headers_copy.add( X_AMZ_COPY_SOURCE_RANGE, - format!("bytes={offset}-{end_bytes}"), + format!("bytes={part_offset}-{end_bytes}"), ); let resp: UploadPartCopyResponse = match self @@ 
-668,11 +664,8 @@ impl ComposeObjectInternal { parts.push(PartInfo { number: part_number, etag, - size, + size: length, }); - - offset += length; - size -= length; } } } @@ -796,8 +789,8 @@ impl ComposeObject { // region: misc +/// Source object information for [`compose_object`](MinioClient::compose_object). #[derive(Clone, Debug, Default)] -/// Source object information for [compose_object](MinioClient::compose_object) pub struct ComposeSource { pub extra_headers: Option, pub extra_query_params: Option, @@ -818,7 +811,7 @@ pub struct ComposeSource { } impl ComposeSource { - /// Returns a compose source with given bucket name and object name + /// Returns a compose source with given bucket name and object name. /// /// # Examples /// @@ -927,8 +920,8 @@ impl ComposeSource { } } +/// Base argument for object conditional read APIs. #[derive(Clone, Debug, TypedBuilder)] -/// Base argument for object conditional read APIs pub struct CopySource { #[builder(default, setter(into))] pub extra_headers: Option, @@ -1036,4 +1029,122 @@ fn into_headers_copy_object( map } + +/// Calculates part ranges (offset, length) for multipart copy operations. +/// +/// Given a starting offset, total size, and maximum part size, returns a vector of +/// (offset, length) tuples for each part. This is extracted as a separate function +/// to enable unit testing without requiring actual S3 operations or multi-gigabyte files. 
+/// +/// # Arguments +/// * `start_offset` - Starting byte offset +/// * `total_size` - Total bytes to copy +/// * `max_part_size` - Maximum size per part (typically MAX_PART_SIZE = 5GB) +/// +/// # Returns +/// Vector of (offset, length) tuples for each part +fn calculate_part_ranges( + start_offset: u64, + total_size: u64, + max_part_size: u64, +) -> Vec<(u64, u64)> { + let mut ranges = Vec::new(); + let mut offset = start_offset; + let mut remaining = total_size; + + while remaining > 0 { + let length = remaining.min(max_part_size); + ranges.push((offset, length)); + offset += length; + remaining -= length; + } + + ranges +} + // endregion: misc + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_calculate_part_ranges_single_part() { + // Size <= max_part_size should return single part + let ranges = calculate_part_ranges(0, 1000, 5000); + assert_eq!(ranges, vec![(0, 1000)]); + } + + #[test] + fn test_calculate_part_ranges_exact_multiple() { + // Size exactly divisible by max_part_size + let ranges = calculate_part_ranges(0, 10000, 5000); + assert_eq!(ranges, vec![(0, 5000), (5000, 5000)]); + } + + #[test] + fn test_calculate_part_ranges_with_remainder() { + // Size with remainder + let ranges = calculate_part_ranges(0, 12000, 5000); + assert_eq!(ranges, vec![(0, 5000), (5000, 5000), (10000, 2000)]); + } + + #[test] + fn test_calculate_part_ranges_with_start_offset() { + // Starting from non-zero offset + let ranges = calculate_part_ranges(1000, 12000, 5000); + assert_eq!(ranges, vec![(1000, 5000), (6000, 5000), (11000, 2000)]); + } + + #[test] + fn test_calculate_part_ranges_zero_size() { + // Zero size edge case - returns empty + let ranges = calculate_part_ranges(0, 0, 5000); + assert!(ranges.is_empty()); + } + + #[test] + fn test_calculate_part_ranges_realistic() { + // Simulate 12GB file with 5GB max part size + let total_size: u64 = 12 * 1024 * 1024 * 1024; // 12 GB + let max_part_size: u64 = 5 * 1024 * 1024 * 1024; // 5 GB + + let ranges = 
calculate_part_ranges(0, total_size, max_part_size); + + assert_eq!(ranges.len(), 3); + assert_eq!(ranges[0], (0, max_part_size)); // 0-5GB + assert_eq!(ranges[1], (max_part_size, max_part_size)); // 5GB-10GB + assert_eq!(ranges[2], (2 * max_part_size, 2 * 1024 * 1024 * 1024)); // 10GB-12GB + + // Verify total size matches + let total: u64 = ranges.iter().map(|(_, len)| len).sum(); + assert_eq!(total, total_size); + + // Verify offsets are contiguous + let mut expected_offset = 0; + for (offset, length) in &ranges { + assert_eq!(*offset, expected_offset); + expected_offset += length; + } + } + + #[test] + fn test_calculate_part_ranges_each_part_correct_length() { + // This test catches the bug where `size` (remaining) was used instead of `length` + let ranges = calculate_part_ranges(0, 17000, 5000); + + // Should be [(0,5000), (5000,5000), (10000,5000), (15000,2000)] + // NOT [(0,17000), (17000,12000), ...] which the buggy code would produce + assert_eq!( + ranges, + vec![(0, 5000), (5000, 5000), (10000, 5000), (15000, 2000)] + ); + + // Each non-final part should be exactly max_part_size + for (i, (_, length)) in ranges.iter().enumerate() { + if i < ranges.len() - 1 { + assert_eq!(*length, 5000, "Part {} should be max_part_size", i); + } + } + } +} diff --git a/src/s3/builders/delete_objects.rs b/src/s3/builders/delete_objects.rs index 8e88cda..534db51 100644 --- a/src/s3/builders/delete_objects.rs +++ b/src/s3/builders/delete_objects.rs @@ -37,15 +37,16 @@ impl ValidKey for String {} impl ValidKey for &str {} impl ValidKey for &String {} -/// Specify an object to be deleted. The object can be specified by key or by -/// key and version_id via the From trait. +/// Specifies an object to be deleted. +/// +/// The object can be specified by key or by key and version_id via the `From` trait. #[derive(Debug, Clone, Default)] pub struct ObjectToDelete { key: String, version_id: Option, } -/// A key can be converted into a DeleteObject. The version_id is set to None. 
+/// A key can be converted into a `DeleteObject` with `version_id` set to `None`. impl From for ObjectToDelete { fn from(key: K) -> Self { Self { @@ -55,7 +56,7 @@ impl From for ObjectToDelete { } } -/// A tuple of key and version_id can be converted into a DeleteObject. +/// A tuple of key and version_id can be converted into a `DeleteObject`. impl From<(K, &str)> for ObjectToDelete { fn from((key, version_id): (K, &str)) -> Self { Self { @@ -65,7 +66,7 @@ impl From<(K, &str)> for ObjectToDelete { } } -/// A tuple of key and option version_id can be converted into a DeleteObject. +/// A tuple of key and optional version_id can be converted into a `DeleteObject`. impl From<(K, Option<&str>)> for ObjectToDelete { fn from((key, version_id): (K, Option<&str>)) -> Self { Self { @@ -178,9 +179,10 @@ pub struct DeleteObjects { #[builder(default)] bypass_governance_mode: bool, - /// Enable verbose mode (defaults to false). If enabled, the response will - /// include the keys of objects that were successfully deleted. Otherwise, - /// only objects that encountered an error are returned. + /// Enables verbose mode (defaults to false). + /// + /// If enabled, the response will include the keys of objects that were successfully + /// deleted. Otherwise, only objects that encountered an error are returned. #[builder(default)] verbose_mode: bool, } @@ -320,9 +322,10 @@ impl DeleteObjectsStreaming { self } - /// Enable verbose mode (defaults to false). If enabled, the response will - /// include the keys of objects that were successfully deleted. Otherwise - /// only objects that encountered an error are returned. + /// Enables verbose mode (defaults to false). + /// + /// If enabled, the response will include the keys of objects that were successfully + /// deleted. Otherwise, only objects that encountered an error are returned. 
pub fn verbose_mode(mut self, verbose_mode: bool) -> Self { self.verbose_mode = verbose_mode; self @@ -338,7 +341,7 @@ impl DeleteObjectsStreaming { self } - /// Sets the region for the request + /// Sets the region for the request. pub fn region(mut self, region: Option) -> Self { self.region = region; self diff --git a/src/s3/builders/get_presigned_policy_form_data.rs b/src/s3/builders/get_presigned_policy_form_data.rs index a6a7ae3..d5d46be 100644 --- a/src/s3/builders/get_presigned_policy_form_data.rs +++ b/src/s3/builders/get_presigned_policy_form_data.rs @@ -61,10 +61,10 @@ impl GetPresignedPolicyFormData { pub type GetPresignedPolicyFormDataBldr = GetPresignedPolicyFormDataBuilder<((MinioClient,), (PostPolicy,))>; -/// Post policy information for presigned post policy form-data +/// Post policy information for presigned POST policy form-data. /// -/// Condition elements and respective condition for Post policy is available here. +/// See [Post Policy Conditions](https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-HTTPPOSTConstructPolicy.html#sigv4-PolicyConditions) +/// for condition elements and their usage. #[derive(Clone, Debug)] pub struct PostPolicy { pub region: Option, @@ -82,7 +82,7 @@ impl PostPolicy { const STARTS_WITH: &'static str = "starts-with"; const ALGORITHM: &'static str = "AWS4-HMAC-SHA256"; - /// Returns post policy with given bucket name and expiration + /// Returns a post policy with given bucket name and expiration. /// /// # Examples /// diff --git a/src/s3/creds.rs b/src/s3/creds.rs index 65b9cb5..dff35db 100644 --- a/src/s3/creds.rs +++ b/src/s3/creds.rs @@ -15,27 +15,27 @@ //! Credential providers +/// Credentials containing access key, secret key, and optional session token. 
#[derive(Clone, Debug)] -/// Credentials contain access key, secret key and session token optionally pub struct Credentials { pub access_key: String, pub secret_key: String, pub session_token: Option, } -/// Provider trait to fetch credentials +/// Provider trait to fetch credentials. pub trait Provider: std::fmt::Debug { fn fetch(&self) -> Credentials; } +/// Static credential provider. #[derive(Clone, Debug)] -/// Static credential provider pub struct StaticProvider { creds: Credentials, } impl StaticProvider { - /// Returns a static provider with given access key, secret key and optional session token + /// Returns a static provider with given access key, secret key, and optional session token. /// /// # Examples /// diff --git a/src/s3/http.rs b/src/s3/http.rs index e4b7ea4..10e8541 100644 --- a/src/s3/http.rs +++ b/src/s3/http.rs @@ -33,8 +33,8 @@ lazy_static! { static ref AWS_S3_PREFIX_REGEX: Regex = Regex::new(AWS_S3_PREFIX).unwrap(); } +/// Represents HTTP URL. #[derive(Clone, Debug)] -/// Represents HTTP URL pub struct Url { pub https: bool, pub host: String, @@ -212,8 +212,8 @@ fn get_aws_info( Ok(()) } +/// Represents base URL of S3 endpoint. #[derive(Clone, Debug)] -/// Represents Base URL of S3 endpoint pub struct BaseUrl { pub https: bool, host: String, diff --git a/src/s3/object_content.rs b/src/s3/object_content.rs index 15383c7..117e31e 100644 --- a/src/s3/object_content.rs +++ b/src/s3/object_content.rs @@ -82,8 +82,9 @@ impl Arbitrary for Size { } // endregion: Size -/// Object content that can be uploaded or downloaded. Can be constructed from a stream of `Bytes`, -/// a file path, or a `Bytes` object. +/// Object content that can be uploaded or downloaded. +/// +/// Can be constructed from a stream of `Bytes`, a file path, or a `Bytes` object. 
pub struct ObjectContent(ObjectContentInner); enum ObjectContentInner { diff --git a/src/s3/response/append_object.rs b/src/s3/response/append_object.rs index 9c8eb7e..392a31e 100644 --- a/src/s3/response/append_object.rs +++ b/src/s3/response/append_object.rs @@ -21,8 +21,9 @@ use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::Bytes; use http::HeaderMap; -/// Represents the response of the `append_object` API call. -/// This struct contains metadata and information about the object being appended. +/// Response from the `append_object` API. +/// +/// Contains metadata about the object being appended. #[derive(Clone, Debug)] pub struct AppendObjectResponse { request: S3Request, diff --git a/src/s3/response/bucket_exists.rs b/src/s3/response/bucket_exists.rs index 6f92821..d9075ae 100644 --- a/src/s3/response/bucket_exists.rs +++ b/src/s3/response/bucket_exists.rs @@ -24,8 +24,9 @@ use bytes::Bytes; use http::HeaderMap; use std::mem; -/// Represents the response of the [bucket_exists()](crate::s3::client::MinioClient::bucket_exists) API call. -/// This struct contains metadata and information about the existence of a bucket. +/// Response from the [`bucket_exists()`](crate::s3::client::MinioClient::bucket_exists) API. +/// +/// Contains information about the existence of a bucket. #[derive(Clone, Debug)] pub struct BucketExistsResponse { request: S3Request, diff --git a/src/s3/response/create_bucket.rs b/src/s3/response/create_bucket.rs index 0fe643a..e4bab68 100644 --- a/src/s3/response/create_bucket.rs +++ b/src/s3/response/create_bucket.rs @@ -22,9 +22,7 @@ use bytes::Bytes; use http::HeaderMap; use std::mem; -/// Response of -/// [create_bucket()](crate::s3::client::MinioClient::create_bucket) -/// API +/// Response from the [`create_bucket()`](crate::s3::client::MinioClient::create_bucket) API. 
#[derive(Clone, Debug)] pub struct CreateBucketResponse { request: S3Request, diff --git a/src/s3/response/delete_bucket.rs b/src/s3/response/delete_bucket.rs index 7a3dd76..8eba96f 100644 --- a/src/s3/response/delete_bucket.rs +++ b/src/s3/response/delete_bucket.rs @@ -22,9 +22,7 @@ use bytes::Bytes; use http::HeaderMap; use std::mem; -/// Response of -/// [delete_bucket()](crate::s3::client::MinioClient::delete_bucket) -/// API +/// Response from the [`delete_bucket()`](crate::s3::client::MinioClient::delete_bucket) API. #[derive(Clone, Debug)] pub struct DeleteBucketResponse { pub(crate) request: S3Request, diff --git a/src/s3/response/delete_bucket_notification.rs b/src/s3/response/delete_bucket_notification.rs index 4e03513..e42d2d6 100644 --- a/src/s3/response/delete_bucket_notification.rs +++ b/src/s3/response/delete_bucket_notification.rs @@ -19,8 +19,9 @@ use crate::{impl_from_s3response, impl_has_s3fields}; use bytes::Bytes; use http::HeaderMap; -/// Represents the response of the [delete_bucket_notification()](crate::s3::client::MinioClient::delete_bucket_notification) API call. -/// This struct contains metadata and information about the bucket whose notifications were removed. +/// Response from the [`delete_bucket_notification()`](crate::s3::client::MinioClient::delete_bucket_notification) API. +/// +/// Contains metadata about the bucket whose notifications were removed. #[derive(Clone, Debug)] pub struct DeleteBucketNotificationResponse { request: S3Request, diff --git a/src/s3/response/delete_bucket_replication.rs b/src/s3/response/delete_bucket_replication.rs index 3dd95f4..d3d1f7e 100644 --- a/src/s3/response/delete_bucket_replication.rs +++ b/src/s3/response/delete_bucket_replication.rs @@ -23,8 +23,9 @@ use bytes::Bytes; use http::HeaderMap; use std::mem; -/// Represents the response of the `[delete_bucket_replication()](crate::s3::client::MinioClient::delete_bucket_replication) API call. 
-/// This struct contains metadata and information about the bucket whose replication configuration was removed. +/// Response from the [`delete_bucket_replication()`](crate::s3::client::MinioClient::delete_bucket_replication) API. +/// +/// Contains metadata about the bucket whose replication configuration was removed. #[derive(Clone, Debug)] pub struct DeleteBucketReplicationResponse { request: S3Request, diff --git a/src/s3/response/delete_object.rs b/src/s3/response/delete_object.rs index 5efe215..ce37017 100644 --- a/src/s3/response/delete_object.rs +++ b/src/s3/response/delete_object.rs @@ -80,11 +80,11 @@ impl DeleteResult { } } -/// Response of -/// [delete_objects()](crate::s3::client::MinioClient::delete_objects) -/// S3 API. It is also returned by the -/// [remove_objects()](crate::s3::client::MinioClient::delete_objects_streaming) API in the -/// form of a stream. +/// Response of the [`delete_objects()`](crate::s3::client::MinioClient::delete_objects) S3 API. +/// +/// It is also returned by the +/// [`delete_objects_streaming()`](crate::s3::client::MinioClient::delete_objects_streaming) API +/// in the form of a stream. #[derive(Clone, Debug)] pub struct DeleteObjectsResponse { request: S3Request, @@ -95,6 +95,9 @@ pub struct DeleteObjectsResponse { impl_from_s3response!(DeleteObjectsResponse); impl_has_s3fields!(DeleteObjectsResponse); +impl HasBucket for DeleteObjectsResponse {} +impl HasRegion for DeleteObjectsResponse {} + impl DeleteObjectsResponse { /// Returns the bucket name for which the delete operation was performed. 
pub fn result(&self) -> Result, Error> { diff --git a/src/s3/response/stat_object.rs b/src/s3/response/stat_object.rs index 5bcad17..7e80f73 100644 --- a/src/s3/response/stat_object.rs +++ b/src/s3/response/stat_object.rs @@ -16,7 +16,8 @@ use crate::s3::error::ValidationErr; use crate::s3::header_constants::*; use crate::s3::response_traits::{ - HasBucket, HasEtagFromHeaders, HasIsDeleteMarker, HasObject, HasRegion, HasS3Fields, + HasBucket, HasEtagFromHeaders, HasIsDeleteMarker, HasObject, HasObjectSize, HasRegion, + HasS3Fields, HasVersion, }; use crate::s3::types::S3Request; use crate::s3::types::{RetentionMode, parse_legal_hold}; @@ -27,9 +28,10 @@ use http::HeaderMap; use http::header::LAST_MODIFIED; use std::collections::HashMap; +/// Response from the [`stat_object`](crate::s3::client::MinioClient::stat_object) API. +/// +/// Provides metadata about an object stored in S3 or a compatible service. #[derive(Clone, Debug)] -/// Response from the [`stat_object`](crate::s3::client::MinioClient::stat_object) API call, -/// providing metadata about an object stored in S3 or a compatible service. pub struct StatObjectResponse { request: S3Request, headers: HeaderMap, @@ -44,6 +46,8 @@ impl HasRegion for StatObjectResponse {} impl HasObject for StatObjectResponse {} impl HasEtagFromHeaders for StatObjectResponse {} impl HasIsDeleteMarker for StatObjectResponse {} +impl HasVersion for StatObjectResponse {} +impl HasObjectSize for StatObjectResponse {} impl StatObjectResponse { /// Returns the size of the object (header-value of `Content-Length`). @@ -55,7 +59,7 @@ impl StatObjectResponse { Ok(size) } - /// Return the last modified time of the object (header-value of `Last-Modified`). + /// Returns the last modified time of the object (header-value of `Last-Modified`). 
pub fn last_modified(&self) -> Result, ValidationErr> { match self.headers().get(LAST_MODIFIED) { Some(v) => Ok(Some(from_http_header_value(v.to_str()?)?)), @@ -63,7 +67,7 @@ impl StatObjectResponse { } } - /// Return the retention mode of the object (header-value of `x-amz-object-lock-mode`). + /// Returns the retention mode of the object (header-value of `x-amz-object-lock-mode`). pub fn retention_mode(&self) -> Result, ValidationErr> { match self.headers().get(X_AMZ_OBJECT_LOCK_MODE) { Some(v) => Ok(Some(RetentionMode::parse(v.to_str()?)?)), @@ -71,7 +75,7 @@ impl StatObjectResponse { } } - /// Return the retention date of the object (header-value of `x-amz-object-lock-retain-until-date`). + /// Returns the retention date of the object (header-value of `x-amz-object-lock-retain-until-date`). pub fn retention_retain_until_date(&self) -> Result, ValidationErr> { match self.headers().get(X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE) { Some(v) => Ok(Some(from_iso8601utc(v.to_str()?)?)), @@ -79,7 +83,7 @@ impl StatObjectResponse { } } - /// Return the legal hold status of the object (header-value of `x-amz-object-lock-legal-hold`). + /// Returns the legal hold status of the object (header-value of `x-amz-object-lock-legal-hold`). pub fn legal_hold(&self) -> Result, ValidationErr> { match self.headers().get(X_AMZ_OBJECT_LOCK_LEGAL_HOLD) { Some(v) => Ok(Some(parse_legal_hold(v.to_str()?)?)), diff --git a/src/s3/response_traits.rs b/src/s3/response_traits.rs index beb8e1a..8aa739c 100644 --- a/src/s3/response_traits.rs +++ b/src/s3/response_traits.rs @@ -173,17 +173,13 @@ pub trait HasObjectSize: HasS3Fields { } } -/// Value of the `x-amz-delete-marker` header. +/// Provides access to the `x-amz-delete-marker` header value. +/// /// Indicates whether the specified object version that was permanently deleted was (true) or /// was not (false) a delete marker before deletion. 
In a simple DELETE, this header indicates /// whether (true) or not (false) the current version of the object is a delete marker. pub trait HasIsDeleteMarker: HasS3Fields { /// Returns `true` if the object is a delete marker, `false` otherwise. - /// - /// Value of the `x-amz-delete-marker` header. - /// Indicates whether the specified object version that was permanently deleted was (true) or - /// was not (false) a delete marker before deletion. In a simple DELETE, this header indicates - /// whether (true) or not (false) the current version of the object is a delete marker. #[inline] fn is_delete_marker(&self) -> Result { self.headers() diff --git a/src/s3/signer.rs b/src/s3/signer.rs index 9be460d..f8f9306 100644 --- a/src/s3/signer.rs +++ b/src/s3/signer.rs @@ -26,7 +26,7 @@ use ring::hmac; #[cfg(not(feature = "ring"))] use sha2::Sha256; -/// Returns HMAC hash for given key and data +/// Returns HMAC hash for given key and data. fn hmac_hash(key: &[u8], data: &[u8]) -> Vec { #[cfg(feature = "ring")] { @@ -42,12 +42,12 @@ fn hmac_hash(key: &[u8], data: &[u8]) -> Vec { } } -/// Returns hex encoded HMAC hash for given key and data +/// Returns hex-encoded HMAC hash for given key and data. fn hmac_hash_hex(key: &[u8], data: &[u8]) -> String { hex_encode(hmac_hash(key, data).as_slice()) } -/// Returns scope value of given date, region and service name +/// Returns scope value of given date, region and service name. fn get_scope(date: UtcTime, region: &str, service_name: &str) -> String { format!( "{}/{region}/{service_name}/aws4_request", @@ -55,7 +55,7 @@ fn get_scope(date: UtcTime, region: &str, service_name: &str) -> String { ) } -/// Returns hex encoded SHA256 hash of canonical request +/// Returns hex-encoded SHA256 hash of canonical request. 
fn get_canonical_request_hash( method: &Method, uri: &str, @@ -70,7 +70,7 @@ fn get_canonical_request_hash( sha256_hash(canonical_request.as_bytes()) } -/// Returns string-to-sign value of given date, scope and canonical request hash +/// Returns string-to-sign value of given date, scope and canonical request hash. fn get_string_to_sign(date: UtcTime, scope: &str, canonical_request_hash: &str) -> String { format!( "AWS4-HMAC-SHA256\n{}\n{scope}\n{canonical_request_hash}", @@ -78,7 +78,7 @@ fn get_string_to_sign(date: UtcTime, scope: &str, canonical_request_hash: &str) ) } -/// Returns signing key of given secret key, date, region and service name +/// Returns signing key of given secret key, date, region and service name. fn get_signing_key(secret_key: &str, date: UtcTime, region: &str, service_name: &str) -> Vec<u8> { let mut key: Vec<u8> = b"AWS4".to_vec(); key.extend(secret_key.as_bytes()); @@ -89,12 +89,12 @@ fn get_signing_key(secret_key: &str, date: UtcTime, region: &str, service_name: hmac_hash(date_region_service_key.as_slice(), b"aws4_request") } -/// Returns signature value for given signing key and string-to-sign +/// Returns signature value for given signing key and string-to-sign. fn get_signature(signing_key: &[u8], string_to_sign: &[u8]) -> String { hmac_hash_hex(signing_key, string_to_sign) } -/// Returns authorization value for given access key, scope, signed headers and signature +/// Returns authorization value for given access key, scope, signed headers and signature. fn get_authorization( access_key: &str, scope: &str, @@ -106,7 +106,7 @@ fn get_authorization( ) } -/// Signs and updates headers for given parameters +/// Signs and updates headers for given parameters. fn sign_v4( service_name: &str, method: &Method, @@ -138,7 +138,7 @@ fn sign_v4( headers.add(AUTHORIZATION, authorization); } -/// Signs and updates headers for given parameters for S3 request +/// Signs and updates headers for the given S3 request parameters.
pub(crate) fn sign_v4_s3( method: &Method, uri: &str, @@ -164,7 +164,7 @@ pub(crate) fn sign_v4_s3( ) } -/// Signs and updates headers for given parameters for pre-sign request +/// Signs and updates query parameters for the given presigned request. pub(crate) fn presign_v4( method: &Method, host: &str, @@ -202,7 +202,7 @@ pub(crate) fn presign_v4( query_params.add(X_AMZ_SIGNATURE, signature); } -/// Signs and updates headers for given parameters for pre-sign POST request +/// Returns signature for the given presigned POST request parameters. pub(crate) fn post_presign_v4( string_to_sign: &str, secret_key: &str, diff --git a/src/s3/utils.rs b/src/s3/utils.rs index d1313f3..4ec5d4b 100644 --- a/src/s3/utils.rs +++ b/src/s3/utils.rs @@ -55,7 +55,7 @@ pub fn url_encode(s: &str) -> String { urlencoding::encode(s).into_owned() } -/// Encodes data using base64 algorithm +/// Encodes data using base64 algorithm. pub fn b64_encode(input: impl AsRef<[u8]>) -> String { base64::engine::general_purpose::STANDARD.encode(input) } @@ -66,7 +66,7 @@ pub fn crc32(data: &[u8]) -> u32 { Crc::<u32>::new(&CRC_32_ISO_HDLC).checksum(data) } -/// Converts data array into 32 bit BigEndian unsigned int +/// Converts data array into 32-bit BigEndian unsigned int. pub fn uint32(data: &[u8]) -> Result<u32, ValidationErr> { if data.len() < 4 { return Err(ValidationErr::InvalidIntegerValue { @@ -80,10 +80,10 @@ pub fn uint32(data: &[u8]) -> Result<u32, ValidationErr> { Ok(u32::from_be_bytes(data[..4].try_into().unwrap())) } -/// sha256 hash of empty data +/// SHA256 hash of empty data. pub const EMPTY_SHA256: &str = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; -/// Gets hex encoded SHA256 hash of given data +/// Gets hex-encoded SHA256 hash of given data. pub fn sha256_hash(data: &[u8]) -> String { #[cfg(feature = "ring")] { @@ -649,27 +649,27 @@ mod tests { } } -/// Gets bas64 encoded MD5 hash of given data +/// Gets base64-encoded MD5 hash of given data.
pub fn md5sum_hash(data: &[u8]) -> String { b64_encode(md5::compute(data).as_slice()) } -/// Gets current UTC time +/// Gets current UTC time. pub fn utc_now() -> UtcTime { chrono::offset::Utc::now() } -/// Gets signer date value of given time +/// Gets signer date value of given time. pub fn to_signer_date(time: UtcTime) -> String { time.format("%Y%m%d").to_string() } -/// Gets AMZ date value of given time +/// Gets AMZ date value of given time. pub fn to_amz_date(time: UtcTime) -> String { time.format("%Y%m%dT%H%M%SZ").to_string() } -/// Gets HTTP header value of given time +/// Gets HTTP header value of given time. pub fn to_http_header_value(time: UtcTime) -> String { format!( "{}, {} {} {} GMT", @@ -694,12 +694,12 @@ pub fn to_http_header_value(time: UtcTime) -> String { ) } -/// Gets ISO8601 UTC formatted value of given time +/// Gets ISO8601 UTC formatted value of given time. pub fn to_iso8601utc(time: UtcTime) -> String { time.format("%Y-%m-%dT%H:%M:%S.%3fZ").to_string() } -/// Parses ISO8601 UTC formatted value to time +/// Parses ISO8601 UTC formatted value to time. pub fn from_iso8601utc(s: &str) -> Result<UtcTime, ValidationErr> { let dt = NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%S.%3fZ") .or_else(|_| NaiveDateTime::parse_from_str(s, "%Y-%m-%dT%H:%M:%SZ"))?; @@ -747,13 +747,13 @@ pub fn parse_bool(value: &str) -> Result<bool, ValidationErr> { } } -/// Parses HTTP header value to time +/// Parses HTTP header value to time. pub fn from_http_header_value(s: &str) -> Result<UtcTime, ValidationErr> { let dt = NaiveDateTime::parse_from_str(s, "%a, %d %b %Y %H:%M:%S GMT")?; Ok(DateTime::<Utc>::from_naive_utc_and_offset(dt, Utc)) } -/// Checks if given hostname is valid or not +/// Checks if given hostname is valid or not. pub fn match_hostname(value: &str) -> bool { lazy_static! { static ref HOSTNAME_REGEX: Regex = @@ -777,7 +777,7 @@ pub fn match_hostname(value: &str) -> bool { true } -/// Checks if given region is valid or not +/// Checks if given region is valid or not.
pub fn match_region(value: &str) -> bool { lazy_static! { static ref REGION_REGEX: Regex = Regex::new(r"^([a-z_\d-]{1,63})$").unwrap(); } @@ -790,7 +790,8 @@ pub fn match_region(value: &str) -> bool { || value.ends_with('_') } -/// Validates given bucket name. TODO S3Express has slightly different rules for bucket names +/// Validates given bucket name. +// TODO: S3Express has slightly different rules for bucket names pub fn check_bucket_name(bucket_name: impl AsRef<str>, strict: bool) -> Result<(), ValidationErr> { let bucket_name: &str = bucket_name.as_ref().trim(); let bucket_name_len = bucket_name.len(); @@ -858,7 +859,8 @@ pub fn check_bucket_name(bucket_name: impl AsRef<str>, strict: bool) -> Result<( Ok(()) } -/// Validates given object name. TODO S3Express has slightly different rules for object names +/// Validates given object name. +// TODO: S3Express has slightly different rules for object names pub fn check_object_name(object_name: impl AsRef<str>) -> Result<(), ValidationErr> { let name = object_name.as_ref(); match name.len() { @@ -894,7 +896,7 @@ pub fn check_ssec( Ok(()) } -/// Validates SSE-C (Server-Side Encryption with Customer-Provided Keys) settings and logs an error +/// Validates SSE-C (Server-Side Encryption with Customer-Provided Keys) settings and logs an error. pub fn check_ssec_with_log( ssec: &Option<SseCustomerKey>, client: &MinioClient, @@ -939,7 +941,7 @@ pub fn get_text_option(element: &Element, tag: &str) -> Option<String> { .and_then(|v| v.get_text().map(|s| s.to_string())) } -/// Trim leading and trailing quotes from a string. It consumes the +/// Trims leading and trailing quotes from a string. Note: consumes the input string. pub fn trim_quotes(mut s: String) -> String { if s.len() >= 2 && s.starts_with('"') && s.ends_with('"') { s.drain(0..1); // remove the leading quote @@ -948,7 +950,7 @@ s } -/// Copies source byte slice into destination byte slice +/// Copies source byte slice into destination byte slice.
pub fn copy_slice(dst: &mut [u8], src: &[u8]) -> usize { let mut c = 0; for (d, s) in dst.iter_mut().zip(src.iter()) { @@ -1035,8 +1037,8 @@ pub fn parse_tags(s: &str) -> Result<HashMap<String, String>, ValidationErr> { Ok(tags) } -#[must_use] /// Returns the consumed data and inserts a key into it with an empty value. +#[must_use] pub fn insert(data: Option<Multimap>, key: impl Into<String>) -> Multimap { let mut result: Multimap = data.unwrap_or_default(); result.insert(key.into(), String::new()); diff --git a/tests/s3/bucket_create_delete.rs b/tests/s3/bucket_create_delete.rs index 32f357d..ba02bb0 100644 --- a/tests/s3/bucket_create_delete.rs +++ b/tests/s3/bucket_create_delete.rs @@ -126,7 +126,7 @@ async fn bucket_delete(ctx: TestContext) { .unwrap(); assert!(!resp.exists()); assert_eq!(resp.bucket(), bucket_name); - assert_eq!(resp.region(), ""); //TODO this ought to be DEFAULT_REGION + assert_eq!(resp.region(), DEFAULT_REGION); } async fn test_bucket_delete_and_purge(ctx: &TestContext, bucket_name: &str, object_name: &str) { diff --git a/tests/s3/bucket_exists.rs b/tests/s3/bucket_exists.rs index 309c753..ebe7d04 100644 --- a/tests/s3/bucket_exists.rs +++ b/tests/s3/bucket_exists.rs @@ -51,5 +51,5 @@ async fn bucket_exists(ctx: TestContext, bucket_name: String) { .unwrap(); assert!(!resp.exists()); assert_eq!(resp.bucket(), bucket_name); - assert_eq!(resp.region(), ""); // TODO this should probably be DEFAULT_REGION + assert_eq!(resp.region(), DEFAULT_REGION); } diff --git a/tests/s3/bucket_notification.rs b/tests/s3/bucket_notification.rs index 7200f0c..2b4ff5f 100644 --- a/tests/s3/bucket_notification.rs +++ b/tests/s3/bucket_notification.rs @@ -22,8 +22,37 @@ use minio::s3::types::{NotificationConfig, S3Api}; use minio_common::example::create_bucket_notification_config_example; use minio_common::test_context::TestContext; -const SQS_ARN: &str = "arn:minio:sqs::miniojavatest:webhook"; +const SQS_ARN: &str = "arn:minio:sqs:us-east-1:miniojavatest:webhook"; +/// Tests bucket notification
configuration. +/// +/// ## Prerequisites (MinIO Admin Configuration Required) +/// +/// This test requires notification targets to be configured via MinIO admin before it can run. +/// Bucket notifications cannot be configured via the S3 API alone - the notification targets +/// (webhooks, Kafka, NATS, AMQP, etc.) must first be set up using MinIO admin commands. +/// +/// ### Example: Configure a webhook notification target +/// +/// ```bash +/// # Configure webhook target with ARN "miniojavatest" +/// mc admin config set myminio notify_webhook:miniojavatest \ +/// endpoint="http://example.com/webhook" \ +/// queue_limit="10" +/// +/// # Restart MinIO to apply changes +/// mc admin service restart myminio +/// +/// # Verify the ARN is available +/// mc admin info myminio --json | jq '.info.services.notifications' +/// # Should show: arn:minio:sqs:us-east-1:miniojavatest:webhook +/// ``` +/// +/// ### Test Behavior +/// +/// - If notification targets are properly configured, the test runs normally +/// - If targets are not configured, the test gracefully skips (not a failure) +/// - This allows the test suite to pass in development environments without notification infrastructure #[minio_macros::test(skip_if_express)] async fn test_bucket_notification(ctx: TestContext, bucket_name: String) { let config: NotificationConfig = create_bucket_notification_config_example(); diff --git a/tests/start-server.sh b/tests/start-server.sh index b0c8fd6..657b519 100755 --- a/tests/start-server.sh +++ b/tests/start-server.sh @@ -13,6 +13,7 @@ mkdir -p /tmp/certs cp ./tests/public.crt ./tests/private.key /tmp/certs/ (MINIO_CI_CD=true \ + MINIO_SITE_REGION=us-east-1 \ MINIO_NOTIFY_WEBHOOK_ENABLE_miniojavatest=on \ MINIO_NOTIFY_WEBHOOK_ENDPOINT_miniojavatest=http://example.org/ \ ./minio server /tmp/test-xl/{1...4}/ --certs-dir /tmp/certs/ &)