From f23572dce8e611f855ba6953798c0a2c97e549ca Mon Sep 17 00:00:00 2001 From: Henk-Jan Lebbink Date: Sat, 29 Mar 2025 23:26:11 +0100 Subject: [PATCH] added benchmarks (#142) --- Cargo.toml | 26 +- benches/README.md | 9 + benches/bench_check_bucket_name.rs | 30 ++ benches/s3/api_benchmarks.rs | 103 ++++++ benches/s3/bench_bucket_exists.rs | 28 ++ benches/s3/bench_bucket_lifecycle.rs | 61 ++++ benches/s3/bench_bucket_notification.rs | 64 ++++ benches/s3/bench_bucket_policy.rs | 61 ++++ benches/s3/bench_bucket_replication.rs | 121 +++++++ benches/s3/bench_bucket_tags.rs | 61 ++++ benches/s3/bench_bucket_versioning.rs | 41 +++ benches/s3/bench_list_bucket.rs | 28 ++ benches/s3/bench_object_copy.rs | 35 ++ benches/s3/bench_object_legal_hold.rs | 58 ++++ benches/s3/bench_object_lock_config.rs | 49 +++ benches/s3/bench_object_retention.rs | 60 ++++ benches/s3/bench_object_tags.rs | 58 ++++ benches/s3/common_benches.rs | 167 ++++++++++ common/Cargo.toml | 21 ++ common/src/cleanup_guard.rs | 69 ++++ common/src/example.rs | 196 +++++++++++ common/src/lib.rs | 6 + common/src/rand_reader.rs | 45 +++ common/src/rand_src.rs | 89 +++++ common/src/test_context.rs | 158 +++++++++ common/src/utils.rs | 48 +++ src/s3/args.rs | 28 +- src/s3/builders/put_object.rs | 4 +- src/s3/client.rs | 6 +- src/s3/response/disable_object_legal_hold.rs | 4 +- src/s3/response/enable_object_legal_hold.rs | 4 +- .../response/is_object_legal_hold_enabled.rs | 6 +- src/s3/response/put_object.rs | 2 +- src/s3/types.rs | 25 ++ tests/common.rs | 311 ------------------ ...bucket.rs => test_bucket_create_delete.rs} | 10 +- tests/test_bucket_encryption.rs | 6 +- tests/test_bucket_exists.rs | 6 +- tests/test_bucket_lifecycle.rs | 31 +- tests/test_bucket_notification.rs | 43 ++- tests/test_bucket_policy.rs | 46 ++- tests/test_bucket_replication.rs | 119 +++---- tests/test_bucket_tags.rs | 13 +- tests/test_bucket_versioning.rs | 8 +- tests/test_get_object.rs | 12 +- tests/test_get_presigned_object_url.rs | 7 +- tests/test_get_presigned_post_form_data.rs | 17 +- tests/test_list_buckets.rs | 9 +- tests/test_list_objects.rs | 60 ++-- tests/test_listen_bucket_notification.rs | 9 +- ...mpose_object.rs => test_object_compose.rs} | 21 +- ...est_copy_object.rs => test_object_copy.rs} | 27 +- tests/test_object_legal_hold.rs | 16 +- tests/test_object_lock_config.rs | 8 +- ...t_object_content.rs => test_object_put.rs} | 53 +-- ...emove_objects.rs => test_object_remove.rs} | 7 +- tests/test_object_retention.rs | 26 +- tests/test_object_tags.rs | 22 +- tests/test_select_object_content.rs | 18 +- tests/test_upload_download_object.rs | 22 +- 60 files changed, 1986 insertions(+), 712 deletions(-) create mode 100644 benches/README.md create mode 100644 benches/bench_check_bucket_name.rs create mode 100644 benches/s3/api_benchmarks.rs create mode 100644 benches/s3/bench_bucket_exists.rs create mode 100644 benches/s3/bench_bucket_lifecycle.rs create mode 100644 benches/s3/bench_bucket_notification.rs create mode 100644 benches/s3/bench_bucket_policy.rs create mode 100644 benches/s3/bench_bucket_replication.rs create mode 100644 benches/s3/bench_bucket_tags.rs create mode 100644 benches/s3/bench_bucket_versioning.rs create mode 100644 benches/s3/bench_list_bucket.rs create mode 100644 benches/s3/bench_object_copy.rs create mode 100644 benches/s3/bench_object_legal_hold.rs create mode 100644 benches/s3/bench_object_lock_config.rs create mode 100644 benches/s3/bench_object_retention.rs create mode 100644 benches/s3/bench_object_tags.rs create mode 100644 
benches/s3/common_benches.rs
 create mode 100644 common/Cargo.toml
 create mode 100644 common/src/cleanup_guard.rs
 create mode 100644 common/src/example.rs
 create mode 100644 common/src/lib.rs
 create mode 100644 common/src/rand_reader.rs
 create mode 100644 common/src/rand_src.rs
 create mode 100644 common/src/test_context.rs
 create mode 100644 common/src/utils.rs
 delete mode 100644 tests/common.rs
 rename tests/{test_create_delete_bucket.rs => test_bucket_create_delete.rs} (82%)
 rename tests/{test_compose_object.rs => test_object_compose.rs} (80%)
 rename tests/{test_copy_object.rs => test_object_copy.rs} (72%)
 rename tests/{test_put_object_content.rs => test_object_put.rs} (82%)
 rename tests/{test_remove_objects.rs => test_object_remove.rs} (92%)

diff --git a/Cargo.toml b/Cargo.toml
index 398926d..f6a6bba 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,7 +24,7 @@ ring = ["dep:ring"]

 [dependencies]
 async-recursion = "1.1.1"
-async-trait = "0.1.87"
+async-trait = "0.1.88"
 base64 = "0.22.1"
 byteorder = "1.5.0"
 bytes = "1.10.1"
@@ -36,11 +36,9 @@ env_logger = "0.11.7"
 futures-util = "0.3.31"
 hex = "0.4.3"
 hmac = { version = "0.12.1", optional = true }
-#home = "0.5.9"
-http = "1.2.0"
 hyper = { version = "1.6.0", features = ["full"] }
 lazy_static = "1.5.0"
-log = "0.4.26"
+log = "0.4.27"
 md5 = "0.7.0"
 multimap = "0.10.0"
 percent-encoding = "2.3.1"
@@ -50,17 +48,24 @@ ring = { version = "0.17.14", optional = true, default-features = false, feature
 serde = { version = "1.0.219", features = ["derive"] }
 serde_json = "1.0.140"
 sha2 = { version = "0.10.8", optional = true }
-tokio = { version = "1.44.0", features = ["full"] }
+tokio = { version = "1.44.1", features = ["full"] }
 tokio-stream = "0.1.17"
-tokio-util = { version = "0.7.13", features = ["io"] }
+tokio-util = { version = "0.7.14", features = ["io"] }
 urlencoding = "2.1.3"
 xmltree = "0.11.0"
 futures = "0.3.31"
+http = "1.3.1"

 [dev-dependencies]
-async-std = { version = "1.13.0", features = ["attributes", "tokio1"] }
-clap = { version = "4.5.31", features = ["derive"] }
+minio_common = { path = "./common" }
+async-std = { version = "1.13.1", features = ["attributes", "tokio1"] }
+clap = { version = "4.5.34", features = ["derive"] }
 quickcheck = "1.0.3"
+criterion = "0.5.1"
+
+[lib]
+name = "minio"
+path = "src/lib.rs"

 [[example]]
 name = "file_uploader"
@@ -70,3 +75,8 @@ name = "file_downloader"

 [[example]]
 name = "object_prompt"
+
+[[bench]]
+name = "s3-api"
+path = "benches/s3/api_benchmarks.rs"
+harness = false
\ No newline at end of file
diff --git a/benches/README.md b/benches/README.md
new file mode 100644
index 0000000..d0a3e6c
--- /dev/null
+++ b/benches/README.md
@@ -0,0 +1,9 @@
+Benches:
+
+* Designed for low-level micro-benchmarks.
+* Focus on measuring specific functions or operations.
+* Output raw benchmarking results.
+
+Run: `cargo bench --bench s3-api`
+
+Results are written to `target/criterion`.
\ No newline at end of file
diff --git a/benches/bench_check_bucket_name.rs b/benches/bench_check_bucket_name.rs
new file mode 100644
index 0000000..dbf8868
--- /dev/null
+++ b/benches/bench_check_bucket_name.rs
@@ -0,0 +1,30 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use criterion::{Criterion, criterion_group, criterion_main}; +use minio::s3::utils::check_bucket_name; + +fn bench_check_bucket_name(c: &mut Criterion) { + c.bench_function("check_bucket_name true", |b| { + b.iter(|| check_bucket_name("my-example-bucket-name", true)) + }); + + c.bench_function("check_bucket_name false", |b| { + b.iter(|| check_bucket_name("my-example-bucket-name", false)) + }); +} + +criterion_group!(benches, bench_check_bucket_name); +criterion_main!(benches); diff --git a/benches/s3/api_benchmarks.rs b/benches/s3/api_benchmarks.rs new file mode 100644 index 0000000..4cf09a0 --- /dev/null +++ b/benches/s3/api_benchmarks.rs @@ -0,0 +1,103 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +mod bench_bucket_exists; +mod bench_bucket_lifecycle; +mod bench_bucket_notification; +mod bench_bucket_policy; +mod bench_bucket_replication; +mod bench_bucket_tags; +mod bench_bucket_versioning; +mod bench_list_bucket; +mod bench_object_legal_hold; +mod bench_object_lock_config; +mod bench_object_retention; +mod bench_object_tags; +mod common_benches; + +mod bench_object_copy; + +use criterion::{Criterion, criterion_group, criterion_main}; +use std::time::Duration; + +use crate::bench_bucket_exists::*; +use crate::bench_bucket_lifecycle::*; +#[allow(unused_imports)] +use crate::bench_bucket_notification::*; +use crate::bench_bucket_policy::*; +#[allow(unused_imports)] +use crate::bench_bucket_replication::*; +use crate::bench_bucket_tags::*; +use crate::bench_bucket_versioning::*; +use crate::bench_list_bucket::*; +#[allow(unused_imports)] +use crate::bench_object_copy::*; +use crate::bench_object_legal_hold::*; +use crate::bench_object_lock_config::*; +use crate::bench_object_retention::*; +use crate::bench_object_tags::*; + +criterion_group!( + name = benches; + config = Criterion::default() + .configure_from_args() + .warm_up_time(Duration::from_secs_f32(0.01)) + .sample_size(100) + .nresamples(1001) + .measurement_time(Duration::from_secs_f32(0.5)); + targets = + bench_bucket_exists, + bench_set_bucket_lifecycle, + bench_get_bucket_lifecycle, + bench_delete_bucket_lifecycle, + // + //bench_set_bucket_notification, //A specified destination ARN does not exist or is not well-formed + //bench_get_bucket_notification, + //bench_delete_bucket_notification, + // + bench_set_bucket_policy, + bench_get_bucket_policy, + bench_delete_bucket_policy, + // + //bench_set_bucket_replication, //TODO setup permissions to allow replication + //bench_get_bucket_replication, + 
//bench_delete_bucket_replication, + // + bench_set_bucket_tags, + bench_get_bucket_tags, + bench_delete_bucket_tags, + // + bench_set_bucket_versioning, + bench_get_bucket_versioning, + // + bench_list_buckets, + //bench_object_copy, //TODO first refactor object_copy + // + bench_enable_object_legal_hold, + bench_disable_object_legal_hold, + bench_is_object_legal_hold, + // + bench_set_object_lock_config, + bench_get_object_lock_config, + bench_delete_object_lock_config, + // + bench_set_object_retention, + bench_get_object_retention, + // + bench_set_object_tags, + bench_get_object_tags +); + +criterion_main!(benches); diff --git a/benches/s3/bench_bucket_exists.rs b/benches/s3/bench_bucket_exists.rs new file mode 100644 index 0000000..55f949d --- /dev/null +++ b/benches/s3/bench_bucket_exists.rs @@ -0,0 +1,28 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::BucketExists; + +pub(crate) fn bench_bucket_exists(criterion: &mut Criterion) { + benchmark_s3_api( + "bucket_exists", + criterion, + || async { Ctx2::new().await }, + |ctx| BucketExists::new(&ctx.bucket).client(&ctx.client), + ); +} diff --git a/benches/s3/bench_bucket_lifecycle.rs b/benches/s3/bench_bucket_lifecycle.rs new file mode 100644 index 0000000..bba3916 --- /dev/null +++ b/benches/s3/bench_bucket_lifecycle.rs @@ -0,0 +1,61 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
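Every target above hands an S3 builder to the shared benchmark_s3_api harness, so only request construction and response parsing are timed. For comparison, a standalone Criterion benchmark that times the same bucket_exists call end to end could look like the sketch below. It is not part of this patch; it assumes a reachable MinIO deployment configured through the usual SERVER_ENDPOINT/ACCESS_KEY/SECRET_KEY variables and only uses types introduced by this change set.

use criterion::{Criterion, criterion_group, criterion_main};
use minio::s3::builders::BucketExists;
use minio::s3::types::S3Api;
use minio_common::test_context::TestContext;
use tokio::runtime::Runtime;

fn bench_bucket_exists_end_to_end(criterion: &mut Criterion) {
    // One runtime and one scratch bucket shared by all iterations.
    let rt = Runtime::new().unwrap();
    let ctx = TestContext::new_from_env();
    let (bucket, _guard) = rt.block_on(ctx.create_bucket_helper());

    criterion.bench_function("bucket_exists_end_to_end", |b| {
        b.iter(|| {
            // Full round-trip: build, sign, send, and parse the response.
            rt.block_on(async {
                BucketExists::new(&bucket)
                    .client(&ctx.client)
                    .send()
                    .await
                    .unwrap()
            })
        })
    });
}

criterion_group!(benches, bench_bucket_exists_end_to_end);
criterion_main!(benches);

Such an end-to-end measurement is dominated by network latency, which is exactly why the harness in this patch isolates the CPU-bound phases instead.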
+ +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{DeleteBucketLifecycle, GetBucketLifecycle, SetBucketLifecycle}; +use minio::s3::types::S3Api; +use minio_common::example::create_bucket_lifecycle_config_examples; + +pub(crate) fn bench_set_bucket_lifecycle(criterion: &mut Criterion) { + benchmark_s3_api( + "set_bucket_lifecycle", + criterion, + || async { Ctx2::new().await }, + |ctx| { + let config = create_bucket_lifecycle_config_examples(); + SetBucketLifecycle::new(&ctx.bucket) + .client(&ctx.client) + .life_cycle_config(config) + }, + ) +} +pub(crate) fn bench_get_bucket_lifecycle(criterion: &mut Criterion) { + benchmark_s3_api( + "get_bucket_lifecycle", + criterion, + || async { + let ctx = Ctx2::new().await; + let config = create_bucket_lifecycle_config_examples(); + SetBucketLifecycle::new(&ctx.bucket) + .client(&ctx.client) + .life_cycle_config(config) + .send() + .await + .unwrap(); + ctx + }, + |ctx| GetBucketLifecycle::new(&ctx.bucket).client(&ctx.client), + ) +} +pub(crate) fn bench_delete_bucket_lifecycle(criterion: &mut Criterion) { + benchmark_s3_api( + "delete_bucket_lifecycle", + criterion, + || async { Ctx2::new().await }, + |ctx| DeleteBucketLifecycle::new(&ctx.bucket).client(&ctx.client), + ) +} diff --git a/benches/s3/bench_bucket_notification.rs b/benches/s3/bench_bucket_notification.rs new file mode 100644 index 0000000..2529819 --- /dev/null +++ b/benches/s3/bench_bucket_notification.rs @@ -0,0 +1,64 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{DeleteBucketNotification, GetBucketNotification, SetBucketNotification}; +use minio::s3::types::S3Api; +use minio_common::example::create_bucket_notification_config_example; + +#[allow(dead_code)] +pub(crate) fn bench_set_bucket_notification(criterion: &mut Criterion) { + benchmark_s3_api( + "set_bucket_notification", + criterion, + || async { Ctx2::new().await }, + |ctx| { + let config = create_bucket_notification_config_example(); + SetBucketNotification::new(&ctx.bucket) + .client(&ctx.client) + .notification_config(config) + }, + ) +} +#[allow(dead_code)] +pub(crate) fn bench_get_bucket_notification(criterion: &mut Criterion) { + benchmark_s3_api( + "get_bucket_notification", + criterion, + || async { + let ctx = Ctx2::new().await; + let config = create_bucket_notification_config_example(); + SetBucketNotification::new(&ctx.bucket) + .client(&ctx.client) + .notification_config(config) + .send() + .await + .unwrap(); + ctx + }, + |ctx| GetBucketNotification::new(&ctx.bucket).client(&ctx.client), + ) +} +#[allow(dead_code)] +pub(crate) fn bench_delete_bucket_notification(criterion: &mut Criterion) { + benchmark_s3_api( + "delete_bucket_notification", + criterion, + || async { Ctx2::new().await }, + |ctx| DeleteBucketNotification::new(&ctx.bucket).client(&ctx.client), + ) +} diff --git a/benches/s3/bench_bucket_policy.rs b/benches/s3/bench_bucket_policy.rs new file mode 100644 index 0000000..08211b3 --- /dev/null +++ b/benches/s3/bench_bucket_policy.rs @@ -0,0 +1,61 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{DeleteBucketPolicy, GetBucketPolicy, SetBucketPolicy}; +use minio::s3::types::S3Api; +use minio_common::example::create_bucket_policy_config_example; + +pub(crate) fn bench_set_bucket_policy(criterion: &mut Criterion) { + benchmark_s3_api( + "set_bucket_policy", + criterion, + || async { Ctx2::new().await }, + |ctx| { + let config = create_bucket_policy_config_example(&ctx.bucket); + SetBucketPolicy::new(&ctx.bucket) + .client(&ctx.client) + .config(config) + }, + ) +} +pub(crate) fn bench_get_bucket_policy(criterion: &mut Criterion) { + benchmark_s3_api( + "get_bucket_policy", + criterion, + || async { + let ctx = Ctx2::new().await; + let config = create_bucket_policy_config_example(&ctx.bucket); + SetBucketPolicy::new(&ctx.bucket) + .client(&ctx.client) + .config(config) + .send() + .await + .unwrap(); + ctx + }, + |ctx| GetBucketPolicy::new(&ctx.bucket).client(&ctx.client), + ) +} +pub(crate) fn bench_delete_bucket_policy(criterion: &mut Criterion) { + benchmark_s3_api( + "delete_bucket_policy", + criterion, + || async { Ctx2::new().await }, + |ctx| DeleteBucketPolicy::new(&ctx.bucket).client(&ctx.client), + ) +} diff --git a/benches/s3/bench_bucket_replication.rs b/benches/s3/bench_bucket_replication.rs new file mode 100644 index 0000000..4d323b3 --- /dev/null +++ b/benches/s3/bench_bucket_replication.rs @@ -0,0 +1,121 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{ + DeleteBucketReplication, GetBucketReplication, SetBucketReplication, VersioningStatus, +}; +use minio::s3::response::SetBucketVersioningResponse; +use minio::s3::types::S3Api; +use minio_common::example::create_bucket_replication_config_example; + +#[allow(dead_code)] +pub(crate) fn bench_set_bucket_replication(criterion: &mut Criterion) { + benchmark_s3_api( + "set_bucket_replication", + criterion, + || async { + let mut ctx = Ctx2::new().await; + ctx.new_aux().await; + + let _resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&ctx.bucket) + .versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + + let _resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&ctx.aux_bucket.clone().unwrap()) + .versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + + ctx + }, + |ctx| { + let config = + create_bucket_replication_config_example(ctx.aux_bucket.clone().unwrap().as_str()); + SetBucketReplication::new(&ctx.bucket) + .client(&ctx.client) + .replication_config(config) + }, + ) +} +#[allow(dead_code)] +pub(crate) fn bench_get_bucket_replication(criterion: &mut Criterion) { + benchmark_s3_api( + "get_bucket_replication", + criterion, + || async { + let mut ctx = Ctx2::new().await; + ctx.new_aux().await; + + let _resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&ctx.bucket) + .versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + + let _resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&ctx.aux_bucket.clone().unwrap()) + .versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + + ctx + }, + |ctx| GetBucketReplication::new(&ctx.bucket).client(&ctx.client), + ) +} +#[allow(dead_code)] +pub(crate) fn bench_delete_bucket_replication(criterion: &mut Criterion) { + benchmark_s3_api( + "delete_bucket_replication", + criterion, + || async { + let mut ctx = Ctx2::new().await; + ctx.new_aux().await; + + let _resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&ctx.bucket) + .versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + + let _resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&ctx.aux_bucket.clone().unwrap()) + .versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + + ctx + }, + |ctx| DeleteBucketReplication::new(&ctx.bucket).client(&ctx.client), + ) +} diff --git a/benches/s3/bench_bucket_tags.rs b/benches/s3/bench_bucket_tags.rs new file mode 100644 index 0000000..45ccd80 --- /dev/null +++ b/benches/s3/bench_bucket_tags.rs @@ -0,0 +1,61 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{DeleteBucketTags, GetBucketTags, SetBucketTags}; +use minio::s3::response::SetBucketTagsResponse; +use minio::s3::types::S3Api; +use minio_common::example::create_tags_example; + +pub(crate) fn bench_set_bucket_tags(criterion: &mut Criterion) { + benchmark_s3_api( + "set_bucket_tags", + criterion, + || async { Ctx2::new().await }, + |ctx| { + SetBucketTags::new(&ctx.bucket) + .client(&ctx.client) + .tags(create_tags_example()) + }, + ) +} +pub(crate) fn bench_get_bucket_tags(criterion: &mut Criterion) { + benchmark_s3_api( + "get_bucket_tags", + criterion, + || async { + let ctx = Ctx2::new().await; + let _resp: SetBucketTagsResponse = ctx + .client + .set_bucket_tags(&ctx.bucket) + .tags(create_tags_example()) + .send() + .await + .unwrap(); + ctx + }, + |ctx| GetBucketTags::new(&ctx.bucket).client(&ctx.client), + ) +} +pub(crate) fn bench_delete_bucket_tags(criterion: &mut Criterion) { + benchmark_s3_api( + "delete_bucket_tags", + criterion, + || async { Ctx2::new().await }, + |ctx| DeleteBucketTags::new(&ctx.bucket).client(&ctx.client), + ) +} diff --git a/benches/s3/bench_bucket_versioning.rs b/benches/s3/bench_bucket_versioning.rs new file mode 100644 index 0000000..281986b --- /dev/null +++ b/benches/s3/bench_bucket_versioning.rs @@ -0,0 +1,41 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{GetBucketVersioning, SetBucketVersioning, VersioningStatus}; + +pub(crate) fn bench_get_bucket_versioning(criterion: &mut Criterion) { + benchmark_s3_api( + "get_bucket_versioning", + criterion, + || async { Ctx2::new().await }, + |ctx| GetBucketVersioning::new(&ctx.bucket).client(&ctx.client), + ) +} +pub(crate) fn bench_set_bucket_versioning(criterion: &mut Criterion) { + benchmark_s3_api( + "set_bucket_versioning", + criterion, + || async { Ctx2::new().await }, + |ctx| { + let status = VersioningStatus::Enabled; + SetBucketVersioning::new(&ctx.bucket) + .client(&ctx.client) + .versioning_status(status) + }, + ) +} diff --git a/benches/s3/bench_list_bucket.rs b/benches/s3/bench_list_bucket.rs new file mode 100644 index 0000000..6e6223e --- /dev/null +++ b/benches/s3/bench_list_bucket.rs @@ -0,0 +1,28 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::ListBuckets; + +pub(crate) fn bench_list_buckets(criterion: &mut Criterion) { + benchmark_s3_api( + "list_buckets", + criterion, + || async { Ctx2::new().await }, + |ctx| ListBuckets::new().client(&ctx.client), + ) +} diff --git a/benches/s3/bench_object_copy.rs b/benches/s3/bench_object_copy.rs new file mode 100644 index 0000000..68a3f54 --- /dev/null +++ b/benches/s3/bench_object_copy.rs @@ -0,0 +1,35 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#[allow(unused_imports)] +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; + +#[allow(dead_code)] +pub(crate) fn bench_object_copy(_criterion: &mut Criterion) { + /* + benchmark_s3_api( + "object_copy", + criterion, + || async { Ctx2::new_with_object(false).await }, + |ctx| { + let _object_name_dst = rand_object_name(); + //TODO refactor copy object for this to be possible + todo!() + }, + ); + */ +} diff --git a/benches/s3/bench_object_legal_hold.rs b/benches/s3/bench_object_legal_hold.rs new file mode 100644 index 0000000..386d247 --- /dev/null +++ b/benches/s3/bench_object_legal_hold.rs @@ -0,0 +1,58 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{ + DisableObjectLegalHold, EnableObjectLegalHold, IsObjectLegalHoldEnabled, +}; + +pub(crate) fn bench_enable_object_legal_hold(criterion: &mut Criterion) { + benchmark_s3_api( + "enable_object_legal_hold", + criterion, + || async { Ctx2::new_with_object(true).await }, + |ctx| { + EnableObjectLegalHold::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + }, + ) +} +pub(crate) fn bench_disable_object_legal_hold(criterion: &mut Criterion) { + benchmark_s3_api( + "disable_object_legal_hold", + criterion, + || async { Ctx2::new_with_object(true).await }, + |ctx| { + DisableObjectLegalHold::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + }, + ) +} +pub(crate) fn bench_is_object_legal_hold(criterion: &mut Criterion) { + benchmark_s3_api( + "is_object_legal_hold", + criterion, + || async { Ctx2::new().await }, + |ctx| { + IsObjectLegalHoldEnabled::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + }, + ) +} diff --git a/benches/s3/bench_object_lock_config.rs b/benches/s3/bench_object_lock_config.rs new file mode 100644 index 0000000..186fa59 --- /dev/null +++ b/benches/s3/bench_object_lock_config.rs @@ -0,0 +1,49 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::common_benches::{Ctx2, benchmark_s3_api}; +use criterion::Criterion; +use minio::s3::builders::{DeleteObjectLockConfig, GetObjectLockConfig, SetObjectLockConfig}; +use minio_common::example::create_object_lock_config_example; + +pub(crate) fn bench_set_object_lock_config(criterion: &mut Criterion) { + benchmark_s3_api( + "set_object_lock_config", + criterion, + || async { Ctx2::new_with_object(true).await }, + |ctx| { + let config = create_object_lock_config_example(); + SetObjectLockConfig::new(&ctx.bucket) + .client(&ctx.client) + .config(config) + }, + ) +} +pub(crate) fn bench_get_object_lock_config(criterion: &mut Criterion) { + benchmark_s3_api( + "get_object_lock_config", + criterion, + || async { Ctx2::new_with_object(true).await }, + |ctx| GetObjectLockConfig::new(&ctx.bucket).client(&ctx.client), + ) +} +pub(crate) fn bench_delete_object_lock_config(criterion: &mut Criterion) { + benchmark_s3_api( + "delete_object_lock_config", + criterion, + || async { Ctx2::new_with_object(true).await }, + |ctx| DeleteObjectLockConfig::new(&ctx.bucket).client(&ctx.client), + ) +} diff --git a/benches/s3/bench_object_retention.rs b/benches/s3/bench_object_retention.rs new file mode 100644 index 0000000..423a8bc --- /dev/null +++ b/benches/s3/bench_object_retention.rs @@ -0,0 +1,60 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{GetObjectRetention, SetObjectRetention}; +use minio::s3::response::SetObjectRetentionResponse; +use minio::s3::types::{RetentionMode, S3Api}; +use minio::s3::utils::utc_now; + +pub(crate) fn bench_set_object_retention(criterion: &mut Criterion) { + benchmark_s3_api( + "set_object_retention", + criterion, + || async { Ctx2::new_with_object(true).await }, + |ctx| { + SetObjectRetention::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + .retention_mode(Some(RetentionMode::GOVERNANCE)) + .retain_until_date(Some(utc_now() + chrono::Duration::days(1))) + }, + ) +} +pub(crate) fn bench_get_object_retention(criterion: &mut Criterion) { + benchmark_s3_api( + "get_object_retention", + criterion, + || async { + let ctx = Ctx2::new_with_object(true).await; + let _resp: SetObjectRetentionResponse = SetObjectRetention::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + .retention_mode(Some(RetentionMode::GOVERNANCE)) + .retain_until_date(Some(utc_now() + chrono::Duration::days(1))) + .send() + .await + .unwrap(); + ctx + }, + |ctx| { + GetObjectRetention::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + }, + ) +} diff --git a/benches/s3/bench_object_tags.rs b/benches/s3/bench_object_tags.rs new file mode 100644 index 0000000..303dd46 --- /dev/null +++ b/benches/s3/bench_object_tags.rs @@ -0,0 +1,58 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +use crate::common_benches::{Ctx2, benchmark_s3_api}; + +use criterion::Criterion; +use minio::s3::builders::{GetObjectTags, SetObjectTags}; +use minio::s3::response::SetObjectTagsResponse; +use minio::s3::types::S3Api; +use minio_common::example::create_tags_example; + +pub(crate) fn bench_set_object_tags(criterion: &mut Criterion) { + benchmark_s3_api( + "set_object_tags", + criterion, + || async { Ctx2::new_with_object(false).await }, + |ctx| { + SetObjectTags::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + .tags(create_tags_example()) + }, + ) +} +pub(crate) fn bench_get_object_tags(criterion: &mut Criterion) { + benchmark_s3_api( + "get_object_tags", + criterion, + || async { + let ctx = Ctx2::new_with_object(false).await; + let _resp: SetObjectTagsResponse = SetObjectTags::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + .tags(create_tags_example()) + .send() + .await + .unwrap(); + ctx + }, + |ctx| { + GetObjectTags::new(&ctx.bucket) + .client(&ctx.client) + .object(ctx.object.clone()) + }, + ) +} diff --git a/benches/s3/common_benches.rs b/benches/s3/common_benches.rs new file mode 100644 index 0000000..ff32c27 --- /dev/null +++ b/benches/s3/common_benches.rs @@ -0,0 +1,167 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
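common_benches.rs (below) supplies the Ctx2 fixture plus the benchmark_s3_api driver, which times the request-building and response-parsing phases of an S3Api call separately rather than the network round-trip. For orientation, the same stages written out by hand look roughly like this sketch. It is not part of this patch; GetBucketTags is only an illustrative choice and error handling is omitted.

use minio::s3::builders::GetBucketTags;
use minio::s3::types::{FromS3Response, S3Api};
use minio_common::test_context::TestContext;

#[tokio::main]
async fn main() {
    let ctx = TestContext::new_from_env();
    let (bucket, _guard) = ctx.create_bucket_helper().await;

    let api = GetBucketTags::new(&bucket).client(&ctx.client);
    // Stage timed by the "to_s3request" benchmark: build and sign the request.
    let request = api.to_s3request().unwrap();
    // The HTTP round-trip itself (deliberately not measured by the harness).
    let response = request.clone().execute().await;
    // Stage timed by the "from_s3response" benchmark: parse the raw response.
    let _parsed =
        <GetBucketTags as S3Api>::S3Response::from_s3response(request, response).await;
}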
+
+use criterion::Criterion;
+use minio::s3::Client;
+use minio::s3::error::Error;
+use minio::s3::response::{MakeBucketResponse, PutObjectContentResponse};
+use minio::s3::types::{FromS3Response, S3Api};
+use minio_common::cleanup_guard::CleanupGuard;
+use minio_common::test_context::TestContext;
+use minio_common::utils::{
+    get_bytes_from_response, get_response_from_bytes, rand_bucket_name, rand_object_name,
+};
+use std::env;
+use tokio::runtime::Runtime;
+
+pub(crate) struct Ctx2 {
+    pub client: Client,
+    pub bucket: String,
+    pub object: String,
+    _cleanup: CleanupGuard,
+    pub aux_bucket: Option<String>,
+    _aux_cleanup: Option<CleanupGuard>,
+}
+
+impl Ctx2 {
+    /// Create a new context with a bucket
+    pub async fn new() -> Self {
+        unsafe {
+            env::set_var("SSL_CERT_FILE", "./tests/public.crt");
+        }
+        let ctx = TestContext::new_from_env();
+        let (bucket_name, cleanup) = ctx.create_bucket_helper().await;
+
+        Self {
+            client: ctx.client,
+            bucket: bucket_name,
+            object: "".to_string(),
+            _cleanup: cleanup,
+            aux_bucket: None,
+            _aux_cleanup: None,
+        }
+    }
+    /// Create a new context with a bucket and an object
+    pub async fn new_with_object(object_lock: bool) -> Self {
+        unsafe {
+            env::set_var("SSL_CERT_FILE", "./tests/public.crt");
+        }
+        let ctx = TestContext::new_from_env();
+        let bucket_name: String = rand_bucket_name();
+        let _resp: MakeBucketResponse = ctx
+            .client
+            .make_bucket(&bucket_name)
+            .object_lock(object_lock)
+            .send()
+            .await
+            .unwrap();
+        let cleanup = CleanupGuard::new(&ctx.client, &bucket_name);
+        let object_name = rand_object_name();
+        let data = bytes::Bytes::from("hello, world".to_string().into_bytes());
+        let _resp: PutObjectContentResponse = ctx
+            .client
+            .put_object_content(&bucket_name, &object_name, data)
+            .send()
+            .await
+            .unwrap();
+
+        Self {
+            client: ctx.client,
+            bucket: bucket_name,
+            object: object_name.to_string(),
+            _cleanup: cleanup,
+            aux_bucket: None,
+            _aux_cleanup: None,
+        }
+    }
+    #[allow(dead_code)]
+    pub async fn new_aux(&mut self) -> String {
+        let bucket_name: String = rand_bucket_name();
+        self.aux_bucket = Some(bucket_name.clone());
+        self._aux_cleanup = Some(CleanupGuard::new(&self.client, &bucket_name));
+        let _resp: MakeBucketResponse = self
+            .client
+            .make_bucket(&bucket_name)
+            .object_lock(false)
+            .send()
+            .await
+            .unwrap();
+
+        bucket_name
+    }
+}
+
+pub(crate) fn benchmark_s3_api<ApiType, GlobalSetupFuture>(
+    name: &str,
+    criterion: &mut Criterion,
+    global_setup: impl Fn() -> GlobalSetupFuture,
+    per_iter_setup: impl Fn(&Ctx2) -> ApiType,
+) where
+    ApiType: S3Api,
+    GlobalSetupFuture: Future<Output = Ctx2>,
+{
+    let rt = Runtime::new().unwrap();
+    let mut group = criterion.benchmark_group(name);
+
+    // Global setup
+    let ctx: Ctx2 = rt.block_on(global_setup());
+
+    // Benchmark to_s3request phase
+    group.bench_function("to_s3request", |b| {
+        b.iter_custom(|iters| {
+            let mut total = std::time::Duration::ZERO;
+            for _ in 0..iters {
+                let api = per_iter_setup(&ctx);
+
+                let start = std::time::Instant::now();
+                let _request = api.to_s3request();
+                total += start.elapsed();
+            }
+            total
+        })
+    });
+
+    // Benchmark from_s3response phase
+    group.bench_function("from_s3response", |b| {
+        b.iter_custom(|iters| {
+            let mut total = std::time::Duration::ZERO;
+
+            // Per-iteration setup for initial request
+            let api = per_iter_setup(&ctx);
+            let request = api.to_s3request().unwrap();
+
+            // Execute the request to get a response, store the bytes for swift cloning
+            let bytes: bytes::Bytes = rt.block_on(async {
+                let resp: Result<reqwest::Response, Error> = request.clone().execute().await;
+
get_bytes_from_response(resp).await + }); + + for _ in 0..iters { + let response2 = Ok(get_response_from_bytes(bytes.clone())); + let request2 = request.clone(); + + let start = std::time::Instant::now(); + rt.block_on(async { + let _ = + ::S3Response::from_s3response(request2, response2).await; + }); + total += start.elapsed(); + } + total + }) + }); + + group.finish(); +} diff --git a/common/Cargo.toml b/common/Cargo.toml new file mode 100644 index 0000000..b2de269 --- /dev/null +++ b/common/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "minio_common" +version = "0.1.0" +edition = "2024" + +[dependencies] +minio = {path = ".." } +tokio = { version = "1.44.1", features = ["full"] } +tokio-stream = "0.1.17" +async-std = "1.13.1" +rand = { version = "0.8.5", features = ["small_rng"] } +bytes = "1.10.1" +log = "0.4.27" +chrono = "0.4.40" +reqwest = "0.12.15" +http = "1.3.1" + +[lib] +name = "minio_common" +path = "src/lib.rs" + diff --git a/common/src/cleanup_guard.rs b/common/src/cleanup_guard.rs new file mode 100644 index 0000000..7fe18af --- /dev/null +++ b/common/src/cleanup_guard.rs @@ -0,0 +1,69 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use async_std::future::timeout; +use minio::s3::Client; +use std::thread; + +/// Cleanup guard that removes the bucket when it is dropped +pub struct CleanupGuard { + client: Client, + bucket_name: String, +} + +impl CleanupGuard { + #[allow(dead_code)] + pub fn new(client: &Client, bucket_name: &str) -> Self { + Self { + client: client.clone(), + bucket_name: bucket_name.to_string(), + } + } +} + +impl Drop for CleanupGuard { + fn drop(&mut self) { + let client = self.client.clone(); + let bucket_name = self.bucket_name.clone(); + //println!("Going to remove bucket {}", bucket_name); + + // Spawn the cleanup task in a way that detaches it from the current runtime + thread::spawn(move || { + // Create a new runtime for this thread + let rt = tokio::runtime::Runtime::new().unwrap(); + + // Execute the async cleanup in this new runtime + rt.block_on(async { + // do the actual removal of the bucket + match timeout( + std::time::Duration::from_secs(60), + client.remove_and_purge_bucket(&bucket_name), + ) + .await + { + Ok(result) => match result { + Ok(_) => { + //println!("Bucket {} removed successfully", bucket_name), + } + Err(e) => println!("Error removing bucket {}: {:?}", bucket_name, e), + }, + Err(_) => println!("Timeout after 60s while removing bucket {}", bucket_name), + } + }); + }) + .join() + .unwrap(); // This blocks the current thread until cleanup is done + } +} diff --git a/common/src/example.rs b/common/src/example.rs new file mode 100644 index 0000000..1b5851b --- /dev/null +++ b/common/src/example.rs @@ -0,0 +1,196 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use minio::s3::args::PostPolicy; +use minio::s3::types::{ + AndOperator, Destination, Filter, LifecycleConfig, LifecycleRule, NotificationConfig, + ObjectLockConfig, PrefixFilterRule, QueueConfig, ReplicationConfig, ReplicationRule, + RetentionMode, SuffixFilterRule, +}; +use minio::s3::utils::utc_now; +use std::collections::HashMap; + +pub fn create_bucket_lifecycle_config_examples() -> LifecycleConfig { + LifecycleConfig { + rules: vec![LifecycleRule { + abort_incomplete_multipart_upload_days_after_initiation: None, + expiration_date: None, + expiration_days: Some(365), + expiration_expired_object_delete_marker: None, + filter: Filter { + and_operator: None, + prefix: Some(String::from("logs/")), + tag: None, + }, + id: String::from("rule1"), + noncurrent_version_expiration_noncurrent_days: None, + noncurrent_version_transition_noncurrent_days: None, + noncurrent_version_transition_storage_class: None, + status: true, + transition_date: None, + transition_days: None, + transition_storage_class: None, + }], + } +} +pub fn create_bucket_notification_config_example() -> NotificationConfig { + NotificationConfig { + cloud_func_config_list: None, + queue_config_list: Some(vec![QueueConfig { + events: vec![ + String::from("s3:ObjectCreated:Put"), + String::from("s3:ObjectCreated:Copy"), + ], + id: Some("".to_string()), //TODO or should this be NONE?? 
+ prefix_filter_rule: Some(PrefixFilterRule { + value: String::from("images"), + }), + suffix_filter_rule: Some(SuffixFilterRule { + value: String::from("pg"), + }), + queue: String::from("arn:minio:sqs::miniojavatest:webhook"), + }]), + topic_config_list: None, + } +} +pub fn create_bucket_policy_config_example(bucket_name: &str) -> String { + let config = r#" +{ + "Version": "2012-10-17", + "Statement": [ + { + "Action": [ + "s3:GetObject" + ], + "Effect": "Allow", + "Principal": { + "AWS": [ + "*" + ] + }, + "Resource": [ + "arn:aws:s3:::/myobject*" + ], + "Sid": "" + } + ] +} +"# + .replace("", bucket_name); + config.to_string() +} +pub fn create_bucket_policy_config_example_for_replication() -> String { + let config = r#" +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:GetReplicationConfiguration", + "s3:ListBucket", + "s3:ListBucketMultipartUploads", + "s3:GetBucketLocation", + "s3:GetBucketVersioning", + "s3:GetBucketObjectLockConfiguration", + "s3:GetEncryptionConfiguration" + ], + "Resource": [ + "arn:aws:s3:::*" + ], + "Sid": "EnableReplicationOnBucket" + }, + { + "Effect": "Allow", + "Action": [ + "s3:GetReplicationConfiguration", + "s3:ReplicateTags", + "s3:AbortMultipartUpload", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:GetObjectVersionTagging", + "s3:PutObject", + "s3:PutObjectRetention", + "s3:PutBucketObjectLockConfiguration", + "s3:PutObjectLegalHold", + "s3:DeleteObject", + "s3:ReplicateObject", + "s3:ReplicateDelete" + ], + "Resource": [ + "arn:aws:s3:::*" + ], + "Sid": "EnableReplicatingDataIntoBucket" + } + ] +}"#; + config.to_string() +} +pub fn create_bucket_replication_config_example(dst_bucket: &str) -> ReplicationConfig { + let mut tags: HashMap = HashMap::new(); + tags.insert(String::from("key1"), String::from("value1")); + tags.insert(String::from("key2"), String::from("value2")); + + ReplicationConfig { + role: Some("example1".to_string()), + rules: vec![ReplicationRule { + destination: Destination { + bucket_arn: String::from(&format!("arn:aws:s3:::{}", dst_bucket)), + access_control_translation: None, + account: None, + encryption_config: None, + metrics: None, + replication_time: None, + storage_class: None, + }, + delete_marker_replication_status: None, + existing_object_replication_status: None, + filter: Some(Filter { + and_operator: Some(AndOperator { + prefix: Some(String::from("TaxDocs")), + tags: Some(tags), + }), + prefix: None, + tag: None, + }), + id: Some(String::from("rule1")), + prefix: None, + priority: Some(1), + source_selection_criteria: None, + delete_replication_status: Some(false), + status: true, + }], + } +} +pub fn create_tags_example() -> HashMap { + HashMap::from([ + (String::from("Project"), String::from("Project One")), + (String::from("User"), String::from("jsmith")), + ]) +} +pub fn create_object_lock_config_example() -> ObjectLockConfig { + const DURATION_DAYS: i32 = 7; + ObjectLockConfig::new(RetentionMode::GOVERNANCE, Some(DURATION_DAYS), None).unwrap() +} +pub fn create_post_policy_example(bucket_name: &str, object_name: &str) -> PostPolicy { + let expiration = utc_now() + chrono::Duration::days(5); + + let mut policy = PostPolicy::new(&bucket_name, expiration).unwrap(); + policy.add_equals_condition("key", &object_name).unwrap(); + policy + .add_content_length_range_condition(1024 * 1024, 4 * 1024 * 1024) + .unwrap(); + policy +} diff --git a/common/src/lib.rs b/common/src/lib.rs new file mode 100644 index 0000000..f42332d --- /dev/null +++ b/common/src/lib.rs @@ -0,0 +1,6 
@@
+pub mod cleanup_guard;
+pub mod example;
+pub mod rand_reader;
+pub mod rand_src;
+pub mod test_context;
+pub mod utils;
diff --git a/common/src/rand_reader.rs b/common/src/rand_reader.rs
new file mode 100644
index 0000000..8d64f5a
--- /dev/null
+++ b/common/src/rand_reader.rs
@@ -0,0 +1,45 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::io;
+
+pub struct RandReader {
+    size: u64,
+}
+
+impl RandReader {
+    #[allow(dead_code)]
+    pub fn new(size: u64) -> RandReader {
+        RandReader { size }
+    }
+}
+
+impl io::Read for RandReader {
+    fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
+        let bytes_read: usize = match (self.size as usize) > buf.len() {
+            true => buf.len(),
+            false => self.size as usize,
+        };
+
+        if bytes_read > 0 {
+            let random: &mut dyn rand::RngCore = &mut rand::thread_rng();
+            random.fill_bytes(&mut buf[0..bytes_read]);
+        }
+
+        self.size -= bytes_read as u64;
+
+        Ok(bytes_read)
+    }
+}
diff --git a/common/src/rand_src.rs b/common/src/rand_src.rs
new file mode 100644
index 0000000..591a5dd
--- /dev/null
+++ b/common/src/rand_src.rs
@@ -0,0 +1,89 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
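RandReader above is a plain std::io::Read source that yields exactly `size` pseudo-random bytes and then reports end of file. A small usage sketch, not part of this patch:

use minio_common::rand_reader::RandReader;
use std::io::Read;

fn main() -> std::io::Result<()> {
    // Drain 1 MiB of pseudo-random data and confirm the advertised size.
    let mut reader = RandReader::new(1024 * 1024);
    let mut payload = Vec::new();
    let n = reader.read_to_end(&mut payload)?;
    assert_eq!(n, 1024 * 1024);
    Ok(())
}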
+
+use async_std::task;
+use bytes::Bytes;
+use rand::SeedableRng;
+use rand::prelude::SmallRng;
+use std::io;
+use tokio::io::AsyncRead;
+use tokio_stream::Stream;
+
+pub struct RandSrc {
+    size: u64,
+    rng: SmallRng,
+}
+
+impl RandSrc {
+    #[allow(dead_code)]
+    pub fn new(size: u64) -> RandSrc {
+        let rng = SmallRng::from_entropy();
+        RandSrc { size, rng }
+    }
+}
+
+impl Stream for RandSrc {
+    type Item = Result<Bytes, io::Error>;
+
+    fn poll_next(
+        self: std::pin::Pin<&mut Self>,
+        _cx: &mut task::Context<'_>,
+    ) -> task::Poll<Option<Self::Item>> {
+        if self.size == 0 {
+            return task::Poll::Ready(None);
+        }
+
+        let bytes_read = match self.size > 64 * 1024 {
+            true => 64 * 1024,
+            false => self.size as usize,
+        };
+
+        let this = self.get_mut();
+
+        let mut buf = vec![0; bytes_read];
+        let random: &mut dyn rand::RngCore = &mut this.rng;
+        random.fill_bytes(&mut buf);
+
+        this.size -= bytes_read as u64;
+
+        task::Poll::Ready(Some(Ok(Bytes::from(buf))))
+    }
+}
+
+impl AsyncRead for RandSrc {
+    fn poll_read(
+        self: std::pin::Pin<&mut Self>,
+        _cx: &mut task::Context<'_>,
+        read_buf: &mut tokio::io::ReadBuf<'_>,
+    ) -> task::Poll<io::Result<()>> {
+        let buf = read_buf.initialize_unfilled();
+        let bytes_read = match self.size > (buf.len() as u64) {
+            true => buf.len(),
+            false => self.size as usize,
+        };
+
+        let this = self.get_mut();
+
+        if bytes_read > 0 {
+            let random: &mut dyn rand::RngCore = &mut this.rng;
+            random.fill_bytes(&mut buf[0..bytes_read]);
+        }
+
+        this.size -= bytes_read as u64;
+
+        read_buf.advance(bytes_read);
+        task::Poll::Ready(Ok(()))
+    }
+}
diff --git a/common/src/test_context.rs b/common/src/test_context.rs
new file mode 100644
index 0000000..316b548
--- /dev/null
+++ b/common/src/test_context.rs
@@ -0,0 +1,158 @@
+// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
+// Copyright 2025 MinIO, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
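RandSrc above exposes the same idea asynchronously, yielding the requested number of random bytes as a Stream of chunks of at most 64 KiB (and also implementing AsyncRead). A usage sketch, not part of this patch:

use bytes::Bytes;
use minio_common::rand_src::RandSrc;
use tokio_stream::StreamExt;

#[tokio::main]
async fn main() {
    // 256 KiB arrives as four 64 KiB chunks.
    let mut src = RandSrc::new(256 * 1024);
    let mut total = 0usize;
    while let Some(chunk) = src.next().await {
        let chunk: Bytes = chunk.expect("the random source never fails");
        total += chunk.len();
    }
    assert_eq!(total, 256 * 1024);
}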
+
+use crate::cleanup_guard::CleanupGuard;
+use crate::utils::rand_bucket_name;
+use minio::s3::Client;
+use minio::s3::creds::StaticProvider;
+use minio::s3::http::BaseUrl;
+use minio::s3::types::S3Api;
+use std::path::{Path, PathBuf};
+
+#[derive(Clone)]
+pub struct TestContext {
+    pub base_url: BaseUrl,
+    pub access_key: String,
+    pub secret_key: String,
+    pub ignore_cert_check: Option<bool>,
+    pub ssl_cert_file: Option<PathBuf>,
+    pub client: Client,
+}
+
+impl TestContext {
+    pub fn new_from_env() -> Self {
+        let run_on_ci: bool = std::env::var("CI")
+            .unwrap_or("false".into())
+            .parse()
+            .unwrap_or(false);
+        if run_on_ci {
+            let host = std::env::var("SERVER_ENDPOINT").unwrap();
+            let access_key = std::env::var("ACCESS_KEY").unwrap();
+            let secret_key = std::env::var("SECRET_KEY").unwrap();
+            let secure = std::env::var("ENABLE_HTTPS").is_ok();
+            let value = std::env::var("SSL_CERT_FILE").unwrap();
+            let mut ssl_cert_file = None;
+            if !value.is_empty() {
+                ssl_cert_file = Some(Path::new(&value));
+            }
+            let ignore_cert_check = std::env::var("IGNORE_CERT_CHECK").is_ok();
+            let region = std::env::var("SERVER_REGION").ok();
+
+            let mut base_url: BaseUrl = host.parse().unwrap();
+            base_url.https = secure;
+            if let Some(v) = region {
+                base_url.region = v;
+            }
+
+            let static_provider = StaticProvider::new(&access_key, &secret_key, None);
+            let client = Client::new(
+                base_url.clone(),
+                Some(Box::new(static_provider)),
+                ssl_cert_file,
+                Some(ignore_cert_check),
+            )
+            .unwrap();
+
+            Self {
+                base_url,
+                access_key,
+                secret_key,
+                ignore_cert_check: Some(ignore_cert_check),
+                ssl_cert_file: ssl_cert_file.map(PathBuf::from),
+                client,
+            }
+        } else {
+            const DEFAULT_SERVER_ENDPOINT: &str = "https://play.min.io/";
+            const DEFAULT_ACCESS_KEY: &str = "minioadmin";
+            const DEFAULT_SECRET_KEY: &str = "minioadmin";
+            const DEFAULT_ENABLE_HTTPS: &str = "true";
+            const DEFAULT_SSL_CERT_FILE: &str = "./tests/public.crt";
+            const DEFAULT_IGNORE_CERT_CHECK: &str = "false";
+            const DEFAULT_SERVER_REGION: &str = "";
+
+            let host: String =
+                std::env::var("SERVER_ENDPOINT").unwrap_or(DEFAULT_SERVER_ENDPOINT.to_string());
+            log::debug!("SERVER_ENDPOINT={}", host);
+            let access_key: String =
+                std::env::var("ACCESS_KEY").unwrap_or(DEFAULT_ACCESS_KEY.to_string());
+            log::debug!("ACCESS_KEY={}", access_key);
+            let secret_key: String =
+                std::env::var("SECRET_KEY").unwrap_or(DEFAULT_SECRET_KEY.to_string());
+            log::debug!("SECRET_KEY=*****");
+            let secure: bool = std::env::var("ENABLE_HTTPS")
+                .unwrap_or(DEFAULT_ENABLE_HTTPS.to_string())
+                .parse()
+                .unwrap_or(false);
+            log::debug!("ENABLE_HTTPS={}", secure);
+            let ssl_cert: String =
+                std::env::var("SSL_CERT_FILE").unwrap_or(DEFAULT_SSL_CERT_FILE.to_string());
+            log::debug!("SSL_CERT_FILE={}", ssl_cert);
+            let ssl_cert_file: PathBuf = ssl_cert.into();
+            let ignore_cert_check: bool = std::env::var("IGNORE_CERT_CHECK")
+                .unwrap_or(DEFAULT_IGNORE_CERT_CHECK.to_string())
+                .parse()
+                .unwrap_or(true);
+            log::debug!("IGNORE_CERT_CHECK={}", ignore_cert_check);
+            let region: String =
+                std::env::var("SERVER_REGION").unwrap_or(DEFAULT_SERVER_REGION.to_string());
+            log::debug!("SERVER_REGION={:?}", region);
+
+            let mut base_url: BaseUrl = host.parse().unwrap();
+            base_url.https = secure;
+            base_url.region = region;
+
+            let static_provider = StaticProvider::new(&access_key, &secret_key, None);
+            let client = Client::new(
+                base_url.clone(),
+                Some(Box::new(static_provider)),
+                Some(&*ssl_cert_file),
+                Some(ignore_cert_check),
+            )
+            .unwrap();
+
+            Self {
+                base_url,
+                access_key,
secret_key, + ignore_cert_check: Some(ignore_cert_check), + ssl_cert_file: Some(ssl_cert_file), + client, + } + } + } + + /// Creates a temporary bucket with an automatic cleanup guard. + /// + /// This function creates a new bucket and returns both its name and a `CleanupGuard` + /// that ensures the bucket is deleted when it goes out of scope. + /// + /// # Returns + /// A tuple containing: + /// - `String` - The name of the created bucket. + /// - `CleanupGuard` - A guard that automatically deletes the bucket when dropped. + /// + /// # Example + /// ```rust + /// let (bucket_name, guard) = client.create_bucket_helper().await; + /// println!("Created temporary bucket: {}", bucket_name); + /// // The bucket will be removed when `guard` is dropped. + /// ``` + pub async fn create_bucket_helper(&self) -> (String, CleanupGuard) { + let bucket_name = rand_bucket_name(); + let _resp = self.client.make_bucket(&bucket_name).send().await.unwrap(); + let guard = CleanupGuard::new(&self.client, &bucket_name); + (bucket_name, guard) + } +} diff --git a/common/src/utils.rs b/common/src/utils.rs new file mode 100644 index 0000000..c894179 --- /dev/null +++ b/common/src/utils.rs @@ -0,0 +1,48 @@ +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use http::{Response as HttpResponse, StatusCode}; +use minio::s3::error::Error; +use rand::distributions::{Alphanumeric, DistString}; + +pub fn rand_bucket_name() -> String { + Alphanumeric + .sample_string(&mut rand::thread_rng(), 8) + .to_lowercase() +} + +pub fn rand_object_name() -> String { + Alphanumeric.sample_string(&mut rand::thread_rng(), 8) +} + +pub async fn get_bytes_from_response(v: Result) -> bytes::Bytes { + match v { + Ok(r) => match r.bytes().await { + Ok(b) => b, + Err(e) => panic!("{:?}", e), + }, + Err(e) => panic!("{:?}", e), + } +} + +pub fn get_response_from_bytes(bytes: bytes::Bytes) -> reqwest::Response { + let http_response = HttpResponse::builder() + .status(StatusCode::OK) // You can customize the status if needed + .header("Content-Type", "application/octet-stream") + .body(bytes) + .expect("Failed to build HTTP response"); + + reqwest::Response::try_from(http_response).expect("Failed to convert to reqwest::Response") +} diff --git a/src/s3/args.rs b/src/s3/args.rs index 3b3b305..a1c1c45 100644 --- a/src/s3/args.rs +++ b/src/s3/args.rs @@ -981,18 +981,18 @@ impl<'a> GetPresignedObjectUrlArgs<'a> { /// /// Condition elements and respective condition for Post policy is available here. 
-pub struct PostPolicy<'a> {
- pub region: Option<&'a str>,
- pub bucket: &'a str,
+pub struct PostPolicy {
+ pub region: Option<String>,
+ pub bucket: String,
- expiration: &'a UtcTime,
+ expiration: UtcTime,
 eq_conditions: HashMap<String, String>,
 starts_with_conditions: HashMap<String, String>,
 lower_limit: Option<usize>,
 upper_limit: Option<usize>,
 }
-impl<'a> PostPolicy<'a> {
+impl PostPolicy {
 const EQ: &'static str = "eq";
 const STARTS_WITH: &'static str = "starts-with";
 const ALGORITHM: &'static str = "AWS4-HMAC-SHA256";
@@ -1006,14 +1006,14 @@ impl<'a> PostPolicy<'a> {
 /// use minio::s3::utils::*;
 /// use chrono::Duration;
 /// let expiration = utc_now() + Duration::days(7);
- /// let policy = PostPolicy::new("my-bucket", &expiration).unwrap();
+ /// let policy = PostPolicy::new("my-bucket", expiration).unwrap();
 /// ```
- pub fn new(bucket_name: &'a str, expiration: &'a UtcTime) -> Result<PostPolicy<'a>, Error> {
+ pub fn new(bucket_name: &str, expiration: UtcTime) -> Result<PostPolicy, Error> {
 check_bucket_name(bucket_name, true)?;
 Ok(PostPolicy {
 region: None,
- bucket: bucket_name,
+ bucket: bucket_name.to_owned(),
 expiration,
 eq_conditions: HashMap::new(),
 starts_with_conditions: HashMap::new(),
@@ -1056,7 +1056,7 @@ impl<'a> PostPolicy<'a> {
 /// use minio::s3::utils::*;
 /// use chrono::Duration;
 /// let expiration = utc_now() + Duration::days(7);
- /// let mut policy = PostPolicy::new("my-bucket", &expiration).unwrap();
+ /// let mut policy = PostPolicy::new("my-bucket", expiration).unwrap();
 ///
 /// // Add condition that 'key' (object name) equals to 'my-objectname'
 /// policy.add_equals_condition("key", "my-object");
@@ -1092,7 +1092,7 @@ impl<'a> PostPolicy<'a> {
 /// use minio::s3::utils::*;
 /// use chrono::Duration;
 /// let expiration = utc_now() + Duration::days(7);
- /// let mut policy = PostPolicy::new("my-bucket", &expiration).unwrap();
+ /// let mut policy = PostPolicy::new("my-bucket", expiration).unwrap();
 /// policy.add_equals_condition("key", "my-object");
 ///
 /// policy.remove_equals_condition("key");
@@ -1109,7 +1109,7 @@ impl<'a> PostPolicy<'a> {
 /// use minio::s3::utils::*;
 /// use chrono::Duration;
 /// let expiration = utc_now() + Duration::days(7);
- /// let mut policy = PostPolicy::new("my-bucket", &expiration).unwrap();
+ /// let mut policy = PostPolicy::new("my-bucket", expiration).unwrap();
 ///
 /// // Add condition that 'Content-Type' starts with 'image/'
 /// policy.add_starts_with_condition("Content-Type", "image/");
@@ -1148,7 +1148,7 @@ impl<'a> PostPolicy<'a> {
 /// use minio::s3::utils::*;
 /// use chrono::Duration;
 /// let expiration = utc_now() + Duration::days(7);
- /// let mut policy = PostPolicy::new("my-bucket", &expiration).unwrap();
+ /// let mut policy = PostPolicy::new("my-bucket", expiration).unwrap();
 /// policy.add_starts_with_condition("Content-Type", "image/");
 ///
 /// policy.remove_starts_with_condition("Content-Type");
@@ -1165,7 +1165,7 @@ impl<'a> PostPolicy<'a> {
 /// use minio::s3::utils::*;
 /// use chrono::Duration;
 /// let expiration = utc_now() + Duration::days(7);
- /// let mut policy = PostPolicy::new("my-bucket", &expiration).unwrap();
+ /// let mut policy = PostPolicy::new("my-bucket", expiration).unwrap();
 ///
 /// // Add condition that 'content-length-range' is between 64kiB to 10MiB
 /// policy.add_content_length_range_condition(64 * 1024, 10 * 1024 * 1024);
@@ -1248,7 +1248,7 @@ impl<'a> PostPolicy<'a> {
 conditions.push(json!([PostPolicy::EQ, "$x-amz-date", amz_date]));
 
 let policy = json!({
- "expiration": to_iso8601utc(*self.expiration),
+ "expiration": to_iso8601utc(self.expiration),
 "conditions":
conditions,
 });
diff --git a/src/s3/builders/put_object.rs b/src/s3/builders/put_object.rs
index 00f0b8a..057bfcc 100644
--- a/src/s3/builders/put_object.rs
+++ b/src/s3/builders/put_object.rs
@@ -837,7 +837,7 @@ impl PutObjectContent {
 headers: res.headers,
 bucket: self.bucket,
 object: self.object,
- location: res.location,
+ region: res.location,
 object_size: size,
 etag: res.etag,
 version_id: res.version_id,
@@ -953,7 +953,7 @@ impl PutObjectContent {
 headers: res.headers,
 bucket: self.bucket.clone(),
 object: self.object.clone(),
- location: res.location,
+ region: res.location,
 object_size: size,
 etag: res.etag,
 version_id: res.version_id,
diff --git a/src/s3/client.rs b/src/s3/client.rs
index 76d36cc..01c3848 100644
--- a/src/s3/client.rs
+++ b/src/s3/client.rs
@@ -1075,7 +1075,7 @@ impl Client {
 
 pub async fn get_presigned_post_form_data(
 &self,
- policy: &PostPolicy<'_>,
+ policy: &PostPolicy,
 ) -> Result<HashMap<String, String>, Error> {
 if self.provider.is_none() {
 return Err(Error::PostPolicyError(
@@ -1083,7 +1083,9 @@ impl Client {
 ));
 }
 
- let region = self.get_region(policy.bucket, policy.region).await?;
+ let region = self
+ .get_region(&policy.bucket, policy.region.as_deref())
+ .await?;
 let creds = self.provider.as_ref().unwrap().fetch();
 policy.form_data(
 creds.access_key,
diff --git a/src/s3/response/disable_object_legal_hold.rs b/src/s3/response/disable_object_legal_hold.rs
index 055693e..14c296a 100644
--- a/src/s3/response/disable_object_legal_hold.rs
+++ b/src/s3/response/disable_object_legal_hold.rs
@@ -26,7 +26,7 @@ pub struct DisableObjectLegalHoldResponse {
 pub headers: HeaderMap,
 pub region: String,
 pub bucket: String,
- pub object_name: String,
+ pub object: String,
 pub version_id: Option<String>,
 }
 
@@ -50,7 +50,7 @@ impl FromS3Response for DisableObjectLegalHoldResponse {
 headers: resp.headers().clone(),
 region,
 bucket,
- object_name,
+ object: object_name,
 version_id,
 })
 }
diff --git a/src/s3/response/enable_object_legal_hold.rs b/src/s3/response/enable_object_legal_hold.rs
index 479f541..587d196 100644
--- a/src/s3/response/enable_object_legal_hold.rs
+++ b/src/s3/response/enable_object_legal_hold.rs
@@ -26,7 +26,7 @@ pub struct EnableObjectLegalHoldResponse {
 pub headers: HeaderMap,
 pub region: String,
 pub bucket: String,
- pub object_name: String,
+ pub object: String,
 pub version_id: Option<String>,
 }
 
@@ -50,7 +50,7 @@ impl FromS3Response for EnableObjectLegalHoldResponse {
 headers: resp.headers().clone(),
 region,
 bucket,
- object_name,
+ object: object_name,
 version_id,
 })
 }
diff --git a/src/s3/response/is_object_legal_hold_enabled.rs b/src/s3/response/is_object_legal_hold_enabled.rs
index 89c5f1c..72cd332 100644
--- a/src/s3/response/is_object_legal_hold_enabled.rs
+++ b/src/s3/response/is_object_legal_hold_enabled.rs
@@ -29,7 +29,7 @@ pub struct IsObjectLegalHoldEnabledResponse {
 pub headers: HeaderMap,
 pub region: String,
 pub bucket: String,
- pub object_name: String,
+ pub object: String,
 pub version_id: Option<String>,
 pub enabled: bool,
 }
@@ -59,7 +59,7 @@ impl FromS3Response for IsObjectLegalHoldEnabledResponse {
 headers,
 region,
 bucket,
- object_name,
+ object: object_name,
 version_id,
 enabled: get_default_text(&root, "Status") == "ON",
 })
@@ -71,7 +71,7 @@ impl FromS3Response for IsObjectLegalHoldEnabledResponse {
 headers: HeaderMap::new(),
 region,
 bucket,
- object_name,
+ object: object_name,
 version_id,
 enabled: false,
 })
diff --git a/src/s3/response/put_object.rs b/src/s3/response/put_object.rs
index eb733d3..e62af12 100644
--- a/src/s3/response/put_object.rs
+++
b/src/s3/response/put_object.rs
@@ -120,7 +120,7 @@ pub struct PutObjectContentResponse {
 pub headers: HeaderMap,
 pub bucket: String,
 pub object: String,
- pub location: String,
+ pub region: String,
 pub object_size: u64,
 pub etag: String,
 pub version_id: Option<String>,
diff --git a/src/s3/types.rs b/src/s3/types.rs
index d4db9cc..4dacf36 100644
--- a/src/s3/types.rs
+++ b/src/s3/types.rs
@@ -31,6 +31,7 @@ use xmltree::Element;
 use std::collections::HashMap;
 use std::fmt;
 
+#[derive(Clone)]
 pub struct S3Request<'a> {
 pub(crate) client: &'a Client,
 
@@ -130,13 +131,37 @@ pub trait FromS3Response: Sized {
 ) -> Result<Self, Error>;
 }
 
+/// A trait for interacting with the S3 API, providing a unified interface
+/// for sending requests and handling responses.
 #[async_trait]
 pub trait S3Api: ToS3Request {
+ /// The associated response type that must implement `FromS3Response`.
 type S3Response: FromS3Response;
+
+ /// Sends an S3 request and processes the response.
+ ///
+ /// # Returns
+ /// - `Ok(Self::S3Response)`: If the request is successful and the response can be parsed.
+ /// - `Err(Error)`: If there is a failure in request execution or response processing.
+ ///
+ /// # Example Usage
+ /// ```ignore
+ /// use minio::s3::types::S3Api;
+ /// async fn example(api: &impl S3Api) {
+ /// match api.send().await {
+ /// Ok(response) => println!("Success: {:?}", response),
+ /// Err(err) => eprintln!("Error: {:?}", err),
+ /// }
+ /// }
+ /// ```
 async fn send(&self) -> Result<Self::S3Response, Error> {
+ // Convert the implementing type into an S3 request
 let mut req = self.to_s3request()?;
+
+ // Execute the request and await the response
 let resp: Result<reqwest::Response, Error> = req.execute().await;
+
+ // Convert the response into the associated response type
 Self::S3Response::from_s3response(req, resp).await
 }
 }
diff --git a/tests/common.rs b/tests/common.rs
deleted file mode 100644
index 2b569b6..0000000
--- a/tests/common.rs
+++ /dev/null
@@ -1,311 +0,0 @@
-// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
-// Copyright 2025 MinIO, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -use async_std::task; -use bytes::Bytes; -use rand::SeedableRng; -use rand::distributions::{Alphanumeric, DistString}; -use rand::prelude::SmallRng; -use std::path::{Path, PathBuf}; -use std::{io, thread}; -use tokio::io::AsyncRead; -use tokio::time::timeout; -use tokio_stream::Stream; - -use minio::s3::client::Client; -use minio::s3::creds::StaticProvider; -use minio::s3::http::BaseUrl; -use minio::s3::types::S3Api; - -pub struct RandReader { - size: u64, -} - -impl RandReader { - #[allow(dead_code)] - pub fn new(size: u64) -> RandReader { - RandReader { size } - } -} - -impl io::Read for RandReader { - fn read(&mut self, buf: &mut [u8]) -> Result { - let bytes_read: usize = match (self.size as usize) > buf.len() { - true => buf.len(), - false => self.size as usize, - }; - - if bytes_read > 0 { - let random: &mut dyn rand::RngCore = &mut rand::thread_rng(); - random.fill_bytes(&mut buf[0..bytes_read]); - } - - self.size -= bytes_read as u64; - - Ok(bytes_read) - } -} - -pub struct RandSrc { - size: u64, - rng: SmallRng, -} - -impl RandSrc { - #[allow(dead_code)] - pub fn new(size: u64) -> RandSrc { - let rng = SmallRng::from_entropy(); - RandSrc { size, rng } - } -} - -impl Stream for RandSrc { - type Item = Result; - - fn poll_next( - self: std::pin::Pin<&mut Self>, - _cx: &mut task::Context<'_>, - ) -> task::Poll> { - if self.size == 0 { - return task::Poll::Ready(None); - } - - let bytes_read = match self.size > 64 * 1024 { - true => 64 * 1024, - false => self.size as usize, - }; - - let this = self.get_mut(); - - let mut buf = vec![0; bytes_read]; - let random: &mut dyn rand::RngCore = &mut this.rng; - random.fill_bytes(&mut buf); - - this.size -= bytes_read as u64; - - task::Poll::Ready(Some(Ok(Bytes::from(buf)))) - } -} - -impl AsyncRead for RandSrc { - fn poll_read( - self: std::pin::Pin<&mut Self>, - _cx: &mut task::Context<'_>, - read_buf: &mut tokio::io::ReadBuf<'_>, - ) -> task::Poll> { - let buf = read_buf.initialize_unfilled(); - let bytes_read = match self.size > (buf.len() as u64) { - true => buf.len(), - false => self.size as usize, - }; - - let this = self.get_mut(); - - if bytes_read > 0 { - let random: &mut dyn rand::RngCore = &mut this.rng; - random.fill_bytes(&mut buf[0..bytes_read]); - } - - this.size -= bytes_read as u64; - - read_buf.advance(bytes_read); - task::Poll::Ready(Ok(())) - } -} - -pub fn rand_bucket_name() -> String { - Alphanumeric - .sample_string(&mut rand::thread_rng(), 8) - .to_lowercase() -} - -#[allow(dead_code)] -pub fn rand_object_name() -> String { - Alphanumeric.sample_string(&mut rand::thread_rng(), 8) -} - -#[derive(Clone)] -#[allow(dead_code)] -pub struct TestContext { - pub base_url: BaseUrl, - pub access_key: String, - pub secret_key: String, - pub ignore_cert_check: Option, - pub ssl_cert_file: Option, - pub client: Client, -} - -impl TestContext { - #[allow(dead_code)] - pub fn new_from_env() -> Self { - let run_on_ci: bool = std::env::var("CI") - .unwrap_or("false".into()) - .parse() - .unwrap_or(false); - if run_on_ci { - let host = std::env::var("SERVER_ENDPOINT").unwrap(); - let access_key = std::env::var("ACCESS_KEY").unwrap(); - let secret_key = std::env::var("SECRET_KEY").unwrap(); - let secure = std::env::var("ENABLE_HTTPS").is_ok(); - let value = std::env::var("SSL_CERT_FILE").unwrap(); - let mut ssl_cert_file = None; - if !value.is_empty() { - ssl_cert_file = Some(Path::new(&value)); - } - let ignore_cert_check = std::env::var("IGNORE_CERT_CHECK").is_ok(); - let region = std::env::var("SERVER_REGION").ok(); - - let mut 
base_url: BaseUrl = host.parse().unwrap(); - base_url.https = secure; - if let Some(v) = region { - base_url.region = v; - } - - let static_provider = StaticProvider::new(&access_key, &secret_key, None); - let client = Client::new( - base_url.clone(), - Some(Box::new(static_provider)), - ssl_cert_file, - Some(ignore_cert_check), - ) - .unwrap(); - - Self { - base_url, - access_key, - secret_key, - ignore_cert_check: Some(ignore_cert_check), - ssl_cert_file: ssl_cert_file.map(PathBuf::from), - client, - } - } else { - const DEFAULT_SERVER_ENDPOINT: &str = "https://play.min.io/"; - const DEFAULT_ACCESS_KEY: &str = "minioadmin"; - const DEFAULT_SECRET_KEY: &str = "minioadmin"; - const DEFAULT_ENABLE_HTTPS: &str = "true"; - const DEFAULT_SSL_CERT_FILE: &str = "./tests/public.crt"; - const DEFAULT_IGNORE_CERT_CHECK: &str = "false"; - const DEFAULT_SERVER_REGION: &str = ""; - - let host: String = - std::env::var("SERVER_ENDPOINT").unwrap_or(DEFAULT_SERVER_ENDPOINT.to_string()); - log::info!("SERVER_ENDPOINT={}", host); - let access_key: String = - std::env::var("ACCESS_KEY").unwrap_or(DEFAULT_ACCESS_KEY.to_string()); - log::info!("ACCESS_KEY={}", access_key); - let secret_key: String = - std::env::var("SECRET_KEY").unwrap_or(DEFAULT_SECRET_KEY.to_string()); - log::info!("SECRET_KEY=*****"); - let secure: bool = std::env::var("ENABLE_HTTPS") - .unwrap_or(DEFAULT_ENABLE_HTTPS.to_string()) - .parse() - .unwrap_or(false); - log::info!("ENABLE_HTTPS={}", secure); - let ssl_cert: String = - std::env::var("SSL_CERT_FILE").unwrap_or(DEFAULT_SSL_CERT_FILE.to_string()); - log::info!("SSL_CERT_FILE={}", ssl_cert); - let ssl_cert_file: PathBuf = ssl_cert.into(); - let ignore_cert_check: bool = std::env::var("IGNORE_CERT_CHECK") - .unwrap_or(DEFAULT_IGNORE_CERT_CHECK.to_string()) - .parse() - .unwrap_or(true); - log::info!("IGNORE_CERT_CHECK={}", ignore_cert_check); - let region: String = - std::env::var("SERVER_REGION").unwrap_or(DEFAULT_SERVER_REGION.to_string()); - log::info!("SERVER_REGION={:?}", region); - - let mut base_url: BaseUrl = host.parse().unwrap(); - base_url.https = secure; - base_url.region = region; - - let static_provider = StaticProvider::new(&access_key, &secret_key, None); - let client = Client::new( - base_url.clone(), - Some(Box::new(static_provider)), - Some(&*ssl_cert_file), - Some(ignore_cert_check), - ) - .unwrap(); - - Self { - base_url, - access_key, - secret_key, - ignore_cert_check: Some(ignore_cert_check), - ssl_cert_file: Some(ssl_cert_file), - client, - } - } - } -} - -#[allow(dead_code)] -pub async fn create_bucket_helper(ctx: &TestContext) -> (String, CleanupGuard) { - let bucket_name = rand_bucket_name(); - let _resp = ctx.client.make_bucket(&bucket_name).send().await.unwrap(); - let guard = CleanupGuard::new(ctx, &bucket_name); - (bucket_name, guard) -} - -// Cleanup guard that removes the bucket when it is dropped -pub struct CleanupGuard { - ctx: TestContext, - bucket_name: String, -} - -impl CleanupGuard { - #[allow(dead_code)] - pub fn new(ctx: &TestContext, bucket_name: &str) -> Self { - Self { - ctx: ctx.clone(), - bucket_name: bucket_name.to_string(), - } - } -} - -impl Drop for CleanupGuard { - fn drop(&mut self) { - let ctx = self.ctx.clone(); - let bucket_name = self.bucket_name.clone(); - //println!("Going to remove bucket {}", bucket_name); - - // Spawn the cleanup task in a way that detaches it from the current runtime - thread::spawn(move || { - // Create a new runtime for this thread - let rt = tokio::runtime::Runtime::new().unwrap(); - - // Execute the 
async cleanup in this new runtime - rt.block_on(async { - // do the actual removal of the bucket - match timeout( - std::time::Duration::from_secs(60), - ctx.client.remove_and_purge_bucket(&bucket_name), - ) - .await - { - Ok(result) => match result { - Ok(_) => { - //println!("Bucket {} removed successfully", bucket_name), - } - Err(e) => println!("Error removing bucket {}: {:?}", bucket_name, e), - }, - Err(_) => println!("Timeout after 60s while removing bucket {}", bucket_name), - } - }); - }) - .join() - .unwrap(); // This blocks the current thread until cleanup is done - } -} diff --git a/tests/test_create_delete_bucket.rs b/tests/test_bucket_create_delete.rs similarity index 82% rename from tests/test_create_delete_bucket.rs rename to tests/test_bucket_create_delete.rs index d7d9850..265bcaa 100644 --- a/tests/test_create_delete_bucket.rs +++ b/tests/test_bucket_create_delete.rs @@ -13,19 +13,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, rand_bucket_name}; use minio::s3::client::DEFAULT_REGION; -use minio::s3::response::{BucketExistsResponse, RemoveBucketResponse}; +use minio::s3::response::{BucketExistsResponse, MakeBucketResponse, RemoveBucketResponse}; use minio::s3::types::S3Api; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_bucket_name; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn create_delete_bucket() { let ctx = TestContext::new_from_env(); let bucket_name = rand_bucket_name(); - ctx.client.make_bucket(&bucket_name).send().await.unwrap(); + let resp: MakeBucketResponse = ctx.client.make_bucket(&bucket_name).send().await.unwrap(); + assert_eq!(resp.bucket, bucket_name); let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); assert!(resp.exists); diff --git a/tests/test_bucket_encryption.rs b/tests/test_bucket_encryption.rs index a26a0ea..57c8cae 100644 --- a/tests/test_bucket_encryption.rs +++ b/tests/test_bucket_encryption.rs @@ -13,19 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper}; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteBucketEncryptionResponse, GetBucketEncryptionResponse, SetBucketEncryptionResponse, }; use minio::s3::types::{S3Api, SseConfig}; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_bucket_encryption() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let config = SseConfig::default(); diff --git a/tests/test_bucket_exists.rs b/tests/test_bucket_exists.rs index d9b49f2..f439daa 100644 --- a/tests/test_bucket_exists.rs +++ b/tests/test_bucket_exists.rs @@ -13,17 +13,15 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{TestContext, create_bucket_helper}; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::BucketExistsResponse; use minio::s3::types::S3Api; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn bucket_exists() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let resp: BucketExistsResponse = ctx.client.bucket_exists(&bucket_name).send().await.unwrap(); diff --git a/tests/test_bucket_lifecycle.rs b/tests/test_bucket_lifecycle.rs index 2c00807..358d75c 100644 --- a/tests/test_bucket_lifecycle.rs +++ b/tests/test_bucket_lifecycle.rs @@ -13,41 +13,20 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper}; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteBucketLifecycleResponse, GetBucketLifecycleResponse, SetBucketLifecycleResponse, }; -use minio::s3::types::{Filter, LifecycleConfig, LifecycleRule, S3Api}; +use minio::s3::types::{LifecycleConfig, S3Api}; +use minio_common::example::create_bucket_lifecycle_config_examples; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_bucket_lifecycle() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let config: LifecycleConfig = LifecycleConfig { - rules: vec![LifecycleRule { - abort_incomplete_multipart_upload_days_after_initiation: None, - expiration_date: None, - expiration_days: Some(365), - expiration_expired_object_delete_marker: None, - filter: Filter { - and_operator: None, - prefix: Some(String::from("logs/")), - tag: None, - }, - id: String::from("rule1"), - noncurrent_version_expiration_noncurrent_days: None, - noncurrent_version_transition_noncurrent_days: None, - noncurrent_version_transition_storage_class: None, - status: true, - transition_date: None, - transition_days: None, - transition_storage_class: None, - }], - }; + let config: LifecycleConfig = create_bucket_lifecycle_config_examples(); let resp: SetBucketLifecycleResponse = ctx .client diff --git a/tests/test_bucket_notification.rs b/tests/test_bucket_notification.rs index 06a918a..292bb44 100644 --- a/tests/test_bucket_notification.rs +++ b/tests/test_bucket_notification.rs @@ -1,39 +1,34 @@ -mod common; +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
-use crate::common::{TestContext, create_bucket_helper}; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteBucketNotificationResponse, GetBucketNotificationResponse, SetBucketNotificationResponse, }; -use minio::s3::types::{ - NotificationConfig, PrefixFilterRule, QueueConfig, S3Api, SuffixFilterRule, -}; +use minio::s3::types::{NotificationConfig, S3Api}; +use minio_common::example::create_bucket_notification_config_example; +use minio_common::test_context::TestContext; const SQS_ARN: &str = "arn:minio:sqs::miniojavatest:webhook"; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_bucket_notification() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let config = NotificationConfig { - cloud_func_config_list: None, - queue_config_list: Some(vec![QueueConfig { - events: vec![ - String::from("s3:ObjectCreated:Put"), - String::from("s3:ObjectCreated:Copy"), - ], - id: Some("".to_string()), //TODO or should this be NONE?? - prefix_filter_rule: Some(PrefixFilterRule { - value: String::from("images"), - }), - suffix_filter_rule: Some(SuffixFilterRule { - value: String::from("pg"), - }), - queue: String::from(SQS_ARN), - }]), - topic_config_list: None, - }; + let config: NotificationConfig = create_bucket_notification_config_example(); let resp: SetBucketNotificationResponse = ctx .client diff --git a/tests/test_bucket_policy.rs b/tests/test_bucket_policy.rs index 4bfbafb..61817b6 100644 --- a/tests/test_bucket_policy.rs +++ b/tests/test_bucket_policy.rs @@ -1,40 +1,32 @@ -use crate::common::{TestContext, create_bucket_helper}; +// MinIO Rust Library for Amazon S3 Compatible Cloud Storage +// Copyright 2025 MinIO, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteBucketPolicyResponse, GetBucketPolicyResponse, SetBucketPolicyResponse, }; use minio::s3::types::S3Api; - -mod common; +use minio_common::example::create_bucket_policy_config_example; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_bucket_policy() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let config = r#" -{ - "Version": "2012-10-17", - "Statement": [ - { - "Action": [ - "s3:GetObject" - ], - "Effect": "Allow", - "Principal": { - "AWS": [ - "*" - ] - }, - "Resource": [ - "arn:aws:s3:::/myobject*" - ], - "Sid": "" - } - ] -} -"# - .replace("", &bucket_name); + let config: String = create_bucket_policy_config_example(&bucket_name); let resp: SetBucketPolicyResponse = ctx .client diff --git a/tests/test_bucket_replication.rs b/tests/test_bucket_replication.rs index 8520f96..a3fdfa2 100644 --- a/tests/test_bucket_replication.rs +++ b/tests/test_bucket_replication.rs @@ -13,81 +13,82 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper}; use minio::s3::builders::VersioningStatus; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteBucketReplicationResponse, GetBucketReplicationResponse, GetBucketVersioningResponse, - SetBucketReplicationResponse, SetBucketVersioningResponse, + SetBucketPolicyResponse, SetBucketReplicationResponse, SetBucketVersioningResponse, }; -use minio::s3::types::{ - AndOperator, Destination, Filter, ReplicationConfig, ReplicationRule, S3Api, +use minio::s3::types::{ReplicationConfig, S3Api}; +use minio_common::example::{ + create_bucket_policy_config_example_for_replication, create_bucket_replication_config_example, }; -use std::collections::HashMap; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_bucket_replication() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let mut tags: HashMap = HashMap::new(); - tags.insert(String::from("key1"), String::from("value1")); - tags.insert(String::from("key2"), String::from("value2")); + let ctx2 = TestContext::new_from_env(); + let (bucket_name2, _cleanup2) = ctx2.create_bucket_helper().await; - let config = ReplicationConfig { - role: Some("example1".to_string()), - rules: vec![ReplicationRule { - destination: Destination { - bucket_arn: String::from("REPLACE-WITH-ACTUAL-DESTINATION-BUCKET-ARN"), - access_control_translation: None, - account: None, - encryption_config: None, - metrics: None, - replication_time: None, - storage_class: None, - }, - delete_marker_replication_status: None, - existing_object_replication_status: None, - filter: Some(Filter { - and_operator: Some(AndOperator { - prefix: Some(String::from("TaxDocs")), - tags: Some(tags), - }), - prefix: None, - tag: None, - }), - id: Some(String::from("rule1")), - prefix: None, - priority: Some(1), - source_selection_criteria: None, - delete_replication_status: Some(false), - status: true, - }], - }; + { + let resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&bucket_name) + 
.versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); - let resp: SetBucketVersioningResponse = ctx - .client - .set_bucket_versioning(&bucket_name) - .versioning_status(VersioningStatus::Enabled) - .send() - .await - .unwrap(); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + let resp: SetBucketVersioningResponse = ctx + .client + .set_bucket_versioning(&bucket_name2) + .versioning_status(VersioningStatus::Enabled) + .send() + .await + .unwrap(); + assert_eq!(resp.bucket, bucket_name2); + assert_eq!(resp.region, DEFAULT_REGION); - let resp: GetBucketVersioningResponse = ctx - .client - .get_bucket_versioning(&bucket_name) - .send() - .await - .unwrap(); - assert_eq!(resp.status, Some(VersioningStatus::Enabled)); - assert_eq!(resp.bucket, bucket_name); - assert_eq!(resp.region, DEFAULT_REGION); + let resp: GetBucketVersioningResponse = ctx + .client + .get_bucket_versioning(&bucket_name) + .send() + .await + .unwrap(); + assert_eq!(resp.status, Some(VersioningStatus::Enabled)); + assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.region, DEFAULT_REGION); + + if false { + //TODO: to allow replication policy needs to be applied, but this fails + let config: String = create_bucket_policy_config_example_for_replication(); + let _resp: SetBucketPolicyResponse = ctx + .client + .set_bucket_policy(&bucket_name) + .config(config.clone()) + .send() + .await + .unwrap(); + + let _resp: SetBucketPolicyResponse = ctx + .client + .set_bucket_policy(&bucket_name2) + .config(config.clone()) + .send() + .await + .unwrap(); + } + } + + let config: ReplicationConfig = create_bucket_replication_config_example(&bucket_name2); if false { + //TODO setup permissions that allow replication // TODO panic: called `Result::unwrap()` on an `Err` value: S3Error(ErrorResponse { code: "XMinioAdminRemoteTargetNotFoundError", message: "The remote target does not exist", let resp: SetBucketReplicationResponse = ctx .client diff --git a/tests/test_bucket_tags.rs b/tests/test_bucket_tags.rs index 4c81e43..80aaa21 100644 --- a/tests/test_bucket_tags.rs +++ b/tests/test_bucket_tags.rs @@ -13,23 +13,18 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper}; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{DeleteBucketTagsResponse, GetBucketTagsResponse, SetBucketTagsResponse}; use minio::s3::types::S3Api; -use std::collections::HashMap; +use minio_common::example::create_tags_example; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_bucket_tags() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; - let tags = HashMap::from([ - (String::from("Project"), String::from("Project One")), - (String::from("User"), String::from("jsmith")), - ]); + let tags = create_tags_example(); let resp: SetBucketTagsResponse = ctx .client diff --git a/tests/test_bucket_versioning.rs b/tests/test_bucket_versioning.rs index 8b53b2b..d73e20a 100644 --- a/tests/test_bucket_versioning.rs +++ b/tests/test_bucket_versioning.rs @@ -13,18 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{TestContext, create_bucket_helper}; use minio::s3::builders::VersioningStatus; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{GetBucketVersioningResponse, SetBucketVersioningResponse}; use minio::s3::types::S3Api; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] -async fn set_get_delete_bucket_versioning() { +async fn set_get_bucket_versioning() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let resp: SetBucketVersioningResponse = ctx .client diff --git a/tests/test_get_object.rs b/tests/test_get_object.rs index 75dc99c..33332aa 100644 --- a/tests/test_get_object.rs +++ b/tests/test_get_object.rs @@ -13,16 +13,15 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use bytes::Bytes; use minio::s3::types::S3Api; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn get_object() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let data = Bytes::from("hello, world".to_string().into_bytes()); @@ -39,9 +38,4 @@ async fn get_object() { .unwrap(); let got = resp.content.to_segmented_bytes().await.unwrap().to_bytes(); assert_eq!(got, data); - ctx.client - .remove_object(&bucket_name, object_name.as_str()) - .send() - .await - .unwrap(); } diff --git a/tests/test_get_presigned_object_url.rs b/tests/test_get_presigned_object_url.rs index 198fa75..e89fa3a 100644 --- a/tests/test_get_presigned_object_url.rs +++ b/tests/test_get_presigned_object_url.rs @@ -13,18 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use http::Method; use minio::s3::args::GetPresignedObjectUrlArgs; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::GetPresignedObjectUrlResponse; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn get_presigned_object_url() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let resp: GetPresignedObjectUrlResponse = ctx diff --git a/tests/test_get_presigned_post_form_data.rs b/tests/test_get_presigned_post_form_data.rs index a2e208b..c32dbde 100644 --- a/tests/test_get_presigned_post_form_data.rs +++ b/tests/test_get_presigned_post_form_data.rs @@ -13,26 +13,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use minio::s3::args::PostPolicy; -use minio::s3::utils::utc_now; +use minio_common::example::create_post_policy_example; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; use std::collections::HashMap; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn get_presigned_post_form_data() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; - + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); - let expiration = utc_now() + chrono::Duration::days(5); - let mut policy = PostPolicy::new(&bucket_name, &expiration).unwrap(); - policy.add_equals_condition("key", &object_name).unwrap(); - policy - .add_content_length_range_condition(1024 * 1024, 4 * 1024 * 1024) - .unwrap(); + let policy: PostPolicy = create_post_policy_example(&bucket_name, &object_name); let form_data: HashMap = ctx .client diff --git a/tests/test_list_buckets.rs b/tests/test_list_buckets.rs index eacf40b..c93f468 100644 --- a/tests/test_list_buckets.rs +++ b/tests/test_list_buckets.rs @@ -1,5 +1,3 @@ -use crate::common::{CleanupGuard, TestContext, create_bucket_helper}; -use minio::s3::response::ListBucketsResponse; // MinIO Rust Library for Amazon S3 Compatible Cloud Storage // Copyright 2025 MinIO, Inc. // @@ -15,9 +13,10 @@ use minio::s3::response::ListBucketsResponse; // See the License for the specific language governing permissions and // limitations under the License. +use minio::s3::response::ListBucketsResponse; use minio::s3::types::S3Api; - -mod common; +use minio_common::cleanup_guard::CleanupGuard; +use minio_common::test_context::TestContext; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn list_buckets() { @@ -27,7 +26,7 @@ async fn list_buckets() { let mut names: Vec = Vec::new(); let mut guards: Vec = Vec::new(); for _ in 1..=N_BUCKETS { - let (bucket_name, guard) = create_bucket_helper(&ctx).await; + let (bucket_name, guard) = ctx.create_bucket_helper().await; names.push(bucket_name); guards.push(guard); } diff --git a/tests/test_list_objects.rs b/tests/test_list_objects.rs index 1339979..0382fd5 100644 --- a/tests/test_list_objects.rs +++ b/tests/test_list_objects.rs @@ -13,23 +13,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; -use minio::s3::builders::ObjectToDelete; use minio::s3::response::PutObjectContentResponse; use minio::s3::types::ToStream; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; use tokio_stream::StreamExt; -#[tokio::test(flavor = "multi_thread", worker_threads = 10)] -async fn list_objects() { +async fn list_objects(use_api_v1: bool, include_versions: bool) { const N_OBJECTS: usize = 3; let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let mut names: Vec = Vec::new(); for _ in 1..=N_OBJECTS { - let object_name = rand_object_name(); + let object_name: String = rand_object_name(); let resp: PutObjectContentResponse = ctx .client .put_object_content(&bucket_name, &object_name, "hello world") @@ -37,10 +34,17 @@ async fn list_objects() { .await .unwrap(); assert_eq!(resp.bucket, bucket_name); + assert_eq!(resp.object, object_name); names.push(object_name); } - let mut stream = ctx.client.list_objects(&bucket_name).to_stream().await; + let mut stream = ctx + .client + .list_objects(&bucket_name) + .use_api_v1(use_api_v1) + .include_versions(include_versions) + .to_stream() + .await; let mut count = 0; while let Some(items) = stream.next().await { @@ -51,22 +55,24 @@ async fn list_objects() { } } assert_eq!(count, N_OBJECTS); - - let del_items: Vec = names - .iter() - .map(|v| ObjectToDelete::from(v.as_str())) - .collect(); - let mut resp = ctx - .client - .remove_objects(&bucket_name, del_items.into_iter()) - .verbose_mode(true) - .to_stream() - .await; - - while let Some(item) = resp.next().await { - let res = item.unwrap(); - for obj in res.result.iter() { - assert!(obj.is_deleted()); - } - } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 10)] +async fn list_objects_v1_no_versions() { + list_objects(true, false).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 10)] +async fn list_objects_v1_with_versions() { + list_objects(true, true).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 10)] +async fn list_objects_v2_no_versions() { + list_objects(false, false).await; +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 10)] +async fn list_objects_v2_with_versions() { + list_objects(false, true).await; } diff --git a/tests/test_listen_bucket_notification.rs b/tests/test_listen_bucket_notification.rs index df980f2..c56dcd3 100644 --- a/tests/test_listen_bucket_notification.rs +++ b/tests/test_listen_bucket_notification.rs @@ -13,22 +13,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use async_std::task; -use common::RandSrc; use minio::s3::Client; use minio::s3::builders::ObjectContent; use minio::s3::creds::StaticProvider; use minio::s3::types::{NotificationRecords, S3Api}; +use minio_common::rand_src::RandSrc; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; use tokio::sync::mpsc; use tokio_stream::StreamExt; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn listen_bucket_notification() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let name = object_name.clone(); diff --git a/tests/test_compose_object.rs b/tests/test_object_compose.rs similarity index 80% rename from tests/test_compose_object.rs rename to tests/test_object_compose.rs index 25510ca..ac9d3e9 100644 --- a/tests/test_compose_object.rs +++ b/tests/test_object_compose.rs @@ -13,18 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; -use common::RandSrc; use minio::s3::args::{ComposeObjectArgs, ComposeSource, StatObjectArgs}; use minio::s3::builders::ObjectContent; -use minio::s3::types::S3Api; +use minio_common::rand_src::RandSrc; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn compose_object() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let src_object_name = rand_object_name(); let size = 16_u64; @@ -57,15 +55,4 @@ async fn compose_object() { .await .unwrap(); assert_eq!(resp.size, 5); - - ctx.client - .remove_object(&bucket_name, object_name.as_str()) - .send() - .await - .unwrap(); - ctx.client - .remove_object(&bucket_name, src_object_name.as_str()) - .send() - .await - .unwrap(); } diff --git a/tests/test_copy_object.rs b/tests/test_object_copy.rs similarity index 72% rename from tests/test_copy_object.rs rename to tests/test_object_copy.rs index c35118f..e3bf45c 100644 --- a/tests/test_copy_object.rs +++ b/tests/test_object_copy.rs @@ -13,22 +13,20 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; -use common::RandSrc; use minio::s3::args::{CopyObjectArgs, CopySource, StatObjectArgs}; use minio::s3::builders::ObjectContent; -use minio::s3::types::S3Api; +use minio_common::rand_src::RandSrc; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn copy_object() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let src_object_name = rand_object_name(); - let size = 16_u64; - let content = ObjectContent::new_from_stream(RandSrc::new(size), Some(size)); + let n_bytes = 16_u64; + let content = ObjectContent::new_from_stream(RandSrc::new(n_bytes), Some(n_bytes)); ctx.client .put_object_content(&bucket_name, &src_object_name, content) .send() @@ -53,16 +51,5 @@ async fn copy_object() { .stat_object(&StatObjectArgs::new(&bucket_name, &object_name).unwrap()) .await .unwrap(); - assert_eq!(resp.size as u64, size); - - ctx.client - .remove_object(&bucket_name, object_name.as_str()) - .send() - .await - .unwrap(); - ctx.client - .remove_object(&bucket_name, src_object_name.as_str()) - .send() - .await - .unwrap(); + assert_eq!(resp.size as u64, n_bytes); } diff --git a/tests/test_object_legal_hold.rs b/tests/test_object_legal_hold.rs index c4d6521..7167a8b 100644 --- a/tests/test_object_legal_hold.rs +++ b/tests/test_object_legal_hold.rs @@ -13,9 +13,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{CleanupGuard, TestContext, rand_bucket_name, rand_object_name}; use bytes::Bytes; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ @@ -23,6 +20,9 @@ use minio::s3::response::{ IsObjectLegalHoldEnabledResponse, MakeBucketResponse, PutObjectContentResponse, }; use minio::s3::types::S3Api; +use minio_common::cleanup_guard::CleanupGuard; +use minio_common::test_context::TestContext; +use minio_common::utils::{rand_bucket_name, rand_object_name}; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn object_legal_hold() { @@ -35,7 +35,7 @@ async fn object_legal_hold() { .send() .await .unwrap(); - let _cleanup = CleanupGuard::new(&ctx, &bucket_name); + let _cleanup = CleanupGuard::new(&ctx.client, &bucket_name); let object_name = rand_object_name(); let data = Bytes::from("hello, world".to_string().into_bytes()); @@ -58,7 +58,7 @@ async fn object_legal_hold() { .await .unwrap(); //println!("response of setting object legal hold: resp={:?}", resp); - assert_eq!(resp.object_name, object_name); + assert_eq!(resp.object, object_name); assert_eq!(resp.bucket, bucket_name); assert_eq!(resp.region, DEFAULT_REGION); assert_eq!(resp.version_id, None); @@ -72,7 +72,7 @@ async fn object_legal_hold() { .unwrap(); //println!("response of getting object legal hold: resp={:?}", resp); assert!(!resp.enabled); - assert_eq!(resp.object_name, object_name); + assert_eq!(resp.object, object_name); assert_eq!(resp.bucket, bucket_name); assert_eq!(resp.region, DEFAULT_REGION); assert_eq!(resp.version_id, None); @@ -85,7 +85,7 @@ async fn object_legal_hold() { .await .unwrap(); //println!("response of setting object legal hold: resp={:?}", resp); - assert_eq!(resp.object_name, object_name); + assert_eq!(resp.object, object_name); assert_eq!(resp.bucket, bucket_name); assert_eq!(resp.region, 
DEFAULT_REGION); assert_eq!(resp.version_id, None); @@ -99,7 +99,7 @@ async fn object_legal_hold() { .unwrap(); //println!("response of getting object legal hold: resp={:?}", resp); assert!(resp.enabled); - assert_eq!(resp.object_name, object_name); + assert_eq!(resp.object, object_name); assert_eq!(resp.bucket, bucket_name); assert_eq!(resp.region, DEFAULT_REGION); assert_eq!(resp.version_id, None); diff --git a/tests/test_object_lock_config.rs b/tests/test_object_lock_config.rs index 968991f..9e0c405 100644 --- a/tests/test_object_lock_config.rs +++ b/tests/test_object_lock_config.rs @@ -13,14 +13,14 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{CleanupGuard, TestContext, rand_bucket_name}; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteObjectLockConfigResponse, GetObjectLockConfigResponse, SetObjectLockConfigResponse, }; use minio::s3::types::{ObjectLockConfig, RetentionMode, S3Api}; +use minio_common::cleanup_guard::CleanupGuard; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_bucket_name; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn set_get_delete_object_lock_config() { @@ -32,7 +32,7 @@ async fn set_get_delete_object_lock_config() { .send() .await .unwrap(); - let _cleanup = CleanupGuard::new(&ctx, &bucket_name); + let _cleanup = CleanupGuard::new(&ctx.client, &bucket_name); const DURATION_DAYS: i32 = 7; let config = diff --git a/tests/test_put_object_content.rs b/tests/test_object_put.rs similarity index 82% rename from tests/test_put_object_content.rs rename to tests/test_object_put.rs index 9b496b8..3546eda 100644 --- a/tests/test_put_object_content.rs +++ b/tests/test_object_put.rs @@ -13,20 +13,21 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{RandSrc, TestContext, create_bucket_helper, rand_object_name}; use http::header; use minio::s3::args::StatObjectArgs; use minio::s3::builders::ObjectContent; use minio::s3::error::Error; +use minio::s3::response::{PutObjectContentResponse, RemoveObjectResponse, StatObjectResponse}; use minio::s3::types::S3Api; +use minio_common::rand_src::RandSrc; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; use tokio::sync::mpsc; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn put_object() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let size = 16_u64; @@ -68,7 +69,7 @@ async fn put_object() { #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn put_object_multipart() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let size: u64 = 16 + 5 * 1024 * 1024; @@ -98,29 +99,27 @@ async fn put_object_multipart() { } #[tokio::test(flavor = "multi_thread", worker_threads = 10)] -async fn put_object_content() { +async fn put_object_content_1() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); - let sizes = [16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; for size in sizes.iter() { - let data_src = RandSrc::new(*size); - let rsp = ctx + let resp: PutObjectContentResponse = ctx .client .put_object_content( &bucket_name, &object_name, - ObjectContent::new_from_stream(data_src, Some(*size)), + ObjectContent::new_from_stream(RandSrc::new(*size), Some(*size)), ) .content_type(String::from("image/jpeg")) .send() .await .unwrap(); - assert_eq!(rsp.object_size, *size); - let etag = rsp.etag; - let resp = ctx + assert_eq!(resp.object_size, *size); + let etag = resp.etag; + let resp: StatObjectResponse = ctx .client .stat_object(&StatObjectArgs::new(&bucket_name, &object_name).unwrap()) .await @@ -131,12 +130,22 @@ async fn put_object_content() { resp.headers.get(header::CONTENT_TYPE).unwrap(), "image/jpeg" ); - ctx.client + let resp: RemoveObjectResponse = ctx + .client .remove_object(&bucket_name, object_name.as_str()) .send() .await .unwrap(); + assert!(!resp.is_delete_marker); } +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 10)] +async fn put_object_content_2() { + let ctx = TestContext::new_from_env(); + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; + let object_name = rand_object_name(); + let sizes = [16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; // Repeat test with no size specified in ObjectContent for size in sizes.iter() { @@ -169,11 +178,11 @@ async fn put_object_content() { } } -/// Test sending ObjectContent across async tasks. +/// Test sending PutObject across async tasks. 
#[tokio::test(flavor = "multi_thread", worker_threads = 10)] -async fn put_object_content_2() { +async fn put_object_content_3() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let sizes = vec![16_u64, 5 * 1024 * 1024, 16 + 5 * 1024 * 1024]; @@ -201,14 +210,14 @@ async fn put_object_content_2() { tokio::spawn(async move { let mut idx = 0; while let Some(item) = receiver.recv().await { - let rsp = client + let resp: PutObjectContentResponse = client .put_object_content(&test_bucket, &object_name, item) .send() .await .unwrap(); - assert_eq!(rsp.object_size, sizes[idx]); - let etag = rsp.etag; - let resp = client + assert_eq!(resp.object_size, sizes[idx]); + let etag = resp.etag; + let resp: StatObjectResponse = client .stat_object(&StatObjectArgs::new(&test_bucket, &object_name).unwrap()) .await .unwrap(); diff --git a/tests/test_remove_objects.rs b/tests/test_object_remove.rs similarity index 92% rename from tests/test_remove_objects.rs rename to tests/test_object_remove.rs index 0990696..72579f6 100644 --- a/tests/test_remove_objects.rs +++ b/tests/test_object_remove.rs @@ -13,18 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use minio::s3::builders::ObjectToDelete; use minio::s3::response::PutObjectContentResponse; use minio::s3::types::ToStream; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; use tokio_stream::StreamExt; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn remove_objects() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let mut names: Vec = Vec::new(); for _ in 1..=3 { diff --git a/tests/test_object_retention.rs b/tests/test_object_retention.rs index 27daa0a..c528dc8 100644 --- a/tests/test_object_retention.rs +++ b/tests/test_object_retention.rs @@ -13,18 +13,18 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{CleanupGuard, TestContext, rand_bucket_name, rand_object_name}; -use common::RandSrc; use minio::s3::builders::ObjectContent; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ - GetObjectRetentionResponse, MakeBucketResponse, PutObjectContentResponse, RemoveObjectResponse, + GetObjectRetentionResponse, MakeBucketResponse, PutObjectContentResponse, SetObjectRetentionResponse, }; use minio::s3::types::{RetentionMode, S3Api}; use minio::s3::utils::{to_iso8601utc, utc_now}; +use minio_common::cleanup_guard::CleanupGuard; +use minio_common::rand_src::RandSrc; +use minio_common::test_context::TestContext; +use minio_common::utils::{rand_bucket_name, rand_object_name}; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn object_retention() { @@ -37,7 +37,7 @@ async fn object_retention() { .send() .await .unwrap(); - let _cleanup = CleanupGuard::new(&ctx, &bucket_name); + let _cleanup = CleanupGuard::new(&ctx.client, &bucket_name); assert_eq!(resp.bucket, bucket_name); let object_name = rand_object_name(); @@ -57,7 +57,7 @@ async fn object_retention() { assert_eq!(resp.object, object_name); assert_eq!(resp.object_size, size); //assert_eq!(resp.version_id, None); - assert_eq!(resp.location, ""); + assert_eq!(resp.region, ""); //assert_eq!(resp.etag, ""); let retain_until_date = utc_now() + chrono::Duration::days(1); @@ -114,16 +114,4 @@ async fn object_retention() { assert_eq!(resp.version_id, None); assert_eq!(resp.region, DEFAULT_REGION); assert_eq!(resp.object, object_name); - - let resp: RemoveObjectResponse = ctx - .client - .remove_object( - &bucket_name, - (object_name.as_str(), obj_resp.version_id.as_deref()), - ) - .send() - .await - .unwrap(); - //assert_eq!(resp.version_id, None); - assert!(resp.is_delete_marker); } diff --git a/tests/test_object_tags.rs b/tests/test_object_tags.rs index 9c7f988..cd92b28 100644 --- a/tests/test_object_tags.rs +++ b/tests/test_object_tags.rs @@ -13,23 +13,22 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; -use common::RandSrc; use minio::s3::builders::ObjectContent; use minio::s3::client::DEFAULT_REGION; use minio::s3::response::{ DeleteObjectTagsResponse, GetObjectTagsResponse, PutObjectContentResponse, - RemoveObjectResponse, SetObjectTagsResponse, + SetObjectTagsResponse, }; use minio::s3::types::S3Api; +use minio_common::rand_src::RandSrc; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; use std::collections::HashMap; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn object_tags() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let size = 16_u64; @@ -47,7 +46,7 @@ async fn object_tags() { assert_eq!(resp.object, object_name); assert_eq!(resp.object_size, size); assert_eq!(resp.version_id, None); - assert_eq!(&resp.location, ""); + assert_eq!(&resp.region, ""); let tags = HashMap::from([ (String::from("Project"), String::from("Project One")), @@ -104,13 +103,4 @@ async fn object_tags() { assert_eq!(resp.object, object_name); assert_eq!(resp.version_id, None); assert_eq!(resp.region, DEFAULT_REGION); - - let resp: RemoveObjectResponse = ctx - .client - .remove_object(&bucket_name, object_name.as_str()) - .send() - .await - .unwrap(); - assert_eq!(resp.version_id, None); - assert!(!resp.is_delete_marker) } diff --git a/tests/test_select_object_content.rs b/tests/test_select_object_content.rs index 75d517d..c12c15d 100644 --- a/tests/test_select_object_content.rs +++ b/tests/test_select_object_content.rs @@ -13,20 +13,17 @@ // See the License for the specific language governing permissions and // limitations under the License. -mod common; - -use crate::common::{TestContext, create_bucket_helper, rand_object_name}; use minio::s3::args::SelectObjectContentArgs; -use minio::s3::response::RemoveObjectResponse; use minio::s3::types::{ - CsvInputSerialization, CsvOutputSerialization, FileHeaderInfo, QuoteFields, S3Api, - SelectRequest, + CsvInputSerialization, CsvOutputSerialization, FileHeaderInfo, QuoteFields, SelectRequest, }; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn select_object_content() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let mut data = String::new(); @@ -81,11 +78,4 @@ async fn select_object_content() { got += core::str::from_utf8(&buf[..size]).unwrap(); } assert_eq!(got, data); - let resp: RemoveObjectResponse = ctx - .client - .remove_object(&bucket_name, object_name.as_str()) - .send() - .await - .unwrap(); - assert!(!resp.is_delete_marker); } diff --git a/tests/test_upload_download_object.rs b/tests/test_upload_download_object.rs index 435edc4..cd87454 100644 --- a/tests/test_upload_download_object.rs +++ b/tests/test_upload_download_object.rs @@ -13,12 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-mod common; - -use crate::common::{RandReader, TestContext, create_bucket_helper, rand_object_name}; use hex::ToHex; use minio::s3::response::PutObjectContentResponse; use minio::s3::types::S3Api; +use minio_common::rand_reader::RandReader; +use minio_common::test_context::TestContext; +use minio_common::utils::rand_object_name; #[cfg(feature = "ring")] use ring::digest::{Context, SHA256}; #[cfg(not(feature = "ring"))] @@ -50,7 +50,7 @@ fn get_hash(filename: &String) -> String { #[tokio::test(flavor = "multi_thread", worker_threads = 10)] async fn upload_download_object() { let ctx = TestContext::new_from_env(); - let (bucket_name, _cleanup) = create_bucket_helper(&ctx).await; + let (bucket_name, _cleanup) = ctx.create_bucket_helper().await; let object_name = rand_object_name(); let size = 16_u64; @@ -128,18 +128,4 @@ async fn upload_download_object() { .await .unwrap(); assert_eq!(get_hash(&object_name), get_hash(&filename)); - - fs::remove_file(&object_name).unwrap(); - fs::remove_file(&filename).unwrap(); - - ctx.client - .remove_object(&bucket_name, object_name.as_str()) - .send() - .await - .unwrap(); - ctx.client - .remove_object(&bucket_name, object_name.as_str()) - .send() - .await - .unwrap(); }
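For reference, a minimal sketch of the test shape the migrated integration tests above now share. This is not part of the patch; the minio_common helper signatures (TestContext::new_from_env, create_bucket_helper, RandSrc::new, rand_object_name) are assumed from their usage in the hunks above rather than from the crate's documentation.

use minio::s3::builders::ObjectContent;
use minio::s3::response::PutObjectContentResponse;
use minio::s3::types::S3Api;
use minio_common::rand_src::RandSrc;
use minio_common::test_context::TestContext;
use minio_common::utils::rand_object_name;

#[tokio::test(flavor = "multi_thread", worker_threads = 10)]
async fn example_put_and_verify() {
    // Build a client/test context from environment variables.
    let ctx = TestContext::new_from_env();
    // Create a uniquely named bucket; the returned guard removes it on drop.
    let (bucket_name, _cleanup) = ctx.create_bucket_helper().await;
    let object_name = rand_object_name();

    // Upload a small object from a random-data stream of known length.
    let size = 16_u64;
    let resp: PutObjectContentResponse = ctx
        .client
        .put_object_content(
            &bucket_name,
            &object_name,
            ObjectContent::new_from_stream(RandSrc::new(size), Some(size)),
        )
        .send()
        .await
        .unwrap();
    assert_eq!(resp.object_size, size);
}

Tests that create their bucket directly instead of via create_bucket_helper (for example tests/test_object_retention.rs above) register cleanup explicitly with CleanupGuard::new(&ctx.client, &bucket_name).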