Refactor/cleanup errors (#179)

* error refactored: moved to thiserror
* Result type alias for better ergonomics:
* removed field from MinioErrorCode::BucketNotEmpty enum
* made field of MinioErrorResponse private
* updated XmlError
* simplified calling errors
* bumped toolchain channel from 1.86.0 to 1.87.0
* bumped toolchain channel from 1.87.0 to 1.88.0 due to clippy fixes that are not compatible with 1.87.0
This commit is contained in:
Henk-Jan Lebbink 2025-08-15 06:31:45 +02:00 committed by GitHub
parent e73fa1019c
commit 5080bf9b85
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
133 changed files with 2214 additions and 1701 deletions

View File

@ -11,7 +11,7 @@ keywords = ["object-storage", "minio", "s3"]
categories = ["api-bindings", "web-programming::http-client"]
[dependencies.reqwest]
version = "0.12.18"
version = "0.12.22"
default-features = false
features = ["stream"]
@ -51,19 +51,20 @@ regex = "1.11.1"
ring = { version = "0.17.14", optional = true, default-features = false, features = ["alloc"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
sha2 = { version = "0.10.8", optional = true }
sha2 = { version = "0.10.9", optional = true }
urlencoding = "2.1.3"
xmltree = "0.11.0"
futures = "0.3.31"
http = "1.3.1"
thiserror = "2.0.12"
[dev-dependencies]
tokio = { version = "1.45.1", features = ["full"] }
tokio = { version = "1.47.1", features = ["full"] }
minio_common = { path = "./common" }
async-std = { version = "1.13.1", features = ["attributes", "tokio1"] }
clap = { version = "4.5.40", features = ["derive"] }
clap = { version = "4.5.44", features = ["derive"] }
quickcheck = "1.0.3"
criterion = "0.6.0"
criterion = "0.7.0"
minio-macros = { path = "./macros" }
[lib]

View File

@ -17,6 +17,7 @@ use crate::common_benches::{Ctx2, benchmark_s3_api};
use criterion::Criterion;
use minio::s3::builders::AppendObject;
use minio::s3::error::Error;
use minio::s3::response::StatObjectResponse;
use minio::s3::segmented_bytes::SegmentedBytes;
use minio::s3::types::S3Api;
@ -38,8 +39,9 @@ pub(crate) async fn bench_object_append(criterion: &mut Criterion) {
let data1: SegmentedBytes = SegmentedBytes::from(content1.to_string());
let resp: StatObjectResponse = task::block_in_place(|| {
tokio::runtime::Runtime::new()?
.block_on(ctx.client.stat_object(&ctx.bucket, &ctx.object).send())
let runtime =
tokio::runtime::Runtime::new().map_err(|e| Error::DriveIo(e.into()))?;
runtime.block_on(ctx.client.stat_object(&ctx.bucket, &ctx.object).send())
})
.unwrap();

View File

@ -24,7 +24,6 @@ use minio_common::utils::{
get_bytes_from_response, get_response_from_bytes, rand_bucket_name, rand_object_name,
};
use std::env;
use tokio::runtime::Runtime;
pub(crate) struct Ctx2 {
@ -170,7 +169,7 @@ pub(crate) fn benchmark_s3_api<ApiType, GlobalSetupFuture>(
pub(crate) async fn skip_express_mode(bench_name: &str) -> bool {
let skip = TestContext::new_from_env().client.is_minio_express().await;
if skip {
println!("Skipping benchmark '{}' (MinIO Express mode)", bench_name);
println!("Skipping benchmark '{bench_name}' (MinIO Express mode)");
}
skip
}

View File

@ -5,16 +5,16 @@ edition = "2024"
[dependencies]
minio = {path = ".." }
tokio = { version = "1.45.1", features = ["full"] }
tokio = { version = "1.47.1", features = ["full"] }
async-std = "1.13.1"
rand = { version = "0.8.5", features = ["small_rng"] }
bytes = "1.10.1"
log = "0.4.27"
chrono = "0.4.41"
reqwest = "0.12.20"
reqwest = "0.12.22"
http = "1.3.1"
futures = "0.3.31"
uuid = { version = "1.17.0", features = ["v4"] }
uuid = { version = "1.18.0", features = ["v4"] }
[lib]
name = "minio_common"

View File

@ -46,7 +46,7 @@ pub async fn cleanup(client: Client, bucket_name: &str) {
//eprintln!("Bucket {} removed successfully", bucket_name);
}
Err(e) => {
eprintln!("Error removing bucket {}: {:?}", bucket_name, e);
eprintln!("Error removing bucket '{}':\n{}", bucket_name, e);
}
}
}

View File

@ -45,7 +45,7 @@ pub fn create_bucket_notification_config_example() -> NotificationConfig {
String::from("s3:ObjectCreated:Put"),
String::from("s3:ObjectCreated:Copy"),
],
id: Some("".to_string()), //TODO or should this be NONE??
id: None, //Some("".to_string()), //TODO or should this be NONE??
prefix_filter_rule: Some(PrefixFilterRule {
value: String::from("images"),
}),

View File

@ -57,23 +57,21 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
offset_bytes += data_size;
if resp.object_size() != offset_bytes {
panic!(
"from the append_object: size mismatch: expected {}, got {}",
"from the append_object: size mismatch: expected {}, got {offset_bytes}",
resp.object_size(),
offset_bytes
)
}
//println!("Append response: {:#?}", resp);
//println!("Append response: {resp:#?}");
let resp: StatObjectResponse = client.stat_object(bucket_name, object_name).send().await?;
if resp.size()? != offset_bytes {
panic!(
"from the stat_Object: size mismatch: expected {}, got {}",
"from the stat_Object: size mismatch: expected {}, got {offset_bytes}",
resp.size()?,
offset_bytes
)
}
println!("{}/{}", i, n_segments);
//println!("Stat response: {:#?}", resp);
println!("{i}/{n_segments}");
//println!("Stat response: {resp:#?}");
}
Ok(())

View File

@ -33,7 +33,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
log::info!("encryption before: config={:?}", resp.config());
let config = SseConfig::default();
log::info!("going to set encryption config={:?}", config);
log::info!("going to set encryption config={config:?}");
let _resp: PutBucketEncryptionResponse = client
.put_bucket_encryption(bucket_name)

View File

@ -35,7 +35,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
// TODO
let resp: GetBucketLifecycleResponse =
client.get_bucket_lifecycle(bucket_name).send().await?;
log::info!("life cycle settings before setting: resp={:?}", resp);
log::info!("life cycle settings before setting: resp={resp:?}");
}
let rules: Vec<LifecycleRule> = vec![LifecycleRule {
@ -54,20 +54,20 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
.life_cycle_config(LifecycleConfig { rules })
.send()
.await?;
log::info!("response of setting life cycle config: resp={:?}", resp);
log::info!("response of setting life cycle config: resp={resp:?}");
if false {
// TODO
let resp: GetBucketLifecycleResponse =
client.get_bucket_lifecycle(bucket_name).send().await?;
log::info!("life cycle settings after setting: resp={:?}", resp);
log::info!("life cycle settings after setting: resp={resp:?}");
}
if false {
// TODO
let resp: DeleteBucketLifecycleResponse =
client.delete_bucket_lifecycle(bucket_name).send().await?;
log::info!("response of deleting lifecycle config: resp={:?}", resp);
log::info!("response of deleting lifecycle config: resp={resp:?}");
}
Ok(())
}

View File

@ -7,7 +7,7 @@ use minio::s3::{Client, ClientBuilder};
#[allow(dead_code)]
pub fn create_client_on_play() -> Result<Client, Box<dyn std::error::Error + Send + Sync>> {
let base_url = "https://play.min.io".parse::<BaseUrl>()?;
log::info!("Trying to connect to MinIO at: `{:?}`", base_url);
log::info!("Trying to connect to MinIO at: `{base_url:?}`");
let static_provider = StaticProvider::new(
"Q3AM3UQ867SPQQA43P2F",
@ -24,7 +24,7 @@ pub fn create_client_on_play() -> Result<Client, Box<dyn std::error::Error + Sen
#[allow(dead_code)]
pub fn create_client_on_localhost() -> Result<Client, Box<dyn std::error::Error + Send + Sync>> {
let base_url = "http://localhost:9000/".parse::<BaseUrl>()?;
log::info!("Trying to connect to MinIO at: `{:?}`", base_url);
log::info!("Trying to connect to MinIO at: `{base_url:?}`");
let static_provider = StaticProvider::new("minioadmin", "minioadmin", None);

View File

@ -30,7 +30,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
//Note: object prompt is not supported on play.min.io, you will need point to AIStor
let base_url = "http://localhost:9000".parse::<BaseUrl>()?;
log::info!("Trying to connect to MinIO at: `{:?}`", base_url);
log::info!("Trying to connect to MinIO at: `{base_url:?}`");
let static_provider = StaticProvider::new("admin", "admin", None);

View File

@ -9,11 +9,11 @@ proc-macro = true
[dependencies]
syn = "2.0.53"
proc-macro2 = "1.0.37"
quote = "1.0.18"
darling = "0.20.8"
darling_core = "0.20.8"
syn = "2.0.104"
proc-macro2 = "1.0.95"
quote = "1.0.40"
darling = "0.21.0"
darling_core = "0.21.0"
uuid = { version = "1.17.0", features = ["v4"] }
[dev-dependencies]

View File

@ -1,4 +1,4 @@
[toolchain]
channel = "1.86.0"
channel = "1.88.0"
components = ["clippy", "rustfmt"]
#targets = ["x86_64-unknown-linux-musl"]

View File

@ -17,14 +17,16 @@ use crate::s3::Client;
use crate::s3::builders::{
ContentStream, MAX_MULTIPART_COUNT, ObjectContent, Size, calc_part_info,
};
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::error::{Error, IoError};
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::a_response_traits::HasObjectSize;
use crate::s3::response::{AppendObjectResponse, StatObjectResponse};
use crate::s3::segmented_bytes::SegmentedBytes;
use crate::s3::sse::Sse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, check_object_name};
use crate::s3::utils::{check_bucket_name, check_object_name, check_sse};
use http::Method;
use std::sync::Arc;
// region: append-object
@ -83,20 +85,13 @@ impl S3Api for AppendObject {
}
impl ToS3Request for AppendObject {
fn to_s3request(self) -> Result<S3Request, Error> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if let Some(v) = &self.sse {
if v.tls_required() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
}
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
check_sse(&self.sse, &self.client)?;
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
headers.add("x-amz-write-offset-bytes", self.offset_bytes.to_string());
headers.add(X_AMZ_WRITE_OFFSET_BYTES, self.offset_bytes.to_string());
Ok(S3Request::new(self.client, Method::PUT)
.region(self.region)
@ -191,29 +186,23 @@ impl AppendObjectContent {
}
pub async fn send(mut self) -> Result<AppendObjectResponse, Error> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if let Some(v) = &self.sse {
if v.tls_required() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
}
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
check_sse(&self.sse, &self.client)?;
{
let mut headers: Multimap = match self.extra_headers {
Some(ref headers) => headers.clone(),
None => Multimap::new(),
};
headers.add("x-amz-write-offset-bytes", self.offset_bytes.to_string());
headers.add(X_AMZ_WRITE_OFFSET_BYTES, self.offset_bytes.to_string());
self.extra_query_params = Some(headers);
}
self.content_stream = std::mem::take(&mut self.input_content)
.to_content_stream()
.await
.map_err(Error::IOError)?;
.map_err(IoError::from)?;
// object_size may be Size::Unknown.
let object_size = self.content_stream.get_size();
@ -224,7 +213,11 @@ impl AppendObjectContent {
self.part_count = n_expected_parts;
// Read the first part.
let seg_bytes = self.content_stream.read_upto(part_size as usize).await?;
let seg_bytes = self
.content_stream
.read_upto(part_size as usize)
.await
.map_err(IoError::from)?;
// get the length (if any) of the current file
let resp: StatObjectResponse = self
@ -261,7 +254,7 @@ impl AppendObjectContent {
// Not enough data!
let expected = object_size.as_u64().unwrap();
let got = seg_bytes.len() as u64;
Err(Error::InsufficientData(expected, got))
Err(ValidationErr::InsufficientData { expected, got })?
} else {
// Otherwise, we start a multipart append.
self.send_mpa(part_size, current_file_size, seg_bytes).await
@ -288,7 +281,10 @@ impl AppendObjectContent {
if let Some(v) = first_part.take() {
v
} else {
self.content_stream.read_upto(part_size as usize).await?
self.content_stream
.read_upto(part_size as usize)
.await
.map_err(IoError::from)?
}
};
part_number += 1;
@ -304,7 +300,7 @@ impl AppendObjectContent {
// Check if we have too many parts to upload.
if self.part_count.is_none() && part_number > MAX_MULTIPART_COUNT {
return Err(Error::TooManyParts);
return Err(ValidationErr::TooManyParts(part_number as u64).into());
}
// Append the part now.

View File

@ -13,10 +13,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::marker::PhantomData;
use crate::s3::client::Client;
use crate::s3::multimap::Multimap;
use std::marker::PhantomData;
/// Common parameters for bucket operations
///

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::BucketExistsResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::check_bucket_name;
@ -35,7 +35,7 @@ impl S3Api for BucketExists {
}
impl ToS3Request for BucketExists {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::HEAD)

View File

@ -15,7 +15,8 @@
use crate::s3::Client;
use crate::s3::client::{MAX_MULTIPART_COUNT, MAX_PART_SIZE};
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::a_response_traits::HasEtagFromBody;
use crate::s3::response::{
@ -26,7 +27,8 @@ use crate::s3::response::{
use crate::s3::sse::{Sse, SseCustomerKey};
use crate::s3::types::{Directive, PartInfo, Retention, S3Api, S3Request, ToS3Request};
use crate::s3::utils::{
UtcTime, check_bucket_name, check_object_name, to_http_header_value, to_iso8601utc, url_encode,
UtcTime, check_bucket_name, check_object_name, check_sse, check_ssec, to_http_header_value,
to_iso8601utc, url_encode,
};
use async_recursion::async_recursion;
use http::Method;
@ -94,15 +96,17 @@ impl S3Api for UploadPartCopy {
}
impl ToS3Request for UploadPartCopy {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if self.upload_id.is_empty() {
return Err(Error::InvalidUploadId("upload ID cannot be empty".into()));
return Err(ValidationErr::InvalidUploadId(
"upload ID cannot be empty".into(),
));
}
if !(1..=MAX_MULTIPART_COUNT).contains(&self.part_number) {
return Err(Error::InvalidPartNumber(format!(
return Err(ValidationErr::InvalidPartNumber(format!(
"part number must be between 1 and {MAX_MULTIPART_COUNT}"
)));
}
@ -225,17 +229,9 @@ impl S3Api for CopyObjectInternal {
}
impl ToS3Request for CopyObjectInternal {
fn to_s3request(self) -> Result<S3Request, Error> {
{
if let Some(v) = &self.sse {
if v.tls_required() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
if self.source.ssec.is_some() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_sse(&self.sse, &self.client)?;
check_ssec(&self.source.ssec, &self.client)?;
let mut headers = self.headers;
{
@ -259,24 +255,24 @@ impl ToS3Request for CopyObjectInternal {
tagging.push_str(&url_encode(value));
}
if !tagging.is_empty() {
headers.add("x-amz-tagging", tagging);
headers.add(X_AMZ_TAGGING, tagging);
}
}
if let Some(v) = self.retention {
headers.add("x-amz-object-lock-mode", v.mode.to_string());
headers.add(X_AMZ_OBJECT_LOCK_MODE, v.mode.to_string());
headers.add(
"x-amz-object-lock-retain-until-date",
X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE,
to_iso8601utc(v.retain_until_date),
);
}
if self.legal_hold {
headers.add("x-amz-object-lock-legal-hold", "ON");
headers.add(X_AMZ_OBJECT_LOCK_LEGAL_HOLD, "ON");
}
if let Some(v) = self.metadata_directive {
headers.add("x-amz-metadata-directive", v.to_string());
headers.add(X_AMZ_METADATA_DIRECTIVE, v.to_string());
}
if let Some(v) = self.tagging_directive {
headers.add("x-amz-tagging-directive", v.to_string());
headers.add(X_AMZ_TAGGING_DIRECTIVE, v.to_string());
}
let mut copy_source = String::from("/");
@ -287,31 +283,28 @@ impl ToS3Request for CopyObjectInternal {
copy_source.push_str("?versionId=");
copy_source.push_str(&url_encode(v));
}
headers.add("x-amz-copy-source", copy_source);
headers.add(X_AMZ_COPY_SOURCE, copy_source);
let range = self.source.get_range_value();
if !range.is_empty() {
headers.add("x-amz-copy-source-range", range);
headers.add(X_AMZ_COPY_SOURCE_RANGE, range);
}
if let Some(v) = self.source.match_etag {
headers.add("x-amz-copy-source-if-match", v);
headers.add(X_AMZ_COPY_SOURCE_IF_MATCH, v);
}
if let Some(v) = self.source.not_match_etag {
headers.add("x-amz-copy-source-if-none-match", v);
headers.add(X_AMZ_COPY_SOURCE_IF_NONE_MATCH, v);
}
if let Some(v) = self.source.modified_since {
headers.add(
"x-amz-copy-source-if-modified-since",
to_http_header_value(v),
);
headers.add(X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE, to_http_header_value(v));
}
if let Some(v) = self.source.unmodified_since {
headers.add(
"x-amz-copy-source-if-unmodified-since",
X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE,
to_http_header_value(v),
);
}
@ -425,16 +418,9 @@ impl CopyObject {
/// Functionally related to the [S3Api::send()](crate::s3::types::S3Api::send) method, but
/// specifically tailored for the `CopyObject` operation.
pub async fn send(self) -> Result<CopyObjectResponse, Error> {
{
if let Some(v) = &self.sse {
if v.tls_required() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
if self.source.ssec.is_some() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
check_sse(&self.sse, &self.client)?;
check_ssec(&self.source.ssec, &self.client)?;
let source = self.source.clone();
let stat_resp: StatObjectResponse = self
@ -458,9 +444,9 @@ impl CopyObject {
if let Some(v) = &self.metadata_directive {
match v {
Directive::Copy => {
return Err(Error::InvalidCopyDirective(
return Err(ValidationErr::InvalidCopyDirective(
"COPY metadata directive is not applicable to source object size greater than 5 GiB".into()
));
).into());
}
_ => todo!(), // Nothing to do.
}
@ -468,9 +454,9 @@ impl CopyObject {
if let Some(v) = &self.tagging_directive {
match v {
Directive::Copy => {
return Err(Error::InvalidCopyDirective(
return Err(ValidationErr::InvalidCopyDirective(
"COPY tagging directive is not applicable to source object size greater than 5 GiB".into()
));
).into());
}
_ => todo!(), // Nothing to do.
}
@ -677,7 +663,7 @@ impl ComposeObjectInternal {
// the multipart upload was successful: update the upload_id
let upload_id_cmu: String = match cmu.upload_id().await {
Ok(v) => v,
Err(e) => return (Err(e), upload_id),
Err(e) => return (Err(e.into()), upload_id),
};
upload_id.push_str(&upload_id_cmu);
@ -708,12 +694,12 @@ impl ComposeObjectInternal {
part_number += 1;
if let Some(l) = source.length {
headers.add(
"x-amz-copy-source-range",
X_AMZ_COPY_SOURCE_RANGE,
format!("bytes={}-{}", offset, offset + l - 1),
);
} else if source.offset.is_some() {
headers.add(
"x-amz-copy-source-range",
X_AMZ_COPY_SOURCE_RANGE,
format!("bytes={}-{}", offset, offset + size - 1),
);
}
@ -733,7 +719,7 @@ impl ComposeObjectInternal {
let etag = match resp.etag() {
Ok(v) => v,
Err(e) => return (Err(e), upload_id),
Err(e) => return (Err(e.into()), upload_id),
};
parts.push(PartInfo {
@ -753,7 +739,7 @@ impl ComposeObjectInternal {
let mut headers_copy = headers.clone();
headers_copy.add(
"x-amz-copy-source-range",
X_AMZ_COPY_SOURCE_RANGE,
format!("bytes={offset}-{end_bytes}"),
);
@ -772,7 +758,7 @@ impl ComposeObjectInternal {
let etag = match resp.etag() {
Ok(v) => v,
Err(e) => return (Err(e), upload_id),
Err(e) => return (Err(e.into()), upload_id),
};
parts.push(PartInfo {
@ -894,13 +880,8 @@ impl ComposeObject {
}
pub async fn send(self) -> Result<ComposeObjectResponse, Error> {
{
if let Some(v) = &self.sse {
if v.tls_required() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
}
check_sse(&self.sse, &self.client)?;
let object: String = self.object.clone();
let bucket: String = self.bucket.clone();
@ -968,7 +949,7 @@ impl ComposeSource {
/// use minio::s3::builders::ComposeSource;
/// let src = ComposeSource::new("my-src-bucket", "my-src-object").unwrap();
/// ```
pub fn new(bucket_name: &str, object_name: &str) -> Result<Self, Error> {
pub fn new(bucket_name: &str, object_name: &str) -> Result<Self, ValidationErr> {
check_bucket_name(bucket_name, true)?;
check_object_name(object_name)?;
@ -987,38 +968,38 @@ impl ComposeSource {
self.headers.as_ref().expect("B: ABORT: ComposeSource::build_headers() must be called prior to this method invocation. This should not happen.").clone()
}
pub fn build_headers(&mut self, object_size: u64, etag: String) -> Result<(), Error> {
if let Some(v) = self.offset {
if v >= object_size {
return Err(Error::InvalidComposeSourceOffset(
self.bucket.to_string(),
self.object.to_string(),
self.version_id.clone(),
v,
object_size,
));
}
pub fn build_headers(&mut self, object_size: u64, etag: String) -> Result<(), ValidationErr> {
if let Some(v) = self.offset
&& v >= object_size
{
return Err(ValidationErr::InvalidComposeSourceOffset {
bucket: self.bucket.to_string(),
object: self.object.to_string(),
version: self.version_id.clone(),
offset: v,
object_size,
});
}
if let Some(v) = self.length {
if v > object_size {
return Err(Error::InvalidComposeSourceLength(
self.bucket.to_string(),
self.object.to_string(),
self.version_id.clone(),
v,
return Err(ValidationErr::InvalidComposeSourceLength {
bucket: self.bucket.to_string(),
object: self.object.to_string(),
version: self.version_id.clone(),
length: v,
object_size,
));
});
}
if (self.offset.unwrap_or_default() + v) > object_size {
return Err(Error::InvalidComposeSourceSize(
self.bucket.to_string(),
self.object.to_string(),
self.version_id.clone(),
self.offset.unwrap_or_default() + v,
return Err(ValidationErr::InvalidComposeSourceSize {
bucket: self.bucket.to_string(),
object: self.object.to_string(),
version: self.version_id.clone(),
compose_size: self.offset.unwrap_or_default() + v,
object_size,
));
});
}
}
@ -1034,26 +1015,23 @@ impl ComposeSource {
copy_source.push_str("?versionId=");
copy_source.push_str(&url_encode(v));
}
headers.add("x-amz-copy-source", copy_source);
headers.add(X_AMZ_COPY_SOURCE, copy_source);
if let Some(v) = &self.match_etag {
headers.add("x-amz-copy-source-if-match", v);
headers.add(X_AMZ_COPY_SOURCE_IF_MATCH, v);
}
if let Some(v) = &self.not_match_etag {
headers.add("x-amz-copy-source-if-none-match", v);
headers.add(X_AMZ_COPY_SOURCE_IF_NONE_MATCH, v);
}
if let Some(v) = self.modified_since {
headers.add(
"x-amz-copy-source-if-modified-since",
to_http_header_value(v),
);
headers.add(X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE, to_http_header_value(v));
}
if let Some(v) = self.unmodified_since {
headers.add(
"x-amz-copy-source-if-unmodified-since",
X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE,
to_http_header_value(v),
);
}
@ -1062,8 +1040,8 @@ impl ComposeSource {
headers.add_multimap(v.copy_headers());
}
if !headers.contains_key("x-amz-copy-source-if-match") {
headers.add("x-amz-copy-source-if-match", etag);
if !headers.contains_key(X_AMZ_COPY_SOURCE_IF_MATCH) {
headers.add(X_AMZ_COPY_SOURCE_IF_MATCH, etag);
}
self.headers = Some(headers);
@ -1091,7 +1069,7 @@ pub struct CopySource {
}
impl CopySource {
pub fn new(bucket_name: &str, object_name: &str) -> Result<Self, Error> {
pub fn new(bucket_name: &str, object_name: &str) -> Result<Self, ValidationErr> {
check_bucket_name(bucket_name, true)?;
check_object_name(object_name)?;
@ -1161,20 +1139,20 @@ fn into_headers_copy_object(
}
if !tagging.is_empty() {
map.add("x-amz-tagging", tagging);
map.add(X_AMZ_TAGGING, tagging);
}
}
if let Some(v) = retention {
map.add("x-amz-object-lock-mode", v.mode.to_string());
map.add(X_AMZ_OBJECT_LOCK_MODE, v.mode.to_string());
map.add(
"x-amz-object-lock-retain-until-date",
X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE,
to_iso8601utc(v.retain_until_date),
);
}
if legal_hold {
map.add("x-amz-object-lock-legal-hold", "ON");
map.add(X_AMZ_OBJECT_LOCK_LEGAL_HOLD, "ON");
}
map

View File

@ -15,7 +15,8 @@
use crate::s3::Client;
use crate::s3::client::DEFAULT_REGION;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::CreateBucketResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -74,7 +75,7 @@ impl S3Api for CreateBucket {
}
impl ToS3Request for CreateBucket {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let region1: Option<&str> = self.region.as_deref();
@ -86,13 +87,16 @@ impl ToS3Request for CreateBucket {
(None, Some(v)) => v.to_string(),
(Some(r1), Some(r2)) if r1 == r2 => self.region.unwrap(), // Both are Some and equal
(Some(r1), Some(r2)) => {
return Err(Error::RegionMismatch(r1.to_string(), r2.to_string()));
return Err(ValidationErr::RegionMismatch {
bucket_region: r1.to_string(),
region: r2.to_string(),
});
}
};
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
if self.object_lock {
headers.add("x-amz-bucket-object-lock-enabled", "true");
headers.add(X_AMZ_BUCKET_OBJECT_LOCK_ENABLED, "true");
}
let data: String = match region_str.as_str() {

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteBucketResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::check_bucket_name;
@ -35,7 +35,7 @@ impl S3Api for DeleteBucket {
}
impl ToS3Request for DeleteBucket {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::DELETE)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteBucketEncryptionResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for DeleteBucketEncryption {
}
impl ToS3Request for DeleteBucketEncryption {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::DELETE)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteBucketLifecycleResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for DeleteBucketLifecycle {
}
impl ToS3Request for DeleteBucketLifecycle {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::DELETE)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteBucketNotificationResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
use crate::s3::types::{NotificationConfig, S3Api, S3Request, ToS3Request};
@ -38,7 +38,7 @@ impl S3Api for DeleteBucketNotification {
}
impl ToS3Request for DeleteBucketNotification {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
const CONFIG: NotificationConfig = NotificationConfig {

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteBucketPolicyResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for DeleteBucketPolicy {
}
impl ToS3Request for DeleteBucketPolicy {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::DELETE)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteBucketReplicationResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for DeleteBucketReplication {
}
impl ToS3Request for DeleteBucketReplication {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::DELETE)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteBucketTaggingResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for DeleteBucketTagging {
}
impl ToS3Request for DeleteBucketTagging {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::DELETE)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::DeleteObjectLockConfigResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
use crate::s3::types::{ObjectLockConfig, S3Api, S3Request, ToS3Request};
@ -34,7 +34,7 @@ impl S3Api for DeleteObjectLockConfig {
}
impl ToS3Request for DeleteObjectLockConfig {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let config = ObjectLockConfig {

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::DeleteObjectTaggingResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -74,7 +74,7 @@ impl S3Api for DeleteObjectTagging {
}
impl ToS3Request for DeleteObjectTagging {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;

View File

@ -13,27 +13,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//! Builders for RemoveObject APIs.
use crate::s3::Client;
use crate::s3::client::MAX_MULTIPART_COUNT;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::DeleteError;
use crate::s3::types::ListEntry;
use crate::s3::utils::{check_object_name, insert};
use crate::s3::{
Client,
error::Error,
response::{DeleteObjectResponse, DeleteObjectsResponse},
types::{S3Api, S3Request, ToS3Request, ToStream},
utils::{check_bucket_name, md5sum_hash},
};
use crate::s3::response::{DeleteError, DeleteObjectResponse, DeleteObjectsResponse};
use crate::s3::types::{ListEntry, S3Api, S3Request, ToS3Request, ToStream};
use crate::s3::utils::{check_bucket_name, check_object_name, insert, md5sum_hash};
use async_trait::async_trait;
use bytes::Bytes;
use futures_util::stream::iter;
use futures_util::{Stream, StreamExt, stream as futures_stream};
use http::Method;
use std::pin::Pin;
// region: object-to-delete
pub trait ValidKey: Into<String> {}
@ -154,7 +147,7 @@ impl S3Api for DeleteObject {
}
impl ToS3Request for DeleteObject {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object.key)?;
@ -163,7 +156,7 @@ impl ToS3Request for DeleteObject {
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
if self.bypass_governance_mode {
headers.add("x-amz-bypass-governance-retention", "true");
headers.add(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true");
}
Ok(S3Request::new(self.client, Method::DELETE)
@ -238,7 +231,7 @@ impl S3Api for DeleteObjects {
}
impl ToS3Request for DeleteObjects {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let mut data: String = String::from("<Delete>");
@ -263,10 +256,10 @@ impl ToS3Request for DeleteObjects {
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
{
if self.bypass_governance_mode {
headers.add("x-amz-bypass-governance-retention", "true");
headers.add(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true");
}
headers.add("Content-Type", "application/xml");
headers.add("Content-MD5", md5sum_hash(bytes.as_ref()));
headers.add(CONTENT_TYPE, "application/xml");
headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref()));
}
Ok(S3Request::new(self.client, Method::POST)
@ -374,7 +367,7 @@ impl DeleteObjectsStreaming {
self
}
async fn next_request(&mut self) -> Result<Option<DeleteObjects>, Error> {
async fn next_request(&mut self) -> Result<Option<DeleteObjects>, ValidationErr> {
let mut objects = Vec::new();
while let Some(object) = self.objects.items.next().await {
objects.push(object);
@ -413,7 +406,7 @@ impl ToStream for DeleteObjectsStreaming {
Some((response, this))
}
Ok(None) => None,
Err(e) => Some((Err(e), this)),
Err(e) => Some((Err(e.into()), this)),
}
},
)))

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::GetBucketEncryptionResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for GetBucketEncryption {
}
impl ToS3Request for GetBucketEncryption {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::GetBucketLifecycleResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -66,7 +66,7 @@ impl S3Api for GetBucketLifecycle {
}
impl ToS3Request for GetBucketLifecycle {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let mut query_params: Multimap = insert(self.extra_query_params, "lifecycle");

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::GetBucketNotificationResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for GetBucketNotification {
}
impl ToS3Request for GetBucketNotification {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::GetBucketPolicyResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for GetBucketPolicy {
}
impl ToS3Request for GetBucketPolicy {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::GetBucketReplicationResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for GetBucketReplication {
}
impl ToS3Request for GetBucketReplication {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::GetBucketTaggingResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -73,7 +73,7 @@ impl S3Api for GetBucketTagging {
}
impl ToS3Request for GetBucketTagging {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::GetBucketVersioningResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for GetBucketVersioning {
}
impl ToS3Request for GetBucketVersioning {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -13,18 +13,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use http::Method;
use crate::s3::client::Client;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::utils::check_object_name;
use crate::s3::{
client::Client,
error::Error,
response::GetObjectResponse,
sse::{Sse, SseCustomerKey},
types::{S3Api, S3Request, ToS3Request},
utils::{UtcTime, check_bucket_name, to_http_header_value},
use crate::s3::response::GetObjectResponse;
use crate::s3::sse::{Sse, SseCustomerKey};
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{
UtcTime, check_bucket_name, check_object_name, check_ssec, to_http_header_value,
};
use http::Method;
/// Argument builder for the [`GetObject`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html) S3 API operation.
///
@ -122,14 +121,10 @@ impl S3Api for GetObject {
}
impl ToS3Request for GetObject {
fn to_s3request(self) -> Result<S3Request, Error> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if self.ssec.is_some() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
check_ssec(&self.ssec, &self.client)?;
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
{
@ -147,24 +142,24 @@ impl ToS3Request for GetObject {
if let Some(l) = length {
range.push_str(&(o + l - 1).to_string());
}
headers.add("Range", range);
headers.add(RANGE, range);
}
}
if let Some(v) = self.match_etag {
headers.add("if-match", v);
headers.add(IF_MATCH, v);
}
if let Some(v) = self.not_match_etag {
headers.add("if-none-match", v);
headers.add(IF_NONE_MATCH, v);
}
if let Some(v) = self.modified_since {
headers.add("if-modified-since", to_http_header_value(v));
headers.add(IF_MODIFIED_SINCE, to_http_header_value(v));
}
if let Some(v) = self.unmodified_since {
headers.add("if-unmodified-since", to_http_header_value(v));
headers.add(IF_UNMODIFIED_SINCE, to_http_header_value(v));
}
if let Some(v) = &self.ssec {

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::GetObjectLegalHoldResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -68,7 +68,7 @@ impl S3Api for GetObjectLegalHold {
}
impl ToS3Request for GetObjectLegalHold {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::builders::BucketCommon;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::response::GetObjectLockConfigResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, insert};
@ -34,7 +34,7 @@ impl S3Api for GetObjectLockConfig {
}
impl ToS3Request for GetObjectLockConfig {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -13,16 +13,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::client::Client;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::GetObjectPromptResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
use crate::s3::sse::SseCustomerKey;
use crate::s3::utils::{check_bucket_name, check_object_name};
use crate::s3::{
client::Client,
error::Error,
response::GetObjectPromptResponse,
types::{S3Api, S3Request, ToS3Request},
};
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, check_object_name, check_ssec};
use bytes::Bytes;
use http::Method;
use serde_json::json;
@ -94,16 +92,13 @@ impl S3Api for GetObjectPrompt {
}
impl ToS3Request for GetObjectPrompt {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
check_ssec(&self.ssec, &self.client)?;
if self.client.is_aws_host() {
return Err(Error::UnsupportedApi("ObjectPrompt".into()));
}
if self.ssec.is_some() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
return Err(ValidationErr::UnsupportedAwsApi("ObjectPrompt".into()));
}
}
let mut query_params: Multimap = self.extra_query_params.unwrap_or_default();

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::GetObjectRetentionResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -74,7 +74,7 @@ impl S3Api for GetObjectRetention {
}
impl ToS3Request for GetObjectRetention {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::GetObjectTaggingResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -74,7 +74,7 @@ impl S3Api for GetObjectTagging {
}
impl ToS3Request for GetObjectTagging {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;

View File

@ -16,6 +16,7 @@
use crate::s3::Client;
use crate::s3::creds::Credentials;
use crate::s3::error::Error;
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::GetPresignedObjectUrlResponse;
use crate::s3::signer::presign_v4;
@ -68,7 +69,6 @@ impl GetPresignedObjectUrl {
/// Sends the request to generate a presigned URL for an S3 object.
pub async fn send(self) -> Result<GetPresignedObjectUrlResponse, Error> {
// NOTE: this send function is async and because of that, not comparable with other send functions...
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
@ -91,7 +91,7 @@ impl GetPresignedObjectUrl {
if let Some(p) = &self.client.shared.provider {
let creds: Credentials = p.fetch();
if let Some(t) = creds.session_token {
query_params.add("X-Amz-Security-Token", t);
query_params.add(X_AMZ_SECURITY_TOKEN, t);
}
let date = match self.request_time {

View File

@ -15,7 +15,8 @@
use crate::s3::Client;
use crate::s3::creds::Credentials;
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::header_constants::*;
use crate::s3::signer::post_presign_v4;
use crate::s3::utils::{
UtcTime, b64encode, check_bucket_name, to_amz_date, to_iso8601utc, to_signer_date, utc_now,
@ -41,12 +42,14 @@ impl GetPresignedPolicyFormData {
.await?;
let creds: Credentials = self.client.shared.provider.as_ref().unwrap().fetch();
self.policy.form_data(
creds.access_key,
creds.secret_key,
creds.session_token,
region,
)
self.policy
.form_data(
creds.access_key,
creds.secret_key,
creds.session_token,
region,
)
.map_err(Error::Validation)
}
}
@ -82,7 +85,7 @@ impl PostPolicy {
/// let expiration = utc_now() + Duration::days(7);
/// let policy = PostPolicy::new("bucket-name", expiration).unwrap();
/// ```
pub fn new(bucket_name: &str, expiration: UtcTime) -> Result<Self, Error> {
pub fn new(bucket_name: &str, expiration: UtcTime) -> Result<Self, ValidationErr> {
check_bucket_name(bucket_name, true)?;
Ok(Self {
@ -102,11 +105,11 @@ impl PostPolicy {
fn is_reserved_element(element: &str) -> bool {
element.eq_ignore_ascii_case("bucket")
|| element.eq_ignore_ascii_case("x-amz-algorithm")
|| element.eq_ignore_ascii_case("x-amz-credential")
|| element.eq_ignore_ascii_case("x-amz-date")
|| element.eq_ignore_ascii_case("policy")
|| element.eq_ignore_ascii_case("x-amz-signature")
|| element.eq_ignore_ascii_case(X_AMZ_ALGORITHM)
|| element.eq_ignore_ascii_case(X_AMZ_CREDENTIAL)
|| element.eq_ignore_ascii_case(X_AMZ_DATE)
|| element.eq_ignore_ascii_case(POLICY)
|| element.eq_ignore_ascii_case(X_AMZ_SIGNATURE)
}
fn get_credential_string(access_key: &String, date: &UtcTime, region: &String) -> String {
@ -131,10 +134,14 @@ impl PostPolicy {
/// // Add condition that 'key' (object name) equals to 'bucket-name'
/// policy.add_equals_condition("key", "bucket-name").unwrap();
/// ```
pub fn add_equals_condition(&mut self, element: &str, value: &str) -> Result<(), Error> {
pub fn add_equals_condition(
&mut self,
element: &str,
value: &str,
) -> Result<(), ValidationErr> {
if element.is_empty() {
return Err(Error::PostPolicyError(
"condition element cannot be empty".to_string(),
return Err(ValidationErr::PostPolicyError(
"condition element cannot be empty".into(),
));
}
@ -143,13 +150,15 @@ impl PostPolicy {
|| v.eq_ignore_ascii_case("redirect")
|| v.eq_ignore_ascii_case("content-length-range")
{
return Err(Error::PostPolicyError(format!(
return Err(ValidationErr::PostPolicyError(format!(
"{element} is unsupported for equals condition",
)));
}
if PostPolicy::is_reserved_element(v.as_str()) {
return Err(Error::PostPolicyError(format!("{element} cannot set")));
return Err(ValidationErr::PostPolicyError(format!(
"{element} cannot set"
)));
}
self.eq_conditions.insert(v, value.to_string());
@ -186,10 +195,14 @@ impl PostPolicy {
/// // Add condition that 'Content-Type' starts with 'image/'
/// policy.add_starts_with_condition("Content-Type", "image/").unwrap();
/// ```
pub fn add_starts_with_condition(&mut self, element: &str, value: &str) -> Result<(), Error> {
pub fn add_starts_with_condition(
&mut self,
element: &str,
value: &str,
) -> Result<(), ValidationErr> {
if element.is_empty() {
return Err(Error::PostPolicyError(
"condition element cannot be empty".to_string(),
return Err(ValidationErr::PostPolicyError(
"condition element cannot be empty".into(),
));
}
@ -198,13 +211,15 @@ impl PostPolicy {
|| v.eq_ignore_ascii_case("content-length-range")
|| (v.starts_with("x-amz-") && v.starts_with("x-amz-meta-"))
{
return Err(Error::PostPolicyError(format!(
return Err(ValidationErr::PostPolicyError(format!(
"{element} is unsupported for starts-with condition",
)));
}
if PostPolicy::is_reserved_element(v.as_str()) {
return Err(Error::PostPolicyError(format!("{element} cannot set")));
return Err(ValidationErr::PostPolicyError(format!(
"{element} cannot set"
)));
}
self.starts_with_conditions.insert(v, value.to_string());
@ -246,10 +261,10 @@ impl PostPolicy {
&mut self,
lower_limit: usize,
upper_limit: usize,
) -> Result<(), Error> {
) -> Result<(), ValidationErr> {
if lower_limit > upper_limit {
return Err(Error::PostPolicyError(
"lower limit cannot be greater than upper limit".to_string(),
return Err(ValidationErr::PostPolicyError(
"lower limit cannot be greater than upper limit".into(),
));
}
@ -272,16 +287,18 @@ impl PostPolicy {
secret_key: String,
session_token: Option<String>,
region: String,
) -> Result<HashMap<String, String>, Error> {
) -> Result<HashMap<String, String>, ValidationErr> {
if region.is_empty() {
return Err(Error::PostPolicyError("region cannot be empty".to_string()));
return Err(ValidationErr::PostPolicyError(
"region cannot be empty".into(),
));
}
if !self.eq_conditions.contains_key("key")
&& !self.starts_with_conditions.contains_key("key")
{
return Err(Error::PostPolicyError(
"key condition must be set".to_string(),
return Err(ValidationErr::PostPolicyError(
"key condition must be set".into(),
));
}
@ -328,13 +345,13 @@ impl PostPolicy {
let signature = post_presign_v4(&encoded_policy, &secret_key, date, &region);
let mut data: HashMap<String, String> = HashMap::new();
data.insert("x-amz-algorithm".into(), PostPolicy::ALGORITHM.to_string());
data.insert("x-amz-credential".into(), credential);
data.insert("x-amz-date".into(), amz_date);
data.insert("policy".into(), encoded_policy);
data.insert("x-amz-signature".into(), signature);
data.insert(X_AMZ_ALGORITHM.into(), PostPolicy::ALGORITHM.to_string());
data.insert(X_AMZ_CREDENTIAL.into(), credential);
data.insert(X_AMZ_DATE.into(), amz_date);
data.insert(POLICY.into(), encoded_policy);
data.insert(X_AMZ_SIGNATURE.into(), signature);
if let Some(v) = session_token {
data.insert("x-amz-security-token".into(), v);
data.insert(X_AMZ_SECURITY_TOKEN.into(), v);
}
Ok(data)

View File

@ -15,7 +15,7 @@
use crate::s3::Client;
use crate::s3::client::DEFAULT_REGION;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::GetRegionResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -63,7 +63,7 @@ impl S3Api for GetRegion {
}
impl ToS3Request for GetRegion {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
Ok(S3Request::new(self.client, Method::GET)

View File

@ -13,15 +13,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use http::Method;
use crate::s3::Client;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::ListBucketsResponse;
use crate::s3::{
Client,
error::Error,
types::{S3Api, S3Request, ToS3Request},
};
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use http::Method;
/// Argument builder for the [`ListBuckets`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html) S3 API operation.
///
@ -58,7 +55,7 @@ impl S3Api for ListBuckets {
}
impl ToS3Request for ListBuckets {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
Ok(S3Request::new(self.client, Method::GET)
.query_params(self.extra_query_params.unwrap_or_default())
.headers(self.extra_headers.unwrap_or_default()))

View File

@ -12,23 +12,19 @@
//! Argument builders for ListObject APIs.
use crate::s3::client::Client;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::ListObjectsResponse;
use crate::s3::response::list_objects::{
ListObjectVersionsResponse, ListObjectsV1Response, ListObjectsV2Response,
};
use crate::s3::types::{S3Api, S3Request, ToS3Request, ToStream};
use crate::s3::utils::{check_bucket_name, insert};
use async_trait::async_trait;
use futures_util::{Stream, StreamExt, stream as futures_stream};
use http::Method;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::utils::insert;
use crate::s3::{
client::Client,
error::Error,
response::ListObjectsResponse,
response::list_objects::{
ListObjectVersionsResponse, ListObjectsV1Response, ListObjectsV2Response,
},
types::{S3Api, S3Request, ToS3Request, ToStream},
utils::check_bucket_name,
};
fn add_common_list_objects_query_params(
query_params: &mut Multimap,
delimiter: Option<String>,
@ -114,7 +110,7 @@ impl S3Api for ListObjectsV1 {
}
impl ToS3Request for ListObjectsV1 {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let mut query_params: Multimap = self.extra_query_params.unwrap_or_default();
@ -219,7 +215,7 @@ impl S3Api for ListObjectsV2 {
}
impl ToS3Request for ListObjectsV2 {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let mut query_params: Multimap = self.extra_query_params.unwrap_or_default();
@ -340,7 +336,7 @@ impl S3Api for ListObjectVersions {
}
impl ToS3Request for ListObjectVersions {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let mut query_params: Multimap = insert(self.extra_query_params, "versions");

View File

@ -13,19 +13,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::client::Client;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::ListenBucketNotificationResponse;
use crate::s3::types::{NotificationRecords, S3Api, S3Request, ToS3Request};
use crate::s3::utils::check_bucket_name;
use async_trait::async_trait;
use futures_util::Stream;
use http::Method;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::{
client::Client,
error::Error,
response::ListenBucketNotificationResponse,
types::{NotificationRecords, S3Api, S3Request, ToS3Request},
utils::check_bucket_name,
};
/// Argument builder for the [`ListenBucketNotification`](https://min.io/docs/minio/linux/developers/go/API.html#ListenBucketNotification)
///
/// This struct constructs the parameters required for the [`Client::listen_bucket_notification`](crate::s3::client::Client::listen_bucket_notification) method.
@ -92,11 +89,13 @@ impl S3Api for ListenBucketNotification {
}
impl ToS3Request for ListenBucketNotification {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
{
check_bucket_name(&self.bucket, true)?;
if self.client.is_aws_host() {
return Err(Error::UnsupportedApi("ListenBucketNotification".into()));
return Err(ValidationErr::UnsupportedAwsApi(
"ListenBucketNotification".into(),
));
}
}

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::PutBucketEncryptionResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -75,7 +75,7 @@ impl S3Api for PutBucketEncryption {
}
impl ToS3Request for PutBucketEncryption {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let bytes: Bytes = self.config.to_xml().into();

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::lifecycle_config::LifecycleConfig;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::PutBucketLifecycleResponse;
@ -74,13 +75,13 @@ impl S3Api for PutBucketLifecycle {
}
impl ToS3Request for PutBucketLifecycle {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
let bytes: Bytes = self.config.to_xml().into();
headers.add("Content-MD5", md5sum_hash(bytes.as_ref()));
headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref()));
Ok(S3Request::new(self.client, Method::PUT)
.region(self.region)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::PutBucketNotificationResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -74,7 +74,7 @@ impl S3Api for PutBucketNotification {
}
impl ToS3Request for PutBucketNotification {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let bytes: Bytes = self.config.to_xml().into();

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::PutBucketPolicyResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -74,7 +74,7 @@ impl S3Api for PutBucketPolicy {
}
impl ToS3Request for PutBucketPolicy {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let bytes: Bytes = self.config.into();

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::PutBucketReplicationResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -74,7 +74,7 @@ impl S3Api for PutBucketReplication {
}
impl ToS3Request for PutBucketReplication {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let bytes: Bytes = self.config.to_xml().into();

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::PutBucketTaggingResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -75,7 +75,7 @@ impl S3Api for PutBucketTagging {
}
impl ToS3Request for PutBucketTagging {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let data: String = {

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::PutBucketVersioningResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -126,7 +126,7 @@ impl S3Api for PutBucketVersioning {
}
impl ToS3Request for PutBucketVersioning {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let data: String = {
@ -142,7 +142,7 @@ impl ToS3Request for PutBucketVersioning {
Some(VersioningStatus::Enabled) => data.push_str("<Status>Enabled</Status>"),
Some(VersioningStatus::Suspended) => data.push_str("<Status>Suspended</Status>"),
None => {
return Err(Error::InvalidVersioningStatus(
return Err(ValidationErr::InvalidVersioningStatus(
"Missing VersioningStatus".into(),
));
}

View File

@ -14,23 +14,21 @@
// limitations under the License.
use super::ObjectContent;
use crate::s3::builders::{ContentStream, Size};
use crate::s3::client::Client;
use crate::s3::error::{Error, IoError, ValidationErr};
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::a_response_traits::HasEtagFromHeaders;
use crate::s3::segmented_bytes::SegmentedBytes;
use crate::s3::utils::{check_object_name, insert};
use crate::s3::{
builders::{ContentStream, Size},
client::Client,
error::Error,
response::{
AbortMultipartUploadResponse, CompleteMultipartUploadResponse,
CreateMultipartUploadResponse, PutObjectContentResponse, PutObjectResponse,
UploadPartResponse,
},
sse::Sse,
types::{PartInfo, Retention, S3Api, S3Request, ToS3Request},
utils::{check_bucket_name, md5sum_hash, to_iso8601utc, url_encode},
use crate::s3::response::{
AbortMultipartUploadResponse, CompleteMultipartUploadResponse, CreateMultipartUploadResponse,
PutObjectContentResponse, PutObjectResponse, UploadPartResponse,
};
use crate::s3::segmented_bytes::SegmentedBytes;
use crate::s3::sse::Sse;
use crate::s3::types::{PartInfo, Retention, S3Api, S3Request, ToS3Request};
use crate::s3::utils::{check_bucket_name, md5sum_hash, to_iso8601utc, url_encode};
use crate::s3::utils::{check_object_name, check_sse, insert};
use bytes::{Bytes, BytesMut};
use http::Method;
use std::{collections::HashMap, sync::Arc};
@ -119,7 +117,7 @@ impl S3Api for CreateMultipartUpload {
}
impl ToS3Request for CreateMultipartUpload {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
@ -195,7 +193,7 @@ impl S3Api for AbortMultipartUpload {
}
impl ToS3Request for AbortMultipartUpload {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
@ -272,15 +270,17 @@ impl CompleteMultipartUpload {
}
impl ToS3Request for CompleteMultipartUpload {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if self.upload_id.is_empty() {
return Err(Error::InvalidUploadId("upload ID cannot be empty".into()));
return Err(ValidationErr::InvalidUploadId(
"upload ID cannot be empty".into(),
));
}
if self.parts.is_empty() {
return Err(Error::EmptyParts("parts cannot be empty".into()));
return Err(ValidationErr::EmptyParts("parts cannot be empty".into()));
}
}
@ -302,8 +302,8 @@ impl ToS3Request for CompleteMultipartUpload {
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
{
headers.add("Content-Type", "application/xml");
headers.add("Content-MD5", md5sum_hash(bytes.as_ref()));
headers.add(CONTENT_TYPE, "application/xml");
headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref()));
}
let mut query_params: Multimap = self.extra_query_params.unwrap_or_default();
query_params.add("uploadId", self.upload_id);
@ -409,22 +409,24 @@ impl S3Api for UploadPart {
}
impl ToS3Request for UploadPart {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if let Some(upload_id) = &self.upload_id {
if upload_id.is_empty() {
return Err(Error::InvalidUploadId("upload ID cannot be empty".into()));
}
if let Some(upload_id) = &self.upload_id
&& upload_id.is_empty()
{
return Err(ValidationErr::InvalidUploadId(
"upload ID cannot be empty".into(),
));
}
if let Some(part_number) = self.part_number {
if !(1..=MAX_MULTIPART_COUNT).contains(&part_number) {
return Err(Error::InvalidPartNumber(format!(
"part number must be between 1 and {MAX_MULTIPART_COUNT}"
)));
}
if let Some(part_number) = self.part_number
&& !(1..=MAX_MULTIPART_COUNT).contains(&part_number)
{
return Err(ValidationErr::InvalidPartNumber(format!(
"part number must be between 1 and {MAX_MULTIPART_COUNT}"
)));
}
}
@ -523,7 +525,7 @@ impl S3Api for PutObject {
}
impl ToS3Request for PutObject {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
self.0.to_s3request()
}
}
@ -532,7 +534,7 @@ impl ToS3Request for PutObject {
// region: put-object-content
/// PutObjectContent takes a `ObjectContent` stream and uploads it to MinIO/S3.
/// PutObjectContent takes an `ObjectContent` stream and uploads it to MinIO/S3.
///
/// It is a higher level API and handles multipart uploads transparently.
#[derive(Default)]
@ -631,12 +633,13 @@ impl PutObjectContent {
pub async fn send(mut self) -> Result<PutObjectContentResponse, Error> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
check_sse(&self.sse, &self.client)?;
let input_content = std::mem::take(&mut self.input_content);
self.content_stream = input_content
.to_content_stream()
.await
.map_err(Error::IOError)?;
.map_err(IoError::from)?;
// object_size may be Size::Unknown.
let object_size = self.content_stream.get_size();
@ -646,14 +649,12 @@ impl PutObjectContent {
self.part_size = Size::Known(part_size);
self.part_count = expected_parts;
if let Some(v) = &self.sse {
if v.tls_required() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
// Read the first part.
let seg_bytes = self.content_stream.read_upto(part_size as usize).await?;
let seg_bytes = self
.content_stream
.read_upto(part_size as usize)
.await
.map_err(IoError::from)?;
// In the first part read, if:
//
@ -691,7 +692,7 @@ impl PutObjectContent {
// Not enough data!
let expected: u64 = object_size.as_u64().unwrap();
let got: u64 = seg_bytes.len() as u64;
Err(Error::InsufficientData(expected, got))
Err(ValidationErr::InsufficientData { expected, got }.into())
} else {
let bucket: String = self.bucket.clone();
let object: String = self.object.clone();
@ -754,7 +755,10 @@ impl PutObjectContent {
if let Some(v) = first_part.take() {
v
} else {
self.content_stream.read_upto(part_size as usize).await?
self.content_stream
.read_upto(part_size as usize)
.await
.map_err(IoError::from)?
}
};
part_number += 1;
@ -764,19 +768,19 @@ impl PutObjectContent {
assert!(buffer_size <= part_size, "{buffer_size} <= {part_size}",);
if (buffer_size == 0) && (part_number > 1) {
// We are done as we uploaded at least 1 part and we have reached the end of the stream.
// We are done as we uploaded at least 1 part, and we have reached the end of the stream.
break;
}
// Check if we have too many parts to upload.
if self.part_count.is_none() && (part_number > MAX_MULTIPART_COUNT) {
return Err(Error::TooManyParts);
return Err(ValidationErr::TooManyParts(part_number as u64).into());
}
if object_size.is_known() {
let exp = object_size.as_u64().unwrap();
if exp < total_read {
return Err(Error::TooMuchData(exp));
return Err(ValidationErr::TooMuchData(exp).into());
}
}
@ -808,7 +812,7 @@ impl PutObjectContent {
size: buffer_size,
});
// Finally check if we are done.
// Finally, check if we are done.
if buffer_size < part_size {
done = true;
}
@ -820,7 +824,11 @@ impl PutObjectContent {
if object_size.is_known() {
let expected = object_size.as_u64().unwrap();
if expected != size {
return Err(Error::InsufficientData(expected, size));
return Err(ValidationErr::InsufficientData {
expected,
got: size,
}
.into());
}
}
@ -851,7 +859,7 @@ fn into_headers_put_object(
retention: Option<Retention>,
legal_hold: bool,
content_type: Option<String>,
) -> Result<Multimap, Error> {
) -> Result<Multimap, ValidationErr> {
let mut map = Multimap::new();
if let Some(v) = extra_headers {
@ -862,12 +870,12 @@ fn into_headers_put_object(
// Validate it.
for (k, _) in v.iter() {
if k.is_empty() {
return Err(Error::InvalidUserMetadata(
return Err(ValidationErr::InvalidUserMetadata(
"user metadata key cannot be empty".into(),
));
}
if !k.starts_with("x-amz-meta-") {
return Err(Error::InvalidUserMetadata(format!(
return Err(ValidationErr::InvalidUserMetadata(format!(
"user metadata key '{k}' does not start with 'x-amz-meta-'",
)));
}
@ -891,27 +899,27 @@ fn into_headers_put_object(
}
if !tagging.is_empty() {
map.insert("x-amz-tagging".into(), tagging);
map.insert(X_AMZ_TAGGING.into(), tagging);
}
}
if let Some(v) = retention {
map.insert("x-amz-object-lock-mode".into(), v.mode.to_string());
map.insert(X_AMZ_OBJECT_LOCK_MODE.into(), v.mode.to_string());
map.insert(
"x-amz-object-lock-retain-until-date".into(),
X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE.into(),
to_iso8601utc(v.retain_until_date),
);
}
if legal_hold {
map.insert("x-amz-object-lock-legal-hold".into(), "ON".into());
map.insert(X_AMZ_OBJECT_LOCK_LEGAL_HOLD.into(), "ON".into());
}
// Set the Content-Type header if not already set.
if !map.contains_key("Content-Type") {
if !map.contains_key(CONTENT_TYPE) {
map.insert(
"Content-Type".into(),
content_type.unwrap_or_else(|| "application/octet-stream".into()),
CONTENT_TYPE.into(),
content_type.unwrap_or("application/octet-stream".into()),
);
}
@ -925,27 +933,30 @@ pub const MAX_MULTIPART_COUNT: u16 = 10_000;
/// Returns the size of each part to upload and the total number of parts. The
/// number of parts is `None` when the object size is unknown.
pub fn calc_part_info(object_size: Size, part_size: Size) -> Result<(u64, Option<u16>), Error> {
pub fn calc_part_info(
object_size: Size,
part_size: Size,
) -> Result<(u64, Option<u16>), ValidationErr> {
// Validate arguments against limits.
if let Size::Known(v) = part_size {
if v < MIN_PART_SIZE {
return Err(Error::InvalidMinPartSize(v));
return Err(ValidationErr::InvalidMinPartSize(v));
}
if v > MAX_PART_SIZE {
return Err(Error::InvalidMaxPartSize(v));
return Err(ValidationErr::InvalidMaxPartSize(v));
}
}
if let Size::Known(v) = object_size {
if v > MAX_OBJECT_SIZE {
return Err(Error::InvalidObjectSize(v));
}
if let Size::Known(v) = object_size
&& v > MAX_OBJECT_SIZE
{
return Err(ValidationErr::InvalidObjectSize(v));
}
match (object_size, part_size) {
// If object size is unknown, part size must be provided.
(Size::Unknown, Size::Unknown) => Err(Error::MissingPartSize),
// If the object size is unknown, the part size must be provided.
(Size::Unknown, Size::Unknown) => Err(ValidationErr::MissingPartSize),
// If object size is unknown, and part size is known, the number of
// parts will be unknown, so return None for that.
@ -954,8 +965,7 @@ pub fn calc_part_info(object_size: Size, part_size: Size) -> Result<(u64, Option
// If object size is known, and part size is unknown, calculate part
// size.
(Size::Known(object_size), Size::Unknown) => {
// 1. Calculate the minimum part size (i.e. assuming part count is
// maximum).
// 1. Calculate the minimum part size (i.e., assuming part count is the maximum).
let mut psize: u64 = (object_size as f64 / MAX_MULTIPART_COUNT as f64).ceil() as u64;
// 2. Round up to the nearest multiple of MIN_PART_SIZE.
@ -979,11 +989,11 @@ pub fn calc_part_info(object_size: Size, part_size: Size) -> Result<(u64, Option
(Size::Known(object_size), Size::Known(part_size)) => {
let part_count = (object_size as f64 / part_size as f64).ceil() as u16;
if part_count == 0 || part_count > MAX_MULTIPART_COUNT {
return Err(Error::InvalidPartCount(
return Err(ValidationErr::InvalidPartCount {
object_size,
part_size,
MAX_MULTIPART_COUNT,
));
part_count: MAX_MULTIPART_COUNT,
});
}
Ok((part_size, Some(part_count)))
@ -1002,29 +1012,29 @@ mod tests {
if let Size::Known(v) = part_size {
if v < MIN_PART_SIZE {
return match res {
Err(Error::InvalidMinPartSize(v_err)) => v == v_err,
Err(ValidationErr::InvalidMinPartSize(v_err)) => v == v_err,
_ => false,
}
}
if v > MAX_PART_SIZE {
return match res {
Err(Error::InvalidMaxPartSize(v_err)) => v == v_err,
Err(ValidationErr::InvalidMaxPartSize(v_err)) => v == v_err,
_ => false,
}
}
}
if let Size::Known(v) = object_size {
if v > MAX_OBJECT_SIZE {
if let Size::Known(v) = object_size
&& v > MAX_OBJECT_SIZE {
return match res {
Err(Error::InvalidObjectSize(v_err)) => v == v_err,
Err(ValidationErr::InvalidObjectSize(v_err)) => v == v_err,
_ => false,
}
}
}
// Validate the calculation of part size and part count.
match (object_size, part_size, res) {
(Size::Unknown, Size::Unknown, Err(Error::MissingPartSize)) => true,
(Size::Unknown, Size::Unknown, Err(ValidationErr::MissingPartSize)) => true,
(Size::Unknown, Size::Unknown, _) => false,
(Size::Unknown, Size::Known(part_size), Ok((psize, None))) => {
@ -1049,7 +1059,7 @@ mod tests {
(Size::Known(object_size), Size::Known(part_size), res) => {
if (part_size > object_size) || ((part_size * (MAX_MULTIPART_COUNT as u64)) < object_size) {
return match res {
Err(Error::InvalidPartCount(v1, v2, v3)) => {
Err(ValidationErr::InvalidPartCount{object_size:v1, part_size:v2, part_count:v3}) => {
(v1 == object_size) && (v2 == part_size) && (v3 == MAX_MULTIPART_COUNT)
}
_ => false,

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::PutObjectLegalHoldResponse;
use crate::s3::types::{S3Api, S3Request, ToS3Request};
@ -75,7 +76,7 @@ impl S3Api for PutObjectLegalHold {
}
impl ToS3Request for PutObjectLegalHold {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
@ -90,7 +91,7 @@ impl ToS3Request for PutObjectLegalHold {
let bytes: Bytes = Bytes::from(payload);
// TODO: consider a const payload with a precalculated MD5
headers.add("Content-MD5", md5sum_hash(bytes.as_ref()));
headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref()));
Ok(S3Request::new(self.client, Method::PUT)
.region(self.region)

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::Multimap;
use crate::s3::response::PutObjectLockConfigResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -74,7 +74,7 @@ impl S3Api for PutObjectLockConfig {
}
impl ToS3Request for PutObjectLockConfig {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
let bytes: Bytes = self.config.to_xml().into();

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::PutObjectRetentionResponse;
use crate::s3::types::{RetentionMode, S3Api, S3Request, ToS3Request};
@ -95,15 +96,15 @@ impl S3Api for PutObjectRetention {
}
impl ToS3Request for PutObjectRetention {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if self.retention_mode.is_some() ^ self.retain_until_date.is_some() {
return Err(Error::InvalidRetentionConfig(String::from(
"both mode and retain_until_date must be set or unset",
)));
return Err(ValidationErr::InvalidRetentionConfig(
"both mode and retain_until_date must be set or unset".into(),
));
}
}
@ -125,9 +126,9 @@ impl ToS3Request for PutObjectRetention {
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
if self.bypass_governance_mode {
headers.add("x-amz-bypass-governance-retention", "true");
headers.add(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true");
}
headers.add("Content-MD5", md5sum_hash(bytes.as_ref()));
headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref()));
let mut query_params: Multimap = insert(self.extra_query_params, "retention");
query_params.add_version(self.version_id);

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::PutObjectTaggingResponse;
use crate::s3::segmented_bytes::SegmentedBytes;
@ -83,7 +83,7 @@ impl S3Api for PutObjectTagging {
}
impl ToS3Request for PutObjectTagging {
fn to_s3request(self) -> Result<S3Request, Error> {
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;

View File

@ -14,12 +14,13 @@
// limitations under the License.
use crate::s3::Client;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::SelectObjectContentResponse;
use crate::s3::sse::SseCustomerKey;
use crate::s3::types::{S3Api, S3Request, SelectRequest, ToS3Request};
use crate::s3::utils::{check_bucket_name, check_object_name, insert, md5sum_hash};
use crate::s3::utils::{check_bucket_name, check_object_name, check_ssec, insert, md5sum_hash};
use async_trait::async_trait;
use bytes::Bytes;
use http::Method;
@ -90,19 +91,15 @@ impl S3Api for SelectObjectContent {
#[async_trait]
impl ToS3Request for SelectObjectContent {
fn to_s3request(self) -> Result<S3Request, Error> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
check_ssec(&self.ssec, &self.client)?;
if self.ssec.is_some() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
let bytes: Bytes = self.request.to_xml().into();
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
headers.add("Content-MD5", md5sum_hash(bytes.as_ref()));
headers.add(CONTENT_MD5, md5sum_hash(bytes.as_ref()));
let mut query_params: Multimap = insert(self.extra_query_params, "select");
query_params.add("select-type", "2");

View File

@ -13,19 +13,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use async_trait::async_trait;
use http::Method;
use crate::s3::client::Client;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::StatObjectResponse;
use crate::s3::utils::check_object_name;
use crate::s3::{
client::Client,
error::Error,
sse::{Sse, SseCustomerKey},
types::{S3Api, S3Request, ToS3Request},
utils::{UtcTime, check_bucket_name, to_http_header_value},
use crate::s3::sse::{Sse, SseCustomerKey};
use crate::s3::types::{S3Api, S3Request, ToS3Request};
use crate::s3::utils::{
UtcTime, check_bucket_name, check_object_name, check_ssec, to_http_header_value,
};
use async_trait::async_trait;
use http::Method;
/// Argument builder for the [`StatObject`](https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html) S3 API operation.
/// Retrieves all of the metadata from an object without returning the object itself.
@ -125,28 +124,24 @@ impl S3Api for StatObject {
#[async_trait]
impl ToS3Request for StatObject {
fn to_s3request(self) -> Result<S3Request, Error> {
{
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
if self.ssec.is_some() && !self.client.is_secure() {
return Err(Error::SseTlsRequired(None));
}
}
fn to_s3request(self) -> Result<S3Request, ValidationErr> {
check_bucket_name(&self.bucket, true)?;
check_object_name(&self.object)?;
check_ssec(&self.ssec, &self.client)?;
let mut headers: Multimap = self.extra_headers.unwrap_or_default();
{
if let Some(v) = self.match_etag {
headers.add("if-match", v);
headers.add(IF_MATCH, v);
}
if let Some(v) = self.not_match_etag {
headers.add("if-none-match", v);
headers.add(IF_NONE_MATCH, v);
}
if let Some(v) = self.modified_since {
headers.add("if-modified-since", to_http_header_value(v));
headers.add(IF_MODIFIED_SINCE, to_http_header_value(v));
}
if let Some(v) = self.unmodified_since {
headers.add("if-unmodified-since", to_http_header_value(v));
headers.add(IF_UNMODIFIED_SINCE, to_http_header_value(v));
}
if let Some(v) = self.ssec {
headers.add_multimap(v.headers());

View File

@ -23,21 +23,23 @@ use std::sync::{Arc, OnceLock};
use crate::s3::builders::{BucketExists, ComposeSource};
use crate::s3::creds::Provider;
use crate::s3::error::{Error, ErrorCode, ErrorResponse};
use crate::s3::header_constants::*;
use crate::s3::http::BaseUrl;
use crate::s3::minio_error_response::{MinioErrorCode, MinioErrorResponse};
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::response::a_response_traits::{HasEtagFromHeaders, HasS3Fields};
use crate::s3::response::*;
use crate::s3::segmented_bytes::SegmentedBytes;
use crate::s3::signer::sign_v4_s3;
use crate::s3::utils::{EMPTY_SHA256, sha256_hash_sb, to_amz_date, utc_now};
use crate::s3::utils::{EMPTY_SHA256, check_ssec_with_log, sha256_hash_sb, to_amz_date, utc_now};
use crate::s3::error::{Error, IoError, NetworkError, S3ServerError, ValidationErr};
use bytes::Bytes;
use dashmap::DashMap;
use http::HeaderMap;
use hyper::http::Method;
use rand::Rng;
use reqwest::Body;
use reqwest::{Body, Response};
mod append_object;
mod bucket_exists;
@ -197,15 +199,17 @@ impl ClientBuilder {
))]
if let Some(v) = self.ssl_cert_file {
let mut buf = Vec::new();
File::open(v)?.read_to_end(&mut buf)?;
let certs = reqwest::Certificate::from_pem_bundle(&buf)?;
let mut file = File::open(v).map_err(IoError::IOError)?;
file.read_to_end(&mut buf).map_err(IoError::IOError)?;
let certs = reqwest::Certificate::from_pem_bundle(&buf).map_err(ValidationErr::from)?;
for cert in certs {
builder = builder.add_root_certificate(cert);
}
}
Ok(Client {
http_client: builder.build()?,
http_client: builder.build().map_err(ValidationErr::from)?,
shared: Arc::new(SharedClientItems {
base_url: self.base_url,
provider: self.provider,
@ -333,17 +337,13 @@ impl Client {
let sources_len = sources.len();
for source in sources.iter_mut() {
if source.ssec.is_some() && !self.is_secure() {
return Err(Error::SseTlsRequired(Some(format!(
"source {}/{}{}: ",
source.bucket,
source.object,
source
.version_id
.as_ref()
.map_or(String::new(), |v| String::from("?versionId=") + v)
))));
}
check_ssec_with_log(
&source.ssec,
self,
&source.bucket,
&source.object,
&source.version_id,
)?;
i += 1;
@ -370,18 +370,19 @@ impl Client {
}
if (size < MIN_PART_SIZE) && (sources_len != 1) && (i != sources_len) {
return Err(Error::InvalidComposeSourcePartSize(
source.bucket.clone(),
source.object.clone(),
source.version_id.clone(),
return Err(ValidationErr::InvalidComposeSourcePartSize {
bucket: source.bucket.clone(),
object: source.object.clone(),
version: source.version_id.clone(),
size,
MIN_PART_SIZE,
));
expected_size: MIN_PART_SIZE,
}
.into());
}
object_size += size;
if object_size > MAX_OBJECT_SIZE {
return Err(Error::InvalidObjectSize(object_size));
return Err(ValidationErr::InvalidObjectSize(object_size).into());
}
if size > MAX_PART_SIZE {
@ -394,13 +395,14 @@ impl Client {
}
if last_part_size < MIN_PART_SIZE && sources_len != 1 && i != sources_len {
return Err(Error::InvalidComposeSourceMultipart(
source.bucket.to_string(),
source.object.to_string(),
source.version_id.clone(),
return Err(ValidationErr::InvalidComposeSourceMultipart {
bucket: source.bucket.to_string(),
object: source.object.to_string(),
version: source.version_id.clone(),
size,
MIN_PART_SIZE,
));
expected_size: MIN_PART_SIZE,
}
.into());
}
part_count += count as u16;
@ -409,7 +411,9 @@ impl Client {
}
if part_count > MAX_MULTIPART_COUNT {
return Err(Error::InvalidMultipartCount(MAX_MULTIPART_COUNT));
return Err(
ValidationErr::InvalidMultipartCount(MAX_MULTIPART_COUNT as u64).into(),
);
}
}
@ -436,14 +440,14 @@ impl Client {
)?;
{
headers.add("Host", url.host_header_value());
headers.add(HOST, url.host_header_value());
let sha256: String = match *method {
Method::PUT | Method::POST => {
if !headers.contains_key("Content-Type") {
headers.add("Content-Type", "application/octet-stream");
if !headers.contains_key(CONTENT_TYPE) {
headers.add(CONTENT_TYPE, "application/octet-stream");
}
let len: usize = body.as_ref().map_or(0, |b| b.len());
headers.add("Content-Length", len.to_string());
headers.add(CONTENT_LENGTH, len.to_string());
match body {
None => EMPTY_SHA256.into(),
Some(ref v) => {
@ -454,14 +458,14 @@ impl Client {
}
_ => EMPTY_SHA256.into(),
};
headers.add("x-amz-content-sha256", sha256.clone());
headers.add(X_AMZ_CONTENT_SHA256, sha256.clone());
let date = utc_now();
headers.add("x-amz-date", to_amz_date(date));
headers.add(X_AMZ_DATE, to_amz_date(date));
if let Some(p) = &self.shared.provider {
let creds = p.fetch();
if creds.session_token.is_some() {
headers.add("X-Amz-Security-Token", creds.session_token.unwrap());
headers.add(X_AMZ_SECURITY_TOKEN, creds.session_token.unwrap());
}
sign_v4_s3(
method,
@ -509,15 +513,12 @@ impl Client {
None => Vec::new(),
};
let stream = futures_util::stream::iter(
bytes_vec
.into_iter()
.map(|b| -> Result<_, std::io::Error> { Ok(b) }),
bytes_vec.into_iter().map(|b| -> Result<_, Error> { Ok(b) }),
);
req = req.body(Body::wrap_stream(stream));
}
let resp: reqwest::Response = req.send().await?;
let resp: Response = req.send().await.map_err(ValidationErr::from)?; //TODO request error handled by network error layer
if resp.status().is_success() {
return Ok(resp);
}
@ -525,9 +526,9 @@ impl Client {
let mut resp = resp;
let status_code = resp.status().as_u16();
let headers: HeaderMap = mem::take(resp.headers_mut());
let body: Bytes = resp.bytes().await?;
let body: Bytes = resp.bytes().await.map_err(ValidationErr::from)?;
let e: Error = self.shared.get_error_response(
let e: MinioErrorResponse = self.shared.create_minio_error_response(
body,
status_code,
headers,
@ -536,17 +537,17 @@ impl Client {
bucket_name,
object_name,
retry,
);
)?;
if let Error::S3Error(ref err) = e {
if (err.code == ErrorCode::NoSuchBucket) || (err.code == ErrorCode::RetryHead) {
if let Some(v) = bucket_name {
self.shared.region_map.remove(v);
}
}
// If the error is a NoSuchBucket or RetryHead, remove the bucket from the region map.
if (matches!(e.code(), MinioErrorCode::NoSuchBucket)
|| matches!(e.code(), MinioErrorCode::RetryHead))
&& let Some(v) = bucket_name
{
self.shared.region_map.remove(v);
};
Err(e)
Err(Error::S3Server(S3ServerError::S3Error(Box::new(e))))
}
pub(crate) async fn execute(
@ -574,8 +575,8 @@ impl Client {
match resp {
Ok(r) => return Ok(r),
Err(e) => match e {
Error::S3Error(ref er) => {
if er.code != ErrorCode::RetryHead {
Error::S3Server(S3ServerError::S3Error(ref er)) => {
if !matches!(er.code(), MinioErrorCode::RetryHead) {
return Err(e);
}
}
@ -614,16 +615,19 @@ impl SharedClientItems {
header_map: &reqwest::header::HeaderMap,
bucket_name: Option<&str>,
retry: bool,
) -> Result<(ErrorCode, String), Error> {
) -> Result<(MinioErrorCode, String), Error> {
let (mut code, mut message) = match status_code {
301 => (ErrorCode::PermanentRedirect, "Moved Permanently".into()),
307 => (ErrorCode::Redirect, "Temporary redirect".into()),
400 => (ErrorCode::BadRequest, "Bad request".into()),
_ => (ErrorCode::NoError, String::new()),
301 => (
MinioErrorCode::PermanentRedirect,
"Moved Permanently".into(),
),
307 => (MinioErrorCode::Redirect, "Temporary redirect".into()),
400 => (MinioErrorCode::BadRequest, "Bad request".into()),
_ => (MinioErrorCode::NoError, String::new()),
};
let region: &str = match header_map.get("x-amz-bucket-region") {
Some(v) => v.to_str()?,
let region: &str = match header_map.get(X_AMZ_BUCKET_REGION) {
Some(v) => v.to_str().map_err(ValidationErr::from)?,
_ => "",
};
@ -632,19 +636,20 @@ impl SharedClientItems {
message.push_str(region);
}
if retry && !region.is_empty() && (method == Method::HEAD) {
if let Some(v) = bucket_name {
if self.region_map.contains_key(v) {
code = ErrorCode::RetryHead;
message = String::new();
}
}
if retry
&& !region.is_empty()
&& (method == Method::HEAD)
&& let Some(v) = bucket_name
&& self.region_map.contains_key(v)
{
code = MinioErrorCode::RetryHead;
message = String::new();
}
Ok((code, message))
}
fn get_error_response(
fn create_minio_error_response(
&self,
body: Bytes,
http_status_code: u16,
@ -654,88 +659,98 @@ impl SharedClientItems {
bucket_name: Option<&str>,
object_name: Option<&str>,
retry: bool,
) -> Error {
) -> Result<MinioErrorResponse, Error> {
// if body is present, try to parse it as XML error response
if !body.is_empty() {
return match headers.get("Content-Type") {
Some(v) => match v.to_str() {
Ok(s) => match s.to_lowercase().contains("application/xml") {
true => match ErrorResponse::parse(body, headers) {
Ok(v) => Error::S3Error(v),
Err(e) => e,
},
false => Error::InvalidResponse(http_status_code, s.to_string()),
},
Err(e) => return Error::StrError(e),
},
_ => Error::InvalidResponse(http_status_code, String::new()),
let content_type = headers
.get(CONTENT_TYPE)
.ok_or_else(|| {
Error::S3Server(S3ServerError::InvalidServerResponse {
message: "missing Content-Type header".into(),
http_status_code,
content_type: String::new(),
})
})?
.to_str()
.map_err(Into::into) // ToStrError -> ValidationErr
.map_err(Error::Validation)?; // ValidationErr -> Error
return if content_type.to_lowercase().contains("application/xml") {
MinioErrorResponse::new_from_body(body, headers)
} else {
Err(Error::S3Server(S3ServerError::InvalidServerResponse {
message: format!(
"expected content-type 'application/xml', but got {content_type}"
),
http_status_code,
content_type: content_type.into(),
}))
};
}
// Decide code and message by status
let (code, message) = match http_status_code {
301 | 307 | 400 => match self.handle_redirect_response(
301 | 307 | 400 => self.handle_redirect_response(
http_status_code,
method,
&headers,
bucket_name,
retry,
) {
Ok(v) => v,
Err(e) => return e,
},
403 => (ErrorCode::AccessDenied, "Access denied".into()),
)?,
403 => (MinioErrorCode::AccessDenied, "Access denied".into()),
404 => match object_name {
Some(_) => (ErrorCode::NoSuchKey, "Object does not exist".into()),
_ => match bucket_name {
Some(_) => (ErrorCode::NoSuchBucket, "Bucket does not exist".into()),
_ => (
ErrorCode::ResourceNotFound,
Some(_) => (MinioErrorCode::NoSuchKey, "Object does not exist".into()),
None => match bucket_name {
Some(_) => (MinioErrorCode::NoSuchBucket, "Bucket does not exist".into()),
None => (
MinioErrorCode::ResourceNotFound,
"Request resource not found".into(),
),
},
},
405 => (
ErrorCode::MethodNotAllowed,
405 | 501 => (
MinioErrorCode::MethodNotAllowed,
"The specified method is not allowed against this resource".into(),
),
409 => match bucket_name {
Some(_) => (ErrorCode::NoSuchBucket, "Bucket does not exist".into()),
_ => (
ErrorCode::ResourceConflict,
Some(_) => (MinioErrorCode::NoSuchBucket, "Bucket does not exist".into()),
None => (
MinioErrorCode::ResourceConflict,
"Request resource conflicts".into(),
),
},
501 => (
ErrorCode::MethodNotAllowed,
"The specified method is not allowed against this resource".into(),
),
_ => return Error::ServerError(http_status_code),
_ => {
return Err(Error::Network(NetworkError::ServerError(http_status_code)));
}
};
let request_id: String = match headers.get("x-amz-request-id") {
Some(v) => match v.to_str() {
Ok(s) => s.to_string(),
Err(e) => return Error::StrError(e),
},
_ => String::new(),
let request_id = match headers.get(X_AMZ_REQUEST_ID) {
Some(v) => v
.to_str()
.map_err(Into::into)
.map_err(Error::Validation)? // ValidationErr -> Error
.to_string(),
None => String::new(),
};
let host_id: String = match headers.get("x-amz-id-2") {
Some(v) => match v.to_str() {
Ok(s) => s.to_string(),
Err(e) => return Error::StrError(e),
},
_ => String::new(),
let host_id = match headers.get(X_AMZ_ID_2) {
Some(v) => v
.to_str()
.map_err(Into::into)
.map_err(Error::Validation)? // ValidationErr -> Error
.to_string(),
None => String::new(),
};
Error::S3Error(ErrorResponse {
Ok(MinioErrorResponse::new(
headers,
code,
message,
resource: resource.to_string(),
(!message.is_empty()).then_some(message),
resource.to_string(),
request_id,
host_id,
bucket_name: bucket_name.unwrap_or_default().to_string(),
object_name: object_name.unwrap_or_default().to_string(),
})
bucket_name.map(String::from),
object_name.map(String::from),
))
}
}

View File

@ -15,7 +15,9 @@
use super::Client;
use crate::s3::builders::{DeleteBucket, DeleteObject, ObjectToDelete};
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::Error;
use crate::s3::error::S3ServerError::S3Error;
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::{BucketExistsResponse, DeleteResult};
use crate::s3::response::{
DeleteBucketResponse, DeleteObjectResponse, DeleteObjectsResponse, PutObjectLegalHoldResponse,
@ -130,14 +132,14 @@ impl Client {
let request: DeleteBucket = self.delete_bucket(&bucket);
match request.send().await {
Ok(resp) => Ok(resp),
Err(Error::S3Error(mut e)) => {
if matches!(e.code, ErrorCode::NoSuchBucket) {
Err(Error::S3Server(S3Error(mut e))) => {
if matches!(e.code(), MinioErrorCode::NoSuchBucket) {
Ok(DeleteBucketResponse {
request: Default::default(), //TODO consider how to handle this
body: Bytes::new(),
headers: e.headers,
headers: e.take_headers(),
})
} else if let ErrorCode::BucketNotEmpty(reason) = &e.code {
} else if matches!(e.code(), MinioErrorCode::BucketNotEmpty) {
// for convenience, add the first 5 documents that are still in the bucket
// to the error message
let mut stream = self
@ -158,11 +160,14 @@ impl Client {
// else: silently ignore the error and keep looping
}
let new_reason = format!("{reason}: found content: {objs:?}");
e.code = ErrorCode::BucketNotEmpty(new_reason);
Err(Error::S3Error(e))
let new_msg = match e.message() {
None => format!("found content: {objs:?}"),
Some(msg) => format!("{msg}, found content: {objs:?}"),
};
e.set_message(new_msg);
Err(Error::S3Server(S3Error(e)))
} else {
Err(Error::S3Error(e))
Err(Error::S3Server(S3Error(e)))
}
}
Err(e) => Err(e),

View File

@ -15,7 +15,8 @@
use super::{Client, DEFAULT_REGION};
use crate::s3::builders::GetRegion;
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::types::S3Api;
impl Client {
@ -58,10 +59,11 @@ impl Client {
if !self.shared.base_url.region.is_empty()
&& (self.shared.base_url.region != *requested_region)
{
return Err(Error::RegionMismatch(
self.shared.base_url.region.clone(),
requested_region.clone(),
));
return Err(ValidationErr::RegionMismatch {
bucket_region: self.shared.base_url.region.clone(),
region: requested_region.clone(),
}
.into());
}
return Ok(requested_region.clone());
}

View File

@ -1,414 +1,342 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::minio_error_response::MinioErrorResponse;
use thiserror::Error;
//! Error definitions for S3 operations
// Client side validation issues like invalid url or bucket name
#[derive(Error, Debug)]
pub enum ValidationErr {
/// The specified bucket is not valid
#[error("Invalid bucket name: '{name}' - {reason}")]
InvalidBucketName { name: String, reason: String },
extern crate alloc;
use crate::s3::utils::get_default_text;
use bytes::{Buf, Bytes};
use http::HeaderMap;
use std::fmt;
use xmltree::Element;
/// No Bucket name was provided
#[error("No bucket name provided")]
MissingBucketName,
#[derive(Clone, Debug, Default, PartialEq)]
pub enum ErrorCode {
#[default]
NoError,
/// Error while parsing time from string
#[error("Time parse error: {0}")]
TimeParseError(#[from] chrono::ParseError),
PermanentRedirect,
Redirect,
BadRequest,
RetryHead,
NoSuchBucket,
NoSuchBucketPolicy,
ReplicationConfigurationNotFoundError,
ServerSideEncryptionConfigurationNotFoundError,
NoSuchTagSet,
NoSuchObjectLockConfiguration,
NoSuchLifecycleConfiguration,
NoSuchKey,
ResourceNotFound,
MethodNotAllowed,
ResourceConflict,
AccessDenied,
NotSupported,
BucketNotEmpty(String), // String contains optional reason msg
BucketAlreadyOwnedByYou,
InvalidWriteOffset,
/// Error while parsing a URL from string
#[error("Invalid URL: {0}")]
InvalidUrl(#[from] http::uri::InvalidUri),
OtherError(String),
}
/// Error while performing IO operations
#[error("IO error: {0}")]
IOError(#[from] std::io::Error),
impl ErrorCode {
pub fn parse(s: &str) -> Self {
match s.to_lowercase().as_str() {
"permanentredirect" => ErrorCode::PermanentRedirect,
"redirect" => ErrorCode::Redirect,
"badrequest" => ErrorCode::BadRequest,
"retryhead" => ErrorCode::RetryHead,
"nosuchbucket" => ErrorCode::NoSuchBucket,
"nosuchbucketpolicy" => ErrorCode::NoSuchBucketPolicy,
"replicationconfigurationnotfounderror" => {
ErrorCode::ReplicationConfigurationNotFoundError
}
"serversideencryptionconfigurationnotfounderror" => {
ErrorCode::ServerSideEncryptionConfigurationNotFoundError
}
"nosuchtagset" => ErrorCode::NoSuchTagSet,
"nosuchobjectlockconfiguration" => ErrorCode::NoSuchObjectLockConfiguration,
"nosuchlifecycleconfiguration" => ErrorCode::NoSuchLifecycleConfiguration,
"nosuchkey" => ErrorCode::NoSuchKey,
"resourcenotfound" => ErrorCode::ResourceNotFound,
"methodnotallowed" => ErrorCode::MethodNotAllowed,
"resourceconflict" => ErrorCode::ResourceConflict,
"accessdenied" => ErrorCode::AccessDenied,
"notsupported" => ErrorCode::NotSupported,
"bucketnotempty" => ErrorCode::BucketNotEmpty("".to_string()),
"bucketalreadyownedbyyou" => ErrorCode::BucketAlreadyOwnedByYou,
"invalidwriteoffset" => ErrorCode::InvalidWriteOffset,
#[error("XML parse error: {0}")]
XmlParseError(#[from] xmltree::ParseError),
v => ErrorCode::OtherError(v.to_owned()),
}
}
}
#[error("HTTP error: {0}")]
HttpError(#[from] reqwest::Error),
#[derive(Clone, Debug, Default)]
/// Error response for S3 operations
pub struct ErrorResponse {
/// Headers as returned by the server.
pub(crate) headers: HeaderMap,
pub code: ErrorCode,
pub message: String,
pub resource: String,
pub request_id: String,
pub host_id: String,
pub bucket_name: String,
pub object_name: String,
}
#[error("String error: {message}")]
StrError {
message: String,
#[source]
source: Option<Box<dyn std::error::Error + Send + Sync>>,
},
impl ErrorResponse {
pub fn parse(body: Bytes, headers: HeaderMap) -> Result<Self, Error> {
let root = match Element::parse(body.reader()) {
Ok(v) => v,
Err(e) => return Err(Error::XmlParseError(e)),
};
#[error("Integer parsing error: {0}")]
IntError(#[from] std::num::ParseIntError),
Ok(Self {
headers,
code: ErrorCode::parse(&get_default_text(&root, "Code")),
message: get_default_text(&root, "Message"),
resource: get_default_text(&root, "Resource"),
request_id: get_default_text(&root, "RequestId"),
host_id: get_default_text(&root, "HostId"),
bucket_name: get_default_text(&root, "BucketName"),
object_name: get_default_text(&root, "Key"),
})
}
}
#[error("Boolean parsing error: {0}")]
BoolError(#[from] std::str::ParseBoolError),
/// Error definitions
#[derive(Debug)]
pub enum Error {
TimeParseError(chrono::ParseError),
InvalidUrl(http::uri::InvalidUri),
IOError(std::io::Error),
XmlParseError(xmltree::ParseError),
HttpError(reqwest::Error),
StrError(reqwest::header::ToStrError),
IntError(std::num::ParseIntError),
BoolError(std::str::ParseBoolError),
Utf8Error(Box<dyn std::error::Error + Send + Sync + 'static>),
JsonError(serde_json::Error),
XmlError(String),
InvalidBaseUrl(String),
InvalidBucketName(String),
UrlBuildError(String),
RegionMismatch(String, String),
S3Error(ErrorResponse),
InvalidResponse(u16, String),
ServerError(u16),
#[error("Failed to parse as UTF-8: {0}")]
Utf8Error(#[from] std::str::Utf8Error),
#[error("JSON error: {0}")]
JsonError(#[from] serde_json::Error),
#[error("XML error: {message}")]
XmlError {
message: String,
#[source]
source: Option<Box<dyn std::error::Error + Send + Sync>>,
},
#[error("Invalid object name: {0}")]
InvalidObjectName(String),
#[error("Invalid upload ID: {0}")]
InvalidUploadId(String),
#[error("Invalid part number: {0}")]
InvalidPartNumber(String),
#[error("Invalid user metadata: {0}")]
InvalidUserMetadata(String),
#[error("Invalid boolean value: {0}")]
InvalidBooleanValue(String),
#[error("Invalid integer value: {message}")]
InvalidIntegerValue {
message: String,
#[source]
source: Box<dyn std::error::Error + Send + Sync>,
},
#[error("Empty parts: {0}")]
EmptyParts(String),
#[error("Invalid retention mode: {0}")]
InvalidRetentionMode(String),
#[error("Invalid retention configuration: {0}")]
InvalidRetentionConfig(String),
#[error("Part size {0} is not supported; minimum allowed 5MiB")]
InvalidMinPartSize(u64),
#[error("Part size {0} is not supported; maximum allowed 5GiB")]
InvalidMaxPartSize(u64),
#[error("Object size {0} is not supported; maximum allowed 5TiB")]
InvalidObjectSize(u64),
#[error("Valid part size must be provided when object size is unknown")]
MissingPartSize,
InvalidPartCount(u64, u64, u16),
TooManyParts,
#[error(
"Object size {object_size} and part size {part_size} make more than {part_count} parts for upload"
)]
InvalidPartCount {
object_size: u64,
part_size: u64,
part_count: u16,
},
#[error("Too many parts for upload: {0} parts; maximum allowed is MAX_MULTIPART_COUNT parts")]
TooManyParts(u64),
#[error("{}", sse_tls_required_message(.0))]
SseTlsRequired(Option<String>),
#[error("Too much data in the stream - exceeds {0} bytes")]
TooMuchData(u64),
InsufficientData(u64, u64),
#[error("Not enough data in the stream; expected: {expected}, got: {got} bytes")]
InsufficientData { expected: u64, got: u64 },
#[error("Invalid legal hold: {0}")]
InvalidLegalHold(String),
#[error("Invalid select expression: {0}")]
InvalidSelectExpression(String),
#[error("Invalid header value type: {0}")]
InvalidHeaderValueType(u8),
CrcMismatch(String, u32, u32),
#[error("Invalid base URL: {0}")]
InvalidBaseUrl(String),
#[error("URL build error: {0}")]
UrlBuildError(String),
#[error("Region must be {bucket_region}, but passed {region}")]
RegionMismatch {
bucket_region: String,
region: String,
},
#[error("{crc_type} CRC mismatch; expected: {expected}, got: {got}")]
CrcMismatch {
crc_type: String,
expected: u32,
got: u32,
},
#[error("Unknown event type: {0}")]
UnknownEventType(String),
SelectError(String, String),
UnsupportedApi(String),
InvalidComposeSource(String),
InvalidComposeSourceOffset(String, String, Option<String>, u64, u64),
InvalidComposeSourceLength(String, String, Option<String>, u64, u64),
InvalidComposeSourceSize(String, String, Option<String>, u64, u64),
InvalidComposeSourcePartSize(String, String, Option<String>, u64, u64),
InvalidComposeSourceMultipart(String, String, Option<String>, u64, u64),
/// Error returned by the S3 Select API
#[error("Error code: {error_code}, error message: {error_message}")]
SelectError {
error_code: String,
error_message: String,
},
/// Error returned when the S3 API is not supported by AWS S3
#[error("{0} API is not supported in Amazon AWS S3")]
UnsupportedAwsApi(String),
#[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceOffset", &format!("offset {offset} is beyond object size {object_size}")))]
InvalidComposeSourceOffset {
bucket: String,
object: String,
version: Option<String>,
offset: u64,
object_size: u64,
},
#[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceLength", &format!("length {length} is beyond object size {object_size}")))]
InvalidComposeSourceLength {
bucket: String,
object: String,
version: Option<String>,
length: u64,
object_size: u64,
},
#[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceSize", &format!("compose size {compose_size} is beyond object size {object_size}")))]
InvalidComposeSourceSize {
bucket: String,
object: String,
version: Option<String>,
compose_size: u64,
object_size: u64,
},
#[error("Invalid directive: {0}")]
InvalidDirective(String),
#[error("Invalid copy directive: {0}")]
InvalidCopyDirective(String),
InvalidMultipartCount(u16),
MissingLifecycleAction,
InvalidExpiredObjectDeleteMarker,
InvalidDateAndDays(String),
InvalidLifecycleRuleId,
InvalidFilter,
#[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourcePartSize", &format!("compose size {size} must be greater than {expected_size}")))]
InvalidComposeSourcePartSize {
bucket: String,
object: String,
version: Option<String>,
size: u64,
expected_size: u64,
},
#[error("{}", format_s3_object_error(.bucket, .object, .version.as_deref(), "InvalidComposeSourceMultipart", &format!("size {size} for multipart split upload of {size}, last part size is less than {expected_size}")))]
InvalidComposeSourceMultipart {
bucket: String,
object: String,
version: Option<String>,
size: u64,
expected_size: u64,
},
#[error("Compose sources create more than allowed multipart count {0}")]
InvalidMultipartCount(u64),
#[error("Only one of And, Prefix or Tag must be provided: {0}")]
InvalidFilter(String),
#[error("Invalid versioning status: {0}")]
InvalidVersioningStatus(String),
#[error("Post policy error: {0}")]
PostPolicyError(String),
#[error("Invalid object lock config: {0}")]
InvalidObjectLockConfig(String),
NoClientProvided,
TagDecodingError(String, String),
#[error("Tag decoding failed: {error_message} on input '{input}'")]
TagDecodingError {
input: String,
error_message: String,
},
#[error("Content length is unknown")]
ContentLengthUnknown,
}
impl std::error::Error for Error {}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Error::TimeParseError(e) => write!(f, "{e}"),
Error::InvalidUrl(e) => write!(f, "{e}"),
Error::IOError(e) => write!(f, "{e}"),
Error::XmlParseError(e) => write!(f, "{e}"),
Error::HttpError(e) => write!(f, "{e}"),
Error::StrError(e) => write!(f, "{e}"),
Error::IntError(e) => write!(f, "{e}"),
Error::BoolError(e) => write!(f, "{e}"),
Error::Utf8Error(e) => write!(f, "{e}"),
Error::JsonError(e) => write!(f, "{e}"),
Error::XmlError(m) => write!(f, "{m}"),
Error::InvalidBucketName(m) => write!(f, "{m}"),
Error::InvalidObjectName(m) => write!(f, "{m}"),
Error::InvalidUploadId(m) => write!(f, "{m}"),
Error::InvalidPartNumber(m) => write!(f, "{m}"),
Error::InvalidUserMetadata(m) => write!(f, "{m}"),
Error::EmptyParts(m) => write!(f, "{m}"),
Error::InvalidRetentionMode(m) => write!(f, "invalid retention mode {m}"),
Error::InvalidRetentionConfig(m) => write!(f, "invalid retention configuration; {m}"),
Error::InvalidMinPartSize(s) => {
write!(f, "part size {s} is not supported; minimum allowed 5MiB")
}
Error::InvalidMaxPartSize(s) => {
write!(f, "part size {s} is not supported; maximum allowed 5GiB")
}
Error::InvalidObjectSize(s) => {
write!(f, "object size {s} is not supported; maximum allowed 5TiB",)
}
Error::MissingPartSize => write!(
f,
"valid part size must be provided when object size is unknown"
),
Error::InvalidPartCount(os, ps, pc) => write!(
f,
"object size {os} and part size {ps} make more than {pc} parts for upload"
),
Error::TooManyParts => write!(f, "too many parts for upload"),
Error::SseTlsRequired(m) => write!(
f,
"{}SSE operation must be performed over a secure connection",
m.as_ref().map_or(String::new(), |v| v.clone())
),
Error::TooMuchData(s) => write!(f, "too much data in the stream - exceeds {s} bytes"),
Error::InsufficientData(expected, got) => write!(
f,
"not enough data in the stream; expected: {expected}, got: {got} bytes",
),
Error::InvalidBaseUrl(m) => write!(f, "{m}"),
Error::UrlBuildError(m) => write!(f, "{m}"),
Error::InvalidLegalHold(s) => write!(f, "invalid legal hold {s}"),
Error::RegionMismatch(br, r) => write!(f, "region must be {br}, but passed {r}"),
Error::S3Error(er) => write!(
f,
"s3 operation failed; code: {:?}, message: {}, resource: {}, request_id: {}, host_id: {}, bucket_name: {}, object_name: {}",
er.code,
er.message,
er.resource,
er.request_id,
er.host_id,
er.bucket_name,
er.object_name,
),
Error::InvalidResponse(sc, ct) => write!(
f,
"invalid response received; status code: {sc}; content-type: {ct}"
),
Error::ServerError(sc) => write!(f, "server failed with HTTP status code {sc}"),
Error::InvalidSelectExpression(m) => write!(f, "{m}"),
Error::InvalidHeaderValueType(v) => write!(f, "invalid header value type {v}"),
Error::CrcMismatch(t, e, g) => {
write!(f, "{t} CRC mismatch; expected: {e}, got: {g}")
}
Error::UnknownEventType(et) => write!(f, "unknown event type {et}"),
Error::SelectError(ec, em) => write!(f, "error code: {ec}, error message: {em}"),
Error::UnsupportedApi(a) => write!(f, "{a} API is not supported in Amazon AWS S3"),
Error::InvalidComposeSource(m) => write!(f, "{m}"),
Error::InvalidComposeSourceOffset(b, o, v, of, os) => write!(
f,
"source {}/{}{}: offset {} is beyond object size {}",
b,
o,
v.as_ref()
.map_or(String::new(), |v| String::from("?versionId=") + v),
of,
os
),
Error::InvalidComposeSourceLength(b, o, v, l, os) => write!(
f,
"source {}/{}{}: length {} is beyond object size {}",
b,
o,
v.as_ref()
.map_or(String::new(), |v| String::from("?versionId=") + v),
l,
os
),
Error::InvalidComposeSourceSize(b, o, v, cs, os) => write!(
f,
"source {}/{}{}: compose size {} is beyond object size {}",
b,
o,
v.as_ref()
.map_or(String::new(), |v| String::from("?versionId=") + v),
cs,
os
),
Error::InvalidDirective(m) => write!(f, "{m}"),
Error::InvalidCopyDirective(m) => write!(f, "{m}"),
Error::InvalidComposeSourcePartSize(b, o, v, s, es) => write!(
f,
"source {}/{}{}: size {} must be greater than {}",
b,
o,
v.as_ref()
.map_or(String::new(), |v| String::from("?versionId=") + v),
s,
es
),
Error::InvalidComposeSourceMultipart(b, o, v, s, es) => write!(
f,
"source {}/{}{}: size {} for multipart split upload of {}, last part size is less than {}",
b,
o,
v.as_ref()
.map_or(String::new(), |v| String::from("?versionId=") + v),
s,
s,
es
),
Error::InvalidMultipartCount(c) => write!(
f,
"Compose sources create more than allowed multipart count {c}",
),
Error::MissingLifecycleAction => write!(
f,
"at least one of action (AbortIncompleteMultipartUpload, Expiration, NoncurrentVersionExpiration, NoncurrentVersionTransition or Transition) must be specified in a rule"
),
Error::InvalidExpiredObjectDeleteMarker => write!(
f,
"ExpiredObjectDeleteMarker must not be provided along with Date and Days"
),
Error::InvalidDateAndDays(m) => {
write!(f, "Only one of date or days of {m} must be set")
}
Error::InvalidLifecycleRuleId => write!(f, "id must be exceed 255 characters"),
Error::InvalidFilter => write!(f, "only one of And, Prefix or Tag must be provided"),
Error::InvalidVersioningStatus(m) => write!(f, "{m}"),
Error::PostPolicyError(m) => write!(f, "{m}"),
Error::InvalidObjectLockConfig(m) => write!(f, "{m}"),
Error::NoClientProvided => write!(f, "no client provided"),
Error::TagDecodingError(input, error_message) => {
write!(f, "tag decoding failed: {error_message} on input '{input}'")
}
Error::ContentLengthUnknown => write!(f, "content length is unknown"),
impl From<reqwest::header::ToStrError> for ValidationErr {
fn from(err: reqwest::header::ToStrError) -> Self {
ValidationErr::StrError {
message: "The provided value has an invalid encoding".into(),
source: Some(Box::new(err)),
}
}
}
impl From<chrono::ParseError> for Error {
fn from(err: chrono::ParseError) -> Self {
Error::TimeParseError(err)
// Some convenience methods for creating ValidationErr instances
impl ValidationErr {
pub fn xml_error(message: impl Into<String>) -> Self {
Self::XmlError {
message: message.into(),
source: None,
}
}
pub fn xml_error_with_source(
message: impl Into<String>,
source: impl Into<Box<dyn std::error::Error + Send + Sync>>,
) -> Self {
Self::XmlError {
message: message.into(),
source: Some(source.into()),
}
}
}
impl From<http::uri::InvalidUri> for Error {
fn from(err: http::uri::InvalidUri) -> Self {
Error::InvalidUrl(err)
// IO errors from accessing local files
#[derive(Error, Debug)]
pub enum IoError {
/// Error while performing IO operations
#[error("IO error: {0}")]
IOError(#[from] std::io::Error),
}
// IO errors on the network like network time out
#[derive(Error, Debug)]
pub enum NetworkError {
#[error("Server failed with HTTP status code {0}")]
ServerError(u16),
}
// Server response errors like bucket does not exist, etc.
// This would include any server sent validation errors.
#[derive(Error, Debug)]
pub enum S3ServerError {
/// S3 Errors as returned by the S3 server
#[error("S3 error: {0}")]
S3Error(#[from] Box<MinioErrorResponse>), // NOTE: Boxing to prevent: "warning: large size difference between variants"
#[error(
"Invalid server response received; {message}; HTTP status code: {http_status_code}; content-type: {content_type}"
)]
InvalidServerResponse {
message: String,
http_status_code: u16,
content_type: String,
},
}
// Top-level Minio client error
#[derive(Error, Debug)]
pub enum Error {
#[error("S3 server error occurred")]
S3Server(#[from] S3ServerError),
#[error("Drive IO error occurred")]
DriveIo(#[from] IoError),
#[error("Network error occurred")]
Network(#[from] NetworkError),
#[error("Validation error occurred")]
Validation(#[from] ValidationErr),
}
// region message helpers
// Helper functions for formatting error messages with Option<String>
fn sse_tls_required_message(prefix: &Option<String>) -> String {
match prefix {
Some(p) => format!("{p} SSE operation must be performed over a secure connection",),
None => "SSE operation must be performed over a secure connection".to_string(),
}
}
impl From<std::io::Error> for Error {
fn from(err: std::io::Error) -> Self {
Error::IOError(err)
}
fn format_s3_object_error(
bucket: &str,
object: &str,
version: Option<&str>,
error_type: &str,
details: &str,
) -> String {
let version_str = match &version.map(String::from) {
Some(v) => format!("?versionId={v}"),
None => String::new(),
};
format!("source {bucket}/{object}{version_str}: {error_type} {details}")
}
impl From<xmltree::ParseError> for Error {
fn from(err: xmltree::ParseError) -> Self {
Error::XmlParseError(err)
}
}
impl From<reqwest::Error> for Error {
fn from(err: reqwest::Error) -> Self {
Error::HttpError(err)
}
}
impl From<reqwest::header::ToStrError> for Error {
fn from(err: reqwest::header::ToStrError) -> Self {
Error::StrError(err)
}
}
impl From<std::num::ParseIntError> for Error {
fn from(err: std::num::ParseIntError) -> Self {
Error::IntError(err)
}
}
impl From<std::str::ParseBoolError> for Error {
fn from(err: std::str::ParseBoolError) -> Self {
Error::BoolError(err)
}
}
impl From<alloc::string::FromUtf8Error> for Error {
fn from(err: alloc::string::FromUtf8Error) -> Self {
Error::Utf8Error(err.into())
}
}
impl From<std::str::Utf8Error> for Error {
fn from(err: std::str::Utf8Error) -> Self {
Error::Utf8Error(err.into())
}
}
impl From<serde_json::Error> for Error {
fn from(err: serde_json::Error) -> Self {
Error::JsonError(err)
}
}
// endregion message helpers

107
src/s3/header_constants.rs Normal file
View File

@ -0,0 +1,107 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
pub const IF_MATCH: &str = "if-match";
pub const IF_NONE_MATCH: &str = "if-none-match";
pub const IF_MODIFIED_SINCE: &str = "if-modified-since";
pub const IF_UNMODIFIED_SINCE: &str = "if-unmodified-since";
pub const CONTENT_MD5: &str = "Content-MD5";
pub const CONTENT_TYPE: &str = "Content-Type";
pub const AUTHORIZATION: &str = "Authorization";
pub const RANGE: &str = "Range";
pub const HOST: &str = "Host";
pub const CONTENT_LENGTH: &str = "Content-Length";
pub const POLICY: &str = "policy";
pub const X_MINIO_DEPLOYMENT_ID: &str = "x-minio-deployment-id";
pub const X_AMZ_VERSION_ID: &str = "x-amz-version-id";
pub const X_AMZ_ID_2: &str = "x-amz-id-2";
pub const X_AMZ_WRITE_OFFSET_BYTES: &str = "x-amz-write-offset-bytes";
pub const X_AMZ_OBJECT_SIZE: &str = "x-amz-object-size";
pub const X_AMZ_TAGGING: &str = "x-amz-tagging";
pub const X_AMZ_BUCKET_REGION: &str = "x-amz-bucket-region";
pub const X_AMZ_OBJECT_LOCK_MODE: &str = "x-amz-object-lock-mode";
pub const X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE: &str = "x-amz-object-lock-retain-until-date";
pub const X_AMZ_OBJECT_LOCK_LEGAL_HOLD: &str = "x-amz-object-lock-legal-hold";
pub const X_AMZ_METADATA_DIRECTIVE: &str = "x-amz-metadata-directive";
pub const X_AMZ_TAGGING_DIRECTIVE: &str = "x-amz-tagging-directive";
pub const X_AMZ_COPY_SOURCE: &str = "x-amz-copy-source";
pub const X_AMZ_COPY_SOURCE_RANGE: &str = "x-amz-copy-source-range";
pub const X_AMZ_COPY_SOURCE_IF_MATCH: &str = "x-amz-copy-source-if-match";
pub const X_AMZ_COPY_SOURCE_IF_NONE_MATCH: &str = "x-amz-copy-source-if-none-match";
pub const X_AMZ_COPY_SOURCE_IF_UNMODIFIED_SINCE: &str = "x-amz-copy-source-if-unmodified-since";
pub const X_AMZ_COPY_SOURCE_IF_MODIFIED_SINCE: &str = "x-amz-copy-source-if-modified-since";
pub const X_AMZ_BUCKET_OBJECT_LOCK_ENABLED: &str = "x-amz-bucket-object-lock-enabled";
pub const X_AMZ_BYPASS_GOVERNANCE_RETENTION: &str = "x-amz-bypass-governance-retention";
pub const X_AMZ_DATE: &str = "x-amz-date";
pub const X_AMZ_DELETE_MARKER: &str = "x-amz-delete-marker";
pub const X_AMZ_ALGORITHM: &str = "x-amz-algorithm";
pub const X_AMZ_CREDENTIAL: &str = "x-amz-credential";
pub const X_AMZ_SIGNATURE: &str = "x-amz-signature";
pub const X_AMZ_REQUEST_ID: &str = "x-amz-request-id";
pub const X_AMZ_EXPIRES: &str = "x-amz-expires";
pub const X_AMZ_SIGNED_HEADERS: &str = "x-amz-signedheaders";
pub const X_AMZ_CONTENT_SHA256: &str = "x-amz-content-sha256";
pub const X_AMZ_SECURITY_TOKEN: &str = "x-amz-security-token";
pub const X_AMZ_SERVER_SIDE_ENCRYPTION: &str = "X-Amz-Server-Side-Encryption";
pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CONTEXT: &str = "X-Amz-Server-Side-Encryption-Context";
pub const X_AMZ_SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID: &str =
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id";
pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: &str =
"X-Amz-Server-Side-Encryption-Customer-Algorithm";
pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: &str =
"X-Amz-Server-Side-Encryption-Customer-Key";
pub const X_AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: &str =
"X-Amz-Server-Side-Encryption-Customer-Key-MD5";
pub const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM: &str =
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm";
pub const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY: &str =
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key";
pub const X_AMZ_COPY_SOURCE_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5: &str =
"X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-MD5";

View File

@ -17,7 +17,8 @@
use super::utils::urlencode_object_key;
use crate::s3::client::DEFAULT_REGION;
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::multimap::{Multimap, MultimapExt};
use crate::s3::utils::match_hostname;
use derivative::Derivative;
@ -135,7 +136,7 @@ fn get_aws_info(
aws_s3_prefix: &mut String,
aws_domain_suffix: &mut String,
dualstack: &mut bool,
) -> Result<(), Error> {
) -> Result<(), ValidationErr> {
if !match_hostname(host) {
return Ok(());
}
@ -156,18 +157,18 @@ fn get_aws_info(
}
if !match_aws_s3_endpoint(host) {
return Err(Error::UrlBuildError(
String::from("invalid Amazon AWS host ") + host,
));
return Err(ValidationErr::UrlBuildError(format!(
"invalid Amazon AWS host {host}"
)));
}
let matcher = AWS_S3_PREFIX_REGEX.find(host).unwrap();
let s3_prefix = host.get(..matcher.end()).unwrap();
if s3_prefix.contains("s3-accesspoint") && !https {
return Err(Error::UrlBuildError(
String::from("use HTTPS scheme for host ") + host,
));
return Err(ValidationErr::UrlBuildError(format!(
"use HTTPS scheme for host {host}"
)));
}
let mut tokens: Vec<_> = host.get(matcher.len()..).unwrap().split('.').collect();
@ -195,9 +196,9 @@ fn get_aws_info(
if domain_suffix.ends_with(".cn") && !s3_prefix.ends_with("s3-accelerate.") && region.is_empty()
{
return Err(Error::UrlBuildError(
String::from("region missing in Amazon S3 China endpoint ") + host,
));
return Err(ValidationErr::UrlBuildError(format!(
"region missing in Amazon S3 China endpoint {host}"
)));
}
*region = region_in_host;
@ -223,7 +224,7 @@ pub struct BaseUrl {
}
impl FromStr for BaseUrl {
type Err = Error;
type Err = ValidationErr;
/// Convert a string to a BaseUrl.
///
@ -245,7 +246,7 @@ impl FromStr for BaseUrl {
/// // Get base URL from IPv6 address
/// let base_url: BaseUrl = "[0:0:0:0:0:ffff:c0a8:7c3f]:9000".parse().unwrap();
/// ```
fn from_str(s: &str) -> Result<Self, Self::Err> {
fn from_str(s: &str) -> Result<Self, ValidationErr> {
let url = s.parse::<Uri>()?;
let https = match url.scheme() {
@ -254,9 +255,9 @@ impl FromStr for BaseUrl {
"http" => false,
"https" => true,
_ => {
return Err(Error::InvalidBaseUrl(String::from(
"scheme must be http or https",
)));
return Err(ValidationErr::InvalidBaseUrl(
"scheme must be http or https".into(),
));
}
},
};
@ -264,9 +265,9 @@ impl FromStr for BaseUrl {
let mut host = match url.host() {
Some(h) => h,
_ => {
return Err(Error::InvalidBaseUrl(String::from(
"valid host must be provided",
)));
return Err(ValidationErr::InvalidBaseUrl(
"valid host must be provided".into(),
));
}
};
@ -285,15 +286,15 @@ impl FromStr for BaseUrl {
}
if url.path() != "/" && url.path() != "" {
return Err(Error::InvalidBaseUrl(String::from(
"path must be empty for base URL",
)));
return Err(ValidationErr::InvalidBaseUrl(
"path must be empty for base URL".into(),
));
}
if url.query().is_some() {
return Err(Error::InvalidBaseUrl(String::from(
"query must be none for base URL",
)));
return Err(ValidationErr::InvalidBaseUrl(
"query must be none for base URL".into(),
));
}
let mut region = String::new();
@ -335,7 +336,7 @@ impl BaseUrl {
bucket_name: &str,
enforce_path_style: bool,
region: &str,
) -> Result<(), Error> {
) -> Result<(), ValidationErr> {
let mut host = String::from(&self.aws_s3_prefix);
host.push_str(&self.aws_domain_suffix);
if host.eq_ignore_ascii_case("s3-external-1.amazonaws.com")
@ -349,9 +350,9 @@ impl BaseUrl {
host = String::from(&self.aws_s3_prefix);
if self.aws_s3_prefix.contains("s3-accelerate") {
if bucket_name.contains('.') {
return Err(Error::UrlBuildError(String::from(
"bucket name with '.' is not allowed for accelerate endpoint",
)));
return Err(ValidationErr::UrlBuildError(
"bucket name with '.' is not allowed for accelerate endpoint".into(),
));
}
if enforce_path_style {
@ -408,7 +409,7 @@ impl BaseUrl {
query: &Multimap,
bucket_name: Option<&str>,
object_name: Option<&str>,
) -> Result<Url, Error> {
) -> Result<Url, ValidationErr> {
let mut url = Url {
https: self.https,
host: self.host.clone(),

View File

@ -1,4 +1,19 @@
use crate::s3::error::Error;
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2025 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::ValidationErr;
use crate::s3::types::Filter;
use crate::s3::utils::to_iso8601utc;
use xmltree::Element;
@ -10,7 +25,7 @@ pub struct LifecycleConfig {
}
impl LifecycleConfig {
pub fn from_xml(root: &Element) -> Result<LifecycleConfig, Error> {
pub fn from_xml(root: &Element) -> Result<LifecycleConfig, ValidationErr> {
let mut config = LifecycleConfig { rules: Vec::new() };
// Process all Rule elements in the XML
@ -23,7 +38,7 @@ impl LifecycleConfig {
Ok(config)
}
pub fn validate(&self) -> Result<(), Error> {
pub fn validate(&self) -> Result<(), ValidationErr> {
// Skip validation if empty
if self.rules.is_empty() {
return Ok(());
@ -90,17 +105,15 @@ impl LifecycleConfig {
data.push_str(&days.to_string());
data.push_str("</Days>");
}
if let Some(delete_marker) = rule.expiration_expired_object_delete_marker {
if delete_marker {
data.push_str(
"<ExpiredObjectDeleteMarker>true</ExpiredObjectDeleteMarker>",
);
}
if let Some(delete_marker) = rule.expiration_expired_object_delete_marker
&& delete_marker
{
data.push_str("<ExpiredObjectDeleteMarker>true</ExpiredObjectDeleteMarker>");
}
if let Some(delete_all) = rule.expiration_expired_object_all_versions {
if delete_all {
data.push_str("<ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>");
}
if let Some(delete_all) = rule.expiration_expired_object_all_versions
&& delete_all
{
data.push_str("<ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>");
}
data.push_str("</Expiration>");
}
@ -118,10 +131,10 @@ impl LifecycleConfig {
data.push_str(&days.to_string());
data.push_str("</Days>");
if let Some(delete_marker) = rule.all_versions_expiration_delete_marker {
if delete_marker {
data.push_str("<DeleteMarker>true</DeleteMarker>");
}
if let Some(delete_marker) = rule.all_versions_expiration_delete_marker
&& delete_marker
{
data.push_str("<DeleteMarker>true</DeleteMarker>");
}
data.push_str("</AllVersionsExpiration>");
@ -248,14 +261,14 @@ pub struct LifecycleRule {
}
impl LifecycleRule {
pub fn from_xml(rule_elem: &Element) -> Result<Self, Error> {
pub fn from_xml(rule_elem: &Element) -> Result<Self, ValidationErr> {
let mut rule = LifecycleRule::default();
// Parse ID
if let Some(id_elem) = rule_elem.get_child("ID") {
if let Some(id_text) = id_elem.get_text() {
rule.id = id_text.to_string();
}
if let Some(id_elem) = rule_elem.get_child("ID")
&& let Some(id_text) = id_elem.get_text()
{
rule.id = id_text.to_string();
}
// Parse Status
@ -264,7 +277,7 @@ impl LifecycleRule {
rule.status = status_text == "Enabled";
}
} else {
return Err(Error::XmlError("Missing <Status> element".to_string()));
return Err(ValidationErr::xml_error("Missing <Status> element"));
}
// Parse Filter
@ -273,202 +286,205 @@ impl LifecycleRule {
}
// Parse AbortIncompleteMultipartUpload
if let Some(abort_elem) = rule_elem.get_child("AbortIncompleteMultipartUpload") {
if let Some(days_elem) = abort_elem.get_child("DaysAfterInitiation") {
if let Some(days_text) = days_elem.get_text() {
rule.abort_incomplete_multipart_upload_days_after_initiation =
Some(days_text.parse().map_err(|_| {
Error::XmlError("Invalid DaysAfterInitiation value".to_string())
})?);
}
}
}
if let Some(abort_elem) = rule_elem.get_child("AbortIncompleteMultipartUpload")
&& let Some(days_elem) = abort_elem.get_child("DaysAfterInitiation")
&& let Some(days_text) = days_elem.get_text()
{
rule.abort_incomplete_multipart_upload_days_after_initiation =
Some(days_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source("Invalid DaysAfterInitiation value", e)
})?)
};
// Parse Expiration
if let Some(expiration_elem) = rule_elem.get_child("Expiration") {
// Date
if let Some(date_elem) = expiration_elem.get_child("Date") {
if let Some(date_text) = date_elem.get_text() {
// Assume a function that parses ISO8601 to DateTime<Utc>
rule.expiration_date = Some(parse_iso8601(&date_text)?);
}
if let Some(date_elem) = expiration_elem.get_child("Date")
&& let Some(date_text) = date_elem.get_text()
{
// Assume a function that parses ISO8601 to DateTime<Utc>
rule.expiration_date = Some(parse_iso8601(&date_text)?);
}
// Days
if let Some(days_elem) = expiration_elem.get_child("Days") {
if let Some(days_text) = days_elem.get_text() {
rule.expiration_days = Some(days_text.parse().map_err(|_| {
Error::XmlError("Invalid Expiration Days value".to_string())
})?);
}
if let Some(days_elem) = expiration_elem.get_child("Days")
&& let Some(days_text) = days_elem.get_text()
{
rule.expiration_days = Some(days_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source("Invalid Expiration Days value", e)
})?);
}
// ExpiredObjectDeleteMarker
if let Some(delete_marker_elem) = expiration_elem.get_child("ExpiredObjectDeleteMarker")
&& let Some(delete_marker_text) = delete_marker_elem.get_text()
{
if let Some(delete_marker_text) = delete_marker_elem.get_text() {
rule.expiration_expired_object_delete_marker =
Some(delete_marker_text == "true");
}
rule.expiration_expired_object_delete_marker = Some(delete_marker_text == "true");
}
// ExpiredObjectAllVersions
if let Some(all_versions_elem) = expiration_elem.get_child("ExpiredObjectAllVersions") {
if let Some(all_versions_text) = all_versions_elem.get_text() {
rule.expiration_expired_object_all_versions = Some(all_versions_text == "true");
}
if let Some(all_versions_elem) = expiration_elem.get_child("ExpiredObjectAllVersions")
&& let Some(all_versions_text) = all_versions_elem.get_text()
{
rule.expiration_expired_object_all_versions = Some(all_versions_text == "true");
}
}
// Parse DelMarkerExpiration
if let Some(del_marker_elem) = rule_elem.get_child("DelMarkerExpiration") {
if let Some(days_elem) = del_marker_elem.get_child("Days") {
if let Some(days_text) = days_elem.get_text() {
rule.del_marker_expiration_days = Some(days_text.parse().map_err(|_| {
Error::XmlError("Invalid DelMarkerExpiration Days value".to_string())
})?);
}
}
if let Some(del_marker_elem) = rule_elem.get_child("DelMarkerExpiration")
&& let Some(days_elem) = del_marker_elem.get_child("Days")
&& let Some(days_text) = days_elem.get_text()
{
rule.del_marker_expiration_days = Some(days_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source("Invalid DelMarkerExpiration Days value", e)
})?);
}
// Parse AllVersionsExpiration
if let Some(all_versions_elem) = rule_elem.get_child("AllVersionsExpiration") {
if let Some(days_elem) = all_versions_elem.get_child("Days") {
if let Some(days_text) = days_elem.get_text() {
rule.all_versions_expiration_days = Some(days_text.parse().map_err(|_| {
Error::XmlError("Invalid AllVersionsExpiration Days value".to_string())
})?);
}
if let Some(days_elem) = all_versions_elem.get_child("Days")
&& let Some(days_text) = days_elem.get_text()
{
rule.all_versions_expiration_days = Some(days_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source(
"Invalid AllVersionsExpiration Days value",
e,
)
})?);
}
if let Some(delete_marker_elem) = all_versions_elem.get_child("DeleteMarker") {
if let Some(delete_marker_text) = delete_marker_elem.get_text() {
rule.all_versions_expiration_delete_marker = Some(delete_marker_text == "true");
}
if let Some(delete_marker_elem) = all_versions_elem.get_child("DeleteMarker")
&& let Some(delete_marker_text) = delete_marker_elem.get_text()
{
rule.all_versions_expiration_delete_marker = Some(delete_marker_text == "true");
}
}
// Parse NoncurrentVersionExpiration
if let Some(noncurrent_exp_elem) = rule_elem.get_child("NoncurrentVersionExpiration") {
if let Some(days_elem) = noncurrent_exp_elem.get_child("NoncurrentDays") {
if let Some(days_text) = days_elem.get_text() {
rule.noncurrent_version_expiration_noncurrent_days =
Some(days_text.parse().map_err(|_| {
Error::XmlError(
"Invalid NoncurrentVersionExpiration NoncurrentDays value"
.to_string(),
)
})?);
}
if let Some(days_elem) = noncurrent_exp_elem.get_child("NoncurrentDays")
&& let Some(days_text) = days_elem.get_text()
{
rule.noncurrent_version_expiration_noncurrent_days =
Some(days_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source(
"Invalid NoncurrentVersionExpiration NoncurrentDays value",
e,
)
})?);
}
if let Some(versions_elem) = noncurrent_exp_elem.get_child("NewerNoncurrentVersions") {
if let Some(versions_text) = versions_elem.get_text() {
rule.noncurrent_version_expiration_newer_versions =
Some(versions_text.parse().map_err(|_| {
Error::XmlError("Invalid NewerNoncurrentVersions value".to_string())
})?);
}
if let Some(versions_elem) = noncurrent_exp_elem.get_child("NewerNoncurrentVersions")
&& let Some(versions_text) = versions_elem.get_text()
{
rule.noncurrent_version_expiration_newer_versions =
Some(versions_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source(
"Invalid NewerNoncurrentVersions value",
e,
)
})?);
}
}
// Parse NoncurrentVersionTransition
if let Some(noncurrent_trans_elem) = rule_elem.get_child("NoncurrentVersionTransition") {
if let Some(days_elem) = noncurrent_trans_elem.get_child("NoncurrentDays") {
if let Some(days_text) = days_elem.get_text() {
rule.noncurrent_version_transition_noncurrent_days =
Some(days_text.parse().map_err(|_| {
Error::XmlError(
"Invalid NoncurrentVersionTransition NoncurrentDays value"
.to_string(),
)
})?);
}
if let Some(days_elem) = noncurrent_trans_elem.get_child("NoncurrentDays")
&& let Some(days_text) = days_elem.get_text()
{
rule.noncurrent_version_transition_noncurrent_days =
Some(days_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source(
"Invalid NoncurrentVersionTransition NoncurrentDays value",
e,
)
})?);
}
if let Some(storage_elem) = noncurrent_trans_elem.get_child("StorageClass") {
if let Some(storage_text) = storage_elem.get_text() {
rule.noncurrent_version_transition_storage_class =
Some(storage_text.to_string());
}
if let Some(storage_elem) = noncurrent_trans_elem.get_child("StorageClass")
&& let Some(storage_text) = storage_elem.get_text()
{
rule.noncurrent_version_transition_storage_class = Some(storage_text.to_string());
}
if let Some(versions_elem) = noncurrent_trans_elem.get_child("NewerNoncurrentVersions")
&& let Some(versions_text) = versions_elem.get_text()
{
if let Some(versions_text) = versions_elem.get_text() {
rule.noncurrent_version_transition_newer_versions =
Some(versions_text.parse().map_err(|_| {
Error::XmlError("Invalid NewerNoncurrentVersions value".to_string())
})?);
}
rule.noncurrent_version_transition_newer_versions =
Some(versions_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source(
"Invalid NewerNoncurrentVersions value",
e,
)
})?);
}
}
// Parse Transition
if let Some(transition_elem) = rule_elem.get_child("Transition") {
// Date
if let Some(date_elem) = transition_elem.get_child("Date") {
if let Some(date_text) = date_elem.get_text() {
rule.transition_date = Some(parse_iso8601(&date_text)?);
}
if let Some(date_elem) = transition_elem.get_child("Date")
&& let Some(date_text) = date_elem.get_text()
{
rule.transition_date = Some(parse_iso8601(&date_text)?);
}
// Days
if let Some(days_elem) = transition_elem.get_child("Days") {
if let Some(days_text) = days_elem.get_text() {
rule.transition_days = Some(days_text.parse().map_err(|_| {
Error::XmlError("Invalid Transition Days value".to_string())
})?);
}
if let Some(days_elem) = transition_elem.get_child("Days")
&& let Some(days_text) = days_elem.get_text()
{
rule.transition_days = Some(days_text.parse().map_err(|e| {
ValidationErr::xml_error_with_source("Invalid Transition Days value", e)
})?);
}
// StorageClass
if let Some(storage_elem) = transition_elem.get_child("StorageClass") {
if let Some(storage_text) = storage_elem.get_text() {
rule.transition_storage_class = Some(storage_text.to_string());
}
if let Some(storage_elem) = transition_elem.get_child("StorageClass")
&& let Some(storage_text) = storage_elem.get_text()
{
rule.transition_storage_class = Some(storage_text.to_string());
}
}
Ok(rule)
}
pub fn validate(&self) -> Result<(), Error> {
pub fn validate(&self) -> Result<(), ValidationErr> {
// Basic validation requirements
// Ensure ID is present
if self.id.is_empty() {
return Err(Error::XmlError("Rule ID cannot be empty".to_string()));
return Err(ValidationErr::xml_error("Rule ID cannot be empty"));
}
// Validate storage classes in transitions
if let Some(storage_class) = &self.transition_storage_class {
if storage_class.is_empty() {
return Err(Error::XmlError(
"Transition StorageClass cannot be empty".to_string(),
));
}
if let Some(storage_class) = &self.transition_storage_class
&& storage_class.is_empty()
{
return Err(ValidationErr::xml_error(
"Transition StorageClass cannot be empty",
));
}
if let Some(storage_class) = &self.noncurrent_version_transition_storage_class {
if storage_class.is_empty() {
return Err(Error::XmlError(
"NoncurrentVersionTransition StorageClass cannot be empty".to_string(),
));
}
if let Some(storage_class) = &self.noncurrent_version_transition_storage_class
&& storage_class.is_empty()
{
return Err(ValidationErr::xml_error(
"NoncurrentVersionTransition StorageClass cannot be empty",
));
}
// Check that expiration has either days or date, not both
if self.expiration_days.is_some() && self.expiration_date.is_some() {
return Err(Error::XmlError(
"Expiration cannot specify both Days and Date".to_string(),
return Err(ValidationErr::xml_error(
"Expiration cannot specify both Days and Date",
));
}
// Check that transition has either days or date, not both
if self.transition_days.is_some() && self.transition_date.is_some() {
return Err(Error::XmlError(
"Transition cannot specify both Days and Date".to_string(),
return Err(ValidationErr::xml_error(
"Transition cannot specify both Days and Date",
));
}
@ -477,8 +493,10 @@ impl LifecycleRule {
}
// Helper function to parse ISO8601 dates
fn parse_iso8601(date_str: &str) -> Result<chrono::DateTime<chrono::Utc>, Error> {
fn parse_iso8601(date_str: &str) -> Result<chrono::DateTime<chrono::Utc>, ValidationErr> {
chrono::DateTime::parse_from_rfc3339(date_str)
.map(|dt| dt.with_timezone(&chrono::Utc))
.map_err(|_| Error::XmlError(format!("Invalid date format: {date_str}")))
.map_err(|e| {
ValidationErr::xml_error_with_source(format!("Invalid date format: {date_str}"), e)
})
}

View File

@ -0,0 +1,320 @@
// MinIO Rust Library for Amazon S3 Compatible Cloud Storage
// Copyright 2022 MinIO, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate alloc;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::utils::{get_text_default, get_text_option};
use bytes::{Buf, Bytes};
use http::HeaderMap;
use std::str::FromStr;
use thiserror::Error;
use xmltree::Element;
/// Error codes for Minio operations as returned by the server.
///
/// The first group mirrors error codes defined in the minio-go SDK
/// (`s3-error.go`); the variants after the region marker are specific to
/// this SDK. Unknown server codes are captured in [`MinioErrorCode::OtherError`].
#[derive(Clone, Debug, Error, Default, PartialEq)]
pub enum MinioErrorCode {
    // region: error codes equal to the minio-go SDK in s3-error.go
    // quoted lines are from the minio-go SDK but not used in the minio-rs SDK (yet)
    //BadDigest: "The Content-Md5 you specified did not match what we received.",
    //EntityTooSmall: "Your proposed upload is smaller than the minimum allowed object size.",
    //EntityTooLarge: "Your proposed upload exceeds the maximum allowed object size.",
    //IncompleteBody: "You did not provide the number of bytes specified by the Content-Length HTTP header.",
    //InternalError: "We encountered an internal error, please try again.",
    //InvalidAccessKeyID: "The access key ID you provided does not exist in our records.",
    //InvalidBucketName: "The specified bucket is not valid.",
    //InvalidDigest: "The Content-Md5 you specified is not valid.",
    //InvalidRange: "The requested range is not satisfiable.",
    //MalformedXML: "The XML you provided was not well-formed or did not validate against our published schema.",
    //MissingContentLength: "You must provide the Content-Length HTTP header.",
    //MissingContentMD5: "Missing required header for this request: Content-Md5.",
    //MissingRequestBodyError: "Request body is empty.",
    /// The specified bucket does not exist
    NoSuchBucket,
    /// The bucket policy does not exist
    NoSuchBucketPolicy,
    /// The specified key does not exist
    NoSuchKey,
    //NoSuchUpload: "The specified multipart upload does not exist. The upload ID may be invalid, or the upload may have been aborted or completed.",
    //NotImplemented: "A header you provided implies functionality that is not implemented.",
    //PreconditionFailed: "At least one of the pre-conditions you specified did not hold.",
    //RequestTimeTooSkewed: "The difference between the request time and the server's time is too large.",
    //SignatureDoesNotMatch: "The request signature we calculated does not match the signature you provided. Check your key and signing method.",
    /// The specified method is not allowed against this resource
    MethodNotAllowed,
    //InvalidPart: "One or more of the specified parts could not be found.",
    //InvalidPartOrder: "The list of parts was not in ascending order. The parts list must be specified in order by part number.",
    //InvalidObjectState: "The operation is not valid for the current state of the object.",
    //AuthorizationHeaderMalformed: "The authorization header is malformed; the region is wrong.",
    //MalformedPOSTRequest: "The body of your POST request is not well-formed multipart/form-data.",
    /// The bucket you tried to delete is not empty
    BucketNotEmpty,
    //AllAccessDisabled: "All access to this bucket has been disabled.",
    //MalformedPolicy: "Policy has invalid resource.",
    //MissingFields: "Missing fields in request.",
    //AuthorizationQueryParametersError: "Error parsing the X-Amz-Credential parameter; the Credential is mal-formed; expecting \"<YOUR-AKID>/YYYYMMDD/REGION/SERVICE/aws4_request\".",
    //MalformedDate: "Invalid date format header, expected to be in ISO8601, RFC1123 or RFC1123Z time format.",
    /// Your previous request to create the named bucket succeeded and you already own it
    BucketAlreadyOwnedByYou,
    //InvalidDuration: "Duration provided in the request is invalid.",
    //XAmzContentSHA256Mismatch: "The provided 'x-amz-content-sha256' header does not match what was computed.",
    //NoSuchCORSConfiguration: "The specified bucket does not have a CORS configuration.",
    //Conflict: "Bucket not empty.",
    // endregion
    /// Default value: no error was reported.
    #[default]
    NoError,
    // SDK-level codes, not part of the minio-go table above.
    InvalidMinioErrorCode,
    PermanentRedirect,
    Redirect,
    BadRequest,
    RetryHead,
    ReplicationConfigurationNotFoundError,
    ServerSideEncryptionConfigurationNotFoundError,
    NoSuchTagSet,
    NoSuchObjectLockConfiguration,
    NoSuchLifecycleConfiguration,
    ResourceNotFound,
    ResourceConflict,
    AccessDenied,
    NotSupported,
    InvalidWriteOffset,
    OtherError(String), // This is a catch-all for any error code not explicitly defined
}
/// Every non-parameterized `MinioErrorCode` variant.
///
/// `OtherError` is deliberately absent: it carries arbitrary server text and
/// has no single canonical value. Within this file the list is consumed only
/// by the string round-trip test below (hence `allow(dead_code)` for
/// non-test builds).
#[allow(dead_code)]
const ALL_MINIO_ERROR_CODE: &[MinioErrorCode] = &[
    MinioErrorCode::NoError,
    MinioErrorCode::InvalidMinioErrorCode,
    MinioErrorCode::PermanentRedirect,
    MinioErrorCode::Redirect,
    MinioErrorCode::BadRequest,
    MinioErrorCode::RetryHead,
    MinioErrorCode::NoSuchBucket,
    MinioErrorCode::NoSuchBucketPolicy,
    MinioErrorCode::ReplicationConfigurationNotFoundError,
    MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError,
    MinioErrorCode::NoSuchTagSet,
    MinioErrorCode::NoSuchObjectLockConfiguration,
    MinioErrorCode::NoSuchLifecycleConfiguration,
    MinioErrorCode::NoSuchKey,
    MinioErrorCode::ResourceNotFound,
    MinioErrorCode::MethodNotAllowed,
    MinioErrorCode::ResourceConflict,
    MinioErrorCode::AccessDenied,
    MinioErrorCode::NotSupported,
    MinioErrorCode::BucketNotEmpty,
    MinioErrorCode::BucketAlreadyOwnedByYou,
    MinioErrorCode::InvalidWriteOffset,
    //MinioErrorCode::OtherError("".to_string()),
];
impl FromStr for MinioErrorCode {
    type Err = Error;

    /// Parses a case-insensitive error-code name into a [`MinioErrorCode`].
    ///
    /// Parsing never fails: names that are not recognized are preserved
    /// (lowercased) in [`MinioErrorCode::OtherError`].
    fn from_str(s: &str) -> Result<Self, Error> {
        let lowered = s.to_lowercase();
        let code = match lowered.as_str() {
            "noerror" => MinioErrorCode::NoError,
            "invalidminioerrorcode" => MinioErrorCode::InvalidMinioErrorCode,
            "permanentredirect" => MinioErrorCode::PermanentRedirect,
            "redirect" => MinioErrorCode::Redirect,
            "badrequest" => MinioErrorCode::BadRequest,
            "retryhead" => MinioErrorCode::RetryHead,
            "nosuchbucket" => MinioErrorCode::NoSuchBucket,
            "nosuchbucketpolicy" => MinioErrorCode::NoSuchBucketPolicy,
            "replicationconfigurationnotfounderror" => {
                MinioErrorCode::ReplicationConfigurationNotFoundError
            }
            "serversideencryptionconfigurationnotfounderror" => {
                MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError
            }
            "nosuchtagset" => MinioErrorCode::NoSuchTagSet,
            "nosuchobjectlockconfiguration" => MinioErrorCode::NoSuchObjectLockConfiguration,
            "nosuchlifecycleconfiguration" => MinioErrorCode::NoSuchLifecycleConfiguration,
            "nosuchkey" => MinioErrorCode::NoSuchKey,
            "resourcenotfound" => MinioErrorCode::ResourceNotFound,
            "methodnotallowed" => MinioErrorCode::MethodNotAllowed,
            "resourceconflict" => MinioErrorCode::ResourceConflict,
            "accessdenied" => MinioErrorCode::AccessDenied,
            "notsupported" => MinioErrorCode::NotSupported,
            "bucketnotempty" => MinioErrorCode::BucketNotEmpty,
            "bucketalreadyownedbyyou" => MinioErrorCode::BucketAlreadyOwnedByYou,
            "invalidwriteoffset" => MinioErrorCode::InvalidWriteOffset,
            other => MinioErrorCode::OtherError(other.to_owned()),
        };
        Ok(code)
    }
}
impl std::fmt::Display for MinioErrorCode {
    /// Writes the canonical (PascalCase) name of the code; for
    /// `OtherError` the stored server text is written verbatim.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let name: &str = match self {
            MinioErrorCode::NoError => "NoError",
            MinioErrorCode::InvalidMinioErrorCode => "InvalidMinioErrorCode",
            MinioErrorCode::PermanentRedirect => "PermanentRedirect",
            MinioErrorCode::Redirect => "Redirect",
            MinioErrorCode::BadRequest => "BadRequest",
            MinioErrorCode::RetryHead => "RetryHead",
            MinioErrorCode::NoSuchBucket => "NoSuchBucket",
            MinioErrorCode::NoSuchBucketPolicy => "NoSuchBucketPolicy",
            MinioErrorCode::ReplicationConfigurationNotFoundError => {
                "ReplicationConfigurationNotFoundError"
            }
            MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError => {
                "ServerSideEncryptionConfigurationNotFoundError"
            }
            MinioErrorCode::NoSuchTagSet => "NoSuchTagSet",
            MinioErrorCode::NoSuchObjectLockConfiguration => "NoSuchObjectLockConfiguration",
            MinioErrorCode::NoSuchLifecycleConfiguration => "NoSuchLifecycleConfiguration",
            MinioErrorCode::NoSuchKey => "NoSuchKey",
            MinioErrorCode::ResourceNotFound => "ResourceNotFound",
            MinioErrorCode::MethodNotAllowed => "MethodNotAllowed",
            MinioErrorCode::ResourceConflict => "ResourceConflict",
            MinioErrorCode::AccessDenied => "AccessDenied",
            MinioErrorCode::NotSupported => "NotSupported",
            MinioErrorCode::BucketNotEmpty => "BucketNotEmpty",
            MinioErrorCode::BucketAlreadyOwnedByYou => "BucketAlreadyOwnedByYou",
            MinioErrorCode::InvalidWriteOffset => "InvalidWriteOffset",
            MinioErrorCode::OtherError(msg) => msg.as_str(),
        };
        f.write_str(name)
    }
}
#[cfg(test)]
mod test_error_code {
    use super::*;

    /// Every known `MinioErrorCode` must survive a to-string / parse round-trip.
    #[test]
    fn test_minio_error_code_roundtrip() {
        for expected in ALL_MINIO_ERROR_CODE.iter() {
            let rendered = expected.to_string();
            let parsed: MinioErrorCode = rendered.parse().unwrap();
            assert_eq!(
                parsed, *expected,
                "Failed MinioErrorCode round-trip: code {} -> str '{}' -> code {}",
                expected, rendered, parsed
            );
        }
    }
}
/// `MinioErrorResponse` is the typed error returned by all API operations.
///
/// Equivalent of `ErrorResponse` in the minio-go SDK.
#[derive(Clone, Debug, Default)]
pub struct MinioErrorResponse {
    /// Machine-readable error code reported by the server.
    code: MinioErrorCode,
    /// Optional human-readable description of the error.
    message: Option<String>,
    /// HTTP response headers that accompanied the error.
    headers: HeaderMap,
    /// Resource the failed request addressed.
    resource: String,
    /// Server-assigned id of the failed request.
    request_id: String,
    /// Id of the host that produced the response.
    host_id: String,
    /// Bucket involved, when reported by the server.
    bucket_name: Option<String>,
    /// Object key involved, when reported by the server.
    object_name: Option<String>,
}
impl MinioErrorResponse {
    /// Assembles an error response from already-extracted parts.
    ///
    /// Use [`Self::new_from_body`] instead when the raw XML error body is
    /// still to be parsed.
    pub fn new(
        headers: HeaderMap,
        code: MinioErrorCode,
        message: Option<String>,
        resource: String,
        request_id: String,
        host_id: String,
        bucket_name: Option<String>,
        object_name: Option<String>,
    ) -> Self {
        Self {
            code,
            message,
            headers,
            resource,
            request_id,
            host_id,
            bucket_name,
            object_name,
        }
    }

    /// Parses an XML error document returned by the server into a typed
    /// error response, keeping the supplied response `headers`.
    ///
    /// Fails when the body is not well-formed XML.
    pub fn new_from_body(body: Bytes, headers: HeaderMap) -> Result<Self, Error> {
        let root = Element::parse(body.reader()).map_err(ValidationErr::from)?;
        let code: MinioErrorCode = get_text_default(&root, "Code").parse()?;
        Ok(Self {
            code,
            message: get_text_option(&root, "Message"),
            headers,
            resource: get_text_default(&root, "Resource"),
            request_id: get_text_default(&root, "RequestId"),
            host_id: get_text_default(&root, "HostId"),
            bucket_name: get_text_option(&root, "BucketName"),
            object_name: get_text_option(&root, "Key"),
        })
    }

    /// Borrows the HTTP headers as returned by the server.
    pub fn headers(&self) -> &HeaderMap {
        &self.headers
    }

    /// Take ownership of the headers as returned by the server,
    /// leaving an empty `HeaderMap` behind.
    pub fn take_headers(&mut self) -> HeaderMap {
        std::mem::take(&mut self.headers)
    }

    /// The machine-readable error code (returned by clone).
    pub fn code(&self) -> MinioErrorCode {
        self.code.clone()
    }

    /// The optional human-readable message supplied by the server.
    pub fn message(&self) -> &Option<String> {
        &self.message
    }

    /// Overwrites the human-readable message.
    pub fn set_message(&mut self, message: String) {
        self.message = Some(message);
    }

    /// The resource the failed request addressed.
    pub fn resource(&self) -> &str {
        &self.resource
    }

    /// The server-assigned id of the failed request.
    pub fn request_id(&self) -> &str {
        &self.request_id
    }

    /// The id of the host that answered the request.
    pub fn host_id(&self) -> &str {
        &self.host_id
    }

    /// The bucket involved, when the server reported one.
    pub fn bucket_name(&self) -> &Option<String> {
        &self.bucket_name
    }

    /// The object key involved, when the server reported one.
    pub fn object_name(&self) -> &Option<String> {
        &self.object_name
    }
}
impl std::fmt::Display for MinioErrorResponse {
    // Renders every field on its own indented line; `{:?}` keeps the
    // `None`/`Some(..)` state of the optional fields visible.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "S3 operation failed: \n\tcode: {:?}\n\tmessage: {:?}\n\tresource: {}\n\trequest_id: {}\n\thost_id: {}\n\tbucket_name: {:?}\n\tobject_name: {:?}",
            self.code,
            self.message,
            self.resource,
            self.request_id,
            self.host_id,
            self.bucket_name,
            self.object_name,
        )
    }
}

// Marker impl so `MinioErrorResponse` can be used as a `std::error::Error`
// (e.g. boxed or wrapped by other error types); `Display`/`Debug` above
// satisfy the trait's supertraits.
impl std::error::Error for MinioErrorResponse {}

View File

@ -19,8 +19,10 @@ pub mod builders;
pub mod client;
pub mod creds;
pub mod error;
pub mod header_constants;
pub mod http;
pub mod lifecycle_config;
pub mod minio_error_response;
pub mod multimap;
mod object_content;
pub mod response;

View File

@ -15,12 +15,11 @@
use crate::s3::utils::url_encode;
use lazy_static::lazy_static;
use multimap::MultiMap;
use regex::Regex;
use std::collections::BTreeMap;
/// Multimap for string key and string value
pub type Multimap = MultiMap<String, String>;
pub type Multimap = multimap::MultiMap<String, String>;
pub trait MultimapExt {
/// Adds a key-value pair to the multimap

View File

@ -24,7 +24,7 @@ use crate::s3::segmented_bytes::SegmentedBytes;
#[cfg(test)]
use quickcheck::Arbitrary;
type IoResult<T> = Result<T, std::io::Error>;
type IoResult<T> = core::result::Result<T, std::io::Error>;
// region: Size
@ -209,11 +209,9 @@ impl ObjectContent {
if file_path.is_dir() {
return Err(std::io::Error::other("path is a directory"));
}
let parent_dir = file_path.parent().ok_or_else(|| {
std::io::Error::other(format!(
"path {file_path:?} does not have a parent directory"
))
})?;
let parent_dir = file_path.parent().ok_or(std::io::Error::other(format!(
"path {file_path:?} does not have a parent directory"
)))?;
if !parent_dir.is_dir() {
async_std::fs::create_dir_all(parent_dir).await?;
}

View File

@ -1,6 +1,7 @@
use crate::s3::error::Error;
use crate::s3::error::ValidationErr;
use crate::s3::header_constants::*;
use crate::s3::types::S3Request;
use crate::s3::utils::{get_text, trim_quotes};
use crate::s3::utils::{get_text_result, parse_bool, trim_quotes};
use bytes::{Buf, Bytes};
use http::HeaderMap;
use std::collections::HashMap;
@ -21,7 +22,7 @@ macro_rules! impl_from_s3response {
Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?
body: resp.bytes().await.map_err(ValidationErr::from)?,
})
}
}
@ -44,7 +45,7 @@ macro_rules! impl_from_s3response_with_size {
Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
body: resp.bytes().await.map_err(ValidationErr::from)?,
object_size: 0, // Default value, can be set later
})
}
@ -117,7 +118,7 @@ pub trait HasVersion: HasS3Fields {
#[inline]
fn version_id(&self) -> Option<&str> {
self.headers()
.get("x-amz-version-id")
.get(X_AMZ_VERSION_ID)
.and_then(|v| v.to_str().ok())
}
}
@ -128,7 +129,7 @@ pub trait HasEtagFromHeaders: HasS3Fields {
/// Returns the value of the `ETag` header from response headers (for operations that return ETag in headers).
/// The ETag is typically a hash of the object content, but it may vary based on the storage backend.
#[inline]
fn etag(&self) -> Result<String, Error> {
fn etag(&self) -> Result<String, ValidationErr> {
// Retrieve the ETag from the response headers.
let etag = self
.headers()
@ -148,10 +149,10 @@ pub trait HasEtagFromBody: HasS3Fields {
/// Returns the value of the `ETag` from the response body, which is a unique identifier for
/// the object version. The ETag is typically a hash of the object content, but it may vary
/// based on the storage backend.
fn etag(&self) -> Result<String, Error> {
fn etag(&self) -> Result<String, ValidationErr> {
// Retrieve the ETag from the response body.
let root = xmltree::Element::parse(self.body().clone().reader())?;
let etag: String = get_text(&root, "ETag")?;
let etag: String = get_text_result(&root, "ETag")?;
Ok(trim_quotes(etag))
}
}
@ -162,7 +163,7 @@ pub trait HasObjectSize: HasS3Fields {
#[inline]
fn object_size(&self) -> u64 {
self.headers()
.get("x-amz-object-size")
.get(X_AMZ_OBJECT_SIZE)
.and_then(|v| v.to_str().ok())
.and_then(|s| s.parse::<u64>().ok())
.unwrap_or(0)
@ -181,18 +182,10 @@ pub trait HasIsDeleteMarker: HasS3Fields {
/// was not (false) a delete marker before deletion. In a simple DELETE, this header indicates
/// whether (true) or not (false) the current version of the object is a delete marker.
#[inline]
fn is_delete_marker(&self) -> Result<Option<bool>, Error> {
Ok(Some(
self.headers()
.get("x-amz-delete-marker")
.map(|v| v == "true")
.unwrap_or(false),
))
//Ok(match self.headers().get("x-amz-delete-marker") {
// Some(v) => Some(v.to_str()?.parse::<bool>()?),
// None => None,
//})
fn is_delete_marker(&self) -> Result<bool, ValidationErr> {
self.headers()
.get(X_AMZ_DELETE_MARKER)
.map_or(Ok(false), |v| parse_bool(v.to_str()?))
}
}
@ -201,7 +194,7 @@ pub trait HasTagging: HasS3Fields {
///
/// If the bucket has no tags, this will return an empty `HashMap`.
#[inline]
fn tags(&self) -> Result<HashMap<String, String>, Error> {
fn tags(&self) -> Result<HashMap<String, String>, ValidationErr> {
let mut tags = HashMap::new();
if self.body().is_empty() {
// Note: body is empty when server responses with NoSuchTagSet
@ -210,9 +203,9 @@ pub trait HasTagging: HasS3Fields {
let mut root = Element::parse(self.body().clone().reader())?;
let element = root
.get_mut_child("TagSet")
.ok_or(Error::XmlError("<TagSet> tag not found".to_string()))?;
.ok_or(ValidationErr::xml_error("<TagSet> tag not found"))?;
while let Some(v) = element.take_child("Tag") {
tags.insert(get_text(&v, "Key")?, get_text(&v, "Value")?);
tags.insert(get_text_result(&v, "Key")?, get_text_result(&v, "Value")?);
}
Ok(tags)
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{
HasBucket, HasEtagFromHeaders, HasObject, HasObjectSize, HasRegion, HasS3Fields, HasVersion,
};

View File

@ -14,7 +14,9 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::S3ServerError::S3Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use async_trait::async_trait;
@ -47,15 +49,19 @@ impl FromS3Response for BucketExistsResponse {
Ok(mut resp) => Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
body: resp.bytes().await.map_err(ValidationErr::from)?,
exists: true,
}),
Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchBucket) => Ok(Self {
request,
headers: e.headers,
body: Bytes::new(),
exists: false,
}),
Err(Error::S3Server(S3Error(mut e)))
if matches!(e.code(), MinioErrorCode::NoSuchBucket) =>
{
Ok(Self {
request,
headers: e.take_headers(),
body: Bytes::new(),
exists: false,
})
}
Err(e) => Err(e),
}
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{
HasBucket, HasEtagFromBody, HasObject, HasRegion, HasS3Fields, HasVersion,
};

View File

@ -14,7 +14,7 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use async_trait::async_trait;
@ -46,17 +46,17 @@ impl FromS3Response for CreateBucketResponse {
let mut resp: reqwest::Response = response?;
let mut request = request;
let bucket: &str = request
let bucket = request
.bucket
.as_deref()
.ok_or_else(|| Error::InvalidBucketName("no bucket specified".into()))?;
.ok_or(ValidationErr::MissingBucketName)?;
let region: &str = &request.inner_region;
request.client.add_bucket_region(bucket, region);
Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
body: resp.bytes().await.map_err(ValidationErr::from)?,
})
}
}

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use bytes::Bytes;
@ -44,16 +45,16 @@ impl FromS3Response for DeleteBucketResponse {
let mut resp: reqwest::Response = response?;
let mut request = request;
let bucket: &str = request
let bucket = request
.bucket
.as_deref()
.ok_or_else(|| Error::InvalidBucketName("no bucket specified".into()))?;
.ok_or(Error::Validation(ValidationErr::MissingBucketName))?;
request.client.remove_bucket_region(bucket);
Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
body: resp.bytes().await.map_err(ValidationErr::from)?,
})
}
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::{Error, S3ServerError, ValidationErr};
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use async_trait::async_trait;
@ -46,13 +47,17 @@ impl FromS3Response for DeleteBucketPolicyResponse {
Ok(mut resp) => Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
}),
Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchBucketPolicy) => Ok(Self {
request,
headers: e.headers,
body: Bytes::new(),
body: resp.bytes().await.map_err(ValidationErr::from)?,
}),
Err(Error::S3Server(S3ServerError::S3Error(mut e)))
if matches!(e.code(), MinioErrorCode::NoSuchBucketPolicy) =>
{
Ok(Self {
request,
headers: e.take_headers(),
body: Bytes::new(),
})
}
Err(e) => Err(e),
}
}

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::{Error, S3ServerError, ValidationErr};
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use async_trait::async_trait;
@ -46,14 +47,17 @@ impl FromS3Response for DeleteBucketReplicationResponse {
Ok(mut resp) => Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
body: resp.bytes().await.map_err(ValidationErr::from)?,
}),
Err(Error::S3Error(e))
if matches!(e.code, ErrorCode::ReplicationConfigurationNotFoundError) =>
Err(Error::S3Server(S3ServerError::S3Error(mut e)))
if matches!(
e.code(),
MinioErrorCode::ReplicationConfigurationNotFoundError
) =>
{
Ok(Self {
request,
headers: e.headers,
headers: e.take_headers(),
body: Bytes::new(),
})
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};

View File

@ -13,12 +13,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{
HasBucket, HasIsDeleteMarker, HasRegion, HasS3Fields, HasVersion,
};
use crate::s3::types::{FromS3Response, S3Request};
use crate::s3::utils::{get_default_text, get_option_text, get_text};
use crate::s3::utils::{get_text_default, get_text_option, get_text_result};
use crate::{impl_from_s3response, impl_has_s3fields};
use bytes::{Buf, Bytes};
use http::HeaderMap;
@ -101,7 +101,7 @@ impl_has_s3fields!(DeleteObjectsResponse);
impl DeleteObjectsResponse {
/// Returns the bucket name for which the delete operation was performed.
pub fn result(&self) -> Result<Vec<DeleteResult>, Error> {
let root = Element::parse(self.body.clone().reader())?;
let root = Element::parse(self.body.clone().reader()).map_err(ValidationErr::from)?;
let result = root
.children
.iter()
@ -109,19 +109,19 @@ impl DeleteObjectsResponse {
.map(|elem| {
if elem.name == "Deleted" {
Ok(DeleteResult::Deleted(DeletedObject {
name: get_text(elem, "Key")?,
version_id: get_option_text(elem, "VersionId"),
delete_marker: get_default_text(elem, "DeleteMarker").to_lowercase()
name: get_text_result(elem, "Key")?,
version_id: get_text_option(elem, "VersionId"),
delete_marker: get_text_default(elem, "DeleteMarker").to_lowercase()
== "true",
delete_marker_version_id: get_option_text(elem, "DeleteMarkerVersionId"),
delete_marker_version_id: get_text_option(elem, "DeleteMarkerVersionId"),
}))
} else {
assert_eq!(elem.name, "Error");
Ok(DeleteResult::Error(DeleteError {
code: get_text(elem, "Code")?,
message: get_text(elem, "Message")?,
object_name: get_text(elem, "Key")?,
version_id: get_option_text(elem, "VersionId"),
code: get_text_result(elem, "Code")?,
message: get_text_result(elem, "Message")?,
object_name: get_text_result(elem, "Key")?,
version_id: get_text_option(elem, "VersionId"),
}))
}
})

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{
HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion,
};

View File

@ -14,10 +14,11 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::{Error, S3ServerError, ValidationErr};
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request, SseConfig};
use crate::s3::utils::{get_option_text, get_text};
use crate::s3::utils::{get_text_option, get_text_result};
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use http::HeaderMap;
@ -49,25 +50,25 @@ impl GetBucketEncryptionResponse {
///
/// This includes the encryption algorithm and, if applicable, the AWS KMS key ID used for encrypting objects.
/// If the bucket has no default encryption configuration, this method returns a default `SseConfig` with empty fields.
pub fn config(&self) -> Result<SseConfig, Error> {
pub fn config(&self) -> Result<SseConfig, ValidationErr> {
if self.body.is_empty() {
return Ok(SseConfig::default());
}
let mut root = Element::parse(self.body.clone().reader())?; // clone of Bytes is inexpensive
let mut root = Element::parse(self.body.clone().reader()).map_err(ValidationErr::from)?; // clone of Bytes is inexpensive
let rule = root
.get_mut_child("Rule")
.ok_or(Error::XmlError("<Rule> tag not found".into()))?;
.ok_or(ValidationErr::xml_error("<Rule> tag not found"))?;
let sse_by_default = rule
.get_mut_child("ApplyServerSideEncryptionByDefault")
.ok_or(Error::XmlError(
"<ApplyServerSideEncryptionByDefault> tag not found".into(),
.ok_or(ValidationErr::xml_error(
"<ApplyServerSideEncryptionByDefault> tag not found",
))?;
Ok(SseConfig {
sse_algorithm: get_text(sse_by_default, "SSEAlgorithm")?,
kms_master_key_id: get_option_text(sse_by_default, "KMSMasterKeyID"),
sse_algorithm: get_text_result(sse_by_default, "SSEAlgorithm")?,
kms_master_key_id: get_text_option(sse_by_default, "KMSMasterKeyID"),
})
}
}
@ -82,17 +83,17 @@ impl FromS3Response for GetBucketEncryptionResponse {
Ok(mut resp) => Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
body: resp.bytes().await.map_err(ValidationErr::from)?,
}),
Err(Error::S3Error(e))
Err(Error::S3Server(S3ServerError::S3Error(mut e)))
if matches!(
e.code,
ErrorCode::ServerSideEncryptionConfigurationNotFoundError
e.code(),
MinioErrorCode::ServerSideEncryptionConfigurationNotFoundError
) =>
{
Ok(Self {
request,
headers: e.headers,
headers: e.take_headers(),
body: Bytes::new(),
})
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::lifecycle_config::LifecycleConfig;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
@ -49,7 +49,7 @@ impl GetBucketLifecycleResponse {
///
/// This configuration includes rules for managing the lifecycle of objects in the bucket,
/// such as transitioning them to different storage classes or expiring them after a specified period.
pub fn config(&self) -> Result<LifecycleConfig, Error> {
pub fn config(&self) -> Result<LifecycleConfig, ValidationErr> {
LifecycleConfig::from_xml(&Element::parse(self.body.clone().reader())?)
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, NotificationConfig, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};
@ -47,7 +47,7 @@ impl GetBucketNotificationResponse {
///
/// This configuration includes the event types and the destinations (e.g., SNS topics, SQS queues, Lambda functions)
/// configured to receive notifications for those events.
pub fn config(&self) -> Result<NotificationConfig, Error> {
pub fn config(&self) -> Result<NotificationConfig, ValidationErr> {
NotificationConfig::from_xml(&mut Element::parse(self.body.clone().reader())?)
}
}

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::{Error, S3ServerError, ValidationErr};
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use async_trait::async_trait;
@ -46,10 +47,8 @@ impl GetBucketPolicyResponse {
///
/// This method retrieves the policy associated with the bucket, which defines permissions
/// for accessing the bucket and its contents.
pub fn config(&self) -> Result<&str, Error> {
std::str::from_utf8(&self.body).map_err(|e| {
Error::Utf8Error(format!("Failed to parse bucket policy as UTF-8: {e}").into())
})
pub fn config(&self) -> Result<&str, ValidationErr> {
Ok(std::str::from_utf8(&self.body)?)
}
}
@ -63,13 +62,17 @@ impl FromS3Response for GetBucketPolicyResponse {
Ok(mut resp) => Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
}),
Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchBucketPolicy) => Ok(Self {
request,
headers: e.headers,
body: Bytes::from_static("{}".as_ref()),
body: resp.bytes().await.map_err(ValidationErr::from)?,
}),
Err(Error::S3Server(S3ServerError::S3Error(mut e)))
if matches!(e.code(), MinioErrorCode::NoSuchBucketPolicy) =>
{
Ok(Self {
request,
headers: e.take_headers(),
body: Bytes::from_static("{}".as_ref()),
})
}
Err(e) => Err(e),
}
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, ReplicationConfig, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};
@ -48,7 +48,7 @@ impl GetBucketReplicationResponse {
/// and one or more replication rules that specify the conditions under which objects are replicated.
///
/// For more details on replication configuration elements, see the [AWS S3 Replication Configuration documentation](https://docs.aws.amazon.com/AmazonS3/latest/userguide/replication-add-config.html).
pub fn config(&self) -> Result<ReplicationConfig, Error> {
pub fn config(&self) -> Result<ReplicationConfig, ValidationErr> {
let root = Element::parse(self.body.clone().reader())?;
ReplicationConfig::from_xml(&root)
}

View File

@ -14,7 +14,8 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::{Error, S3ServerError, ValidationErr};
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields, HasTagging};
use crate::s3::types::{FromS3Response, S3Request};
use async_trait::async_trait;
@ -52,13 +53,17 @@ impl FromS3Response for GetBucketTaggingResponse {
Ok(mut resp) => Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
}),
Err(Error::S3Error(e)) if matches!(e.code, ErrorCode::NoSuchTagSet) => Ok(Self {
request,
headers: e.headers,
body: Bytes::new(),
body: resp.bytes().await.map_err(ValidationErr::from)?,
}),
Err(Error::S3Server(S3ServerError::S3Error(mut e)))
if matches!(e.code(), MinioErrorCode::NoSuchTagSet) =>
{
Ok(Self {
request,
headers: e.take_headers(),
body: Bytes::new(),
})
}
Err(e) => Err(e),
}
}

View File

@ -14,10 +14,10 @@
// limitations under the License.
use crate::s3::builders::VersioningStatus;
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::s3::utils::get_option_text;
use crate::s3::utils::get_text_option;
use crate::{impl_from_s3response, impl_has_s3fields};
use bytes::{Buf, Bytes};
use http::HeaderMap;
@ -51,9 +51,9 @@ impl GetBucketVersioningResponse {
/// - `Some(VersioningStatus::Enabled)` if versioning is enabled.
/// - `Some(VersioningStatus::Suspended)` if versioning is suspended.
/// - `None` if versioning has never been configured for this bucket.
pub fn status(&self) -> Result<Option<VersioningStatus>, Error> {
pub fn status(&self) -> Result<Option<VersioningStatus>, ValidationErr> {
let root = Element::parse(self.body.clone().reader())?;
Ok(get_option_text(&root, "Status").map(|v| match v.as_str() {
Ok(get_text_option(&root, "Status").map(|v| match v.as_str() {
"Enabled" => VersioningStatus::Enabled,
_ => VersioningStatus::Suspended, // Default case
}))
@ -65,8 +65,8 @@ impl GetBucketVersioningResponse {
/// - `Some(true)` if MFA delete is enabled.
/// - `Some(false)` if MFA delete is disabled.
/// - `None` if MFA delete has never been configured for this bucket.
pub fn mfa_delete(&self) -> Result<Option<bool>, Error> {
pub fn mfa_delete(&self) -> Result<Option<bool>, ValidationErr> {
let root = Element::parse(self.body.clone().reader())?;
Ok(get_option_text(&root, "MFADelete").map(|v| v.eq_ignore_ascii_case("Enabled")))
Ok(get_text_option(&root, "MFADelete").map(|v| v.eq_ignore_ascii_case("Enabled")))
}
}

View File

@ -14,14 +14,12 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::builders::ObjectContent;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{
HasBucket, HasEtagFromHeaders, HasObject, HasRegion, HasS3Fields, HasVersion,
};
use crate::s3::{
builders::ObjectContent,
error::Error,
types::{FromS3Response, S3Request},
};
use crate::s3::types::{FromS3Response, S3Request};
use async_trait::async_trait;
use bytes::Bytes;
use futures_util::TryStreamExt;
@ -52,10 +50,10 @@ impl GetObjectResponse {
}
/// Returns the content size (in Bytes) of the object.
pub fn object_size(&self) -> Result<u64, Error> {
pub fn object_size(&self) -> Result<u64, ValidationErr> {
self.resp
.content_length()
.ok_or(Error::ContentLengthUnknown)
.ok_or(ValidationErr::ContentLengthUnknown)
}
}

View File

@ -13,12 +13,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{
HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion,
};
use crate::s3::types::{FromS3Response, S3Request};
use crate::s3::utils::get_default_text;
use crate::s3::utils::get_text_default;
use crate::{impl_from_s3response, impl_has_s3fields};
use bytes::{Buf, Bytes};
use http::HeaderMap;
@ -47,11 +47,11 @@ impl GetObjectLegalHoldResponse {
/// Returns the legal hold status of the object.
///
/// This method retrieves whether the legal hold is enabled for the specified object.
pub fn enabled(&self) -> Result<bool, Error> {
pub fn enabled(&self) -> Result<bool, ValidationErr> {
if self.body.is_empty() {
return Ok(false); // No legal hold configuration present due to NoSuchObjectLockConfiguration
}
let root = Element::parse(self.body.clone().reader())?;
Ok(get_default_text(&root, "Status") == "ON")
Ok(get_text_default(&root, "Status") == "ON")
}
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, ObjectLockConfig, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};
@ -46,7 +46,7 @@ impl GetObjectLockConfigResponse {
///
/// This method retrieves the Object Lock settings, which include retention mode and period,
/// as well as legal hold status for the bucket.
pub fn config(&self) -> Result<ObjectLockConfig, Error> {
pub fn config(&self) -> Result<ObjectLockConfig, ValidationErr> {
ObjectLockConfig::from_xml(&Element::parse(self.body.clone().reader())?)
}
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{HasBucket, HasObject, HasRegion, HasS3Fields};
use crate::s3::types::{FromS3Response, S3Request};
use crate::{impl_from_s3response, impl_has_s3fields};
@ -38,9 +38,7 @@ impl GetObjectPromptResponse {
/// Returns the prompt response for the object.
///
/// This method retrieves the content of the object as a UTF-8 encoded string.
pub fn prompt_response(&self) -> Result<&str, Error> {
std::str::from_utf8(&self.body).map_err(|e| {
Error::Utf8Error(format!("Failed to parse prompt_response as UTF-8: {e}").into())
})
pub fn prompt_response(&self) -> Result<&str, ValidationErr> {
Ok(std::str::from_utf8(&self.body)?)
}
}

View File

@ -14,12 +14,13 @@
// limitations under the License.
use crate::impl_has_s3fields;
use crate::s3::error::{Error, ErrorCode};
use crate::s3::error::{Error, S3ServerError, ValidationErr};
use crate::s3::minio_error_response::MinioErrorCode;
use crate::s3::response::a_response_traits::{
HasBucket, HasObject, HasRegion, HasS3Fields, HasVersion,
};
use crate::s3::types::{FromS3Response, RetentionMode, S3Request};
use crate::s3::utils::{UtcTime, from_iso8601utc, get_option_text};
use crate::s3::utils::{UtcTime, from_iso8601utc, get_text_option};
use async_trait::async_trait;
use bytes::{Buf, Bytes};
use http::HeaderMap;
@ -45,12 +46,12 @@ impl GetObjectRetentionResponse {
/// Returns the retention mode of the object.
///
/// This method retrieves the retention mode, which can be either `Governance` or `Compliance`.
pub fn retention_mode(&self) -> Result<Option<RetentionMode>, Error> {
pub fn retention_mode(&self) -> Result<Option<RetentionMode>, ValidationErr> {
if self.body.is_empty() {
return Ok(None);
}
let root = Element::parse(self.body.clone().reader())?;
Ok(match get_option_text(&root, "Mode") {
Ok(match get_text_option(&root, "Mode") {
Some(v) => Some(RetentionMode::parse(&v)?),
_ => None,
})
@ -59,12 +60,12 @@ impl GetObjectRetentionResponse {
/// Returns the date until which the object is retained.
///
/// This method retrieves the retention date, which indicates when the object will no longer be retained.
pub fn retain_until_date(&self) -> Result<Option<UtcTime>, Error> {
pub fn retain_until_date(&self) -> Result<Option<UtcTime>, ValidationErr> {
if self.body.is_empty() {
return Ok(None);
}
let root = Element::parse(self.body.clone().reader())?;
Ok(match get_option_text(&root, "RetainUntilDate") {
Ok(match get_text_option(&root, "RetainUntilDate") {
Some(v) => Some(from_iso8601utc(&v)?),
_ => None,
})
@ -81,14 +82,14 @@ impl FromS3Response for GetObjectRetentionResponse {
Ok(mut resp) => Ok(Self {
request,
headers: mem::take(resp.headers_mut()),
body: resp.bytes().await?,
body: resp.bytes().await.map_err(ValidationErr::from)?,
}),
Err(Error::S3Error(e))
if matches!(e.code, ErrorCode::NoSuchObjectLockConfiguration) =>
Err(Error::S3Server(S3ServerError::S3Error(mut e)))
if matches!(e.code(), MinioErrorCode::NoSuchObjectLockConfiguration) =>
{
Ok(Self {
request,
headers: e.headers,
headers: e.take_headers(),
body: Bytes::new(),
})
}

View File

@ -13,7 +13,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::s3::error::Error;
use crate::s3::error::{Error, ValidationErr};
use crate::s3::response::a_response_traits::{
HasBucket, HasObject, HasRegion, HasS3Fields, HasTagging, HasVersion,
};

Some files were not shown because too many files have changed in this diff Show More