chore: clean up error handling

This commit is contained in:
Matteo Settenvini 2025-08-10 18:01:44 +02:00
parent 3c07716a83
commit caacb91123
Signed by: matteo
GPG key ID: 1C1B12600D81DE05
2 changed files with 28 additions and 21 deletions

View file

@ -63,6 +63,17 @@ enum Error {
UnknownError(String),
}
impl From<object_store::Error> for Error {
fn from(value: object_store::Error) -> Self {
match value {
object_store::Error::NotFound { path, source: _ } => {
Self::NotFound(format!("object not found at {}", path))
}
err => Error::UnknownError(err.to_string()),
}
}
}
// Route handler for the bucket root ("/"): delegates to `index` with no path
// prefix so the top-level listing is produced.
#[rocket::get("/")]
async fn index_root(state: &State<Settings>) -> Result<FileView, Error> {
index(None, state).await
@ -81,27 +92,24 @@ async fn index(path: Option<PathBuf>, state: &State<Settings>) -> Result<FileVie
};
/*
The way things work in S3, the following holds for us:
- we need to use a slash as separator
- getting the bucket address (empty prefix) will
return an XML file with all properties; we don't
want that.
We first try to retrieve an object as a file. If that fails,
we fall back to listing the equivalent folder.
*/
if let Some(path) = &object_path
&& object_exists(path, &state).await?
if let Some(object_path) = &object_path
&& object_exists(object_path, &state).await?
{
serve_object(&path, &state).await
log::info!("serving S3 object at {}", &object_path);
serve_object(&object_path, &state).await
} else {
let path = path.unwrap_or_default();
log::info!("listing S3 objects at {}", path.display());
let objects = file_view(object_path, &state).await?;
let rendered = Template::render(
"index",
context! {
path: format!("{}/", path.unwrap_or("".into()).display()),
path: format!("{}/", path.display()),
objects
},
);
@ -111,6 +119,7 @@ async fn index(path: Option<PathBuf>, state: &State<Settings>) -> Result<FileVie
}
async fn object_exists(s3_path: &ObjectStorePath, settings: &Settings) -> Result<bool, Error> {
log::debug!("checking existence of S3 object at {}", s3_path);
match settings.s3_bucket.head(s3_path).await {
Ok(_metadata) => Ok(true),
Err(object_store::Error::NotFound { path: _, source: _ }) => Ok(false),
@ -123,10 +132,7 @@ async fn serve_object(s3_path: &ObjectStorePath, settings: &Settings) -> Result<
.s3_bucket
.get(&s3_path)
.await
.map_err(|e| match e {
object_store::Error::NotFound { path: _, source: _ } => Error::NotFound(e.to_string()),
_ => Error::UnknownError(e.to_string()),
})?
.map_err(Error::from)?
.into_stream();
let s3_path = s3_path.clone();
@ -161,12 +167,7 @@ async fn file_view(
.s3_bucket
.list_with_delimiter(s3_folder_path.as_ref())
.await
.map_err(|err| match err {
object_store::Error::NotFound { path: _, source: _ } => {
Error::NotFound("object not found".into())
}
err => Error::UnknownError(err.to_string()),
})?;
.map_err(Error::from)?;
let folders = s3_objects.common_prefixes.into_iter().map(|dir| {
let dirname = dir.parts().last().unwrap();

View file

@ -2,9 +2,10 @@
// SPDX-License-Identifier: EUPL-1.2
use {
object_store::{ObjectStore, aws},
object_store::{BackoffConfig, ObjectStore, RetryConfig, aws},
rocket::serde::Deserialize,
serde::de::Error,
std::time::Duration,
};
#[derive(Deserialize)]
@ -48,6 +49,11 @@ impl TryInto<Box<dyn ObjectStore>> for S3Config {
.with_secret_access_key(self.secret_access_key)
.with_virtual_hosted_style_request(!self.path_style)
.with_allow_http(true)
.with_retry(RetryConfig {
max_retries: 1,
backoff: BackoffConfig::default(),
retry_timeout: Duration::from_millis(500),
})
.build()?;
log::info!(