// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
// SPDX-License-Identifier: EUPL-1.2
use {
    lazy_static::lazy_static,
    rocket::response::Responder,
    rocket::serde::Serialize,
    rocket_dyn_templates::{context, Template},
    std::path::{Path, PathBuf},
};
/// Server configuration, loaded once at startup from `Settings.toml`
/// and/or `SERVES3`-prefixed environment variables (see the `SETTINGS`
/// static below).
struct Settings {
    // Credentials used to authenticate against the S3 endpoint.
    access_key_id: String,
    secret_access_key: String,
    // Name of the bucket whose contents are proxied.
    bucket_name: String,
    // Endpoint URL and region, combined into an s3::Region::Custom.
    endpoint: String,
    region: String,
}
lazy_static! {
    /// Global configuration, built on first access from `Settings.toml`
    /// plus any `SERVES3_*` environment variables (the latter override
    /// the file). Panics if the configuration cannot be read or a
    /// required key is missing — deliberate, since the server cannot
    /// run without a complete S3 configuration.
    static ref SETTINGS: Settings = {
        let settings = config::Config::builder()
            .add_source(config::File::with_name("Settings.toml"))
            .add_source(config::Environment::with_prefix("SERVES3"))
            .build()
            .unwrap();

        Settings {
            access_key_id: settings
                .get_string("access_key_id")
                .expect("Missing configuration key access_key_id"),
            secret_access_key: settings
                .get_string("secret_access_key")
                .expect("Missing configuration key secret_access_key"),
            // NOTE: the config key is "bucket", not "bucket_name".
            bucket_name: settings
                .get_string("bucket")
                .expect("Missing configuration key bucket"),
            region: settings
                .get_string("region")
                .expect("Missing configuration key region"),
            endpoint: settings
                .get_string("endpoint")
                .expect("Missing configuration key endpoint"),
        }
    };

    /// Handle to the proxied S3 bucket, derived from `SETTINGS`.
    /// A custom region is used so that arbitrary (non-AWS) endpoints
    /// are supported.
    static ref BUCKET: s3::bucket::Bucket = {
        let region = s3::Region::Custom {
            region: SETTINGS.region.clone(),
            endpoint: SETTINGS.endpoint.clone(),
        };

        let credentials = s3::creds::Credentials::new(
            Some(&SETTINGS.access_key_id),
            Some(&SETTINGS.secret_access_key),
            None,
            None,
            None,
        )
        .expect("Wrong server S3 configuration");

        s3::bucket::Bucket::new(&SETTINGS.bucket_name, region, credentials)
            .expect("Cannot find or authenticate to S3 bucket")
    };

    /// Directory-listing template, compiled into the binary at build time.
    static ref FILEVIEW_TEMPLATE: &'static str = std::include_str!("../templates/index.html.tera");

    // Workaround for https://github.com/SergioBenitez/Rocket/issues/1792
    // (Rocket insists on a template directory existing on disk even
    // though our only template is embedded above).
    static ref EMPTY_DIR: tempfile::TempDir = tempfile::tempdir()
        .expect("Unable to create an empty temporary folder, is the whole FS read-only?");
}
/// Successful response for a GET: either a rendered folder listing
/// or the raw bytes of a single S3 object.
#[derive(Responder)]
enum FileView {
    /// HTML page listing the contents of an S3 "folder" (prefix).
    #[response(content_type = "text/html")]
    Folder(Template),

    /// Raw contents of an S3 object.
    /// NOTE(review): always served as application/octet-stream; the
    /// object's real content type is not propagated — confirm intended.
    #[response(content_type = "application/octet-stream")]
    File(Vec<u8>),
}
/// One row of the rendered directory listing, serialized into the
/// "index" template context by `s3_fileview`.
#[derive(Serialize)]
struct FileViewItem {
    // Path relative to the listed folder.
    path: String,
    // Human-readable size (e.g. "1.049 MB"), or "[DIR]" for folders.
    size: String,
    // Exact size in bytes; 0 for folders.
    size_bytes: u64,
    // Last-modified timestamp as reported by S3; empty for folders.
    last_modification: String,
}
/// Error responses returned to the HTTP client.
#[derive(Responder, Debug)]
enum Error {
    /// 404: the requested object or prefix does not exist.
    #[response(status = 404)]
    NotFound(String),

    /// 500: any other failure (e.g. the S3 endpoint is unreachable).
    #[response(status = 500)]
    UnknownError(String),
}
#[rocket::get("/<path..>")]
|
2023-07-01 21:31:04 +02:00
|
|
|
async fn index(path: PathBuf) -> Result<FileView, Error> {
|
|
|
|
/*
|
|
|
|
The way things work in S3, the following holds for us:
|
|
|
|
- we need to use a slash as separator
|
|
|
|
- folders need to be queried ending with a slash
|
2023-07-02 12:11:13 +02:00
|
|
|
- getting the bucket address (empty prefix) will
|
|
|
|
return an XML file with all properties; we don't
|
|
|
|
want that.
|
2023-07-01 21:31:04 +02:00
|
|
|
|
2023-07-02 02:06:27 +02:00
|
|
|
We try first to retrieve list an object as a file. If we fail,
|
|
|
|
we fallback to retrieving the equivalent folder.
|
2023-07-01 21:31:04 +02:00
|
|
|
*/
|
2024-05-28 14:10:01 +02:00
|
|
|
|
2023-07-02 12:11:13 +02:00
|
|
|
if let Ok(result) = s3_serve_file(&path).await {
|
|
|
|
Ok(result)
|
|
|
|
} else {
|
|
|
|
let objects = s3_fileview(&path).await?;
|
|
|
|
let rendered = Template::render(
|
|
|
|
"index",
|
|
|
|
context! {
|
|
|
|
path: format!("{}/", path.display()),
|
|
|
|
objects
|
|
|
|
},
|
|
|
|
);
|
|
|
|
Ok(FileView::Folder(rendered))
|
|
|
|
}
|
|
|
|
}
|
2023-07-02 02:06:27 +02:00
|
|
|
|
2023-07-02 12:11:13 +02:00
|
|
|
async fn s3_serve_file(path: &PathBuf) -> Result<FileView, Error> {
|
|
|
|
let is_root_prefix = path.as_os_str().is_empty();
|
|
|
|
if is_root_prefix {
|
|
|
|
return Err(Error::NotFound("Root prefix is not a file".into()));
|
2023-07-02 02:06:27 +02:00
|
|
|
}
|
2023-07-01 21:31:04 +02:00
|
|
|
|
2023-07-02 12:11:13 +02:00
|
|
|
// FIXME: this can be big, we should use streaming,
|
|
|
|
// not loading in memory!
|
2023-07-02 18:18:34 +02:00
|
|
|
let response = BUCKET
|
2023-07-02 12:11:13 +02:00
|
|
|
.get_object(format!("{}", path.display()))
|
|
|
|
.await
|
2023-07-02 18:18:34 +02:00
|
|
|
.map_err(|_| Error::UnknownError("Unable to connect to S3 bucket".into()))?;
|
|
|
|
|
|
|
|
match response.status_code() {
|
|
|
|
200 | 204 => {
|
|
|
|
let bytes = response.bytes().to_vec();
|
|
|
|
Ok(FileView::File(bytes))
|
|
|
|
}
|
|
|
|
404 => Err(Error::NotFound("Object not found".into())),
|
|
|
|
_ => Err(Error::UnknownError("Unknown S3 error".into())),
|
|
|
|
}
|
2023-07-01 21:31:04 +02:00
|
|
|
}
|
|
|
|
|
2023-07-12 16:52:23 +02:00
|
|
|
async fn s3_fileview(path: &PathBuf) -> Result<Vec<FileViewItem>, Error> {
|
2023-07-01 21:31:04 +02:00
|
|
|
/*
|
|
|
|
if listing a folder:
|
|
|
|
- folders will be under 'common_prefixes'
|
|
|
|
- files will be under the 'contents' property
|
|
|
|
*/
|
2023-07-02 02:06:27 +02:00
|
|
|
|
|
|
|
let parent = path.parent();
|
|
|
|
let s3_folder_path = match parent {
|
|
|
|
Some(_) => format!("{}/", path.display()),
|
|
|
|
None => "".into(),
|
|
|
|
};
|
|
|
|
|
|
|
|
let s3_objects = BUCKET
|
2024-04-17 16:06:26 +02:00
|
|
|
.list(s3_folder_path.clone(), Some("/".into()))
|
2023-07-02 02:06:27 +02:00
|
|
|
.await
|
|
|
|
.map_err(|_| Error::NotFound("Object not found".into()))?;
|
|
|
|
|
|
|
|
let objects = s3_objects
|
2023-07-01 21:31:04 +02:00
|
|
|
.iter()
|
2023-07-12 16:52:23 +02:00
|
|
|
.flat_map(|list| -> Vec<Option<FileViewItem>> {
|
2023-07-01 21:31:04 +02:00
|
|
|
let prefix = if let Some(p) = &list.prefix {
|
|
|
|
p.as_str()
|
|
|
|
} else {
|
|
|
|
""
|
|
|
|
};
|
|
|
|
|
2023-07-12 16:52:23 +02:00
|
|
|
let folders = list.common_prefixes.iter().flatten().map(|dir| {
|
|
|
|
let path = dir.prefix.strip_prefix(&prefix);
|
|
|
|
path.map(|path| FileViewItem {
|
|
|
|
path: path.to_owned(),
|
|
|
|
size_bytes: 0,
|
|
|
|
size: "[DIR]".to_owned(),
|
|
|
|
last_modification: String::default(),
|
|
|
|
})
|
|
|
|
});
|
|
|
|
|
|
|
|
let files = list.contents.iter().map(|obj| {
|
|
|
|
let path = obj.key.strip_prefix(&prefix);
|
|
|
|
path.map(|path| FileViewItem {
|
|
|
|
path: path.to_owned(),
|
|
|
|
size_bytes: obj.size,
|
|
|
|
size: size_bytes_to_human(obj.size),
|
|
|
|
last_modification: obj.last_modified.clone(),
|
|
|
|
})
|
|
|
|
});
|
2023-07-02 02:06:27 +02:00
|
|
|
|
2023-07-01 21:31:04 +02:00
|
|
|
folders.chain(files).collect()
|
|
|
|
})
|
|
|
|
.flatten()
|
2023-07-02 02:06:27 +02:00
|
|
|
.collect();
|
|
|
|
|
|
|
|
Ok(objects)
|
2023-07-01 13:03:41 +02:00
|
|
|
}
|
|
|
|
|
2023-07-12 16:52:23 +02:00
|
|
|
fn size_bytes_to_human(bytes: u64) -> String {
|
|
|
|
use human_size::{Any, SpecificSize};
|
|
|
|
|
|
|
|
let size: f64 = bytes as f64;
|
|
|
|
let digits = size.log10().floor() as u32;
|
|
|
|
let mut order = digits / 3;
|
|
|
|
let unit = match order {
|
|
|
|
0 => Any::Byte,
|
|
|
|
1 => Any::Kilobyte,
|
|
|
|
2 => Any::Megabyte,
|
|
|
|
_ => {
|
|
|
|
order = 3; // Let's stop here.
|
|
|
|
Any::Gigabyte
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
format!(
|
|
|
|
"{:.3}",
|
|
|
|
SpecificSize::new(size / 10u64.pow(order * 3) as f64, unit)
|
|
|
|
.unwrap_or(SpecificSize::new(0., Any::Byte).unwrap())
|
|
|
|
)
|
|
|
|
}
|
|
|
|
|
2023-07-01 13:03:41 +02:00
|
|
|
#[rocket::launch]
|
|
|
|
fn rocket() -> _ {
|
2023-07-02 17:54:26 +02:00
|
|
|
eprintln!("Proxying to {} for {}", BUCKET.host(), BUCKET.name());
|
2023-07-02 18:41:47 +02:00
|
|
|
|
|
|
|
let config_figment = rocket::Config::figment().merge(("template_dir", EMPTY_DIR.path())); // We compile the templates in anyway.
|
|
|
|
|
|
|
|
rocket::custom(config_figment)
|
2023-07-01 13:03:41 +02:00
|
|
|
.mount("/", rocket::routes![index])
|
2023-07-02 18:18:34 +02:00
|
|
|
.attach(Template::custom(|engines| {
|
|
|
|
engines
|
|
|
|
.tera
|
|
|
|
.add_raw_template("index", *FILEVIEW_TEMPLATE)
|
|
|
|
.unwrap()
|
|
|
|
}))
|
2023-07-01 13:03:41 +02:00
|
|
|
}
|
2024-04-17 16:06:26 +02:00
|
|
|
|
|
|
|
// Test section starts
|
|
|
|
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;
    use rstest::rstest;

    // Parameterized checks for size_bytes_to_human, covering the
    // B/kB/MB/GB unit boundaries plus both extremes of the u64 range.
    #[rstest]
    #[case(1024, "1.024 kB")]
    #[case(10240, "10.240 kB")]
    #[case(1024*1024, "1.049 MB")]
    #[case(1024*1024*1024, "1.074 GB")]
    #[case(0, "0.000 B")]
    // u64::MAX exceeds the GB bucket; output stays in gigabytes.
    #[case(u64::MAX, format!("{:.3} GB",u64::MAX as f64/(1_000_000_000.0)))]
    #[case(u64::MIN, format!("{:.3} B",u64::MIN as f64))]
    fn test_size_bytes_to_human(#[case] bytes: u64, #[case] expected: String) {
        // Printed value aids diagnosis when run with --nocapture.
        println!("{}", size_bytes_to_human(bytes));
        assert_eq!(size_bytes_to_human(bytes), expected);
    }
}