Serve both files and directories

Matteo Settenvini 2023-07-02 02:06:27 +02:00
parent 053854ce72
commit 9fa09de8df
3 changed files with 61 additions and 36 deletions

View File

@@ -29,9 +29,31 @@ Then just configure Apache or NGINX to proxy to the given port. For example:
# ... other options ...
</VirtualHost>
```
You probably also want a systemd unit file, for instance `/etc/systemd/system/serves3@.service`:
```ini
[Unit]
Description=ServeS3, a S3 proxy
StartLimitInterval=100
StartLimitBurst=10
[Service]
Type=simple
ExecStart=/usr/local/bin/serves3
WorkingDirectory=/etc/serves3/%i/
Environment=ROCKET_PORT=%i
Restart=always
RestartSec=5s
[Install]
WantedBy=multi-user.target
```
Then, for example, to run on port 8000, put the corresponding configuration file in `/etc/serves3/8000/` and enable the unit with `systemctl enable --now serves3@8000.service`.
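As a sketch, setting up such an instance might look like the following (the configuration file name is only a placeholder; use whatever configuration serves3 expects in its working directory):
```bash
# Create the per-port directory used as the unit's WorkingDirectory
sudo mkdir -p /etc/serves3/8000

# Place your serves3 configuration there (file name shown is illustrative)
sudo cp my-serves3-config.toml /etc/serves3/8000/

# Reload systemd and start the templated unit for port 8000
sudo systemctl daemon-reload
sudo systemctl enable --now serves3@8000.service
```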
## Build and install
```bash

View File

@@ -1,8 +1,6 @@
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
// SPDX-License-Identifier: EUPL-1.2
use s3::serde_types::ListBucketResult;
use {
lazy_static::lazy_static,
rocket::response::Responder,
@@ -80,61 +78,61 @@ enum Error {
#[rocket::get("/<path..>")]
async fn index(path: PathBuf) -> Result<FileView, Error> {
let parent = path.parent();
let s3_path = format!(
"{}{}",
path.display(),
match parent {
Some(_) => "/",
None => "",
}
);
/*
The way things work in S3, the following holds for us:
- we need to use a slash as separator
- folders need to be queried ending with a slash
- getting the bucket address will return an XML file
with all properties; we don't want that.
We first try to list an object as a folder. If that fails,
we fall back to retrieving the object itself.
We first try to retrieve an object as a file. If that fails,
we fall back to listing the equivalent folder.
*/
let s3_objects = BUCKET.list(s3_path, Some("/".into())).await;
let s3_objects = match s3_objects {
Ok(s3_objects) => s3_objects,
Err(_) => {
// TODO: this can be big, we should use streaming,
// not loading in memory.
// FIXME: this can be big, we should use streaming,
// not loading in memory!
if !path.as_os_str().is_empty() {
let data = BUCKET
.get_object(format!("{}", path.display()))
.await
.map_err(|_| Error::NotFound("Object not found".into()))?
.bytes()
.to_vec();
return Ok(FileView::File(data));
}
};
.map_err(|_| Error::NotFound("Object not found".into()));
let objects = s3_fileview(&s3_objects);
if let Ok(contents) = data {
let bytes = contents.bytes().to_vec();
return Ok(FileView::File(bytes));
}
}
let objects = s3_fileview(&path).await?;
let rendered = Template::render(
"index",
context! {
path: format!("{}/", path.display()),
has_parent: !path.as_os_str().is_empty(),
objects
},
);
Ok(FileView::Folder(rendered))
}
fn s3_fileview(s3_objects: &Vec<ListBucketResult>) -> Vec<&str> {
async fn s3_fileview(path: &PathBuf) -> Result<Vec<String>, Error> {
/*
if listing a folder:
- folders will be under 'common_prefixes'
- files will be under the 'contents' property
*/
s3_objects
let parent = path.parent();
let s3_folder_path = match parent {
Some(_) => format!("{}/", path.display()),
None => "".into(),
};
let s3_objects = BUCKET
.list(s3_folder_path, Some("/".into()))
.await
.map_err(|_| Error::NotFound("Object not found".into()))?;
let objects = s3_objects
.iter()
.flat_map(|list| -> Vec<Option<&str>> {
let prefix = if let Some(p) = &list.prefix {
@@ -148,14 +146,19 @@ fn s3_fileview(s3_objects: &Vec<ListBucketResult>) -> Vec<&str> {
.iter()
.flatten()
.map(|dir| dir.prefix.strip_prefix(&prefix));
let files = list
.contents
.iter()
.map(|obj| obj.key.strip_prefix(&prefix));
folders.chain(files).collect()
})
.flatten()
.collect()
.map(str::to_owned)
.collect();
Ok(objects)
}
#[rocket::launch]

View File

@@ -8,7 +8,7 @@
<body>
<h1>{{ path }}</h1>
<ul>
{% if has_parent %}
{% if path != "/" %}
<li><a href="../">..</a></li>
{% endif %}