forked from matteo/serves3
Compare commits
9 Commits
mocking_an
...
main
Author | SHA1 | Date |
---|---|---|
Matteo Settenvini | 373b141346 | |
Matteo Settenvini | e3aca4fe72 | |
Matteo Settenvini | 804ab6ef36 | |
Matteo Settenvini | 59c0543fd2 | |
Eren AY | cf98738a0d | |
Matteo Settenvini | dcd3c10bdd | |
Matteo Settenvini | 4defbcec1f | |
Eren AY | ed3a1fbfe9 | |
Eren Ay | 0318729d3f |
|
@ -7,4 +7,4 @@
|
||||||
|
|
||||||
/build
|
/build
|
||||||
/target
|
/target
|
||||||
/Settings.toml
|
/serves3.toml
|
||||||
|
|
|
@ -3,7 +3,7 @@
|
||||||
|
|
||||||
repos:
|
repos:
|
||||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||||
rev: v4.5.0
|
rev: v4.6.0
|
||||||
hooks:
|
hooks:
|
||||||
- id: check-yaml
|
- id: check-yaml
|
||||||
name: Check YAML files syntax
|
name: Check YAML files syntax
|
||||||
|
@ -23,7 +23,7 @@ repos:
|
||||||
name: Ensure no trailing spaces at the end of lines
|
name: Ensure no trailing spaces at the end of lines
|
||||||
|
|
||||||
- repo: https://github.com/Lucas-C/pre-commit-hooks.git
|
- repo: https://github.com/Lucas-C/pre-commit-hooks.git
|
||||||
rev: v1.5.2
|
rev: v1.5.5
|
||||||
hooks:
|
hooks:
|
||||||
- id: remove-crlf
|
- id: remove-crlf
|
||||||
name: Enforce LF instead of CRLF for newlines
|
name: Enforce LF instead of CRLF for newlines
|
||||||
|
@ -40,7 +40,7 @@ repos:
|
||||||
name: Check Rust code
|
name: Check Rust code
|
||||||
|
|
||||||
- repo: https://github.com/fsfe/reuse-tool.git
|
- repo: https://github.com/fsfe/reuse-tool.git
|
||||||
rev: v2.1.0
|
rev: v4.0.3
|
||||||
hooks:
|
hooks:
|
||||||
- id: reuse
|
- id: reuse
|
||||||
name: Check copyright and license information
|
name: Check copyright and license information
|
||||||
|
@ -56,6 +56,6 @@ repos:
|
||||||
- id: trufflehog
|
- id: trufflehog
|
||||||
name: TruffleHog
|
name: TruffleHog
|
||||||
description: Detect secrets in your data.
|
description: Detect secrets in your data.
|
||||||
entry: bash -c 'podman run -v "$(pwd):/workdir" --rm docker.io/trufflesecurity/trufflehog:latest git file:///workdir'
|
entry: bash -c 'podman run -v "$(pwd):/workdir" --rm docker.io/trufflesecurity/trufflehog:latest git file:///workdir' --only-verified
|
||||||
language: system
|
language: system
|
||||||
stages: ["commit", "push"]
|
stages: ["commit", "push"]
|
||||||
|
|
|
@ -42,6 +42,25 @@
|
||||||
},
|
},
|
||||||
"args": [],
|
"args": [],
|
||||||
"cwd": "${workspaceFolder}"
|
"cwd": "${workspaceFolder}"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"type": "lldb",
|
||||||
|
"request": "launch",
|
||||||
|
"name": "Debug integration test 'integration'",
|
||||||
|
"cargo": {
|
||||||
|
"args": [
|
||||||
|
"test",
|
||||||
|
"--no-run",
|
||||||
|
"--test=integration",
|
||||||
|
"--package=serves3"
|
||||||
|
],
|
||||||
|
"filter": {
|
||||||
|
"name": "integration",
|
||||||
|
"kind": "test"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"args": [],
|
||||||
|
"cwd": "${workspaceFolder}"
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|
|
@ -2,4 +2,5 @@
|
||||||
// SPDX-License-Identifier: CC0-1.0
|
// SPDX-License-Identifier: CC0-1.0
|
||||||
{
|
{
|
||||||
"liveServer.settings.port": 8001,
|
"liveServer.settings.port": 8001,
|
||||||
|
"cmake.configureOnOpen": true,
|
||||||
}
|
}
|
||||||
|
|
|
@ -3,13 +3,15 @@
|
||||||
|
|
||||||
cmake_minimum_required(VERSION 3.25)
|
cmake_minimum_required(VERSION 3.25)
|
||||||
|
|
||||||
project(serves3 VERSION 1.0.0 LANGUAGES NONE)
|
project(serves3 VERSION 1.1.0 LANGUAGES C)
|
||||||
|
|
||||||
|
include(GNUInstallDirs)
|
||||||
|
|
||||||
include(FetchContent)
|
include(FetchContent)
|
||||||
FetchContent_Declare(
|
FetchContent_Declare(
|
||||||
Corrosion
|
Corrosion
|
||||||
GIT_REPOSITORY https://github.com/corrosion-rs/corrosion.git
|
GIT_REPOSITORY https://github.com/corrosion-rs/corrosion.git
|
||||||
GIT_TAG v0.4.1
|
GIT_TAG v0.5.0
|
||||||
)
|
)
|
||||||
FetchContent_MakeAvailable(Corrosion)
|
FetchContent_MakeAvailable(Corrosion)
|
||||||
|
|
||||||
|
@ -20,3 +22,7 @@ corrosion_import_crate(
|
||||||
message(STATUS "Imported crates: ${imported_crates}")
|
message(STATUS "Imported crates: ${imported_crates}")
|
||||||
|
|
||||||
install(IMPORTED_RUNTIME_ARTIFACTS serves3)
|
install(IMPORTED_RUNTIME_ARTIFACTS serves3)
|
||||||
|
install(FILES serves3.toml.example
|
||||||
|
DESTINATION ${CMAKE_INSTALL_DOCDIR})
|
||||||
|
install(FILES serves3@.service
|
||||||
|
DESTINATION ${CMAKE_INSTALL_PREFIX}/lib/systemd/system)
|
||||||
|
|
File diff suppressed because it is too large
Load Diff
32
Cargo.toml
32
Cargo.toml
|
@ -3,7 +3,7 @@
|
||||||
|
|
||||||
[package]
|
[package]
|
||||||
name = "serves3"
|
name = "serves3"
|
||||||
version = "1.0.0"
|
version = "1.1.1"
|
||||||
|
|
||||||
authors = ["Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>"]
|
authors = ["Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>"]
|
||||||
description = "A very simple proxy to browse files from private S3 buckets"
|
description = "A very simple proxy to browse files from private S3 buckets"
|
||||||
|
@ -20,19 +20,27 @@ edition = "2021"
|
||||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
config = "0.13"
|
anyhow = "1.0"
|
||||||
human-size = "0.4"
|
human-size = "0.4"
|
||||||
lazy_static = "1.4"
|
lazy_static = "1.4"
|
||||||
|
log = "0.4"
|
||||||
rocket = "0.5"
|
rocket = "0.5"
|
||||||
rocket_dyn_templates = { version = "0.1.0", features = ["tera"] }
|
rocket_dyn_templates = { version = "0.2.0", features = ["tera"] }
|
||||||
rust-s3 = { version = "0.33", default-features = false, features = ["tokio-native-tls"] }
|
rust-s3 = { version = "0.35", default-features = false, features = [
|
||||||
serde = { version = "1.0" }
|
"tokio-rustls-tls",
|
||||||
tempfile = { version = "3.6" }
|
] }
|
||||||
|
serde = "1.0"
|
||||||
|
tempfile = "3.6"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
anyhow = "1.0"
|
delegate = "0.13"
|
||||||
async-trait = "0.1"
|
futures = "0.3"
|
||||||
bytes = "1.5"
|
libc = "0.2"
|
||||||
mockall = "0.12"
|
regex = "1.10"
|
||||||
rstest = "0.19"
|
reqwest = "0.12"
|
||||||
tokio = "1.37"
|
rstest = "0.22"
|
||||||
|
scraper = "0.20"
|
||||||
|
test-log = "0.2"
|
||||||
|
testcontainers = "0.23"
|
||||||
|
testcontainers-modules = { version = "0.11", features = ["minio"] }
|
||||||
|
tokio = { version = "1", features = ["process"] }
|
||||||
|
|
60
README.md
60
README.md
|
@ -1,5 +1,8 @@
|
||||||
[//]: # SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
|
||||||
[//]: # SPDX-License-Identifier: EUPL-1.2
|
<!--
|
||||||
|
SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
|
SPDX-License-Identifier: EUPL-1.2
|
||||||
|
-->
|
||||||
|
|
||||||
# serves3
|
# serves3
|
||||||
|
|
||||||
|
@ -11,9 +14,39 @@ Also helpful to do a different TLS termination.
|
||||||
|
|
||||||
## Configuration
|
## Configuration
|
||||||
|
|
||||||
Copy `Settings.toml.example` to `Settings.toml` and adjust your settings.
|
Copy `serves3.toml.example` to `serves3.toml` from this project's sources and adjust your settings. If the project was built and installed via CMake, a copy of the example settings file is in `/usr/share/doc/serves3`.
|
||||||
|
|
||||||
You can also add a `Rocket.toml` file to customize the server options. See the [Rocket documentation](https://rocket.rs/v0.5-rc/guide/configuration/#rockettoml).
|
For instance:
|
||||||
|
|
||||||
|
```toml
|
||||||
|
# apply this configuration to Rocket's "default" profile
|
||||||
|
[default.s3_bucket]
|
||||||
|
|
||||||
|
# the bucket name
|
||||||
|
name = ""
|
||||||
|
# the API endpoint address
|
||||||
|
endpoint = "https://eu-central-1.linodeobjects.com"
|
||||||
|
# the bucket region
|
||||||
|
region = "eu-central-1"
|
||||||
|
# the access key ID
|
||||||
|
access_key_id = ""
|
||||||
|
# the access key secret
|
||||||
|
secret_access_key = ""
|
||||||
|
# whether to use path_style S3 URLs, see
|
||||||
|
# https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access
|
||||||
|
path_style = false
|
||||||
|
|
||||||
|
# Here you can add any other rocket options, see
|
||||||
|
# https://rocket.rs/guide/v0.5/configuration/
|
||||||
|
|
||||||
|
[default]
|
||||||
|
|
||||||
|
[debug]
|
||||||
|
|
||||||
|
[release]
|
||||||
|
```
|
||||||
|
|
||||||
|
You can also use the same file to customize the server options. See the [Rocket documentation](https://rocket.rs/v0.5-rc/guide/configuration/#rockettoml) for a list of understood values.
|
||||||
|
|
||||||
Then just configure Apache or NGINX to proxy to the given port. For example:
|
Then just configure Apache or NGINX to proxy to the given port. For example:
|
||||||
|
|
||||||
|
@ -59,10 +92,10 @@ Then, e.g. for running on port 8000, you would put the corresponding configurati
|
||||||
If you want more granular control on installation options, use CMake:
|
If you want more granular control on installation options, use CMake:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cmake -B build .
|
cmake -DCMAKE_INSTALL_PREFIX=/usr -B build .
|
||||||
cmake --build build
|
cmake --build build
|
||||||
cmake --install build
|
sudo cmake --install build
|
||||||
cd run-folder # folder with Settings.toml
|
cd run-folder # folder with serves3.toml
|
||||||
serves3
|
serves3
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -70,6 +103,17 @@ Else you can simply rely on `cargo`:
|
||||||
|
|
||||||
```bash
|
```bash
|
||||||
cargo install --root /usr/local --path . # for instance
|
cargo install --root /usr/local --path . # for instance
|
||||||
cd run-folder # folder with Settings.toml
|
cd run-folder # folder with serves3.toml
|
||||||
serves3
|
serves3
|
||||||
```
|
```
|
||||||
|
|
||||||
|
# Changelog
|
||||||
|
|
||||||
|
## 1.1.0 Reworked configuration file logic
|
||||||
|
|
||||||
|
* **Breaking change**: configuration file renamed to `serves3.toml`. Please note that the format changed slightly; have a look at the provided `serves3.toml.example` file for reference.
|
||||||
|
* Fixes #2: URLs to directories not ending with a slash are not redirected properly
|
||||||
|
|
||||||
|
## 1.0.0
|
||||||
|
|
||||||
|
* Initial release.
|
||||||
|
|
|
@ -1,9 +0,0 @@
|
||||||
# SPDX-FileCopyrightText: Public domain.
|
|
||||||
# SPDX-License-Identifier: CC0-1.0
|
|
||||||
|
|
||||||
access_key_id = ""
|
|
||||||
secret_access_key = ""
|
|
||||||
|
|
||||||
bucket = ""
|
|
||||||
endpoint = "https://eu-central-1.linodeobjects.com"
|
|
||||||
region = "eu-central-1"
|
|
54
deny.toml
54
deny.toml
|
@ -12,6 +12,7 @@
|
||||||
# The values provided in this template are the default values that will be used
|
# The values provided in this template are the default values that will be used
|
||||||
# when any section or field is not specified in your own configuration
|
# when any section or field is not specified in your own configuration
|
||||||
|
|
||||||
|
[graph]
|
||||||
# If 1 or more target triples (and optionally, target_features) are specified,
|
# If 1 or more target triples (and optionally, target_features) are specified,
|
||||||
# only the specified targets will be checked when running `cargo deny check`.
|
# only the specified targets will be checked when running `cargo deny check`.
|
||||||
# This means, if a particular package is only ever used as a target specific
|
# This means, if a particular package is only ever used as a target specific
|
||||||
|
@ -38,30 +39,13 @@ targets = [
|
||||||
db-path = "~/.cargo/advisory-db"
|
db-path = "~/.cargo/advisory-db"
|
||||||
# The url(s) of the advisory databases to use
|
# The url(s) of the advisory databases to use
|
||||||
db-urls = ["https://github.com/rustsec/advisory-db"]
|
db-urls = ["https://github.com/rustsec/advisory-db"]
|
||||||
# The lint level for security vulnerabilities
|
|
||||||
vulnerability = "deny"
|
|
||||||
# The lint level for unmaintained crates
|
|
||||||
unmaintained = "warn"
|
|
||||||
# The lint level for crates that have been yanked from their source registry
|
# The lint level for crates that have been yanked from their source registry
|
||||||
yanked = "warn"
|
yanked = "warn"
|
||||||
# The lint level for crates with security notices. Note that as of
|
|
||||||
# 2019-12-17 there are no security notice advisories in
|
|
||||||
# https://github.com/rustsec/advisory-db
|
|
||||||
notice = "warn"
|
|
||||||
# A list of advisory IDs to ignore. Note that ignored advisories will still
|
# A list of advisory IDs to ignore. Note that ignored advisories will still
|
||||||
# output a note when they are encountered.
|
# output a note when they are encountered.
|
||||||
ignore = [
|
ignore = [
|
||||||
#"RUSTSEC-0000-0000",
|
#"RUSTSEC-0000-0000",
|
||||||
]
|
]
|
||||||
# Threshold for security vulnerabilities, any vulnerability with a CVSS score
|
|
||||||
# lower than the range specified will be ignored. Note that ignored advisories
|
|
||||||
# will still output a note when they are encountered.
|
|
||||||
# * None - CVSS Score 0.0
|
|
||||||
# * Low - CVSS Score 0.1 - 3.9
|
|
||||||
# * Medium - CVSS Score 4.0 - 6.9
|
|
||||||
# * High - CVSS Score 7.0 - 8.9
|
|
||||||
# * Critical - CVSS Score 9.0 - 10.0
|
|
||||||
#severity-threshold =
|
|
||||||
|
|
||||||
# If this is true, then cargo deny will use the git executable to fetch advisory database.
|
# If this is true, then cargo deny will use the git executable to fetch advisory database.
|
||||||
# If this is false, then it uses a built-in git library.
|
# If this is false, then it uses a built-in git library.
|
||||||
|
@ -73,36 +57,20 @@ ignore = [
|
||||||
# More documentation for the licenses section can be found here:
|
# More documentation for the licenses section can be found here:
|
||||||
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
|
# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html
|
||||||
[licenses]
|
[licenses]
|
||||||
# The lint level for crates which do not have a detectable license
|
|
||||||
unlicensed = "deny"
|
|
||||||
# List of explicitly allowed licenses
|
# List of explicitly allowed licenses
|
||||||
# See https://spdx.org/licenses/ for list of possible licenses
|
# See https://spdx.org/licenses/ for list of possible licenses
|
||||||
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
|
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
|
||||||
allow = [
|
allow = [
|
||||||
#"MIT",
|
"Apache-2.0",
|
||||||
#"Apache-2.0",
|
"BSD-3-Clause",
|
||||||
#"Apache-2.0 WITH LLVM-exception",
|
"CC0-1.0",
|
||||||
|
"EUPL-1.2",
|
||||||
|
"ISC",
|
||||||
|
"MIT",
|
||||||
|
"OpenSSL",
|
||||||
|
"MPL-2.0",
|
||||||
|
"Unicode-DFS-2016",
|
||||||
]
|
]
|
||||||
# List of explicitly disallowed licenses
|
|
||||||
# See https://spdx.org/licenses/ for list of possible licenses
|
|
||||||
# [possible values: any SPDX 3.11 short identifier (+ optional exception)].
|
|
||||||
deny = [
|
|
||||||
#"Nokia",
|
|
||||||
]
|
|
||||||
# Lint level for licenses considered copyleft
|
|
||||||
copyleft = "allow"
|
|
||||||
# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses
|
|
||||||
# * both - The license will be approved if it is both OSI-approved *AND* FSF
|
|
||||||
# * either - The license will be approved if it is either OSI-approved *OR* FSF
|
|
||||||
# * osi-only - The license will be approved if is OSI-approved *AND NOT* FSF
|
|
||||||
# * fsf-only - The license will be approved if is FSF *AND NOT* OSI-approved
|
|
||||||
# * neither - This predicate is ignored and the default lint level is used
|
|
||||||
allow-osi-fsf-free = "either"
|
|
||||||
# Lint level used when no other predicates are matched
|
|
||||||
# 1. License isn't in the allow or deny lists
|
|
||||||
# 2. License isn't copyleft
|
|
||||||
# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither"
|
|
||||||
default = "deny"
|
|
||||||
# The confidence threshold for detecting a license from license text.
|
# The confidence threshold for detecting a license from license text.
|
||||||
# The higher the value, the more closely the license text must be to the
|
# The higher the value, the more closely the license text must be to the
|
||||||
# canonical license text of a valid SPDX license file.
|
# canonical license text of a valid SPDX license file.
|
||||||
|
@ -133,7 +101,7 @@ expression = "MIT AND ISC AND OpenSSL"
|
||||||
# depending on the rest of your configuration
|
# depending on the rest of your configuration
|
||||||
license-files = [
|
license-files = [
|
||||||
# Each entry is a crate relative path, and the (opaque) hash of its contents
|
# Each entry is a crate relative path, and the (opaque) hash of its contents
|
||||||
{ path = "LICENSE", hash = 0xbd0eed23 }
|
{ path = "LICENSE", hash = 0xbd0eed23 },
|
||||||
]
|
]
|
||||||
|
|
||||||
[licenses.private]
|
[licenses.private]
|
||||||
|
|
|
@ -0,0 +1,28 @@
|
||||||
|
# SPDX-FileCopyrightText: Public domain.
|
||||||
|
# SPDX-License-Identifier: CC0-1.0
|
||||||
|
|
||||||
|
# apply this configuration to Rocket's "default" profile
|
||||||
|
[default.s3_bucket]
|
||||||
|
|
||||||
|
# the bucket name
|
||||||
|
name = ""
|
||||||
|
# the API endpoint address
|
||||||
|
endpoint = "https://eu-central-1.linodeobjects.com"
|
||||||
|
# the bucket region
|
||||||
|
region = "eu-central-1"
|
||||||
|
# the access key ID
|
||||||
|
access_key_id = ""
|
||||||
|
# the access key secret
|
||||||
|
secret_access_key = ""
|
||||||
|
# whether to use path_style S3 URLs, see
|
||||||
|
# https://docs.aws.amazon.com/AmazonS3/latest/userguide/VirtualHosting.html#path-style-access
|
||||||
|
path_style = false
|
||||||
|
|
||||||
|
# Here you can add any other rocket options, see
|
||||||
|
# https://rocket.rs/guide/v0.5/configuration/
|
||||||
|
|
||||||
|
[default]
|
||||||
|
|
||||||
|
[debug]
|
||||||
|
|
||||||
|
[release]
|
|
@ -0,0 +1,19 @@
|
||||||
|
# SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
|
# SPDX-License-Identifier: CC0-1.0
|
||||||
|
|
||||||
|
[Unit]
|
||||||
|
Description=ServeS3, a S3 proxy
|
||||||
|
StartLimitInterval=100
|
||||||
|
StartLimitBurst=10
|
||||||
|
|
||||||
|
[Service]
|
||||||
|
Type=simple
|
||||||
|
ExecStart=/usr/local/bin/serves3
|
||||||
|
WorkingDirectory=/etc/serves3/%i/
|
||||||
|
Environment=ROCKET_PORT=%i
|
||||||
|
|
||||||
|
Restart=always
|
||||||
|
RestartSec=5s
|
||||||
|
|
||||||
|
[Install]
|
||||||
|
WantedBy=multi-user.target
|
234
src/main.rs
234
src/main.rs
|
@ -1,85 +1,41 @@
|
||||||
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
// SPDX-License-Identifier: EUPL-1.2
|
// SPDX-License-Identifier: EUPL-1.2
|
||||||
|
|
||||||
|
mod settings;
|
||||||
|
mod sizes;
|
||||||
|
|
||||||
use {
|
use {
|
||||||
rocket::response::Responder,
|
anyhow::Result,
|
||||||
rocket::serde::Serialize,
|
lazy_static::lazy_static,
|
||||||
|
rocket::{
|
||||||
|
fairing::AdHoc,
|
||||||
|
figment::{
|
||||||
|
providers::{Env, Format as _, Toml},
|
||||||
|
Profile,
|
||||||
|
},
|
||||||
|
http::uri::Origin,
|
||||||
|
response::{Redirect, Responder},
|
||||||
|
serde::Serialize,
|
||||||
|
State,
|
||||||
|
},
|
||||||
rocket_dyn_templates::{context, Template},
|
rocket_dyn_templates::{context, Template},
|
||||||
|
settings::Settings,
|
||||||
std::path::PathBuf,
|
std::path::PathBuf,
|
||||||
};
|
};
|
||||||
|
|
||||||
use lazy_static::lazy_static;
|
#[derive(Responder)]
|
||||||
|
|
||||||
struct Settings {
|
|
||||||
access_key_id: String,
|
|
||||||
secret_access_key: String,
|
|
||||||
bucket_name: String,
|
|
||||||
endpoint: String,
|
|
||||||
region: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
lazy_static! {
|
|
||||||
static ref SETTINGS: Settings = {
|
|
||||||
let settings = config::Config::builder()
|
|
||||||
.add_source(config::File::with_name("Settings.toml"))
|
|
||||||
.add_source(config::Environment::with_prefix("SERVES3"))
|
|
||||||
.build()
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
Settings {
|
|
||||||
access_key_id: settings
|
|
||||||
.get_string("access_key_id")
|
|
||||||
.expect("Missing configuration key access_key_id"),
|
|
||||||
secret_access_key: settings
|
|
||||||
.get_string("secret_access_key")
|
|
||||||
.expect("Missing configuration key secret_access_key"),
|
|
||||||
bucket_name: settings
|
|
||||||
.get_string("bucket")
|
|
||||||
.expect("Missing configuration key bucket"),
|
|
||||||
region: settings
|
|
||||||
.get_string("region")
|
|
||||||
.expect("Missing configuration key region"),
|
|
||||||
endpoint: settings
|
|
||||||
.get_string("endpoint")
|
|
||||||
.expect("Missing configuration key endpoint"),
|
|
||||||
}
|
|
||||||
};
|
|
||||||
static ref BUCKET: s3::bucket::Bucket = {
|
|
||||||
let region = s3::Region::Custom {
|
|
||||||
region: SETTINGS.region.clone(),
|
|
||||||
endpoint: SETTINGS.endpoint.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let credentials = s3::creds::Credentials::new(
|
|
||||||
Some(&SETTINGS.access_key_id),
|
|
||||||
Some(&SETTINGS.secret_access_key),
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
None,
|
|
||||||
)
|
|
||||||
.expect("Wrong server S3 configuration");
|
|
||||||
s3::bucket::Bucket::new(&SETTINGS.bucket_name, region, credentials)
|
|
||||||
.expect("Cannot find or authenticate to S3 bucket")
|
|
||||||
};
|
|
||||||
static ref FILEVIEW_TEMPLATE: &'static str = std::include_str!("../templates/index.html.tera");
|
|
||||||
|
|
||||||
// Workaround for https://github.com/SergioBenitez/Rocket/issues/1792
|
|
||||||
static ref EMPTY_DIR: tempfile::TempDir = tempfile::tempdir()
|
|
||||||
.expect("Unable to create an empty temporary folder, is the whole FS read-only?");
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Responder, Debug)]
|
|
||||||
enum FileView {
|
enum FileView {
|
||||||
#[response(content_type = "text/html")]
|
#[response(content_type = "text/html")]
|
||||||
Folder(Template),
|
Folder(Template),
|
||||||
|
|
||||||
#[response(content_type = "application/octet-stream")]
|
#[response(content_type = "application/octet-stream")]
|
||||||
File(Vec<u8>),
|
File(Vec<u8>),
|
||||||
|
|
||||||
|
Redirect(Redirect),
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Serialize)]
|
#[derive(Serialize)]
|
||||||
struct FileViewItem {
|
struct FileViewItem {
|
||||||
parent: String,
|
|
||||||
path: String,
|
path: String,
|
||||||
size: String,
|
size: String,
|
||||||
size_bytes: u64,
|
size_bytes: u64,
|
||||||
|
@ -96,7 +52,11 @@ enum Error {
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rocket::get("/<path..>")]
|
#[rocket::get("/<path..>")]
|
||||||
async fn index(path: PathBuf) -> Result<FileView, Error> {
|
async fn index(
|
||||||
|
path: PathBuf,
|
||||||
|
uri: &Origin<'_>,
|
||||||
|
state: &State<Settings>,
|
||||||
|
) -> Result<FileView, Error> {
|
||||||
/*
|
/*
|
||||||
The way things work in S3, the following holds for us:
|
The way things work in S3, the following holds for us:
|
||||||
- we need to use a slash as separator
|
- we need to use a slash as separator
|
||||||
|
@ -108,11 +68,18 @@ async fn index(path: PathBuf) -> Result<FileView, Error> {
|
||||||
We try first to retrieve list an object as a file. If we fail,
|
We try first to retrieve list an object as a file. If we fail,
|
||||||
we fallback to retrieving the equivalent folder.
|
we fallback to retrieving the equivalent folder.
|
||||||
*/
|
*/
|
||||||
|
if let Ok(result) = s3_serve_file(&path, &state).await {
|
||||||
if let Ok(result) = s3_serve_file(&path).await {
|
|
||||||
Ok(result)
|
Ok(result)
|
||||||
} else {
|
} else {
|
||||||
let objects = s3_fileview(&path).await?;
|
// We need to redirect to a path ending with a slash as
|
||||||
|
// per comment above if we know this is not a file.
|
||||||
|
let mut uri = uri.to_string();
|
||||||
|
if !uri.ends_with('/') {
|
||||||
|
uri.push('/');
|
||||||
|
return Ok(FileView::Redirect(Redirect::permanent(uri)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let objects = s3_fileview(&path, &state).await?;
|
||||||
let rendered = Template::render(
|
let rendered = Template::render(
|
||||||
"index",
|
"index",
|
||||||
context! {
|
context! {
|
||||||
|
@ -124,7 +91,7 @@ async fn index(path: PathBuf) -> Result<FileView, Error> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn s3_serve_file(path: &PathBuf) -> Result<FileView, Error> {
|
async fn s3_serve_file(path: &PathBuf, settings: &Settings) -> Result<FileView, Error> {
|
||||||
let is_root_prefix = path.as_os_str().is_empty();
|
let is_root_prefix = path.as_os_str().is_empty();
|
||||||
if is_root_prefix {
|
if is_root_prefix {
|
||||||
return Err(Error::NotFound("Root prefix is not a file".into()));
|
return Err(Error::NotFound("Root prefix is not a file".into()));
|
||||||
|
@ -132,7 +99,8 @@ async fn s3_serve_file(path: &PathBuf) -> Result<FileView, Error> {
|
||||||
|
|
||||||
// FIXME: this can be big, we should use streaming,
|
// FIXME: this can be big, we should use streaming,
|
||||||
// not loading in memory!
|
// not loading in memory!
|
||||||
let response = BUCKET
|
let response = settings
|
||||||
|
.s3_bucket
|
||||||
.get_object(format!("{}", path.display()))
|
.get_object(format!("{}", path.display()))
|
||||||
.await
|
.await
|
||||||
.map_err(|_| Error::UnknownError("Unable to connect to S3 bucket".into()))?;
|
.map_err(|_| Error::UnknownError("Unable to connect to S3 bucket".into()))?;
|
||||||
|
@ -147,7 +115,7 @@ async fn s3_serve_file(path: &PathBuf) -> Result<FileView, Error> {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn s3_fileview(path: &PathBuf) -> Result<Vec<FileViewItem>, Error> {
|
async fn s3_fileview(path: &PathBuf, settings: &Settings) -> Result<Vec<FileViewItem>, Error> {
|
||||||
/*
|
/*
|
||||||
if listing a folder:
|
if listing a folder:
|
||||||
- folders will be under 'common_prefixes'
|
- folders will be under 'common_prefixes'
|
||||||
|
@ -160,8 +128,9 @@ async fn s3_fileview(path: &PathBuf) -> Result<Vec<FileViewItem>, Error> {
|
||||||
None => "".into(),
|
None => "".into(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let s3_objects = BUCKET
|
let s3_objects = settings
|
||||||
.list(s3_folder_path.clone(), Some("/".into()))
|
.s3_bucket
|
||||||
|
.list(s3_folder_path, Some("/".into()))
|
||||||
.await
|
.await
|
||||||
.map_err(|_| Error::NotFound("Object not found".into()))?;
|
.map_err(|_| Error::NotFound("Object not found".into()))?;
|
||||||
|
|
||||||
|
@ -177,7 +146,6 @@ async fn s3_fileview(path: &PathBuf) -> Result<Vec<FileViewItem>, Error> {
|
||||||
let folders = list.common_prefixes.iter().flatten().map(|dir| {
|
let folders = list.common_prefixes.iter().flatten().map(|dir| {
|
||||||
let path = dir.prefix.strip_prefix(&prefix);
|
let path = dir.prefix.strip_prefix(&prefix);
|
||||||
path.map(|path| FileViewItem {
|
path.map(|path| FileViewItem {
|
||||||
parent: s3_folder_path.clone(),
|
|
||||||
path: path.to_owned(),
|
path: path.to_owned(),
|
||||||
size_bytes: 0,
|
size_bytes: 0,
|
||||||
size: "[DIR]".to_owned(),
|
size: "[DIR]".to_owned(),
|
||||||
|
@ -188,10 +156,9 @@ async fn s3_fileview(path: &PathBuf) -> Result<Vec<FileViewItem>, Error> {
|
||||||
let files = list.contents.iter().map(|obj| {
|
let files = list.contents.iter().map(|obj| {
|
||||||
let path = obj.key.strip_prefix(&prefix);
|
let path = obj.key.strip_prefix(&prefix);
|
||||||
path.map(|path| FileViewItem {
|
path.map(|path| FileViewItem {
|
||||||
parent: s3_folder_path.clone(),
|
|
||||||
path: path.to_owned(),
|
path: path.to_owned(),
|
||||||
size_bytes: obj.size,
|
size_bytes: obj.size,
|
||||||
size: size_bytes_to_human(obj.size),
|
size: sizes::bytes_to_human(obj.size),
|
||||||
last_modification: obj.last_modified.clone(),
|
last_modification: obj.last_modified.clone(),
|
||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
@ -204,120 +171,27 @@ async fn s3_fileview(path: &PathBuf) -> Result<Vec<FileViewItem>, Error> {
|
||||||
Ok(objects)
|
Ok(objects)
|
||||||
}
|
}
|
||||||
|
|
||||||
fn size_bytes_to_human(bytes: u64) -> String {
|
lazy_static! {
|
||||||
use human_size::{Any, SpecificSize};
|
// Workaround for https://github.com/SergioBenitez/Rocket/issues/1792
|
||||||
|
static ref EMPTY_DIR: tempfile::TempDir = tempfile::tempdir()
|
||||||
let size: f64 = bytes as f64;
|
.expect("Unable to create an empty temporary folder, is the whole FS read-only?");
|
||||||
let digits = size.log10().floor() as u32;
|
|
||||||
let mut order = digits / 3;
|
|
||||||
let unit = match order {
|
|
||||||
0 => Any::Byte,
|
|
||||||
1 => Any::Kilobyte,
|
|
||||||
2 => Any::Megabyte,
|
|
||||||
_ => {
|
|
||||||
order = 3; // Let's stop here.
|
|
||||||
Any::Gigabyte
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
format!(
|
|
||||||
"{:.3}",
|
|
||||||
SpecificSize::new(size / 10u64.pow(order * 3) as f64, unit)
|
|
||||||
.unwrap_or(SpecificSize::new(0., Any::Byte).unwrap())
|
|
||||||
)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[rocket::launch]
|
#[rocket::launch]
|
||||||
fn rocket() -> _ {
|
fn rocket() -> _ {
|
||||||
eprintln!("Proxying to {} for {}", BUCKET.host(), BUCKET.name());
|
let config_figment = rocket::Config::figment()
|
||||||
|
.merge(Toml::file("serves3.toml").nested())
|
||||||
let config_figment = rocket::Config::figment().merge(("template_dir", EMPTY_DIR.path())); // We compile the templates in anyway.
|
.merge(Env::prefixed("SERVES3_").global())
|
||||||
|
.merge(("template_dir", EMPTY_DIR.path())) // We compile the templates in anyway
|
||||||
|
.select(Profile::from_env_or("SERVES3_PROFILE", "default"));
|
||||||
|
|
||||||
rocket::custom(config_figment)
|
rocket::custom(config_figment)
|
||||||
.mount("/", rocket::routes![index])
|
.mount("/", rocket::routes![index])
|
||||||
|
.attach(AdHoc::config::<Settings>())
|
||||||
.attach(Template::custom(|engines| {
|
.attach(Template::custom(|engines| {
|
||||||
engines
|
engines
|
||||||
.tera
|
.tera
|
||||||
.add_raw_template("index", *FILEVIEW_TEMPLATE)
|
.add_raw_template("index", std::include_str!("../templates/index.html.tera"))
|
||||||
.unwrap()
|
.unwrap()
|
||||||
}))
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test section starts

#[cfg(test)]
mod tests {
    use std::collections::HashMap;

    use anyhow::Result;
    use async_trait::async_trait;
    use bytes::Bytes;

    use rstest::rstest;
    // Note this useful idiom: importing names from outer (for mod tests) scope.
    use super::*;
    use mockall::mock;
    use s3::{error::S3Error, request::ResponseData};

    /// A trait implemented by a Struct we want to mock
    #[async_trait]
    pub trait Bah {
        async fn get_object<S: AsRef<str> + 'static + std::marker::Send>(
            &self,
            path: S,
        ) -> Result<ResponseData, S3Error>;
    }

    mock! {
        pub Bucket {}

        #[async_trait]
        impl Bah for Bucket {
            async fn get_object<S: AsRef<str> + 'static + std::marker::Send>(&self, path: S) -> Result<ResponseData, S3Error>;
        }
    }

    #[rstest]
    #[case(1024, "1.024 kB")]
    #[case(10240, "10.240 kB")]
    #[case(1024*1024, "1.049 MB")]
    #[case(1024*1024*1024, "1.074 GB")]
    #[case(0, "0.000 B")]
    #[case(u64::MAX, format!("{:.3} GB",u64::MAX as f64/(1_000_000_000.0)))]
    #[case(u64::MIN, format!("{:.3} B",u64::MIN as f64))]
    fn test_size_bytes_to_human(#[case] bytes: u64, #[case] expected: String) {
        println!("{}", size_bytes_to_human(bytes));
        assert_eq!(size_bytes_to_human(bytes), expected);
    }

    #[tokio::test]
    async fn test_s3_serve_file() -> Result<()> {
        let mut mock_bucket = MockBucket::new();

        // Stub the S3 call: a one-byte body, HTTP 200, no headers.
        // This matches the `vec![0]` assertion below. (The previous
        // version left a `panic!("here")` placeholder here, so the
        // test could never pass.)
        mock_bucket
            .expect_get_object()
            .times(1)
            .returning(|_: String| {
                Ok(ResponseData::new(
                    Bytes::from(vec![0u8]),
                    200,
                    HashMap::<String, String>::new(),
                ))
            });

        let test_path: PathBuf = "foo".into();

        // NOTE(review): `mock_bucket` is never injected into
        // `s3_serve_file`, which reads the bucket from application
        // state instead — the handler needs dependency injection for
        // this expectation to be exercised. TODO confirm the wiring.
        let result = s3_serve_file(&test_path).await;

        println!("{:?}", result);
        assert!(result.is_ok());
        let result = result.unwrap();

        let bytes = match result {
            FileView::File(b) => b,
            _ => panic!("Should be a file."),
        };

        assert_eq!(bytes, vec![0]);

        Ok(())
    }
}
|
|
|
@ -0,0 +1,67 @@
|
||||||
|
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
|
// SPDX-License-Identifier: EUPL-1.2
|
||||||
|
|
||||||
|
use {anyhow::anyhow, rocket::serde::Deserialize, serde::de::Error};
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
#[serde(crate = "rocket::serde")]
|
||||||
|
pub struct Settings {
|
||||||
|
#[serde(deserialize_with = "deserialize_s3_bucket")]
|
||||||
|
pub s3_bucket: Box<s3::Bucket>,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn deserialize_s3_bucket<'de, D>(deserializer: D) -> Result<Box<s3::Bucket>, D::Error>
|
||||||
|
where
|
||||||
|
D: serde::Deserializer<'de>,
|
||||||
|
{
|
||||||
|
let config = S3Config::deserialize(deserializer)?;
|
||||||
|
config.try_into().map_err(D::Error::custom)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
pub struct S3Config {
|
||||||
|
pub name: String,
|
||||||
|
pub endpoint: String,
|
||||||
|
pub region: String,
|
||||||
|
|
||||||
|
#[serde(default)]
|
||||||
|
pub path_style: bool,
|
||||||
|
|
||||||
|
pub access_key_id: String,
|
||||||
|
pub secret_access_key: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TryInto<Box<s3::Bucket>> for S3Config {
|
||||||
|
type Error = anyhow::Error;
|
||||||
|
|
||||||
|
fn try_into(self) -> Result<Box<s3::Bucket>, Self::Error> {
|
||||||
|
let region = s3::Region::Custom {
|
||||||
|
region: self.region,
|
||||||
|
endpoint: self.endpoint,
|
||||||
|
};
|
||||||
|
|
||||||
|
let credentials = s3::creds::Credentials::new(
|
||||||
|
Some(&self.access_key_id),
|
||||||
|
Some(&self.secret_access_key),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
log::info!(
|
||||||
|
"Serving contents from bucket {} at {}",
|
||||||
|
&self.name,
|
||||||
|
region.endpoint()
|
||||||
|
);
|
||||||
|
|
||||||
|
let bucket = s3::Bucket::new(&self.name, region, credentials).map_err(|e| anyhow!(e));
|
||||||
|
if self.path_style {
|
||||||
|
bucket.map(|mut b| {
|
||||||
|
b.set_path_style();
|
||||||
|
b
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
bucket
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,44 @@
|
||||||
|
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
|
// SPDX-License-Identifier: EUPL-1.2
|
||||||
|
|
||||||
|
pub fn bytes_to_human(bytes: u64) -> String {
|
||||||
|
use human_size::{Any, SpecificSize};
|
||||||
|
|
||||||
|
let size: f64 = bytes as f64;
|
||||||
|
let digits = size.log10().floor() as u32;
|
||||||
|
let mut order = digits / 3;
|
||||||
|
let unit = match order {
|
||||||
|
0 => Any::Byte,
|
||||||
|
1 => Any::Kilobyte,
|
||||||
|
2 => Any::Megabyte,
|
||||||
|
_ => {
|
||||||
|
order = 3; // Let's stop here.
|
||||||
|
Any::Gigabyte
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
format!(
|
||||||
|
"{:.3}",
|
||||||
|
SpecificSize::new(size / 10u64.pow(order * 3) as f64, unit)
|
||||||
|
.unwrap_or(SpecificSize::new(0., Any::Byte).unwrap())
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
// -------------------------------------------------------------
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use rstest::rstest;

    // Table-driven check of the human-readable formatting, including
    // both extremes of the u64 range.
    #[rstest]
    #[case(1024, "1.024 kB")]
    #[case(10240, "10.240 kB")]
    #[case(1024*1024, "1.049 MB")]
    #[case(1024*1024*1024, "1.074 GB")]
    #[case(0, "0.000 B")]
    #[case(u64::MAX, format!("{:.3} GB",u64::MAX as f64/(1_000_000_000.0)))]
    #[case(u64::MIN, format!("{:.3} B",u64::MIN as f64))]
    fn bytes_to_human(#[case] bytes: u64, #[case] expected: String) {
        assert_eq!(super::bytes_to_human(bytes), expected);
    }
}
|
|
@ -0,0 +1,54 @@
|
||||||
|
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
|
// SPDX-License-Identifier: EUPL-1.2
|
||||||
|
|
||||||
|
use {
|
||||||
|
delegate::delegate,
|
||||||
|
std::borrow::Cow,
|
||||||
|
testcontainers::{
|
||||||
|
core::{ContainerPort, WaitFor},
|
||||||
|
Image,
|
||||||
|
},
|
||||||
|
testcontainers_modules::minio,
|
||||||
|
};
|
||||||
|
|
||||||
|
const MINIO_IMAGE_TAG: &'static str = "RELEASE.2024-09-22T00-33-43Z";
|
||||||
|
|
||||||
|
pub struct MinIO {
|
||||||
|
inner: minio::MinIO,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Image for MinIO {
|
||||||
|
fn tag(&self) -> &str {
|
||||||
|
MINIO_IMAGE_TAG.into()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn ready_conditions(&self) -> Vec<WaitFor> {
|
||||||
|
vec![WaitFor::message_on_stderr("API:")]
|
||||||
|
}
|
||||||
|
|
||||||
|
delegate! {
|
||||||
|
to self.inner {
|
||||||
|
fn name(&self) -> &str;
|
||||||
|
fn expose_ports(&self) -> &[ContainerPort];
|
||||||
|
fn env_vars(
|
||||||
|
&self,
|
||||||
|
) -> impl IntoIterator<Item = (impl Into<Cow<'_, str>>, impl Into<Cow<'_, str>>)>;
|
||||||
|
fn mounts(&self) -> impl IntoIterator<Item = &testcontainers::core::Mount>;
|
||||||
|
fn copy_to_sources(&self) -> impl IntoIterator<Item = &testcontainers::CopyToContainer>;
|
||||||
|
fn entrypoint(&self) -> Option<&str>;
|
||||||
|
fn cmd(&self) -> impl IntoIterator<Item = impl Into<std::borrow::Cow<'_, str>>>;
|
||||||
|
fn exec_after_start(
|
||||||
|
&self,
|
||||||
|
cs: testcontainers::core::ContainerState,
|
||||||
|
) -> Result<Vec<testcontainers::core::ExecCommand>, testcontainers::TestcontainersError>;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for MinIO {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,122 @@
|
||||||
|
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
|
// SPDX-License-Identifier: EUPL-1.2
|
||||||
|
|
||||||
|
mod minio;
|
||||||
|
|
||||||
|
use {
|
||||||
|
anyhow::{anyhow, Result},
|
||||||
|
reqwest::Url,
|
||||||
|
std::{ptr::null_mut, str::FromStr},
|
||||||
|
testcontainers::{runners::AsyncRunner, ContainerAsync},
|
||||||
|
tokio::io::AsyncBufReadExt as _,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub struct Test {
|
||||||
|
pub base_url: Url,
|
||||||
|
pub bucket: Box<s3::Bucket>,
|
||||||
|
pub serves3: tokio::process::Child,
|
||||||
|
pub _minio: ContainerAsync<minio::MinIO>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// How long to wait for Rocket's launch message before giving up on
/// the server starting.
const MAXIMUM_SERVES3_INIT_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5);

const BUCKET_NAME: &str = "integration-test-bucket";
const REGION: &str = "test-region";
// Default MinIO root credentials — fine for a throwaway container.
const ACCESS_KEY: &str = "minioadmin";
const SECRET_KEY: &str = "minioadmin";
|
||||||
|
|
||||||
|
impl Test {
|
||||||
|
pub async fn new() -> Result<Self> {
|
||||||
|
// NOTE: this testsuite was setup to work
|
||||||
|
// against a recent version of podman,
|
||||||
|
// which correctly distinguishes between
|
||||||
|
// stdout and stderr of the running container.
|
||||||
|
|
||||||
|
let image = minio::MinIO::default();
|
||||||
|
let container = image.start().await?;
|
||||||
|
|
||||||
|
let endpoint = format!(
|
||||||
|
"http://{host}:{port}",
|
||||||
|
host = container.get_host().await?,
|
||||||
|
port = container.get_host_port_ipv4(9000).await?
|
||||||
|
);
|
||||||
|
|
||||||
|
let credentials = s3::creds::Credentials::new(
|
||||||
|
Some(&ACCESS_KEY),
|
||||||
|
Some(&SECRET_KEY),
|
||||||
|
None,
|
||||||
|
None,
|
||||||
|
Some("test"),
|
||||||
|
)?;
|
||||||
|
let bucket = s3::Bucket::create_with_path_style(
|
||||||
|
&BUCKET_NAME,
|
||||||
|
s3::Region::Custom {
|
||||||
|
region: REGION.into(),
|
||||||
|
endpoint: endpoint.clone(),
|
||||||
|
},
|
||||||
|
credentials,
|
||||||
|
s3::BucketConfiguration::private(),
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
.bucket;
|
||||||
|
|
||||||
|
let bin = std::env!("CARGO_BIN_EXE_serves3");
|
||||||
|
let mut child = tokio::process::Command::new(bin)
|
||||||
|
.env("SERVES3_ADDRESS", "127.0.0.1")
|
||||||
|
.env("SERVES3_PORT", "0")
|
||||||
|
.env("SERVES3_LOG_LEVEL", "debug")
|
||||||
|
.env(
|
||||||
|
"SERVES3_S3_BUCKET",
|
||||||
|
format!(
|
||||||
|
r#"{{
|
||||||
|
name = "{name}",
|
||||||
|
endpoint = "{endpoint}",
|
||||||
|
region = "{region}",
|
||||||
|
access_key_id = "{user}",
|
||||||
|
secret_access_key = "{secret}",
|
||||||
|
path_style = true
|
||||||
|
}}"#,
|
||||||
|
name = BUCKET_NAME,
|
||||||
|
endpoint = endpoint,
|
||||||
|
region = ®ION,
|
||||||
|
user = ACCESS_KEY,
|
||||||
|
secret = SECRET_KEY
|
||||||
|
),
|
||||||
|
)
|
||||||
|
.stdout(std::process::Stdio::piped())
|
||||||
|
.spawn()?;
|
||||||
|
|
||||||
|
let base_url = tokio::time::timeout(MAXIMUM_SERVES3_INIT_TIMEOUT, async {
|
||||||
|
let stdout = child.stdout.as_mut().unwrap();
|
||||||
|
let mut lines = tokio::io::BufReader::new(stdout).lines();
|
||||||
|
let re = regex::Regex::new("^Rocket has launched from (http://.+)$").unwrap();
|
||||||
|
while let Some(line) = lines.next_line().await? {
|
||||||
|
println!("{}", &line);
|
||||||
|
if let Some(captures) = re.captures(&line) {
|
||||||
|
let url = captures.get(1).unwrap().as_str();
|
||||||
|
return Ok(Url::from_str(url)?);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(anyhow!("Rocket did not print that it has started"))
|
||||||
|
})
|
||||||
|
.await??;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
base_url,
|
||||||
|
bucket,
|
||||||
|
serves3: child,
|
||||||
|
_minio: container,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for Test {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
unsafe {
|
||||||
|
let pid = self.serves3.id().unwrap() as i32;
|
||||||
|
libc::kill(pid, libc::SIGTERM);
|
||||||
|
libc::waitpid(pid, null_mut(), 0);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,158 @@
|
||||||
|
// SPDX-FileCopyrightText: © Matteo Settenvini <matteo.settenvini@montecristosoftware.eu>
|
||||||
|
// SPDX-License-Identifier: EUPL-1.2
|
||||||
|
|
||||||
|
mod common;
|
||||||
|
|
||||||
|
use scraper::{Html, Selector};
|
||||||
|
|
||||||
|
#[test_log::test(tokio::test)]
|
||||||
|
async fn serves_files() -> anyhow::Result<()> {
|
||||||
|
let test = common::Test::new().await?;
|
||||||
|
|
||||||
|
test.bucket
|
||||||
|
.put_object("file.txt", "I am a file".as_bytes())
|
||||||
|
.await?;
|
||||||
|
test.bucket
|
||||||
|
.put_object("folder/file.txt", "I am a file in a folder".as_bytes())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let resp = reqwest::get(test.base_url.join("file.txt")?).await?;
|
||||||
|
assert_eq!(resp.bytes().await?, "I am a file");
|
||||||
|
|
||||||
|
let resp = reqwest::get(test.base_url.join("folder/file.txt")?).await?;
|
||||||
|
assert_eq!(resp.bytes().await?, "I am a file in a folder");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test_log::test(tokio::test)]
|
||||||
|
async fn serves_top_level_folder() -> anyhow::Result<()> {
|
||||||
|
let test = common::Test::new().await?;
|
||||||
|
|
||||||
|
test.bucket
|
||||||
|
.put_object("file.txt", "I am a file".as_bytes())
|
||||||
|
.await?;
|
||||||
|
test.bucket
|
||||||
|
.put_object("folder/file.txt", "I am a file in a folder".as_bytes())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Check that a file in the toplevel is listed:
|
||||||
|
let resp = reqwest::get(test.base_url.clone()).await?;
|
||||||
|
assert!(
|
||||||
|
resp.status().is_success(),
|
||||||
|
"Request failed with {}",
|
||||||
|
resp.status()
|
||||||
|
);
|
||||||
|
let text = resp.text().await?;
|
||||||
|
println!("{}", &text);
|
||||||
|
let document = Html::parse_document(&text);
|
||||||
|
|
||||||
|
let selector = Selector::parse(r#"h1"#).unwrap();
|
||||||
|
for title in document.select(&selector) {
|
||||||
|
assert_eq!(title.inner_html(), "/", "title doesn't match");
|
||||||
|
}
|
||||||
|
|
||||||
|
let selector =
|
||||||
|
Selector::parse(r#"table > tbody > tr:nth-child(1) > td:first-child > a"#).unwrap();
|
||||||
|
for item in document.select(&selector) {
|
||||||
|
assert_eq!(item.attr("href"), Some("folder/"));
|
||||||
|
assert_eq!(item.text().next(), Some("folder/"));
|
||||||
|
}
|
||||||
|
|
||||||
|
let selector =
|
||||||
|
Selector::parse(r#"table > tbody > tr:nth-child(2) > td:first-child > a"#).unwrap();
|
||||||
|
for item in document.select(&selector) {
|
||||||
|
assert_eq!(item.attr("href"), Some("file.txt"));
|
||||||
|
assert_eq!(item.text().next(), Some("file.txt"));
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test_log::test(tokio::test)]
|
||||||
|
async fn serves_second_level_folder() -> anyhow::Result<()> {
|
||||||
|
let test = common::Test::new().await?;
|
||||||
|
|
||||||
|
test.bucket
|
||||||
|
.put_object("file.txt", "I am a file".as_bytes())
|
||||||
|
.await?;
|
||||||
|
test.bucket
|
||||||
|
.put_object("folder/file.txt", "I am a file in a folder".as_bytes())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Check that a file in the second level is listed:
|
||||||
|
let resp = reqwest::get(test.base_url.join("folder/")?).await?;
|
||||||
|
assert!(
|
||||||
|
resp.status().is_success(),
|
||||||
|
"Request failed with {}",
|
||||||
|
resp.status()
|
||||||
|
);
|
||||||
|
let text = resp.text().await?;
|
||||||
|
println!("{}", &text);
|
||||||
|
let document = Html::parse_document(&text);
|
||||||
|
|
||||||
|
let selector = Selector::parse(r#"h1"#).unwrap();
|
||||||
|
for title in document.select(&selector) {
|
||||||
|
assert_eq!(title.inner_html(), "folder/", "title doesn't match");
|
||||||
|
}
|
||||||
|
|
||||||
|
let selector =
|
||||||
|
Selector::parse(r#"table > tbody > tr:nth-child(1) > td:first-child > a"#).unwrap();
|
||||||
|
for item in document.select(&selector) {
|
||||||
|
assert_eq!(item.attr("href"), Some("../"));
|
||||||
|
assert_eq!(item.inner_html(), "..");
|
||||||
|
}
|
||||||
|
|
||||||
|
let selector =
|
||||||
|
Selector::parse(r#"table > tbody > tr:nth-child(2) > td:first-child > a"#).unwrap();
|
||||||
|
for item in document.select(&selector) {
|
||||||
|
assert_eq!(item.attr("href"), Some("file.txt"));
|
||||||
|
assert_eq!(item.inner_html(), "file.txt");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test_log::test(tokio::test)]
|
||||||
|
async fn serves_second_level_folder_without_ending_slash() -> anyhow::Result<()> {
|
||||||
|
let test = common::Test::new().await?;
|
||||||
|
|
||||||
|
test.bucket
|
||||||
|
.put_object("file.txt", "I am a file".as_bytes())
|
||||||
|
.await?;
|
||||||
|
test.bucket
|
||||||
|
.put_object("folder/file.txt", "I am a file in a folder".as_bytes())
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Check that a file in the second level is listed even without an ending slash:
|
||||||
|
let resp = reqwest::get(test.base_url.join("folder")?).await?;
|
||||||
|
assert!(
|
||||||
|
resp.status().is_success(),
|
||||||
|
"Request failed with {}",
|
||||||
|
resp.status()
|
||||||
|
);
|
||||||
|
let text = resp.text().await?;
|
||||||
|
println!("{}", &text);
|
||||||
|
let document = Html::parse_document(&text);
|
||||||
|
|
||||||
|
let selector = Selector::parse(r#"h1"#).unwrap();
|
||||||
|
for title in document.select(&selector) {
|
||||||
|
assert_eq!(title.inner_html(), "folder/", "title doesn't match");
|
||||||
|
}
|
||||||
|
|
||||||
|
let selector =
|
||||||
|
Selector::parse(r#"table > tbody > tr:nth-child(1) > td:first-child > a"#).unwrap();
|
||||||
|
for item in document.select(&selector) {
|
||||||
|
assert_eq!(item.attr("href"), Some("../"));
|
||||||
|
assert_eq!(item.inner_html(), "..");
|
||||||
|
}
|
||||||
|
|
||||||
|
let selector =
|
||||||
|
Selector::parse(r#"table > tbody > tr:nth-child(2) > td:first-child > a"#).unwrap();
|
||||||
|
for item in document.select(&selector) {
|
||||||
|
assert_eq!(item.attr("href"), Some("file.txt"));
|
||||||
|
assert_eq!(item.inner_html(), "file.txt");
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
Loading…
Reference in New Issue