feat: add Docker image metadata support

- Store metadata (.meta.json) alongside manifests with:
  - push_timestamp, last_pulled, downloads counter
  - size_bytes, os, arch, variant
  - layers list with digest and size
- Update metadata on manifest pull (increment downloads, update last_pulled)
- Extract OS/arch from config blob on push
- Extend UI API TagInfo with metadata fields
- Add public_url config option for pull commands
- Add Docker upstream proxy with auth support
- Add raw repository support
- Bump version to 0.2.12
This commit is contained in:
2026-01-30 15:52:29 +00:00
parent ee4e01467a
commit 5fc4237ac5
16 changed files with 1090 additions and 43 deletions

View File

@@ -1,4 +1,6 @@
use crate::activity_log::{ActionType, ActivityEntry};
use crate::registry::docker_auth::DockerAuth;
use crate::storage::Storage;
use crate::validation::{validate_digest, validate_docker_name, validate_docker_reference};
use crate::AppState;
use axum::{
@@ -10,9 +12,32 @@ use axum::{
Json, Router,
};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
/// Metadata for a Docker image stored alongside manifests
/// (persisted as a `.meta.json` file next to the manifest).
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ImageMetadata {
    /// Unix timestamp (seconds) of the push that wrote this record.
    pub push_timestamp: u64,
    /// Unix timestamp (seconds) of the most recent pull; 0 if never pulled.
    pub last_pulled: u64,
    /// Number of manifest pulls recorded for this image.
    pub downloads: u64,
    /// Total image size: sum of layer sizes for single-arch manifests, or
    /// sum of per-platform manifest sizes for multi-arch indexes.
    pub size_bytes: u64,
    /// Target operating system ("unknown" when it cannot be determined).
    pub os: String,
    /// Target CPU architecture ("unknown" when it cannot be determined).
    pub arch: String,
    /// Optional CPU variant (e.g. ARM "v7"); omitted from JSON when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variant: Option<String>,
    /// Layers of the image (left empty for multi-arch indexes).
    pub layers: Vec<LayerInfo>,
}
/// Information about a single layer in a Docker image
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LayerInfo {
    /// Content digest of the layer blob (e.g. "sha256:<hex>").
    pub digest: String,
    /// Layer size in bytes as reported by the manifest.
    pub size: u64,
}
/// In-progress upload sessions for chunked uploads
/// Maps UUID -> accumulated data
@@ -75,25 +100,63 @@ async fn download_blob(
}
let key = format!("docker/{}/blobs/{}", name, digest);
match state.storage.get(&key).await {
Ok(data) => {
// Try local storage first
if let Ok(data) = state.storage.get(&key).await {
state.metrics.record_download("docker");
state.metrics.record_cache_hit();
state.activity.push(ActivityEntry::new(
ActionType::Pull,
format!("{}@{}", name, &digest[..19.min(digest.len())]),
"docker",
"LOCAL",
));
return (
StatusCode::OK,
[(header::CONTENT_TYPE, "application/octet-stream")],
data,
)
.into_response();
}
// Try upstream proxies
for upstream in &state.config.docker.upstreams {
if let Ok(data) = fetch_blob_from_upstream(
&upstream.url,
&name,
&digest,
&state.docker_auth,
state.config.docker.proxy_timeout,
)
.await
{
state.metrics.record_download("docker");
state.metrics.record_cache_hit();
state.metrics.record_cache_miss();
state.activity.push(ActivityEntry::new(
ActionType::Pull,
ActionType::ProxyFetch,
format!("{}@{}", name, &digest[..19.min(digest.len())]),
"docker",
"LOCAL",
"PROXY",
));
(
// Cache in storage (fire and forget)
let storage = state.storage.clone();
let key_clone = key.clone();
let data_clone = data.clone();
tokio::spawn(async move {
let _ = storage.put(&key_clone, &data_clone).await;
});
return (
StatusCode::OK,
[(header::CONTENT_TYPE, "application/octet-stream")],
data,
Bytes::from(data),
)
.into_response()
.into_response();
}
Err(_) => StatusCode::NOT_FOUND.into_response(),
}
StatusCode::NOT_FOUND.into_response()
}
async fn start_upload(Path(name): Path<String>) -> Response {
@@ -213,35 +276,106 @@ async fn get_manifest(
}
let key = format!("docker/{}/manifests/{}.json", name, reference);
match state.storage.get(&key).await {
Ok(data) => {
// Try local storage first
if let Ok(data) = state.storage.get(&key).await {
state.metrics.record_download("docker");
state.metrics.record_cache_hit();
state.activity.push(ActivityEntry::new(
ActionType::Pull,
format!("{}:{}", name, reference),
"docker",
"LOCAL",
));
// Calculate digest for Docker-Content-Digest header
use sha2::Digest;
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));
// Detect manifest media type from content
let content_type = detect_manifest_media_type(&data);
// Update metadata (downloads, last_pulled) in background
let meta_key = format!("docker/{}/manifests/{}.meta.json", name, reference);
let storage_clone = state.storage.clone();
tokio::spawn(update_metadata_on_pull(storage_clone, meta_key));
return (
StatusCode::OK,
[
(header::CONTENT_TYPE, content_type),
(HeaderName::from_static("docker-content-digest"), digest),
],
data,
)
.into_response();
}
// Try upstream proxies
for upstream in &state.config.docker.upstreams {
if let Ok((data, content_type)) = fetch_manifest_from_upstream(
&upstream.url,
&name,
&reference,
&state.docker_auth,
state.config.docker.proxy_timeout,
)
.await
{
state.metrics.record_download("docker");
state.metrics.record_cache_hit();
state.metrics.record_cache_miss();
state.activity.push(ActivityEntry::new(
ActionType::Pull,
ActionType::ProxyFetch,
format!("{}:{}", name, reference),
"docker",
"LOCAL",
"PROXY",
));
// Calculate digest for Docker-Content-Digest header
use sha2::Digest;
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));
(
// Cache manifest and create metadata (fire and forget)
let storage = state.storage.clone();
let key_clone = key.clone();
let data_clone = data.clone();
let name_clone = name.clone();
let reference_clone = reference.clone();
let digest_clone = digest.clone();
tokio::spawn(async move {
// Store manifest by tag and digest
let _ = storage.put(&key_clone, &data_clone).await;
let digest_key = format!("docker/{}/manifests/{}.json", name_clone, digest_clone);
let _ = storage.put(&digest_key, &data_clone).await;
// Extract and save metadata
let metadata = extract_metadata(&data_clone, &storage, &name_clone).await;
if let Ok(meta_json) = serde_json::to_vec(&metadata) {
let meta_key = format!(
"docker/{}/manifests/{}.meta.json",
name_clone, reference_clone
);
let _ = storage.put(&meta_key, &meta_json).await;
let digest_meta_key =
format!("docker/{}/manifests/{}.meta.json", name_clone, digest_clone);
let _ = storage.put(&digest_meta_key, &meta_json).await;
}
});
return (
StatusCode::OK,
[
(
header::CONTENT_TYPE,
"application/vnd.docker.distribution.manifest.v2+json".to_string(),
),
(header::CONTENT_TYPE, content_type),
(HeaderName::from_static("docker-content-digest"), digest),
],
data,
Bytes::from(data),
)
.into_response()
.into_response();
}
Err(_) => StatusCode::NOT_FOUND.into_response(),
}
StatusCode::NOT_FOUND.into_response()
}
async fn put_manifest(
@@ -272,6 +406,17 @@ async fn put_manifest(
return StatusCode::INTERNAL_SERVER_ERROR.into_response();
}
// Extract and save metadata
let metadata = extract_metadata(&body, &state.storage, &name).await;
let meta_key = format!("docker/{}/manifests/{}.meta.json", name, reference);
if let Ok(meta_json) = serde_json::to_vec(&metadata) {
let _ = state.storage.put(&meta_key, &meta_json).await;
// Also save metadata by digest
let digest_meta_key = format!("docker/{}/manifests/{}.meta.json", name, digest);
let _ = state.storage.put(&digest_meta_key, &meta_json).await;
}
state.metrics.record_upload("docker");
state.activity.push(ActivityEntry::new(
ActionType::Push,
@@ -308,3 +453,314 @@ async fn list_tags(State(state): State<Arc<AppState>>, Path(name): Path<String>)
.collect();
(StatusCode::OK, Json(json!({"name": name, "tags": tags}))).into_response()
}
/// Fetch a blob from an upstream Docker registry
///
/// Sends an anonymous request first; on 401 Unauthorized it obtains a
/// Bearer token from `DockerAuth` (using the Www-Authenticate challenge)
/// and retries exactly once. Any network or auth failure maps to `Err(())`.
async fn fetch_blob_from_upstream(
    upstream_url: &str,
    name: &str,
    digest: &str,
    docker_auth: &DockerAuth,
    timeout: u64,
) -> Result<Vec<u8>, ()> {
    let blob_url = format!(
        "{}/v2/{}/blobs/{}",
        upstream_url.trim_end_matches('/'),
        name,
        digest
    );
    let http = reqwest::Client::builder()
        .timeout(Duration::from_secs(timeout))
        .build()
        .map_err(|_| ())?;

    // Anonymous attempt first; most registries only challenge on 401.
    let mut resp = http.get(&blob_url).send().await.map_err(|_| ())?;
    if resp.status() == reqwest::StatusCode::UNAUTHORIZED {
        // Pull the challenge out of the 401 and trade it for a token.
        let challenge = resp
            .headers()
            .get("www-authenticate")
            .and_then(|v| v.to_str().ok())
            .map(String::from);
        let token = docker_auth
            .get_token(upstream_url, name, challenge.as_deref())
            .await
            .ok_or(())?;
        resp = http
            .get(&blob_url)
            .header("Authorization", format!("Bearer {}", token))
            .send()
            .await
            .map_err(|_| ())?;
    }
    if !resp.status().is_success() {
        return Err(());
    }
    resp.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
}
/// Fetch a manifest from an upstream Docker registry
/// Returns (manifest_bytes, content_type)
///
/// Tries an anonymous request first; on 401 Unauthorized it obtains a
/// Bearer token via `DockerAuth` (driven by the `Www-Authenticate`
/// challenge) and retries once. The upstream's Content-Type header is
/// passed through so OCI/list manifests keep their media type.
async fn fetch_manifest_from_upstream(
    upstream_url: &str,
    name: &str,
    reference: &str,
    docker_auth: &DockerAuth,
    timeout: u64,
) -> Result<(Vec<u8>, String), ()> {
    let url = format!(
        "{}/v2/{}/manifests/{}",
        upstream_url.trim_end_matches('/'),
        name,
        reference
    );
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(timeout))
        .build()
        .map_err(|_| ())?;
    // Request with Accept header for manifest types
    // (single-image and index flavors, both Docker and OCI).
    let accept_header = "application/vnd.docker.distribution.manifest.v2+json, \
        application/vnd.docker.distribution.manifest.list.v2+json, \
        application/vnd.oci.image.manifest.v1+json, \
        application/vnd.oci.image.index.v1+json";
    // First try without auth
    let response = client
        .get(&url)
        .header("Accept", accept_header)
        .send()
        .await
        .map_err(|_| ())?;
    let response = if response.status() == reqwest::StatusCode::UNAUTHORIZED {
        // Get Www-Authenticate header and fetch token
        let www_auth = response
            .headers()
            .get("www-authenticate")
            .and_then(|v| v.to_str().ok())
            .map(String::from);
        if let Some(token) = docker_auth
            .get_token(upstream_url, name, www_auth.as_deref())
            .await
        {
            // Retry once with the Bearer token; keep the same Accept set.
            client
                .get(&url)
                .header("Accept", accept_header)
                .header("Authorization", format!("Bearer {}", token))
                .send()
                .await
                .map_err(|_| ())?
        } else {
            // No token obtainable -> give up on this upstream.
            return Err(());
        }
    } else {
        response
    };
    if !response.status().is_success() {
        return Err(());
    }
    // Preserve the upstream media type; default to Docker manifest v2.
    let content_type = response
        .headers()
        .get("content-type")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("application/vnd.docker.distribution.manifest.v2+json")
        .to_string();
    let bytes = response.bytes().await.map_err(|_| ())?;
    Ok((bytes.to_vec(), content_type))
}
/// Detect manifest media type from its JSON content
///
/// Prefers an explicit `mediaType` field; otherwise infers from
/// `schemaVersion` and the presence of `config` / `manifests`.
/// Falls back to Docker manifest v2 for anything unrecognizable.
fn detect_manifest_media_type(data: &[u8]) -> String {
    const DOCKER_V2: &str = "application/vnd.docker.distribution.manifest.v2+json";

    let parsed: Value = match serde_json::from_slice(data) {
        Ok(v) => v,
        Err(_) => return DOCKER_V2.to_string(),
    };
    // An explicit mediaType always wins.
    if let Some(mt) = parsed.get("mediaType").and_then(|v| v.as_str()) {
        return mt.to_string();
    }
    // No mediaType: infer from schemaVersion and document structure.
    match parsed.get("schemaVersion").and_then(|v| v.as_u64()) {
        Some(1) => "application/vnd.docker.distribution.manifest.v1+json".to_string(),
        // schemaVersion 2 without mediaType but with a config is manifest v2.
        Some(_) if parsed.get("config").is_some() => DOCKER_V2.to_string(),
        // A "manifests" array marks an index/list.
        Some(_) if parsed.get("manifests").is_some() => {
            "application/vnd.oci.image.index.v1+json".to_string()
        }
        _ => DOCKER_V2.to_string(),
    }
}
/// Extract metadata from a Docker manifest
/// Handles both single-arch manifests and multi-arch indexes
///
/// `push_timestamp` is set to "now" and pull statistics start at zero.
/// Bug fix: the multi-arch branch previously returned early and could
/// leave `os`/`arch` as empty strings; the "unknown" fallback now applies
/// on every path.
async fn extract_metadata(manifest: &[u8], storage: &Storage, name: &str) -> ImageMetadata {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    let mut metadata = ImageMetadata {
        push_timestamp: now,
        last_pulled: 0,
        downloads: 0,
        ..Default::default()
    };
    if let Ok(json) = serde_json::from_slice::<Value>(manifest) {
        if let Some(manifests) = json.get("manifests").and_then(|m| m.as_array()) {
            // Multi-arch index: sum the per-platform manifest sizes and take
            // OS/arch from the first platform entry (usually linux/amd64).
            metadata.size_bytes = manifests
                .iter()
                .filter_map(|m| m.get("size").and_then(|s| s.as_u64()))
                .sum();
            if let Some(platform) = manifests.first().and_then(|m| m.get("platform")) {
                metadata.os = platform
                    .get("os")
                    .and_then(|v| v.as_str())
                    .unwrap_or("multi-arch")
                    .to_string();
                metadata.arch = platform
                    .get("architecture")
                    .and_then(|v| v.as_str())
                    .unwrap_or("multi")
                    .to_string();
                metadata.variant = platform
                    .get("variant")
                    .and_then(|v| v.as_str())
                    .map(String::from);
            }
        } else {
            // Single-arch manifest: collect layer digests/sizes.
            if let Some(layers) = json.get("layers").and_then(|l| l.as_array()) {
                for layer in layers {
                    let digest = layer
                        .get("digest")
                        .and_then(|d| d.as_str())
                        .unwrap_or("")
                        .to_string();
                    let size = layer.get("size").and_then(|s| s.as_u64()).unwrap_or(0);
                    metadata.size_bytes += size;
                    metadata.layers.push(LayerInfo { digest, size });
                }
            }
            // OS/arch live in the config blob, not the manifest itself.
            if let Some(config_digest) = json
                .get("config")
                .and_then(|c| c.get("digest"))
                .and_then(|d| d.as_str())
            {
                let (os, arch, variant) = get_config_info(storage, name, config_digest).await;
                metadata.os = os;
                metadata.arch = arch;
                metadata.variant = variant;
            }
        }
    }
    // Fallback applied uniformly (fix: previously skipped for multi-arch).
    if metadata.os.is_empty() {
        metadata.os = "unknown".to_string();
    }
    if metadata.arch.is_empty() {
        metadata.arch = "unknown".to_string();
    }
    metadata
}
/// Get OS/arch information from a config blob
///
/// Reads the config blob from local storage and pulls out the `os`,
/// `architecture` and optional `variant` fields. Missing blob, invalid
/// JSON, or missing fields all degrade to "unknown" / `None`.
async fn get_config_info(
    storage: &Storage,
    name: &str,
    config_digest: &str,
) -> (String, String, Option<String>) {
    // Fallback returned whenever the blob is missing or unparsable.
    let unknown = || ("unknown".to_string(), "unknown".to_string(), None);

    let blob_key = format!("docker/{}/blobs/{}", name, config_digest);
    let raw = match storage.get(&blob_key).await {
        Ok(bytes) => bytes,
        Err(_) => return unknown(),
    };
    let config: Value = match serde_json::from_slice(&raw) {
        Ok(v) => v,
        Err(_) => return unknown(),
    };
    // Small helper: extract an optional string field as owned String.
    let str_field = |field: &str| {
        config
            .get(field)
            .and_then(|v| v.as_str())
            .map(String::from)
    };
    (
        str_field("os").unwrap_or_else(|| "unknown".to_string()),
        str_field("architecture").unwrap_or_else(|| "unknown".to_string()),
        str_field("variant"),
    )
}
/// Update metadata when a manifest is pulled
/// Increments download counter and updates last_pulled timestamp
async fn update_metadata_on_pull(storage: Storage, meta_key: String) {
    let pulled_at = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    // Load the existing record; fall back to a zeroed one when the key is
    // missing or holds invalid JSON.
    let mut meta = match storage.get(&meta_key).await {
        Ok(bytes) => serde_json::from_slice::<ImageMetadata>(&bytes).unwrap_or_default(),
        Err(_) => ImageMetadata::default(),
    };
    meta.downloads += 1;
    meta.last_pulled = pulled_at;
    // Best-effort write-back; errors are intentionally ignored since this
    // runs in a background task.
    if let Ok(encoded) = serde_json::to_vec(&meta) {
        let _ = storage.put(&meta_key, &encoded).await;
    }
}

View File

@@ -0,0 +1,189 @@
use parking_lot::RwLock;
use std::collections::HashMap;
use std::time::{Duration, Instant};
/// Cached Docker registry token
struct CachedToken {
    /// The raw Bearer token string sent in the Authorization header.
    token: String,
    /// Instant after which the token is considered stale and refetched.
    expires_at: Instant,
}
/// Docker registry authentication handler
/// Manages Bearer token acquisition and caching for upstream registries
pub struct DockerAuth {
    /// Token cache keyed by "registry_url:repository".
    tokens: RwLock<HashMap<String, CachedToken>>,
    /// Shared HTTP client used for token requests and authenticated fetches.
    client: reqwest::Client,
}
impl DockerAuth {
    /// Token lifetime (seconds) used when the auth server does not report
    /// `expires_in` in its response.
    const DEFAULT_TOKEN_TTL_SECS: u64 = 300;

    /// Create a handler whose HTTP client times out after `timeout` seconds.
    pub fn new(timeout: u64) -> Self {
        Self {
            tokens: RwLock::new(HashMap::new()),
            client: reqwest::Client::builder()
                .timeout(Duration::from_secs(timeout))
                .build()
                .unwrap_or_default(),
        }
    }

    /// Get a valid token for the given registry and repository scope
    /// Returns cached token if still valid, otherwise fetches a new one
    pub async fn get_token(
        &self,
        registry_url: &str,
        name: &str,
        www_authenticate: Option<&str>,
    ) -> Option<String> {
        let cache_key = format!("{}:{}", registry_url, name);
        // Check cache first (read lock only, released before any await).
        {
            let tokens = self.tokens.read();
            if let Some(cached) = tokens.get(&cache_key) {
                if cached.expires_at > Instant::now() {
                    return Some(cached.token.clone());
                }
            }
        }
        // Need to fetch a new token; without a challenge we cannot.
        let www_auth = www_authenticate?;
        let (token, ttl) = self.fetch_token(www_auth, name).await?;
        // Cache the token for the server-reported lifetime.
        {
            let mut tokens = self.tokens.write();
            tokens.insert(
                cache_key,
                CachedToken {
                    token: token.clone(),
                    expires_at: Instant::now() + ttl,
                },
            );
        }
        Some(token)
    }

    /// Parse Www-Authenticate header and fetch token from auth server
    /// Format: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/alpine:pull"
    ///
    /// Returns the token together with its time-to-live. Bug fix: the TTL
    /// now honors the `expires_in` field of the token response (previously
    /// a fixed 5 minutes, which could serve expired short-lived tokens
    /// from the cache).
    async fn fetch_token(&self, www_authenticate: &str, name: &str) -> Option<(String, Duration)> {
        let params = parse_www_authenticate(www_authenticate)?;
        let realm = params.get("realm")?;
        let service = params.get("service").map(|s| s.as_str()).unwrap_or("");
        // Build token request URL
        let scope = format!("repository:{}:pull", name);
        let url = format!("{}?service={}&scope={}", realm, service, scope);
        let response = self.client.get(&url).send().await.ok()?;
        if !response.status().is_success() {
            return None;
        }
        let json: serde_json::Value = response.json().await.ok()?;
        // Docker Hub returns "token", some registries return "access_token"
        let token = json
            .get("token")
            .or_else(|| json.get("access_token"))
            .and_then(|v| v.as_str())
            .map(String::from)?;
        // Prefer the server-reported lifetime; guard against 0/absent.
        let ttl_secs = json
            .get("expires_in")
            .and_then(|v| v.as_u64())
            .filter(|&s| s > 0)
            .unwrap_or(Self::DEFAULT_TOKEN_TTL_SECS);
        Some((token, Duration::from_secs(ttl_secs)))
    }

    /// Make an authenticated request to an upstream registry
    pub async fn fetch_with_auth(
        &self,
        url: &str,
        registry_url: &str,
        name: &str,
    ) -> Result<reqwest::Response, ()> {
        // First try without auth
        let response = self.client.get(url).send().await.map_err(|_| ())?;
        if response.status() == reqwest::StatusCode::UNAUTHORIZED {
            // Extract Www-Authenticate header
            let www_auth = response
                .headers()
                .get("www-authenticate")
                .and_then(|v| v.to_str().ok())
                .map(String::from);
            // Get token and retry
            if let Some(token) = self
                .get_token(registry_url, name, www_auth.as_deref())
                .await
            {
                return self
                    .client
                    .get(url)
                    .header("Authorization", format!("Bearer {}", token))
                    .send()
                    .await
                    .map_err(|_| ());
            }
            return Err(());
        }
        Ok(response)
    }
}
impl Default for DockerAuth {
    /// Default handler with a 60-second HTTP timeout.
    fn default() -> Self {
        Self::new(60)
    }
}
/// Parse Www-Authenticate header into key-value pairs
/// Example: Bearer realm="https://auth.docker.io/token",service="registry.docker.io"
///
/// Returns `None` when the header is not a Bearer challenge.
/// Bug fix: commas inside quoted values (e.g. scope="repository:x:pull,push")
/// no longer split the value — splitting now only happens outside quotes.
fn parse_www_authenticate(header: &str) -> Option<HashMap<String, String>> {
    let header = header
        .strip_prefix("Bearer ")
        .or_else(|| header.strip_prefix("bearer "))?;
    let mut params = HashMap::new();
    // Split on commas that are not inside double quotes.
    let mut parts: Vec<String> = Vec::new();
    let mut current = String::new();
    let mut in_quotes = false;
    for ch in header.chars() {
        match ch {
            '"' => {
                in_quotes = !in_quotes;
                current.push(ch);
            }
            ',' if !in_quotes => parts.push(std::mem::take(&mut current)),
            _ => current.push(ch),
        }
    }
    parts.push(current);
    for part in parts {
        let part = part.trim();
        if let Some((key, value)) = part.split_once('=') {
            let value = value.trim_matches('"');
            params.insert(key.to_string(), value.to_string());
        }
    }
    Some(params)
}
#[cfg(test)]
mod tests {
    use super::*;

    // Full Docker Hub style challenge: realm, service and scope present.
    #[test]
    fn test_parse_www_authenticate() {
        let header = r#"Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/alpine:pull""#;
        let params = parse_www_authenticate(header).unwrap();
        assert_eq!(
            params.get("realm"),
            Some(&"https://auth.docker.io/token".to_string())
        );
        assert_eq!(
            params.get("service"),
            Some(&"registry.docker.io".to_string())
        );
    }

    // The parser must also accept a lowercase "bearer" scheme prefix.
    #[test]
    fn test_parse_www_authenticate_lowercase() {
        let header = r#"bearer realm="https://ghcr.io/token",service="ghcr.io""#;
        let params = parse_www_authenticate(header).unwrap();
        assert_eq!(
            params.get("realm"),
            Some(&"https://ghcr.io/token".to_string())
        );
    }
}

View File

@@ -1,11 +1,15 @@
mod cargo_registry;
mod docker;
pub mod docker;
pub mod docker_auth;
mod maven;
mod npm;
mod pypi;
mod raw;
pub use cargo_registry::routes as cargo_routes;
pub use docker::routes as docker_routes;
pub use docker_auth::DockerAuth;
pub use maven::routes as maven_routes;
pub use npm::routes as npm_routes;
pub use pypi::routes as pypi_routes;
pub use raw::routes as raw_routes;

View File

@@ -80,7 +80,7 @@ async fn package_versions(
// Try proxy if configured
if let Some(proxy_url) = &state.config.pypi.proxy {
let url = format!("{}{}/", proxy_url.trim_end_matches('/'), normalized);
let url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);
if let Ok(html) = fetch_package_page(&url, state.config.pypi.proxy_timeout).await {
// Rewrite URLs in the HTML to point to our registry
@@ -125,7 +125,7 @@ async fn download_file(
// Try proxy if configured
if let Some(proxy_url) = &state.config.pypi.proxy {
// First, fetch the package page to find the actual download URL
let page_url = format!("{}{}/", proxy_url.trim_end_matches('/'), normalized);
let page_url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);
if let Ok(html) = fetch_package_page(&page_url, state.config.pypi.proxy_timeout).await {
// Find the URL for this specific file
@@ -233,6 +233,30 @@ fn rewrite_pypi_links(html: &str, package_name: &str) -> String {
}
}
result.push_str(remaining);
// Remove data-core-metadata and data-dist-info-metadata attributes
// as we don't serve .metadata files (PEP 658)
let result = remove_attribute(&result, "data-core-metadata");
let result = remove_attribute(&result, "data-dist-info-metadata");
result
}
/// Remove an HTML attribute from all tags
///
/// Matches the literal pattern ` attr="value"` (space-prefixed, double
/// quoted) and drops attribute name and value; everything else is copied
/// through unchanged.
fn remove_attribute(html: &str, attr_name: &str) -> String {
    let needle = format!(" {}=\"", attr_name);
    let mut cleaned = String::with_capacity(html.len());
    let mut rest = html;
    // Copy everything up to each occurrence, then skip the attribute
    // together with its quoted value.
    while let Some(start) = rest.find(&needle) {
        cleaned.push_str(&rest[..start]);
        rest = &rest[start + needle.len()..];
        if let Some(end_quote) = rest.find('"') {
            rest = &rest[end_quote + 1..];
        }
    }
    cleaned.push_str(rest);
    cleaned
}

View File

@@ -0,0 +1,133 @@
use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
body::Bytes,
extract::{Path, State},
http::{header, StatusCode},
response::{IntoResponse, Response},
routing::get,
Router,
};
use std::sync::Arc;
/// Build the router for the raw repository: one wildcard path supporting
/// GET (download), PUT (upload), DELETE, and HEAD (existence check).
pub fn routes() -> Router<Arc<AppState>> {
    Router::new().route(
        "/raw/{*path}",
        get(download)
            .put(upload)
            .delete(delete_file)
            .head(check_exists),
    )
}
/// GET handler: serve a raw file from storage with a guessed Content-Type.
async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    // The raw repository can be switched off entirely in config.
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }
    let key = format!("raw/{}", path);
    let Ok(data) = state.storage.get(&key).await else {
        return StatusCode::NOT_FOUND.into_response();
    };
    state.metrics.record_download("raw");
    state
        .activity
        .push(ActivityEntry::new(ActionType::Pull, path, "raw", "LOCAL"));
    // Guess content type from extension
    let content_type = guess_content_type(&key);
    (StatusCode::OK, [(header::CONTENT_TYPE, content_type)], data).into_response()
}
/// PUT handler: store a raw file, enforcing the configured size limit.
async fn upload(
    State(state): State<Arc<AppState>>,
    Path(path): Path<String>,
    body: Bytes,
) -> Response {
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }
    // Reject oversized payloads before touching storage.
    let limit = state.config.raw.max_file_size;
    if body.len() as u64 > limit {
        return (
            StatusCode::PAYLOAD_TOO_LARGE,
            format!("File too large. Max size: {} bytes", limit),
        )
            .into_response();
    }
    let key = format!("raw/{}", path);
    if state.storage.put(&key, &body).await.is_err() {
        return StatusCode::INTERNAL_SERVER_ERROR.into_response();
    }
    state.metrics.record_upload("raw");
    state
        .activity
        .push(ActivityEntry::new(ActionType::Push, path, "raw", "LOCAL"));
    StatusCode::CREATED.into_response()
}
/// DELETE handler: remove a raw file, mapping storage outcomes to HTTP codes.
async fn delete_file(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }
    let key = format!("raw/{}", path);
    // 204 on success, 404 when the key never existed, 500 otherwise.
    let status = match state.storage.delete(&key).await {
        Ok(()) => StatusCode::NO_CONTENT,
        Err(crate::storage::StorageError::NotFound) => StatusCode::NOT_FOUND,
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
    };
    status.into_response()
}
/// HEAD handler: report existence plus size/type headers without a body.
async fn check_exists(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    // Raw repository may be disabled in configuration.
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }
    let key = format!("raw/{}", path);
    // stat() avoids reading the blob; only metadata is needed for HEAD.
    match state.storage.stat(&key).await {
        Some(meta) => (
            StatusCode::OK,
            [
                (header::CONTENT_LENGTH, meta.size.to_string()),
                (header::CONTENT_TYPE, guess_content_type(&key).to_string()),
            ],
        )
            .into_response(),
        None => StatusCode::NOT_FOUND.into_response(),
    }
}
/// Guess a Content-Type from the file extension of `path`.
///
/// Bug fix: uses `Path::extension` instead of `rsplit('.')`, so a file
/// with no extension whose *name* equals a known extension (e.g. a file
/// literally named "json") no longer gets a false match. Matching is
/// case-insensitive; unknown or missing extensions map to
/// "application/octet-stream".
fn guess_content_type(path: &str) -> &'static str {
    use std::path::Path;
    let ext = Path::new(path)
        .extension()
        .and_then(|e| e.to_str())
        .map(|e| e.to_lowercase())
        .unwrap_or_default();
    match ext.as_str() {
        "json" => "application/json",
        "xml" => "application/xml",
        "html" | "htm" => "text/html",
        "css" => "text/css",
        "js" => "application/javascript",
        "txt" => "text/plain",
        "md" => "text/markdown",
        "yaml" | "yml" => "application/x-yaml",
        "toml" => "application/toml",
        "tar" => "application/x-tar",
        "gz" | "gzip" => "application/gzip",
        "zip" => "application/zip",
        "png" => "image/png",
        "jpg" | "jpeg" => "image/jpeg",
        "gif" => "image/gif",
        "svg" => "image/svg+xml",
        "pdf" => "application/pdf",
        "wasm" => "application/wasm",
        _ => "application/octet-stream",
    }
}