security: harden Docker registry and container runtime

- Verify blob digest (SHA256) on upload, reject mismatches (DIGEST_INVALID)
- Reject sha512 digests (only sha256 supported)
- Add upload session limits: max 100 concurrent, 2GB per session, 30min TTL
- Bind upload sessions to repository name (prevent session fixation)
- Filter .meta.json from Docker tag list (fix ArgoCD Image Updater recursion)
- Fix catalog to show namespaced images (library/alpine instead of library)
- Add security headers: CSP, X-Frame-Options, X-Content-Type-Options, Referrer-Policy
- Run containers as non-root user (USER nora) in all 3 Dockerfiles
- Add configurable NORA_MAX_UPLOAD_SESSIONS and NORA_MAX_UPLOAD_SESSION_SIZE_MB
This commit is contained in:
2026-03-19 08:29:28 +00:00
parent 52e59a8272
commit c1f6430aa9
8 changed files with 225 additions and 57 deletions

44
Cargo.lock generated
View File

@@ -1267,20 +1267,6 @@ version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21" checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21"
[[package]]
name = "nora-cli"
version = "0.2.32"
dependencies = [
"clap",
"flate2",
"indicatif",
"reqwest",
"serde",
"serde_json",
"tar",
"tokio",
]
[[package]] [[package]]
name = "nora-fuzz" name = "nora-fuzz"
version = "0.0.0" version = "0.0.0"
@@ -1317,6 +1303,7 @@ dependencies = [
"thiserror 2.0.18", "thiserror 2.0.18",
"tokio", "tokio",
"toml", "toml",
"tower-http",
"tower_governor", "tower_governor",
"tracing", "tracing",
"tracing-subscriber", "tracing-subscriber",
@@ -1327,25 +1314,6 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "nora-storage"
version = "0.2.32"
dependencies = [
"axum",
"base64",
"chrono",
"httpdate",
"quick-xml",
"serde",
"serde_json",
"sha2",
"tokio",
"toml",
"tracing",
"tracing-subscriber",
"uuid",
]
[[package]] [[package]]
name = "nu-ansi-term" name = "nu-ansi-term"
version = "0.50.3" version = "0.50.3"
@@ -1540,16 +1508,6 @@ dependencies = [
"winapi", "winapi",
] ]
[[package]]
name = "quick-xml"
version = "0.39.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "958f21e8e7ceb5a1aa7fa87fab28e7c75976e0bfe7e23ff069e0a260f894067d"
dependencies = [
"memchr",
"serde",
]
[[package]] [[package]]
name = "quinn" name = "quinn"
version = "0.11.9" version = "0.11.9"

View File

@@ -2,9 +2,11 @@
# Binary is pre-built by CI (cargo build --release) and passed via context # Binary is pre-built by CI (cargo build --release) and passed via context
FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805
RUN apk add --no-cache ca-certificates && mkdir -p /data RUN apk add --no-cache ca-certificates \
&& addgroup -S nora && adduser -S -G nora nora \
&& mkdir -p /data && chown nora:nora /data
COPY nora /usr/local/bin/nora COPY --chown=nora:nora nora /usr/local/bin/nora
ENV RUST_LOG=info ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0 ENV NORA_HOST=0.0.0.0
@@ -17,5 +19,7 @@ EXPOSE 4000
VOLUME ["/data"] VOLUME ["/data"]
USER nora
ENTRYPOINT ["/usr/local/bin/nora"] ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"] CMD ["serve"]

View File

@@ -6,11 +6,14 @@
# RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/* # RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/*
FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs
RUN apk add --no-cache ca-certificates RUN apk add --no-cache ca-certificates \
&& addgroup -S -g 10001 nora && adduser -S -u 10001 -G nora nora
FROM scratch FROM scratch
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=certs /etc/passwd /etc/passwd
COPY --from=certs /etc/group /etc/group
COPY nora /usr/local/bin/nora COPY nora /usr/local/bin/nora
ENV RUST_LOG=info ENV RUST_LOG=info
@@ -24,5 +27,7 @@ EXPOSE 4000
VOLUME ["/data"] VOLUME ["/data"]
USER nora
ENTRYPOINT ["/usr/local/bin/nora"] ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"] CMD ["serve"]

View File

@@ -6,11 +6,14 @@
# RUN dnf install -y ca-certificates && dnf clean all # RUN dnf install -y ca-certificates && dnf clean all
FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs
RUN apk add --no-cache ca-certificates RUN apk add --no-cache ca-certificates \
&& addgroup -S -g 10001 nora && adduser -S -u 10001 -G nora nora
FROM scratch FROM scratch
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=certs /etc/passwd /etc/passwd
COPY --from=certs /etc/group /etc/group
COPY nora /usr/local/bin/nora COPY nora /usr/local/bin/nora
ENV RUST_LOG=info ENV RUST_LOG=info
@@ -24,5 +27,7 @@ EXPOSE 4000
VOLUME ["/data"] VOLUME ["/data"]
USER nora
ENTRYPOINT ["/usr/local/bin/nora"] ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"] CMD ["serve"]

View File

@@ -39,6 +39,11 @@
- **Security** - **Security**
- Basic Auth (htpasswd + bcrypt) - Basic Auth (htpasswd + bcrypt)
- Revocable API tokens with RBAC - Revocable API tokens with RBAC
- Blob digest verification (SHA256)
- Non-root container images
- Security headers (CSP, X-Frame-Options, nosniff)
- Upload session limits (DoS protection)
- Configurable upload size for ML models (`NORA_MAX_UPLOAD_SESSION_SIZE_MB`)
- ENV-based configuration (12-Factor) - ENV-based configuration (12-Factor)
- SBOM (SPDX + CycloneDX) in every release - SBOM (SPDX + CycloneDX) in every release
- See [SECURITY.md](SECURITY.md) for vulnerability reporting - See [SECURITY.md](SECURITY.md) for vulnerability reporting

View File

@@ -49,6 +49,7 @@ tower_governor = "0.8"
governor = "0.10" governor = "0.10"
parking_lot = "0.12" parking_lot = "0.12"
zeroize = { version = "1.8", features = ["derive"] } zeroize = { version = "1.8", features = ["derive"] }
tower-http = { version = "0.6", features = ["set-header"] }
[dev-dependencies] [dev-dependencies]
tempfile = "3" tempfile = "3"

View File

@@ -24,7 +24,7 @@ mod tokens;
mod ui; mod ui;
mod validation; mod validation;
use axum::{extract::DefaultBodyLimit, middleware, Router}; use axum::{extract::DefaultBodyLimit, http::HeaderValue, middleware, Router};
use clap::{Parser, Subcommand}; use clap::{Parser, Subcommand};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
@@ -375,6 +375,22 @@ async fn run_server(config: Config, storage: Storage) {
.layer(DefaultBodyLimit::max( .layer(DefaultBodyLimit::max(
state.config.server.body_limit_mb * 1024 * 1024, state.config.server.body_limit_mb * 1024 * 1024,
)) ))
.layer(tower_http::set_header::SetResponseHeaderLayer::overriding(
axum::http::header::HeaderName::from_static("x-content-type-options"),
HeaderValue::from_static("nosniff"),
))
.layer(tower_http::set_header::SetResponseHeaderLayer::overriding(
axum::http::header::HeaderName::from_static("x-frame-options"),
HeaderValue::from_static("DENY"),
))
.layer(tower_http::set_header::SetResponseHeaderLayer::overriding(
axum::http::header::HeaderName::from_static("referrer-policy"),
HeaderValue::from_static("strict-origin-when-cross-origin"),
))
.layer(tower_http::set_header::SetResponseHeaderLayer::overriding(
axum::http::header::HeaderName::from_static("content-security-policy"),
HeaderValue::from_static("default-src 'self'; script-src 'self' 'unsafe-inline' https://cdn.tailwindcss.com https://unpkg.com; style-src 'self' 'unsafe-inline'; img-src 'self' data:; font-src 'self'; connect-src 'self'"),
))
.layer(middleware::from_fn(request_id::request_id_middleware)) .layer(middleware::from_fn(request_id::request_id_middleware))
.layer(middleware::from_fn(metrics::metrics_middleware)) .layer(middleware::from_fn(metrics::metrics_middleware))
.layer(middleware::from_fn_with_state( .layer(middleware::from_fn_with_state(

View File

@@ -44,11 +44,57 @@ pub struct LayerInfo {
pub size: u64, pub size: u64,
} }
/// In-progress upload session with metadata
///
/// One entry exists per active chunked blob upload, keyed by its UUID
/// in the `UPLOAD_SESSIONS` map.
struct UploadSession {
/// Accumulated blob bytes appended by successive PATCH requests.
data: Vec<u8>,
/// Repository name the session was opened for; checked on follow-up
/// requests to prevent session fixation across repositories.
name: String,
/// Creation time; compared against `SESSION_TTL` to expire the session.
created_at: std::time::Instant,
}
/// Hard ceiling on simultaneously open upload sessions unless overridden
/// via `NORA_MAX_UPLOAD_SESSIONS` (guards against memory exhaustion).
const DEFAULT_MAX_UPLOAD_SESSIONS: usize = 100;
/// Default per-session cap in megabytes; override with
/// `NORA_MAX_UPLOAD_SESSION_SIZE_MB` (e.g. for large ML model layers).
const DEFAULT_MAX_SESSION_SIZE_MB: usize = 2048;
/// How long an upload session may live before it is garbage-collected.
const SESSION_TTL: Duration = Duration::from_secs(30 * 60);
/// Maximum number of concurrent upload sessions.
///
/// Reads `NORA_MAX_UPLOAD_SESSIONS`; an unset or unparseable value
/// falls back to [`DEFAULT_MAX_UPLOAD_SESSIONS`].
fn max_upload_sessions() -> usize {
    match std::env::var("NORA_MAX_UPLOAD_SESSIONS") {
        Ok(raw) => raw.parse().unwrap_or(DEFAULT_MAX_UPLOAD_SESSIONS),
        Err(_) => DEFAULT_MAX_UPLOAD_SESSIONS,
    }
}
/// Maximum size of a single upload session, in **bytes**.
///
/// `NORA_MAX_UPLOAD_SESSION_SIZE_MB` is interpreted as megabytes;
/// saturating multiplication avoids overflow on absurd values.
fn max_session_size() -> usize {
    let megabytes = match std::env::var("NORA_MAX_UPLOAD_SESSION_SIZE_MB") {
        Ok(raw) => raw.parse::<usize>().unwrap_or(DEFAULT_MAX_SESSION_SIZE_MB),
        Err(_) => DEFAULT_MAX_SESSION_SIZE_MB,
    };
    megabytes.saturating_mul(1024 * 1024)
}
/// In-progress upload sessions for chunked uploads /// In-progress upload sessions for chunked uploads
/// Maps UUID -> accumulated data /// Maps UUID -> UploadSession with limits and TTL
static UPLOAD_SESSIONS: std::sync::LazyLock<RwLock<HashMap<String, Vec<u8>>>> = static UPLOAD_SESSIONS: std::sync::LazyLock<RwLock<HashMap<String, UploadSession>>> =
std::sync::LazyLock::new(|| RwLock::new(HashMap::new())); std::sync::LazyLock::new(|| RwLock::new(HashMap::new()));
/// Remove expired upload sessions (called periodically)
///
/// Drops every session whose age has reached `SESSION_TTL` so stale
/// entries never count against the concurrency limit, logging a summary
/// only when something was actually evicted.
fn cleanup_expired_sessions() {
    let mut sessions = UPLOAD_SESSIONS.write();
    let count_before = sessions.len();
    sessions.retain(|_, session| session.created_at.elapsed() < SESSION_TTL);
    let evicted = count_before - sessions.len();
    if evicted > 0 {
        tracing::info!(
            removed = evicted,
            remaining = sessions.len(),
            "Cleaned up expired upload sessions"
        );
    }
}
pub fn routes() -> Router<Arc<AppState>> { pub fn routes() -> Router<Arc<AppState>> {
Router::new() Router::new()
.route("/v2/", get(check)) .route("/v2/", get(check))
@@ -108,9 +154,19 @@ async fn catalog(State(state): State<Arc<AppState>>) -> Json<Value> {
let mut repos: Vec<String> = keys let mut repos: Vec<String> = keys
.iter() .iter()
.filter_map(|k| { .filter_map(|k| {
k.strip_prefix("docker/") let rest = k.strip_prefix("docker/")?;
.and_then(|rest| rest.split('/').next()) // Find the first known directory separator (manifests/ or blobs/)
.map(String::from) let name = if let Some(idx) = rest.find("/manifests/") {
&rest[..idx]
} else if let Some(idx) = rest.find("/blobs/") {
&rest[..idx]
} else {
return None;
};
if name.is_empty() {
return None;
}
Some(name.to_string())
}) })
.collect(); .collect();
@@ -254,7 +310,38 @@ async fn start_upload(Path(name): Path<String>) -> Response {
return (StatusCode::BAD_REQUEST, e.to_string()).into_response(); return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
} }
// Cleanup expired sessions before checking limits
cleanup_expired_sessions();
// Enforce max concurrent sessions
{
let sessions = UPLOAD_SESSIONS.read();
let max_sessions = max_upload_sessions();
if sessions.len() >= max_sessions {
tracing::warn!(
max = max_sessions,
current = sessions.len(),
"Upload session limit reached — rejecting new upload"
);
return (StatusCode::TOO_MANY_REQUESTS, "Too many concurrent uploads").into_response();
}
}
let uuid = uuid::Uuid::new_v4().to_string(); let uuid = uuid::Uuid::new_v4().to_string();
// Create session with metadata
{
let mut sessions = UPLOAD_SESSIONS.write();
sessions.insert(
uuid.clone(),
UploadSession {
data: Vec::new(),
name: name.clone(),
created_at: std::time::Instant::now(),
},
);
}
let location = format!("/v2/{}/blobs/uploads/{}", name, uuid); let location = format!("/v2/{}/blobs/uploads/{}", name, uuid);
( (
StatusCode::ACCEPTED, StatusCode::ACCEPTED,
@@ -276,9 +363,47 @@ async fn patch_blob(Path((name, uuid)): Path<(String, String)>, body: Bytes) ->
// Append data to the upload session and get total size // Append data to the upload session and get total size
let total_size = { let total_size = {
let mut sessions = UPLOAD_SESSIONS.write(); let mut sessions = UPLOAD_SESSIONS.write();
let session = sessions.entry(uuid.clone()).or_default(); let session = match sessions.get_mut(&uuid) {
session.extend_from_slice(&body); Some(s) => s,
session.len() None => {
return (StatusCode::NOT_FOUND, "Upload session not found or expired")
.into_response();
}
};
// Verify session belongs to this repository
if session.name != name {
tracing::warn!(
session_name = %session.name,
request_name = %name,
"SECURITY: upload session name mismatch — possible session fixation"
);
return (
StatusCode::BAD_REQUEST,
"Session does not belong to this repository",
)
.into_response();
}
// Check session TTL
if session.created_at.elapsed() >= SESSION_TTL {
sessions.remove(&uuid);
return (StatusCode::NOT_FOUND, "Upload session expired").into_response();
}
// Check size limit
let new_size = session.data.len() + body.len();
if new_size > max_session_size() {
sessions.remove(&uuid);
return (
StatusCode::PAYLOAD_TOO_LARGE,
"Upload session exceeds size limit",
)
.into_response();
}
session.data.extend_from_slice(&body);
session.data.len()
}; };
let location = format!("/v2/{}/blobs/uploads/{}", name, uuid); let location = format!("/v2/{}/blobs/uploads/{}", name, uuid);
@@ -325,8 +450,22 @@ async fn upload_blob(
// Get data from chunked session if exists, otherwise use body directly // Get data from chunked session if exists, otherwise use body directly
let data = { let data = {
let mut sessions = UPLOAD_SESSIONS.write(); let mut sessions = UPLOAD_SESSIONS.write();
if let Some(mut session_data) = sessions.remove(&uuid) { if let Some(session) = sessions.remove(&uuid) {
// Verify session belongs to this repository
if session.name != name {
tracing::warn!(
session_name = %session.name,
request_name = %name,
"SECURITY: upload finalization name mismatch"
);
return (
StatusCode::BAD_REQUEST,
"Session does not belong to this repository",
)
.into_response();
}
// Chunked upload: append any final body data and use session // Chunked upload: append any final body data and use session
let mut session_data = session.data;
if !body.is_empty() { if !body.is_empty() {
session_data.extend_from_slice(&body); session_data.extend_from_slice(&body);
} }
@@ -337,6 +476,40 @@ async fn upload_blob(
} }
}; };
// Only sha256 digests are supported for verification
if !digest.starts_with("sha256:") {
return (
StatusCode::BAD_REQUEST,
"Only sha256 digests are supported for blob uploads",
)
.into_response();
}
// Verify digest matches uploaded content (Docker Distribution Spec)
{
use sha2::Digest as _;
let computed = format!("sha256:{:x}", sha2::Sha256::digest(&data));
if computed != *digest {
tracing::warn!(
expected = %digest,
computed = %computed,
name = %name,
"SECURITY: blob digest mismatch — rejecting upload"
);
return (
StatusCode::BAD_REQUEST,
Json(json!({
"errors": [{
"code": "DIGEST_INVALID",
"message": "provided digest did not match uploaded content",
"detail": { "expected": digest, "computed": computed }
}]
})),
)
.into_response();
}
}
let key = format!("docker/{}/blobs/{}", name, digest); let key = format!("docker/{}/blobs/{}", name, digest);
match state.storage.put(&key, &data).await { match state.storage.put(&key, &data).await {
Ok(()) => { Ok(()) => {
@@ -619,6 +792,7 @@ async fn list_tags(State(state): State<Arc<AppState>>, Path(name): Path<String>)
.and_then(|t| t.strip_suffix(".json")) .and_then(|t| t.strip_suffix(".json"))
.map(String::from) .map(String::from)
}) })
.filter(|t| !t.ends_with(".meta") && !t.contains(".meta."))
.collect(); .collect();
(StatusCode::OK, Json(json!({"name": name, "tags": tags}))).into_response() (StatusCode::OK, Json(json!({"name": name, "tags": tags}))).into_response()
} }