7 Commits

Author SHA1 Message Date
d0a9459acd Fix Docker push/pull: add PATCH endpoint for chunked uploads
- Add PATCH handler for /v2/{name}/blobs/uploads/{uuid} to support
  chunked blob uploads (Docker sends data chunks via PATCH)
- Include Range header in PATCH response to indicate bytes received
- Add Docker-Content-Digest header to GET manifest responses
- Store manifests by both tag and digest for proper pull support
- Add parking_lot dependency for upload session state management
2026-01-26 12:01:05 +00:00
482a68637e Fix rate limiting: exempt health/metrics, increase upload limits
- Health, metrics, UI, and API docs are now exempt from rate limiting
- Increased upload rate limits to 200 req/s with burst of 500 for Docker compatibility
2026-01-26 11:04:14 +00:00
61f8a39279 Use self-hosted runner for release builds
16-core runner should be 3-4x faster than GitHub's 2-core runners
2026-01-26 10:39:04 +00:00
835a6f0b14 Speed up release workflow
- Remove duplicate tests (already run on push to main)
- Build only for amd64 (arm64 rarely needed for VPS)
2026-01-26 10:18:11 +00:00
340c49bf12 Fix formatting 2026-01-26 10:14:11 +00:00
c84d13c26e Increase upload rate limits for Docker parallel requests
Docker client sends many parallel requests when pushing layers.
Increased upload rate limiter from 10 req/s to 50 req/s and burst from 20 to 100.
2026-01-26 10:10:45 +00:00
7e8978533a fix: resolve clippy warnings and format code 2026-01-26 08:31:00 +00:00
14 changed files with 597 additions and 85 deletions

View File

@@ -9,25 +9,9 @@ env:
IMAGE_NAME: ${{ github.repository }} IMAGE_NAME: ${{ github.repository }}
jobs: jobs:
test:
name: Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache cargo
uses: Swatinem/rust-cache@v2
- name: Run tests
run: cargo test --package nora-registry
build: build:
name: Build & Push name: Build & Push
runs-on: ubuntu-latest runs-on: self-hosted
needs: test
permissions: permissions:
contents: read contents: read
packages: write packages: write
@@ -63,7 +47,7 @@ jobs:
uses: docker/build-push-action@v5 uses: docker/build-push-action@v5
with: with:
context: . context: .
platforms: linux/amd64,linux/arm64 platforms: linux/amd64
push: true push: true
tags: ${{ steps.meta.outputs.tags }} tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }} labels: ${{ steps.meta.outputs.labels }}

1
Cargo.lock generated
View File

@@ -1212,6 +1212,7 @@ dependencies = [
"httpdate", "httpdate",
"indicatif", "indicatif",
"lazy_static", "lazy_static",
"parking_lot",
"prometheus", "prometheus",
"reqwest", "reqwest",
"serde", "serde",

440
TODO.md Normal file
View File

@@ -0,0 +1,440 @@
# NORA Roadmap / TODO
## v0.2.0 - DONE
- [x] Unit tests (75 tests passing)
- [x] Input validation (path traversal protection)
- [x] Rate limiting (brute-force protection)
- [x] Request ID tracking
- [x] Migrate command (local <-> S3)
- [x] Error handling (thiserror)
- [x] SVG brand icons
---
## v0.3.0 - OIDC / Workload Identity Federation
### Killer Feature: OIDC for CI/CD
Zero-secret authentication for GitHub Actions, GitLab CI, etc.
**Goal:** Replace manual `ROBOT_TOKEN` rotation with federated identity.
```yaml
# GitHub Actions example
permissions:
id-token: write
steps:
- name: Login to NORA
uses: nora/login-action@v1
```
### Config Structure (draft)
```toml
[auth.oidc]
enabled = true
# GitHub Actions
[[auth.oidc.providers]]
name = "github-actions"
issuer = "https://token.actions.githubusercontent.com"
audience = "https://nora.example.com"
[[auth.oidc.providers.rules]]
# Claim matching (supports glob)
match = { repository = "my-org/*", ref = "refs/heads/main" }
# Granted permissions
permissions = ["push:my-org/*", "pull:*"]
[[auth.oidc.providers.rules]]
match = { repository = "my-org/*", ref = "refs/heads/*" }
permissions = ["pull:*"]
# GitLab CI
[[auth.oidc.providers]]
name = "gitlab-ci"
issuer = "https://gitlab.com"
audience = "https://nora.example.com"
[[auth.oidc.providers.rules]]
match = { project_path = "my-group/*" }
permissions = ["push:my-group/*", "pull:*"]
```
### Implementation Tasks
- [ ] JWT validation library (jsonwebtoken crate)
- [ ] OIDC discovery (/.well-known/openid-configuration)
- [ ] JWKS fetching and caching
- [ ] Claims extraction and glob matching
- [ ] Permission resolution from rules
- [ ] Token exchange endpoint (POST /auth/oidc/token)
- [ ] GitHub Action: `nora/login-action`
---
## v0.4.0 - Transparent Docker Hub Proxy
### Pain Point
Harbor forces tag changes: `docker pull my-harbor/proxy-cache/library/nginx`
This breaks Helm charts hardcoded to `nginx`.
### Goal
Transparent pull-through cache:
```bash
docker pull nora.example.com/nginx # -> proxies to Docker Hub
```
### Implementation Tasks
- [ ] Registry v2 API interception
- [ ] Upstream registry configuration
- [ ] Cache layer management
- [ ] Rate limit handling (Docker Hub limits)
---
## v0.5.0 - Repo-level RBAC
### Challenge
Per-repository permissions need fast lookup (100 layers per push).
### Solution
Glob patterns for 90% of cases:
```toml
[[auth.rules]]
subject = "team-frontend"
permissions = ["push:frontend/*", "pull:*"]
[[auth.rules]]
subject = "ci-bot"
permissions = ["push:*/release-*", "pull:*"]
```
### Implementation Tasks
- [ ] In-memory permission cache
- [ ] Glob pattern matcher (globset crate)
- [ ] Permission inheritance (org -> project -> repo)
---
## Target Audience
1. DevOps engineers tired of Java/Go monsters
2. Edge/IoT installations (Raspberry Pi, branch offices)
3. Educational platforms (student labs)
4. CI/CD pipelines (GitHub Actions, GitLab CI)
## Competitive Advantages
| Feature | NORA | Harbor | Nexus |
|---------|------|--------|-------|
| Memory | <100MB | 2GB+ | 4GB+ |
| OIDC for CI | v0.3.0 | No | No |
| Transparent proxy | v0.4.0 | No (tag rewrite) | Partial |
| Single binary | Yes | No (microservices) | No (Java) |
| Zero-config upgrade | Yes | Complex | Complex |
---
## v0.6.0 - Online Garbage Collection
### Pain Point
Harbor GC blocks registry for hours. Can't push during cleanup.
### Goal
Non-blocking garbage collection with zero downtime.
### Implementation Tasks
- [ ] Mark-and-sweep without locking
- [ ] Background blob cleanup
- [ ] Progress reporting via API/CLI
- [ ] `nora gc --dry-run` preview
---
## v0.7.0 - Retention Policies
### Pain Point
"Keep last 10 tags" sounds simple, but it works poorly in every existing registry.
### Goal
Declarative retention rules in config:
```toml
[[retention]]
match = "*/dev-*"
keep_last = 5
[[retention]]
match = "*/release-*"
keep_last = 20
older_than = "90d"
[[retention]]
match = "**/pr-*"
older_than = "7d"
```
### Implementation Tasks
- [ ] Glob pattern matching for repos/tags
- [ ] Age-based and count-based rules
- [ ] Dry-run mode
- [ ] Scheduled execution (cron-style)
---
## v0.8.0 - Multi-tenancy & Quotas
### Pain Point
Harbor projects have quotas but configuration is painful. Nexus has no real isolation.
### Goal
Simple namespaces with limits:
```toml
[[tenants]]
name = "team-frontend"
storage_quota = "50GB"
rate_limit = { push = 100, pull = 1000 } # per hour
[[tenants]]
name = "team-backend"
storage_quota = "100GB"
```
### Implementation Tasks
- [ ] Tenant isolation (namespace prefix)
- [ ] Storage quota tracking
- [ ] Per-tenant rate limiting
- [ ] Usage reporting API
---
## v0.9.0 - Smart Replication
### Pain Point
Harbor replication rules are complex, and errors are silently swallowed.
### Goal
Simple CLI-driven replication with clear feedback:
```bash
nora replicate --to remote-dc --filter "prod/*" --dry-run
nora replicate --from gcr.io/my-project/* --to local/imported/
```
### Implementation Tasks
- [ ] Push-based replication to remote NORA
- [ ] Pull-based import from external registries (Docker Hub, GCR, ECR, Quay)
- [ ] Filter by glob patterns
- [ ] Progress bar and detailed logs
- [ ] Retry logic with exponential backoff
---
## v1.0.0 - Production Ready
### Features to polish
- [ ] Full CLI (`nora images ls`, `nora tag`, `nora delete`)
- [ ] Webhooks with filters and retry logic
- [ ] Enhanced Prometheus metrics (per-repo stats, cache hit ratio, bandwidth per tenant)
- [ ] TUI dashboard (optional)
- [ ] Helm chart for Kubernetes deployment
- [ ] Official Docker image on ghcr.io
---
## Future Ideas (v1.x+)
### Cold Storage Tiering
Auto-move old tags to S3 Glacier:
```toml
[[storage.tiering]]
match = "*"
older_than = "180d"
move_to = "s3-glacier"
```
### Vulnerability Scanning Integration
Not built-in (use Trivy), but:
- [ ] Webhook on push -> trigger external scan
- [ ] Store scan results as OCI artifacts
- [ ] Block pull if critical CVEs (policy)
### Image Signing (Cosign/Notation)
- [ ] Signature storage (OCI artifacts)
- [ ] Policy enforcement (reject unsigned)
### P2P Distribution (Dragonfly/Kraken style)
For large clusters pulling the same image simultaneously.
---
## Architecture / DDD
### Current State (v0.2.0)
Monolithic structure, all in `nora-registry/src/`:
```
src/
├── main.rs # CLI + server setup
├── auth.rs # htpasswd + basic auth
├── tokens.rs # API tokens
├── storage/ # Storage backends (local, s3)
├── registry/ # Protocol handlers (docker, maven, npm, cargo, pypi)
├── ui/ # Web dashboard
└── ...
```
### Target Architecture (v1.0+)
#### Domain-Driven Design Boundaries
```
nora/
├── nora-core/ # Domain layer (no dependencies)
│ ├── src/
│ │ ├── artifact.rs # Artifact, Digest, Tag, Manifest
│ │ ├── repository.rs # Repository, Namespace
│ │ ├── identity.rs # User, ServiceAccount, Token
│ │ ├── policy.rs # Permission, Rule, Quota
│ │ └── events.rs # DomainEvent (ArtifactPushed, etc.)
├── nora-auth/ # Authentication bounded context
│ ├── src/
│ │ ├── htpasswd.rs # Basic auth provider
│ │ ├── oidc.rs # OIDC/JWT provider
│ │ ├── token.rs # API token provider
│ │ └── rbac.rs # Permission resolver
├── nora-storage/ # Storage bounded context
│ ├── src/
│ │ ├── backend.rs # StorageBackend trait
│ │ ├── local.rs # Filesystem
│ │ ├── s3.rs # S3-compatible
│ │ ├── tiered.rs # Hot/cold tiering
│ │ └── gc.rs # Garbage collection
├── nora-registry/ # Application layer (HTTP API)
│ ├── src/
│ │ ├── api/
│ │ │ ├── oci.rs # OCI Distribution API (/v2/)
│ │ │ ├── maven.rs # Maven repository
│ │ │ ├── npm.rs # npm registry
│ │ │ ├── cargo.rs # Cargo registry
│ │ │ └── pypi.rs # PyPI (simple API)
│ │ ├── proxy/ # Upstream proxy/cache
│ │ ├── webhook/ # Event webhooks
│ │ └── ui/ # Web dashboard
├── nora-cli/ # CLI application
│ ├── src/
│ │ ├── commands/
│ │ │ ├── serve.rs
│ │ │ ├── images.rs # nora images ls/delete/tag
│ │ │ ├── gc.rs # nora gc
│ │ │ ├── backup.rs # nora backup/restore
│ │ │ ├── migrate.rs # nora migrate
│ │ │ └── replicate.rs
│ │ └── tui/ # Optional TUI dashboard
└── nora-sdk/ # Client SDK (for nora/login-action)
└── src/
├── client.rs # HTTP client
└── oidc.rs # Token exchange
```
#### Key Principles
1. **Hexagonal Architecture**
- Core domain has no external dependencies
- Ports (traits) define boundaries
- Adapters implement ports (S3, filesystem, OIDC providers)
2. **Event-Driven**
- Domain events: `ArtifactPushed`, `ArtifactDeleted`, `TagCreated`
- Webhooks subscribe to events
- Async processing for GC, replication
3. **CQRS-lite**
- Commands: Push, Delete, CreateToken
- Queries: List, Get, Search
- Separate read/write paths for hot endpoints
4. **Configuration as Code**
- All policies in `nora.toml`
- No database for config (file-based)
- GitOps friendly
#### Trait Boundaries (Ports)
```rust
// nora-core/src/ports.rs
#[async_trait]
pub trait ArtifactStore {
async fn push_blob(&self, digest: &Digest, data: Bytes) -> Result<()>;
async fn get_blob(&self, digest: &Digest) -> Result<Bytes>;
async fn push_manifest(&self, repo: &Repository, tag: &Tag, manifest: &Manifest) -> Result<()>;
async fn get_manifest(&self, repo: &Repository, reference: &Reference) -> Result<Manifest>;
async fn list_tags(&self, repo: &Repository) -> Result<Vec<Tag>>;
async fn delete(&self, repo: &Repository, reference: &Reference) -> Result<()>;
}
#[async_trait]
pub trait IdentityProvider {
async fn authenticate(&self, credentials: &Credentials) -> Result<Identity>;
async fn authorize(&self, identity: &Identity, action: &Action, resource: &Resource) -> Result<bool>;
}
#[async_trait]
pub trait EventPublisher {
async fn publish(&self, event: DomainEvent) -> Result<()>;
}
```
#### Migration Path
| Phase | Action |
|-------|--------|
| v0.3 | Extract `nora-auth` crate (OIDC work) |
| v0.4 | Extract `nora-core` domain types |
| v0.5 | Extract `nora-storage` with trait boundaries |
| v0.6+ | Refactor registry handlers to use ports |
| v1.0 | Full hexagonal architecture |
### Technical Debt to Address
- [ ] Remove `unwrap()` in non-test code (started in e9984cf)
- [ ] Add tracing spans to all handlers
- [ ] Consistent error types across modules
- [ ] Extract hardcoded limits to config
- [ ] Add OpenTelemetry support (traces, not just metrics)
### Performance Requirements
| Metric | Target |
|--------|--------|
| Memory (idle) | <50MB |
| Memory (under load) | <100MB |
| Startup time | <1s |
| Blob throughput | Wire speed (no processing overhead) |
| Manifest latency | <10ms p99 |
| Auth check | <1ms (cached) |
### Security Requirements
- [ ] No secrets in logs (already redacting)
- [ ] TLS termination (or trust reverse proxy)
- [ ] Content-addressable storage (immutable blobs)
- [ ] Audit log for all mutations
- [ ] SBOM generation for NORA itself
---
## Notes
- S3 storage: already implemented
- Web UI: minimalist read-only dashboard (done)
- TUI: consider for v1.0
- Vulnerability scanning: out of scope (use Trivy externally)
- Image signing: out of scope for now (use cosign externally)

View File

@@ -41,6 +41,7 @@ chrono = { version = "0.4", features = ["serde"] }
thiserror = "2" thiserror = "2"
tower_governor = "0.8" tower_governor = "0.8"
governor = "0.10" governor = "0.10"
parking_lot = "0.12"
[dev-dependencies] [dev-dependencies]
tempfile = "3" tempfile = "3"

View File

@@ -405,7 +405,9 @@ mod tests {
// Protected paths // Protected paths
assert!(!is_public_path("/v2/myimage/blobs/sha256:abc")); assert!(!is_public_path("/v2/myimage/blobs/sha256:abc"));
assert!(!is_public_path("/v2/library/nginx/manifests/latest")); assert!(!is_public_path("/v2/library/nginx/manifests/latest"));
assert!(!is_public_path("/maven2/com/example/artifact/1.0/artifact.jar")); assert!(!is_public_path(
"/maven2/com/example/artifact/1.0/artifact.jar"
));
assert!(!is_public_path("/npm/lodash")); assert!(!is_public_path("/npm/lodash"));
} }

View File

@@ -1,3 +1,4 @@
#![allow(dead_code)]
//! Application error handling with HTTP response conversion //! Application error handling with HTTP response conversion
//! //!
//! Provides a unified error type that can be converted to HTTP responses //! Provides a unified error type that can be converted to HTTP responses

View File

@@ -29,11 +29,7 @@ pub use storage::Storage;
use tokens::TokenStore; use tokens::TokenStore;
#[derive(Parser)] #[derive(Parser)]
#[command( #[command(name = "nora", version, about = "Multi-protocol artifact registry")]
name = "nora",
version,
about = "Multi-protocol artifact registry"
)]
struct Cli { struct Cli {
#[command(subcommand)] #[command(subcommand)]
command: Option<Commands>, command: Option<Commands>,
@@ -223,14 +219,22 @@ async fn run_server(config: Config, storage: Storage) {
.merge(registry::pypi_routes()) .merge(registry::pypi_routes())
.layer(rate_limit::upload_rate_limiter()); .layer(rate_limit::upload_rate_limiter());
let app = Router::new() // Routes WITHOUT rate limiting (health, metrics, UI)
let public_routes = Router::new()
.merge(health::routes()) .merge(health::routes())
.merge(metrics::routes()) .merge(metrics::routes())
.merge(ui::routes()) .merge(ui::routes())
.merge(openapi::routes()) .merge(openapi::routes());
// Routes WITH rate limiting
let rate_limited_routes = Router::new()
.merge(auth_routes) .merge(auth_routes)
.merge(registry_routes) .merge(registry_routes)
.layer(rate_limit::general_rate_limiter()) // General rate limit for all routes .layer(rate_limit::general_rate_limiter());
let app = Router::new()
.merge(public_routes)
.merge(rate_limited_routes)
.layer(DefaultBodyLimit::max(100 * 1024 * 1024)) // 100MB default body limit .layer(DefaultBodyLimit::max(100 * 1024 * 1024)) // 100MB default body limit
.layer(middleware::from_fn(request_id::request_id_middleware)) .layer(middleware::from_fn(request_id::request_id_middleware))
.layer(middleware::from_fn(metrics::metrics_middleware)) .layer(middleware::from_fn(metrics::metrics_middleware))

View File

@@ -8,17 +8,12 @@ use indicatif::{ProgressBar, ProgressStyle};
use tracing::{info, warn}; use tracing::{info, warn};
/// Migration options /// Migration options
#[derive(Default)]
pub struct MigrateOptions { pub struct MigrateOptions {
/// If true, show what would be migrated without copying /// If true, show what would be migrated without copying
pub dry_run: bool, pub dry_run: bool,
} }
impl Default for MigrateOptions {
fn default() -> Self {
Self { dry_run: false }
}
}
/// Migration statistics /// Migration statistics
#[derive(Debug, Default)] #[derive(Debug, Default)]
pub struct MigrateStats { pub struct MigrateStats {
@@ -64,7 +59,9 @@ pub async fn migrate(
let pb = ProgressBar::new(keys.len() as u64); let pb = ProgressBar::new(keys.len() as u64);
pb.set_style( pb.set_style(
ProgressStyle::default_bar() ProgressStyle::default_bar()
.template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta})") .template(
"{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta})",
)
.expect("Invalid progress bar template") .expect("Invalid progress bar template")
.progress_chars("#>-"), .progress_chars("#>-"),
); );

View File

@@ -1,3 +1,4 @@
#![allow(dead_code)]
//! Rate limiting configuration and middleware //! Rate limiting configuration and middleware
//! //!
//! Provides rate limiting to protect against: //! Provides rate limiting to protect against:
@@ -29,8 +30,8 @@ impl Default for RateLimitConfig {
Self { Self {
auth_rps: 1, // 1 req/sec for auth (strict) auth_rps: 1, // 1 req/sec for auth (strict)
auth_burst: 5, // Allow burst of 5 auth_burst: 5, // Allow burst of 5
upload_rps: 10, // 10 req/sec for uploads upload_rps: 200, // 200 req/sec for uploads (Docker needs high parallelism)
upload_burst: 20, // Allow burst of 20 upload_burst: 500, // Allow burst of 500
general_rps: 100, // 100 req/sec general general_rps: 100, // 100 req/sec general
general_burst: 200, // Allow burst of 200 general_burst: 200, // Allow burst of 200
} }
@@ -57,15 +58,16 @@ pub fn auth_rate_limiter() -> tower_governor::GovernorLayer<
/// Create rate limiter layer for upload endpoints /// Create rate limiter layer for upload endpoints
/// ///
/// Default: 10 requests per second, burst of 20 /// Default: 200 requests per second, burst of 500
/// High limits to accommodate Docker client's aggressive parallel layer uploads
pub fn upload_rate_limiter() -> tower_governor::GovernorLayer< pub fn upload_rate_limiter() -> tower_governor::GovernorLayer<
tower_governor::key_extractor::PeerIpKeyExtractor, tower_governor::key_extractor::PeerIpKeyExtractor,
governor::middleware::StateInformationMiddleware, governor::middleware::StateInformationMiddleware,
axum::body::Body, axum::body::Body,
> { > {
let config = GovernorConfigBuilder::default() let config = GovernorConfigBuilder::default()
.per_second(10) .per_second(200)
.burst_size(20) .burst_size(500)
.use_headers() .use_headers()
.finish() .finish()
.unwrap(); .unwrap();
@@ -100,7 +102,7 @@ mod tests {
let config = RateLimitConfig::default(); let config = RateLimitConfig::default();
assert_eq!(config.auth_rps, 1); assert_eq!(config.auth_rps, 1);
assert_eq!(config.auth_burst, 5); assert_eq!(config.auth_burst, 5);
assert_eq!(config.upload_rps, 10); assert_eq!(config.upload_rps, 200);
assert_eq!(config.general_rps, 100); assert_eq!(config.general_rps, 100);
} }

View File

@@ -5,12 +5,19 @@ use axum::{
extract::{Path, State}, extract::{Path, State},
http::{header, HeaderName, StatusCode}, http::{header, HeaderName, StatusCode},
response::{IntoResponse, Response}, response::{IntoResponse, Response},
routing::{get, head, put}, routing::{get, head, patch, put},
Json, Router, Json, Router,
}; };
use parking_lot::RwLock;
use serde_json::{json, Value}; use serde_json::{json, Value};
use std::collections::HashMap;
use std::sync::Arc; use std::sync::Arc;
/// In-progress upload sessions for chunked uploads
/// Maps UUID -> accumulated data
static UPLOAD_SESSIONS: std::sync::LazyLock<RwLock<HashMap<String, Vec<u8>>>> =
std::sync::LazyLock::new(|| RwLock::new(HashMap::new()));
pub fn routes() -> Router<Arc<AppState>> { pub fn routes() -> Router<Arc<AppState>> {
Router::new() Router::new()
.route("/v2/", get(check)) .route("/v2/", get(check))
@@ -20,7 +27,10 @@ pub fn routes() -> Router<Arc<AppState>> {
"/v2/{name}/blobs/uploads/", "/v2/{name}/blobs/uploads/",
axum::routing::post(start_upload), axum::routing::post(start_upload),
) )
.route("/v2/{name}/blobs/uploads/{uuid}", put(upload_blob)) .route(
"/v2/{name}/blobs/uploads/{uuid}",
patch(patch_blob).put(upload_blob),
)
.route("/v2/{name}/manifests/{reference}", get(get_manifest)) .route("/v2/{name}/manifests/{reference}", get(get_manifest))
.route("/v2/{name}/manifests/{reference}", put(put_manifest)) .route("/v2/{name}/manifests/{reference}", put(put_manifest))
.route("/v2/{name}/tags/list", get(list_tags)) .route("/v2/{name}/tags/list", get(list_tags))
@@ -92,9 +102,46 @@ async fn start_upload(Path(name): Path<String>) -> Response {
.into_response() .into_response()
} }
/// PATCH handler for chunked blob uploads
/// Docker client sends data chunks via PATCH, then finalizes with PUT
async fn patch_blob(Path((name, uuid)): Path<(String, String)>, body: Bytes) -> Response {
if let Err(e) = validate_docker_name(&name) {
return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
}
// Append data to the upload session and get total size
let total_size = {
let mut sessions = UPLOAD_SESSIONS.write();
let session = sessions.entry(uuid.clone()).or_insert_with(Vec::new);
session.extend_from_slice(&body);
session.len()
};
let location = format!("/v2/{}/blobs/uploads/{}", name, uuid);
// Range header indicates bytes 0 to (total_size - 1) have been received
let range = if total_size > 0 {
format!("0-{}", total_size - 1)
} else {
"0-0".to_string()
};
(
StatusCode::ACCEPTED,
[
(header::LOCATION, location),
(header::RANGE, range),
(HeaderName::from_static("docker-upload-uuid"), uuid),
],
)
.into_response()
}
/// PUT handler for completing blob uploads
/// Handles both monolithic uploads (body contains all data) and
/// chunked upload finalization (body may be empty, data in session)
async fn upload_blob( async fn upload_blob(
State(state): State<Arc<AppState>>, State(state): State<Arc<AppState>>,
Path((name, _uuid)): Path<(String, String)>, Path((name, uuid)): Path<(String, String)>,
axum::extract::Query(params): axum::extract::Query<std::collections::HashMap<String, String>>, axum::extract::Query(params): axum::extract::Query<std::collections::HashMap<String, String>>,
body: Bytes, body: Bytes,
) -> Response { ) -> Response {
@@ -111,8 +158,23 @@ async fn upload_blob(
return (StatusCode::BAD_REQUEST, e.to_string()).into_response(); return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
} }
// Get data from chunked session if exists, otherwise use body directly
let data = {
let mut sessions = UPLOAD_SESSIONS.write();
if let Some(mut session_data) = sessions.remove(&uuid) {
// Chunked upload: append any final body data and use session
if !body.is_empty() {
session_data.extend_from_slice(&body);
}
session_data
} else {
// Monolithic upload: use body directly
body.to_vec()
}
};
let key = format!("docker/{}/blobs/{}", name, digest); let key = format!("docker/{}/blobs/{}", name, digest);
match state.storage.put(&key, &body).await { match state.storage.put(&key, &data).await {
Ok(()) => { Ok(()) => {
let location = format!("/v2/{}/blobs/{}", name, digest); let location = format!("/v2/{}/blobs/{}", name, digest);
(StatusCode::CREATED, [(header::LOCATION, location)]).into_response() (StatusCode::CREATED, [(header::LOCATION, location)]).into_response()
@@ -134,15 +196,23 @@ async fn get_manifest(
let key = format!("docker/{}/manifests/{}.json", name, reference); let key = format!("docker/{}/manifests/{}.json", name, reference);
match state.storage.get(&key).await { match state.storage.get(&key).await {
Ok(data) => ( Ok(data) => {
// Calculate digest for Docker-Content-Digest header
use sha2::Digest;
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));
(
StatusCode::OK, StatusCode::OK,
[( [
(
header::CONTENT_TYPE, header::CONTENT_TYPE,
"application/vnd.docker.distribution.manifest.v2+json", "application/vnd.docker.distribution.manifest.v2+json".to_string(),
)], ),
(HeaderName::from_static("docker-content-digest"), digest),
],
data, data,
) )
.into_response(), .into_response()
}
Err(_) => StatusCode::NOT_FOUND.into_response(), Err(_) => StatusCode::NOT_FOUND.into_response(),
} }
} }
@@ -159,11 +229,22 @@ async fn put_manifest(
return (StatusCode::BAD_REQUEST, e.to_string()).into_response(); return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
} }
let key = format!("docker/{}/manifests/{}.json", name, reference); // Calculate digest
match state.storage.put(&key, &body).await {
Ok(()) => {
use sha2::Digest; use sha2::Digest;
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&body)); let digest = format!("sha256:{:x}", sha2::Sha256::digest(&body));
// Store by tag/reference
let key = format!("docker/{}/manifests/{}.json", name, reference);
if let Err(_) = state.storage.put(&key, &body).await {
return StatusCode::INTERNAL_SERVER_ERROR.into_response();
}
// Also store by digest for direct digest lookups
let digest_key = format!("docker/{}/manifests/{}.json", name, digest);
if let Err(_) = state.storage.put(&digest_key, &body).await {
return StatusCode::INTERNAL_SERVER_ERROR.into_response();
}
let location = format!("/v2/{}/manifests/{}", name, reference); let location = format!("/v2/{}/manifests/{}", name, reference);
( (
StatusCode::CREATED, StatusCode::CREATED,
@@ -174,14 +255,8 @@ async fn put_manifest(
) )
.into_response() .into_response()
} }
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
}
}
async fn list_tags( async fn list_tags(State(state): State<Arc<AppState>>, Path(name): Path<String>) -> Response {
State(state): State<Arc<AppState>>,
Path(name): Path<String>,
) -> Response {
if let Err(e) = validate_docker_name(&name) { if let Err(e) = validate_docker_name(&name) {
return (StatusCode::BAD_REQUEST, e.to_string()).into_response(); return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
} }

View File

@@ -76,11 +76,9 @@ impl Storage {
pub async fn list(&self, prefix: &str) -> Vec<String> { pub async fn list(&self, prefix: &str) -> Vec<String> {
// Empty prefix is valid for listing all // Empty prefix is valid for listing all
if !prefix.is_empty() { if !prefix.is_empty() && validate_storage_key(prefix).is_err() {
if let Err(_) = validate_storage_key(prefix) {
return Vec::new(); return Vec::new();
} }
}
self.inner.list(prefix).await self.inner.list(prefix).await
} }

View File

@@ -59,7 +59,13 @@ pub fn render_dashboard(stats: &RegistryStats) -> String {
</div> </div>
</div> </div>
"##, "##,
stat_card("Docker", icons::DOCKER, stats.docker, "/ui/docker", "images"), stat_card(
"Docker",
icons::DOCKER,
stats.docker,
"/ui/docker",
"images"
),
stat_card("Maven", icons::MAVEN, stats.maven, "/ui/maven", "artifacts"), stat_card("Maven", icons::MAVEN, stats.maven, "/ui/maven", "artifacts"),
stat_card("npm", icons::NPM, stats.npm, "/ui/npm", "packages"), stat_card("npm", icons::NPM, stats.npm, "/ui/npm", "packages"),
stat_card("Cargo", icons::CARGO, stats.cargo, "/ui/cargo", "crates"), stat_card("Cargo", icons::CARGO, stats.cargo, "/ui/cargo", "crates"),
@@ -455,7 +461,9 @@ fn get_registry_icon(registry_type: &str) -> &'static str {
"npm" => icons::NPM, "npm" => icons::NPM,
"cargo" => icons::CARGO, "cargo" => icons::CARGO,
"pypi" => icons::PYPI, "pypi" => icons::PYPI,
_ => r#"<path fill="currentColor" d="M10 4H4c-1.1 0-1.99.9-1.99 2L2 18c0 1.1.9 2 2 2h16c1.1 0 2-.9 2-2V8c0-1.1-.9-2-2-2h-8l-2-2z"/>"#, _ => {
r#"<path fill="currentColor" d="M10 4H4c-1.1 0-1.99.9-1.99 2L2 18c0 1.1.9 2 2 2h16c1.1 0 2-.9 2-2V8c0-1.1-.9-2-2-2h-8l-2-2z"/>"#
}
} }
} }

View File

@@ -1,3 +1,4 @@
#![allow(dead_code)]
//! Input validation for artifact registry paths and identifiers //! Input validation for artifact registry paths and identifiers
//! //!
//! Provides security validation to prevent path traversal attacks and //! Provides security validation to prevent path traversal attacks and
@@ -92,7 +93,7 @@ pub fn validate_storage_key(key: &str) -> Result<(), ValidationError> {
// Check each segment // Check each segment
for segment in key.split('/') { for segment in key.split('/') {
if segment.is_empty() && key != "" { if segment.is_empty() && !key.is_empty() {
// Allow trailing slash but not double slashes // Allow trailing slash but not double slashes
continue; continue;
} }

View File

@@ -133,9 +133,7 @@ async fn main() {
.expect("Failed to bind to address"); .expect("Failed to bind to address");
info!("nora-storage (S3 compatible) running on http://{}", addr); info!("nora-storage (S3 compatible) running on http://{}", addr);
axum::serve(listener, app) axum::serve(listener, app).await.expect("Server error");
.await
.expect("Server error");
} }
async fn list_buckets(State(state): State<Arc<AppState>>) -> Response { async fn list_buckets(State(state): State<Arc<AppState>>) -> Response {