feat: initialize NORA artifact registry

Cloud-native multi-protocol artifact registry in Rust.

- Docker Registry v2
- Maven (+ proxy)
- npm (+ proxy)
- Cargo, PyPI
- Web UI, Swagger, Prometheus
- Local & S3 storage
- 32MB Docker image

Created by DevITWay
https://getnora.io
This commit is contained in:
2026-01-25 17:03:18 +00:00
commit 586420a476
36 changed files with 7613 additions and 0 deletions

View File

@@ -0,0 +1,131 @@
use std::path::{Path, PathBuf};

use async_trait::async_trait;
use axum::body::Bytes;
use tokio::fs;
use tokio::io::AsyncReadExt;

use super::{FileMeta, Result, StorageBackend, StorageError};
/// Local filesystem storage backend (zero-config default)
///
/// Objects are stored at `base_path/<key>`, with keys treated as
/// `/`-separated relative paths.
pub struct LocalStorage {
    base_path: PathBuf,
}

impl LocalStorage {
    /// Create a backend rooted at `path`. The directory is created lazily
    /// (see `health_check`), not here.
    pub fn new(path: &str) -> Self {
        Self {
            base_path: PathBuf::from(path),
        }
    }

    /// Map a storage key to a path under the base directory.
    ///
    /// NOTE(review): the key is joined verbatim, so a key containing `..`
    /// could escape `base_path` — confirm that callers sanitize keys
    /// before they reach the storage layer.
    fn key_to_path(&self, key: &str) -> PathBuf {
        self.base_path.join(key)
    }

    /// Recursively list all files under `dir`, pushing keys (paths relative
    /// to `base`, `/`-separated) that start with `prefix` into `results`.
    ///
    /// Sync helper intended to run inside `spawn_blocking`.
    /// Takes `&Path` rather than `&PathBuf` (clippy `ptr_arg`): `&PathBuf`
    /// callers coerce automatically, and `&Path` accepts more inputs.
    fn list_files_sync(dir: &Path, base: &Path, prefix: &str, results: &mut Vec<String>) {
        if let Ok(entries) = std::fs::read_dir(dir) {
            for entry in entries.flatten() {
                let path = entry.path();
                if path.is_file() {
                    if let Ok(rel_path) = path.strip_prefix(base) {
                        // Normalize Windows separators so keys are portable.
                        let key = rel_path.to_string_lossy().replace('\\', "/");
                        // `starts_with("")` is always true, so an empty
                        // prefix matches every key; the old explicit
                        // `|| prefix.is_empty()` check was redundant.
                        if key.starts_with(prefix) {
                            results.push(key);
                        }
                    }
                } else if path.is_dir() {
                    Self::list_files_sync(&path, base, prefix, results);
                }
            }
        }
    }
}
#[async_trait]
impl StorageBackend for LocalStorage {
    /// Write `data` to `base_path/<key>`, creating parent directories
    /// as needed. Overwrites any existing object.
    async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
        let path = self.key_to_path(key);
        // Ensure the parent directory chain exists before writing.
        if let Some(parent) = path.parent() {
            fs::create_dir_all(parent)
                .await
                .map_err(|e| StorageError::Io(e.to_string()))?;
        }
        fs::write(&path, data)
            .await
            .map_err(|e| StorageError::Io(e.to_string()))?;
        Ok(())
    }

    /// Read the whole object for `key` into memory.
    ///
    /// Returns `StorageError::NotFound` when the file does not exist.
    async fn get(&self, key: &str) -> Result<Bytes> {
        let path = self.key_to_path(key);
        // Open directly instead of a separate `path.exists()` pre-check:
        // the pre-check raced with concurrent deletes (TOCTOU) and cost an
        // extra stat, while the open error below already distinguishes
        // NotFound from other I/O failures.
        let mut file = fs::File::open(&path).await.map_err(|e| {
            if e.kind() == std::io::ErrorKind::NotFound {
                StorageError::NotFound
            } else {
                StorageError::Io(e.to_string())
            }
        })?;
        let mut buffer = Vec::new();
        file.read_to_end(&mut buffer)
            .await
            .map_err(|e| StorageError::Io(e.to_string()))?;
        Ok(Bytes::from(buffer))
    }

    /// List all keys under `prefix`, sorted. Traversal errors are
    /// swallowed and surface as an empty (or partial) list.
    async fn list(&self, prefix: &str) -> Vec<String> {
        let base = self.base_path.clone();
        let prefix = prefix.to_string();
        // Recursive directory walking is blocking I/O; keep it off the
        // async executor threads.
        tokio::task::spawn_blocking(move || {
            let mut results = Vec::new();
            if base.exists() {
                Self::list_files_sync(&base, &base, &prefix, &mut results);
            }
            results.sort();
            results
        })
        .await
        .unwrap_or_default()
    }

    /// Size and mtime for `key`; `None` if the file is missing or the
    /// platform cannot report a modification time.
    async fn stat(&self, key: &str) -> Option<FileMeta> {
        let path = self.key_to_path(key);
        let metadata = fs::metadata(&path).await.ok()?;
        let modified = metadata
            .modified()
            .ok()?
            .duration_since(std::time::UNIX_EPOCH)
            .ok()?
            .as_secs();
        Some(FileMeta {
            size: metadata.len(),
            modified,
        })
    }

    /// Healthy when the base directory exists or can be created.
    async fn health_check(&self) -> bool {
        if self.base_path.exists() {
            return true;
        }
        fs::create_dir_all(&self.base_path).await.is_ok()
    }

    fn backend_name(&self) -> &'static str {
        "local"
    }
}

View File

@@ -0,0 +1,93 @@
mod local;
mod s3;
pub use local::LocalStorage;
pub use s3::S3Storage;
use async_trait::async_trait;
use axum::body::Bytes;
use std::fmt;
use std::sync::Arc;
/// File metadata returned by `StorageBackend::stat`.
///
/// Two plain integers, so the type is `Copy` and cheap to pass by value;
/// `PartialEq`/`Eq`/`Hash` make it usable in tests and collections.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub struct FileMeta {
    /// Object size in bytes.
    pub size: u64,
    /// Last-modified time, seconds since the Unix epoch (0 when unknown).
    pub modified: u64,
}
/// Errors surfaced by storage backends.
#[derive(Debug)]
pub enum StorageError {
    Network(String),
    NotFound,
    Io(String),
}

impl fmt::Display for StorageError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Short human-readable messages; the String payloads carry the
        // backend-specific detail.
        match self {
            StorageError::NotFound => f.write_str("Object not found"),
            StorageError::Network(msg) => write!(f, "Network error: {}", msg),
            StorageError::Io(msg) => write!(f, "IO error: {}", msg),
        }
    }
}

impl std::error::Error for StorageError {}

/// Convenience alias used throughout the storage layer.
pub type Result<T> = std::result::Result<T, StorageError>;
/// Storage backend trait
///
/// Abstracts an object store addressed by string keys. Implemented by
/// `LocalStorage` and `S3Storage` and consumed through the `Storage`
/// wrapper for dynamic dispatch.
#[async_trait]
pub trait StorageBackend: Send + Sync {
    /// Store `data` under `key`, overwriting any existing object.
    async fn put(&self, key: &str, data: &[u8]) -> Result<()>;
    /// Fetch the full object body for `key` into memory.
    async fn get(&self, key: &str) -> Result<Bytes>;
    /// List keys beginning with `prefix`; backends report failures as an
    /// empty list rather than an error.
    async fn list(&self, prefix: &str) -> Vec<String>;
    /// Size and mtime for `key`, or `None` when unavailable.
    async fn stat(&self, key: &str) -> Option<FileMeta>;
    /// Cheap liveness probe for the backing store.
    async fn health_check(&self) -> bool;
    /// Short static identifier (e.g. "local", "s3") for logs and metrics.
    fn backend_name(&self) -> &'static str;
}
/// Storage wrapper for dynamic dispatch
///
/// A thin, cloneable handle over an `Arc<dyn StorageBackend>`; cloning is
/// a refcount bump, and every method simply forwards to the backend.
#[derive(Clone)]
pub struct Storage {
    inner: Arc<dyn StorageBackend>,
}

impl Storage {
    /// Build a handle backed by the local filesystem rooted at `path`.
    pub fn new_local(path: &str) -> Self {
        let backend = LocalStorage::new(path);
        Self {
            inner: Arc::new(backend),
        }
    }

    /// Build a handle backed by an S3-compatible endpoint and bucket.
    pub fn new_s3(s3_url: &str, bucket: &str) -> Self {
        let backend = S3Storage::new(s3_url, bucket);
        Self {
            inner: Arc::new(backend),
        }
    }

    /// Forward to [`StorageBackend::put`].
    pub async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
        self.inner.put(key, data).await
    }

    /// Forward to [`StorageBackend::get`].
    pub async fn get(&self, key: &str) -> Result<Bytes> {
        self.inner.get(key).await
    }

    /// Forward to [`StorageBackend::list`].
    pub async fn list(&self, prefix: &str) -> Vec<String> {
        self.inner.list(prefix).await
    }

    /// Forward to [`StorageBackend::stat`].
    pub async fn stat(&self, key: &str) -> Option<FileMeta> {
        self.inner.stat(key).await
    }

    /// Forward to [`StorageBackend::health_check`].
    pub async fn health_check(&self) -> bool {
        self.inner.health_check().await
    }

    /// Name of the active backend ("local" or "s3").
    pub fn backend_name(&self) -> &'static str {
        self.inner.backend_name()
    }
}

View File

@@ -0,0 +1,129 @@
use async_trait::async_trait;
use axum::body::Bytes;
use super::{FileMeta, Result, StorageBackend, StorageError};
/// S3-compatible storage backend (MinIO, AWS S3)
///
/// Issues plain HTTP requests to `<s3_url>/<bucket>/<key>` with no request
/// signing. NOTE(review): without SigV4 this only works against buckets
/// that allow anonymous access (e.g. MinIO with a public policy) — confirm
/// that assumption for production deployments.
pub struct S3Storage {
    // Endpoint base URL, e.g. "http://minio:9000" (no trailing slash).
    s3_url: String,
    // Bucket name inserted into every request path.
    bucket: String,
    // Reused HTTP client shared by all requests.
    client: reqwest::Client,
}
impl S3Storage {
    /// Create a backend for `bucket` at the given S3-compatible endpoint.
    pub fn new(s3_url: &str, bucket: &str) -> Self {
        Self {
            s3_url: s3_url.to_string(),
            bucket: bucket.to_string(),
            client: reqwest::Client::new(),
        }
    }

    /// Extract `<Key>…</Key>` values from a ListObjects XML response,
    /// keeping only keys that start with `prefix`.
    ///
    /// Naive string scan rather than a real XML parser; assumes keys do
    /// not contain escaped `<`/`>` characters.
    fn parse_s3_keys(xml: &str, prefix: &str) -> Vec<String> {
        xml.split("<Key>")
            // The text before the first `<Key>` is not a key. Previously it
            // was kept, so with an empty prefix the XML preamble leaked
            // into the results as a phantom key.
            .skip(1)
            // Require a matching closing tag so unterminated trailing text
            // is dropped instead of being returned verbatim.
            .filter_map(|part| part.split_once("</Key>").map(|(key, _)| key))
            .filter(|key| key.starts_with(prefix))
            .map(String::from)
            .collect()
    }
}
#[async_trait]
impl StorageBackend for S3Storage {
    /// Upload `data` via HTTP PUT to `<s3_url>/<bucket>/<key>`.
    /// Any non-2xx status is reported as a `Network` error.
    async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
        // NOTE(review): `key` is interpolated without URL-encoding — keys
        // containing characters like '?' or '#' would break the URL.
        let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
        let response = self
            .client
            .put(&url)
            .body(data.to_vec())
            .send()
            .await
            .map_err(|e| StorageError::Network(e.to_string()))?;
        if response.status().is_success() {
            Ok(())
        } else {
            Err(StorageError::Network(format!(
                "PUT failed: {}",
                response.status()
            )))
        }
    }
    /// Download the full object body into memory.
    /// HTTP 404 maps to `StorageError::NotFound`; other failures to
    /// `Network`.
    async fn get(&self, key: &str) -> Result<Bytes> {
        let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
        let response = self
            .client
            .get(&url)
            .send()
            .await
            .map_err(|e| StorageError::Network(e.to_string()))?;
        if response.status().is_success() {
            response
                .bytes()
                .await
                .map_err(|e| StorageError::Network(e.to_string()))
        } else if response.status().as_u16() == 404 {
            Err(StorageError::NotFound)
        } else {
            Err(StorageError::Network(format!(
                "GET failed: {}",
                response.status()
            )))
        }
    }
    /// List keys starting with `prefix` by fetching the bucket listing and
    /// filtering client-side. Any request/parse failure yields an empty
    /// list.
    ///
    /// NOTE(review): no `prefix=`/pagination query parameters are sent, so
    /// S3's default page size caps results — confirm whether buckets can
    /// exceed one listing page in practice.
    async fn list(&self, prefix: &str) -> Vec<String> {
        let url = format!("{}/{}", self.s3_url, self.bucket);
        match self.client.get(&url).send().await {
            Ok(response) if response.status().is_success() => {
                if let Ok(xml) = response.text().await {
                    Self::parse_s3_keys(&xml, prefix)
                } else {
                    Vec::new()
                }
            }
            _ => Vec::new(),
        }
    }
    /// HEAD the object and build metadata from response headers.
    /// Returns `None` on any request failure or non-2xx status.
    async fn stat(&self, key: &str) -> Option<FileMeta> {
        let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
        let response = self.client.head(&url).send().await.ok()?;
        if !response.status().is_success() {
            return None;
        }
        // Missing or unparsable Content-Length degrades to size 0 rather
        // than failing the whole stat.
        let size = response
            .headers()
            .get("content-length")
            .and_then(|v| v.to_str().ok())
            .and_then(|v| v.parse().ok())
            .unwrap_or(0);
        // Parse the RFC 7231 Last-Modified header via `httpdate`; falls
        // back to 0 (the epoch) when the header is absent or malformed.
        let modified = response
            .headers()
            .get("last-modified")
            .and_then(|v| v.to_str().ok())
            .and_then(|v| httpdate::parse_http_date(v).ok())
            .map(|t| {
                t.duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs()
            })
            .unwrap_or(0);
        Some(FileMeta { size, modified })
    }
    /// Healthy when the endpoint answers a HEAD on the bucket URL.
    /// 404 still counts as healthy: the server is reachable even if the
    /// bucket does not exist yet.
    async fn health_check(&self) -> bool {
        let url = format!("{}/{}", self.s3_url, self.bucket);
        match self.client.head(&url).send().await {
            Ok(response) => response.status().is_success() || response.status().as_u16() == 404,
            Err(_) => false,
        }
    }
    fn backend_name(&self) -> &'static str {
        "s3"
    }
}