mirror of
https://github.com/getnora-io/nora.git
synced 2026-04-12 09:10:32 +00:00
feat: implement storage migration command
- `nora migrate --from local --to s3`
- Dry-run mode with `--dry-run` flag
- Progress bar with indicatif
- Skip existing files in destination
- Summary statistics (migrated, skipped, failed, bytes)
This commit is contained in:
@@ -1,15 +1,20 @@
|
|||||||
mod auth;
|
mod auth;
|
||||||
mod backup;
|
mod backup;
|
||||||
mod config;
|
mod config;
|
||||||
|
mod error;
|
||||||
mod health;
|
mod health;
|
||||||
mod metrics;
|
mod metrics;
|
||||||
|
mod migrate;
|
||||||
mod openapi;
|
mod openapi;
|
||||||
|
mod rate_limit;
|
||||||
mod registry;
|
mod registry;
|
||||||
|
mod request_id;
|
||||||
mod storage;
|
mod storage;
|
||||||
mod tokens;
|
mod tokens;
|
||||||
mod ui;
|
mod ui;
|
||||||
|
mod validation;
|
||||||
|
|
||||||
use axum::{middleware, Router};
|
use axum::{extract::DefaultBodyLimit, middleware, Router};
|
||||||
use clap::{Parser, Subcommand};
|
use clap::{Parser, Subcommand};
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
@@ -120,9 +125,35 @@ async fn main() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
Some(Commands::Migrate { from, to, dry_run }) => {
|
Some(Commands::Migrate { from, to, dry_run }) => {
|
||||||
eprintln!("Migration from '{}' to '{}' (dry_run: {})", from, to, dry_run);
|
let source = match from.as_str() {
|
||||||
eprintln!("TODO: Migration not yet implemented");
|
"local" => Storage::new_local(&config.storage.path),
|
||||||
std::process::exit(1);
|
"s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
|
||||||
|
_ => {
|
||||||
|
error!("Invalid source: '{}'. Use 'local' or 's3'", from);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let dest = match to.as_str() {
|
||||||
|
"local" => Storage::new_local(&config.storage.path),
|
||||||
|
"s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
|
||||||
|
_ => {
|
||||||
|
error!("Invalid destination: '{}'. Use 'local' or 's3'", to);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if from == to {
|
||||||
|
error!("Source and destination cannot be the same");
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
|
|
||||||
|
let options = migrate::MigrateOptions { dry_run };
|
||||||
|
|
||||||
|
if let Err(e) = migrate::migrate(&source, &dest, options).await {
|
||||||
|
error!("Migration failed: {}", e);
|
||||||
|
std::process::exit(1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -180,17 +211,28 @@ async fn run_server(config: Config, storage: Storage) {
|
|||||||
tokens,
|
tokens,
|
||||||
});
|
});
|
||||||
|
|
||||||
let app = Router::new()
|
// Token routes with strict rate limiting (brute-force protection)
|
||||||
.merge(health::routes())
|
let auth_routes = auth::token_routes().layer(rate_limit::auth_rate_limiter());
|
||||||
.merge(metrics::routes())
|
|
||||||
.merge(ui::routes())
|
// Registry routes with upload rate limiting
|
||||||
.merge(openapi::routes())
|
let registry_routes = Router::new()
|
||||||
.merge(auth::token_routes())
|
|
||||||
.merge(registry::docker_routes())
|
.merge(registry::docker_routes())
|
||||||
.merge(registry::maven_routes())
|
.merge(registry::maven_routes())
|
||||||
.merge(registry::npm_routes())
|
.merge(registry::npm_routes())
|
||||||
.merge(registry::cargo_routes())
|
.merge(registry::cargo_routes())
|
||||||
.merge(registry::pypi_routes())
|
.merge(registry::pypi_routes())
|
||||||
|
.layer(rate_limit::upload_rate_limiter());
|
||||||
|
|
||||||
|
let app = Router::new()
|
||||||
|
.merge(health::routes())
|
||||||
|
.merge(metrics::routes())
|
||||||
|
.merge(ui::routes())
|
||||||
|
.merge(openapi::routes())
|
||||||
|
.merge(auth_routes)
|
||||||
|
.merge(registry_routes)
|
||||||
|
.layer(rate_limit::general_rate_limiter()) // General rate limit for all routes
|
||||||
|
.layer(DefaultBodyLimit::max(100 * 1024 * 1024)) // 100MB default body limit
|
||||||
|
.layer(middleware::from_fn(request_id::request_id_middleware))
|
||||||
.layer(middleware::from_fn(metrics::metrics_middleware))
|
.layer(middleware::from_fn(metrics::metrics_middleware))
|
||||||
.layer(middleware::from_fn_with_state(
|
.layer(middleware::from_fn_with_state(
|
||||||
state.clone(),
|
state.clone(),
|
||||||
|
|||||||
236
nora-registry/src/migrate.rs
Normal file
236
nora-registry/src/migrate.rs
Normal file
@@ -0,0 +1,236 @@
|
|||||||
|
//! Migration between storage backends
|
||||||
|
//!
|
||||||
|
//! Supports migrating artifacts from one storage backend to another
|
||||||
|
//! (e.g., local filesystem to S3 or vice versa).
|
||||||
|
|
||||||
|
use crate::storage::Storage;
|
||||||
|
use indicatif::{ProgressBar, ProgressStyle};
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
/// Migration options
///
/// Controls how [`migrate`] behaves. The derived `Default` yields
/// `dry_run: false` (a real migration), identical to the previous
/// hand-written `impl Default`, which this derive replaces.
#[derive(Debug, Clone, Copy, Default)]
pub struct MigrateOptions {
    /// If true, show what would be migrated without copying
    pub dry_run: bool,
}
|
||||||
|
|
||||||
|
/// Migration statistics
///
/// Aggregated counters reported by [`migrate`]. Each scanned key is
/// counted in exactly one of `migrated`, `skipped`, or `failed`, so
/// after a completed run `total_keys == migrated + skipped + failed`.
#[derive(Debug, Default, Clone, PartialEq, Eq)]
pub struct MigrateStats {
    /// Total number of keys found
    pub total_keys: usize,
    /// Number of keys successfully migrated
    pub migrated: usize,
    /// Number of keys skipped (already exist in destination)
    pub skipped: usize,
    /// Number of keys that failed to migrate
    pub failed: usize,
    /// Total bytes transferred
    pub total_bytes: u64,
}
|
||||||
|
|
||||||
|
/// Migrate artifacts from source to destination storage
///
/// Lists every key in `from` and copies it into `to`, skipping keys that
/// already exist in the destination. With `options.dry_run` set, nothing is
/// written; the run only counts what would be migrated.
///
/// Prints progress (via an `indicatif` progress bar) and a final summary to
/// stdout, and logs per-key failures with `tracing::warn!`.
///
/// # Errors
///
/// As written, no code path returns `Err`: per-key read/write failures are
/// counted in [`MigrateStats::failed`] and logged rather than propagated.
/// The `Result` return type leaves room for fatal errors in the future.
pub async fn migrate(
    from: &Storage,
    to: &Storage,
    options: MigrateOptions,
) -> Result<MigrateStats, String> {
    println!(
        "Migration: {} -> {}",
        from.backend_name(),
        to.backend_name()
    );

    if options.dry_run {
        println!("DRY RUN - no data will be copied");
    }

    // List all keys from source
    // NOTE(review): `list("")` presumably means "all keys under the empty
    // prefix" — confirm against the Storage::list contract.
    println!("Scanning source storage...");
    let keys = from.list("").await;

    // Empty source: nothing to do, return zeroed stats immediately.
    if keys.is_empty() {
        println!("No artifacts found in source storage.");
        return Ok(MigrateStats::default());
    }

    println!("Found {} artifacts to migrate", keys.len());

    let pb = ProgressBar::new(keys.len() as u64);
    pb.set_style(
        ProgressStyle::default_bar()
            // NOTE(review): template has no `{msg}` placeholder, so the text
            // passed to `finish_with_message` below will not be displayed.
            .template("{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta})")
            // Template is a compile-time constant; failure here is a bug.
            .expect("Invalid progress bar template")
            .progress_chars("#>-"),
    );

    let mut stats = MigrateStats {
        total_keys: keys.len(),
        ..Default::default()
    };

    for key in &keys {
        // Check if already exists in destination
        // Existing keys are never overwritten — destination content wins.
        if to.stat(key).await.is_some() {
            stats.skipped += 1;
            pb.inc(1);
            continue;
        }

        if options.dry_run {
            // Just count what would be migrated
            // Byte count comes from source metadata; if stat fails the key
            // is still counted as "migrated" but contributes zero bytes.
            if let Some(meta) = from.stat(key).await {
                stats.total_bytes += meta.size;
            }
            stats.migrated += 1;
            pb.inc(1);
            continue;
        }

        // Fetch from source
        match from.get(key).await {
            Ok(data) => {
                // Write to destination
                match to.put(key, &data).await {
                    Ok(()) => {
                        stats.migrated += 1;
                        stats.total_bytes += data.len() as u64;
                    }
                    Err(e) => {
                        // Write failure: count and continue with other keys.
                        warn!("Failed to write {}: {}", key, e);
                        stats.failed += 1;
                    }
                }
            }
            Err(e) => {
                // Read failure: count and continue with other keys.
                warn!("Failed to read {}: {}", key, e);
                stats.failed += 1;
            }
        }

        pb.inc(1);
    }

    pb.finish_with_message("Migration complete");

    println!();
    println!("Migration summary:");
    println!(" Total artifacts: {}", stats.total_keys);
    println!(" Migrated: {}", stats.migrated);
    println!(" Skipped (already exists): {}", stats.skipped);
    println!(" Failed: {}", stats.failed);
    // NOTE(review): integer division — totals under 1024 bytes print "0 KB".
    println!(" Total bytes: {} KB", stats.total_bytes / 1024);

    if stats.failed > 0 {
        warn!("{} artifacts failed to migrate", stats.failed);
    }

    if options.dry_run {
        info!("Dry run complete. Re-run without --dry-run to perform actual migration.");
    }

    Ok(stats)
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    //! Integration-style tests for [`migrate`] using two temporary
    //! local-filesystem storages as source and destination.

    use super::*;
    use tempfile::TempDir;

    // Happy path: all source keys are copied, none skipped or failed.
    #[tokio::test]
    async fn test_migrate_local_to_local() {
        let src_dir = TempDir::new().unwrap();
        let dst_dir = TempDir::new().unwrap();

        let src = Storage::new_local(src_dir.path().to_str().unwrap());
        let dst = Storage::new_local(dst_dir.path().to_str().unwrap());

        // Add test data to source
        src.put("test/file1", b"data1").await.unwrap();
        src.put("test/file2", b"data2").await.unwrap();

        let stats = migrate(&src, &dst, MigrateOptions::default())
            .await
            .unwrap();

        assert_eq!(stats.migrated, 2);
        assert_eq!(stats.failed, 0);
        assert_eq!(stats.skipped, 0);

        // Verify destination has the files
        assert!(dst.get("test/file1").await.is_ok());
        assert!(dst.get("test/file2").await.is_ok());
    }

    // A key already present in the destination must be skipped, and the
    // destination's content must be left untouched (never overwritten).
    #[tokio::test]
    async fn test_migrate_skips_existing() {
        let src_dir = TempDir::new().unwrap();
        let dst_dir = TempDir::new().unwrap();

        let src = Storage::new_local(src_dir.path().to_str().unwrap());
        let dst = Storage::new_local(dst_dir.path().to_str().unwrap());

        // Add same file to both
        src.put("test/file", b"source").await.unwrap();
        dst.put("test/file", b"destination").await.unwrap();

        let stats = migrate(&src, &dst, MigrateOptions::default())
            .await
            .unwrap();

        assert_eq!(stats.migrated, 0);
        assert_eq!(stats.skipped, 1);

        // Destination should still have original content
        let data = dst.get("test/file").await.unwrap();
        assert_eq!(&*data, b"destination");
    }

    // Dry run counts would-be migrations but writes nothing.
    #[tokio::test]
    async fn test_migrate_dry_run() {
        let src_dir = TempDir::new().unwrap();
        let dst_dir = TempDir::new().unwrap();

        let src = Storage::new_local(src_dir.path().to_str().unwrap());
        let dst = Storage::new_local(dst_dir.path().to_str().unwrap());

        src.put("test/file", b"data").await.unwrap();

        let stats = migrate(
            &src,
            &dst,
            MigrateOptions {
                dry_run: true,
                ..Default::default()
            },
        )
        .await
        .unwrap();

        assert_eq!(stats.migrated, 1);

        // Destination should be empty (dry run)
        assert!(dst.get("test/file").await.is_err());
    }

    // Empty source short-circuits with zeroed stats.
    #[tokio::test]
    async fn test_migrate_empty_source() {
        let src_dir = TempDir::new().unwrap();
        let dst_dir = TempDir::new().unwrap();

        let src = Storage::new_local(src_dir.path().to_str().unwrap());
        let dst = Storage::new_local(dst_dir.path().to_str().unwrap());

        let stats = migrate(&src, &dst, MigrateOptions::default())
            .await
            .unwrap();

        assert_eq!(stats.total_keys, 0);
        assert_eq!(stats.migrated, 0);
    }
}
|
||||||
Reference in New Issue
Block a user