fix: persist dashboard metrics and count versions instead of repos

Metrics (downloads, uploads, cache hits) were stored in-memory only
and reset to zero on every restart. Now they persist to metrics.json
in the storage directory with:
- Load on startup from {storage_path}/metrics.json
- Background save every 30 seconds
- Final save on graceful shutdown
- Atomic writes (tmp + rename) to prevent corruption

Artifact count on dashboard now shows total tags/versions across
all registries instead of just counting unique repository names.
This matches user expectations when pushing multiple tags to the
same image (e.g. myapp:v1, myapp:v2 now shows 2, not 1).
This commit is contained in:
2026-03-13 15:43:03 +00:00
parent b80c7c5160
commit 61de6c6ddd
3 changed files with 119 additions and 11 deletions

View File

@@ -1,8 +1,29 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use serde::{Deserialize, Serialize};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;
use tracing::{info, warn};
/// Serializable snapshot of metrics for persistence
#[derive(Serialize, Deserialize, Default)]
struct MetricsSnapshot {
downloads: u64,
uploads: u64,
cache_hits: u64,
cache_misses: u64,
docker_downloads: u64,
docker_uploads: u64,
npm_downloads: u64,
maven_downloads: u64,
maven_uploads: u64,
cargo_downloads: u64,
pypi_downloads: u64,
raw_downloads: u64,
raw_uploads: u64,
}
/// Dashboard metrics for tracking registry activity
/// Uses atomic counters for thread-safe access without locks
@@ -25,6 +46,9 @@ pub struct DashboardMetrics {
pub raw_uploads: AtomicU64,
pub start_time: Instant,
/// Path to metrics.json for persistence
persist_path: Option<PathBuf>,
}
impl DashboardMetrics {
@@ -44,6 +68,75 @@ impl DashboardMetrics {
raw_downloads: AtomicU64::new(0),
raw_uploads: AtomicU64::new(0),
start_time: Instant::now(),
persist_path: None,
}
}
/// Create metrics with persistence — loads existing data from metrics.json.
///
/// Seeds the counters from `{storage_path}/metrics.json` when it exists;
/// otherwise starts from zero. Read or parse failures are logged with
/// `warn!` and degrade gracefully to fresh counters — metrics loading
/// must never prevent startup.
pub fn with_persistence(storage_path: &str) -> Self {
    let path = Path::new(storage_path).join("metrics.json");
    let mut metrics = Self::new();
    metrics.persist_path = Some(path.clone());
    // Read directly instead of exists()-then-read: avoids a TOCTOU race
    // and lets us distinguish "no file yet" (normal first run, silent)
    // from a real I/O error (worth a warning).
    match std::fs::read_to_string(&path) {
        Ok(data) => match serde_json::from_str::<MetricsSnapshot>(&data) {
            Ok(snap) => {
                metrics.downloads = AtomicU64::new(snap.downloads);
                metrics.uploads = AtomicU64::new(snap.uploads);
                metrics.cache_hits = AtomicU64::new(snap.cache_hits);
                metrics.cache_misses = AtomicU64::new(snap.cache_misses);
                metrics.docker_downloads = AtomicU64::new(snap.docker_downloads);
                metrics.docker_uploads = AtomicU64::new(snap.docker_uploads);
                metrics.npm_downloads = AtomicU64::new(snap.npm_downloads);
                metrics.maven_downloads = AtomicU64::new(snap.maven_downloads);
                metrics.maven_uploads = AtomicU64::new(snap.maven_uploads);
                metrics.cargo_downloads = AtomicU64::new(snap.cargo_downloads);
                metrics.pypi_downloads = AtomicU64::new(snap.pypi_downloads);
                metrics.raw_downloads = AtomicU64::new(snap.raw_downloads);
                metrics.raw_uploads = AtomicU64::new(snap.raw_uploads);
                info!(
                    downloads = snap.downloads,
                    uploads = snap.uploads,
                    "Loaded persisted metrics"
                );
            }
            Err(e) => warn!("Failed to parse metrics.json: {}", e),
        },
        // First run: no persisted metrics yet — nothing to load.
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
        Err(e) => warn!("Failed to read metrics.json: {}", e),
    }
    metrics
}
/// Save current metrics to disk
pub fn save(&self) {
let Some(path) = &self.persist_path else {
return;
};
let snap = MetricsSnapshot {
downloads: self.downloads.load(Ordering::Relaxed),
uploads: self.uploads.load(Ordering::Relaxed),
cache_hits: self.cache_hits.load(Ordering::Relaxed),
cache_misses: self.cache_misses.load(Ordering::Relaxed),
docker_downloads: self.docker_downloads.load(Ordering::Relaxed),
docker_uploads: self.docker_uploads.load(Ordering::Relaxed),
npm_downloads: self.npm_downloads.load(Ordering::Relaxed),
maven_downloads: self.maven_downloads.load(Ordering::Relaxed),
maven_uploads: self.maven_uploads.load(Ordering::Relaxed),
cargo_downloads: self.cargo_downloads.load(Ordering::Relaxed),
pypi_downloads: self.pypi_downloads.load(Ordering::Relaxed),
raw_downloads: self.raw_downloads.load(Ordering::Relaxed),
raw_uploads: self.raw_uploads.load(Ordering::Relaxed),
};
// Atomic write: write to tmp then rename
let tmp = path.with_extension("json.tmp");
if let Ok(data) = serde_json::to_string_pretty(&snap) {
if std::fs::write(&tmp, &data).is_ok() {
let _ = std::fs::rename(&tmp, path);
}
}
}

View File

@@ -336,7 +336,7 @@ async fn run_server(config: Config, storage: Storage) {
start_time,
auth,
tokens,
metrics: DashboardMetrics::new(),
metrics: DashboardMetrics::with_persistence(&storage_path),
activity: ActivityLog::new(50),
audit: AuditLog::new(&storage_path),
docker_auth,
@@ -387,6 +387,16 @@ async fn run_server(config: Config, storage: Storage) {
"Available endpoints"
);
// Background task: persist metrics every 30 seconds
let metrics_state = state.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(30));
loop {
interval.tick().await;
metrics_state.metrics.save();
}
});
// Graceful shutdown on SIGTERM/SIGINT
axum::serve(
listener,
@@ -396,6 +406,9 @@ async fn run_server(config: Config, storage: Storage) {
.await
.expect("Server error");
// Save metrics on shutdown
state.metrics.save();
info!(
uptime_seconds = state.start_time.elapsed().as_secs(),
"Nora shutdown complete"

View File

@@ -141,11 +141,13 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
let pypi_size: u64 = pypi_repos.iter().map(|r| r.size).sum();
let total_storage = docker_size + maven_size + npm_size + cargo_size + pypi_size;
let total_artifacts = docker_repos.len()
+ maven_repos.len()
+ npm_repos.len()
+ cargo_repos.len()
+ pypi_repos.len();
// Count total versions/tags, not just repositories
let docker_versions: usize = docker_repos.iter().map(|r| r.versions).sum();
let maven_versions: usize = maven_repos.iter().map(|r| r.versions).sum();
let npm_versions: usize = npm_repos.iter().map(|r| r.versions).sum();
let cargo_versions: usize = cargo_repos.iter().map(|r| r.versions).sum();
let pypi_versions: usize = pypi_repos.iter().map(|r| r.versions).sum();
let total_artifacts = docker_versions + maven_versions + npm_versions + cargo_versions + pypi_versions;
let global_stats = GlobalStats {
downloads: state.metrics.downloads.load(Ordering::Relaxed),
@@ -158,35 +160,35 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
let registry_card_stats = vec![
RegistryCardStats {
name: "docker".to_string(),
artifact_count: docker_repos.len(),
artifact_count: docker_versions,
downloads: state.metrics.get_registry_downloads("docker"),
uploads: state.metrics.get_registry_uploads("docker"),
size_bytes: docker_size,
},
RegistryCardStats {
name: "maven".to_string(),
artifact_count: maven_repos.len(),
artifact_count: maven_versions,
downloads: state.metrics.get_registry_downloads("maven"),
uploads: state.metrics.get_registry_uploads("maven"),
size_bytes: maven_size,
},
RegistryCardStats {
name: "npm".to_string(),
artifact_count: npm_repos.len(),
artifact_count: npm_versions,
downloads: state.metrics.get_registry_downloads("npm"),
uploads: 0,
size_bytes: npm_size,
},
RegistryCardStats {
name: "cargo".to_string(),
artifact_count: cargo_repos.len(),
artifact_count: cargo_versions,
downloads: state.metrics.get_registry_downloads("cargo"),
uploads: 0,
size_bytes: cargo_size,
},
RegistryCardStats {
name: "pypi".to_string(),
artifact_count: pypi_repos.len(),
artifact_count: pypi_versions,
downloads: state.metrics.get_registry_downloads("pypi"),
uploads: 0,
size_bytes: pypi_size,