mirror of
https://github.com/getnora-io/nora.git
synced 2026-04-12 09:10:32 +00:00
Metrics (downloads, uploads, cache hits) were stored in-memory only
and reset to zero on every restart. Now they persist to metrics.json
in the storage directory with:
- Load on startup from {storage_path}/metrics.json
- Background save every 30 seconds
- Final save on graceful shutdown
- Atomic writes (tmp + rename) to prevent corruption
Artifact count on dashboard now shows total tags/versions across
all registries instead of just counting unique repository names.
This matches user expectations when pushing multiple tags to the
same image (e.g. myapp:v1, myapp:v2 now shows 2, not 1).
894 lines
30 KiB
Rust
894 lines
30 KiB
Rust
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
|
// SPDX-License-Identifier: MIT
|
|
|
|
use super::components::{format_size, format_timestamp, html_escape};
|
|
use super::templates::encode_uri_component;
|
|
use crate::activity_log::ActivityEntry;
|
|
use crate::repo_index::RepoInfo;
|
|
use crate::AppState;
|
|
use crate::Storage;
|
|
use axum::{
|
|
extract::{Path, Query, State},
|
|
response::Json,
|
|
};
|
|
use serde::{Deserialize, Serialize};
|
|
use std::collections::{HashMap, HashSet};
|
|
use std::sync::atomic::Ordering;
|
|
use std::sync::Arc;
|
|
|
|
/// Per-registry count of unique repositories/packages, returned by `api_stats`.
#[derive(Serialize)]
pub struct RegistryStats {
    pub docker: usize,
    pub maven: usize,
    pub npm: usize,
    pub cargo: usize,
    pub pypi: usize,
}
|
|
|
|
/// One Docker tag as shown on the image detail page.
#[derive(Serialize)]
pub struct TagInfo {
    pub name: String,
    // Image size in bytes (config blob + layer sizes from the manifest).
    pub size: u64,
    // Human-formatted push/creation timestamp; "N/A" when unknown.
    pub created: String,
    pub downloads: u64,
    // Human-formatted timestamp of the last pull, if the tag was ever pulled.
    pub last_pulled: Option<String>,
    // "unknown" when the stored metadata has no os/arch.
    pub os: String,
    pub arch: String,
    pub layers_count: usize,
    // Ready-to-copy `docker pull host/name:tag` command.
    pub pull_command: String,
}
|
|
|
|
/// Detail payload for a Docker repository: all of its tags.
#[derive(Serialize)]
pub struct DockerDetail {
    pub tags: Vec<TagInfo>,
}
|
|
|
|
/// One published version of a package (npm/cargo/pypi detail pages).
#[derive(Serialize)]
pub struct VersionInfo {
    pub version: String,
    // Size in bytes; 0 when not known.
    pub size: u64,
    // Human-formatted publish date; "N/A" when unknown.
    pub published: String,
}
|
|
|
|
/// Detail payload for a versioned package: its version list.
#[derive(Serialize)]
pub struct PackageDetail {
    pub versions: Vec<VersionInfo>,
}
|
|
|
|
/// One file directly under a Maven artifact directory.
#[derive(Serialize)]
pub struct MavenArtifact {
    pub filename: String,
    pub size: u64,
}
|
|
|
|
/// Detail payload for a Maven artifact path: its files.
#[derive(Serialize)]
pub struct MavenDetail {
    pub artifacts: Vec<MavenArtifact>,
}
|
|
|
|
/// Query-string parameters for `api_search`; `?q=` is optional.
#[derive(Deserialize)]
pub struct SearchQuery {
    pub q: Option<String>,
}
|
|
|
|
/// Full dashboard payload returned by `api_dashboard`.
#[derive(Serialize)]
pub struct DashboardResponse {
    pub global_stats: GlobalStats,
    // One card per registry (docker, maven, npm, cargo, pypi).
    pub registry_stats: Vec<RegistryCardStats>,
    pub mount_points: Vec<MountPoint>,
    // Most recent activity-log entries (currently the last 20).
    pub activity: Vec<ActivityEntry>,
    pub uptime_seconds: u64,
}
|
|
|
|
/// Server-wide counters shown at the top of the dashboard.
#[derive(Serialize)]
pub struct GlobalStats {
    pub downloads: u64,
    pub uploads: u64,
    // Total tags/versions across all registries (not unique repo names).
    pub artifacts: u64,
    pub cache_hit_percent: f64,
    pub storage_bytes: u64,
}
|
|
|
|
/// Per-registry summary card on the dashboard.
#[derive(Serialize)]
pub struct RegistryCardStats {
    pub name: String,
    // Number of tags/versions in this registry.
    pub artifact_count: usize,
    pub downloads: u64,
    pub uploads: u64,
    pub size_bytes: u64,
}
|
|
|
|
/// HTTP mount point of one registry, plus its configured upstream proxy (if any).
#[derive(Serialize)]
pub struct MountPoint {
    pub registry: String,
    pub mount_path: String,
    pub proxy_upstream: Option<String>,
}
|
|
|
|
// ============ API Handlers ============
|
|
|
|
pub async fn api_stats(State(state): State<Arc<AppState>>) -> Json<RegistryStats> {
|
|
// Trigger index rebuild if needed, then get counts
|
|
let _ = state.repo_index.get("docker", &state.storage).await;
|
|
let _ = state.repo_index.get("maven", &state.storage).await;
|
|
let _ = state.repo_index.get("npm", &state.storage).await;
|
|
let _ = state.repo_index.get("cargo", &state.storage).await;
|
|
let _ = state.repo_index.get("pypi", &state.storage).await;
|
|
|
|
let (docker, maven, npm, cargo, pypi) = state.repo_index.counts();
|
|
Json(RegistryStats {
|
|
docker,
|
|
maven,
|
|
npm,
|
|
cargo,
|
|
pypi,
|
|
})
|
|
}
|
|
|
|
pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<DashboardResponse> {
|
|
// Get indexes (will rebuild if dirty)
|
|
let docker_repos = state.repo_index.get("docker", &state.storage).await;
|
|
let maven_repos = state.repo_index.get("maven", &state.storage).await;
|
|
let npm_repos = state.repo_index.get("npm", &state.storage).await;
|
|
let cargo_repos = state.repo_index.get("cargo", &state.storage).await;
|
|
let pypi_repos = state.repo_index.get("pypi", &state.storage).await;
|
|
|
|
// Calculate sizes from cached index
|
|
let docker_size: u64 = docker_repos.iter().map(|r| r.size).sum();
|
|
let maven_size: u64 = maven_repos.iter().map(|r| r.size).sum();
|
|
let npm_size: u64 = npm_repos.iter().map(|r| r.size).sum();
|
|
let cargo_size: u64 = cargo_repos.iter().map(|r| r.size).sum();
|
|
let pypi_size: u64 = pypi_repos.iter().map(|r| r.size).sum();
|
|
let total_storage = docker_size + maven_size + npm_size + cargo_size + pypi_size;
|
|
|
|
// Count total versions/tags, not just repositories
|
|
let docker_versions: usize = docker_repos.iter().map(|r| r.versions).sum();
|
|
let maven_versions: usize = maven_repos.iter().map(|r| r.versions).sum();
|
|
let npm_versions: usize = npm_repos.iter().map(|r| r.versions).sum();
|
|
let cargo_versions: usize = cargo_repos.iter().map(|r| r.versions).sum();
|
|
let pypi_versions: usize = pypi_repos.iter().map(|r| r.versions).sum();
|
|
let total_artifacts = docker_versions + maven_versions + npm_versions + cargo_versions + pypi_versions;
|
|
|
|
let global_stats = GlobalStats {
|
|
downloads: state.metrics.downloads.load(Ordering::Relaxed),
|
|
uploads: state.metrics.uploads.load(Ordering::Relaxed),
|
|
artifacts: total_artifacts as u64,
|
|
cache_hit_percent: state.metrics.cache_hit_rate(),
|
|
storage_bytes: total_storage,
|
|
};
|
|
|
|
let registry_card_stats = vec![
|
|
RegistryCardStats {
|
|
name: "docker".to_string(),
|
|
artifact_count: docker_versions,
|
|
downloads: state.metrics.get_registry_downloads("docker"),
|
|
uploads: state.metrics.get_registry_uploads("docker"),
|
|
size_bytes: docker_size,
|
|
},
|
|
RegistryCardStats {
|
|
name: "maven".to_string(),
|
|
artifact_count: maven_versions,
|
|
downloads: state.metrics.get_registry_downloads("maven"),
|
|
uploads: state.metrics.get_registry_uploads("maven"),
|
|
size_bytes: maven_size,
|
|
},
|
|
RegistryCardStats {
|
|
name: "npm".to_string(),
|
|
artifact_count: npm_versions,
|
|
downloads: state.metrics.get_registry_downloads("npm"),
|
|
uploads: 0,
|
|
size_bytes: npm_size,
|
|
},
|
|
RegistryCardStats {
|
|
name: "cargo".to_string(),
|
|
artifact_count: cargo_versions,
|
|
downloads: state.metrics.get_registry_downloads("cargo"),
|
|
uploads: 0,
|
|
size_bytes: cargo_size,
|
|
},
|
|
RegistryCardStats {
|
|
name: "pypi".to_string(),
|
|
artifact_count: pypi_versions,
|
|
downloads: state.metrics.get_registry_downloads("pypi"),
|
|
uploads: 0,
|
|
size_bytes: pypi_size,
|
|
},
|
|
];
|
|
|
|
let mount_points = vec![
|
|
MountPoint {
|
|
registry: "Docker".to_string(),
|
|
mount_path: "/v2/".to_string(),
|
|
proxy_upstream: None,
|
|
},
|
|
MountPoint {
|
|
registry: "Maven".to_string(),
|
|
mount_path: "/maven2/".to_string(),
|
|
proxy_upstream: state.config.maven.proxies.first().cloned(),
|
|
},
|
|
MountPoint {
|
|
registry: "npm".to_string(),
|
|
mount_path: "/npm/".to_string(),
|
|
proxy_upstream: state.config.npm.proxy.clone(),
|
|
},
|
|
MountPoint {
|
|
registry: "Cargo".to_string(),
|
|
mount_path: "/cargo/".to_string(),
|
|
proxy_upstream: None,
|
|
},
|
|
MountPoint {
|
|
registry: "PyPI".to_string(),
|
|
mount_path: "/simple/".to_string(),
|
|
proxy_upstream: state.config.pypi.proxy.clone(),
|
|
},
|
|
];
|
|
|
|
let activity = state.activity.recent(20);
|
|
let uptime_seconds = state.start_time.elapsed().as_secs();
|
|
|
|
Json(DashboardResponse {
|
|
global_stats,
|
|
registry_stats: registry_card_stats,
|
|
mount_points,
|
|
activity,
|
|
uptime_seconds,
|
|
})
|
|
}
|
|
|
|
pub async fn api_list(
|
|
State(state): State<Arc<AppState>>,
|
|
Path(registry_type): Path<String>,
|
|
) -> Json<Vec<RepoInfo>> {
|
|
let repos = state.repo_index.get(®istry_type, &state.storage).await;
|
|
Json((*repos).clone())
|
|
}
|
|
|
|
pub async fn api_detail(
|
|
State(state): State<Arc<AppState>>,
|
|
Path((registry_type, name)): Path<(String, String)>,
|
|
) -> Json<serde_json::Value> {
|
|
match registry_type.as_str() {
|
|
"docker" => {
|
|
let detail = get_docker_detail(&state, &name).await;
|
|
Json(serde_json::to_value(detail).unwrap_or_default())
|
|
}
|
|
"npm" => {
|
|
let detail = get_npm_detail(&state.storage, &name).await;
|
|
Json(serde_json::to_value(detail).unwrap_or_default())
|
|
}
|
|
"cargo" => {
|
|
let detail = get_cargo_detail(&state.storage, &name).await;
|
|
Json(serde_json::to_value(detail).unwrap_or_default())
|
|
}
|
|
_ => Json(serde_json::json!({})),
|
|
}
|
|
}
|
|
|
|
/// HTMX search endpoint: filters the cached repository index of one registry
/// by a case-insensitive substring of the repo name and returns table rows
/// as an HTML fragment (not a full page).
pub async fn api_search(
    State(state): State<Arc<AppState>>,
    Path(registry_type): Path<String>,
    Query(params): Query<SearchQuery>,
) -> axum::response::Html<String> {
    // Missing ?q= means "no filter".
    let query = params.q.unwrap_or_default().to_lowercase();

    let repos = state.repo_index.get(&registry_type, &state.storage).await;

    let filtered: Vec<&RepoInfo> = if query.is_empty() {
        repos.iter().collect()
    } else {
        repos
            .iter()
            .filter(|r| r.name.to_lowercase().contains(&query))
            .collect()
    };

    // Return HTML fragment for HTMX
    let html = if filtered.is_empty() {
        r#"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
<div class="text-4xl mb-2">🔍</div>
<div>No matching repositories found</div>
</td></tr>"#
            .to_string()
    } else {
        filtered
            .iter()
            .map(|repo| {
                // Names may need URI encoding (e.g. scoped npm packages);
                // the visible name is HTML-escaped separately below.
                let detail_url =
                    format!("/ui/{}/{}", registry_type, encode_uri_component(&repo.name));
                format!(
                    r#"
<tr class="hover:bg-slate-50 cursor-pointer" onclick="window.location='{}'">
<td class="px-6 py-4">
<a href="{}" class="text-blue-600 hover:text-blue-800 font-medium">{}</a>
</td>
<td class="px-6 py-4 text-slate-600">{}</td>
<td class="px-6 py-4 text-slate-600">{}</td>
<td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"#,
                    detail_url,
                    detail_url,
                    html_escape(&repo.name),
                    repo.versions,
                    format_size(repo.size),
                    &repo.updated
                )
            })
            .collect::<Vec<_>>()
            .join("")
    };

    axum::response::Html(html)
}
|
|
|
|
// ============ Data Fetching Functions ============
|
|
// NOTE: Legacy functions below - kept for reference, will be removed in future cleanup
|
|
|
|
#[allow(dead_code)]
|
|
pub async fn get_registry_stats(storage: &Storage) -> RegistryStats {
|
|
let all_keys = storage.list("").await;
|
|
|
|
let docker = all_keys
|
|
.iter()
|
|
.filter(|k| k.starts_with("docker/") && k.contains("/manifests/"))
|
|
.filter_map(|k| k.split('/').nth(1))
|
|
.collect::<HashSet<_>>()
|
|
.len();
|
|
|
|
let maven = all_keys
|
|
.iter()
|
|
.filter(|k| k.starts_with("maven/"))
|
|
.filter_map(|k| {
|
|
// Extract groupId/artifactId from maven path
|
|
let parts: Vec<_> = k.strip_prefix("maven/")?.split('/').collect();
|
|
if parts.len() >= 2 {
|
|
Some(parts[..parts.len() - 1].join("/"))
|
|
} else {
|
|
None
|
|
}
|
|
})
|
|
.collect::<HashSet<_>>()
|
|
.len();
|
|
|
|
let npm = all_keys
|
|
.iter()
|
|
.filter(|k| k.starts_with("npm/") && k.ends_with("/metadata.json"))
|
|
.count();
|
|
|
|
let cargo = all_keys
|
|
.iter()
|
|
.filter(|k| k.starts_with("cargo/") && k.ends_with("/metadata.json"))
|
|
.count();
|
|
|
|
let pypi = all_keys
|
|
.iter()
|
|
.filter(|k| k.starts_with("pypi/"))
|
|
.filter_map(|k| k.strip_prefix("pypi/")?.split('/').next())
|
|
.collect::<HashSet<_>>()
|
|
.len();
|
|
|
|
RegistryStats {
|
|
docker,
|
|
maven,
|
|
npm,
|
|
cargo,
|
|
pypi,
|
|
}
|
|
}
|
|
|
|
/// Legacy full-scan of the docker/ tree building one `RepoInfo` per image.
/// Superseded by the cached repo index; kept for reference.
///
/// A "version" is counted per manifest JSON; image size is the sum of the
/// config-blob size plus all layer sizes declared inside each manifest.
#[allow(dead_code)]
pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("docker/").await;

    let mut repos: HashMap<String, (RepoInfo, u64)> = HashMap::new(); // (info, latest_modified)

    for key in &keys {
        // Skip .meta.json files
        if key.ends_with(".meta.json") {
            continue;
        }

        if let Some(rest) = key.strip_prefix("docker/") {
            let parts: Vec<_> = rest.split('/').collect();
            // Keys shallower than docker/<name>/<kind>/<file> are ignored.
            if parts.len() >= 3 {
                let name = parts[0].to_string();
                let entry = repos.entry(name.clone()).or_insert_with(|| {
                    (
                        RepoInfo {
                            name,
                            versions: 0,
                            size: 0,
                            updated: "N/A".to_string(),
                        },
                        0,
                    )
                });

                // Only manifest JSONs count as tags/versions.
                if parts[1] == "manifests" && key.ends_with(".json") {
                    entry.0.versions += 1;

                    // Parse manifest to get actual image size (config + layers)
                    if let Ok(manifest_data) = storage.get(key).await {
                        if let Ok(manifest) =
                            serde_json::from_slice::<serde_json::Value>(&manifest_data)
                        {
                            let config_size = manifest
                                .get("config")
                                .and_then(|c| c.get("size"))
                                .and_then(|s| s.as_u64())
                                .unwrap_or(0);
                            let layers_size: u64 = manifest
                                .get("layers")
                                .and_then(|l| l.as_array())
                                .map(|layers| {
                                    layers
                                        .iter()
                                        .filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
                                        .sum()
                                })
                                .unwrap_or(0);
                            entry.0.size += config_size + layers_size;
                        }
                    }

                    // Update timestamp — keep the newest manifest's mtime.
                    if let Some(meta) = storage.stat(key).await {
                        if meta.modified > entry.1 {
                            entry.1 = meta.modified;
                            entry.0.updated = format_timestamp(meta.modified);
                        }
                    }
                }
            }
        }
    }

    let mut result: Vec<_> = repos.into_values().map(|(r, _)| r).collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}
|
|
|
|
/// Build the tag list for one Docker repository.
///
/// For every `docker/<name>/manifests/<tag>.json` key it merges three data
/// sources, preferring the first that is available:
/// 1. the sidecar `<tag>.meta.json` (push time, downloads, os/arch, size),
/// 2. the manifest itself (config + layer sizes),
/// 3. filesystem metadata (modification time).
pub async fn get_docker_detail(state: &AppState, name: &str) -> DockerDetail {
    let prefix = format!("docker/{}/manifests/", name);
    let keys = state.storage.list(&prefix).await;

    // Build public URL for pull commands; falls back to host:port when no
    // public_url is configured.
    let registry_host =
        state.config.server.public_url.clone().unwrap_or_else(|| {
            format!("{}:{}", state.config.server.host, state.config.server.port)
        });

    let mut tags = Vec::new();
    for key in &keys {
        // Skip .meta.json files
        if key.ends_with(".meta.json") {
            continue;
        }

        if let Some(tag_name) = key
            .strip_prefix(&prefix)
            .and_then(|s| s.strip_suffix(".json"))
        {
            // Load metadata from .meta.json file; missing or unparsable
            // metadata falls back to the all-zero default.
            let meta_key = format!("{}.meta.json", key.trim_end_matches(".json"));
            let metadata = if let Ok(meta_data) = state.storage.get(&meta_key).await {
                serde_json::from_slice::<crate::registry::docker::ImageMetadata>(&meta_data)
                    .unwrap_or_default()
            } else {
                crate::registry::docker::ImageMetadata::default()
            };

            // Get file stats for created timestamp if metadata doesn't have push_timestamp
            let created = if metadata.push_timestamp > 0 {
                format_timestamp(metadata.push_timestamp)
            } else if let Some(file_meta) = state.storage.stat(key).await {
                format_timestamp(file_meta.modified)
            } else {
                "N/A".to_string()
            };

            // Calculate size from manifest layers (config + layers) when the
            // metadata has no size recorded.
            let size = if metadata.size_bytes > 0 {
                metadata.size_bytes
            } else {
                // Parse manifest to get actual image size
                if let Ok(manifest_data) = state.storage.get(key).await {
                    if let Ok(manifest) =
                        serde_json::from_slice::<serde_json::Value>(&manifest_data)
                    {
                        let config_size = manifest
                            .get("config")
                            .and_then(|c| c.get("size"))
                            .and_then(|s| s.as_u64())
                            .unwrap_or(0);
                        let layers_size: u64 = manifest
                            .get("layers")
                            .and_then(|l| l.as_array())
                            .map(|layers| {
                                layers
                                    .iter()
                                    .filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
                                    .sum()
                            })
                            .unwrap_or(0);
                        config_size + layers_size
                    } else {
                        0
                    }
                } else {
                    0
                }
            };

            // Format last_pulled; 0 means the tag was never pulled.
            let last_pulled = if metadata.last_pulled > 0 {
                Some(format_timestamp(metadata.last_pulled))
            } else {
                None
            };

            // Build pull command
            let pull_command = format!("docker pull {}/{}:{}", registry_host, name, tag_name);

            tags.push(TagInfo {
                name: tag_name.to_string(),
                size,
                created,
                downloads: metadata.downloads,
                last_pulled,
                os: if metadata.os.is_empty() {
                    "unknown".to_string()
                } else {
                    metadata.os
                },
                arch: if metadata.arch.is_empty() {
                    "unknown".to_string()
                } else {
                    metadata.arch
                },
                layers_count: metadata.layers.len(),
                pull_command,
            });
        }
    }

    DockerDetail { tags }
}
|
|
|
|
#[allow(dead_code)]
|
|
pub async fn get_maven_repos(storage: &Storage) -> Vec<RepoInfo> {
|
|
let keys = storage.list("maven/").await;
|
|
|
|
let mut repos: HashMap<String, (RepoInfo, u64)> = HashMap::new();
|
|
|
|
for key in &keys {
|
|
if let Some(rest) = key.strip_prefix("maven/") {
|
|
let parts: Vec<_> = rest.split('/').collect();
|
|
if parts.len() >= 2 {
|
|
let artifact_path = parts[..parts.len() - 1].join("/");
|
|
let entry = repos.entry(artifact_path.clone()).or_insert_with(|| {
|
|
(
|
|
RepoInfo {
|
|
name: artifact_path,
|
|
versions: 0,
|
|
size: 0,
|
|
updated: "N/A".to_string(),
|
|
},
|
|
0,
|
|
)
|
|
});
|
|
entry.0.versions += 1;
|
|
if let Some(meta) = storage.stat(key).await {
|
|
entry.0.size += meta.size;
|
|
if meta.modified > entry.1 {
|
|
entry.1 = meta.modified;
|
|
entry.0.updated = format_timestamp(meta.modified);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
let mut result: Vec<_> = repos.into_values().map(|(r, _)| r).collect();
|
|
result.sort_by(|a, b| a.name.cmp(&b.name));
|
|
result
|
|
}
|
|
|
|
pub async fn get_maven_detail(storage: &Storage, path: &str) -> MavenDetail {
|
|
let prefix = format!("maven/{}/", path);
|
|
let keys = storage.list(&prefix).await;
|
|
|
|
let mut artifacts = Vec::new();
|
|
for key in &keys {
|
|
if let Some(filename) = key.strip_prefix(&prefix) {
|
|
if filename.contains('/') {
|
|
continue;
|
|
}
|
|
let size = storage.stat(key).await.map(|m| m.size).unwrap_or(0);
|
|
artifacts.push(MavenArtifact {
|
|
filename: filename.to_string(),
|
|
size,
|
|
});
|
|
}
|
|
}
|
|
|
|
MavenDetail { artifacts }
|
|
}
|
|
|
|
#[allow(dead_code)]
|
|
pub async fn get_npm_packages(storage: &Storage) -> Vec<RepoInfo> {
|
|
let keys = storage.list("npm/").await;
|
|
|
|
let mut packages: HashMap<String, RepoInfo> = HashMap::new();
|
|
|
|
// Find all metadata.json files
|
|
for key in &keys {
|
|
if key.ends_with("/metadata.json") {
|
|
if let Some(name) = key
|
|
.strip_prefix("npm/")
|
|
.and_then(|s| s.strip_suffix("/metadata.json"))
|
|
{
|
|
// Parse metadata to get version count and info
|
|
if let Ok(data) = storage.get(key).await {
|
|
if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
|
|
let versions_count = metadata
|
|
.get("versions")
|
|
.and_then(|v| v.as_object())
|
|
.map(|v| v.len())
|
|
.unwrap_or(0);
|
|
|
|
// Calculate total size from dist.unpackedSize or estimate
|
|
let total_size: u64 = metadata
|
|
.get("versions")
|
|
.and_then(|v| v.as_object())
|
|
.map(|versions| {
|
|
versions
|
|
.values()
|
|
.filter_map(|v| {
|
|
v.get("dist")
|
|
.and_then(|d| d.get("unpackedSize"))
|
|
.and_then(|s| s.as_u64())
|
|
})
|
|
.sum()
|
|
})
|
|
.unwrap_or(0);
|
|
|
|
// Get latest version time for "updated"
|
|
let updated = metadata
|
|
.get("time")
|
|
.and_then(|t| t.get("modified"))
|
|
.and_then(|m| m.as_str())
|
|
.map(|s| s[..10].to_string()) // Take just date part
|
|
.unwrap_or_else(|| "N/A".to_string());
|
|
|
|
packages.insert(
|
|
name.to_string(),
|
|
RepoInfo {
|
|
name: name.to_string(),
|
|
versions: versions_count,
|
|
size: total_size,
|
|
updated,
|
|
},
|
|
);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
let mut result: Vec<_> = packages.into_values().collect();
|
|
result.sort_by(|a, b| a.name.cmp(&b.name));
|
|
result
|
|
}
|
|
|
|
pub async fn get_npm_detail(storage: &Storage, name: &str) -> PackageDetail {
|
|
let metadata_key = format!("npm/{}/metadata.json", name);
|
|
|
|
let mut versions = Vec::new();
|
|
|
|
// Parse metadata.json for version info
|
|
if let Ok(data) = storage.get(&metadata_key).await {
|
|
if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
|
|
if let Some(versions_obj) = metadata.get("versions").and_then(|v| v.as_object()) {
|
|
let time_obj = metadata.get("time").and_then(|t| t.as_object());
|
|
|
|
for (version, info) in versions_obj {
|
|
let size = info
|
|
.get("dist")
|
|
.and_then(|d| d.get("unpackedSize"))
|
|
.and_then(|s| s.as_u64())
|
|
.unwrap_or(0);
|
|
|
|
let published = time_obj
|
|
.and_then(|t| t.get(version))
|
|
.and_then(|p| p.as_str())
|
|
.map(|s| s[..10].to_string())
|
|
.unwrap_or_else(|| "N/A".to_string());
|
|
|
|
versions.push(VersionInfo {
|
|
version: version.clone(),
|
|
size,
|
|
published,
|
|
});
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
// Sort by version (semver-like, newest first)
|
|
versions.sort_by(|a, b| {
|
|
let a_parts: Vec<u32> = a
|
|
.version
|
|
.split('.')
|
|
.filter_map(|s| s.parse().ok())
|
|
.collect();
|
|
let b_parts: Vec<u32> = b
|
|
.version
|
|
.split('.')
|
|
.filter_map(|s| s.parse().ok())
|
|
.collect();
|
|
b_parts.cmp(&a_parts)
|
|
});
|
|
|
|
PackageDetail { versions }
|
|
}
|
|
|
|
#[allow(dead_code)]
|
|
pub async fn get_cargo_crates(storage: &Storage) -> Vec<RepoInfo> {
|
|
let keys = storage.list("cargo/").await;
|
|
|
|
let mut crates: HashMap<String, (RepoInfo, u64)> = HashMap::new();
|
|
|
|
for key in &keys {
|
|
if let Some(rest) = key.strip_prefix("cargo/") {
|
|
let parts: Vec<_> = rest.split('/').collect();
|
|
if !parts.is_empty() {
|
|
let name = parts[0].to_string();
|
|
let entry = crates.entry(name.clone()).or_insert_with(|| {
|
|
(
|
|
RepoInfo {
|
|
name,
|
|
versions: 0,
|
|
size: 0,
|
|
updated: "N/A".to_string(),
|
|
},
|
|
0,
|
|
)
|
|
});
|
|
|
|
if parts.len() >= 3 && key.ends_with(".crate") {
|
|
entry.0.versions += 1;
|
|
if let Some(meta) = storage.stat(key).await {
|
|
entry.0.size += meta.size;
|
|
if meta.modified > entry.1 {
|
|
entry.1 = meta.modified;
|
|
entry.0.updated = format_timestamp(meta.modified);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
let mut result: Vec<_> = crates.into_values().map(|(r, _)| r).collect();
|
|
result.sort_by(|a, b| a.name.cmp(&b.name));
|
|
result
|
|
}
|
|
|
|
pub async fn get_cargo_detail(storage: &Storage, name: &str) -> PackageDetail {
|
|
let prefix = format!("cargo/{}/", name);
|
|
let keys = storage.list(&prefix).await;
|
|
|
|
let mut versions = Vec::new();
|
|
for key in keys.iter().filter(|k| k.ends_with(".crate")) {
|
|
if let Some(rest) = key.strip_prefix(&prefix) {
|
|
let parts: Vec<_> = rest.split('/').collect();
|
|
if !parts.is_empty() {
|
|
let (size, published) = if let Some(meta) = storage.stat(key).await {
|
|
(meta.size, format_timestamp(meta.modified))
|
|
} else {
|
|
(0, "N/A".to_string())
|
|
};
|
|
versions.push(VersionInfo {
|
|
version: parts[0].to_string(),
|
|
size,
|
|
published,
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
PackageDetail { versions }
|
|
}
|
|
|
|
#[allow(dead_code)]
|
|
pub async fn get_pypi_packages(storage: &Storage) -> Vec<RepoInfo> {
|
|
let keys = storage.list("pypi/").await;
|
|
|
|
let mut packages: HashMap<String, (RepoInfo, u64)> = HashMap::new();
|
|
|
|
for key in &keys {
|
|
if let Some(rest) = key.strip_prefix("pypi/") {
|
|
let parts: Vec<_> = rest.split('/').collect();
|
|
if !parts.is_empty() {
|
|
let name = parts[0].to_string();
|
|
let entry = packages.entry(name.clone()).or_insert_with(|| {
|
|
(
|
|
RepoInfo {
|
|
name,
|
|
versions: 0,
|
|
size: 0,
|
|
updated: "N/A".to_string(),
|
|
},
|
|
0,
|
|
)
|
|
});
|
|
|
|
if parts.len() >= 2 {
|
|
entry.0.versions += 1;
|
|
if let Some(meta) = storage.stat(key).await {
|
|
entry.0.size += meta.size;
|
|
if meta.modified > entry.1 {
|
|
entry.1 = meta.modified;
|
|
entry.0.updated = format_timestamp(meta.modified);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
let mut result: Vec<_> = packages.into_values().map(|(r, _)| r).collect();
|
|
result.sort_by(|a, b| a.name.cmp(&b.name));
|
|
result
|
|
}
|
|
|
|
pub async fn get_pypi_detail(storage: &Storage, name: &str) -> PackageDetail {
|
|
let prefix = format!("pypi/{}/", name);
|
|
let keys = storage.list(&prefix).await;
|
|
|
|
let mut versions = Vec::new();
|
|
for key in &keys {
|
|
if let Some(filename) = key.strip_prefix(&prefix) {
|
|
if let Some(version) = extract_pypi_version(name, filename) {
|
|
let (size, published) = if let Some(meta) = storage.stat(key).await {
|
|
(meta.size, format_timestamp(meta.modified))
|
|
} else {
|
|
(0, "N/A".to_string())
|
|
};
|
|
versions.push(VersionInfo {
|
|
version,
|
|
size,
|
|
published,
|
|
});
|
|
}
|
|
}
|
|
}
|
|
|
|
PackageDetail { versions }
|
|
}
|
|
|
|
/// Best-effort extraction of the version from a PyPI distribution filename
/// belonging to package `name`.
///
/// Handles sdists (`pkg-1.0.0.tar.gz`) and wheels
/// (`pkg-1.0.0-py3-none-any.whl`); returns `None` for anything else.
///
/// Generalized: package names are frequently normalized between `-` and `_`,
/// so for sdists the name is tried as-is, fully underscored, and fully
/// dashed (the old code only tried as-is and underscored).
fn extract_pypi_version(name: &str, filename: &str) -> Option<String> {
    if let Some(base) = filename.strip_suffix(".tar.gz") {
        // sdist: <name>-<version>.tar.gz
        let underscored = name.replace('-', "_");
        let dashed = name.replace('_', "-");
        for candidate in [name, underscored.as_str(), dashed.as_str()] {
            if let Some(version) = base.strip_prefix(&format!("{}-", candidate)) {
                return Some(version.to_string());
            }
        }
        None
    } else if filename.ends_with(".whl") {
        // wheel: <name>-<version>-<pytag>-<abi>-<platform>.whl. The
        // distribution name in a wheel filename never contains '-', so the
        // second '-'-separated field is the version.
        filename.split('-').nth(1).map(str::to_string)
    } else {
        None
    }
}
|