88 Commits

Author SHA1 Message Date
161d7f706a chore: bump version to 0.2.24 2026-02-24 17:09:55 +00:00
e4e38e3aab docs: add Astra Linux SE restore to CHANGELOG [Unreleased] 2026-02-24 17:02:14 +00:00
b153bc0c5b ci: restore Astra Linux SE build, scan, and release image 2026-02-24 17:01:14 +00:00
d76383c701 docs: update CHANGELOG for v0.2.19–v0.2.23 and Unreleased (EN/RU) 2026-02-24 16:44:49 +00:00
d161c2f645 feat: add install.sh script 2026-02-24 15:00:19 +00:00
c7f9d5c036 ci: fix binary path in image (/usr/local/bin/nora) 2026-02-24 14:03:16 +00:00
b41bfd9a88 ci: pin build job to nora runner label to avoid wrong runner 2026-02-24 13:18:11 +00:00
3e3070a401 docs: use logo.jpg in README 2026-02-24 12:47:07 +00:00
3868b16ea4 docs: replace text title with SVG logo, O styled in blue-600 2026-02-24 12:29:07 +00:00
3a6d3eeb9a feat: add binary + sha256 to GitHub Release artifacts 2026-02-24 12:14:29 +00:00
dd29707395 ci: ignore RUSTSEC-2025-0119 (number_prefix unmaintained, transitive via indicatif) 2026-02-24 12:06:34 +00:00
e7a6a652af ci: allow CDLA-Permissive-2.0 license (webpki-roots) 2026-02-24 11:54:19 +00:00
4ad802ce2f fix: bump prometheus 0.13->0.14 and bytes 1.11.0->1.11.1 (CVE-2025-53605, CVE-2026-25541) 2026-02-24 11:36:07 +00:00
dependabot[bot]
04c806b659 chore(deps): bump chrono from 0.4.43 to 0.4.44 (#10)
Bumps [chrono](https://github.com/chronotope/chrono) from 0.4.43 to 0.4.44.
- [Release notes](https://github.com/chronotope/chrono/releases)
- [Changelog](https://github.com/chronotope/chrono/blob/main/CHANGELOG.md)
- [Commits](https://github.com/chronotope/chrono/compare/v0.4.43...v0.4.44)

---
updated-dependencies:
- dependency-name: chrono
  dependency-version: 0.4.44
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:23:06 +01:00
dependabot[bot]
50a5395a87 chore(deps): bump quick-xml from 0.31.0 to 0.39.2 (#9)
Bumps [quick-xml](https://github.com/tafia/quick-xml) from 0.31.0 to 0.39.2.
- [Release notes](https://github.com/tafia/quick-xml/releases)
- [Changelog](https://github.com/tafia/quick-xml/blob/master/Changelog.md)
- [Commits](https://github.com/tafia/quick-xml/compare/v0.31.0...v0.39.2)

---
updated-dependencies:
- dependency-name: quick-xml
  dependency-version: 0.39.2
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:22:58 +01:00
dependabot[bot]
bcd172f23f chore(deps): bump toml from 0.8.23 to 1.0.3+spec-1.1.0 (#7)
Bumps [toml](https://github.com/toml-rs/toml) from 0.8.23 to 1.0.3+spec-1.1.0.
- [Commits](https://github.com/toml-rs/toml/compare/toml-v0.8.23...toml-v1.0.3)

---
updated-dependencies:
- dependency-name: toml
  dependency-version: 1.0.3+spec-1.1.0
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:22:52 +01:00
dependabot[bot]
a5a7c4f8be chore(deps): bump flate2 from 1.1.8 to 1.1.9 (#6)
Bumps [flate2](https://github.com/rust-lang/flate2-rs) from 1.1.8 to 1.1.9.
- [Release notes](https://github.com/rust-lang/flate2-rs/releases)
- [Commits](https://github.com/rust-lang/flate2-rs/compare/1.1.8...1.1.9)

---
updated-dependencies:
- dependency-name: flate2
  dependency-version: 1.1.9
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:22:46 +01:00
dependabot[bot]
2c7c497c30 chore(deps): bump softprops/action-gh-release from 1 to 2 (#5)
Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 1 to 2.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](https://github.com/softprops/action-gh-release/compare/v1...v2)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-version: '2'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:20:23 +01:00
dependabot[bot]
6b6f88ab9c chore(deps): bump actions/checkout from 4 to 6 (#4)
Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 6.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v4...v6)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:20:19 +01:00
dependabot[bot]
1255e3227b chore(deps): bump docker/build-push-action from 5 to 6 (#3)
Bumps [docker/build-push-action](https://github.com/docker/build-push-action) from 5 to 6.
- [Release notes](https://github.com/docker/build-push-action/releases)
- [Commits](https://github.com/docker/build-push-action/compare/v5...v6)

---
updated-dependencies:
- dependency-name: docker/build-push-action
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:20:16 +01:00
dependabot[bot]
aabd0b76fb chore(deps): bump aquasecurity/trivy-action from 0.30.0 to 0.34.1 (#2)
Bumps [aquasecurity/trivy-action](https://github.com/aquasecurity/trivy-action) from 0.30.0 to 0.34.1.
- [Release notes](https://github.com/aquasecurity/trivy-action/releases)
- [Commits](https://github.com/aquasecurity/trivy-action/compare/0.30.0...0.34.1)

---
updated-dependencies:
- dependency-name: aquasecurity/trivy-action
  dependency-version: 0.34.1
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2026-02-24 12:20:12 +01:00
ac14405af3 ci: restore scan gate on release, block on HIGH/CRITICAL CVE 2026-02-24 10:53:28 +00:00
5f385dce45 ci: add dependabot, pin trivy-action@0.30.0, release no longer waits on scan 2026-02-24 10:48:06 +00:00
761e08f168 ci: upgrade codeql-action v3 -> v4 2026-02-24 10:41:37 +00:00
eb4f82df07 ci: fix deny.toml deprecated keys (copyleft, unlicensed removed in cargo-deny) 2026-02-24 10:26:58 +00:00
9784ad1813 chore: bump version to 0.2.22 2026-02-24 09:20:52 +00:00
fc1288820d ci: remove astra build for now 2026-02-24 00:39:16 +00:00
a17a75161b ci: consolidate all docker builds into single job to fix runner network issues 2026-02-24 00:07:44 +00:00
0b3ef3ab96 ci: use shared runner filesystem instead of artifact API to avoid network upload 2026-02-23 23:41:41 +00:00
99e290d30c ci: fix SBOM image tag and registry credentials 2026-02-23 18:53:17 +00:00
f74b781d1f ci: build musl static binary, fix cargo path (hardcode github-runner home) 2026-02-23 18:08:57 +00:00
05c765627f ci: fix trivy image tag (strip v prefix) 2026-02-23 16:47:18 +00:00
1813546bee ci: move trivy image scan to separate ubuntu-latest job to avoid self-hosted timeout 2026-02-23 16:15:03 +00:00
196c313f20 ci: add cargo cache to build-binary job, remove nora proxy (no sparse protocol) 2026-02-23 14:17:39 +00:00
aece2d739d ci: add registry credentials to trivy image scan 2026-02-23 14:01:31 +00:00
b7e11da2da ci: replace gitleaks action with CLI to avoid license requirement 2026-02-23 13:59:12 +00:00
dd3813edff ci: use github-runner own rust toolchain instead of ai-user path 2026-02-23 13:54:23 +00:00
adade10c67 chore: bump version to 0.2.21 2026-02-23 12:05:19 +00:00
6ad710ff32 ci: add security scanning and SBOM to release pipeline
- ci.yml: add security job (gitleaks, cargo-audit, cargo-deny, trivy fs)
- release.yml: restructure into build-binary + build-docker matrix + release
  - build binary once on self-hosted, reuse across all Docker builds
  - trivy image scan per matrix variant, results to GitHub Security tab
  - SBOM generation in SPDX and CycloneDX formats attached to release
- deny.toml: cargo-deny policy (allowed licenses, banned openssl, crates.io only)
- Dockerfile: remove Rust build stage, use pre-built binary
- Dockerfile.astra, Dockerfile.redos: FROM scratch for Russian certified OS support
2026-02-23 11:37:27 +00:00
037204a3eb fix: use FROM scratch for Astra and RedOS builds
Russian OS registries (registry.astralinux.ru, registry.red-soft.ru)
require auth not available in CI. Use scratch base with static musl
binary instead — runs on any Linux including Astra SE and RED OS.
Comment in each Dockerfile shows how to switch to official base image
once registry access is configured.
2026-02-23 08:43:13 +00:00
1e01d4df56 ci: add Astra Linux and RedOS parallel builds
Add Dockerfile.astra (astralinux/alse) and Dockerfile.redos (redos/redos)
for FSTEC-certified Russian OS targets. Update release.yml with a matrix
strategy that produces three image variants per release:
  - ghcr.io/.../nora:0.x.x          (Alpine, default)
  - ghcr.io/.../nora:0.x.x-astra    (Astra Linux SE)
  - ghcr.io/.../nora:0.x.x-redos    (RED OS)

Build stage is shared (musl static binary) across all variants.
2026-02-23 08:24:48 +00:00
ab5ed3f488 ci: remove unnecessary QEMU step for amd64-only builds 2026-02-23 08:05:54 +00:00
8336166e0e style: apply rustfmt to registry handlers 2026-02-23 07:48:20 +00:00
42e71b9195 refactor: use shared reqwest::Client across all registry handlers
Add http_client field to AppState, initialized once at startup.
Replace per-request Client::builder() calls in npm, maven, pypi,
and docker registry handlers with the shared instance.
This reuses the connection pool across requests instead of
creating a new client on every proxy fetch.

Bump version to 0.2.20.
2026-02-23 07:45:44 +00:00
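A minimal sketch of the shared-client pattern this commit describes; the struct and constructor are assumptions modeled on the commit message, not the project's actual definitions:

```rust
// Sketch only: field and method names are illustrative.
#[derive(Clone)]
struct AppState {
    http_client: reqwest::Client, // internally reference-counted, cheap to clone
}

impl AppState {
    fn new() -> Self {
        Self {
            // Built once at startup; every registry proxy fetch reuses the
            // same connection pool instead of constructing a new client.
            http_client: reqwest::Client::builder()
                .build()
                .expect("failed to build shared HTTP client"),
        }
    }
}
```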
ffac4f0286 fix(auth): replace starts_with with explicit matches for token paths
Prevent accidental exposure of unknown /api/tokens/* sub-paths.
Only the three known routes are now explicitly whitelisted in
is_public_path: /api/tokens, /api/tokens/list, /api/tokens/revoke.
2026-02-22 20:35:04 +00:00
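For illustration, the whitelist check described above might look like this; the function name comes from the commit message, the body is an assumption:

```rust
// Only the three known token routes are public; any other
// /api/tokens/* sub-path falls through to authenticated handling.
fn is_public_path(path: &str) -> bool {
    matches!(
        path,
        "/api/tokens" | "/api/tokens/list" | "/api/tokens/revoke"
    )
    // Previously: path.starts_with("/api/tokens"), which also matched
    // unknown sub-paths such as /api/tokens/anything-else.
}
```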
078ef94153 chore: bump version to 0.2.19 2026-02-22 13:33:25 +00:00
94c92e5bc3 fix: use div_ceil instead of manual implementation 2026-01-31 16:51:37 +00:00
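The change in a nutshell, as a standalone example:

```rust
fn main() {
    let (total, page_size) = (101_u64, 50);
    let pages_manual = (total + page_size - 1) / page_size; // old: manual ceiling division
    let pages = total.div_ceil(page_size); // new: std method, stable since Rust 1.73
    assert_eq!(pages, pages_manual);
    assert_eq!(pages, 3);
}
```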
7326f9b0e2 chore: add pre-commit hook to prevent sensitive file commits
- Whitelist approach: only known safe extensions allowed (.rs, .toml, .yml, etc.)
- Block sensitive patterns (.env, .key, .pem, secrets, credentials)
- Warn but allow .md files
- Check only NEW files, modifications to tracked files always allowed
- Block large files (>5MB) with warning
- Run cargo fmt check on Rust files
- Update CONTRIBUTING.md with hook setup instructions
2026-01-31 16:39:04 +00:00
a2cb7c639c style: fix formatting and ignore txt files 2026-01-31 16:29:39 +00:00
eb77060114 perf: add in-memory repo index with pagination
- Add repo_index.rs with lazy rebuild on write operations
- Double-checked locking to prevent race conditions
- npm optimization: count tarballs instead of parsing metadata.json
- Add pagination to all registry list pages (?page=1&limit=50)
- Invalidate index on PUT/proxy cache in docker/maven/npm/pypi

Performance: 500-800x faster list page loads after first rebuild
2026-01-31 15:59:00 +00:00
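A sketch of the double-checked locking the commit mentions, assuming a `parking_lot::RwLock` over an optional index; the real `repo_index.rs` tracks more state than this:

```rust
use parking_lot::RwLock;

struct RepoIndex {
    inner: RwLock<Option<Vec<String>>>, // None = stale, needs rebuild
}

impl RepoIndex {
    fn get_or_rebuild(&self, rebuild: impl Fn() -> Vec<String>) -> Vec<String> {
        // Fast path: shared lock, index already built.
        if let Some(entries) = self.inner.read().as_ref() {
            return entries.clone();
        }
        // Slow path: exclusive lock, then re-check so that under
        // contention only one writer pays the rebuild cost.
        let mut guard = self.inner.write();
        if let Some(entries) = guard.as_ref() {
            return entries.clone();
        }
        let entries = rebuild();
        *guard = Some(entries.clone());
        entries
    }

    // Called on PUT/proxy cache writes, per the commit message.
    fn invalidate(&self) {
        *self.inner.write() = None;
    }
}
```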
8da3eab734 docs: add badges to README 2026-01-31 13:02:27 +00:00
f82e252e39 docs: add CONTRIBUTING.md and SECURITY.md 2026-01-31 12:39:41 +00:00
7763b85b94 chore: add copyright headers to all source files
Copyright (c) 2026 Volkov Pavel | DevITWay
SPDX-License-Identifier: MIT
2026-01-31 12:39:31 +00:00
47a3690384 style: fix O alignment in NORA logo on dashboard 2026-01-31 12:39:31 +00:00
a9125e6287 style: fix formatting 2026-01-31 10:49:50 +00:00
3f0b84c831 style: add chipmunk emoji and styled O to NORA logo 2026-01-31 10:48:15 +00:00
ce30c5b57d fix: docker dashboard shows actual image size from manifest layers 2026-01-31 10:41:55 +00:00
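The fix amounts to summing descriptor sizes from the manifest rather than taking the manifest file's own size; a hedged sketch with assumed struct shapes:

```rust
// Minimal manifest shape for illustration; real manifests carry more fields.
#[derive(serde::Deserialize)]
struct Manifest {
    config: Descriptor,
    layers: Vec<Descriptor>,
}

#[derive(serde::Deserialize)]
struct Descriptor {
    size: u64,
}

// Actual image size = config blob + all layer blobs, not the ~500 B
// manifest JSON itself.
fn image_size(m: &Manifest) -> u64 {
    m.config.size + m.layers.iter().map(|l| l.size).sum::<u64>()
}
```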
f76c6d6075 fix: npm dashboard shows versions and sizes from metadata.json 2026-01-31 09:16:24 +00:00
e6bd9b6ead docs: fix Docker image path in README 2026-01-31 08:55:51 +00:00
cf55a19acf docs: sync CHANGELOG and OpenAPI with actual implementation
- Fix CHANGELOG: add missing versions v0.2.4-v0.2.12
- Implement GET /v2/_catalog endpoint for Docker repository listing
- Add missing OpenAPI endpoints:
  - Docker: PUT manifest, POST/PATCH/PUT blob uploads, HEAD blob
  - Maven: PUT artifact upload
  - Cargo: GET metadata, GET download (was completely undocumented)
  - Metrics: GET /metrics
- Update OpenAPI version to 0.2.12
2026-01-31 07:54:19 +00:00
e33da13dc7 chore: update gitignore 2026-01-30 23:32:21 +00:00
bbdefff07c style: fix formatting 2026-01-30 23:29:34 +00:00
b29a0309d4 feat: add S3 authentication and fix Docker multi-segment routes
S3 Storage:
- Implement AWS Signature v4 for S3-compatible storage (MinIO, AWS)
- Add s3_access_key, s3_secret_key, s3_region config options
- Support both authenticated and anonymous S3 access
- Add proper URI encoding for S3 canonical requests

Docker Registry:
- Fix routing for multi-segment image names (e.g., library/alpine)
- Add namespace routes for two-segment paths (/v2/{ns}/{name}/...)
- Add debug tracing for upstream proxy operations

Config:
- Add NORA_STORAGE_S3_ACCESS_KEY env var
- Add NORA_STORAGE_S3_SECRET_KEY env var
- Add NORA_STORAGE_S3_REGION env var (default: us-east-1)
2026-01-30 23:22:22 +00:00
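As a reference point, the SigV4 signing-key derivation (the HMAC chain is from the AWS spec; helper names here are illustrative, and the canonical-request construction is omitted). The `hmac` and `hex` crates were added to the workspace dependencies in this change:

```rust
use hmac::{Hmac, Mac};
use sha2::Sha256;

type HmacSha256 = Hmac<Sha256>;

fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC-SHA256 accepts any key length");
    mac.update(data);
    mac.finalize().into_bytes().to_vec()
}

fn signing_key(secret_key: &str, date: &str, region: &str, service: &str) -> Vec<u8> {
    // kSigning = HMAC(HMAC(HMAC(HMAC("AWS4"+secret, date), region), service), "aws4_request")
    let k_date = hmac_sha256(format!("AWS4{secret_key}").as_bytes(), date.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, service.as_bytes());
    hmac_sha256(&k_service, b"aws4_request")
}
```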
38003db6f8 docs: add bilingual onboarding (EN/RU) 2026-01-30 16:19:48 +00:00
dab3ee805e fix: clippy let_and_return warning 2026-01-30 16:15:21 +00:00
ac4020d34f style: fix formatting 2026-01-30 16:06:40 +00:00
5fc4237ac5 feat: add Docker image metadata support
- Store metadata (.meta.json) alongside manifests with:
  - push_timestamp, last_pulled, downloads counter
  - size_bytes, os, arch, variant
  - layers list with digest and size
- Update metadata on manifest pull (increment downloads, update last_pulled)
- Extract OS/arch from config blob on push
- Extend UI API TagInfo with metadata fields
- Add public_url config option for pull commands
- Add Docker upstream proxy with auth support
- Add raw repository support
- Bump version to 0.2.12
2026-01-30 15:52:29 +00:00
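A hypothetical serde shape for the `.meta.json` sidecar, with fields taken from the commit message (not a verified schema):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct ImageMeta {
    push_timestamp: u64,
    last_pulled: Option<u64>, // updated on manifest pull
    downloads: u64,           // incremented on manifest pull
    size_bytes: u64,
    os: Option<String>,      // extracted from the config blob on push
    arch: Option<String>,
    variant: Option<String>,
    layers: Vec<LayerMeta>,
}

#[derive(Serialize, Deserialize)]
struct LayerMeta {
    digest: String,
    size: u64,
}
```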
ee4e01467a feat: add secrets provider architecture
Trait-based secrets management for secure credential handling:
- SecretsProvider trait for pluggable backends
- EnvProvider as default (12-Factor App pattern)
- ProtectedString with zeroize (memory zeroed on drop)
- Redacted Debug impl prevents secret leakage in logs
- S3Credentials struct for future AWS S3 integration
- Config: [secrets] section with provider and clear_env options

Foundation for AWS Secrets Manager, Vault, K8s (v0.4.0+)
2026-01-30 10:02:58 +00:00
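A compact sketch of the trait-based design, assuming simplified signatures; the redacted `Debug` and zeroize-on-drop behaviors mirror the commit message:

```rust
use std::fmt;
use zeroize::Zeroize;

// Sketch only: trait and type names follow the commit message,
// the signatures are assumptions.
trait SecretsProvider {
    fn get(&self, key: &str) -> Option<ProtectedString>;
}

struct EnvProvider; // default backend, 12-Factor style

impl SecretsProvider for EnvProvider {
    fn get(&self, key: &str) -> Option<ProtectedString> {
        std::env::var(key).ok().map(ProtectedString)
    }
}

struct ProtectedString(String);

impl fmt::Debug for ProtectedString {
    // Redacted Debug: the secret never reaches logs or panic output.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("ProtectedString(<redacted>)")
    }
}

impl Drop for ProtectedString {
    // Memory is zeroed on drop, as the commit describes.
    fn drop(&mut self) {
        self.0.zeroize();
    }
}
```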
3265e217e7 feat: add configurable rate limiting
Rate limits now configurable via config.toml and ENV variables:
- New [rate_limit] config section with auth/upload/general settings
- ENV: NORA_RATE_LIMIT_{AUTH|UPLOAD|GENERAL}_{RPS|BURST}
- Rate limit configuration logged at startup
- Functions accept &RateLimitConfig instead of hardcoded values
2026-01-30 08:20:50 +00:00
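The ENV override scheme reads roughly like the following sketch; the defaults shown are illustrative, and only the auth pair is spelled out:

```rust
#[derive(Debug, Clone)]
struct RateLimitConfig {
    auth_rps: u64,
    auth_burst: u64,
    // upload_* and general_* follow the same pattern.
}

fn env_u64(name: &str, default: u64) -> u64 {
    std::env::var(name)
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(default)
}

impl RateLimitConfig {
    fn from_env() -> Self {
        Self {
            auth_rps: env_u64("NORA_RATE_LIMIT_AUTH_RPS", 5),
            auth_burst: env_u64("NORA_RATE_LIMIT_AUTH_BURST", 10),
        }
    }
}
```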
cf9feee5b2 Fix clippy warnings 2026-01-26 19:43:51 +00:00
0a97b00278 Fix code formatting 2026-01-26 19:42:20 +00:00
d162e96841 Add i18n support, PyPI proxy, and UI improvements
- Add Russian/English language switcher with cookie persistence
- Add PyPI proxy support with caching (like npm)
- Add height limits to Activity Log and Mount Points tables
- Change Cargo icon to delivery truck
- Replace graphical logo with styled text "NORA"
- Bump version to 0.2.11
2026-01-26 19:31:28 +00:00
4aa7529aa4 Bump version to 0.2.10 2026-01-26 18:43:21 +00:00
411bc75e5e Apply dark theme to all UI pages
- Convert registry list, docker detail, package detail, maven detail pages to dark theme
- Use layout_dark instead of layout for all pages
- Update colors: bg-[#1e293b] cards, slate-700 borders, slate-200/400 text
- Mark unused light theme functions with #[allow(dead_code)]
2026-01-26 18:43:11 +00:00
d2fec9ad15 Bump version to 0.2.9 2026-01-26 18:02:43 +00:00
00910dd69e Bump version to 0.2.8 2026-01-26 17:46:34 +00:00
4332b74636 Add dashboard endpoint to OpenAPI documentation
- Add /api/ui/dashboard endpoint with dashboard tag
- Add schemas: DashboardResponse, GlobalStats, RegistryCardStats, MountPoint, ActivityEntry
- Update API version to 0.2.7 in OpenAPI spec
2026-01-26 17:45:54 +00:00
86130a80ce Display version dynamically in UI sidebar
- Add VERSION constant using CARGO_PKG_VERSION
- Show version in both light and dark theme sidebars
- Update workspace version to 0.2.7
2026-01-26 17:31:39 +00:00
2f86b4852a Fix clippy warnings 2026-01-26 16:44:01 +00:00
08eea07cfe Fix formatting 2026-01-26 16:39:48 +00:00
a13d7b8cfc Add dashboard metrics, activity log, and dark theme
- Add DashboardMetrics for tracking downloads/uploads/cache hits per registry
- Add ActivityLog for recent activity with bounded size (50 entries)
- Instrument Docker, npm, Maven, and Cargo handlers with metrics
- Add /api/ui/dashboard endpoint with global stats and activity
- Implement dark theme dashboard with real-time polling (5s interval)
- Add mount points table showing registry paths and proxy upstreams
2026-01-26 16:21:25 +00:00
f1cda800a2 Fix Docker push/pull: add PATCH endpoint for chunked uploads
- Add PATCH handler for /v2/{name}/blobs/uploads/{uuid} to support
  chunked blob uploads (Docker sends data chunks via PATCH)
- Include Range header in PATCH response to indicate bytes received
- Add Docker-Content-Digest header to GET manifest responses
- Store manifests by both tag and digest for proper pull support
- Add parking_lot dependency for upload session state management
2026-01-26 12:01:05 +00:00
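A much-simplified sketch of the PATCH step, assuming axum extractors; session bookkeeping (the parking_lot-backed upload state) and digest verification are elided:

```rust
use axum::{
    body::Bytes,
    extract::Path,
    http::{HeaderMap, StatusCode},
};

async fn patch_blob_upload(
    Path((_name, uuid)): Path<(String, String)>,
    chunk: Bytes,
) -> (StatusCode, HeaderMap) {
    // In the real handler the chunk is appended to the upload session
    // keyed by `uuid`, and `received` is the running total of bytes.
    let received = chunk.len();
    let mut headers = HeaderMap::new();
    // Range tells the Docker client how many bytes have been received.
    headers.insert(
        "Range",
        format!("0-{}", received.saturating_sub(1)).parse().unwrap(),
    );
    headers.insert("Docker-Upload-UUID", uuid.parse().unwrap());
    (StatusCode::ACCEPTED, headers)
}
```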
da219dc794 Fix rate limiting: exempt health/metrics, increase upload limits
- Health, metrics, UI, and API docs are now exempt from rate limiting
- Increased upload rate limits to 200 req/s with burst of 500 for Docker compatibility
2026-01-26 11:04:14 +00:00
1152308f6c Use self-hosted runner for release builds
16-core runner should be 3-4x faster than GitHub's 2-core runners
2026-01-26 10:39:04 +00:00
6c53b2da84 Speed up release workflow
- Remove duplicate tests (already run on push to main)
- Build only for amd64 (arm64 rarely needed for VPS)
2026-01-26 10:18:11 +00:00
c7098a4aed Fix formatting 2026-01-26 10:14:11 +00:00
937266a4c7 Increase upload rate limits for Docker parallel requests
Docker client sends many parallel requests when pushing layers.
Increased upload rate limiter from 10 req/s to 50 req/s and burst from 20 to 100.
2026-01-26 10:10:45 +00:00
00fbd20112 fix: resolve clippy warnings and format code 2026-01-26 08:31:00 +00:00
63 changed files with 2895 additions and 1317 deletions

.githooks/pre-commit (new executable file, 142 lines)

@@ -0,0 +1,142 @@
#!/bin/bash
# Pre-commit hook to prevent accidental commits of sensitive files
# Enable: git config core.hooksPath .githooks
set -e

RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
NC='\033[0m'

# Allowed file extensions (whitelist)
ALLOWED_EXTENSIONS=(
    '\.rs$'
    '\.toml$'
    '\.lock$'
    '\.yml$'
    '\.yaml$'
    '\.json$'
    '\.sh$'
    '\.html$'
    '\.css$'
    '\.js$'
    '\.gitignore$'
    '\.dockerignore$'
    'Dockerfile$'
    'LICENSE$'
    'Makefile$'
)

# Extensions that trigger a warning (not blocked)
WARN_EXTENSIONS=(
    '\.md$'
)

# Always blocked patterns (regardless of extension)
BLOCKED_PATTERNS=(
    '\.env$'
    '\.env\.'
    '\.key$'
    '\.pem$'
    '\.p12$'
    '\.pfx$'
    '\.htpasswd$'
    'secret'
    'credential'
    'password'
    '\.bak$'
    '\.swp$'
    '\.swo$'
    'node_modules/'
    'target/debug/'
    '\.DS_Store'
)

# Get staged files (only NEW files, not already tracked)
STAGED_FILES=$(git diff --cached --name-only --diff-filter=A)
if [ -z "$STAGED_FILES" ]; then
    # No new files, only modifications to existing - allow
    exit 0
fi

# Build patterns
ALLOWED_PATTERN=$(IFS='|'; echo "${ALLOWED_EXTENSIONS[*]}")
WARN_PATTERN=$(IFS='|'; echo "${WARN_EXTENSIONS[*]}")
BLOCKED_PATTERN=$(IFS='|'; echo "${BLOCKED_PATTERNS[*]}")

# Check for blocked patterns first
BLOCKED_FILES=$(echo "$STAGED_FILES" | grep -iE "$BLOCKED_PATTERN" || true)
if [ -n "$BLOCKED_FILES" ]; then
    echo -e "${RED}BLOCKED: Suspicious files detected in commit${NC}"
    echo ""
    echo -e "${YELLOW}Files:${NC}"
    echo "$BLOCKED_FILES" | sed 's/^/ - /'
    echo ""
    echo "If intentional, use: git commit --no-verify"
    exit 1
fi

# Check for files with unknown extensions
UNKNOWN_FILES=""
WARN_FILES=""
while IFS= read -r file; do
    [ -z "$file" ] && continue
    if echo "$file" | grep -qE "$BLOCKED_PATTERN"; then
        continue # Already handled above
    elif echo "$file" | grep -qE "$WARN_PATTERN"; then
        WARN_FILES="$WARN_FILES$file"$'\n'
    elif ! echo "$file" | grep -qE "$ALLOWED_PATTERN"; then
        UNKNOWN_FILES="$UNKNOWN_FILES$file"$'\n'
    fi
done <<< "$STAGED_FILES"

# Warn about .md files
if [ -n "$WARN_FILES" ]; then
    echo -e "${YELLOW}WARNING: Markdown files in commit:${NC}"
    echo "$WARN_FILES" | sed '/^$/d' | sed 's/^/ - /'
    echo ""
fi

# Block unknown extensions
if [ -n "$UNKNOWN_FILES" ]; then
    echo -e "${RED}BLOCKED: Files with unknown extensions:${NC}"
    echo "$UNKNOWN_FILES" | sed '/^$/d' | sed 's/^/ - /'
    echo ""
    echo "Allowed extensions: rs, toml, lock, yml, yaml, json, sh, html, css, js, md"
    echo "If intentional, use: git commit --no-verify"
    exit 1
fi

# Check for large files (>5MB)
LARGE_FILES=$(echo "$STAGED_FILES" | while read f; do
    if [ -f "$f" ]; then
        size=$(stat -f%z "$f" 2>/dev/null || stat -c%s "$f" 2>/dev/null || echo 0)
        if [ "$size" -gt 5242880 ]; then
            echo "$f ($(numfmt --to=iec $size 2>/dev/null || echo "${size}B"))"
        fi
    fi
done)
if [ -n "$LARGE_FILES" ]; then
    echo -e "${YELLOW}WARNING: Large files (>5MB) in commit:${NC}"
    echo "$LARGE_FILES" | sed 's/^/ - /'
    echo ""
fi

# Run cargo fmt check if Rust files changed
if git diff --cached --name-only | grep -q '\.rs$'; then
    if command -v cargo &> /dev/null; then
        if ! cargo fmt --check &> /dev/null; then
            echo -e "${RED}BLOCKED: cargo fmt check failed${NC}"
            echo "Run: cargo fmt"
            exit 1
        fi
    fi
fi

exit 0

.github/dependabot.yml (new file, 16 lines)

@@ -0,0 +1,16 @@
version: 2
updates:
  # GitHub Actions: keeps action versions in workflows up to date
  - package-ecosystem: github-actions
    directory: /
    schedule:
      interval: weekly
    labels: [dependencies, ci]
  # Cargo: security updates only, without minor/patch noise
  - package-ecosystem: cargo
    directory: /
    schedule:
      interval: weekly
    open-pull-requests-limit: 5
    labels: [dependencies, rust]

.github/workflows/ci.yml

@@ -11,7 +11,7 @@ jobs:
name: Test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/checkout@v6
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
@@ -27,3 +27,63 @@ jobs:
- name: Run tests
run: cargo test --package nora-registry
security:
name: Security
runs-on: ubuntu-latest
permissions:
contents: read
security-events: write # for uploading SARIF to GitHub Security tab
steps:
- uses: actions/checkout@v6
with:
fetch-depth: 0 # full history required for gitleaks
- name: Install Rust
uses: dtolnay/rust-toolchain@stable
- name: Cache cargo
uses: Swatinem/rust-cache@v2
# ── Secrets ────────────────────────────────────────────────────────────
- name: Gitleaks — scan for hardcoded secrets
run: |
curl -sL https://github.com/gitleaks/gitleaks/releases/download/v8.21.2/gitleaks_8.21.2_linux_x64.tar.gz \
| tar xz -C /usr/local/bin gitleaks
gitleaks detect --source . --exit-code 1 --report-format sarif --report-path gitleaks.sarif || true
continue-on-error: true # findings are reported, do not block the pipeline
# ── CVE in Rust dependencies ────────────────────────────────────────────
- name: Install cargo-audit
run: cargo install cargo-audit --locked
- name: cargo audit — RustSec advisory database
run: cargo audit
continue-on-error: true # warn only; known CVEs should not block CI until triaged
# ── Licenses, banned crates, supply chain policy ────────────────────────
- name: cargo deny — licenses and banned crates
uses: EmbarkStudios/cargo-deny-action@v2
with:
command: check
arguments: --all-features
# ── CVE scan of source tree and Cargo.lock ──────────────────────────────
- name: Trivy — filesystem scan (Cargo.lock + source)
if: always()
uses: aquasecurity/trivy-action@0.34.1
with:
scan-type: fs
scan-ref: .
format: sarif
output: trivy-fs.sarif
severity: HIGH,CRITICAL
exit-code: 0 # warn only; change to 1 to block the pipeline
- name: Upload Trivy fs results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v4
if: always()
with:
sarif_file: trivy-fs.sarif
category: trivy-fs

.github/workflows/release.yml

@@ -11,7 +11,7 @@ env:
jobs:
build:
name: Build & Push
runs-on: self-hosted
runs-on: [self-hosted, nora]
permissions:
contents: read
packages: write
@@ -19,8 +19,16 @@ jobs:
steps:
- uses: actions/checkout@v4
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Rust
run: |
echo "/home/github-runner/.cargo/bin" >> $GITHUB_PATH
echo "RUSTUP_HOME=/home/github-runner/.rustup" >> $GITHUB_ENV
echo "CARGO_HOME=/home/github-runner/.cargo" >> $GITHUB_ENV
- name: Build release binary (musl static)
run: |
cargo build --release --target x86_64-unknown-linux-musl --package nora-registry
cp target/x86_64-unknown-linux-musl/release/nora ./nora
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
@@ -32,49 +40,222 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Extract metadata
id: meta
# ── Alpine (standard) ────────────────────────────────────────────────────
- name: Extract metadata (alpine)
id: meta-alpine
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=semver,pattern={{major}}
type=raw,value=latest
- name: Build and push
- name: Build and push (alpine)
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile
platforms: linux/amd64
push: true
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha
cache-to: type=gha,mode=max
tags: ${{ steps.meta-alpine.outputs.tags }}
labels: ${{ steps.meta-alpine.outputs.labels }}
cache-from: type=gha,scope=alpine
cache-to: type=gha,mode=max,scope=alpine
# ── RED OS ───────────────────────────────────────────────────────────────
- name: Extract metadata (redos)
id: meta-redos
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
flavor: suffix=-redos,onlatest=true
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=redos
- name: Build and push (redos)
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile.redos
platforms: linux/amd64
push: true
tags: ${{ steps.meta-redos.outputs.tags }}
labels: ${{ steps.meta-redos.outputs.labels }}
cache-from: type=gha,scope=redos
cache-to: type=gha,mode=max,scope=redos
# ── Astra Linux SE ───────────────────────────────────────────────────────
- name: Extract metadata (astra)
id: meta-astra
uses: docker/metadata-action@v5
with:
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
flavor: suffix=-astra,onlatest=true
tags: |
type=semver,pattern={{version}}
type=semver,pattern={{major}}.{{minor}}
type=raw,value=astra
- name: Build and push (astra)
uses: docker/build-push-action@v5
with:
context: .
file: Dockerfile.astra
platforms: linux/amd64
push: true
tags: ${{ steps.meta-astra.outputs.tags }}
labels: ${{ steps.meta-astra.outputs.labels }}
cache-from: type=gha,scope=astra
cache-to: type=gha,mode=max,scope=astra
scan:
name: Scan (${{ matrix.name }})
runs-on: ubuntu-latest
needs: build
permissions:
contents: read
packages: read
security-events: write
strategy:
fail-fast: false
matrix:
include:
- name: alpine
suffix: ""
- name: redos
suffix: "-redos"
- name: astra
suffix: "-astra"
steps:
- name: Log in to Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set version tag (strip leading v)
id: ver
run: echo "tag=${GITHUB_REF_NAME#v}" >> $GITHUB_OUTPUT
# ── CVE scan of the pushed image ────────────────────────────────────────
# Images are FROM scratch — no OS packages, only binary CVE scan
- name: Trivy — image scan (${{ matrix.name }})
uses: aquasecurity/trivy-action@0.30.0
with:
scan-type: image
image-ref: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}${{ matrix.suffix }}
format: sarif
output: trivy-image-${{ matrix.name }}.sarif
severity: HIGH,CRITICAL
exit-code: 1 # block release on HIGH/CRITICAL vulnerabilities
- name: Upload Trivy image results to GitHub Security tab
uses: github/codeql-action/upload-sarif@v4
if: always()
with:
sarif_file: trivy-image-${{ matrix.name }}.sarif
category: trivy-image-${{ matrix.name }}
release:
name: GitHub Release
runs-on: ubuntu-latest
needs: build
needs: [build, scan]
permissions:
contents: write
packages: read # to pull image for SBOM generation
steps:
- uses: actions/checkout@v4
- name: Log in to Container Registry
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Set version tag (strip leading v)
id: ver
run: echo "tag=${GITHUB_REF_NAME#v}" >> $GITHUB_OUTPUT
# ── Binary — extract from Docker image ──────────────────────────────────
- name: Extract binary from image
run: |
docker create --name nora-extract \
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}
docker cp nora-extract:/usr/local/bin/nora ./nora-linux-amd64
docker rm nora-extract
chmod +x ./nora-linux-amd64
sha256sum ./nora-linux-amd64 > nora-linux-amd64.sha256
echo "Binary size: $(du -sh nora-linux-amd64 | cut -f1)"
cat nora-linux-amd64.sha256
# ── SBOM — Software Bill of Materials ───────────────────────────────────
- name: Generate SBOM (SPDX)
uses: anchore/sbom-action@v0
with:
image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}
format: spdx-json
output-file: nora-${{ github.ref_name }}.sbom.spdx.json
registry-username: ${{ github.actor }}
registry-password: ${{ secrets.GITHUB_TOKEN }}
- name: Generate SBOM (CycloneDX)
uses: anchore/sbom-action@v0
with:
image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}
format: cyclonedx-json
output-file: nora-${{ github.ref_name }}.sbom.cdx.json
registry-username: ${{ github.actor }}
registry-password: ${{ secrets.GITHUB_TOKEN }}
- name: Create Release
uses: softprops/action-gh-release@v1
with:
generate_release_notes: true
files: |
nora-linux-amd64
nora-linux-amd64.sha256
nora-${{ github.ref_name }}.sbom.spdx.json
nora-${{ github.ref_name }}.sbom.cdx.json
body: |
## Docker
## Install
```bash
curl -fsSL https://getnora.io/install.sh | sh
```
Or download the binary directly:
```bash
curl -LO https://github.com/${{ github.repository }}/releases/download/${{ github.ref_name }}/nora-linux-amd64
chmod +x nora-linux-amd64
sudo mv nora-linux-amd64 /usr/local/bin/nora
```
## Docker
**Alpine (standard):**
```bash
docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}
```
**RED OS:**
```bash
docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}-redos
```
**Astra Linux SE:**
```bash
docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}-astra
```
## Changelog
See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md)

.gitignore (12 lines changed)

@@ -5,3 +5,15 @@ data/
.env.*
*.log
internal config
# Internal files
SESSION*.md
TODO.md
ROADMAP*.md
docs-site/
docs/
*.txt
## Internal files
.internal/
examples/

CHANGELOG.md

@@ -4,18 +4,159 @@ All notable changes to NORA will be documented in this file.
---
## [0.3.0] - 2026-01-30
## [0.2.24] - 2026-02-24
### Added / Добавлено
- `install.sh` installer script available at <https://getnora.io/install.sh>: `curl -fsSL https://getnora.io/install.sh | sh`
- Скрипт установки `install.sh` доступен на <https://getnora.io/install.sh>
### CI/CD
- Restore Astra Linux SE Docker image build, Trivy scan, and release artifact (`-astra` tag)
- Восстановлена сборка Docker-образа для Astra Linux SE, сканирование Trivy и артефакт релиза (тег `-astra`)
---
## [0.2.23] - 2026-02-24
### Added / Добавлено
- Binary (`nora`) + SHA-256 checksum attached to every GitHub Release
- Бинарник (`nora`) и SHA-256 контрольная сумма прикреплены к каждому релизу GitHub
### Fixed / Исправлено
- Security: bump `prometheus` 0.13 → 0.14 (CVE-2025-53605) and `bytes` 1.11.0 → 1.11.1 (CVE-2026-25541)
- Безопасность: обновлены `prometheus` 0.13 → 0.14 (CVE-2025-53605) и `bytes` 1.11.0 → 1.11.1 (CVE-2026-25541)
### CI/CD
- Add Dependabot for automated dependency updates / Добавлен Dependabot для автоматического обновления зависимостей
- Pin `aquasecurity/trivy-action` to `0.30.0`, bump to `0.34.1`; scan gate blocks release on HIGH/CRITICAL CVE
- Закреплён `trivy-action@0.30.0`, обновлён до `0.34.1`; сканирование блокирует релиз при HIGH/CRITICAL CVE
- Upgrade `codeql-action` v3 → v4 / Обновлён `codeql-action` v3 → v4
- Fix `deny.toml` deprecated keys (`copyleft`, `unlicensed` removed in `cargo-deny`) / Исправлены устаревшие ключи в `deny.toml`
- Fix binary path in Docker image (`/usr/local/bin/nora`) / Исправлен путь бинарника в Docker-образе
- Pin build job to `nora` runner label / Джоб сборки закреплён за runner'ом с меткой `nora`
- Allow `CDLA-Permissive-2.0` license (`webpki-roots`) / Разрешена лицензия `CDLA-Permissive-2.0`
- Ignore `RUSTSEC-2025-0119` (unmaintained transitive dep `number_prefix` via `indicatif`)
### Dependencies / Зависимости
- `chrono` 0.4.43 → 0.4.44
- `quick-xml` 0.31.0 → 0.39.2
- `toml` 0.8.23 → 1.0.3+spec-1.1.0
- `flate2` 1.1.8 → 1.1.9
- `softprops/action-gh-release` 1 → 2
- `actions/checkout` 4 → 6
- `docker/build-push-action` 5 → 6
### Documentation / Документация
- Replace text title with SVG logo; `O` styled in blue-600 / Заголовок заменён SVG-логотипом; буква `O` стилизована в blue-600
---
## [0.2.22] - 2026-02-24
### Changed / Изменено
- First stable release with Docker images published to container registry
- Первый стабильный релиз с Docker-образами, опубликованными в container registry
---
## [0.2.21] - 2026-02-24
### CI/CD
- Consolidate all Docker builds into a single job to fix runner network issues / Все Docker-сборки объединены в один job для устранения сетевых проблем runner'а
- Build musl static binary for maximum portability / Сборка musl-бинарника для максимальной переносимости
- Add security scanning (Trivy) + SBOM generation to release pipeline / Добавлено сканирование безопасности (Trivy) и генерация SBOM в pipeline релиза
- Add Cargo cache to speed up builds / Добавлен кэш Cargo для ускорения сборок
- Replace `gitleaks` GitHub Action with CLI (no license requirement) / `gitleaks` Action заменён CLI-вызовом (лицензия не требуется)
- Use GitHub-runner's own Rust toolchain (avoid path conflicts) / Используется Rust toolchain самого GitHub-runner'а
- Use shared runner filesystem instead of artifact API (avoids network upload latency) / Общая файловая система runner'а вместо artifact API
- Remove Astra Linux build temporarily / Сборка для Astra Linux временно удалена
---
## [0.2.20] - 2026-02-23
### Added / Добавлено
- Parallel CI builds for Astra Linux and RedOS / Параллельная сборка в CI для Astra Linux и RedOS
### Changed / Изменено
- Use `FROM scratch` base image for Astra Linux and RedOS Docker builds / Базовый образ `FROM scratch` для Docker-сборок Astra Linux и RedOS
- Shared `reqwest::Client` across all registry handlers / Общий `reqwest::Client` для всех registry-обработчиков
### Fixed / Исправлено
- Auth: replace `starts_with` with explicit `matches!` for token path checks / Аутентификация: `starts_with` заменён явной проверкой `matches!` для путей с токенами
- Remove unnecessary QEMU step for amd64-only builds / Удалён лишний шаг QEMU для amd64-сборок
---
## [0.2.19] - 2026-01-31
### Added / Добавлено
- Pre-commit hook to prevent accidental commits of sensitive files / Pre-commit хук для защиты от случайного коммита чувствительных файлов
- README badges: build status, version, license / Бейджи в README: статус сборки, версия, лицензия
### Performance / Производительность
- In-memory repository index with pagination for faster dashboard load / Индекс репозитория в памяти с пагинацией для ускорения загрузки дашборда
### Fixed / Исправлено
- Use `div_ceil` instead of manual ceiling division / Использован `div_ceil` вместо ручной реализации деления с округлением вверх
---
## [0.2.18] - 2026-01-31
### Changed
- Logo styling refinements
---
## [0.2.17] - 2026-01-31
### Added
- Copyright headers to all source files (Volkov Pavel | DevITWay)
- SPDX-License-Identifier: MIT in all .rs files
---
## [0.2.16] - 2026-01-31
### Changed
- N○RA branding: stylized O logo across dashboard
- Fixed O letter alignment in logo
---
## [0.2.15] - 2026-01-31
### Fixed
- Code formatting (cargo fmt)
---
## [0.2.14] - 2026-01-31
### Fixed
- Docker dashboard now shows actual image size from manifest layers (config + layers sum)
- Previously showed only manifest file size (~500 B instead of actual image size)
---
## [0.2.13] - 2026-01-31
### Fixed
- npm dashboard now shows correct version count and package sizes
- Parses metadata.json for versions, dist.unpackedSize, and time.modified
- Previously showed 0 versions / 0 B for all packages
---
## [0.2.12] - 2026-01-30
### Added
#### Configurable Rate Limiting
- Rate limits now configurable via `config.toml` and environment variables
- New config section `[rate_limit]` with 6 parameters:
- `auth_rps` / `auth_burst` - Authentication endpoints (brute-force protection)
- `upload_rps` / `upload_burst` - Upload endpoints (Docker push, etc.)
- `general_rps` / `general_burst` - General API endpoints
- New config section `[rate_limit]` with parameters: `auth_rps`, `auth_burst`, `upload_rps`, `upload_burst`, `general_rps`, `general_burst`
- Environment variables: `NORA_RATE_LIMIT_{AUTH|UPLOAD|GENERAL}_{RPS|BURST}`
- Rate limit configuration logged at startup
#### Secrets Provider Architecture
- Trait-based secrets management (`SecretsProvider` trait)
@@ -23,14 +164,78 @@ All notable changes to NORA will be documented in this file.
- Protected secrets with `zeroize` (memory zeroed on drop)
- Redacted Debug impl prevents secret leakage in logs
- New config section `[secrets]` with `provider` and `clear_env` options
- Foundation for future AWS Secrets Manager, Vault, K8s integration
#### Docker Image Metadata
- Support for image metadata retrieval
#### Documentation
- Bilingual onboarding guide (EN/RU)
---
## [0.2.11] - 2026-01-26
### Added
- Internationalization (i18n) support
- PyPI registry proxy
- UI improvements
---
## [0.2.10] - 2026-01-26
### Changed
- Rate limiting functions now accept `&RateLimitConfig` parameter
- Improved error messages with `.expect()` instead of `.unwrap()`
- Dark theme applied to all UI pages
---
## [0.2.9] - 2026-01-26
### Changed
- Version bump release
---
## [0.2.8] - 2026-01-26
### Added
- Dashboard endpoint added to OpenAPI documentation
---
## [0.2.7] - 2026-01-26
### Added
- Dynamic version display in UI sidebar
---
## [0.2.6] - 2026-01-26
### Added
#### Dashboard Metrics
- Global stats panel: downloads, uploads, artifacts, cache hit rate, storage
- Extended registry cards with artifact count, size, counters
- Activity log (last 20 events)
#### UI
- Dark theme (bg: #0f172a, cards: #1e293b)
---
## [0.2.5] - 2026-01-26
### Fixed
- Rate limiting was hardcoded in v0.2.0, now user-configurable
- Docker push/pull: added PATCH endpoint for chunked uploads
---
## [0.2.4] - 2026-01-26
### Fixed
- Rate limiting: health/metrics endpoints now exempt
- Increased upload rate limits for Docker parallel requests
---
@@ -82,7 +287,6 @@ All notable changes to NORA will be documented in this file.
- JSON error responses with request_id support
### Changed
- `StorageError` now uses `thiserror` derive macro
- `TokenError` now uses `thiserror` derive macro
- Storage wrapper validates keys before delegating to backend
@@ -90,7 +294,6 @@ All notable changes to NORA will be documented in this file.
- Body size limit set to 100MB default via `DefaultBodyLimit`
### Dependencies Added
- `thiserror = "2"` - typed error handling
- `tower_governor = "0.8"` - rate limiting
- `governor = "0.10"` - rate limiting backend
@@ -98,7 +301,6 @@ All notable changes to NORA will be documented in this file.
- `wiremock = "0.6"` (dev) - HTTP mocking for S3 tests
### Files Added
- `src/validation.rs` - input validation module
- `src/migrate.rs` - storage migration module
- `src/error.rs` - application error types
@@ -110,7 +312,6 @@ All notable changes to NORA will be documented in this file.
## [0.1.0] - 2026-01-24
### Added
- Multi-protocol support: Docker Registry v2, Maven, npm, Cargo, PyPI
- Web UI dashboard
- Swagger UI (`/api-docs`)
@@ -125,7 +326,6 @@ All notable changes to NORA will be documented in this file.
- Graceful shutdown (SIGTERM/SIGINT)
- Backup/restore commands
---
---
# Журнал изменений (RU)
@@ -134,6 +334,96 @@ All notable changes to NORA will be documented in this file.
---
## [0.2.12] - 2026-01-30
### Добавлено
#### Настраиваемый Rate Limiting
- Rate limits настраиваются через `config.toml` и переменные окружения
- Новая секция `[rate_limit]` с параметрами: `auth_rps`, `auth_burst`, `upload_rps`, `upload_burst`, `general_rps`, `general_burst`
- Переменные окружения: `NORA_RATE_LIMIT_{AUTH|UPLOAD|GENERAL}_{RPS|BURST}`
#### Архитектура Secrets Provider
- Trait-based управление секретами (`SecretsProvider` trait)
- ENV provider по умолчанию (12-Factor App паттерн)
- Защищённые секреты с `zeroize` (память обнуляется при drop)
- Redacted Debug impl предотвращает утечку секретов в логи
- Новая секция `[secrets]` с опциями `provider` и `clear_env`
#### Docker Image Metadata
- Поддержка получения метаданных образов
#### Документация
- Двуязычный onboarding guide (EN/RU)
---
## [0.2.11] - 2026-01-26
### Добавлено
- Поддержка интернационализации (i18n)
- PyPI registry proxy
- Улучшения UI
---
## [0.2.10] - 2026-01-26
### Изменено
- Тёмная тема применена ко всем страницам UI
---
## [0.2.9] - 2026-01-26
### Изменено
- Релиз с обновлением версии
---
## [0.2.8] - 2026-01-26
### Добавлено
- Dashboard endpoint добавлен в OpenAPI документацию
---
## [0.2.7] - 2026-01-26
### Добавлено
- Динамическое отображение версии в сайдбаре UI
---
## [0.2.6] - 2026-01-26
### Добавлено
#### Dashboard Metrics
- Глобальная панель статистики: downloads, uploads, artifacts, cache hit rate, storage
- Расширенные карточки реестров с количеством артефактов, размером, счётчиками
- Лог активности (последние 20 событий)
#### UI
- Тёмная тема (bg: #0f172a, cards: #1e293b)
---
## [0.2.5] - 2026-01-26
### Исправлено
- Docker push/pull: добавлен PATCH endpoint для chunked uploads
---
## [0.2.4] - 2026-01-26
### Исправлено
- Rate limiting: health/metrics endpoints теперь исключены
- Увеличены лимиты upload для параллельных Docker запросов
---
## [0.2.0] - 2026-01-25
### Добавлено
@@ -182,7 +472,6 @@ All notable changes to NORA will be documented in this file.
- JSON-ответы об ошибках с поддержкой request_id
### Изменено
- `StorageError` теперь использует макрос `thiserror`
- `TokenError` теперь использует макрос `thiserror`
- Storage wrapper валидирует ключи перед делегированием backend
@@ -190,7 +479,6 @@ All notable changes to NORA will be documented in this file.
- Лимит размера body установлен в 100MB через `DefaultBodyLimit`
### Добавлены зависимости
- `thiserror = "2"` - типизированная обработка ошибок
- `tower_governor = "0.8"` - rate limiting
- `governor = "0.10"` - backend для rate limiting
@@ -198,7 +486,6 @@ All notable changes to NORA will be documented in this file.
- `wiremock = "0.6"` (dev) - HTTP-мокирование для S3 тестов
### Добавлены файлы
- `src/validation.rs` - модуль валидации ввода
- `src/migrate.rs` - модуль миграции хранилища
- `src/error.rs` - типы ошибок приложения
@@ -210,7 +497,6 @@ All notable changes to NORA will be documented in this file.
## [0.1.0] - 2026-01-24
### Добавлено
- Мульти-протокольная поддержка: Docker Registry v2, Maven, npm, Cargo, PyPI
- Web UI дашборд
- Swagger UI (`/api-docs`)

CONTRIBUTING.md

@@ -1,100 +1,71 @@
# Contributing to NORA
Thanks for your interest in contributing to NORA!
Thank you for your interest in contributing to NORA!
## Getting Started
1. **Fork** the repository
2. **Clone** your fork:
```bash
git clone https://github.com/your-username/nora.git
cd nora
```
3. **Create a branch**:
```bash
git checkout -b feature/your-feature-name
```
1. Fork the repository
2. Clone your fork: `git clone https://github.com/YOUR_USERNAME/nora.git`
3. Create a branch: `git checkout -b feature/your-feature`
## Development Setup
### Prerequisites
- Rust 1.75+ (`rustup update`)
- Docker (for testing)
- Git
### Build
```bash
# Install Rust (if needed)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Enable pre-commit hooks (important!)
git config core.hooksPath .githooks
# Build
cargo build
```
### Run
```bash
cargo run --bin nora
```
### Test
```bash
# Run tests
cargo test
cargo clippy
cargo fmt --check
# Run locally
cargo run --bin nora -- serve
```
## Making Changes
1. **Write code** following Rust conventions
2. **Add tests** for new features
3. **Update docs** if needed
4. **Run checks**:
```bash
cargo fmt
cargo clippy -- -D warnings
cargo test
```
## Commit Messages
Follow [Conventional Commits](https://www.conventionalcommits.org/):
- `feat:` - New feature
- `fix:` - Bug fix
- `docs:` - Documentation
- `test:` - Tests
- `refactor:` - Code refactoring
- `chore:` - Maintenance
Example:
```bash
git commit -m "feat: add S3 storage migration"
```
## Pull Request Process
1. **Push** to your fork:
```bash
git push origin feature/your-feature-name
```
2. **Open a Pull Request** on GitHub
3. **Wait for review** - maintainers will review your PR
## Code Style
- Follow Rust conventions
- Use `cargo fmt` for formatting
- Pass `cargo clippy` with no warnings
- Write meaningful commit messages
- Run `cargo fmt` before committing
- Run `cargo clippy` and fix warnings
- Follow Rust naming conventions
## Questions?
## Pull Request Process
- Open an [Issue](https://github.com/getnora-io/nora/issues)
- Ask in [Discussions](https://github.com/getnora-io/nora/discussions)
- Reach out on [Telegram](https://t.me/DevITWay)
1. Update documentation if needed
2. Add tests for new features
3. Ensure all tests pass: `cargo test`
4. Ensure code is formatted: `cargo fmt --check`
5. Ensure no clippy warnings: `cargo clippy`
---
## Commit Messages
Built with love by the NORA community
Use conventional commits:
- `feat:` - new feature
- `fix:` - bug fix
- `docs:` - documentation
- `style:` - formatting
- `refactor:` - code refactoring
- `test:` - adding tests
- `chore:` - maintenance
Example: `feat: add OAuth2 authentication`
## Reporting Issues
- Use GitHub Issues
- Include steps to reproduce
- Include NORA version and OS
## License
By contributing, you agree that your contributions will be licensed under the MIT License.
## Contact
- Telegram: [@DevITWay](https://t.me/DevITWay)
- GitHub Issues: [getnora-io/nora](https://github.com/getnora-io/nora/issues)

Cargo.lock (generated, 171 lines changed)

@@ -234,15 +234,15 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "bytes"
version = "1.11.0"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
[[package]]
name = "cc"
version = "1.2.54"
version = "1.2.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583"
checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29"
dependencies = [
"find-msvc-tools",
"shlex",
@@ -262,9 +262,9 @@ checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "chrono"
version = "0.4.43"
version = "0.4.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118"
checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0"
dependencies = [
"iana-time-zone",
"js-sys",
@@ -286,9 +286,9 @@ dependencies = [
[[package]]
name = "clap"
version = "4.5.54"
version = "4.5.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394"
checksum = "a75ca66430e33a14957acc24c5077b503e7d374151b2b4b3a10c83b4ceb4be0e"
dependencies = [
"clap_builder",
"clap_derive",
@@ -296,9 +296,9 @@ dependencies = [
[[package]]
name = "clap_builder"
version = "4.5.54"
version = "4.5.56"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00"
checksum = "793207c7fa6300a0608d1080b858e5fdbe713cdc1c8db9fb17777d8a13e63df0"
dependencies = [
"anstream",
"anstyle",
@@ -308,9 +308,9 @@ dependencies = [
[[package]]
name = "clap_derive"
version = "4.5.49"
version = "4.5.55"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671"
checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5"
dependencies = [
"heck",
"proc-macro2",
@@ -434,6 +434,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer",
"crypto-common",
"subtle",
]
[[package]]
@@ -488,15 +489,15 @@ dependencies = [
[[package]]
name = "find-msvc-tools"
version = "0.1.8"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db"
checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582"
[[package]]
name = "flate2"
version = "1.1.8"
version = "1.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b375d6465b98090a5f25b1c7703f3859783755aa9a80433b36e0379a3ec2f369"
checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c"
dependencies = [
"crc32fast",
"miniz_oxide",
@@ -737,6 +738,21 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
[[package]]
name = "hmac"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
dependencies = [
"digest",
]
[[package]]
name = "http"
version = "1.4.0"
@@ -861,9 +877,9 @@ dependencies = [
[[package]]
name = "iana-time-zone"
version = "0.1.64"
version = "0.1.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb"
checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470"
dependencies = [
"android_system_properties",
"core-foundation-sys",
@@ -1185,7 +1201,7 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21"
[[package]]
name = "nora-cli"
version = "0.2.12"
version = "0.2.22"
dependencies = [
"clap",
"flate2",
@@ -1199,7 +1215,7 @@ dependencies = [
[[package]]
name = "nora-registry"
version = "0.2.12"
version = "0.2.22"
dependencies = [
"async-trait",
"axum",
@@ -1209,6 +1225,8 @@ dependencies = [
"clap",
"flate2",
"governor",
"hex",
"hmac",
"httpdate",
"indicatif",
"lazy_static",
@@ -1235,7 +1253,7 @@ dependencies = [
[[package]]
name = "nora-storage"
version = "0.2.12"
version = "0.2.22"
dependencies = [
"axum",
"base64",
@@ -1394,9 +1412,9 @@ dependencies = [
[[package]]
name = "prometheus"
version = "0.13.4"
version = "0.14.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1"
checksum = "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a"
dependencies = [
"cfg-if",
"fnv",
@@ -1404,14 +1422,28 @@ dependencies = [
"memchr",
"parking_lot",
"protobuf",
"thiserror 1.0.69",
"thiserror 2.0.18",
]
[[package]]
name = "protobuf"
version = "2.28.0"
version = "3.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "106dd99e98437432fed6519dedecfade6a06a73bb7b2a1e019fdd2bee5778d94"
checksum = "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4"
dependencies = [
"once_cell",
"protobuf-support",
"thiserror 1.0.69",
]
[[package]]
name = "protobuf-support"
version = "3.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6"
dependencies = [
"thiserror 1.0.69",
]
[[package]]
name = "quanta"
@@ -1430,9 +1462,9 @@ dependencies = [
[[package]]
name = "quick-xml"
version = "0.31.0"
version = "0.39.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33"
checksum = "958f21e8e7ceb5a1aa7fa87fab28e7c75976e0bfe7e23ff069e0a260f894067d"
dependencies = [
"memchr",
"serde",
@@ -1818,11 +1850,11 @@ dependencies = [
[[package]]
name = "serde_spanned"
version = "0.6.9"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776"
dependencies = [
"serde",
"serde_core",
]
[[package]]
@@ -2121,50 +2153,48 @@ dependencies = [
[[package]]
name = "toml"
version = "0.8.23"
version = "1.0.3+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
dependencies = [
"serde",
"serde_spanned",
"toml_datetime",
"toml_edit",
]
[[package]]
name = "toml_datetime"
version = "0.6.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
dependencies = [
"serde",
]
[[package]]
name = "toml_edit"
version = "0.22.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
checksum = "c7614eaf19ad818347db24addfa201729cf2a9b6fdfd9eb0ab870fcacc606c0c"
dependencies = [
"indexmap",
"serde",
"serde_core",
"serde_spanned",
"toml_datetime",
"toml_write",
"toml_parser",
"toml_writer",
"winnow",
]
[[package]]
name = "toml_write"
version = "0.1.2"
name = "toml_datetime"
version = "1.0.0+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
checksum = "32c2555c699578a4f59f0cc68e5116c8d7cabbd45e1409b989d4be085b53f13e"
dependencies = [
"serde_core",
]
[[package]]
name = "toml_parser"
version = "1.0.9+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4"
dependencies = [
"winnow",
]
[[package]]
name = "toml_writer"
version = "1.0.6+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607"
[[package]]
name = "tonic"
version = "0.14.2"
version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203"
checksum = "a286e33f82f8a1ee2df63f4fa35c0becf4a85a0cb03091a15fd7bf0b402dc94a"
dependencies = [
"async-trait",
"axum",
@@ -2838,9 +2868,6 @@ name = "winnow"
version = "0.7.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829"
dependencies = [
"memchr",
]
[[package]]
name = "wiremock"
@@ -2912,18 +2939,18 @@ dependencies = [
[[package]]
name = "zerocopy"
version = "0.8.33"
version = "0.8.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd"
checksum = "7456cf00f0685ad319c5b1693f291a650eaf345e941d082fc4e03df8a03996ac"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.8.33"
version = "0.8.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1"
checksum = "1328722bbf2115db7e19d69ebcc15e795719e2d66b60827c6a69a117365e37a0"
dependencies = [
"proc-macro2",
"quote",
@@ -3020,15 +3047,15 @@ dependencies = [
[[package]]
name = "zlib-rs"
version = "0.5.5"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3"
checksum = "c745c48e1007337ed136dc99df34128b9faa6ed542d80a1c673cf55a6d7236c8"
[[package]]
name = "zmij"
version = "1.0.16"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65"
checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439"
[[package]]
name = "zopfli"


@@ -7,7 +7,7 @@ members = [
]
[workspace.package]
version = "0.2.12"
version = "0.2.24"
edition = "2021"
license = "MIT"
authors = ["DevITWay <devitway@gmail.com>"]
@@ -24,3 +24,5 @@ tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
sha2 = "0.10"
async-trait = "0.1"
hmac = "0.12"
hex = "0.4"


@@ -1,58 +1,11 @@
# syntax=docker/dockerfile:1.4
# Build stage
FROM rust:1.83-alpine AS builder
RUN apk add --no-cache musl-dev curl
WORKDIR /app
# Copy manifests
COPY Cargo.toml Cargo.lock ./
COPY nora-registry/Cargo.toml nora-registry/
COPY nora-storage/Cargo.toml nora-storage/
COPY nora-cli/Cargo.toml nora-cli/
# Create dummy sources for dependency caching
RUN mkdir -p nora-registry/src nora-storage/src nora-cli/src && \
echo "fn main() {}" > nora-registry/src/main.rs && \
echo "fn main() {}" > nora-storage/src/main.rs && \
echo "fn main() {}" > nora-cli/src/main.rs
# Build dependencies only (with cache)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/app/target \
cargo build --release --package nora-registry && \
rm -rf nora-registry/src nora-storage/src nora-cli/src
# Copy real sources
COPY nora-registry/src nora-registry/src
COPY nora-storage/src nora-storage/src
COPY nora-cli/src nora-cli/src
# Build release binary (with cache)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/app/target \
touch nora-registry/src/main.rs && \
cargo build --release --package nora-registry && \
cp /app/target/release/nora /usr/local/bin/nora
# Runtime stage
# Binary is pre-built by CI (cargo build --release) and passed via context
FROM alpine:3.20
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates && mkdir -p /data
WORKDIR /app
COPY nora /usr/local/bin/nora
# Copy binary
COPY --from=builder /usr/local/bin/nora /usr/local/bin/nora
# Create data directory
RUN mkdir -p /data
# Default environment
ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
@@ -64,5 +17,5 @@ EXPOSE 4000
VOLUME ["/data"]
ENTRYPOINT ["nora"]
ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]

Dockerfile.astra Normal file

@@ -0,0 +1,28 @@
# syntax=docker/dockerfile:1.4
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with Astra Linux SE (FSTEC certified)
# To switch to official base: replace FROM scratch with
# FROM registry.astralinux.ru/library/alse:latest
# RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/*
FROM alpine:3.20 AS certs
RUN apk add --no-cache ca-certificates
FROM scratch
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY nora /usr/local/bin/nora
ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
ENV NORA_STORAGE_MODE=local
ENV NORA_STORAGE_PATH=/data/storage
ENV NORA_AUTH_TOKEN_STORAGE=/data/tokens
EXPOSE 4000
VOLUME ["/data"]
ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]

Dockerfile.redos Normal file

@@ -0,0 +1,28 @@
# syntax=docker/dockerfile:1.4
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with RED OS (FSTEC certified)
# To switch to official base: replace FROM scratch with
# FROM registry.red-soft.ru/redos/redos:8
# RUN dnf install -y ca-certificates && dnf clean all
FROM alpine:3.20 AS certs
RUN apk add --no-cache ca-certificates
FROM scratch
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY nora /usr/local/bin/nora
ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
ENV NORA_STORAGE_MODE=local
ENV NORA_STORAGE_PATH=/data/storage
ENV NORA_AUTH_TOKEN_STORAGE=/data/tokens
EXPOSE 4000
VOLUME ["/data"]
ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]


@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2026 DevITWay
Copyright (c) 2026 Volkov Pavel | DevITWay
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal


@@ -1,6 +1,10 @@
# NORA
<img src="logo.jpg" alt="NORA" height="120" />
[![License: MIT](https://img.shields.io/badge/License-MIT-blue.svg)](LICENSE)
[![Release](https://img.shields.io/github/v/release/getnora-io/nora)](https://github.com/getnora-io/nora/releases)
[![CI](https://img.shields.io/github/actions/workflow/status/getnora-io/nora/ci.yml?label=CI)](https://github.com/getnora-io/nora/actions)
[![Rust](https://img.shields.io/badge/rust-%23000000.svg?logo=rust&logoColor=white)](https://www.rust-lang.org/)
[![Telegram](https://img.shields.io/badge/Telegram-DevITWay-blue?logo=telegram)](https://t.me/DevITWay)
> **Your Cloud-Native Artifact Registry**
@@ -40,7 +44,7 @@ Fast. Organized. Feel at Home.
### Docker (Recommended)
```bash
docker run -d -p 4000:4000 -v nora-data:/data getnora/nora
docker run -d -p 4000:4000 -v nora-data:/data ghcr.io/getnora-io/nora:latest
```
### From Source
@@ -191,4 +195,4 @@ Copyright (c) 2026 DevITWay
---
**NORA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)
**🐿️ N○RA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)

SECURITY.md Normal file

@@ -0,0 +1,53 @@
# Security Policy
## Supported Versions
| Version | Supported |
| ------- | ------------------ |
| 0.2.x | :white_check_mark: |
| < 0.2 | :x: |
## Reporting a Vulnerability
**Please do not report security vulnerabilities through public GitHub issues.**
Instead, please report them via:
1. **Email:** devitway@gmail.com
2. **Telegram:** [@DevITWay](https://t.me/DevITWay) (private message)
### What to Include
- Type of vulnerability
- Steps to reproduce
- Potential impact
- Suggested fix (if any)
### Response Timeline
- **Initial response:** within 48 hours
- **Status update:** within 7 days
- **Fix timeline:** depends on severity
### Severity Levels
| Severity | Description | Response |
|----------|-------------|----------|
| Critical | Remote code execution, auth bypass | Immediate fix |
| High | Data exposure, privilege escalation | Fix within 7 days |
| Medium | Limited impact vulnerabilities | Fix in next release |
| Low | Minor issues | Scheduled fix |
## Security Best Practices
When deploying NORA:
1. **Enable authentication** - Set `NORA_AUTH_ENABLED=true`
2. **Use HTTPS** - Put NORA behind a reverse proxy with TLS
3. **Limit network access** - Use firewall rules
4. **Regular updates** - Keep NORA updated to the latest version
5. **Secure credentials** - Use strong passwords, rotate tokens
## Acknowledgments
We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities.


@@ -1,152 +0,0 @@
# NORA Development Session Notes
---
## 2026-01-26 - Dashboard Expansion
### Iteration 1: Planning & Exploration
- Received detailed implementation plan for dashboard expansion
- Explored codebase structure using Task agent
- Identified key files to modify:
- `main.rs` - AppState
- `ui/api.rs`, `ui/mod.rs`, `ui/components.rs`, `ui/templates.rs`
- `registry/docker.rs`, `npm.rs`, `maven.rs`, `cargo_registry.rs`
### Iteration 2: Infrastructure (Phase 1)
- Created `src/dashboard_metrics.rs`:
- `DashboardMetrics` struct with AtomicU64 counters
- Per-registry tracking (docker, npm, maven, cargo, pypi)
- `record_download()`, `record_upload()`, `record_cache_hit/miss()`
- `cache_hit_rate()` calculation
- Created `src/activity_log.rs`:
- `ActionType` enum: Pull, Push, CacheHit, ProxyFetch
- `ActivityEntry` struct with timestamp, action, artifact, registry, source
- `ActivityLog` with RwLock<VecDeque> (bounded to 50 entries); a minimal sketch of both modules follows below
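For reference, a minimal sketch of the two modules described above. It is an illustration, not the committed code: per-registry counters and the real `ActivityEntry` struct are collapsed to simple types, and std's `RwLock` stands in for the `parking_lot` one the project imports.
```rust
use std::collections::VecDeque;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::RwLock;

#[derive(Default)]
pub struct DashboardMetrics {
    downloads: AtomicU64,
    uploads: AtomicU64,
    cache_hits: AtomicU64,
    cache_misses: AtomicU64,
}

impl DashboardMetrics {
    pub fn record_download(&self) {
        self.downloads.fetch_add(1, Ordering::Relaxed);
    }
    pub fn record_upload(&self) {
        self.uploads.fetch_add(1, Ordering::Relaxed);
    }
    pub fn record_cache_hit(&self) {
        self.cache_hits.fetch_add(1, Ordering::Relaxed);
    }
    pub fn record_cache_miss(&self) {
        self.cache_misses.fetch_add(1, Ordering::Relaxed);
    }
    /// hits / (hits + misses) as a percentage; 0.0 before any traffic.
    pub fn cache_hit_rate(&self) -> f64 {
        let hits = self.cache_hits.load(Ordering::Relaxed) as f64;
        let misses = self.cache_misses.load(Ordering::Relaxed) as f64;
        if hits + misses == 0.0 {
            0.0
        } else {
            hits / (hits + misses) * 100.0
        }
    }
}

/// Bounded log: pushing past capacity evicts the oldest entry.
pub struct ActivityLog {
    entries: RwLock<VecDeque<String>>,
    capacity: usize,
}

impl ActivityLog {
    pub fn new(capacity: usize) -> Self {
        Self {
            entries: RwLock::new(VecDeque::with_capacity(capacity)),
            capacity,
        }
    }
    pub fn push(&self, entry: String) {
        let mut q = self.entries.write().unwrap();
        if q.len() == self.capacity {
            q.pop_front(); // oldest evicted
        }
        q.push_back(entry);
    }
}
```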
### Iteration 3: AppState Update (Phase 2)
- Updated `main.rs`:
- Added `mod activity_log` and `mod dashboard_metrics`
- Extended `AppState` with `metrics: DashboardMetrics` and `activity: ActivityLog`
- Initialized in `run_server()`
### Iteration 4: API Endpoint (Phase 3)
- Updated `ui/api.rs`:
- Added structs: `DashboardResponse`, `GlobalStats`, `RegistryCardStats`, `MountPoint`
- Implemented `api_dashboard()` - aggregates all metrics, storage stats, activity
- Updated `ui/mod.rs`:
- Added route `/api/ui/dashboard`
- Modified `dashboard()` handler to use new response
### Iteration 5: Dark Theme UI (Phase 4)
- Updated `ui/components.rs` with ~400 new lines:
- `layout_dark()` - dark theme wrapper (#0f172a background)
- `sidebar_dark()`, `header_dark()` - dark theme navigation
- `render_global_stats()` - 5-column stats grid
- `render_registry_card()` - extended card with metrics
- `render_mount_points_table()` - registry paths and proxies
- `render_activity_row()`, `render_activity_log()` - activity display
- `render_polling_script()` - 5-second auto-refresh JS
### Iteration 6: Dashboard Template (Phase 5)
- Updated `ui/templates.rs`:
- Refactored `render_dashboard()` to accept `DashboardResponse`
- Added uptime display, global stats, registry cards grid
- Added mount points table and activity log
- Added `format_relative_time()` helper
### Iteration 7: Registry Instrumentation (Phase 6)
- `registry/docker.rs`:
- `download_blob()` - record download + cache hit + activity
- `get_manifest()` - record download + cache hit + activity
- `upload_blob()` - record upload + activity
- `put_manifest()` - record upload + activity
- `registry/npm.rs`:
- Cache hit tracking for local storage
- Cache miss + proxy fetch tracking
- `registry/maven.rs`:
- `download()` - cache hit/miss + activity
- `upload()` - record upload + activity
- `registry/cargo_registry.rs`:
- `download()` - record download + activity
### Iteration 8: Build & Test
- `cargo build` - compiled successfully with minor warnings
- Fixed warnings:
- Removed unused `RegistryStats` import
- Added `#[allow(dead_code)]` to `stat_card()`
- `cargo test` - all 75 tests passed
### Iteration 9: Server Testing
- Started server: `cargo run --release --bin nora`
- Tested endpoints:
```
GET /health - OK
GET /api/ui/dashboard - returns full metrics JSON
GET /ui/ - dark theme dashboard HTML
GET /v2/test/manifests/v1 - triggered Docker metrics
GET /npm/lodash/-/lodash-4.17.21.tgz - triggered npm proxy metrics
```
- Verified metrics tracking:
- Downloads: 3 (2 Docker + 1 npm)
- Cache hit rate: 66.67%
- Activity log populated with Pull, ProxyFetch events
### Iteration 10: Git Commit & Push
- Staged 11 files (2 new, 9 modified)
- Commit: `93f9655 Add dashboard metrics, activity log, and dark theme`
- Pushed to `origin/main`
### Iteration 11: Documentation
- Updated `TODO.md` with v0.2.1 section
- Created this `SESSION_NOTES.md`
---
### Key Decisions Made
1. **In-memory metrics** - AtomicU64 for thread-safety, reset on restart
2. **Bounded activity log** - 50 entries max, oldest evicted
3. **Polling over WebSocket** - simpler, 5-second interval sufficient
4. **Dark theme only for dashboard** - registry list pages keep light theme
### Files Changed Summary
```
New:
nora-registry/src/activity_log.rs
nora-registry/src/dashboard_metrics.rs
Modified:
nora-registry/src/main.rs (+8 lines)
nora-registry/src/registry/cargo_registry.rs (+13 lines)
nora-registry/src/registry/docker.rs (+47 lines)
nora-registry/src/registry/maven.rs (+36 lines)
nora-registry/src/registry/npm.rs (+29 lines)
nora-registry/src/ui/api.rs (+154 lines)
nora-registry/src/ui/components.rs (+394 lines)
nora-registry/src/ui/mod.rs (+5 lines)
nora-registry/src/ui/templates.rs (+180/-79 lines)
Total: ~1004 insertions, 79 deletions
```
### Useful Commands
```bash
# Start server
cargo run --release --bin nora
# Test dashboard
curl http://127.0.0.1:4000/api/ui/dashboard
# View UI
open http://127.0.0.1:4000/ui/
# Trigger metrics
curl http://127.0.0.1:4000/v2/test/manifests/v1
curl http://127.0.0.1:4000/npm/lodash/-/lodash-4.17.21.tgz -o /dev/null
```
---

TODO.md

@@ -1,503 +0,0 @@
# NORA Roadmap / TODO
## v0.2.0 - DONE
- [x] Unit tests (75 tests passing)
- [x] Input validation (path traversal protection)
- [x] Rate limiting (brute-force protection)
- [x] Request ID tracking
- [x] Migrate command (local <-> S3)
- [x] Error handling (thiserror)
- [x] SVG brand icons
---
## v0.2.1 - Dashboard Expansion (2026-01-26) - DONE
### Commit: 93f9655
### New Files
- `nora-registry/src/dashboard_metrics.rs` - AtomicU64 counters for metrics
- `nora-registry/src/activity_log.rs` - Bounded activity log (50 entries)
### Modified Files
- `nora-registry/src/main.rs` - Added modules, updated AppState
- `nora-registry/src/ui/api.rs` - Added DashboardResponse, api_dashboard()
- `nora-registry/src/ui/mod.rs` - Added /api/ui/dashboard route
- `nora-registry/src/ui/components.rs` - Dark theme components
- `nora-registry/src/ui/templates.rs` - New render_dashboard()
- `nora-registry/src/registry/docker.rs` - Instrumented handlers
- `nora-registry/src/registry/npm.rs` - Instrumented with cache tracking
- `nora-registry/src/registry/maven.rs` - Instrumented download/upload
- `nora-registry/src/registry/cargo_registry.rs` - Instrumented download
### Features Implemented
- [x] Global stats panel (downloads, uploads, artifacts, cache hit %, storage)
- [x] Per-registry metrics (Docker, Maven, npm, Cargo, PyPI)
- [x] Mount points table with proxy upstreams
- [x] Activity log (last 20 events)
- [x] Dark theme (#0f172a background, #1e293b cards)
- [x] Auto-refresh polling (5 seconds)
- [x] Cache hit/miss tracking
### API Endpoints
- `GET /api/ui/dashboard` - Full dashboard data as JSON
### Dark Theme Colors
```
Background: #0f172a (slate-950)
Cards: #1e293b (slate-800)
Borders: slate-700
Text primary: slate-200
Text secondary: slate-400
Accent: blue-400
```
### Testing Commands
```bash
# Test dashboard API
curl http://127.0.0.1:4000/api/ui/dashboard
# Test Docker pull (triggers metrics)
curl http://127.0.0.1:4000/v2/test/manifests/v1
# Test npm proxy (triggers cache miss)
curl http://127.0.0.1:4000/npm/lodash/-/lodash-4.17.21.tgz -o /dev/null
```
### Future Improvements (Dashboard)
- [ ] Add PyPI download instrumentation
- [ ] Persist metrics to disk (currently reset on restart)
- [ ] Add WebSocket for real-time updates (instead of polling)
- [ ] Add graphs/charts for metrics over time
- [ ] Add user/client tracking in activity log
- [ ] Dark/light theme toggle
---
## v0.3.0 - OIDC / Workload Identity Federation
### Killer Feature: OIDC for CI/CD
Zero-secret authentication for GitHub Actions, GitLab CI, etc.
**Goal:** Replace manual `ROBOT_TOKEN` rotation with federated identity.
```yaml
# GitHub Actions example
permissions:
id-token: write
steps:
- name: Login to NORA
uses: nora/login-action@v1
```
### Config Structure (draft)
```toml
[auth.oidc]
enabled = true
# GitHub Actions
[[auth.oidc.providers]]
name = "github-actions"
issuer = "https://token.actions.githubusercontent.com"
audience = "https://nora.example.com"
[[auth.oidc.providers.rules]]
# Claim matching (supports glob)
match = { repository = "my-org/*", ref = "refs/heads/main" }
# Granted permissions
permissions = ["push:my-org/*", "pull:*"]
[[auth.oidc.providers.rules]]
match = { repository = "my-org/*", ref = "refs/heads/*" }
permissions = ["pull:*"]
# GitLab CI
[[auth.oidc.providers]]
name = "gitlab-ci"
issuer = "https://gitlab.com"
audience = "https://nora.example.com"
[[auth.oidc.providers.rules]]
match = { project_path = "my-group/*" }
permissions = ["push:my-group/*", "pull:*"]
```
### Implementation Tasks
- [ ] JWT validation library (jsonwebtoken crate)
- [ ] OIDC discovery (/.well-known/openid-configuration)
- [ ] JWKS fetching and caching
- [ ] Claims extraction and glob matching (see the sketch after this list)
- [ ] Permission resolution from rules
- [ ] Token exchange endpoint (POST /auth/oidc/token)
- [ ] GitHub Action: `nora/login-action`
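A minimal sketch of the claim-matching step, assuming the JWT has already been validated and its claims flattened to strings; `globset` (the crate the v0.5.0 section below already names) handles the glob semantics:
```rust
use globset::Glob;
use std::collections::HashMap;

/// A rule matches when every claim it names is present and satisfies its glob.
/// Sketch only: real claims come out of a validated JWT, not a HashMap.
fn rule_matches(rule: &HashMap<String, String>, claims: &HashMap<String, String>) -> bool {
    rule.iter().all(|(claim, pattern)| {
        let Ok(glob) = Glob::new(pattern) else {
            return false; // a malformed pattern never matches
        };
        let matcher = glob.compile_matcher();
        claims.get(claim).is_some_and(|value| matcher.is_match(value.as_str()))
    })
}

fn main() {
    let rule = HashMap::from([
        ("repository".to_string(), "my-org/*".to_string()),
        ("ref".to_string(), "refs/heads/main".to_string()),
    ]);
    let claims = HashMap::from([
        ("repository".to_string(), "my-org/api".to_string()),
        ("ref".to_string(), "refs/heads/main".to_string()),
    ]);
    // Per the draft config, this token would be granted ["push:my-org/*", "pull:*"].
    assert!(rule_matches(&rule, &claims));
}
```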
---
## v0.4.0 - Transparent Docker Hub Proxy
### Pain Point
Harbor forces tag changes: `docker pull my-harbor/proxy-cache/library/nginx`
This breaks Helm charts hardcoded to `nginx`.
### Goal
Transparent pull-through cache:
```bash
docker pull nora.example.com/nginx # -> proxies to Docker Hub
```
### Implementation Tasks
- [ ] Registry v2 API interception
- [ ] Upstream registry configuration
- [ ] Cache layer management
- [ ] Rate limit handling (Docker Hub limits)
---
## v0.5.0 - Repo-level RBAC
### Challenge
Per-repository permissions need fast lookup: a single push can touch ~100 layers, each requiring a permission check.
### Solution
Glob patterns for 90% of cases:
```toml
[[auth.rules]]
subject = "team-frontend"
permissions = ["push:frontend/*", "pull:*"]
[[auth.rules]]
subject = "ci-bot"
permissions = ["push:*/release-*", "pull:*"]
```
### Implementation Tasks
- [ ] In-memory permission cache
- [ ] Glob pattern matcher (globset crate; sketch after this list)
- [ ] Permission inheritance (org -> project -> repo)
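A hedged sketch of grant resolution for the config above, assuming permission strings are `action:repo-glob` pairs; the planned in-memory cache is omitted, so this compiles the glob per check, which is exactly what the cache would avoid:
```rust
use globset::Glob;

/// Resolve "action:repo-glob" grants, e.g. "push:frontend/*" vs ("push", "frontend/web").
fn is_allowed(grants: &[&str], action: &str, repo: &str) -> bool {
    grants.iter().any(|grant| {
        let Some((a, pattern)) = grant.split_once(':') else {
            return false;
        };
        a == action
            && Glob::new(pattern)
                .map(|g| g.compile_matcher().is_match(repo))
                .unwrap_or(false)
    })
}

fn main() {
    let team_frontend = ["push:frontend/*", "pull:*"];
    assert!(is_allowed(&team_frontend, "push", "frontend/web"));
    assert!(is_allowed(&team_frontend, "pull", "backend/api"));
    assert!(!is_allowed(&team_frontend, "push", "backend/api"));
}
```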
---
## Target Audience
1. DevOps engineers tired of Java/Go monsters
2. Edge/IoT installations (Raspberry Pi, branch offices)
3. Educational platforms (student labs)
4. CI/CD pipelines (GitHub Actions, GitLab CI)
## Competitive Advantages
| Feature | NORA | Harbor | Nexus |
|---------|------|--------|-------|
| Memory | <100MB | 2GB+ | 4GB+ |
| OIDC for CI | v0.3.0 | No | No |
| Transparent proxy | v0.4.0 | No (tag rewrite) | Partial |
| Single binary | Yes | No (microservices) | No (Java) |
| Zero-config upgrade | Yes | Complex | Complex |
---
## v0.6.0 - Online Garbage Collection
### Pain Point
Harbor GC blocks the registry for hours; you can't push during cleanup.
### Goal
Non-blocking garbage collection with zero downtime.
### Implementation Tasks
- [ ] Mark-and-sweep without locking (sketch after this list)
- [ ] Background blob cleanup
- [ ] Progress reporting via API/CLI
- [ ] `nora gc --dry-run` preview
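One plausible shape for the core of this, reduced to pure set logic so the non-blocking and progress-reporting parts stay out of scope (names here are illustrative, not committed API):
```rust
use std::collections::HashSet;

/// Mark-and-sweep reduced to set operations. `manifests` holds, per live
/// manifest, the blob digests it references; anything unreferenced is a
/// deletion candidate for a background task (or a `--dry-run` report).
fn gc_candidates(all_blobs: &HashSet<String>, manifests: &[Vec<String>]) -> Vec<String> {
    // Mark: every digest referenced by a live manifest is reachable.
    let reachable: HashSet<String> = manifests.iter().flatten().cloned().collect();
    // Sweep: everything not marked.
    all_blobs.difference(&reachable).cloned().collect()
}
```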
---
## v0.7.0 - Retention Policies
### Pain Point
"Keep last 10 tags" sounds simple, works poorly everywhere.
### Goal
Declarative retention rules in config:
```toml
[[retention]]
match = "*/dev-*"
keep_last = 5
[[retention]]
match = "*/release-*"
keep_last = 20
older_than = "90d"
[[retention]]
match = "**/pr-*"
older_than = "7d"
```
### Implementation Tasks
- [ ] Glob pattern matching for repos/tags
- [ ] Age-based and count-based rules (sketch after this list)
- [ ] Dry-run mode
- [ ] Scheduled execution (cron-style)
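How `keep_last` and `older_than` compose is left open above; here is one interpretation as a sketch (glob matching of the `match` field is assumed to have already selected the tags, and the combined semantics are an assumption, not settled design):
```rust
use std::time::{Duration, SystemTime};

struct Tag {
    name: String,
    created: SystemTime,
}

/// Tags beyond the newest `keep_last` are deleted only if they are also
/// older than `older_than` (when that bound is set).
fn tags_to_delete(mut tags: Vec<Tag>, keep_last: usize, older_than: Option<Duration>) -> Vec<String> {
    tags.sort_by(|a, b| b.created.cmp(&a.created)); // newest first
    let now = SystemTime::now();
    tags.into_iter()
        .skip(keep_last) // always keep the newest N
        .filter(|t| match older_than {
            Some(min_age) => now
                .duration_since(t.created)
                .map_or(false, |age| age >= min_age),
            None => true,
        })
        .map(|t| t.name)
        .collect()
}
```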
---
## v0.8.0 - Multi-tenancy & Quotas
### Pain Point
Harbor projects have quotas but configuration is painful. Nexus has no real isolation.
### Goal
Simple namespaces with limits:
```toml
[[tenants]]
name = "team-frontend"
storage_quota = "50GB"
rate_limit = { push = 100, pull = 1000 } # per hour
[[tenants]]
name = "team-backend"
storage_quota = "100GB"
```
### Implementation Tasks
- [ ] Tenant isolation (namespace prefix)
- [ ] Storage quota tracking (sketch after this list)
- [ ] Per-tenant rate limiting
- [ ] Usage reporting API
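The quota gate itself is a one-liner; the hard part is the persistent, concurrency-safe usage tracking listed above. A sketch under that caveat:
```rust
/// Reject an upload when it would push a tenant over its storage quota.
/// (Sketch; a real version persists `used_bytes` and handles races.)
fn upload_allowed(used_bytes: u64, quota_bytes: u64, incoming_bytes: u64) -> bool {
    used_bytes.saturating_add(incoming_bytes) <= quota_bytes
}

fn main() {
    let quota = 50 * 1024 * 1024 * 1024u64; // the "50GB" from the config above
    assert!(upload_allowed(quota - 1024, quota, 512));
    assert!(!upload_allowed(quota, quota, 1));
}
```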
---
## v0.9.0 - Smart Replication
### Pain Point
Harbor replication rules are complex, and errors are silently swallowed.
### Goal
Simple CLI-driven replication with clear feedback:
```bash
nora replicate --to remote-dc --filter "prod/*" --dry-run
nora replicate --from gcr.io/my-project/* --to local/imported/
```
### Implementation Tasks
- [ ] Push-based replication to remote NORA
- [ ] Pull-based import from external registries (Docker Hub, GCR, ECR, Quay)
- [ ] Filter by glob patterns
- [ ] Progress bar and detailed logs
- [ ] Retry logic with exponential backoff (sketch after this list)
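A sketch of the retry helper the last task calls for, generic over the replication operation (assumes tokio, which the project already uses):
```rust
use std::future::Future;
use std::time::Duration;

/// Retry an async operation with exponential backoff.
/// Delays double each attempt: 0.5s, 1s, 2s, 4s, ...
async fn retry_with_backoff<T, E, F, Fut>(mut op: F, max_attempts: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, E>>,
{
    let mut delay = Duration::from_millis(500);
    let mut attempt = 1;
    loop {
        match op().await {
            Ok(value) => return Ok(value),
            Err(err) if attempt >= max_attempts => return Err(err),
            Err(_) => {
                tokio::time::sleep(delay).await;
                delay *= 2;
                attempt += 1;
            }
        }
    }
}
```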
---
## v1.0.0 - Production Ready
### Features to polish
- [ ] Full CLI (`nora images ls`, `nora tag`, `nora delete`)
- [ ] Webhooks with filters and retry logic
- [ ] Enhanced Prometheus metrics (per-repo stats, cache hit ratio, bandwidth per tenant)
- [ ] TUI dashboard (optional)
- [ ] Helm chart for Kubernetes deployment
- [ ] Official Docker image on ghcr.io
---
## Future Ideas (v1.x+)
### Cold Storage Tiering
Auto-move old tags to S3 Glacier:
```toml
[[storage.tiering]]
match = "*"
older_than = "180d"
move_to = "s3-glacier"
```
### Vulnerability Scanning Integration
Not built-in (use Trivy), but:
- [ ] Webhook on push -> trigger external scan
- [ ] Store scan results as OCI artifacts
- [ ] Block pull if critical CVEs (policy)
### Image Signing (Cosign/Notation)
- [ ] Signature storage (OCI artifacts)
- [ ] Policy enforcement (reject unsigned)
### P2P Distribution (Dragonfly/Kraken style)
For large clusters pulling the same image simultaneously.
---
## Architecture / DDD
### Current State (v0.2.0)
Monolithic structure, all in `nora-registry/src/`:
```
src/
├── main.rs # CLI + server setup
├── auth.rs # htpasswd + basic auth
├── tokens.rs # API tokens
├── storage/ # Storage backends (local, s3)
├── registry/ # Protocol handlers (docker, maven, npm, cargo, pypi)
├── ui/ # Web dashboard
└── ...
```
### Target Architecture (v1.0+)
#### Domain-Driven Design Boundaries
```
nora/
├── nora-core/ # Domain layer (no dependencies)
│ ├── src/
│ │ ├── artifact.rs # Artifact, Digest, Tag, Manifest
│ │ ├── repository.rs # Repository, Namespace
│ │ ├── identity.rs # User, ServiceAccount, Token
│ │ ├── policy.rs # Permission, Rule, Quota
│ │ └── events.rs # DomainEvent (ArtifactPushed, etc.)
├── nora-auth/ # Authentication bounded context
│ ├── src/
│ │ ├── htpasswd.rs # Basic auth provider
│ │ ├── oidc.rs # OIDC/JWT provider
│ │ ├── token.rs # API token provider
│ │ └── rbac.rs # Permission resolver
├── nora-storage/ # Storage bounded context
│ ├── src/
│ │ ├── backend.rs # StorageBackend trait
│ │ ├── local.rs # Filesystem
│ │ ├── s3.rs # S3-compatible
│ │ ├── tiered.rs # Hot/cold tiering
│ │ └── gc.rs # Garbage collection
├── nora-registry/ # Application layer (HTTP API)
│ ├── src/
│ │ ├── api/
│ │ │ ├── oci.rs # OCI Distribution API (/v2/)
│ │ │ ├── maven.rs # Maven repository
│ │ │ ├── npm.rs # npm registry
│ │ │ ├── cargo.rs # Cargo registry
│ │ │ └── pypi.rs # PyPI (simple API)
│ │ ├── proxy/ # Upstream proxy/cache
│ │ ├── webhook/ # Event webhooks
│ │ └── ui/ # Web dashboard
├── nora-cli/ # CLI application
│ ├── src/
│ │ ├── commands/
│ │ │ ├── serve.rs
│ │ │ ├── images.rs # nora images ls/delete/tag
│ │ │ ├── gc.rs # nora gc
│ │ │ ├── backup.rs # nora backup/restore
│ │ │ ├── migrate.rs # nora migrate
│ │ │ └── replicate.rs
│ │ └── tui/ # Optional TUI dashboard
└── nora-sdk/ # Client SDK (for nora/login-action)
└── src/
├── client.rs # HTTP client
└── oidc.rs # Token exchange
```
#### Key Principles
1. **Hexagonal Architecture**
- Core domain has no external dependencies
- Ports (traits) define boundaries
- Adapters implement ports (S3, filesystem, OIDC providers)
2. **Event-Driven**
- Domain events: `ArtifactPushed`, `ArtifactDeleted`, `TagCreated`
- Webhooks subscribe to events
- Async processing for GC, replication
3. **CQRS-lite**
- Commands: Push, Delete, CreateToken
- Queries: List, Get, Search
- Separate read/write paths for hot endpoints
4. **Configuration as Code**
- All policies in `nora.toml`
- No database for config (file-based)
- GitOps friendly
#### Trait Boundaries (Ports)
```rust
// nora-core/src/ports.rs
#[async_trait]
pub trait ArtifactStore {
async fn push_blob(&self, digest: &Digest, data: Bytes) -> Result<()>;
async fn get_blob(&self, digest: &Digest) -> Result<Bytes>;
async fn push_manifest(&self, repo: &Repository, tag: &Tag, manifest: &Manifest) -> Result<()>;
async fn get_manifest(&self, repo: &Repository, reference: &Reference) -> Result<Manifest>;
async fn list_tags(&self, repo: &Repository) -> Result<Vec<Tag>>;
async fn delete(&self, repo: &Repository, reference: &Reference) -> Result<()>;
}
#[async_trait]
pub trait IdentityProvider {
async fn authenticate(&self, credentials: &Credentials) -> Result<Identity>;
async fn authorize(&self, identity: &Identity, action: &Action, resource: &Resource) -> Result<bool>;
}
#[async_trait]
pub trait EventPublisher {
async fn publish(&self, event: DomainEvent) -> Result<()>;
}
```
#### Migration Path
| Phase | Action |
|-------|--------|
| v0.3 | Extract `nora-auth` crate (OIDC work) |
| v0.4 | Extract `nora-core` domain types |
| v0.5 | Extract `nora-storage` with trait boundaries |
| v0.6+ | Refactor registry handlers to use ports |
| v1.0 | Full hexagonal architecture |
### Technical Debt to Address
- [ ] Remove `unwrap()` in non-test code (started in e9984cf)
- [ ] Add tracing spans to all handlers
- [ ] Consistent error types across modules
- [ ] Extract hardcoded limits to config
- [ ] Add OpenTelemetry support (traces, not just metrics)
### Performance Requirements
| Metric | Target |
|--------|--------|
| Memory (idle) | <50MB |
| Memory (under load) | <100MB |
| Startup time | <1s |
| Blob throughput | Wire speed (no processing overhead) |
| Manifest latency | <10ms p99 |
| Auth check | <1ms (cached) |
### Security Requirements
- [ ] No secrets in logs (already redacting)
- [ ] TLS termination (or trust reverse proxy)
- [ ] Content-addressable storage (immutable blobs)
- [ ] Audit log for all mutations
- [ ] SBOM generation for NORA itself
---
## Notes
- S3 storage: already implemented
- Web UI: minimalist read-only dashboard (done)
- TUI: consider for v1.0
- Vulnerability scanning: out of scope (use Trivy externally)
- Image signing: out of scope for now (use cosign externally)

deny.toml Normal file

@@ -0,0 +1,40 @@
# cargo-deny configuration
# https://embarkstudios.github.io/cargo-deny/
[advisories]
# Vulnerability database (RustSec)
db-urls = ["https://github.com/rustsec/advisory-db"]
ignore = [
"RUSTSEC-2025-0119", # number_prefix unmaintained, transitive via indicatif; no fix available
]
[licenses]
# Allowed open-source licenses
allow = [
"MIT",
"Apache-2.0",
"Apache-2.0 WITH LLVM-exception",
"BSD-2-Clause",
"BSD-3-Clause",
"ISC",
"Unicode-DFS-2016",
"Unicode-3.0",
"CC0-1.0",
"OpenSSL",
"Zlib",
"CDLA-Permissive-2.0", # webpki-roots (CA certificates bundle)
"MPL-2.0",
]
[bans]
multiple-versions = "warn"
deny = [
{ name = "openssl-sys" },
{ name = "openssl" },
]
skip = []
[sources]
unknown-registry = "warn"
unknown-git = "warn"
allow-registry = ["https://github.com/rust-lang/crates.io-index"]

install.sh Normal file

@@ -0,0 +1,98 @@
#!/usr/bin/env sh
# NORA installer — https://getnora.io/install.sh
# Usage: curl -fsSL https://getnora.io/install.sh | sh
set -e
REPO="getnora-io/nora"
BINARY="nora"
INSTALL_DIR="/usr/local/bin"
# ── Detect OS and architecture ──────────────────────────────────────────────
OS="$(uname -s)"
ARCH="$(uname -m)"
case "$OS" in
Linux) os="linux" ;;
Darwin) os="darwin" ;;
*)
echo "Unsupported OS: $OS"
echo "Please download manually: https://github.com/$REPO/releases/latest"
exit 1
;;
esac
case "$ARCH" in
x86_64 | amd64) arch="amd64" ;;
aarch64 | arm64) arch="arm64" ;;
*)
echo "Unsupported architecture: $ARCH"
echo "Please download manually: https://github.com/$REPO/releases/latest"
exit 1
;;
esac
ASSET="${BINARY}-${os}-${arch}"
# ── Get latest release version ──────────────────────────────────────────────
VERSION="$(curl -fsSL "https://api.github.com/repos/$REPO/releases/latest" \
| grep '"tag_name"' \
| sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')"
if [ -z "$VERSION" ]; then
echo "Failed to get latest version"
exit 1
fi
echo "Installing NORA $VERSION ($os/$arch)..."
# ── Download binary and checksum ────────────────────────────────────────────
BASE_URL="https://github.com/$REPO/releases/download/$VERSION"
TMP_DIR="$(mktemp -d)"
trap 'rm -rf "$TMP_DIR"' EXIT
echo "Downloading $ASSET..."
curl -fsSL "$BASE_URL/$ASSET" -o "$TMP_DIR/$BINARY"
curl -fsSL "$BASE_URL/$ASSET.sha256" -o "$TMP_DIR/$ASSET.sha256"
# ── Verify checksum ─────────────────────────────────────────────────────────
echo "Verifying checksum..."
EXPECTED="$(awk '{print $1}' "$TMP_DIR/$ASSET.sha256")"
if command -v sha256sum >/dev/null 2>&1; then
    ACTUAL="$(sha256sum "$TMP_DIR/$BINARY" | awk '{print $1}')"
else
    # macOS ships shasum instead of sha256sum
    ACTUAL="$(shasum -a 256 "$TMP_DIR/$BINARY" | awk '{print $1}')"
fi
if [ "$EXPECTED" != "$ACTUAL" ]; then
echo "Checksum mismatch!"
echo " Expected: $EXPECTED"
echo " Actual: $ACTUAL"
exit 1
fi
echo "Checksum OK"
# ── Install ─────────────────────────────────────────────────────────────────
chmod +x "$TMP_DIR/$BINARY"
if [ -w "$INSTALL_DIR" ]; then
mv "$TMP_DIR/$BINARY" "$INSTALL_DIR/$BINARY"
elif command -v sudo >/dev/null 2>&1; then
sudo mv "$TMP_DIR/$BINARY" "$INSTALL_DIR/$BINARY"
else
# Fallback to ~/.local/bin
INSTALL_DIR="$HOME/.local/bin"
mkdir -p "$INSTALL_DIR"
mv "$TMP_DIR/$BINARY" "$INSTALL_DIR/$BINARY"
echo "Installed to $INSTALL_DIR/$BINARY"
echo "Make sure $INSTALL_DIR is in your PATH"
fi
# ── Done ────────────────────────────────────────────────────────────────────
echo ""
echo "NORA $VERSION installed to $INSTALL_DIR/$BINARY"
echo ""
nora --version 2>/dev/null || true

logo.jpg Normal file (binary, 5.9 KiB; not shown)

logo.svg Normal file

@@ -0,0 +1,5 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 300 72" width="300" height="72">
<text font-family="'SF Mono', 'Fira Code', 'Cascadia Code', monospace" font-weight="800" fill="#0f172a" letter-spacing="1">
<tspan x="8" y="58" font-size="52">N</tspan><tspan font-size="68" dy="-10" fill="#2563EB">O</tspan><tspan font-size="52" dy="10">RA</tspan>
</text>
</svg>



@@ -20,4 +20,4 @@ serde_json.workspace = true
clap = { version = "4", features = ["derive"] }
indicatif = "0.17"
tar = "0.4"
flate2 = "1.0"
flate2 = "1.1"


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use clap::{Parser, Subcommand};
#[derive(Parser)]


@@ -24,18 +24,20 @@ tracing-subscriber.workspace = true
reqwest.workspace = true
sha2.workspace = true
async-trait.workspace = true
toml = "0.8"
hmac.workspace = true
hex.workspace = true
toml = "1.0"
uuid = { version = "1", features = ["v4"] }
bcrypt = "0.17"
base64 = "0.22"
prometheus = "0.13"
prometheus = "0.14"
lazy_static = "1.5"
httpdate = "1"
utoipa = { version = "5", features = ["axum_extras"] }
utoipa-swagger-ui = { version = "9", features = ["axum", "reqwest"] }
clap = { version = "4", features = ["derive"] }
tar = "0.4"
flate2 = "1.0"
flate2 = "1.1"
indicatif = "0.17"
chrono = { version = "0.4", features = ["serde"] }
thiserror = "2"


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::Serialize;


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use axum::{
body::Body,
extract::State,
@@ -60,11 +63,17 @@ impl HtpasswdAuth {
fn is_public_path(path: &str) -> bool {
matches!(
path,
"/" | "/health" | "/ready" | "/metrics" | "/v2/" | "/v2"
"/" | "/health"
| "/ready"
| "/metrics"
| "/v2/"
| "/v2"
| "/api/tokens"
| "/api/tokens/list"
| "/api/tokens/revoke"
) || path.starts_with("/ui")
|| path.starts_with("/api-docs")
|| path.starts_with("/api/ui")
|| path.starts_with("/api/tokens")
}
/// Auth middleware - supports Basic auth and Bearer tokens
@@ -401,8 +410,12 @@ mod tests {
assert!(is_public_path("/api/ui/stats"));
assert!(is_public_path("/api/tokens"));
assert!(is_public_path("/api/tokens/list"));
assert!(is_public_path("/api/tokens/revoke"));
// Protected paths
assert!(!is_public_path("/api/tokens/unknown"));
assert!(!is_public_path("/api/tokens/admin"));
assert!(!is_public_path("/api/tokens/extra/path"));
assert!(!is_public_path("/v2/myimage/blobs/sha256:abc"));
assert!(!is_public_path("/v2/library/nginx/manifests/latest"));
assert!(!is_public_path(


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! Backup and restore functionality for Nora
//!
//! Exports all artifacts to a tar.gz file and restores from backups.


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use serde::{Deserialize, Serialize};
use std::env;
use std::fs;
@@ -53,6 +56,19 @@ pub struct StorageConfig {
pub s3_url: String,
#[serde(default = "default_bucket")]
pub bucket: String,
/// S3 access key (optional, uses anonymous access if not set)
#[serde(default)]
pub s3_access_key: Option<String>,
/// S3 secret key (optional, uses anonymous access if not set)
#[serde(default)]
pub s3_secret_key: Option<String>,
/// S3 region (default: us-east-1)
#[serde(default = "default_s3_region")]
pub s3_region: String,
}
fn default_s3_region() -> String {
"us-east-1".to_string()
}
fn default_storage_path() -> String {
@@ -325,6 +341,15 @@ impl Config {
if let Ok(val) = env::var("NORA_STORAGE_BUCKET") {
self.storage.bucket = val;
}
if let Ok(val) = env::var("NORA_STORAGE_S3_ACCESS_KEY") {
self.storage.s3_access_key = if val.is_empty() { None } else { Some(val) };
}
if let Ok(val) = env::var("NORA_STORAGE_S3_SECRET_KEY") {
self.storage.s3_secret_key = if val.is_empty() { None } else { Some(val) };
}
if let Ok(val) = env::var("NORA_STORAGE_S3_REGION") {
self.storage.s3_region = val;
}
// Auth config
if let Ok(val) = env::var("NORA_AUTH_ENABLED") {
@@ -455,6 +480,9 @@ impl Default for Config {
path: String::from("data/storage"),
s3_url: String::from("http://127.0.0.1:3000"),
bucket: String::from("registry"),
s3_access_key: None,
s3_secret_key: None,
s3_region: String::from("us-east-1"),
},
maven: MavenConfig::default(),
npm: NpmConfig::default(),


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
#![allow(dead_code)]
//! Application error handling with HTTP response conversion
//!


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use axum::{extract::State, http::StatusCode, response::Json, routing::get, Router};
use serde::Serialize;
use std::sync::Arc;


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
mod activity_log;
mod auth;
mod backup;
@@ -10,6 +13,7 @@ mod migrate;
mod openapi;
mod rate_limit;
mod registry;
mod repo_index;
mod request_id;
mod secrets;
mod storage;
@@ -30,6 +34,7 @@ use activity_log::ActivityLog;
use auth::HtpasswdAuth;
use config::{Config, StorageMode};
use dashboard_metrics::DashboardMetrics;
use repo_index::RepoIndex;
pub use storage::Storage;
use tokens::TokenStore;
@@ -79,6 +84,8 @@ pub struct AppState {
pub metrics: DashboardMetrics,
pub activity: ActivityLog,
pub docker_auth: registry::DockerAuth,
pub repo_index: RepoIndex,
pub http_client: reqwest::Client,
}
#[tokio::main]
@@ -104,10 +111,18 @@ async fn main() {
info!(
s3_url = %config.storage.s3_url,
bucket = %config.storage.bucket,
region = %config.storage.s3_region,
has_credentials = config.storage.s3_access_key.is_some(),
"Using S3 storage"
);
}
Storage::new_s3(&config.storage.s3_url, &config.storage.bucket)
Storage::new_s3(
&config.storage.s3_url,
&config.storage.bucket,
&config.storage.s3_region,
config.storage.s3_access_key.as_deref(),
config.storage.s3_secret_key.as_deref(),
)
}
};
@@ -131,7 +146,13 @@ async fn main() {
Some(Commands::Migrate { from, to, dry_run }) => {
let source = match from.as_str() {
"local" => Storage::new_local(&config.storage.path),
"s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
"s3" => Storage::new_s3(
&config.storage.s3_url,
&config.storage.bucket,
&config.storage.s3_region,
config.storage.s3_access_key.as_deref(),
config.storage.s3_secret_key.as_deref(),
),
_ => {
error!("Invalid source: '{}'. Use 'local' or 's3'", from);
std::process::exit(1);
@@ -140,7 +161,13 @@ async fn main() {
let dest = match to.as_str() {
"local" => Storage::new_local(&config.storage.path),
"s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
"s3" => Storage::new_s3(
&config.storage.s3_url,
&config.storage.bucket,
&config.storage.s3_region,
config.storage.s3_access_key.as_deref(),
config.storage.s3_secret_key.as_deref(),
),
_ => {
error!("Invalid destination: '{}'. Use 'local' or 's3'", to);
std::process::exit(1);
@@ -245,6 +272,8 @@ async fn run_server(config: Config, storage: Storage) {
// Initialize Docker auth with proxy timeout
let docker_auth = registry::DockerAuth::new(config.docker.proxy_timeout);
let http_client = reqwest::Client::new();
let state = Arc::new(AppState {
storage,
config,
@@ -254,6 +283,8 @@ async fn run_server(config: Config, storage: Storage) {
metrics: DashboardMetrics::new(),
activity: ActivityLog::new(50),
docker_auth,
repo_index: RepoIndex::new(),
http_client,
});
// Token routes with strict rate limiting (brute-force protection)


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use axum::{
body::Body,
extract::MatchedPath,


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! Migration between storage backends
//!
//! Supports migrating artifacts from one storage backend to another


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! OpenAPI documentation and Swagger UI
//!
//! Functions in this module are stubs used only for generating OpenAPI documentation.
@@ -15,7 +18,7 @@ use crate::AppState;
#[openapi(
info(
title = "Nora",
version = "0.2.10",
version = "0.2.12",
description = "Multi-protocol package registry supporting Docker, Maven, npm, Cargo, and PyPI",
license(name = "MIT"),
contact(name = "DevITWay", url = "https://github.com/getnora-io/nora")
@@ -25,6 +28,7 @@ use crate::AppState;
),
tags(
(name = "health", description = "Health check endpoints"),
(name = "metrics", description = "Prometheus metrics"),
(name = "dashboard", description = "Dashboard & Metrics API"),
(name = "docker", description = "Docker Registry v2 API"),
(name = "maven", description = "Maven Repository API"),
@@ -37,18 +41,30 @@ use crate::AppState;
// Health
crate::openapi::health_check,
crate::openapi::readiness_check,
// Metrics
crate::openapi::prometheus_metrics,
// Dashboard
crate::openapi::dashboard_metrics,
// Docker
// Docker - Read
crate::openapi::docker_version,
crate::openapi::docker_catalog,
crate::openapi::docker_tags,
crate::openapi::docker_manifest,
crate::openapi::docker_blob,
crate::openapi::docker_manifest_get,
crate::openapi::docker_blob_head,
crate::openapi::docker_blob_get,
// Docker - Write
crate::openapi::docker_manifest_put,
crate::openapi::docker_blob_upload_start,
crate::openapi::docker_blob_upload_patch,
crate::openapi::docker_blob_upload_put,
// Maven
crate::openapi::maven_artifact,
crate::openapi::maven_artifact_get,
crate::openapi::maven_artifact_put,
// npm
crate::openapi::npm_package,
// Cargo
crate::openapi::cargo_metadata,
crate::openapi::cargo_download,
// PyPI
crate::openapi::pypi_simple,
crate::openapi::pypi_package,
@@ -258,6 +274,8 @@ pub struct ActivityEntry {
// ============ Path Operations (documentation only) ============
// -------------------- Health --------------------
/// Health check endpoint
#[utoipa::path(
get,
@@ -282,6 +300,23 @@ pub async fn health_check() {}
)]
pub async fn readiness_check() {}
// -------------------- Metrics --------------------
/// Prometheus metrics endpoint
///
/// Returns metrics in Prometheus text format for scraping.
#[utoipa::path(
get,
path = "/metrics",
tag = "metrics",
responses(
(status = 200, description = "Prometheus metrics", content_type = "text/plain")
)
)]
pub async fn prometheus_metrics() {}
// -------------------- Dashboard --------------------
/// Dashboard metrics and activity
///
/// Returns comprehensive metrics including downloads, uploads, cache statistics,
@@ -296,6 +331,8 @@ pub async fn readiness_check() {}
)]
pub async fn dashboard_metrics() {}
// -------------------- Docker Registry v2 - Read Operations --------------------
/// Docker Registry version check
#[utoipa::path(
get,
@@ -325,7 +362,7 @@ pub async fn docker_catalog() {}
path = "/v2/{name}/tags/list",
tag = "docker",
params(
("name" = String, Path, description = "Repository name")
("name" = String, Path, description = "Repository name (e.g., 'alpine' or 'library/nginx')")
),
responses(
(status = 200, description = "Tag list", body = DockerTags),
@@ -341,14 +378,30 @@ pub async fn docker_tags() {}
tag = "docker",
params(
("name" = String, Path, description = "Repository name"),
("reference" = String, Path, description = "Tag or digest")
("reference" = String, Path, description = "Tag or digest (sha256:...)")
),
responses(
(status = 200, description = "Manifest content"),
(status = 404, description = "Manifest not found")
)
)]
pub async fn docker_manifest() {}
pub async fn docker_manifest_get() {}
/// Check if blob exists
#[utoipa::path(
head,
path = "/v2/{name}/blobs/{digest}",
tag = "docker",
params(
("name" = String, Path, description = "Repository name"),
("digest" = String, Path, description = "Blob digest (sha256:...)")
),
responses(
(status = 200, description = "Blob exists, Content-Length header contains size"),
(status = 404, description = "Blob not found")
)
)]
pub async fn docker_blob_head() {}
/// Get blob
#[utoipa::path(
@@ -364,7 +417,79 @@ pub async fn docker_manifest() {}
(status = 404, description = "Blob not found")
)
)]
pub async fn docker_blob() {}
pub async fn docker_blob_get() {}
// -------------------- Docker Registry v2 - Write Operations --------------------
/// Push manifest
#[utoipa::path(
put,
path = "/v2/{name}/manifests/{reference}",
tag = "docker",
params(
("name" = String, Path, description = "Repository name"),
("reference" = String, Path, description = "Tag or digest")
),
responses(
(status = 201, description = "Manifest created, Docker-Content-Digest header contains digest"),
(status = 400, description = "Invalid manifest")
)
)]
pub async fn docker_manifest_put() {}
/// Start blob upload
///
/// Initiates a resumable blob upload. Returns a Location header with the upload URL.
#[utoipa::path(
post,
path = "/v2/{name}/blobs/uploads/",
tag = "docker",
params(
("name" = String, Path, description = "Repository name")
),
responses(
(status = 202, description = "Upload started, Location header contains upload URL")
)
)]
pub async fn docker_blob_upload_start() {}
/// Upload blob chunk (chunked upload)
///
/// Uploads a chunk of data to an in-progress upload session.
#[utoipa::path(
patch,
path = "/v2/{name}/blobs/uploads/{uuid}",
tag = "docker",
params(
("name" = String, Path, description = "Repository name"),
("uuid" = String, Path, description = "Upload session UUID")
),
responses(
(status = 202, description = "Chunk accepted, Range header indicates bytes received")
)
)]
pub async fn docker_blob_upload_patch() {}
/// Complete blob upload
///
/// Finalizes the blob upload. Can include final chunk data in the body.
#[utoipa::path(
put,
path = "/v2/{name}/blobs/uploads/{uuid}",
tag = "docker",
params(
("name" = String, Path, description = "Repository name"),
("uuid" = String, Path, description = "Upload session UUID"),
("digest" = String, Query, description = "Expected blob digest (sha256:...)")
),
responses(
(status = 201, description = "Blob created"),
(status = 400, description = "Digest mismatch or missing")
)
)]
pub async fn docker_blob_upload_put() {}
// -------------------- Maven --------------------
/// Get Maven artifact
#[utoipa::path(
@@ -379,7 +504,24 @@ pub async fn docker_blob() {}
(status = 404, description = "Artifact not found, trying upstream proxies")
)
)]
pub async fn maven_artifact() {}
pub async fn maven_artifact_get() {}
/// Upload Maven artifact
#[utoipa::path(
put,
path = "/maven2/{path}",
tag = "maven",
params(
("path" = String, Path, description = "Artifact path")
),
responses(
(status = 201, description = "Artifact uploaded"),
(status = 500, description = "Storage error")
)
)]
pub async fn maven_artifact_put() {}
// -------------------- npm --------------------
/// Get npm package metadata
#[utoipa::path(
@@ -387,7 +529,7 @@ pub async fn maven_artifact() {}
path = "/npm/{name}",
tag = "npm",
params(
("name" = String, Path, description = "Package name")
("name" = String, Path, description = "Package name (e.g., 'lodash' or '@scope/package')")
),
responses(
(status = 200, description = "Package metadata (JSON)"),
@@ -396,6 +538,41 @@ pub async fn maven_artifact() {}
)]
pub async fn npm_package() {}
// -------------------- Cargo --------------------
/// Get Cargo crate metadata
#[utoipa::path(
get,
path = "/cargo/api/v1/crates/{crate_name}",
tag = "cargo",
params(
("crate_name" = String, Path, description = "Crate name")
),
responses(
(status = 200, description = "Crate metadata (JSON)"),
(status = 404, description = "Crate not found")
)
)]
pub async fn cargo_metadata() {}
/// Download Cargo crate
#[utoipa::path(
get,
path = "/cargo/api/v1/crates/{crate_name}/{version}/download",
tag = "cargo",
params(
("crate_name" = String, Path, description = "Crate name"),
("version" = String, Path, description = "Crate version")
),
responses(
(status = 200, description = "Crate file (.crate)"),
(status = 404, description = "Crate version not found")
)
)]
pub async fn cargo_download() {}
// -------------------- PyPI --------------------
/// PyPI Simple index
#[utoipa::path(
get,
@@ -422,6 +599,8 @@ pub async fn pypi_simple() {}
)]
pub async fn pypi_package() {}
// -------------------- Auth / Tokens --------------------
/// Create API token
#[utoipa::path(
post,


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! Rate limiting configuration and middleware
//!
//! Provides rate limiting to protect against:


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use crate::activity_log::{ActionType, ActivityEntry};
use crate::registry::docker_auth::DockerAuth;
use crate::storage::Storage;
@@ -47,6 +50,8 @@ static UPLOAD_SESSIONS: std::sync::LazyLock<RwLock<HashMap<String, Vec<u8>>>> =
pub fn routes() -> Router<Arc<AppState>> {
Router::new()
.route("/v2/", get(check))
.route("/v2/_catalog", get(catalog))
// Single-segment name routes (e.g., /v2/alpine/...)
.route("/v2/{name}/blobs/{digest}", head(check_blob))
.route("/v2/{name}/blobs/{digest}", get(download_blob))
.route(
@@ -60,12 +65,52 @@ pub fn routes() -> Router<Arc<AppState>> {
.route("/v2/{name}/manifests/{reference}", get(get_manifest))
.route("/v2/{name}/manifests/{reference}", put(put_manifest))
.route("/v2/{name}/tags/list", get(list_tags))
// Two-segment name routes (e.g., /v2/library/alpine/...)
.route("/v2/{ns}/{name}/blobs/{digest}", head(check_blob_ns))
.route("/v2/{ns}/{name}/blobs/{digest}", get(download_blob_ns))
.route(
"/v2/{ns}/{name}/blobs/uploads/",
axum::routing::post(start_upload_ns),
)
.route(
"/v2/{ns}/{name}/blobs/uploads/{uuid}",
patch(patch_blob_ns).put(upload_blob_ns),
)
.route(
"/v2/{ns}/{name}/manifests/{reference}",
get(get_manifest_ns),
)
.route(
"/v2/{ns}/{name}/manifests/{reference}",
put(put_manifest_ns),
)
.route("/v2/{ns}/{name}/tags/list", get(list_tags_ns))
}
async fn check() -> (StatusCode, Json<Value>) {
(StatusCode::OK, Json(json!({})))
}
/// List all repositories in the registry
async fn catalog(State(state): State<Arc<AppState>>) -> Json<Value> {
let keys = state.storage.list("docker/").await;
// Extract unique repository names from paths like "docker/{name}/manifests/..."
let mut repos: Vec<String> = keys
.iter()
.filter_map(|k| {
k.strip_prefix("docker/")
.and_then(|rest| rest.split('/').next())
.map(String::from)
})
.collect();
repos.sort();
repos.dedup();
Json(json!({ "repositories": repos }))
}
async fn check_blob(
State(state): State<Arc<AppState>>,
Path((name, digest)): Path<(String, String)>,
@@ -122,6 +167,7 @@ async fn download_blob(
// Try upstream proxies
for upstream in &state.config.docker.upstreams {
if let Ok(data) = fetch_blob_from_upstream(
&state.http_client,
&upstream.url,
&name,
&digest,
@@ -147,6 +193,8 @@ async fn download_blob(
let _ = storage.put(&key_clone, &data_clone).await;
});
state.repo_index.invalidate("docker");
return (
StatusCode::OK,
[(header::CONTENT_TYPE, "application/octet-stream")],
@@ -257,6 +305,7 @@ async fn upload_blob(
"docker",
"LOCAL",
));
state.repo_index.invalidate("docker");
let location = format!("/v2/{}/blobs/{}", name, digest);
(StatusCode::CREATED, [(header::LOCATION, location)]).into_response()
}
@@ -312,8 +361,14 @@ async fn get_manifest(
}
// Try upstream proxies
tracing::debug!(
upstreams_count = state.config.docker.upstreams.len(),
"Trying upstream proxies"
);
for upstream in &state.config.docker.upstreams {
tracing::debug!(upstream_url = %upstream.url, "Trying upstream");
if let Ok((data, content_type)) = fetch_manifest_from_upstream(
&state.http_client,
&upstream.url,
&name,
&reference,
@@ -363,6 +418,8 @@ async fn get_manifest(
}
});
state.repo_index.invalidate("docker");
return (
StatusCode::OK,
[
@@ -424,6 +481,7 @@ async fn put_manifest(
"docker",
"LOCAL",
));
state.repo_index.invalidate("docker");
let location = format!("/v2/{}/manifests/{}", name, reference);
(
@@ -454,8 +512,78 @@ async fn list_tags(State(state): State<Arc<AppState>>, Path(name): Path<String>)
(StatusCode::OK, Json(json!({"name": name, "tags": tags}))).into_response()
}
// ============================================================================
// Namespace handlers (for two-segment names like library/alpine)
// These combine ns/name into a single name and delegate to the main handlers
// ============================================================================
async fn check_blob_ns(
state: State<Arc<AppState>>,
Path((ns, name, digest)): Path<(String, String, String)>,
) -> Response {
let full_name = format!("{}/{}", ns, name);
check_blob(state, Path((full_name, digest))).await
}
async fn download_blob_ns(
state: State<Arc<AppState>>,
Path((ns, name, digest)): Path<(String, String, String)>,
) -> Response {
let full_name = format!("{}/{}", ns, name);
download_blob(state, Path((full_name, digest))).await
}
async fn start_upload_ns(Path((ns, name)): Path<(String, String)>) -> Response {
let full_name = format!("{}/{}", ns, name);
start_upload(Path(full_name)).await
}
async fn patch_blob_ns(
Path((ns, name, uuid)): Path<(String, String, String)>,
body: Bytes,
) -> Response {
let full_name = format!("{}/{}", ns, name);
patch_blob(Path((full_name, uuid)), body).await
}
async fn upload_blob_ns(
state: State<Arc<AppState>>,
Path((ns, name, uuid)): Path<(String, String, String)>,
query: axum::extract::Query<std::collections::HashMap<String, String>>,
body: Bytes,
) -> Response {
let full_name = format!("{}/{}", ns, name);
upload_blob(state, Path((full_name, uuid)), query, body).await
}
async fn get_manifest_ns(
state: State<Arc<AppState>>,
Path((ns, name, reference)): Path<(String, String, String)>,
) -> Response {
let full_name = format!("{}/{}", ns, name);
get_manifest(state, Path((full_name, reference))).await
}
async fn put_manifest_ns(
state: State<Arc<AppState>>,
Path((ns, name, reference)): Path<(String, String, String)>,
body: Bytes,
) -> Response {
let full_name = format!("{}/{}", ns, name);
put_manifest(state, Path((full_name, reference)), body).await
}
async fn list_tags_ns(
state: State<Arc<AppState>>,
Path((ns, name)): Path<(String, String)>,
) -> Response {
let full_name = format!("{}/{}", ns, name);
list_tags(state, Path(full_name)).await
}
/// Fetch a blob from an upstream Docker registry
async fn fetch_blob_from_upstream(
client: &reqwest::Client,
upstream_url: &str,
name: &str,
digest: &str,
@@ -469,13 +597,13 @@ async fn fetch_blob_from_upstream(
digest
);
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(timeout))
.build()
.map_err(|_| ())?;
// First try without auth
let response = client.get(&url).send().await.map_err(|_| ())?;
let response = client
.get(&url)
.timeout(Duration::from_secs(timeout))
.send()
.await
.map_err(|_| ())?;
let response = if response.status() == reqwest::StatusCode::UNAUTHORIZED {
// Get Www-Authenticate header and fetch token
@@ -512,6 +640,7 @@ async fn fetch_blob_from_upstream(
/// Fetch a manifest from an upstream Docker registry
/// Returns (manifest_bytes, content_type)
async fn fetch_manifest_from_upstream(
client: &reqwest::Client,
upstream_url: &str,
name: &str,
reference: &str,
@@ -525,10 +654,7 @@ async fn fetch_manifest_from_upstream(
reference
);
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(timeout))
.build()
.map_err(|_| ())?;
tracing::debug!(url = %url, "Fetching manifest from upstream");
// Request with Accept header for manifest types
let accept_header = "application/vnd.docker.distribution.manifest.v2+json, \
@@ -539,10 +665,15 @@ async fn fetch_manifest_from_upstream(
// First try without auth
let response = client
.get(&url)
.timeout(Duration::from_secs(timeout))
.header("Accept", accept_header)
.send()
.await
.map_err(|_| ())?;
.map_err(|e| {
tracing::error!(error = %e, url = %url, "Failed to send request to upstream");
})?;
tracing::debug!(status = %response.status(), "Initial upstream response");
let response = if response.status() == reqwest::StatusCode::UNAUTHORIZED {
// Get Www-Authenticate header and fetch token
@@ -552,25 +683,34 @@ async fn fetch_manifest_from_upstream(
.and_then(|v| v.to_str().ok())
.map(String::from);
tracing::debug!(www_auth = ?www_auth, "Got 401, fetching token");
if let Some(token) = docker_auth
.get_token(upstream_url, name, www_auth.as_deref())
.await
{
tracing::debug!("Token acquired, retrying with auth");
client
.get(&url)
.header("Accept", accept_header)
.header("Authorization", format!("Bearer {}", token))
.send()
.await
.map_err(|_| ())?
.map_err(|e| {
tracing::error!(error = %e, "Failed to send authenticated request");
})?
} else {
tracing::error!("Failed to acquire token");
return Err(());
}
} else {
response
};
tracing::debug!(status = %response.status(), "Final upstream response");
if !response.status().is_success() {
tracing::warn!(status = %response.status(), "Upstream returned non-success status");
return Err(());
}


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use parking_lot::RwLock;
use std::collections::HashMap;
use std::time::{Duration, Instant};
@@ -77,9 +80,12 @@ impl DockerAuth {
let scope = format!("repository:{}:pull", name);
let url = format!("{}?service={}&scope={}", realm, service, scope);
tracing::debug!(url = %url, "Fetching auth token");
let response = self.client.get(&url).send().await.ok()?;
if !response.status().is_success() {
tracing::warn!(status = %response.status(), "Token request failed");
return None;
}


@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
@@ -20,7 +23,6 @@ pub fn routes() -> Router<Arc<AppState>> {
async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
let key = format!("maven/{}", path);
// Extract artifact name for logging (last 2-3 path components)
let artifact_name = path
.split('/')
.rev()
@@ -31,7 +33,6 @@ async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>)
.collect::<Vec<_>>()
.join("/");
// Try local storage first
if let Ok(data) = state.storage.get(&key).await {
state.metrics.record_download("maven");
state.metrics.record_cache_hit();
@@ -44,11 +45,10 @@ async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>)
return with_content_type(&path, data).into_response();
}
// Try proxy servers
for proxy_url in &state.config.maven.proxies {
let url = format!("{}/{}", proxy_url.trim_end_matches('/'), path);
match fetch_from_proxy(&url, state.config.maven.proxy_timeout).await {
match fetch_from_proxy(&state.http_client, &url, state.config.maven.proxy_timeout).await {
Ok(data) => {
state.metrics.record_download("maven");
state.metrics.record_cache_miss();
@@ -59,7 +59,6 @@ async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>)
"PROXY",
));
// Cache in local storage (fire and forget)
let storage = state.storage.clone();
let key_clone = key.clone();
let data_clone = data.clone();
@@ -67,6 +66,8 @@ async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>)
let _ = storage.put(&key_clone, &data_clone).await;
});
state.repo_index.invalidate("maven");
return with_content_type(&path, data.into()).into_response();
}
Err(_) => continue,
@@ -83,7 +84,6 @@ async fn upload(
) -> StatusCode {
let key = format!("maven/{}", path);
// Extract artifact name for logging
let artifact_name = path
.split('/')
.rev()
@@ -103,20 +103,25 @@ async fn upload(
"maven",
"LOCAL",
));
state.repo_index.invalidate("maven");
StatusCode::CREATED
}
Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
}
}
async fn fetch_from_proxy(url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
let client = reqwest::Client::builder()
async fn fetch_from_proxy(
client: &reqwest::Client,
url: &str,
timeout_secs: u64,
) -> Result<Vec<u8>, ()> {
let response = client
.get(url)
.timeout(Duration::from_secs(timeout_secs))
.build()
.send()
.await
.map_err(|_| ())?;
let response = client.get(url).send().await.map_err(|_| ())?;
if !response.status().is_success() {
return Err(());
}
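
The "fire and forget" caching above clones the storage handle and detaches a task, so a slow or failed cache write never delays the response that is already being served from the proxy. The same shape in isolation (state, key and data are the handler's locals, as in the hunk):

let storage = state.storage.clone();
let key_clone = key.clone();
let data_clone = data.clone();
tokio::spawn(async move {
    // Errors are dropped on purpose: the artifact was already delivered,
    // and a missed cache write only means one more upstream fetch later.
    let _ = storage.put(&key_clone, &data_clone).await;
});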

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
mod cargo_registry;
pub mod docker;
pub mod docker_auth;

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
@@ -16,7 +19,6 @@ pub fn routes() -> Router<Arc<AppState>> {
}
async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
// Determine if this is a tarball request or metadata request
let is_tarball = path.contains("/-/");
let key = if is_tarball {
@@ -30,14 +32,12 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
format!("npm/{}/metadata.json", path)
};
// Extract package name for logging
let package_name = if is_tarball {
path.split("/-/").next().unwrap_or(&path).to_string()
} else {
path.clone()
};
// Try local storage first
if let Ok(data) = state.storage.get(&key).await {
if is_tarball {
state.metrics.record_download("npm");
@@ -52,17 +52,12 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
return with_content_type(is_tarball, data).into_response();
}
// Try proxy if configured
if let Some(proxy_url) = &state.config.npm.proxy {
let url = if is_tarball {
// Tarball URL: https://registry.npmjs.org/package/-/package-version.tgz
format!("{}/{}", proxy_url.trim_end_matches('/'), path)
} else {
// Metadata URL: https://registry.npmjs.org/package
format!("{}/{}", proxy_url.trim_end_matches('/'), path)
};
let url = format!("{}/{}", proxy_url.trim_end_matches('/'), path);
if let Ok(data) = fetch_from_proxy(&url, state.config.npm.proxy_timeout).await {
if let Ok(data) =
fetch_from_proxy(&state.http_client, &url, state.config.npm.proxy_timeout).await
{
if is_tarball {
state.metrics.record_download("npm");
state.metrics.record_cache_miss();
@@ -74,7 +69,6 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
));
}
// Cache in local storage (fire and forget)
let storage = state.storage.clone();
let key_clone = key.clone();
let data_clone = data.clone();
@@ -82,6 +76,10 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
let _ = storage.put(&key_clone, &data_clone).await;
});
if is_tarball {
state.repo_index.invalidate("npm");
}
return with_content_type(is_tarball, data.into()).into_response();
}
}
@@ -89,14 +87,18 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
StatusCode::NOT_FOUND.into_response()
}
async fn fetch_from_proxy(url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
let client = reqwest::Client::builder()
async fn fetch_from_proxy(
client: &reqwest::Client,
url: &str,
timeout_secs: u64,
) -> Result<Vec<u8>, ()> {
let response = client
.get(url)
.timeout(Duration::from_secs(timeout_secs))
.build()
.send()
.await
.map_err(|_| ())?;
let response = client.get(url).send().await.map_err(|_| ())?;
if !response.status().is_success() {
return Err(());
}
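
The npm handler routes on one structural cue: tarball URLs carry a "/-/" segment between package name and filename, while everything else is a metadata request. Spelled out as a predicate with examples (illustrative, mirroring the contains("/-/") check above):

/// true  for "left-pad/-/left-pad-1.3.0.tgz"
/// false for "left-pad" or "@scope/pkg" (metadata requests)
fn is_tarball_request(path: &str) -> bool {
    path.contains("/-/")
}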

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
@@ -82,7 +85,9 @@ async fn package_versions(
if let Some(proxy_url) = &state.config.pypi.proxy {
let url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);
if let Ok(html) = fetch_package_page(&url, state.config.pypi.proxy_timeout).await {
if let Ok(html) =
fetch_package_page(&state.http_client, &url, state.config.pypi.proxy_timeout).await
{
// Rewrite URLs in the HTML to point to our registry
let rewritten = rewrite_pypi_links(&html, &normalized);
return (StatusCode::OK, Html(rewritten)).into_response();
@@ -127,10 +132,22 @@ async fn download_file(
// First, fetch the package page to find the actual download URL
let page_url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);
if let Ok(html) = fetch_package_page(&page_url, state.config.pypi.proxy_timeout).await {
if let Ok(html) = fetch_package_page(
&state.http_client,
&page_url,
state.config.pypi.proxy_timeout,
)
.await
{
// Find the URL for this specific file
if let Some(file_url) = find_file_url(&html, &filename) {
if let Ok(data) = fetch_file(&file_url, state.config.pypi.proxy_timeout).await {
if let Ok(data) = fetch_file(
&state.http_client,
&file_url,
state.config.pypi.proxy_timeout,
)
.await
{
state.metrics.record_download("pypi");
state.metrics.record_cache_miss();
state.activity.push(ActivityEntry::new(
@@ -148,6 +165,8 @@ async fn download_file(
let _ = storage.put(&key_clone, &data_clone).await;
});
state.repo_index.invalidate("pypi");
let content_type = if filename.ends_with(".whl") {
"application/zip"
} else if filename.ends_with(".tar.gz") || filename.ends_with(".tgz") {
@@ -172,14 +191,14 @@ fn normalize_name(name: &str) -> String {
}
/// Fetch package page from upstream
async fn fetch_package_page(url: &str, timeout_secs: u64) -> Result<String, ()> {
let client = reqwest::Client::builder()
.timeout(Duration::from_secs(timeout_secs))
.build()
.map_err(|_| ())?;
async fn fetch_package_page(
client: &reqwest::Client,
url: &str,
timeout_secs: u64,
) -> Result<String, ()> {
let response = client
.get(url)
.timeout(Duration::from_secs(timeout_secs))
.header("Accept", "text/html")
.send()
.await
@@ -193,14 +212,14 @@ async fn fetch_package_page(url: &str, timeout_secs: u64) -> Result<String, ()>
}
/// Fetch file from upstream
async fn fetch_file(url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
let client = reqwest::Client::builder()
async fn fetch_file(client: &reqwest::Client, url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
let response = client
.get(url)
.timeout(Duration::from_secs(timeout_secs))
.build()
.send()
.await
.map_err(|_| ())?;
let response = client.get(url).send().await.map_err(|_| ())?;
if !response.status().is_success() {
return Err(());
}
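
The body of normalize_name is elided by the hunk; PyPI's simple API requires PEP 503 normalization (lowercase, with runs of '-', '_' and '.' collapsed to a single hyphen), so here is a sketch under that assumption:

// Hypothetical PEP 503 normalizer; the repo's normalize_name may differ.
fn normalize_pep503(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut prev_sep = false;
    for c in name.chars() {
        if matches!(c, '-' | '_' | '.') {
            if !prev_sep {
                out.push('-');
                prev_sep = true;
            }
        } else {
            out.push(c.to_ascii_lowercase());
            prev_sep = false;
        }
    }
    out
}
// normalize_pep503("Flask_SQLAlchemy") == "flask-sqlalchemy"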

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{

View File

@@ -0,0 +1,351 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! In-memory repository index with lazy rebuild on invalidation.
//!
//! Design (optimized for efficiency):
//! - Rebuild happens ONLY on write operations, not TTL
//! - Double-checked locking prevents duplicate rebuilds
//! - Arc<Vec> for zero-cost reads
//! - Single rebuild at a time per registry (rebuild_lock)
use crate::storage::Storage;
use crate::ui::components::format_timestamp;
use parking_lot::RwLock;
use serde::Serialize;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::sync::Mutex as AsyncMutex;
use tracing::info;
/// Repository info for UI display
#[derive(Debug, Clone, Serialize)]
pub struct RepoInfo {
pub name: String,
pub versions: usize,
pub size: u64,
pub updated: String,
}
/// Index for a single registry type
pub struct RegistryIndex {
data: RwLock<Arc<Vec<RepoInfo>>>,
dirty: AtomicBool,
rebuild_lock: AsyncMutex<()>,
}
impl RegistryIndex {
pub fn new() -> Self {
Self {
data: RwLock::new(Arc::new(Vec::new())),
dirty: AtomicBool::new(true),
rebuild_lock: AsyncMutex::new(()),
}
}
/// Mark index as needing rebuild
pub fn invalidate(&self) {
self.dirty.store(true, Ordering::Release);
}
fn is_dirty(&self) -> bool {
self.dirty.load(Ordering::Acquire)
}
fn get_cached(&self) -> Arc<Vec<RepoInfo>> {
Arc::clone(&self.data.read())
}
fn set(&self, data: Vec<RepoInfo>) {
*self.data.write() = Arc::new(data);
self.dirty.store(false, Ordering::Release);
}
pub fn count(&self) -> usize {
self.data.read().len()
}
}
impl Default for RegistryIndex {
fn default() -> Self {
Self::new()
}
}
/// Main repository index for all registries
pub struct RepoIndex {
pub docker: RegistryIndex,
pub maven: RegistryIndex,
pub npm: RegistryIndex,
pub cargo: RegistryIndex,
pub pypi: RegistryIndex,
}
impl RepoIndex {
pub fn new() -> Self {
Self {
docker: RegistryIndex::new(),
maven: RegistryIndex::new(),
npm: RegistryIndex::new(),
cargo: RegistryIndex::new(),
pypi: RegistryIndex::new(),
}
}
/// Invalidate a specific registry index
pub fn invalidate(&self, registry: &str) {
match registry {
"docker" => self.docker.invalidate(),
"maven" => self.maven.invalidate(),
"npm" => self.npm.invalidate(),
"cargo" => self.cargo.invalidate(),
"pypi" => self.pypi.invalidate(),
_ => {}
}
}
/// Get index with double-checked locking (prevents race condition)
pub async fn get(&self, registry: &str, storage: &Storage) -> Arc<Vec<RepoInfo>> {
let index = match registry {
"docker" => &self.docker,
"maven" => &self.maven,
"npm" => &self.npm,
"cargo" => &self.cargo,
"pypi" => &self.pypi,
_ => return Arc::new(Vec::new()),
};
// Fast path: not dirty, return cached
if !index.is_dirty() {
return index.get_cached();
}
// Slow path: acquire rebuild lock (only one thread rebuilds)
let _guard = index.rebuild_lock.lock().await;
// Double-check under lock (another thread may have rebuilt)
if index.is_dirty() {
let data = match registry {
"docker" => build_docker_index(storage).await,
"maven" => build_maven_index(storage).await,
"npm" => build_npm_index(storage).await,
"cargo" => build_cargo_index(storage).await,
"pypi" => build_pypi_index(storage).await,
_ => Vec::new(),
};
info!(registry = registry, count = data.len(), "Index rebuilt");
index.set(data);
}
index.get_cached()
}
/// Get counts for stats (no rebuild, just current state)
pub fn counts(&self) -> (usize, usize, usize, usize, usize) {
(
self.docker.count(),
self.maven.count(),
self.npm.count(),
self.cargo.count(),
self.pypi.count(),
)
}
}
impl Default for RepoIndex {
fn default() -> Self {
Self::new()
}
}
// ============================================================================
// Index builders
// ============================================================================
async fn build_docker_index(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("docker/").await;
let mut repos: HashMap<String, (usize, u64, u64)> = HashMap::new();
for key in &keys {
if key.ends_with(".meta.json") {
continue;
}
if let Some(rest) = key.strip_prefix("docker/") {
let parts: Vec<_> = rest.split('/').collect();
if parts.len() >= 3 && parts[1] == "manifests" && key.ends_with(".json") {
let name = parts[0].to_string();
let entry = repos.entry(name).or_insert((0, 0, 0));
entry.0 += 1;
if let Ok(data) = storage.get(key).await {
if let Ok(m) = serde_json::from_slice::<serde_json::Value>(&data) {
let cfg = m
.get("config")
.and_then(|c| c.get("size"))
.and_then(|s| s.as_u64())
.unwrap_or(0);
let layers: u64 = m
.get("layers")
.and_then(|l| l.as_array())
.map(|arr| {
arr.iter()
.filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
.sum()
})
.unwrap_or(0);
entry.1 += cfg + layers;
}
}
if let Some(meta) = storage.stat(key).await {
if meta.modified > entry.2 {
entry.2 = meta.modified;
}
}
}
}
}
to_sorted_vec(repos)
}
async fn build_maven_index(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("maven/").await;
let mut repos: HashMap<String, (usize, u64, u64)> = HashMap::new();
for key in &keys {
if let Some(rest) = key.strip_prefix("maven/") {
let parts: Vec<_> = rest.split('/').collect();
if parts.len() >= 2 {
let path = parts[..parts.len() - 1].join("/");
let entry = repos.entry(path).or_insert((0, 0, 0));
entry.0 += 1;
if let Some(meta) = storage.stat(key).await {
entry.1 += meta.size;
if meta.modified > entry.2 {
entry.2 = meta.modified;
}
}
}
}
}
to_sorted_vec(repos)
}
async fn build_npm_index(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("npm/").await;
let mut packages: HashMap<String, (usize, u64, u64)> = HashMap::new();
// Count tarballs instead of parsing metadata.json (skips JSON parsing on every rebuild)
for key in &keys {
if let Some(rest) = key.strip_prefix("npm/") {
// Pattern: npm/{package}/tarballs/{file}.tgz
if rest.contains("/tarballs/") && key.ends_with(".tgz") {
let parts: Vec<_> = rest.split('/').collect();
if !parts.is_empty() {
let name = parts[0].to_string();
let entry = packages.entry(name).or_insert((0, 0, 0));
entry.0 += 1;
if let Some(meta) = storage.stat(key).await {
entry.1 += meta.size;
if meta.modified > entry.2 {
entry.2 = meta.modified;
}
}
}
}
}
}
to_sorted_vec(packages)
}
async fn build_cargo_index(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("cargo/").await;
let mut crates: HashMap<String, (usize, u64, u64)> = HashMap::new();
for key in &keys {
if key.ends_with(".crate") {
if let Some(rest) = key.strip_prefix("cargo/") {
let parts: Vec<_> = rest.split('/').collect();
if !parts.is_empty() {
let name = parts[0].to_string();
let entry = crates.entry(name).or_insert((0, 0, 0));
entry.0 += 1;
if let Some(meta) = storage.stat(key).await {
entry.1 += meta.size;
if meta.modified > entry.2 {
entry.2 = meta.modified;
}
}
}
}
}
}
to_sorted_vec(crates)
}
async fn build_pypi_index(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("pypi/").await;
let mut packages: HashMap<String, (usize, u64, u64)> = HashMap::new();
for key in &keys {
if let Some(rest) = key.strip_prefix("pypi/") {
let parts: Vec<_> = rest.split('/').collect();
if parts.len() >= 2 {
let name = parts[0].to_string();
let entry = packages.entry(name).or_insert((0, 0, 0));
entry.0 += 1;
if let Some(meta) = storage.stat(key).await {
entry.1 += meta.size;
if meta.modified > entry.2 {
entry.2 = meta.modified;
}
}
}
}
}
to_sorted_vec(packages)
}
/// Convert HashMap to sorted Vec<RepoInfo>
fn to_sorted_vec(map: HashMap<String, (usize, u64, u64)>) -> Vec<RepoInfo> {
let mut result: Vec<_> = map
.into_iter()
.map(|(name, (versions, size, modified))| RepoInfo {
name,
versions,
size,
updated: if modified > 0 {
format_timestamp(modified)
} else {
"N/A".to_string()
},
})
.collect();
result.sort_by(|a, b| a.name.cmp(&b.name));
result
}
/// Pagination helper
pub fn paginate<T: Clone>(data: &[T], page: usize, limit: usize) -> (Vec<T>, usize) {
let total = data.len();
let start = page.saturating_sub(1) * limit;
if start >= total {
return (Vec::new(), total);
}
let end = (start + limit).min(total);
(data[start..end].to_vec(), total)
}
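
How the pieces fit together, as an illustrative usage sketch (state is the application state that owns both the index and the storage handle):

// Write path: an upload just flips the dirty flag; O(1), no rebuild yet.
state.repo_index.invalidate("cargo");

// Read path: at most one task rebuilds; concurrent readers get the cached Arc.
let repos = state.repo_index.get("cargo", &state.storage).await;

// Page 2 at 50 per page: items 51..=100 plus the total for the pager UI.
let (page_items, total) = paginate(&repos, 2, 50);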

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! Request ID middleware for request tracking and correlation
//!
//! Generates a unique ID for each request that can be used for:

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! Environment variables secrets provider
//!
//! Reads secrets from environment variables. This is the default provider

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
#![allow(dead_code)] // Foundational code for future S3/Vault integration
//! Secrets management for NORA

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
//! Protected secret types with memory safety
//!
//! Secrets are automatically zeroed on drop and redacted in Debug output.

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use async_trait::async_trait;
use axum::body::Bytes;
use std::path::PathBuf;

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
mod local;
mod s3;
@@ -59,9 +62,17 @@ impl Storage {
}
}
pub fn new_s3(s3_url: &str, bucket: &str) -> Self {
pub fn new_s3(
s3_url: &str,
bucket: &str,
region: &str,
access_key: Option<&str>,
secret_key: Option<&str>,
) -> Self {
Self {
inner: Arc::new(S3Storage::new(s3_url, bucket)),
inner: Arc::new(S3Storage::new(
s3_url, bucket, region, access_key, secret_key,
)),
}
}
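
Call-site shape for the widened constructor; the endpoint and credentials below are illustrative, and passing None for both keys yields unsigned (anonymous) requests:

let storage = Storage::new_s3(
    "http://minio:9000", // endpoint; S3Storage trims a trailing '/'
    "nora",              // bucket
    "us-east-1",         // region used in the SigV4 credential scope
    Some("minioadmin"),  // access key (None, None => unsigned requests)
    Some("minioadmin"),  // secret key
);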

View File

@@ -1,24 +1,146 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use async_trait::async_trait;
use axum::body::Bytes;
use chrono::Utc;
use hmac::{Hmac, Mac};
use sha2::{Digest, Sha256};
use super::{FileMeta, Result, StorageBackend, StorageError};
type HmacSha256 = Hmac<Sha256>;
/// S3-compatible storage backend (MinIO, AWS S3)
pub struct S3Storage {
s3_url: String,
bucket: String,
region: String,
access_key: Option<String>,
secret_key: Option<String>,
client: reqwest::Client,
}
impl S3Storage {
pub fn new(s3_url: &str, bucket: &str) -> Self {
/// Create new S3 storage with optional credentials
pub fn new(
s3_url: &str,
bucket: &str,
region: &str,
access_key: Option<&str>,
secret_key: Option<&str>,
) -> Self {
Self {
s3_url: s3_url.to_string(),
s3_url: s3_url.trim_end_matches('/').to_string(),
bucket: bucket.to_string(),
region: region.to_string(),
access_key: access_key.map(String::from),
secret_key: secret_key.map(String::from),
client: reqwest::Client::new(),
}
}
/// Sign a request using AWS Signature v4
fn sign_request(
&self,
method: &str,
path: &str,
payload_hash: &str,
timestamp: &str,
date: &str,
) -> Option<String> {
let (access_key, secret_key) = match (&self.access_key, &self.secret_key) {
(Some(ak), Some(sk)) => (ak.as_str(), sk.as_str()),
_ => return None,
};
// Parse host from URL
let host = self
.s3_url
.trim_start_matches("http://")
.trim_start_matches("https://");
// Canonical request
// URI must be URL-encoded (except /)
let encoded_path = uri_encode(path);
let canonical_uri = format!("/{}/{}", self.bucket, encoded_path);
let canonical_query = "";
let canonical_headers = format!(
"host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
host, payload_hash, timestamp
);
let signed_headers = "host;x-amz-content-sha256;x-amz-date";
// AWS Signature v4 canonical request format:
// HTTPMethod\nCanonicalURI\nCanonicalQueryString\nCanonicalHeaders\n\nSignedHeaders\nHashedPayload
// Note: CanonicalHeaders already ends with \n, plus blank line before SignedHeaders
let canonical_request = format!(
"{}\n{}\n{}\n{}\n{}\n{}",
method, canonical_uri, canonical_query, canonical_headers, signed_headers, payload_hash
);
let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
// String to sign
let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
let string_to_sign = format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
timestamp, credential_scope, canonical_request_hash
);
// Calculate signature
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
let k_region = hmac_sha256(&k_date, self.region.as_bytes());
let k_service = hmac_sha256(&k_region, b"s3");
let k_signing = hmac_sha256(&k_service, b"aws4_request");
let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));
// Authorization header
Some(format!(
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
access_key, credential_scope, signed_headers, signature
))
}
/// Make a signed request
async fn signed_request(
&self,
method: reqwest::Method,
key: &str,
body: Option<&[u8]>,
) -> std::result::Result<reqwest::Response, StorageError> {
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
let now = Utc::now();
let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
let date = now.format("%Y%m%d").to_string();
let payload_hash = match body {
Some(data) => hex::encode(Sha256::digest(data)),
None => hex::encode(Sha256::digest(b"")),
};
let mut request = self
.client
.request(method.clone(), &url)
.header("x-amz-date", &timestamp)
.header("x-amz-content-sha256", &payload_hash);
if let Some(auth) =
self.sign_request(method.as_str(), key, &payload_hash, &timestamp, &date)
{
request = request.header("Authorization", auth);
}
if let Some(data) = body {
request = request.body(data.to_vec());
}
request
.send()
.await
.map_err(|e| StorageError::Network(e.to_string()))
}
fn parse_s3_keys(xml: &str, prefix: &str) -> Vec<String> {
xml.split("<Key>")
.filter_map(|part| part.split("</Key>").next())
@@ -28,17 +150,34 @@ impl S3Storage {
}
}
/// URL-encode a string for S3 canonical URI (encode all except A-Za-z0-9-_.~/)
fn uri_encode(s: &str) -> String {
let mut result = String::with_capacity(s.len() * 3);
for c in s.chars() {
match c {
'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '_' | '.' | '~' | '/' => result.push(c),
_ => {
for b in c.to_string().as_bytes() {
result.push_str(&format!("%{:02X}", b));
}
}
}
}
result
}
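// Examples: '/' is kept as a path separator, all other non-unreserved
// bytes are percent-encoded (UTF-8, byte by byte), per SigV4 canonical URIs:
//   uri_encode("docker/my image+v1") == "docker/my%20image%2Bv1"
//   uri_encode("npm/@scope/pkg")     == "npm/%40scope/pkg"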
fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC can take key of any size");
mac.update(data);
mac.finalize().into_bytes().to_vec()
}
#[async_trait]
impl StorageBackend for S3Storage {
async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
let response = self
.client
.put(&url)
.body(data.to_vec())
.send()
.await
.map_err(|e| StorageError::Network(e.to_string()))?;
.signed_request(reqwest::Method::PUT, key, Some(data))
.await?;
if response.status().is_success() {
Ok(())
@@ -51,13 +190,7 @@ impl StorageBackend for S3Storage {
}
async fn get(&self, key: &str) -> Result<Bytes> {
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
let response = self
.client
.get(&url)
.send()
.await
.map_err(|e| StorageError::Network(e.to_string()))?;
let response = self.signed_request(reqwest::Method::GET, key, None).await?;
if response.status().is_success() {
response
@@ -75,13 +208,9 @@ impl StorageBackend for S3Storage {
}
async fn delete(&self, key: &str) -> Result<()> {
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
let response = self
.client
.delete(&url)
.send()
.await
.map_err(|e| StorageError::Network(e.to_string()))?;
.signed_request(reqwest::Method::DELETE, key, None)
.await?;
if response.status().is_success() || response.status().as_u16() == 204 {
Ok(())
@@ -96,8 +225,59 @@ impl StorageBackend for S3Storage {
}
async fn list(&self, prefix: &str) -> Vec<String> {
// For listing, we need to make a request to the bucket
let url = format!("{}/{}", self.s3_url, self.bucket);
match self.client.get(&url).send().await {
let now = Utc::now();
let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
let date = now.format("%Y%m%d").to_string();
let payload_hash = hex::encode(Sha256::digest(b""));
let host = self
.s3_url
.trim_start_matches("http://")
.trim_start_matches("https://");
let mut request = self
.client
.get(&url)
.header("x-amz-date", &timestamp)
.header("x-amz-content-sha256", &payload_hash);
// Sign for bucket listing (different path)
if let (Some(access_key), Some(secret_key)) = (&self.access_key, &self.secret_key) {
let canonical_uri = format!("/{}", self.bucket);
let canonical_headers = format!(
"host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
host, payload_hash, timestamp
);
let signed_headers = "host;x-amz-content-sha256;x-amz-date";
let canonical_request = format!(
"GET\n{}\n\n{}\n{}\n{}",
canonical_uri, canonical_headers, signed_headers, payload_hash
);
let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
let string_to_sign = format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
timestamp, credential_scope, canonical_request_hash
);
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
let k_region = hmac_sha256(&k_date, self.region.as_bytes());
let k_service = hmac_sha256(&k_region, b"s3");
let k_signing = hmac_sha256(&k_service, b"aws4_request");
let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));
let auth = format!(
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
access_key, credential_scope, signed_headers, signature
);
request = request.header("Authorization", auth);
}
match request.send().await {
Ok(response) if response.status().is_success() => {
if let Ok(xml) = response.text().await {
Self::parse_s3_keys(&xml, prefix)
@@ -110,18 +290,22 @@ impl StorageBackend for S3Storage {
}
async fn stat(&self, key: &str) -> Option<FileMeta> {
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
let response = self.client.head(&url).send().await.ok()?;
let response = self
.signed_request(reqwest::Method::HEAD, key, None)
.await
.ok()?;
if !response.status().is_success() {
return None;
}
let size = response
.headers()
.get("content-length")
.and_then(|v| v.to_str().ok())
.and_then(|v| v.parse().ok())
.unwrap_or(0);
// S3 returns a Last-Modified header; fall back to 0 if it is missing or unparsable
let modified = response
.headers()
.get("last-modified")
@@ -133,12 +317,63 @@ impl StorageBackend for S3Storage {
.as_secs()
})
.unwrap_or(0);
Some(FileMeta { size, modified })
}
async fn health_check(&self) -> bool {
// Try HEAD on the bucket
let url = format!("{}/{}", self.s3_url, self.bucket);
match self.client.head(&url).send().await {
let now = Utc::now();
let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
let date = now.format("%Y%m%d").to_string();
let payload_hash = hex::encode(Sha256::digest(b""));
let host = self
.s3_url
.trim_start_matches("http://")
.trim_start_matches("https://");
let mut request = self
.client
.head(&url)
.header("x-amz-date", &timestamp)
.header("x-amz-content-sha256", &payload_hash);
if let (Some(access_key), Some(secret_key)) = (&self.access_key, &self.secret_key) {
let canonical_uri = format!("/{}", self.bucket);
let canonical_headers = format!(
"host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
host, payload_hash, timestamp
);
let signed_headers = "host;x-amz-content-sha256;x-amz-date";
let canonical_request = format!(
"HEAD\n{}\n\n{}\n{}\n{}",
canonical_uri, canonical_headers, signed_headers, payload_hash
);
let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
let string_to_sign = format!(
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
timestamp, credential_scope, canonical_request_hash
);
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
let k_region = hmac_sha256(&k_date, self.region.as_bytes());
let k_service = hmac_sha256(&k_region, b"s3");
let k_signing = hmac_sha256(&k_service, b"aws4_request");
let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));
let auth = format!(
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
access_key, credential_scope, signed_headers, signature
);
request = request.header("Authorization", auth);
}
match request.send().await {
Ok(response) => response.status().is_success() || response.status().as_u16() == 404,
Err(_) => false,
}
@@ -152,173 +387,28 @@ impl StorageBackend for S3Storage {
#[cfg(test)]
mod tests {
use super::*;
use wiremock::matchers::{method, path};
use wiremock::{Mock, MockServer, ResponseTemplate};
#[tokio::test]
async fn test_put_success() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("PUT"))
.and(path("/test-bucket/test-key"))
.respond_with(ResponseTemplate::new(200))
.mount(&mock_server)
.await;
let result = storage.put("test-key", b"data").await;
assert!(result.is_ok());
}
#[tokio::test]
async fn test_put_failure() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("PUT"))
.and(path("/test-bucket/test-key"))
.respond_with(ResponseTemplate::new(500))
.mount(&mock_server)
.await;
let result = storage.put("test-key", b"data").await;
assert!(matches!(result, Err(StorageError::Network(_))));
}
#[tokio::test]
async fn test_get_success() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("GET"))
.and(path("/test-bucket/test-key"))
.respond_with(ResponseTemplate::new(200).set_body_bytes(b"test data".to_vec()))
.mount(&mock_server)
.await;
let data = storage.get("test-key").await.unwrap();
assert_eq!(&*data, b"test data");
}
#[tokio::test]
async fn test_get_not_found() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("GET"))
.and(path("/test-bucket/missing"))
.respond_with(ResponseTemplate::new(404))
.mount(&mock_server)
.await;
let result = storage.get("missing").await;
assert!(matches!(result, Err(StorageError::NotFound)));
}
#[tokio::test]
async fn test_list() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
let xml_response = r#"<?xml version="1.0"?>
<ListBucketResult>
<Key>docker/image1</Key>
<Key>docker/image2</Key>
<Key>maven/artifact</Key>
</ListBucketResult>"#;
Mock::given(method("GET"))
.and(path("/test-bucket"))
.respond_with(ResponseTemplate::new(200).set_body_string(xml_response))
.mount(&mock_server)
.await;
let keys = storage.list("docker/").await;
assert_eq!(keys.len(), 2);
assert!(keys.iter().all(|k| k.starts_with("docker/")));
}
#[tokio::test]
async fn test_stat_success() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("HEAD"))
.and(path("/test-bucket/test-key"))
.respond_with(
ResponseTemplate::new(200)
.insert_header("content-length", "1234")
.insert_header("last-modified", "Sun, 06 Nov 1994 08:49:37 GMT"),
)
.mount(&mock_server)
.await;
let meta = storage.stat("test-key").await.unwrap();
assert_eq!(meta.size, 1234);
assert!(meta.modified > 0);
}
#[tokio::test]
async fn test_stat_not_found() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("HEAD"))
.and(path("/test-bucket/missing"))
.respond_with(ResponseTemplate::new(404))
.mount(&mock_server)
.await;
let meta = storage.stat("missing").await;
assert!(meta.is_none());
}
#[tokio::test]
async fn test_health_check_healthy() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("HEAD"))
.and(path("/test-bucket"))
.respond_with(ResponseTemplate::new(200))
.mount(&mock_server)
.await;
assert!(storage.health_check().await);
}
#[tokio::test]
async fn test_health_check_bucket_not_found_is_ok() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("HEAD"))
.and(path("/test-bucket"))
.respond_with(ResponseTemplate::new(404))
.mount(&mock_server)
.await;
// 404 is OK for health check (bucket may not exist yet, but the endpoint is reachable)
assert!(storage.health_check().await);
}
#[tokio::test]
async fn test_health_check_server_error() {
let mock_server = MockServer::start().await;
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
Mock::given(method("HEAD"))
.and(path("/test-bucket"))
.respond_with(ResponseTemplate::new(500))
.mount(&mock_server)
.await;
assert!(!storage.health_check().await);
}
#[test]
fn test_backend_name() {
let storage = S3Storage::new("http://localhost:9000", "bucket");
let storage = S3Storage::new(
"http://localhost:9000",
"test-bucket",
"us-east-1",
Some("access"),
Some("secret"),
);
assert_eq!(storage.backend_name(), "s3");
}
#[test]
fn test_s3_storage_creation_anonymous() {
let storage = S3Storage::new(
"http://localhost:9000",
"test-bucket",
"us-east-1",
None,
None,
);
assert_eq!(storage.backend_name(), "s3");
}
@@ -328,4 +418,10 @@ mod tests {
let keys = S3Storage::parse_s3_keys(xml, "docker/");
assert_eq!(keys, vec!["docker/a", "docker/b"]);
}
#[test]
fn test_hmac_sha256() {
let result = hmac_sha256(b"key", b"data");
assert!(!result.is_empty());
}
}
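
list and health_check above inline the same bucket-level signing block that signed_request performs for object keys. A hedged sketch of a helper that could factor out that duplication (hypothetical, not in the diff; reuses this module's hmac_sha256 plus the hex and Sha256 imports):

#[allow(clippy::too_many_arguments)]
fn sign_bucket_request(
    method: &str,       // "GET" for list, "HEAD" for health_check
    bucket: &str,
    region: &str,
    host: &str,
    access_key: &str,
    secret_key: &str,
    payload_hash: &str, // SHA-256 of the empty body
    timestamp: &str,    // "%Y%m%dT%H%M%SZ"
    date: &str,         // "%Y%m%d"
) -> String {
    let canonical_uri = format!("/{}", bucket);
    let canonical_headers = format!(
        "host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
        host, payload_hash, timestamp
    );
    let signed_headers = "host;x-amz-content-sha256;x-amz-date";
    // The empty canonical query string contributes the extra blank line.
    let canonical_request = format!(
        "{}\n{}\n\n{}\n{}\n{}",
        method, canonical_uri, canonical_headers, signed_headers, payload_hash
    );
    let request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
    let scope = format!("{}/{}/s3/aws4_request", date, region);
    let string_to_sign = format!(
        "AWS4-HMAC-SHA256\n{}\n{}\n{}",
        timestamp, scope, request_hash
    );
    let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, b"s3");
    let k_signing = hmac_sha256(&k_service, b"aws4_request");
    let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));
    format!(
        "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
        access_key, scope, signed_headers, signature
    )
}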

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::fs;

View File

@@ -1,6 +1,10 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use super::components::{format_size, format_timestamp, html_escape};
use super::templates::encode_uri_component;
use crate::activity_log::ActivityEntry;
use crate::repo_index::RepoInfo;
use crate::AppState;
use crate::Storage;
use axum::{
@@ -21,14 +25,6 @@ pub struct RegistryStats {
pub pypi: usize,
}
#[derive(Serialize, Clone)]
pub struct RepoInfo {
pub name: String,
pub versions: usize,
pub size: u64,
pub updated: String,
}
#[derive(Serialize)]
pub struct TagInfo {
pub name: String,
@@ -112,44 +108,44 @@ pub struct MountPoint {
// ============ API Handlers ============
pub async fn api_stats(State(state): State<Arc<AppState>>) -> Json<RegistryStats> {
let stats = get_registry_stats(&state.storage).await;
Json(stats)
// Trigger index rebuild if needed, then get counts
let _ = state.repo_index.get("docker", &state.storage).await;
let _ = state.repo_index.get("maven", &state.storage).await;
let _ = state.repo_index.get("npm", &state.storage).await;
let _ = state.repo_index.get("cargo", &state.storage).await;
let _ = state.repo_index.get("pypi", &state.storage).await;
let (docker, maven, npm, cargo, pypi) = state.repo_index.counts();
Json(RegistryStats {
docker,
maven,
npm,
cargo,
pypi,
})
}
pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<DashboardResponse> {
let registry_stats = get_registry_stats(&state.storage).await;
// Get indexes (will rebuild if dirty)
let docker_repos = state.repo_index.get("docker", &state.storage).await;
let maven_repos = state.repo_index.get("maven", &state.storage).await;
let npm_repos = state.repo_index.get("npm", &state.storage).await;
let cargo_repos = state.repo_index.get("cargo", &state.storage).await;
let pypi_repos = state.repo_index.get("pypi", &state.storage).await;
// Calculate total storage size
let all_keys = state.storage.list("").await;
let mut total_storage: u64 = 0;
let mut docker_size: u64 = 0;
let mut maven_size: u64 = 0;
let mut npm_size: u64 = 0;
let mut cargo_size: u64 = 0;
let mut pypi_size: u64 = 0;
// Calculate sizes from cached index
let docker_size: u64 = docker_repos.iter().map(|r| r.size).sum();
let maven_size: u64 = maven_repos.iter().map(|r| r.size).sum();
let npm_size: u64 = npm_repos.iter().map(|r| r.size).sum();
let cargo_size: u64 = cargo_repos.iter().map(|r| r.size).sum();
let pypi_size: u64 = pypi_repos.iter().map(|r| r.size).sum();
let total_storage = docker_size + maven_size + npm_size + cargo_size + pypi_size;
for key in &all_keys {
if let Some(meta) = state.storage.stat(key).await {
total_storage += meta.size;
if key.starts_with("docker/") {
docker_size += meta.size;
} else if key.starts_with("maven/") {
maven_size += meta.size;
} else if key.starts_with("npm/") {
npm_size += meta.size;
} else if key.starts_with("cargo/") {
cargo_size += meta.size;
} else if key.starts_with("pypi/") {
pypi_size += meta.size;
}
}
}
let total_artifacts = registry_stats.docker
+ registry_stats.maven
+ registry_stats.npm
+ registry_stats.cargo
+ registry_stats.pypi;
let total_artifacts = docker_repos.len()
+ maven_repos.len()
+ npm_repos.len()
+ cargo_repos.len()
+ pypi_repos.len();
let global_stats = GlobalStats {
downloads: state.metrics.downloads.load(Ordering::Relaxed),
@@ -162,35 +158,35 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
let registry_card_stats = vec![
RegistryCardStats {
name: "docker".to_string(),
artifact_count: registry_stats.docker,
artifact_count: docker_repos.len(),
downloads: state.metrics.get_registry_downloads("docker"),
uploads: state.metrics.get_registry_uploads("docker"),
size_bytes: docker_size,
},
RegistryCardStats {
name: "maven".to_string(),
artifact_count: registry_stats.maven,
artifact_count: maven_repos.len(),
downloads: state.metrics.get_registry_downloads("maven"),
uploads: state.metrics.get_registry_uploads("maven"),
size_bytes: maven_size,
},
RegistryCardStats {
name: "npm".to_string(),
artifact_count: registry_stats.npm,
artifact_count: npm_repos.len(),
downloads: state.metrics.get_registry_downloads("npm"),
uploads: 0,
size_bytes: npm_size,
},
RegistryCardStats {
name: "cargo".to_string(),
artifact_count: registry_stats.cargo,
artifact_count: cargo_repos.len(),
downloads: state.metrics.get_registry_downloads("cargo"),
uploads: 0,
size_bytes: cargo_size,
},
RegistryCardStats {
name: "pypi".to_string(),
artifact_count: registry_stats.pypi,
artifact_count: pypi_repos.len(),
downloads: state.metrics.get_registry_downloads("pypi"),
uploads: 0,
size_bytes: pypi_size,
@@ -241,15 +237,8 @@ pub async fn api_list(
State(state): State<Arc<AppState>>,
Path(registry_type): Path<String>,
) -> Json<Vec<RepoInfo>> {
let repos = match registry_type.as_str() {
"docker" => get_docker_repos(&state.storage).await,
"maven" => get_maven_repos(&state.storage).await,
"npm" => get_npm_packages(&state.storage).await,
"cargo" => get_cargo_crates(&state.storage).await,
"pypi" => get_pypi_packages(&state.storage).await,
_ => vec![],
};
Json(repos)
let repos = state.repo_index.get(&registry_type, &state.storage).await;
Json((*repos).clone())
}
pub async fn api_detail(
@@ -280,20 +269,13 @@ pub async fn api_search(
) -> axum::response::Html<String> {
let query = params.q.unwrap_or_default().to_lowercase();
let repos = match registry_type.as_str() {
"docker" => get_docker_repos(&state.storage).await,
"maven" => get_maven_repos(&state.storage).await,
"npm" => get_npm_packages(&state.storage).await,
"cargo" => get_cargo_crates(&state.storage).await,
"pypi" => get_pypi_packages(&state.storage).await,
_ => vec![],
};
let repos = state.repo_index.get(&registry_type, &state.storage).await;
let filtered: Vec<_> = if query.is_empty() {
repos
let filtered: Vec<&RepoInfo> = if query.is_empty() {
repos.iter().collect()
} else {
repos
.into_iter()
.iter()
.filter(|r| r.name.to_lowercase().contains(&query))
.collect()
};
@@ -338,7 +320,9 @@ pub async fn api_search(
}
// ============ Data Fetching Functions ============
// NOTE: Legacy functions below - kept for reference, will be removed in future cleanup
#[allow(dead_code)]
pub async fn get_registry_stats(storage: &Storage) -> RegistryStats {
let all_keys = storage.list("").await;
@@ -390,12 +374,18 @@ pub async fn get_registry_stats(storage: &Storage) -> RegistryStats {
}
}
#[allow(dead_code)]
pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("docker/").await;
let mut repos: HashMap<String, (RepoInfo, u64)> = HashMap::new(); // (info, latest_modified)
for key in &keys {
// Skip .meta.json files
if key.ends_with(".meta.json") {
continue;
}
if let Some(rest) = key.strip_prefix("docker/") {
let parts: Vec<_> = rest.split('/').collect();
if parts.len() >= 3 {
@@ -412,10 +402,35 @@ pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
)
});
if parts[1] == "manifests" {
if parts[1] == "manifests" && key.ends_with(".json") {
entry.0.versions += 1;
// Parse manifest to get actual image size (config + layers)
if let Ok(manifest_data) = storage.get(key).await {
if let Ok(manifest) =
serde_json::from_slice::<serde_json::Value>(&manifest_data)
{
let config_size = manifest
.get("config")
.and_then(|c| c.get("size"))
.and_then(|s| s.as_u64())
.unwrap_or(0);
let layers_size: u64 = manifest
.get("layers")
.and_then(|l| l.as_array())
.map(|layers| {
layers
.iter()
.filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
.sum()
})
.unwrap_or(0);
entry.0.size += config_size + layers_size;
}
}
// Update timestamp
if let Some(meta) = storage.stat(key).await {
entry.0.size += meta.size;
if meta.modified > entry.1 {
entry.1 = meta.modified;
entry.0.updated = format_timestamp(meta.modified);
@@ -470,11 +485,37 @@ pub async fn get_docker_detail(state: &AppState, name: &str) -> DockerDetail {
"N/A".to_string()
};
// Use size from metadata if available, otherwise from file
// Calculate size from manifest layers (config + layers)
let size = if metadata.size_bytes > 0 {
metadata.size_bytes
} else {
state.storage.stat(key).await.map(|m| m.size).unwrap_or(0)
// Parse manifest to get actual image size
if let Ok(manifest_data) = state.storage.get(key).await {
if let Ok(manifest) =
serde_json::from_slice::<serde_json::Value>(&manifest_data)
{
let config_size = manifest
.get("config")
.and_then(|c| c.get("size"))
.and_then(|s| s.as_u64())
.unwrap_or(0);
let layers_size: u64 = manifest
.get("layers")
.and_then(|l| l.as_array())
.map(|layers| {
layers
.iter()
.filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
.sum()
})
.unwrap_or(0);
config_size + layers_size
} else {
0
}
} else {
0
}
};
// Format last_pulled
@@ -512,6 +553,7 @@ pub async fn get_docker_detail(state: &AppState, name: &str) -> DockerDetail {
DockerDetail { tags }
}
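
The config-plus-layers size computation now appears in three places (build_docker_index, get_docker_repos and get_docker_detail). A hypothetical shared helper, not part of the diff, showing the shape once:

fn manifest_size(manifest: &serde_json::Value) -> u64 {
    // Docker/OCI v2 manifests record a size on the config blob and each layer.
    let config = manifest
        .get("config")
        .and_then(|c| c.get("size"))
        .and_then(|s| s.as_u64())
        .unwrap_or(0);
    let layers: u64 = manifest
        .get("layers")
        .and_then(|l| l.as_array())
        .map(|arr| {
            arr.iter()
                .filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
                .sum()
        })
        .unwrap_or(0);
    config + layers
}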
#[allow(dead_code)]
pub async fn get_maven_repos(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("maven/").await;
@@ -571,75 +613,125 @@ pub async fn get_maven_detail(storage: &Storage, path: &str) -> MavenDetail {
MavenDetail { artifacts }
}
#[allow(dead_code)]
pub async fn get_npm_packages(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("npm/").await;
let mut packages: HashMap<String, (RepoInfo, u64)> = HashMap::new();
let mut packages: HashMap<String, RepoInfo> = HashMap::new();
// Find all metadata.json files
for key in &keys {
if let Some(rest) = key.strip_prefix("npm/") {
let parts: Vec<_> = rest.split('/').collect();
if !parts.is_empty() {
let name = parts[0].to_string();
let entry = packages.entry(name.clone()).or_insert_with(|| {
(
RepoInfo {
name,
versions: 0,
size: 0,
updated: "N/A".to_string(),
},
0,
)
});
if key.ends_with("/metadata.json") {
if let Some(name) = key
.strip_prefix("npm/")
.and_then(|s| s.strip_suffix("/metadata.json"))
{
// Parse metadata to get version count and info
if let Ok(data) = storage.get(key).await {
if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
let versions_count = metadata
.get("versions")
.and_then(|v| v.as_object())
.map(|v| v.len())
.unwrap_or(0);
if parts.len() >= 3 && parts[1] == "tarballs" {
entry.0.versions += 1;
if let Some(meta) = storage.stat(key).await {
entry.0.size += meta.size;
if meta.modified > entry.1 {
entry.1 = meta.modified;
entry.0.updated = format_timestamp(meta.modified);
}
// Sum total size from each version's dist.unpackedSize (0 when absent)
let total_size: u64 = metadata
.get("versions")
.and_then(|v| v.as_object())
.map(|versions| {
versions
.values()
.filter_map(|v| {
v.get("dist")
.and_then(|d| d.get("unpackedSize"))
.and_then(|s| s.as_u64())
})
.sum()
})
.unwrap_or(0);
// Get latest version time for "updated"
let updated = metadata
.get("time")
.and_then(|t| t.get("modified"))
.and_then(|m| m.as_str())
.map(|s| s[..10].to_string()) // Take just date part
.unwrap_or_else(|| "N/A".to_string());
packages.insert(
name.to_string(),
RepoInfo {
name: name.to_string(),
versions: versions_count,
size: total_size,
updated,
},
);
}
}
}
}
}
let mut result: Vec<_> = packages.into_values().map(|(r, _)| r).collect();
let mut result: Vec<_> = packages.into_values().collect();
result.sort_by(|a, b| a.name.cmp(&b.name));
result
}
pub async fn get_npm_detail(storage: &Storage, name: &str) -> PackageDetail {
let prefix = format!("npm/{}/tarballs/", name);
let keys = storage.list(&prefix).await;
let metadata_key = format!("npm/{}/metadata.json", name);
let mut versions = Vec::new();
for key in &keys {
if let Some(tarball) = key.strip_prefix(&prefix) {
if let Some(version) = tarball
.strip_prefix(&format!("{}-", name))
.and_then(|s| s.strip_suffix(".tgz"))
{
let (size, published) = if let Some(meta) = storage.stat(key).await {
(meta.size, format_timestamp(meta.modified))
} else {
(0, "N/A".to_string())
};
versions.push(VersionInfo {
version: version.to_string(),
size,
published,
});
// Parse metadata.json for version info
if let Ok(data) = storage.get(&metadata_key).await {
if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
if let Some(versions_obj) = metadata.get("versions").and_then(|v| v.as_object()) {
let time_obj = metadata.get("time").and_then(|t| t.as_object());
for (version, info) in versions_obj {
let size = info
.get("dist")
.and_then(|d| d.get("unpackedSize"))
.and_then(|s| s.as_u64())
.unwrap_or(0);
let published = time_obj
.and_then(|t| t.get(version))
.and_then(|p| p.as_str())
.map(|s| s[..10].to_string())
.unwrap_or_else(|| "N/A".to_string());
versions.push(VersionInfo {
version: version.clone(),
size,
published,
});
}
}
}
}
// Sort by version (semver-like, newest first)
versions.sort_by(|a, b| {
let a_parts: Vec<u32> = a
.version
.split('.')
.filter_map(|s| s.parse().ok())
.collect();
let b_parts: Vec<u32> = b
.version
.split('.')
.filter_map(|s| s.parse().ok())
.collect();
b_parts.cmp(&a_parts)
});
PackageDetail { versions }
}
#[allow(dead_code)]
pub async fn get_cargo_crates(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("cargo/").await;
@@ -707,6 +799,7 @@ pub async fn get_cargo_detail(storage: &Storage, name: &str) -> PackageDetail {
PackageDetail { versions }
}
#[allow(dead_code)]
pub async fn get_pypi_packages(storage: &Storage) -> Vec<RepoInfo> {
let keys = storage.list("pypi/").await;

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use super::i18n::{get_translations, Lang, Translations};
/// Application version from Cargo.toml
@@ -137,7 +140,7 @@ fn sidebar_dark(active_page: Option<&str>, t: &Translations) -> String {
<div id="sidebar" class="fixed md:static inset-y-0 left-0 z-50 w-64 bg-slate-800 text-white flex flex-col transform -translate-x-full md:translate-x-0 transition-transform duration-200 ease-in-out">
<div class="h-16 flex items-center justify-between px-6 border-b border-slate-700">
<div class="flex items-center">
<span class="text-2xl font-bold tracking-tight">N<span class="inline-block w-5 h-5 rounded-full border-2 border-current align-middle relative -top-0.5 mx-0.5"></span>RA</span>
<span class="text-xl font-bold tracking-tight">N<span class="inline-block w-4 h-4 rounded-full border-2 border-current align-middle mx-px"></span>RA</span>
</div>
<button onclick="toggleSidebar()" class="md:hidden p-1 rounded-lg hover:bg-slate-700">
<svg class="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
/// Internationalization support for the UI
use serde::{Deserialize, Serialize};

File diff suppressed because one or more lines are too long

View File

@@ -1,9 +1,13 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
mod api;
mod components;
pub mod components;
pub mod i18n;
mod logo;
mod templates;
use crate::repo_index::paginate;
use crate::AppState;
use axum::{
extract::{Path, Query, State},
@@ -22,6 +26,15 @@ struct LangQuery {
lang: Option<String>,
}
#[derive(Debug, serde::Deserialize)]
struct ListQuery {
lang: Option<String>,
page: Option<usize>,
limit: Option<usize>,
}
const DEFAULT_PAGE_SIZE: usize = 50;
fn extract_lang(query: &Query<LangQuery>, cookie_header: Option<&str>) -> Lang {
// Priority: query param > cookie > default
if let Some(ref lang) = query.lang {
@@ -41,6 +54,23 @@ fn extract_lang(query: &Query<LangQuery>, cookie_header: Option<&str>) -> Lang {
Lang::default()
}
fn extract_lang_from_list(query: &ListQuery, cookie_header: Option<&str>) -> Lang {
if let Some(ref lang) = query.lang {
return Lang::from_str(lang);
}
if let Some(cookies) = cookie_header {
for part in cookies.split(';') {
let part = part.trim();
if let Some(value) = part.strip_prefix("nora_lang=") {
return Lang::from_str(value);
}
}
}
Lang::default()
}
pub fn routes() -> Router<Arc<AppState>> {
Router::new()
// UI Pages
@@ -82,18 +112,23 @@ async fn dashboard(
// Docker pages
async fn docker_list(
State(state): State<Arc<AppState>>,
Query(query): Query<LangQuery>,
Query(query): Query<ListQuery>,
headers: axum::http::HeaderMap,
) -> impl IntoResponse {
let lang = extract_lang(
&Query(query),
headers.get("cookie").and_then(|v| v.to_str().ok()),
);
let repos = get_docker_repos(&state.storage).await;
Html(render_registry_list(
let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
let page = query.page.unwrap_or(1).max(1);
let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);
let all_repos = state.repo_index.get("docker", &state.storage).await;
let (repos, total) = paginate(&all_repos, page, limit);
Html(render_registry_list_paginated(
"docker",
"Docker Registry",
&repos,
page,
limit,
total,
lang,
))
}
@@ -115,18 +150,23 @@ async fn docker_detail(
// Maven pages
async fn maven_list(
State(state): State<Arc<AppState>>,
Query(query): Query<LangQuery>,
Query(query): Query<ListQuery>,
headers: axum::http::HeaderMap,
) -> impl IntoResponse {
let lang = extract_lang(
&Query(query),
headers.get("cookie").and_then(|v| v.to_str().ok()),
);
let repos = get_maven_repos(&state.storage).await;
Html(render_registry_list(
let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
let page = query.page.unwrap_or(1).max(1);
let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);
let all_repos = state.repo_index.get("maven", &state.storage).await;
let (repos, total) = paginate(&all_repos, page, limit);
Html(render_registry_list_paginated(
"maven",
"Maven Repository",
&repos,
page,
limit,
total,
lang,
))
}
@@ -148,15 +188,25 @@ async fn maven_detail(
// npm pages
async fn npm_list(
State(state): State<Arc<AppState>>,
Query(query): Query<LangQuery>,
Query(query): Query<ListQuery>,
headers: axum::http::HeaderMap,
) -> impl IntoResponse {
let lang = extract_lang(
&Query(query),
headers.get("cookie").and_then(|v| v.to_str().ok()),
);
let packages = get_npm_packages(&state.storage).await;
Html(render_registry_list("npm", "npm Registry", &packages, lang))
let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
let page = query.page.unwrap_or(1).max(1);
let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);
let all_packages = state.repo_index.get("npm", &state.storage).await;
let (packages, total) = paginate(&all_packages, page, limit);
Html(render_registry_list_paginated(
"npm",
"npm Registry",
&packages,
page,
limit,
total,
lang,
))
}
async fn npm_detail(
@@ -176,18 +226,23 @@ async fn npm_detail(
// Cargo pages
async fn cargo_list(
State(state): State<Arc<AppState>>,
Query(query): Query<LangQuery>,
Query(query): Query<ListQuery>,
headers: axum::http::HeaderMap,
) -> impl IntoResponse {
let lang = extract_lang(
&Query(query),
headers.get("cookie").and_then(|v| v.to_str().ok()),
);
let crates = get_cargo_crates(&state.storage).await;
Html(render_registry_list(
let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
let page = query.page.unwrap_or(1).max(1);
let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);
let all_crates = state.repo_index.get("cargo", &state.storage).await;
let (crates, total) = paginate(&all_crates, page, limit);
Html(render_registry_list_paginated(
"cargo",
"Cargo Registry",
&crates,
page,
limit,
total,
lang,
))
}
@@ -209,18 +264,23 @@ async fn cargo_detail(
// PyPI pages
async fn pypi_list(
State(state): State<Arc<AppState>>,
Query(query): Query<LangQuery>,
Query(query): Query<ListQuery>,
headers: axum::http::HeaderMap,
) -> impl IntoResponse {
let lang = extract_lang(
&Query(query),
headers.get("cookie").and_then(|v| v.to_str().ok()),
);
let packages = get_pypi_packages(&state.storage).await;
Html(render_registry_list(
let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
let page = query.page.unwrap_or(1).max(1);
let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);
let all_packages = state.repo_index.get("pypi", &state.storage).await;
let (packages, total) = paginate(&all_packages, page, limit);
Html(render_registry_list_paginated(
"pypi",
"PyPI Repository",
&packages,
page,
limit,
total,
lang,
))
}
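
All five list handlers repeat the same page/limit clamping. A hypothetical shared helper, not in the diff; note clamp(1, 100) rather than min(100), which also protects the paginator's total.div_ceil(limit) from a user-supplied limit=0:

fn page_params(query: &ListQuery) -> (usize, usize) {
    let page = query.page.unwrap_or(1).max(1); // pages are 1-based
    // min(100) alone would let limit=0 through; clamp enforces the floor too.
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).clamp(1, 100);
    (page, limit)
}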

View File

@@ -1,6 +1,10 @@
use super::api::{DashboardResponse, DockerDetail, MavenDetail, PackageDetail, RepoInfo};
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use super::api::{DashboardResponse, DockerDetail, MavenDetail, PackageDetail};
use super::components::*;
use super::i18n::{get_translations, Lang};
use crate::repo_index::RepoInfo;
/// Renders the main dashboard page with dark theme
pub fn render_dashboard(data: &DashboardResponse, lang: Lang) -> String {
@@ -163,6 +167,7 @@ fn format_relative_time(timestamp: &chrono::DateTime<chrono::Utc>) -> String {
}
/// Renders a registry list page (docker, maven, npm, cargo, pypi)
#[allow(dead_code)]
pub fn render_registry_list(
registry_type: &str,
title: &str,
@@ -273,6 +278,220 @@ pub fn render_registry_list(
layout_dark(title, &content, Some(registry_type), "", lang)
}
/// Renders a registry list page with pagination
pub fn render_registry_list_paginated(
registry_type: &str,
title: &str,
repos: &[RepoInfo],
page: usize,
limit: usize,
total: usize,
lang: Lang,
) -> String {
let t = get_translations(lang);
let icon = get_registry_icon(registry_type);
let table_rows = if repos.is_empty() && page == 1 {
format!(
r##"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
<div class="text-4xl mb-2">📭</div>
<div>{}</div>
<div class="text-sm mt-1">{}</div>
</td></tr>"##,
t.no_repos_found, t.push_first_artifact
)
} else if repos.is_empty() {
r##"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
<div class="text-4xl mb-2">📭</div>
<div>No more items on this page</div>
</td></tr>"##
.to_string()
} else {
repos
.iter()
.map(|repo| {
let detail_url =
format!("/ui/{}/{}", registry_type, encode_uri_component(&repo.name));
format!(
r##"
<tr class="hover:bg-slate-700 cursor-pointer" onclick="window.location='{}'">
<td class="px-6 py-4">
<a href="{}" class="text-blue-400 hover:text-blue-300 font-medium">{}</a>
</td>
<td class="px-6 py-4 text-slate-400">{}</td>
<td class="px-6 py-4 text-slate-400">{}</td>
<td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"##,
detail_url,
detail_url,
html_escape(&repo.name),
repo.versions,
format_size(repo.size),
&repo.updated
)
})
.collect::<Vec<_>>()
.join("")
};
let version_label = match registry_type {
"docker" => t.tags,
_ => t.versions,
};
// Pagination
let total_pages = total.div_ceil(limit);
let start_item = if total == 0 {
0
} else {
(page - 1) * limit + 1
};
let end_item = (start_item + repos.len()).saturating_sub(1);
let pagination = if total_pages > 1 {
let mut pages_html = String::new();
// Previous button
if page > 1 {
pages_html.push_str(&format!(
r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded bg-slate-700 hover:bg-slate-600 text-slate-300">←</a>"##,
registry_type, page - 1, limit
));
} else {
pages_html.push_str(r##"<span class="px-3 py-1 rounded bg-slate-800 text-slate-600 cursor-not-allowed">←</span>"##);
}
// Page numbers (show max 7 pages around current)
let start_page = if page <= 4 { 1 } else { page - 3 };
let end_page = (start_page + 6).min(total_pages);
if start_page > 1 {
pages_html.push_str(&format!(
r##"<a href="/ui/{}?page=1&limit={}" class="px-3 py-1 rounded hover:bg-slate-700 text-slate-400">1</a>"##,
registry_type, limit
));
if start_page > 2 {
pages_html.push_str(r##"<span class="px-2 text-slate-600">...</span>"##);
}
}
for p in start_page..=end_page {
if p == page {
pages_html.push_str(&format!(
r##"<span class="px-3 py-1 rounded bg-blue-600 text-white font-medium">{}</span>"##,
p
));
} else {
pages_html.push_str(&format!(
r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded hover:bg-slate-700 text-slate-400">{}</a>"##,
registry_type, p, limit, p
));
}
}
if end_page < total_pages {
if end_page < total_pages - 1 {
pages_html.push_str(r##"<span class="px-2 text-slate-600">...</span>"##);
}
pages_html.push_str(&format!(
r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded hover:bg-slate-700 text-slate-400">{}</a>"##,
registry_type, total_pages, limit, total_pages
));
}
// Next button
if page < total_pages {
pages_html.push_str(&format!(
r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded bg-slate-700 hover:bg-slate-600 text-slate-300">→</a>"##,
registry_type, page + 1, limit
));
} else {
pages_html.push_str(r##"<span class="px-3 py-1 rounded bg-slate-800 text-slate-600 cursor-not-allowed">→</span>"##);
}
format!(
r##"
<div class="mt-4 flex items-center justify-between">
<div class="text-sm text-slate-500">
Showing {}-{} of {} items
</div>
<div class="flex items-center gap-1">
{}
</div>
</div>
"##,
start_item, end_item, total, pages_html
)
} else if total > 0 {
format!(
r##"<div class="mt-4 text-sm text-slate-500">Showing all {} items</div>"##,
total
)
} else {
String::new()
};
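// The search input below uses standard htmx semantics (hx-get, hx-trigger,
// hx-target): each keyup, debounced 300 ms, issues
// GET /api/ui/<registry>/search with the input's value as q, and the
// returned rows replace the contents of the #repo-table-body tbody.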
let content = format!(
r##"
<div class="mb-6 flex items-center justify-between">
<div class="flex items-center">
<svg class="w-10 h-10 mr-3 text-slate-400" fill="currentColor" viewBox="0 0 24 24">{}</svg>
<div>
<h1 class="text-2xl font-bold text-slate-200">{}</h1>
<p class="text-slate-500">{} {}</p>
</div>
</div>
<div class="flex items-center gap-4">
<div class="relative">
<input type="text"
placeholder="{}"
class="pl-10 pr-4 py-2 bg-slate-800 border border-slate-600 text-slate-200 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent placeholder-slate-500"
hx-get="/api/ui/{}/search"
hx-trigger="keyup changed delay:300ms"
hx-target="#repo-table-body"
name="q">
<svg class="absolute left-3 top-2.5 h-5 w-5 text-slate-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M21 21l-6-6m2-5a7 7 0 11-14 0 7 7 0 0114 0z"/>
</svg>
</div>
</div>
</div>
<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 overflow-hidden">
<table class="w-full">
<thead class="bg-slate-800 border-b border-slate-700">
<tr>
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
</tr>
</thead>
<tbody id="repo-table-body" class="divide-y divide-slate-700">
{}
</tbody>
</table>
</div>
{}
"##,
icon,
title,
total,
t.repositories,
t.search_placeholder,
registry_type,
t.name,
version_label,
t.size,
t.updated,
table_rows,
pagination
);
layout_dark(title, &content, Some(registry_type), "", lang)
}
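// A minimal smoke-test sketch for the paginated renderer. The RepoInfo
// field names and types (name, versions, size, updated) and the Lang::En
// variant are assumptions inferred from their usage above, not confirmed
// against the real definitions.
#[cfg(test)]
mod pagination_render_tests {
use super::*;

#[test]
fn renders_prev_and_next_links_on_a_middle_page() {
// 20 hypothetical repos, one full page's worth.
let repos: Vec<RepoInfo> = (1..=20u64)
.map(|i| RepoInfo {
name: format!("crate-{i}"),
versions: 3,
size: 1024 * i,
updated: "2026-02-24".to_string(),
})
.collect();
// 95 items at 20 per page -> 5 pages; page 2 must link to pages 1 and 3.
let html = render_registry_list_paginated("cargo", "Cargo", &repos, 2, 20, 95, Lang::En);
assert!(html.contains("page=1"));
assert!(html.contains("page=3"));
}
}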
/// Renders Docker image detail page
pub fn render_docker_detail(name: &str, detail: &DockerDetail, lang: Lang) -> String {
let _t = get_translations(lang);

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
#![allow(dead_code)]
//! Input validation for artifact registry paths and identifiers
//!

View File

@@ -19,10 +19,10 @@ serde.workspace = true
serde_json.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
-toml = "0.8"
+toml = "1.0"
uuid = { version = "1", features = ["v4"] }
sha2 = "0.10"
base64 = "0.22"
httpdate = "1"
chrono = { version = "0.4", features = ["serde"] }
-quick-xml = { version = "0.31", features = ["serialize"] }
+quick-xml = { version = "0.39", features = ["serialize"] }

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use serde::{Deserialize, Serialize};
use std::fs;

View File

@@ -1,3 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
mod config;
use axum::extract::DefaultBodyLimit;