mirror of https://github.com/getnora-io/nora.git
synced 2026-04-12 16:10:31 +00:00

Compare commits

4 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 835a6f0b14 |  |
|  | 340c49bf12 |  |
|  | c84d13c26e |  |
|  | 7e8978533a |  |
@@ -1,142 +0,0 @@
#!/bin/bash
# Pre-commit hook to prevent accidental commits of sensitive files
# Enable: git config core.hooksPath .githooks

set -e

RED='\033[0;31m'
YELLOW='\033[1;33m'
GREEN='\033[0;32m'
NC='\033[0m'

# Allowed file extensions (whitelist)
ALLOWED_EXTENSIONS=(
    '\.rs$'
    '\.toml$'
    '\.lock$'
    '\.yml$'
    '\.yaml$'
    '\.json$'
    '\.sh$'
    '\.html$'
    '\.css$'
    '\.js$'
    '\.gitignore$'
    '\.dockerignore$'
    'Dockerfile$'
    'LICENSE$'
    'Makefile$'
)

# Extensions that trigger a warning (not blocked)
WARN_EXTENSIONS=(
    '\.md$'
)

# Always blocked patterns (regardless of extension)
BLOCKED_PATTERNS=(
    '\.env$'
    '\.env\.'
    '\.key$'
    '\.pem$'
    '\.p12$'
    '\.pfx$'
    '\.htpasswd$'
    'secret'
    'credential'
    'password'
    '\.bak$'
    '\.swp$'
    '\.swo$'
    'node_modules/'
    'target/debug/'
    '\.DS_Store'
)

# Get staged files (only NEW files, not already tracked)
STAGED_FILES=$(git diff --cached --name-only --diff-filter=A)

if [ -z "$STAGED_FILES" ]; then
    # No new files, only modifications to existing ones - allow
    exit 0
fi

# Build patterns
ALLOWED_PATTERN=$(IFS='|'; echo "${ALLOWED_EXTENSIONS[*]}")
WARN_PATTERN=$(IFS='|'; echo "${WARN_EXTENSIONS[*]}")
BLOCKED_PATTERN=$(IFS='|'; echo "${BLOCKED_PATTERNS[*]}")

# Check for blocked patterns first
BLOCKED_FILES=$(echo "$STAGED_FILES" | grep -iE "$BLOCKED_PATTERN" || true)

if [ -n "$BLOCKED_FILES" ]; then
    echo -e "${RED}BLOCKED: Suspicious files detected in commit${NC}"
    echo ""
    echo -e "${YELLOW}Files:${NC}"
    echo "$BLOCKED_FILES" | sed 's/^/  - /'
    echo ""
    echo "If intentional, use: git commit --no-verify"
    exit 1
fi

# Check for files with unknown extensions
UNKNOWN_FILES=""
WARN_FILES=""

while IFS= read -r file; do
    [ -z "$file" ] && continue

    if echo "$file" | grep -qE "$BLOCKED_PATTERN"; then
        continue # Already handled above
    elif echo "$file" | grep -qE "$WARN_PATTERN"; then
        WARN_FILES="$WARN_FILES$file"$'\n'
    elif ! echo "$file" | grep -qE "$ALLOWED_PATTERN"; then
        UNKNOWN_FILES="$UNKNOWN_FILES$file"$'\n'
    fi
done <<< "$STAGED_FILES"

# Warn about .md files
if [ -n "$WARN_FILES" ]; then
    echo -e "${YELLOW}WARNING: Markdown files in commit:${NC}"
    echo "$WARN_FILES" | sed '/^$/d' | sed 's/^/  - /'
    echo ""
fi

# Block unknown extensions
if [ -n "$UNKNOWN_FILES" ]; then
    echo -e "${RED}BLOCKED: Files with unknown extensions:${NC}"
    echo "$UNKNOWN_FILES" | sed '/^$/d' | sed 's/^/  - /'
    echo ""
    echo "Allowed extensions: rs, toml, lock, yml, yaml, json, sh, html, css, js, md"
    echo "If intentional, use: git commit --no-verify"
    exit 1
fi

# Check for large files (>5MB)
LARGE_FILES=$(echo "$STAGED_FILES" | while read f; do
    if [ -f "$f" ]; then
        # stat -f%z is the macOS form, stat -c%s the Linux form
        size=$(stat -f%z "$f" 2>/dev/null || stat -c%s "$f" 2>/dev/null || echo 0)
        if [ "$size" -gt 5242880 ]; then
            echo "$f ($(numfmt --to=iec "$size" 2>/dev/null || echo "${size}B"))"
        fi
    fi
done)

if [ -n "$LARGE_FILES" ]; then
    echo -e "${YELLOW}WARNING: Large files (>5MB) in commit:${NC}"
    echo "$LARGE_FILES" | sed 's/^/  - /'
    echo ""
fi

# Run cargo fmt check if Rust files changed
if git diff --cached --name-only | grep -q '\.rs$'; then
    if command -v cargo &> /dev/null; then
        if ! cargo fmt --check &> /dev/null; then
            echo -e "${RED}BLOCKED: cargo fmt check failed${NC}"
            echo "Run: cargo fmt"
            exit 1
        fi
    fi
fi

exit 0
60  .github/workflows/ci.yml  vendored
@@ -27,63 +27,3 @@ jobs:

      - name: Run tests
        run: cargo test --package nora-registry

  security:
    name: Security
    runs-on: ubuntu-latest
    permissions:
      contents: read
      security-events: write # for uploading SARIF to GitHub Security tab

    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0 # full history required for gitleaks

      - name: Install Rust
        uses: dtolnay/rust-toolchain@stable

      - name: Cache cargo
        uses: Swatinem/rust-cache@v2

      # ── Secrets ────────────────────────────────────────────────────────────
      - name: Gitleaks — scan for hardcoded secrets
        run: |
          curl -sL https://github.com/gitleaks/gitleaks/releases/download/v8.21.2/gitleaks_8.21.2_linux_x64.tar.gz \
            | tar xz -C /usr/local/bin gitleaks
          gitleaks detect --source . --exit-code 1 --report-format sarif --report-path gitleaks.sarif || true
        continue-on-error: true # findings are reported, do not block the pipeline

      # ── CVE in Rust dependencies ────────────────────────────────────────────
      - name: Install cargo-audit
        run: cargo install cargo-audit --locked

      - name: cargo audit — RustSec advisory database
        run: cargo audit
        continue-on-error: true # warn only; known CVEs should not block CI until triaged

      # ── Licenses, banned crates, supply chain policy ────────────────────────
      - name: cargo deny — licenses and banned crates
        uses: EmbarkStudios/cargo-deny-action@v2
        with:
          command: check
          arguments: --all-features

      # ── CVE scan of source tree and Cargo.lock ──────────────────────────────
      - name: Trivy — filesystem scan (Cargo.lock + source)
        if: always()
        uses: aquasecurity/trivy-action@master
        with:
          scan-type: fs
          scan-ref: .
          format: sarif
          output: trivy-fs.sarif
          severity: HIGH,CRITICAL
          exit-code: 0 # warn only; change to 1 to block the pipeline

      - name: Upload Trivy fs results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
        if: always()
        with:
          sarif_file: trivy-fs.sarif
          category: trivy-fs
146  .github/workflows/release.yml  vendored
@@ -11,7 +11,7 @@ env:

jobs:
  build:
    name: Build & Push
    runs-on: self-hosted
    runs-on: ubuntu-latest
    permissions:
      contents: read
      packages: write
@@ -19,16 +19,8 @@ jobs:
    steps:
      - uses: actions/checkout@v4

      - name: Set up Rust
        run: |
          echo "/home/github-runner/.cargo/bin" >> $GITHUB_PATH
          echo "RUSTUP_HOME=/home/github-runner/.rustup" >> $GITHUB_ENV
          echo "CARGO_HOME=/home/github-runner/.cargo" >> $GITHUB_ENV

      - name: Build release binary (musl static)
        run: |
          cargo build --release --target x86_64-unknown-linux-musl --package nora-registry
          cp target/x86_64-unknown-linux-musl/release/nora ./nora

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
@@ -40,163 +32,49 @@ jobs:
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      # ── Alpine (standard) ────────────────────────────────────────────────────
      - name: Extract metadata (alpine)
        id: meta-alpine
      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=semver,pattern={{major}}
            type=raw,value=latest

      - name: Build and push (alpine)
      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          file: Dockerfile
          platforms: linux/amd64
          push: true
          tags: ${{ steps.meta-alpine.outputs.tags }}
          labels: ${{ steps.meta-alpine.outputs.labels }}
          cache-from: type=gha,scope=alpine
          cache-to: type=gha,mode=max,scope=alpine

      # ── RED OS ───────────────────────────────────────────────────────────────
      - name: Extract metadata (redos)
        id: meta-redos
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          flavor: suffix=-redos,onlatest=true
          tags: |
            type=semver,pattern={{version}}
            type=semver,pattern={{major}}.{{minor}}
            type=raw,value=redos

      - name: Build and push (redos)
        uses: docker/build-push-action@v5
        with:
          context: .
          file: Dockerfile.redos
          platforms: linux/amd64
          push: true
          tags: ${{ steps.meta-redos.outputs.tags }}
          labels: ${{ steps.meta-redos.outputs.labels }}
          cache-from: type=gha,scope=redos
          cache-to: type=gha,mode=max,scope=redos

  scan:
    name: Scan (${{ matrix.name }})
    runs-on: ubuntu-latest
    needs: build
    permissions:
      contents: read
      packages: read
      security-events: write

    strategy:
      fail-fast: false
      matrix:
        include:
          - name: alpine
            suffix: ""
          - name: redos
            suffix: "-redos"

    steps:
      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set version tag (strip leading v)
        id: ver
        run: echo "tag=${GITHUB_REF_NAME#v}" >> $GITHUB_OUTPUT

      # ── CVE scan of the pushed image ────────────────────────────────────────
      # Images are FROM scratch — no OS packages, only binary CVE scan
      - name: Trivy — image scan (${{ matrix.name }})
        uses: aquasecurity/trivy-action@master
        with:
          scan-type: image
          image-ref: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}${{ matrix.suffix }}
          format: sarif
          output: trivy-image-${{ matrix.name }}.sarif
          severity: HIGH,CRITICAL
          exit-code: 0 # warn only; change to 1 to block on vulnerabilities

      - name: Upload Trivy image results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@v3
        if: always()
        with:
          sarif_file: trivy-image-${{ matrix.name }}.sarif
          category: trivy-image-${{ matrix.name }}
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

  release:
    name: GitHub Release
    runs-on: ubuntu-latest
    needs: [build, scan]
    needs: build
    permissions:
      contents: write
      packages: read # to pull image for SBOM generation

    steps:
      - uses: actions/checkout@v4

      - name: Log in to Container Registry
        uses: docker/login-action@v3
        with:
          registry: ${{ env.REGISTRY }}
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}

      - name: Set version tag (strip leading v)
        id: ver
        run: echo "tag=${GITHUB_REF_NAME#v}" >> $GITHUB_OUTPUT

      # ── SBOM — Software Bill of Materials ───────────────────────────────────
      - name: Generate SBOM (SPDX)
        uses: anchore/sbom-action@v0
        with:
          image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}
          format: spdx-json
          output-file: nora-${{ github.ref_name }}.sbom.spdx.json
          registry-username: ${{ github.actor }}
          registry-password: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate SBOM (CycloneDX)
        uses: anchore/sbom-action@v0
        with:
          image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}
          format: cyclonedx-json
          output-file: nora-${{ github.ref_name }}.sbom.cdx.json
          registry-username: ${{ github.actor }}
          registry-password: ${{ secrets.GITHUB_TOKEN }}

      - name: Create Release
        uses: softprops/action-gh-release@v1
        with:
          generate_release_notes: true
          files: |
            nora-${{ github.ref_name }}.sbom.spdx.json
            nora-${{ github.ref_name }}.sbom.cdx.json
          body: |
            ## Docker

            **Alpine (standard):**
            ```bash
            docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}
            ```

            **RED OS:**
            ```bash
            docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}-redos
            ```

            ## Changelog

            See [CHANGELOG.md](https://github.com/${{ github.repository }}/blob/main/CHANGELOG.md)
12  .gitignore  vendored
@@ -5,15 +5,3 @@ data/
.env.*
*.log
internal config

# Internal files
SESSION*.md
TODO.md
ROADMAP*.md
docs-site/
docs/
*.txt

## Internal files
.internal/
examples/
236  CHANGELOG.md
@@ -4,143 +4,6 @@ All notable changes to NORA will be documented in this file.

---

## [0.2.18] - 2026-01-31

### Changed
- Logo styling refinements

---

## [0.2.17] - 2026-01-31

### Added
- Copyright headers to all source files (Volkov Pavel | DevITWay)
- SPDX-License-Identifier: MIT in all .rs files

---

## [0.2.16] - 2026-01-31

### Changed
- N○RA branding: stylized O logo across dashboard
- Fixed O letter alignment in logo

---

## [0.2.15] - 2026-01-31

### Fixed
- Code formatting (cargo fmt)

---

## [0.2.14] - 2026-01-31

### Fixed
- Docker dashboard now shows actual image size from manifest layers (config + layers sum)
- Previously showed only manifest file size (~500 B instead of actual image size)

---

## [0.2.13] - 2026-01-31

### Fixed
- npm dashboard now shows correct version count and package sizes
- Parses metadata.json for versions, dist.unpackedSize, and time.modified
- Previously showed 0 versions / 0 B for all packages

---

## [0.2.12] - 2026-01-30

### Added

#### Configurable Rate Limiting
- Rate limits now configurable via `config.toml` and environment variables
- New config section `[rate_limit]` with parameters: `auth_rps`, `auth_burst`, `upload_rps`, `upload_burst`, `general_rps`, `general_burst`
- Environment variables: `NORA_RATE_LIMIT_{AUTH|UPLOAD|GENERAL}_{RPS|BURST}`

#### Secrets Provider Architecture
- Trait-based secrets management (`SecretsProvider` trait)
- ENV provider as default (12-Factor App pattern)
- Protected secrets with `zeroize` (memory zeroed on drop)
- Redacted Debug impl prevents secret leakage in logs
- New config section `[secrets]` with `provider` and `clear_env` options
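
A minimal sketch of the pattern these entries describe. Only the `SecretsProvider` trait name, the `zeroize`-based zeroing, the redacted `Debug`, and the `clear_env` option come from the changelog; every other type and method name here is an illustrative assumption, not NORA's actual API:

```rust
// Hypothetical sketch: the `Secret`/`EnvProvider` types and the method
// names are assumptions; see the changelog entries above for what is real.
use zeroize::Zeroizing;

/// A secret whose backing memory is zeroed on drop (via `Zeroizing`).
pub struct Secret(Zeroizing<String>);

impl std::fmt::Debug for Secret {
    // Redacted Debug impl: the secret value is never printed or logged.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str("Secret(<redacted>)")
    }
}

/// Trait-based secrets management; ENV is the default provider.
pub trait SecretsProvider {
    fn get(&self, key: &str) -> Option<Secret>;
}

pub struct EnvProvider {
    /// Mirrors the `clear_env` option: drop the variable after reading.
    pub clear_env: bool,
}

impl SecretsProvider for EnvProvider {
    fn get(&self, key: &str) -> Option<Secret> {
        let value = std::env::var(key).ok()?;
        if self.clear_env {
            std::env::remove_var(key);
        }
        Some(Secret(Zeroizing::new(value)))
    }
}
```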
#### Docker Image Metadata
- Support for image metadata retrieval

#### Documentation
- Bilingual onboarding guide (EN/RU)

---

## [0.2.11] - 2026-01-26

### Added
- Internationalization (i18n) support
- PyPI registry proxy
- UI improvements

---

## [0.2.10] - 2026-01-26

### Changed
- Dark theme applied to all UI pages

---

## [0.2.9] - 2026-01-26

### Changed
- Version bump release

---

## [0.2.8] - 2026-01-26

### Added
- Dashboard endpoint added to OpenAPI documentation

---

## [0.2.7] - 2026-01-26

### Added
- Dynamic version display in UI sidebar

---

## [0.2.6] - 2026-01-26

### Added

#### Dashboard Metrics
- Global stats panel: downloads, uploads, artifacts, cache hit rate, storage
- Extended registry cards with artifact count, size, counters
- Activity log (last 20 events)

#### UI
- Dark theme (bg: #0f172a, cards: #1e293b)

---

## [0.2.5] - 2026-01-26

### Fixed
- Docker push/pull: added PATCH endpoint for chunked uploads

---

## [0.2.4] - 2026-01-26

### Fixed
- Rate limiting: health/metrics endpoints now exempt
- Increased upload rate limits for Docker parallel requests

---

## [0.2.0] - 2026-01-25

### Added
@@ -189,6 +52,7 @@ All notable changes to NORA will be documented in this file.
- JSON error responses with request_id support

### Changed

- `StorageError` now uses `thiserror` derive macro
- `TokenError` now uses `thiserror` derive macro
- Storage wrapper validates keys before delegating to backend
@@ -196,6 +60,7 @@ All notable changes to NORA will be documented in this file.
- Body size limit set to 100MB default via `DefaultBodyLimit`

### Dependencies Added

- `thiserror = "2"` - typed error handling
- `tower_governor = "0.8"` - rate limiting
- `governor = "0.10"` - rate limiting backend
@@ -203,6 +68,7 @@ All notable changes to NORA will be documented in this file.
- `wiremock = "0.6"` (dev) - HTTP mocking for S3 tests

### Files Added

- `src/validation.rs` - input validation module
- `src/migrate.rs` - storage migration module
- `src/error.rs` - application error types
@@ -214,6 +80,7 @@ All notable changes to NORA will be documented in this file.
## [0.1.0] - 2026-01-24

### Added

- Multi-protocol support: Docker Registry v2, Maven, npm, Cargo, PyPI
- Web UI dashboard
- Swagger UI (`/api-docs`)
@@ -228,6 +95,7 @@ All notable changes to NORA will be documented in this file.
- Graceful shutdown (SIGTERM/SIGINT)
- Backup/restore commands

---
---

# Changelog (RU)
@@ -236,96 +104,6 @@ All notable changes to NORA will be documented in this file.

---

## [0.2.12] - 2026-01-30

### Added

#### Configurable Rate Limiting
- Rate limits are configured via `config.toml` and environment variables
- New `[rate_limit]` section with parameters: `auth_rps`, `auth_burst`, `upload_rps`, `upload_burst`, `general_rps`, `general_burst`
- Environment variables: `NORA_RATE_LIMIT_{AUTH|UPLOAD|GENERAL}_{RPS|BURST}`

#### Secrets Provider Architecture
- Trait-based secrets management (`SecretsProvider` trait)
- ENV provider by default (12-Factor App pattern)
- Protected secrets with `zeroize` (memory zeroed on drop)
- Redacted Debug impl prevents secrets from leaking into logs
- New `[secrets]` section with `provider` and `clear_env` options

#### Docker Image Metadata
- Support for retrieving image metadata

#### Documentation
- Bilingual onboarding guide (EN/RU)

---

## [0.2.11] - 2026-01-26

### Added
- Internationalization (i18n) support
- PyPI registry proxy
- UI improvements

---

## [0.2.10] - 2026-01-26

### Changed
- Dark theme applied to all UI pages

---

## [0.2.9] - 2026-01-26

### Changed
- Version bump release

---

## [0.2.8] - 2026-01-26

### Added
- Dashboard endpoint added to the OpenAPI documentation

---

## [0.2.7] - 2026-01-26

### Added
- Dynamic version display in the UI sidebar

---

## [0.2.6] - 2026-01-26

### Added

#### Dashboard Metrics
- Global stats panel: downloads, uploads, artifacts, cache hit rate, storage
- Extended registry cards with artifact count, size, counters
- Activity log (last 20 events)

#### UI
- Dark theme (bg: #0f172a, cards: #1e293b)

---

## [0.2.5] - 2026-01-26

### Fixed
- Docker push/pull: added PATCH endpoint for chunked uploads

---

## [0.2.4] - 2026-01-26

### Fixed
- Rate limiting: health/metrics endpoints are now exempt
- Increased upload limits for parallel Docker requests

---

## [0.2.0] - 2026-01-25

### Added
@@ -374,6 +152,7 @@ All notable changes to NORA will be documented in this file.
- JSON error responses with request_id support

### Changed

- `StorageError` now uses the `thiserror` macro
- `TokenError` now uses the `thiserror` macro
- Storage wrapper validates keys before delegating to the backend
@@ -381,6 +160,7 @@ All notable changes to NORA will be documented in this file.
- Body size limit set to 100MB via `DefaultBodyLimit`

### Dependencies Added

- `thiserror = "2"` - typed error handling
- `tower_governor = "0.8"` - rate limiting
- `governor = "0.10"` - rate limiting backend
@@ -388,6 +168,7 @@ All notable changes to NORA will be documented in this file.
- `wiremock = "0.6"` (dev) - HTTP mocking for S3 tests

### Files Added

- `src/validation.rs` - input validation module
- `src/migrate.rs` - storage migration module
- `src/error.rs` - application error types
@@ -399,6 +180,7 @@ All notable changes to NORA will be documented in this file.
## [0.1.0] - 2026-01-24

### Added

- Multi-protocol support: Docker Registry v2, Maven, npm, Cargo, PyPI
- Web UI dashboard
- Swagger UI (`/api-docs`)
119  CONTRIBUTING.md
@@ -1,71 +1,100 @@
# Contributing to NORA

Thank you for your interest in contributing to NORA!
Thanks for your interest in contributing to NORA!

## Getting Started

1. Fork the repository
2. Clone your fork: `git clone https://github.com/YOUR_USERNAME/nora.git`
3. Create a branch: `git checkout -b feature/your-feature`
1. **Fork** the repository
2. **Clone** your fork:
   ```bash
   git clone https://github.com/your-username/nora.git
   cd nora
   ```
3. **Create a branch**:
   ```bash
   git checkout -b feature/your-feature-name
   ```

## Development Setup

### Prerequisites

- Rust 1.75+ (`rustup update`)
- Docker (for testing)
- Git

### Build

```bash
# Install Rust (if needed)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# Enable pre-commit hooks (important!)
git config core.hooksPath .githooks

# Build
cargo build

# Run tests
cargo test

# Run locally
cargo run --bin nora -- serve
```

## Code Style
### Run

- Run `cargo fmt` before committing
- Run `cargo clippy` and fix warnings
- Follow Rust naming conventions
```bash
cargo run --bin nora
```

## Pull Request Process
### Test

1. Update documentation if needed
2. Add tests for new features
3. Ensure all tests pass: `cargo test`
4. Ensure code is formatted: `cargo fmt --check`
5. Ensure no clippy warnings: `cargo clippy`
```bash
cargo test
cargo clippy
cargo fmt --check
```

## Making Changes

1. **Write code** following Rust conventions
2. **Add tests** for new features
3. **Update docs** if needed
4. **Run checks**:
   ```bash
   cargo fmt
   cargo clippy -- -D warnings
   cargo test
   ```

## Commit Messages

Use conventional commits:
Follow [Conventional Commits](https://www.conventionalcommits.org/):

- `feat:` - new feature
- `fix:` - bug fix
- `docs:` - documentation
- `style:` - formatting
- `refactor:` - code refactoring
- `test:` - adding tests
- `chore:` - maintenance
- `feat:` - New feature
- `fix:` - Bug fix
- `docs:` - Documentation
- `test:` - Tests
- `refactor:` - Code refactoring
- `chore:` - Maintenance

Example: `feat: add OAuth2 authentication`
Example:
```bash
git commit -m "feat: add S3 storage migration"
```

## Reporting Issues
## Pull Request Process

- Use GitHub Issues
- Include steps to reproduce
- Include NORA version and OS
1. **Push** to your fork:
   ```bash
   git push origin feature/your-feature-name
   ```

## License
2. **Open a Pull Request** on GitHub

By contributing, you agree that your contributions will be licensed under the MIT License.
3. **Wait for review** - maintainers will review your PR

## Contact
## Code Style

- Telegram: [@DevITWay](https://t.me/DevITWay)
- GitHub Issues: [getnora-io/nora](https://github.com/getnora-io/nora/issues)
- Follow Rust conventions
- Use `cargo fmt` for formatting
- Pass `cargo clippy` with no warnings
- Write meaningful commit messages

## Questions?

- Open an [Issue](https://github.com/getnora-io/nora/issues)
- Ask in [Discussions](https://github.com/getnora-io/nora/discussions)
- Reach out on [Telegram](https://t.me/DevITWay)

---

Built with love by the NORA community
80  Cargo.lock  generated
@@ -240,9 +240,9 @@ checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"

[[package]]
name = "cc"
version = "1.2.55"
version = "1.2.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29"
checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583"
dependencies = [
 "find-msvc-tools",
 "shlex",
@@ -286,9 +286,9 @@ dependencies = [

[[package]]
name = "clap"
version = "4.5.56"
version = "4.5.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a75ca66430e33a14957acc24c5077b503e7d374151b2b4b3a10c83b4ceb4be0e"
checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394"
dependencies = [
 "clap_builder",
 "clap_derive",
@@ -296,9 +296,9 @@ dependencies = [

[[package]]
name = "clap_builder"
version = "4.5.56"
version = "4.5.54"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "793207c7fa6300a0608d1080b858e5fdbe713cdc1c8db9fb17777d8a13e63df0"
checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00"
dependencies = [
 "anstream",
 "anstyle",
@@ -308,9 +308,9 @@ dependencies = [

[[package]]
name = "clap_derive"
version = "4.5.55"
version = "4.5.49"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5"
checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671"
dependencies = [
 "heck",
 "proc-macro2",
@@ -434,7 +434,6 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
 "block-buffer",
 "crypto-common",
 "subtle",
]

[[package]]
@@ -489,9 +488,9 @@ dependencies = [

[[package]]
name = "find-msvc-tools"
version = "0.1.9"
version = "0.1.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582"
checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db"

[[package]]
name = "flate2"
@@ -738,21 +737,6 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"

[[package]]
name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"

[[package]]
name = "hmac"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
dependencies = [
 "digest",
]

[[package]]
name = "http"
version = "1.4.0"
@@ -877,9 +861,9 @@ dependencies = [

[[package]]
name = "iana-time-zone"
version = "0.1.65"
version = "0.1.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470"
checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb"
dependencies = [
 "android_system_properties",
 "core-foundation-sys",
@@ -1201,7 +1185,7 @@ checksum = "38bf9645c8b145698bb0b18a4637dcacbc421ea49bef2317e4fd8065a387cf21"

[[package]]
name = "nora-cli"
version = "0.2.20"
version = "0.1.0"
dependencies = [
 "clap",
 "flate2",
@@ -1215,7 +1199,7 @@ dependencies = [

[[package]]
name = "nora-registry"
version = "0.2.20"
version = "0.1.0"
dependencies = [
 "async-trait",
 "axum",
@@ -1225,12 +1209,9 @@ dependencies = [
 "clap",
 "flate2",
 "governor",
 "hex",
 "hmac",
 "httpdate",
 "indicatif",
 "lazy_static",
 "parking_lot",
 "prometheus",
 "reqwest",
 "serde",
@@ -1248,12 +1229,11 @@ dependencies = [
 "utoipa-swagger-ui",
 "uuid",
 "wiremock",
 "zeroize",
]

[[package]]
name = "nora-storage"
version = "0.2.20"
version = "0.1.0"
dependencies = [
 "axum",
 "base64",
@@ -2180,9 +2160,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"

[[package]]
name = "tonic"
version = "0.14.3"
version = "0.14.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a286e33f82f8a1ee2df63f4fa35c0becf4a85a0cb03091a15fd7bf0b402dc94a"
checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203"
dependencies = [
 "async-trait",
 "axum",
@@ -2930,18 +2910,18 @@ dependencies = [

[[package]]
name = "zerocopy"
version = "0.8.37"
version = "0.8.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7456cf00f0685ad319c5b1693f291a650eaf345e941d082fc4e03df8a03996ac"
checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd"
dependencies = [
 "zerocopy-derive",
]

[[package]]
name = "zerocopy-derive"
version = "0.8.37"
version = "0.8.33"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1328722bbf2115db7e19d69ebcc15e795719e2d66b60827c6a69a117365e37a0"
checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1"
dependencies = [
 "proc-macro2",
 "quote",
@@ -2974,20 +2954,6 @@ name = "zeroize"
version = "1.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
dependencies = [
 "zeroize_derive",
]

[[package]]
name = "zeroize_derive"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e"
dependencies = [
 "proc-macro2",
 "quote",
 "syn",
]

[[package]]
name = "zerotrie"
@@ -3044,9 +3010,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3"

[[package]]
name = "zmij"
version = "1.0.17"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439"
checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65"

[[package]]
name = "zopfli"
@@ -7,7 +7,7 @@ members = [
]

[workspace.package]
version = "0.2.22"
version = "0.1.0"
edition = "2021"
license = "MIT"
authors = ["DevITWay <devitway@gmail.com>"]
@@ -24,5 +24,3 @@ tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
sha2 = "0.10"
async-trait = "0.1"
hmac = "0.12"
hex = "0.4"
55  Dockerfile
@@ -1,11 +1,58 @@
# syntax=docker/dockerfile:1.4
# Binary is pre-built by CI (cargo build --release) and passed via context

# Build stage
FROM rust:1.83-alpine AS builder

RUN apk add --no-cache musl-dev curl

WORKDIR /app

# Copy manifests
COPY Cargo.toml Cargo.lock ./
COPY nora-registry/Cargo.toml nora-registry/
COPY nora-storage/Cargo.toml nora-storage/
COPY nora-cli/Cargo.toml nora-cli/

# Create dummy sources for dependency caching
RUN mkdir -p nora-registry/src nora-storage/src nora-cli/src && \
    echo "fn main() {}" > nora-registry/src/main.rs && \
    echo "fn main() {}" > nora-storage/src/main.rs && \
    echo "fn main() {}" > nora-cli/src/main.rs

# Build dependencies only (with cache)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    --mount=type=cache,target=/app/target \
    cargo build --release --package nora-registry && \
    rm -rf nora-registry/src nora-storage/src nora-cli/src

# Copy real sources
COPY nora-registry/src nora-registry/src
COPY nora-storage/src nora-storage/src
COPY nora-cli/src nora-cli/src

# Build release binary (with cache)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    --mount=type=cache,target=/app/target \
    touch nora-registry/src/main.rs && \
    cargo build --release --package nora-registry && \
    cp /app/target/release/nora /usr/local/bin/nora

# Runtime stage
FROM alpine:3.20

RUN apk add --no-cache ca-certificates && mkdir -p /data
RUN apk add --no-cache ca-certificates

COPY nora /usr/local/bin/nora
WORKDIR /app

# Copy binary
COPY --from=builder /usr/local/bin/nora /usr/local/bin/nora

# Create data directory
RUN mkdir -p /data

# Default environment
ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
@@ -17,5 +64,5 @@ EXPOSE 4000

VOLUME ["/data"]

ENTRYPOINT ["/usr/local/bin/nora"]
ENTRYPOINT ["nora"]
CMD ["serve"]
@@ -1,28 +0,0 @@
# syntax=docker/dockerfile:1.4
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with Astra Linux SE (FSTEC certified)
# To switch to official base: replace FROM scratch with
#   FROM registry.astralinux.ru/library/alse:latest
#   RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/*

FROM alpine:3.20 AS certs
RUN apk add --no-cache ca-certificates

FROM scratch

COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY nora /usr/local/bin/nora

ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
ENV NORA_STORAGE_MODE=local
ENV NORA_STORAGE_PATH=/data/storage
ENV NORA_AUTH_TOKEN_STORAGE=/data/tokens

EXPOSE 4000

VOLUME ["/data"]

ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]
@@ -1,28 +0,0 @@
# syntax=docker/dockerfile:1.4
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with RED OS (FSTEC certified)
# To switch to official base: replace FROM scratch with
#   FROM registry.red-soft.ru/redos/redos:8
#   RUN dnf install -y ca-certificates && dnf clean all

FROM alpine:3.20 AS certs
RUN apk add --no-cache ca-certificates

FROM scratch

COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY nora /usr/local/bin/nora

ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
ENV NORA_STORAGE_MODE=local
ENV NORA_STORAGE_PATH=/data/storage
ENV NORA_AUTH_TOKEN_STORAGE=/data/tokens

EXPOSE 4000

VOLUME ["/data"]

ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]
2  LICENSE
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2026 Volkov Pavel | DevITWay
Copyright (c) 2026 DevITWay

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
34  README.md
@@ -1,9 +1,6 @@
# 🐿️ N○RA
# NORA

[License](LICENSE)
[Releases](https://github.com/getnora-io/nora/releases)
[CI](https://github.com/getnora-io/nora/actions)
[Rust](https://www.rust-lang.org/)
[Telegram](https://t.me/DevITWay)

> **Your Cloud-Native Artifact Registry**
@@ -43,7 +40,7 @@ Fast. Organized. Feel at Home.
### Docker (Recommended)

```bash
docker run -d -p 4000:4000 -v nora-data:/data ghcr.io/getnora-io/nora:latest
docker run -d -p 4000:4000 -v nora-data:/data getnora/nora
```

### From Source
@@ -108,14 +105,6 @@ nora migrate --from local --to s3
| `NORA_STORAGE_S3_URL` | - | S3 endpoint URL |
| `NORA_STORAGE_BUCKET` | registry | S3 bucket name |
| `NORA_AUTH_ENABLED` | false | Enable authentication |
| `NORA_RATE_LIMIT_AUTH_RPS` | 1 | Auth requests per second |
| `NORA_RATE_LIMIT_AUTH_BURST` | 5 | Auth burst size |
| `NORA_RATE_LIMIT_UPLOAD_RPS` | 200 | Upload requests per second |
| `NORA_RATE_LIMIT_UPLOAD_BURST` | 500 | Upload burst size |
| `NORA_RATE_LIMIT_GENERAL_RPS` | 100 | General requests per second |
| `NORA_RATE_LIMIT_GENERAL_BURST` | 200 | General burst size |
| `NORA_SECRETS_PROVIDER` | env | Secrets provider (`env`) |
| `NORA_SECRETS_CLEAR_ENV` | false | Clear env vars after reading |

### config.toml

@@ -131,23 +120,6 @@ path = "data/storage"
[auth]
enabled = false
htpasswd_file = "users.htpasswd"

[rate_limit]
# Strict limits for authentication (brute-force protection)
auth_rps = 1
auth_burst = 5
# High limits for CI/CD upload workloads
upload_rps = 200
upload_burst = 500
# Balanced limits for general API endpoints
general_rps = 100
general_burst = 200

[secrets]
# Provider: env (default), aws-secrets, vault, k8s (coming soon)
provider = "env"
# Clear environment variables after reading (security hardening)
clear_env = false
```

## Endpoints
@@ -194,4 +166,4 @@ Copyright (c) 2026 DevITWay

---

**🐿️ N○RA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)
**NORA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)
53  SECURITY.md
@@ -1,53 +0,0 @@
# Security Policy

## Supported Versions

| Version | Supported          |
| ------- | ------------------ |
| 0.2.x   | :white_check_mark: |
| < 0.2   | :x:                |

## Reporting a Vulnerability

**Please do not report security vulnerabilities through public GitHub issues.**

Instead, please report them via:

1. **Email:** devitway@gmail.com
2. **Telegram:** [@DevITWay](https://t.me/DevITWay) (private message)

### What to Include

- Type of vulnerability
- Steps to reproduce
- Potential impact
- Suggested fix (if any)

### Response Timeline

- **Initial response:** within 48 hours
- **Status update:** within 7 days
- **Fix timeline:** depends on severity

### Severity Levels

| Severity | Description | Response |
|----------|-------------|----------|
| Critical | Remote code execution, auth bypass | Immediate fix |
| High | Data exposure, privilege escalation | Fix within 7 days |
| Medium | Limited-impact vulnerabilities | Fix in next release |
| Low | Minor issues | Scheduled fix |

## Security Best Practices

When deploying NORA:

1. **Enable authentication** - Set `NORA_AUTH_ENABLED=true`
2. **Use HTTPS** - Put NORA behind a reverse proxy with TLS
3. **Limit network access** - Use firewall rules
4. **Regular updates** - Keep NORA updated to the latest version
5. **Secure credentials** - Use strong passwords, rotate tokens

## Acknowledgments

We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities.
440  TODO.md  Normal file
@@ -0,0 +1,440 @@
# NORA Roadmap / TODO

## v0.2.0 - DONE
- [x] Unit tests (75 tests passing)
- [x] Input validation (path traversal protection)
- [x] Rate limiting (brute-force protection)
- [x] Request ID tracking
- [x] Migrate command (local <-> S3)
- [x] Error handling (thiserror)
- [x] SVG brand icons

---

## v0.3.0 - OIDC / Workload Identity Federation

### Killer Feature: OIDC for CI/CD
Zero-secret authentication for GitHub Actions, GitLab CI, etc.

**Goal:** Replace manual `ROBOT_TOKEN` rotation with federated identity.

```yaml
# GitHub Actions example
permissions:
  id-token: write
steps:
  - name: Login to NORA
    uses: nora/login-action@v1
```

### Config Structure (draft)

```toml
[auth.oidc]
enabled = true

# GitHub Actions
[[auth.oidc.providers]]
name = "github-actions"
issuer = "https://token.actions.githubusercontent.com"
audience = "https://nora.example.com"

[[auth.oidc.providers.rules]]
# Claim matching (supports glob)
match = { repository = "my-org/*", ref = "refs/heads/main" }
# Granted permissions
permissions = ["push:my-org/*", "pull:*"]

[[auth.oidc.providers.rules]]
match = { repository = "my-org/*", ref = "refs/heads/*" }
permissions = ["pull:*"]

# GitLab CI
[[auth.oidc.providers]]
name = "gitlab-ci"
issuer = "https://gitlab.com"
audience = "https://nora.example.com"

[[auth.oidc.providers.rules]]
match = { project_path = "my-group/*" }
permissions = ["push:my-group/*", "pull:*"]
```

### Implementation Tasks
- [ ] JWT validation library (jsonwebtoken crate, see the sketch after this list)
- [ ] OIDC discovery (/.well-known/openid-configuration)
- [ ] JWKS fetching and caching
- [ ] Claims extraction and glob matching
- [ ] Permission resolution from rules
- [ ] Token exchange endpoint (POST /auth/oidc/token)
- [ ] GitHub Action: `nora/login-action`
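
A hedged sketch of the validation-and-matching core these tasks call for. `GitHubClaims`, the rule-tuple shape, and `resolve_permissions` are illustrative assumptions; only the `jsonwebtoken` crate, the GitHub issuer, and glob-matched claims come from the draft above. JWKS discovery and caching (the second and third tasks) are assumed to have happened elsewhere:

```rust
// Illustrative sketch, not NORA's real API.
use jsonwebtoken::{decode, Algorithm, DecodingKey, Validation};
use serde::Deserialize;

#[derive(Deserialize)]
struct GitHubClaims {
    repository: String, // e.g. "my-org/my-repo"
    #[serde(rename = "ref")]
    git_ref: String, // e.g. "refs/heads/main"
}

/// Minimal `*`-suffix glob, enough for rules like "my-org/*".
fn glob_match(pattern: &str, value: &str) -> bool {
    match pattern.strip_suffix('*') {
        Some(prefix) => value.starts_with(prefix),
        None => pattern == value,
    }
}

/// Validate a CI-issued OIDC token, then resolve permissions from rules.
fn resolve_permissions(
    token: &str,
    jwks_key: &DecodingKey,          // from the cached JWKS
    audience: &str,                  // e.g. "https://nora.example.com"
    rules: &[(&str, &str, &[&str])], // (repository glob, ref glob, permissions)
) -> Result<Vec<String>, jsonwebtoken::errors::Error> {
    let mut validation = Validation::new(Algorithm::RS256);
    validation.set_audience(&[audience]);
    validation.set_issuer(&["https://token.actions.githubusercontent.com"]);

    let claims = decode::<GitHubClaims>(token, jwks_key, &validation)?.claims;

    // First matching rule wins; no match means no permissions.
    Ok(rules
        .iter()
        .find(|(repo, r#ref, _)| {
            glob_match(repo, &claims.repository) && glob_match(r#ref, &claims.git_ref)
        })
        .map(|(_, _, perms)| perms.iter().map(|p| p.to_string()).collect())
        .unwrap_or_default())
}
```

A real token-exchange endpoint would wrap this behind `POST /auth/oidc/token` and mint a short-lived NORA token from the returned permissions.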
---

## v0.4.0 - Transparent Docker Hub Proxy

### Pain Point
Harbor forces tag changes: `docker pull my-harbor/proxy-cache/library/nginx`
This breaks Helm charts hardcoded to `nginx`.

### Goal
Transparent pull-through cache:
```bash
docker pull nora.example.com/nginx  # -> proxies to Docker Hub
```

### Implementation Tasks
- [ ] Registry v2 API interception
- [ ] Upstream registry configuration
- [ ] Cache layer management (flow sketched below)
- [ ] Rate limit handling (Docker Hub limits)
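
A rough sketch of the transparent flow, under loud assumptions: `LocalStore` is hypothetical, Docker Hub's token auth and rate limits (the last task above) are elided, and blocking `reqwest` is used only to keep the sketch short:

```rust
// Conceptual pull-through cache; not a committed design.
use std::collections::HashMap;

struct LocalStore {
    manifests: HashMap<String, Vec<u8>>, // key: "repo:reference"
}

impl LocalStore {
    /// Serve from cache if present; otherwise fetch from the upstream
    /// registry and cache the result before returning it.
    fn get_manifest(
        &mut self,
        repo: &str,
        reference: &str,
    ) -> Result<Vec<u8>, Box<dyn std::error::Error>> {
        let key = format!("{repo}:{reference}");
        if let Some(cached) = self.manifests.get(&key) {
            return Ok(cached.clone()); // cache hit: no upstream traffic
        }
        // "library/" is how Docker Hub namespaces official images, which is
        // what keeps a bare `nginx` pull transparent for the client.
        let upstream = format!(
            "https://registry-1.docker.io/v2/library/{repo}/manifests/{reference}"
        );
        // A real implementation must obtain a Hub bearer token first and
        // back off on 429 responses (the rate-limit task above).
        let body = reqwest::blocking::get(&upstream)?.bytes()?.to_vec();
        self.manifests.insert(key, body.clone());
        Ok(body)
    }
}
```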
---

## v0.5.0 - Repo-level RBAC

### Challenge
Per-repository permissions need fast lookup (100 layers per push).

### Solution
Glob patterns for 90% of cases:
```toml
[[auth.rules]]
subject = "team-frontend"
permissions = ["push:frontend/*", "pull:*"]

[[auth.rules]]
subject = "ci-bot"
permissions = ["push:*/release-*", "pull:*"]
```

### Implementation Tasks
- [ ] In-memory permission cache
- [ ] Glob pattern matcher (globset crate, see the sketch after this list)
- [ ] Permission inheritance (org -> project -> repo)
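
A small sketch of the glob fast path using the `globset` crate named above. The `SubjectPerms` type and the `push:`/`pull:` prefix parsing are assumptions mirroring the TOML draft:

```rust
use globset::{Glob, GlobSet, GlobSetBuilder};

/// Pre-compiled push/pull globs for one subject: built once, then each
/// check is a cheap automaton match (important at ~100 layers per push).
struct SubjectPerms {
    push: GlobSet,
    pull: GlobSet,
}

fn compile(perms: &[&str]) -> Result<SubjectPerms, globset::Error> {
    let (mut push, mut pull) = (GlobSetBuilder::new(), GlobSetBuilder::new());
    for p in perms {
        if let Some(pat) = p.strip_prefix("push:") {
            push.add(Glob::new(pat)?);
        } else if let Some(pat) = p.strip_prefix("pull:") {
            pull.add(Glob::new(pat)?);
        }
    }
    Ok(SubjectPerms { push: push.build()?, pull: pull.build()? })
}

fn main() -> Result<(), globset::Error> {
    // Rules for "team-frontend" from the draft above.
    let perms = compile(&["push:frontend/*", "pull:*"])?;
    assert!(perms.push.is_match("frontend/app"));
    assert!(!perms.push.is_match("backend/api"));
    assert!(perms.pull.is_match("backend/api"));
    Ok(())
}
```

Compiling the globs once per subject is what makes the in-memory permission cache cheap enough for the hot path.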
---

## Target Audience

1. DevOps engineers tired of Java/Go monsters
2. Edge/IoT installations (Raspberry Pi, branch offices)
3. Educational platforms (student labs)
4. CI/CD pipelines (GitHub Actions, GitLab CI)

## Competitive Advantages

| Feature | NORA | Harbor | Nexus |
|---------|------|--------|-------|
| Memory | <100MB | 2GB+ | 4GB+ |
| OIDC for CI | v0.3.0 | No | No |
| Transparent proxy | v0.4.0 | No (tag rewrite) | Partial |
| Single binary | Yes | No (microservices) | No (Java) |
| Zero-config upgrade | Yes | Complex | Complex |

---

## v0.6.0 - Online Garbage Collection

### Pain Point
Harbor GC blocks the registry for hours; you can't push during cleanup.

### Goal
Non-blocking garbage collection with zero downtime.

### Implementation Tasks
- [ ] Mark-and-sweep without locking (see the sketch after this list)
- [ ] Background blob cleanup
- [ ] Progress reporting via API/CLI
- [ ] `nora gc --dry-run` preview
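
A conceptual sketch of the non-blocking mark-and-sweep shape. The `Registry` fields are placeholders, and the age-threshold guard that makes concurrent pushes safe is only noted in the comment:

```rust
use std::collections::HashSet;

struct Registry {
    /// Digests referenced by each manifest (the "roots" of the mark phase).
    manifest_blobs: Vec<Vec<String>>,
    /// All blob digests physically present in storage.
    stored_blobs: Vec<String>,
}

impl Registry {
    /// Find unreferenced blobs without a global lock. Pushes that land
    /// during the scan only ADD references, so a blob seen as dead at mark
    /// time could be re-referenced; a safe sweep therefore deletes only
    /// blobs older than the scan start (age check elided here).
    fn gc_candidates(&self, dry_run: bool) -> Vec<String> {
        let live: HashSet<&String> = self.manifest_blobs.iter().flatten().collect();
        let dead: Vec<String> = self
            .stored_blobs
            .iter()
            .filter(|d| !live.contains(d))
            .cloned()
            .collect();
        if dry_run {
            // `nora gc --dry-run`: report, don't delete.
            for d in &dead {
                println!("would delete {d}");
            }
        }
        dead
    }
}
```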
---

## v0.7.0 - Retention Policies

### Pain Point
"Keep last 10 tags" sounds simple but works poorly everywhere.

### Goal
Declarative retention rules in config:

```toml
[[retention]]
match = "*/dev-*"
keep_last = 5

[[retention]]
match = "*/release-*"
keep_last = 20
older_than = "90d"

[[retention]]
match = "**/pr-*"
older_than = "7d"
```

### Implementation Tasks
- [ ] Glob pattern matching for repos/tags
- [ ] Age-based and count-based rules (see the sketch after this list)
- [ ] Dry-run mode
- [ ] Scheduled execution (cron-style)
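
A minimal sketch of evaluating one rule, assuming `older_than` has already been parsed to a `Duration` and the glob `match` filtering happened upstream; `Tag` and the rule struct are illustrative:

```rust
use std::time::{Duration, SystemTime};

struct Tag {
    name: String,
    pushed: SystemTime,
}

struct Rule {
    keep_last: Option<usize>,
    older_than: Option<Duration>, // "90d" parsed up front
}

/// Tags the rule would delete: the newest `keep_last` tags always survive,
/// and `older_than` only condemns tags past the age threshold.
fn to_delete(rule: &Rule, mut tags: Vec<Tag>) -> Vec<String> {
    tags.sort_by(|a, b| b.pushed.cmp(&a.pushed)); // newest first
    let now = SystemTime::now();
    tags.into_iter()
        .enumerate()
        .filter(|(i, t)| {
            let protected = rule.keep_last.map_or(false, |k| *i < k);
            let expired = rule.older_than.map_or(true, |limit| {
                now.duration_since(t.pushed).unwrap_or_default() > limit
            });
            !protected && expired
        })
        .map(|(_, t)| t.name)
        .collect()
}
```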
---

## v0.8.0 - Multi-tenancy & Quotas

### Pain Point
Harbor projects have quotas but configuration is painful. Nexus has no real isolation.

### Goal
Simple namespaces with limits:

```toml
[[tenants]]
name = "team-frontend"
storage_quota = "50GB"
rate_limit = { push = 100, pull = 1000 } # per hour

[[tenants]]
name = "team-backend"
storage_quota = "100GB"
```

### Implementation Tasks
- [ ] Tenant isolation (namespace prefix)
- [ ] Storage quota tracking (see the sketch after this list)
- [ ] Per-tenant rate limiting
- [ ] Usage reporting API
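
A sketch of quota enforcement at upload time. `Quotas`, the tuple layout, and the early-reservation strategy are assumptions; the `storage_quota` strings above are presumed parsed to bytes at config load:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Quotas {
    /// tenant name -> (used bytes, quota bytes); "50GB" parsed up front.
    usage: Mutex<HashMap<String, (u64, u64)>>,
}

impl Quotas {
    /// Reserve space for an incoming blob atomically, before accepting the
    /// upload; rejecting early avoids writing bytes we would have to undo.
    fn try_reserve(&self, tenant: &str, bytes: u64) -> Result<(), String> {
        let mut usage = self.usage.lock().unwrap();
        let (used, quota) = usage
            .get_mut(tenant)
            .ok_or_else(|| format!("unknown tenant {tenant}"))?;
        if *used + bytes > *quota {
            return Err(format!(
                "quota exceeded for {tenant}: {used} + {bytes} > {quota}"
            ));
        }
        *used += bytes;
        Ok(())
    }
}
```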
---

## v0.9.0 - Smart Replication

### Pain Point
Harbor replication rules are complex, and errors are silently swallowed.

### Goal
Simple CLI-driven replication with clear feedback:

```bash
nora replicate --to remote-dc --filter "prod/*" --dry-run
nora replicate --from gcr.io/my-project/* --to local/imported/
```

### Implementation Tasks
- [ ] Push-based replication to remote NORA
- [ ] Pull-based import from external registries (Docker Hub, GCR, ECR, Quay)
- [ ] Filter by glob patterns
- [ ] Progress bar and detailed logs
- [ ] Retry logic with exponential backoff (see the sketch after this list)
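
A self-contained sketch of the retry shape for that last task. The delay schedule (100 ms, doubling) and the attempt cap are illustrative choices, not a committed design:

```rust
use std::thread::sleep;
use std::time::Duration;

/// Retries `op` up to `max_attempts` times, doubling the delay after each
/// failure (100ms, 200ms, 400ms, ...). Returns the last error on exhaustion.
fn with_backoff<T, E>(
    max_attempts: u32,
    mut op: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut delay = Duration::from_millis(100);
    let mut attempt = 1;
    loop {
        match op() {
            Ok(v) => return Ok(v),
            Err(e) if attempt >= max_attempts => return Err(e),
            Err(_) => {
                sleep(delay);
                delay *= 2;
                attempt += 1;
            }
        }
    }
}

fn main() {
    let mut calls = 0;
    // Fails twice, then succeeds: mimics a flaky remote registry.
    let result = with_backoff(5, || {
        calls += 1;
        if calls < 3 { Err("transient") } else { Ok("pushed") }
    });
    assert_eq!(result, Ok("pushed"));
}
```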
---

## v1.0.0 - Production Ready

### Features to polish
- [ ] Full CLI (`nora images ls`, `nora tag`, `nora delete`)
- [ ] Webhooks with filters and retry logic
- [ ] Enhanced Prometheus metrics (per-repo stats, cache hit ratio, bandwidth per tenant)
- [ ] TUI dashboard (optional)
- [ ] Helm chart for Kubernetes deployment
- [ ] Official Docker image on ghcr.io

---

## Future Ideas (v1.x+)

### Cold Storage Tiering
Auto-move old tags to S3 Glacier:
```toml
[[storage.tiering]]
match = "*"
older_than = "180d"
move_to = "s3-glacier"
```

### Vulnerability Scanning Integration
Not built-in (use Trivy), but:
- [ ] Webhook on push -> trigger external scan
- [ ] Store scan results as OCI artifacts
- [ ] Block pull if critical CVEs (policy)

### Image Signing (Cosign/Notation)
- [ ] Signature storage (OCI artifacts)
- [ ] Policy enforcement (reject unsigned)

### P2P Distribution (Dragonfly/Kraken style)
For large clusters pulling the same image simultaneously.

---

---

## Architecture / DDD

### Current State (v0.2.0)
Monolithic structure, all in `nora-registry/src/`:
```
src/
├── main.rs     # CLI + server setup
├── auth.rs     # htpasswd + basic auth
├── tokens.rs   # API tokens
├── storage/    # Storage backends (local, s3)
├── registry/   # Protocol handlers (docker, maven, npm, cargo, pypi)
├── ui/         # Web dashboard
└── ...
```

### Target Architecture (v1.0+)

#### Domain-Driven Design Boundaries

```
nora/
├── nora-core/             # Domain layer (no dependencies)
│   ├── src/
│   │   ├── artifact.rs    # Artifact, Digest, Tag, Manifest
│   │   ├── repository.rs  # Repository, Namespace
│   │   ├── identity.rs    # User, ServiceAccount, Token
│   │   ├── policy.rs      # Permission, Rule, Quota
│   │   └── events.rs      # DomainEvent (ArtifactPushed, etc.)
│
├── nora-auth/             # Authentication bounded context
│   ├── src/
│   │   ├── htpasswd.rs    # Basic auth provider
│   │   ├── oidc.rs        # OIDC/JWT provider
│   │   ├── token.rs       # API token provider
│   │   └── rbac.rs        # Permission resolver
│
├── nora-storage/          # Storage bounded context
│   ├── src/
│   │   ├── backend.rs     # StorageBackend trait
│   │   ├── local.rs       # Filesystem
│   │   ├── s3.rs          # S3-compatible
│   │   ├── tiered.rs      # Hot/cold tiering
│   │   └── gc.rs          # Garbage collection
│
├── nora-registry/         # Application layer (HTTP API)
│   ├── src/
│   │   ├── api/
│   │   │   ├── oci.rs     # OCI Distribution API (/v2/)
│   │   │   ├── maven.rs   # Maven repository
│   │   │   ├── npm.rs     # npm registry
│   │   │   ├── cargo.rs   # Cargo registry
│   │   │   └── pypi.rs    # PyPI (simple API)
│   │   ├── proxy/         # Upstream proxy/cache
│   │   ├── webhook/       # Event webhooks
│   │   └── ui/            # Web dashboard
│
├── nora-cli/              # CLI application
│   ├── src/
│   │   ├── commands/
│   │   │   ├── serve.rs
│   │   │   ├── images.rs    # nora images ls/delete/tag
│   │   │   ├── gc.rs        # nora gc
│   │   │   ├── backup.rs    # nora backup/restore
│   │   │   ├── migrate.rs   # nora migrate
│   │   │   └── replicate.rs
│   │   └── tui/             # Optional TUI dashboard
│
└── nora-sdk/              # Client SDK (for nora/login-action)
    └── src/
        ├── client.rs      # HTTP client
        └── oidc.rs        # Token exchange
```

#### Key Principles

1. **Hexagonal Architecture**
   - Core domain has no external dependencies
   - Ports (traits) define boundaries
   - Adapters implement ports (S3, filesystem, OIDC providers)

2. **Event-Driven** (see the sketch after this list)
   - Domain events: `ArtifactPushed`, `ArtifactDeleted`, `TagCreated`
   - Webhooks subscribe to events
   - Async processing for GC, replication

3. **CQRS-lite**
   - Commands: Push, Delete, CreateToken
   - Queries: List, Get, Search
   - Separate read/write paths for hot endpoints

4. **Configuration as Code**
   - All policies in `nora.toml`
   - No database for config (file-based)
   - GitOps friendly
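
A sketch of what `events.rs` and an in-process publisher could look like. The variant fields and the `mpsc` channel are assumptions; only the three event names come from the principles above:

```rust
use std::sync::mpsc;

/// Domain events named in the roadmap; webhooks and async jobs (GC,
/// replication) subscribe rather than being called inline from handlers.
#[derive(Debug, Clone)]
enum DomainEvent {
    ArtifactPushed { repo: String, digest: String },
    ArtifactDeleted { repo: String, digest: String },
    TagCreated { repo: String, tag: String },
}

fn main() {
    let (tx, rx) = mpsc::channel::<DomainEvent>();

    // A push handler publishes and returns immediately...
    tx.send(DomainEvent::ArtifactPushed {
        repo: "frontend/app".into(),
        digest: "sha256:abc123".into(),
    })
    .unwrap();
    drop(tx);

    // ...while a subscriber (webhook dispatcher, GC trigger) drains the queue.
    for event in rx {
        println!("deliver webhook for {event:?}");
    }
}
```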
#### Trait Boundaries (Ports)
|
||||
|
||||
```rust
|
||||
// nora-core/src/ports.rs
|
||||
|
||||
#[async_trait]
|
||||
pub trait ArtifactStore {
|
||||
async fn push_blob(&self, digest: &Digest, data: Bytes) -> Result<()>;
|
||||
async fn get_blob(&self, digest: &Digest) -> Result<Bytes>;
|
||||
async fn push_manifest(&self, repo: &Repository, tag: &Tag, manifest: &Manifest) -> Result<()>;
|
||||
async fn get_manifest(&self, repo: &Repository, reference: &Reference) -> Result<Manifest>;
|
||||
async fn list_tags(&self, repo: &Repository) -> Result<Vec<Tag>>;
|
||||
async fn delete(&self, repo: &Repository, reference: &Reference) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait IdentityProvider {
|
||||
async fn authenticate(&self, credentials: &Credentials) -> Result<Identity>;
|
||||
async fn authorize(&self, identity: &Identity, action: &Action, resource: &Resource) -> Result<bool>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait EventPublisher {
|
||||
async fn publish(&self, event: DomainEvent) -> Result<()>;
|
||||
}
|
||||
```
|
||||
|
||||
#### Migration Path

| Phase | Action |
|-------|--------|
| v0.3 | Extract `nora-auth` crate (OIDC work) |
| v0.4 | Extract `nora-core` domain types |
| v0.5 | Extract `nora-storage` with trait boundaries |
| v0.6+ | Refactor registry handlers to use ports |
| v1.0 | Full hexagonal architecture |
### Technical Debt to Address

- [ ] Remove `unwrap()` in non-test code (started in e9984cf; see the sketch after this list)
- [ ] Add tracing spans to all handlers
- [ ] Consistent error types across modules
- [ ] Extract hardcoded limits to config
- [ ] Add OpenTelemetry support (traces, not just metrics)
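For the `unwrap()` item, the usual mechanical fix is to surface a typed error instead of panicking. A hypothetical before/after (the `AppError` variant here is illustrative, not the crate's actual error type):

```rust
use axum::http::HeaderMap;

#[derive(Debug)]
enum AppError {
    BadRequest(&'static str),
}

// Before: panics on a missing or non-UTF-8 header.
// let digest = headers.get("Docker-Content-Digest").unwrap().to_str().unwrap();

// After: the caller decides how to map the error (e.g., to a 400 response).
fn digest_header(headers: &HeaderMap) -> Result<&str, AppError> {
    headers
        .get("Docker-Content-Digest")
        .and_then(|v| v.to_str().ok())
        .ok_or(AppError::BadRequest("missing or invalid Docker-Content-Digest"))
}
```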
### Performance Requirements

| Metric | Target |
|--------|--------|
| Memory (idle) | <50MB |
| Memory (under load) | <100MB |
| Startup time | <1s |
| Blob throughput | Wire speed (no processing overhead; see the sketch below) |
| Manifest latency | <10ms p99 |
| Auth check | <1ms (cached) |
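The wire-speed target mostly means never buffering a whole blob in memory. A minimal sketch of the idea, assuming axum and tokio-util (this is not the actual handler):

```rust
use axum::{body::Body, response::Response};
use tokio_util::io::ReaderStream;

// Sketch: stream a blob from disk in small chunks, so memory use stays flat
// and throughput is bounded by the socket rather than by server-side copies.
async fn serve_blob(path: std::path::PathBuf) -> std::io::Result<Response> {
    let file = tokio::fs::File::open(path).await?;
    let stream = ReaderStream::new(file); // yields Bytes chunks as the file is read
    Ok(Response::new(Body::from_stream(stream)))
}
```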
### Security Requirements

- [ ] No secrets in logs (already redacting)
- [ ] TLS termination (or trust reverse proxy)
- [ ] Content-addressable storage (immutable blobs; see the sketch after this list)
- [ ] Audit log for all mutations
- [ ] SBOM generation for NORA itself
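Content-addressability follows from the `sha2` dependency already in the manifest below: a blob's storage key is the digest of its bytes, so a stored blob cannot be rewritten in place without changing its address. A sketch:

```rust
use sha2::{Digest, Sha256};

// Sketch: derive the OCI-style content address for a blob.
// Both `sha2` and `hex` appear in the dependency list later in this diff.
fn blob_key(data: &[u8]) -> String {
    format!("sha256:{}", hex::encode(Sha256::digest(data)))
}
```

For example, `blob_key(b"")` yields `"sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"`, the well-known digest of the empty input.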
---

## Notes

- S3 storage: already implemented
- Web UI: minimalist read-only dashboard (done)
- TUI: consider for v1.0
- Vulnerability scanning: out of scope (use Trivy externally)
- Image signing: out of scope for now (use cosign externally)
deny.toml
@@ -1,41 +0,0 @@
# cargo-deny configuration
# https://embarkstudios.github.io/cargo-deny/

[advisories]
# Vulnerability database (RustSec)
db-urls = ["https://github.com/rustsec/advisory-db"]
ignore = []

[licenses]
# Allowed open-source licenses
allow = [
    "MIT",
    "Apache-2.0",
    "Apache-2.0 WITH LLVM-exception",
    "BSD-2-Clause",
    "BSD-3-Clause",
    "ISC",
    "Unicode-DFS-2016",
    "Unicode-3.0",
    "CC0-1.0",
    "OpenSSL",
    "Zlib",
    "MPL-2.0", # Mozilla Public License — ok for binary linking
]
copyleft = "warn" # GPL etc — warn, don't block
unlicensed = "deny"

[bans]
multiple-versions = "warn"
deny = [
    # Prefer rustls over openssl for static builds and supply chain cleanliness
    { name = "openssl-sys" },
    { name = "openssl" },
]
skip = []

[sources]
unknown-registry = "warn"
unknown-git = "warn"
# Allow only the official crates.io index
allow-registry = ["https://github.com/rust-lang/crates.io-index"]
deploy/README.md
@@ -1,187 +1,57 @@
# NORA Demo Deployment

[English](#english) | [Russian](#russian)

## DNS Setup

Add A record:

```
demo.getnora.io → <VPS_IP>
```

---

<a name="english"></a>
## English

### Quick Start

```bash
# Run NORA with Docker
docker run -d \
  --name nora \
  -p 4000:4000 \
  -v nora-data:/data \
  ghcr.io/getnora-io/nora:latest

# Check health
curl http://localhost:4000/health
```
### Push Docker Images

```bash
# Tag your image
docker tag myapp:v1 localhost:4000/myapp:v1

# Push to NORA
docker push localhost:4000/myapp:v1

# Pull from NORA
docker pull localhost:4000/myapp:v1
```

### Use as Maven Repository

```xml
<!-- pom.xml -->
<repositories>
    <repository>
        <id>nora</id>
        <url>http://localhost:4000/maven2/</url>
    </repository>
</repositories>
```

### Use as npm Registry

```bash
npm config set registry http://localhost:4000/npm/
npm install lodash
```

### Use as PyPI Index

```bash
pip install --index-url http://localhost:4000/simple/ requests
```

### Production Deployment with HTTPS
## Deploy

```bash
# Clone repo
git clone https://github.com/getnora-io/nora.git
cd nora/deploy

# Start
docker compose up -d

# Check logs
docker compose logs -f
```

### URLs
## URLs

| URL | Description |
|-----|-------------|
| `/ui/` | Web UI |
| `/api-docs` | Swagger API Docs |
| `/health` | Health Check |
| `/metrics` | Prometheus Metrics |

- **Web UI:** https://demo.getnora.io/ui/
- **API Docs:** https://demo.getnora.io/api-docs
- **Health:** https://demo.getnora.io/health

### Environment Variables

| Variable | Default | Description |
|----------|---------|-------------|
| `NORA_HOST` | 127.0.0.1 | Bind address |
| `NORA_PORT` | 4000 | Port |
| `NORA_STORAGE_PATH` | data/storage | Storage path |
| `NORA_AUTH_ENABLED` | false | Enable auth |

---
<a name="russian"></a>
|
||||
## Русский
|
||||
|
||||
### Быстрый старт
|
||||
## Docker Usage
|
||||
|
||||
```bash
|
||||
# Запуск NORA в Docker
|
||||
docker run -d \
|
||||
--name nora \
|
||||
-p 4000:4000 \
|
||||
-v nora-data:/data \
|
||||
ghcr.io/getnora-io/nora:latest
|
||||
# Tag and push
|
||||
docker tag myimage:latest demo.getnora.io/myimage:latest
|
||||
docker push demo.getnora.io/myimage:latest
|
||||
|
||||
# Проверка работоспособности
|
||||
curl http://localhost:4000/health
|
||||
# Pull
|
||||
docker pull demo.getnora.io/myimage:latest
|
||||
```
|
||||
|
||||
### Загрузка Docker образов
|
||||
## Management
|
||||
|
||||
```bash
|
||||
# Тегируем образ
|
||||
docker tag myapp:v1 localhost:4000/myapp:v1
|
||||
|
||||
# Пушим в NORA
|
||||
docker push localhost:4000/myapp:v1
|
||||
|
||||
# Скачиваем из NORA
|
||||
docker pull localhost:4000/myapp:v1
|
||||
```
|
||||
|
||||
### Использование как Maven репозиторий
|
||||
|
||||
```xml
|
||||
<!-- pom.xml -->
|
||||
<repositories>
|
||||
<repository>
|
||||
<id>nora</id>
|
||||
<url>http://localhost:4000/maven2/</url>
|
||||
</repository>
|
||||
</repositories>
|
||||
```
|
||||
|
||||
### Использование как npm реестр
|
||||
|
||||
```bash
|
||||
npm config set registry http://localhost:4000/npm/
|
||||
npm install lodash
|
||||
```
|
||||
|
||||
### Использование как PyPI индекс
|
||||
|
||||
```bash
|
||||
pip install --index-url http://localhost:4000/simple/ requests
|
||||
```
|
||||
|
||||
### Продакшен с HTTPS
|
||||
|
||||
```bash
|
||||
git clone https://github.com/getnora-io/nora.git
|
||||
cd nora/deploy
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
### Эндпоинты
|
||||
|
||||
| URL | Описание |
|
||||
|-----|----------|
|
||||
| `/ui/` | Веб-интерфейс |
|
||||
| `/api-docs` | Swagger документация |
|
||||
| `/health` | Проверка здоровья |
|
||||
| `/metrics` | Метрики Prometheus |
|
||||
|
||||
### Переменные окружения
|
||||
|
||||
| Переменная | По умолчанию | Описание |
|
||||
|------------|--------------|----------|
|
||||
| `NORA_HOST` | 127.0.0.1 | Адрес привязки |
|
||||
| `NORA_PORT` | 4000 | Порт |
|
||||
| `NORA_STORAGE_PATH` | data/storage | Путь хранилища |
|
||||
| `NORA_AUTH_ENABLED` | false | Включить авторизацию |
|
||||
|
||||
---
|
||||
|
||||
### Management / Управление
|
||||
|
||||
```bash
|
||||
# Stop / Остановить
|
||||
# Stop
|
||||
docker compose down
|
||||
|
||||
# Restart / Перезапустить
|
||||
# Restart
|
||||
docker compose restart
|
||||
|
||||
# Logs / Логи
|
||||
# View logs
|
||||
docker compose logs -f nora
|
||||
docker compose logs -f caddy
|
||||
|
||||
# Update / Обновить
|
||||
docker compose pull && docker compose up -d
|
||||
# Update
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
@@ -1,83 +0,0 @@
#!/bin/bash
# Demo traffic simulator for NORA registry
# Generates random registry activity for dashboard demo

REGISTRY="http://localhost:4000"
LOG_FILE="/var/log/nora-demo-traffic.log"

# Sample packages to fetch
NPM_PACKAGES=("lodash" "express" "react" "axios" "moment" "underscore" "chalk" "debug")
MAVEN_ARTIFACTS=(
  "org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.pom"
  "com/google/guava/guava/31.1-jre/guava-31.1-jre.pom"
  "org/slf4j/slf4j-api/2.0.0/slf4j-api-2.0.0.pom"
)
DOCKER_IMAGES=("alpine" "busybox" "hello-world")

log() {
  echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1" >> "$LOG_FILE"
}

# Random sleep between min and max seconds
random_sleep() {
  local min=$1
  local max=$2
  local delay=$((RANDOM % (max - min + 1) + min))
  sleep $delay
}

# Fetch random npm package
fetch_npm() {
  local pkg=${NPM_PACKAGES[$RANDOM % ${#NPM_PACKAGES[@]}]}
  log "NPM: fetching $pkg"
  curl -s "$REGISTRY/npm/$pkg" > /dev/null 2>&1
}

# Fetch random maven artifact
fetch_maven() {
  local artifact=${MAVEN_ARTIFACTS[$RANDOM % ${#MAVEN_ARTIFACTS[@]}]}
  log "MAVEN: fetching $artifact"
  curl -s "$REGISTRY/maven2/$artifact" > /dev/null 2>&1
}

# Docker push/pull cycle
docker_cycle() {
  local img=${DOCKER_IMAGES[$RANDOM % ${#DOCKER_IMAGES[@]}]}
  local tag="demo-$(date +%s)"

  log "DOCKER: push/pull cycle for $img"

  # Tag and push
  docker tag "$img:latest" "localhost:4000/demo/$img:$tag" 2>/dev/null
  docker push "localhost:4000/demo/$img:$tag" > /dev/null 2>&1

  # Pull back
  docker rmi "localhost:4000/demo/$img:$tag" > /dev/null 2>&1
  docker pull "localhost:4000/demo/$img:$tag" > /dev/null 2>&1

  # Cleanup
  docker rmi "localhost:4000/demo/$img:$tag" > /dev/null 2>&1
}

# Main loop
log "Starting demo traffic simulator"

while true; do
  # Random operation
  op=$((RANDOM % 10))

  case $op in
    0|1|2|3) # 40% npm
      fetch_npm
      ;;
    4|5|6) # 30% maven
      fetch_maven
      ;;
    7|8|9) # 30% docker
      docker_cycle
      ;;
  esac

  # Random delay: 30-120 seconds
  random_sleep 30 120
done
@@ -1,15 +0,0 @@
[Unit]
Description=NORA Demo Traffic Simulator
After=docker.service
Requires=docker.service

[Service]
Type=simple
ExecStart=/opt/nora/demo-traffic.sh
Restart=always
RestartSec=10
StandardOutput=journal
StandardError=journal

[Install]
WantedBy=multi-user.target
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use clap::{Parser, Subcommand};

#[derive(Parser)]
@@ -24,8 +24,6 @@ tracing-subscriber.workspace = true
reqwest.workspace = true
sha2.workspace = true
async-trait.workspace = true
hmac.workspace = true
hex.workspace = true
toml = "0.8"
uuid = { version = "1", features = ["v4"] }
bcrypt = "0.17"
@@ -43,8 +41,6 @@ chrono = { version = "0.4", features = ["serde"] }
thiserror = "2"
tower_governor = "0.8"
governor = "0.10"
parking_lot = "0.12"
zeroize = { version = "1.8", features = ["derive"] }

[dev-dependencies]
tempfile = "3"
@@ -1,101 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use chrono::{DateTime, Utc};
use parking_lot::RwLock;
use serde::Serialize;
use std::collections::VecDeque;

/// Type of action that was performed
#[derive(Debug, Clone, Serialize, PartialEq)]
pub enum ActionType {
    Pull,
    Push,
    CacheHit,
    ProxyFetch,
}

impl std::fmt::Display for ActionType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ActionType::Pull => write!(f, "PULL"),
            ActionType::Push => write!(f, "PUSH"),
            ActionType::CacheHit => write!(f, "CACHE"),
            ActionType::ProxyFetch => write!(f, "PROXY"),
        }
    }
}

/// A single activity log entry
#[derive(Debug, Clone, Serialize)]
pub struct ActivityEntry {
    pub timestamp: DateTime<Utc>,
    pub action: ActionType,
    pub artifact: String,
    pub registry: String,
    pub source: String, // "LOCAL", "PROXY", "CACHE"
}

impl ActivityEntry {
    pub fn new(action: ActionType, artifact: String, registry: &str, source: &str) -> Self {
        Self {
            timestamp: Utc::now(),
            action,
            artifact,
            registry: registry.to_string(),
            source: source.to_string(),
        }
    }
}

/// Thread-safe activity log with bounded size
pub struct ActivityLog {
    entries: RwLock<VecDeque<ActivityEntry>>,
    max_entries: usize,
}

impl ActivityLog {
    pub fn new(max: usize) -> Self {
        Self {
            entries: RwLock::new(VecDeque::with_capacity(max)),
            max_entries: max,
        }
    }

    /// Add a new entry to the log, removing oldest if at capacity
    pub fn push(&self, entry: ActivityEntry) {
        let mut entries = self.entries.write();
        if entries.len() >= self.max_entries {
            entries.pop_front();
        }
        entries.push_back(entry);
    }

    /// Get the most recent N entries (newest first)
    pub fn recent(&self, count: usize) -> Vec<ActivityEntry> {
        let entries = self.entries.read();
        entries.iter().rev().take(count).cloned().collect()
    }

    /// Get all entries (newest first)
    pub fn all(&self) -> Vec<ActivityEntry> {
        let entries = self.entries.read();
        entries.iter().rev().cloned().collect()
    }

    /// Get the total number of entries
    pub fn len(&self) -> usize {
        self.entries.read().len()
    }

    /// Check if the log is empty
    pub fn is_empty(&self) -> bool {
        self.entries.read().is_empty()
    }
}

impl Default for ActivityLog {
    fn default() -> Self {
        Self::new(50)
    }
}
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use axum::{
    body::Body,
    extract::State,
@@ -63,17 +60,11 @@ impl HtpasswdAuth {
fn is_public_path(path: &str) -> bool {
    matches!(
        path,
        "/" | "/health"
            | "/ready"
            | "/metrics"
            | "/v2/"
            | "/v2"
            | "/api/tokens"
            | "/api/tokens/list"
            | "/api/tokens/revoke"
        "/" | "/health" | "/ready" | "/metrics" | "/v2/" | "/v2"
    ) || path.starts_with("/ui")
        || path.starts_with("/api-docs")
        || path.starts_with("/api/ui")
        || path.starts_with("/api/tokens")
}

/// Auth middleware - supports Basic auth and Bearer tokens
@@ -410,12 +401,8 @@ mod tests {
    assert!(is_public_path("/api/ui/stats"));
    assert!(is_public_path("/api/tokens"));
    assert!(is_public_path("/api/tokens/list"));
    assert!(is_public_path("/api/tokens/revoke"));

    // Protected paths
    assert!(!is_public_path("/api/tokens/unknown"));
    assert!(!is_public_path("/api/tokens/admin"));
    assert!(!is_public_path("/api/tokens/extra/path"));
    assert!(!is_public_path("/v2/myimage/blobs/sha256:abc"));
    assert!(!is_public_path("/v2/library/nginx/manifests/latest"));
    assert!(!is_public_path(
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Backup and restore functionality for Nora
//!
//! Exports all artifacts to a tar.gz file and restores from backups.
@@ -1,12 +1,7 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use serde::{Deserialize, Serialize};
use std::env;
use std::fs;

pub use crate::secrets::SecretsConfig;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    pub server: ServerConfig,
@@ -16,26 +11,13 @@ pub struct Config {
    #[serde(default)]
    pub npm: NpmConfig,
    #[serde(default)]
    pub pypi: PypiConfig,
    #[serde(default)]
    pub docker: DockerConfig,
    #[serde(default)]
    pub raw: RawConfig,
    #[serde(default)]
    pub auth: AuthConfig,
    #[serde(default)]
    pub rate_limit: RateLimitConfig,
    #[serde(default)]
    pub secrets: SecretsConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerConfig {
    pub host: String,
    pub port: u16,
    /// Public URL for generating pull commands (e.g., "registry.example.com")
    #[serde(default)]
    pub public_url: Option<String>,
}

#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
@@ -56,19 +38,6 @@ pub struct StorageConfig {
    pub s3_url: String,
    #[serde(default = "default_bucket")]
    pub bucket: String,
    /// S3 access key (optional, uses anonymous access if not set)
    #[serde(default)]
    pub s3_access_key: Option<String>,
    /// S3 secret key (optional, uses anonymous access if not set)
    #[serde(default)]
    pub s3_secret_key: Option<String>,
    /// S3 region (default: us-east-1)
    #[serde(default = "default_s3_region")]
    pub s3_region: String,
}

fn default_s3_region() -> String {
    "us-east-1".to_string()
}

fn default_storage_path() -> String {
@@ -99,52 +68,6 @@ pub struct NpmConfig {
    pub proxy_timeout: u64,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PypiConfig {
    #[serde(default)]
    pub proxy: Option<String>,
    #[serde(default = "default_timeout")]
    pub proxy_timeout: u64,
}

/// Docker registry configuration with upstream proxy support
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DockerConfig {
    #[serde(default = "default_docker_timeout")]
    pub proxy_timeout: u64,
    #[serde(default)]
    pub upstreams: Vec<DockerUpstream>,
}

/// Docker upstream registry configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DockerUpstream {
    pub url: String,
    #[serde(default)]
    pub auth: Option<String>, // "user:pass" for basic auth
}

/// Raw repository configuration for simple file storage
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RawConfig {
    #[serde(default = "default_raw_enabled")]
    pub enabled: bool,
    #[serde(default = "default_max_file_size")]
    pub max_file_size: u64, // in bytes
}

fn default_docker_timeout() -> u64 {
    60
}

fn default_raw_enabled() -> bool {
    true
}

fn default_max_file_size() -> u64 {
    104_857_600 // 100MB
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthConfig {
    #[serde(default)]
@@ -185,36 +108,6 @@ impl Default for NpmConfig {
    }
}

impl Default for PypiConfig {
    fn default() -> Self {
        Self {
            proxy: Some("https://pypi.org/simple/".to_string()),
            proxy_timeout: 30,
        }
    }
}

impl Default for DockerConfig {
    fn default() -> Self {
        Self {
            proxy_timeout: 60,
            upstreams: vec![DockerUpstream {
                url: "https://registry-1.docker.io".to_string(),
                auth: None,
            }],
        }
    }
}

impl Default for RawConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            max_file_size: 104_857_600, // 100MB
        }
    }
}

impl Default for AuthConfig {
    fn default() -> Self {
        Self {
@@ -225,76 +118,6 @@ impl Default for AuthConfig {
    }
}

/// Rate limiting configuration
///
/// Controls request rate limits for different endpoint types.
///
/// # Example
/// ```toml
/// [rate_limit]
/// auth_rps = 1
/// auth_burst = 5
/// upload_rps = 500
/// upload_burst = 1000
/// general_rps = 100
/// general_burst = 200
/// ```
///
/// # Environment Variables
/// - `NORA_RATE_LIMIT_AUTH_RPS` - Auth requests per second
/// - `NORA_RATE_LIMIT_AUTH_BURST` - Auth burst size
/// - `NORA_RATE_LIMIT_UPLOAD_RPS` - Upload requests per second
/// - `NORA_RATE_LIMIT_UPLOAD_BURST` - Upload burst size
/// - `NORA_RATE_LIMIT_GENERAL_RPS` - General requests per second
/// - `NORA_RATE_LIMIT_GENERAL_BURST` - General burst size
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RateLimitConfig {
    #[serde(default = "default_auth_rps")]
    pub auth_rps: u64,
    #[serde(default = "default_auth_burst")]
    pub auth_burst: u32,
    #[serde(default = "default_upload_rps")]
    pub upload_rps: u64,
    #[serde(default = "default_upload_burst")]
    pub upload_burst: u32,
    #[serde(default = "default_general_rps")]
    pub general_rps: u64,
    #[serde(default = "default_general_burst")]
    pub general_burst: u32,
}

fn default_auth_rps() -> u64 {
    1
}
fn default_auth_burst() -> u32 {
    5
}
fn default_upload_rps() -> u64 {
    200
}
fn default_upload_burst() -> u32 {
    500
}
fn default_general_rps() -> u64 {
    100
}
fn default_general_burst() -> u32 {
    200
}

impl Default for RateLimitConfig {
    fn default() -> Self {
        Self {
            auth_rps: default_auth_rps(),
            auth_burst: default_auth_burst(),
            upload_rps: default_upload_rps(),
            upload_burst: default_upload_burst(),
            general_rps: default_general_rps(),
            general_burst: default_general_burst(),
        }
    }
}

impl Config {
    /// Load configuration with priority: ENV > config.toml > defaults
    pub fn load() -> Self {
@@ -321,9 +144,6 @@ impl Config {
                self.server.port = port;
            }
        }
        if let Ok(val) = env::var("NORA_PUBLIC_URL") {
            self.server.public_url = if val.is_empty() { None } else { Some(val) };
        }

        // Storage config
        if let Ok(val) = env::var("NORA_STORAGE_MODE") {
@@ -341,15 +161,6 @@ impl Config {
        if let Ok(val) = env::var("NORA_STORAGE_BUCKET") {
            self.storage.bucket = val;
        }
        if let Ok(val) = env::var("NORA_STORAGE_S3_ACCESS_KEY") {
            self.storage.s3_access_key = if val.is_empty() { None } else { Some(val) };
        }
        if let Ok(val) = env::var("NORA_STORAGE_S3_SECRET_KEY") {
            self.storage.s3_secret_key = if val.is_empty() { None } else { Some(val) };
        }
        if let Ok(val) = env::var("NORA_STORAGE_S3_REGION") {
            self.storage.s3_region = val;
        }

        // Auth config
        if let Ok(val) = env::var("NORA_AUTH_ENABLED") {
@@ -379,91 +190,10 @@ impl Config {
            }
        }

        // PyPI config
        if let Ok(val) = env::var("NORA_PYPI_PROXY") {
            self.pypi.proxy = if val.is_empty() { None } else { Some(val) };
        }
        if let Ok(val) = env::var("NORA_PYPI_PROXY_TIMEOUT") {
            if let Ok(timeout) = val.parse() {
                self.pypi.proxy_timeout = timeout;
            }
        }

        // Docker config
        if let Ok(val) = env::var("NORA_DOCKER_PROXY_TIMEOUT") {
            if let Ok(timeout) = val.parse() {
                self.docker.proxy_timeout = timeout;
            }
        }
        // NORA_DOCKER_UPSTREAMS format: "url1,url2" or "url1|auth1,url2|auth2"
        if let Ok(val) = env::var("NORA_DOCKER_UPSTREAMS") {
            self.docker.upstreams = val
                .split(',')
                .filter(|s| !s.is_empty())
                .map(|s| {
                    let parts: Vec<&str> = s.trim().splitn(2, '|').collect();
                    DockerUpstream {
                        url: parts[0].to_string(),
                        auth: parts.get(1).map(|a| a.to_string()),
                    }
                })
                .collect();
        }

        // Raw config
        if let Ok(val) = env::var("NORA_RAW_ENABLED") {
            self.raw.enabled = val.to_lowercase() == "true" || val == "1";
        }
        if let Ok(val) = env::var("NORA_RAW_MAX_FILE_SIZE") {
            if let Ok(size) = val.parse() {
                self.raw.max_file_size = size;
            }
        }

        // Token storage
        if let Ok(val) = env::var("NORA_AUTH_TOKEN_STORAGE") {
            self.auth.token_storage = val;
        }

        // Rate limit config
        if let Ok(val) = env::var("NORA_RATE_LIMIT_AUTH_RPS") {
            if let Ok(v) = val.parse::<u64>() {
                self.rate_limit.auth_rps = v;
            }
        }
        if let Ok(val) = env::var("NORA_RATE_LIMIT_AUTH_BURST") {
            if let Ok(v) = val.parse::<u32>() {
                self.rate_limit.auth_burst = v;
            }
        }
        if let Ok(val) = env::var("NORA_RATE_LIMIT_UPLOAD_RPS") {
            if let Ok(v) = val.parse::<u64>() {
                self.rate_limit.upload_rps = v;
            }
        }
        if let Ok(val) = env::var("NORA_RATE_LIMIT_UPLOAD_BURST") {
            if let Ok(v) = val.parse::<u32>() {
                self.rate_limit.upload_burst = v;
            }
        }
        if let Ok(val) = env::var("NORA_RATE_LIMIT_GENERAL_RPS") {
            if let Ok(v) = val.parse::<u64>() {
                self.rate_limit.general_rps = v;
            }
        }
        if let Ok(val) = env::var("NORA_RATE_LIMIT_GENERAL_BURST") {
            if let Ok(v) = val.parse::<u32>() {
                self.rate_limit.general_burst = v;
            }
        }

        // Secrets config
        if let Ok(val) = env::var("NORA_SECRETS_PROVIDER") {
            self.secrets.provider = val;
        }
        if let Ok(val) = env::var("NORA_SECRETS_CLEAR_ENV") {
            self.secrets.clear_env = val.to_lowercase() == "true" || val == "1";
        }
    }
}

@@ -473,62 +203,16 @@ impl Default for Config {
            server: ServerConfig {
                host: String::from("127.0.0.1"),
                port: 4000,
                public_url: None,
            },
            storage: StorageConfig {
                mode: StorageMode::Local,
                path: String::from("data/storage"),
                s3_url: String::from("http://127.0.0.1:3000"),
                bucket: String::from("registry"),
                s3_access_key: None,
                s3_secret_key: None,
                s3_region: String::from("us-east-1"),
            },
            maven: MavenConfig::default(),
            npm: NpmConfig::default(),
            pypi: PypiConfig::default(),
            docker: DockerConfig::default(),
            raw: RawConfig::default(),
            auth: AuthConfig::default(),
            rate_limit: RateLimitConfig::default(),
            secrets: SecretsConfig::default(),
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_rate_limit_default() {
        let config = RateLimitConfig::default();
        assert_eq!(config.auth_rps, 1);
        assert_eq!(config.auth_burst, 5);
        assert_eq!(config.upload_rps, 200);
        assert_eq!(config.upload_burst, 500);
        assert_eq!(config.general_rps, 100);
        assert_eq!(config.general_burst, 200);
    }

    #[test]
    fn test_rate_limit_from_toml() {
        let toml = r#"
            [server]
            host = "127.0.0.1"
            port = 4000

            [storage]
            mode = "local"

            [rate_limit]
            auth_rps = 10
            upload_burst = 1000
        "#;

        let config: Config = toml::from_str(toml).unwrap();
        assert_eq!(config.rate_limit.auth_rps, 10);
        assert_eq!(config.rate_limit.upload_burst, 1000);
        assert_eq!(config.rate_limit.auth_burst, 5); // default
    }
}
@@ -1,125 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;

/// Dashboard metrics for tracking registry activity
/// Uses atomic counters for thread-safe access without locks
pub struct DashboardMetrics {
    // Global counters
    pub downloads: AtomicU64,
    pub uploads: AtomicU64,
    pub cache_hits: AtomicU64,
    pub cache_misses: AtomicU64,

    // Per-registry download counters
    pub docker_downloads: AtomicU64,
    pub docker_uploads: AtomicU64,
    pub npm_downloads: AtomicU64,
    pub maven_downloads: AtomicU64,
    pub maven_uploads: AtomicU64,
    pub cargo_downloads: AtomicU64,
    pub pypi_downloads: AtomicU64,
    pub raw_downloads: AtomicU64,
    pub raw_uploads: AtomicU64,

    pub start_time: Instant,
}

impl DashboardMetrics {
    pub fn new() -> Self {
        Self {
            downloads: AtomicU64::new(0),
            uploads: AtomicU64::new(0),
            cache_hits: AtomicU64::new(0),
            cache_misses: AtomicU64::new(0),
            docker_downloads: AtomicU64::new(0),
            docker_uploads: AtomicU64::new(0),
            npm_downloads: AtomicU64::new(0),
            maven_downloads: AtomicU64::new(0),
            maven_uploads: AtomicU64::new(0),
            cargo_downloads: AtomicU64::new(0),
            pypi_downloads: AtomicU64::new(0),
            raw_downloads: AtomicU64::new(0),
            raw_uploads: AtomicU64::new(0),
            start_time: Instant::now(),
        }
    }

    /// Record a download event for the specified registry
    pub fn record_download(&self, registry: &str) {
        self.downloads.fetch_add(1, Ordering::Relaxed);
        match registry {
            "docker" => self.docker_downloads.fetch_add(1, Ordering::Relaxed),
            "npm" => self.npm_downloads.fetch_add(1, Ordering::Relaxed),
            "maven" => self.maven_downloads.fetch_add(1, Ordering::Relaxed),
            "cargo" => self.cargo_downloads.fetch_add(1, Ordering::Relaxed),
            "pypi" => self.pypi_downloads.fetch_add(1, Ordering::Relaxed),
            "raw" => self.raw_downloads.fetch_add(1, Ordering::Relaxed),
            _ => 0,
        };
    }

    /// Record an upload event for the specified registry
    pub fn record_upload(&self, registry: &str) {
        self.uploads.fetch_add(1, Ordering::Relaxed);
        match registry {
            "docker" => self.docker_uploads.fetch_add(1, Ordering::Relaxed),
            "maven" => self.maven_uploads.fetch_add(1, Ordering::Relaxed),
            "raw" => self.raw_uploads.fetch_add(1, Ordering::Relaxed),
            _ => 0,
        };
    }

    /// Record a cache hit
    pub fn record_cache_hit(&self) {
        self.cache_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Record a cache miss
    pub fn record_cache_miss(&self) {
        self.cache_misses.fetch_add(1, Ordering::Relaxed);
    }

    /// Calculate the cache hit rate as a percentage
    pub fn cache_hit_rate(&self) -> f64 {
        let hits = self.cache_hits.load(Ordering::Relaxed);
        let misses = self.cache_misses.load(Ordering::Relaxed);
        let total = hits + misses;
        if total == 0 {
            0.0
        } else {
            (hits as f64 / total as f64) * 100.0
        }
    }

    /// Get download count for a specific registry
    pub fn get_registry_downloads(&self, registry: &str) -> u64 {
        match registry {
            "docker" => self.docker_downloads.load(Ordering::Relaxed),
            "npm" => self.npm_downloads.load(Ordering::Relaxed),
            "maven" => self.maven_downloads.load(Ordering::Relaxed),
            "cargo" => self.cargo_downloads.load(Ordering::Relaxed),
            "pypi" => self.pypi_downloads.load(Ordering::Relaxed),
            "raw" => self.raw_downloads.load(Ordering::Relaxed),
            _ => 0,
        }
    }

    /// Get upload count for a specific registry
    pub fn get_registry_uploads(&self, registry: &str) -> u64 {
        match registry {
            "docker" => self.docker_uploads.load(Ordering::Relaxed),
            "maven" => self.maven_uploads.load(Ordering::Relaxed),
            "raw" => self.raw_uploads.load(Ordering::Relaxed),
            _ => 0,
        }
    }
}

impl Default for DashboardMetrics {
    fn default() -> Self {
        Self::new()
    }
}
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

#![allow(dead_code)]
//! Application error handling with HTTP response conversion
//!

@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use axum::{extract::State, http::StatusCode, response::Json, routing::get, Router};
use serde::Serialize;
use std::sync::Arc;

@@ -1,11 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

mod activity_log;
mod auth;
mod backup;
mod config;
mod dashboard_metrics;
mod error;
mod health;
mod metrics;
@@ -13,9 +8,7 @@ mod migrate;
mod openapi;
mod rate_limit;
mod registry;
mod repo_index;
mod request_id;
mod secrets;
mod storage;
mod tokens;
mod ui;
@@ -30,11 +23,8 @@ use tokio::signal;
use tracing::{error, info, warn};
use tracing_subscriber::{fmt, prelude::*, EnvFilter};

use activity_log::ActivityLog;
use auth::HtpasswdAuth;
use config::{Config, StorageMode};
use dashboard_metrics::DashboardMetrics;
use repo_index::RepoIndex;
pub use storage::Storage;
use tokens::TokenStore;

@@ -81,11 +71,6 @@ pub struct AppState {
    pub start_time: Instant,
    pub auth: Option<HtpasswdAuth>,
    pub tokens: Option<TokenStore>,
    pub metrics: DashboardMetrics,
    pub activity: ActivityLog,
    pub docker_auth: registry::DockerAuth,
    pub repo_index: RepoIndex,
    pub http_client: reqwest::Client,
}

#[tokio::main]
@@ -111,18 +96,10 @@ async fn main() {
                info!(
                    s3_url = %config.storage.s3_url,
                    bucket = %config.storage.bucket,
                    region = %config.storage.s3_region,
                    has_credentials = config.storage.s3_access_key.is_some(),
                    "Using S3 storage"
                );
            }
            Storage::new_s3(
                &config.storage.s3_url,
                &config.storage.bucket,
                &config.storage.s3_region,
                config.storage.s3_access_key.as_deref(),
                config.storage.s3_secret_key.as_deref(),
            )
            Storage::new_s3(&config.storage.s3_url, &config.storage.bucket)
        }
    };

@@ -146,13 +123,7 @@ async fn main() {
        Some(Commands::Migrate { from, to, dry_run }) => {
            let source = match from.as_str() {
                "local" => Storage::new_local(&config.storage.path),
                "s3" => Storage::new_s3(
                    &config.storage.s3_url,
                    &config.storage.bucket,
                    &config.storage.s3_region,
                    config.storage.s3_access_key.as_deref(),
                    config.storage.s3_secret_key.as_deref(),
                ),
                "s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
                _ => {
                    error!("Invalid source: '{}'. Use 'local' or 's3'", from);
                    std::process::exit(1);
@@ -161,13 +132,7 @@ async fn main() {

            let dest = match to.as_str() {
                "local" => Storage::new_local(&config.storage.path),
                "s3" => Storage::new_s3(
                    &config.storage.s3_url,
                    &config.storage.bucket,
                    &config.storage.s3_region,
                    config.storage.s3_access_key.as_deref(),
                    config.storage.s3_secret_key.as_deref(),
                ),
                "s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
                _ => {
                    error!("Invalid destination: '{}'. Use 'local' or 's3'", to);
                    std::process::exit(1);
@@ -208,36 +173,6 @@ fn init_logging(json_format: bool) {
async fn run_server(config: Config, storage: Storage) {
    let start_time = Instant::now();

    // Log rate limiting configuration
    info!(
        auth_rps = config.rate_limit.auth_rps,
        auth_burst = config.rate_limit.auth_burst,
        upload_rps = config.rate_limit.upload_rps,
        upload_burst = config.rate_limit.upload_burst,
        general_rps = config.rate_limit.general_rps,
        general_burst = config.rate_limit.general_burst,
        "Rate limiting configured"
    );

    // Initialize secrets provider
    let secrets_provider = match secrets::create_secrets_provider(&config.secrets) {
        Ok(provider) => {
            info!(
                provider = provider.provider_name(),
                clear_env = config.secrets.clear_env,
                "Secrets provider initialized"
            );
            Some(provider)
        }
        Err(e) => {
            warn!(error = %e, "Failed to initialize secrets provider, using defaults");
            None
        }
    };

    // Store secrets provider for future use (S3 credentials, etc.)
    let _secrets = secrets_provider;

    // Load auth if enabled
    let auth = if config.auth.enabled {
        let path = Path::new(&config.auth.htpasswd_file);
@@ -264,31 +199,16 @@ async fn run_server(config: Config, storage: Storage) {
        None
    };

    // Create rate limiters before moving config to state
    let auth_limiter = rate_limit::auth_rate_limiter(&config.rate_limit);
    let upload_limiter = rate_limit::upload_rate_limiter(&config.rate_limit);
    let general_limiter = rate_limit::general_rate_limiter(&config.rate_limit);

    // Initialize Docker auth with proxy timeout
    let docker_auth = registry::DockerAuth::new(config.docker.proxy_timeout);

    let http_client = reqwest::Client::new();

    let state = Arc::new(AppState {
        storage,
        config,
        start_time,
        auth,
        tokens,
        metrics: DashboardMetrics::new(),
        activity: ActivityLog::new(50),
        docker_auth,
        repo_index: RepoIndex::new(),
        http_client,
    });

    // Token routes with strict rate limiting (brute-force protection)
    let auth_routes = auth::token_routes().layer(auth_limiter);
    let auth_routes = auth::token_routes().layer(rate_limit::auth_rate_limiter());

    // Registry routes with upload rate limiting
    let registry_routes = Router::new()
@@ -297,25 +217,16 @@ async fn run_server(config: Config, storage: Storage) {
        .merge(registry::npm_routes())
        .merge(registry::cargo_routes())
        .merge(registry::pypi_routes())
        .merge(registry::raw_routes())
        .layer(upload_limiter);
        .layer(rate_limit::upload_rate_limiter());

    // Routes WITHOUT rate limiting (health, metrics, UI)
    let public_routes = Router::new()
    let app = Router::new()
        .merge(health::routes())
        .merge(metrics::routes())
        .merge(ui::routes())
        .merge(openapi::routes());

    // Routes WITH rate limiting
    let rate_limited_routes = Router::new()
        .merge(openapi::routes())
        .merge(auth_routes)
        .merge(registry_routes)
        .layer(general_limiter);

    let app = Router::new()
        .merge(public_routes)
        .merge(rate_limited_routes)
        .layer(rate_limit::general_rate_limiter()) // General rate limit for all routes
        .layer(DefaultBodyLimit::max(100 * 1024 * 1024)) // 100MB default body limit
        .layer(middleware::from_fn(request_id::request_id_middleware))
        .layer(middleware::from_fn(metrics::metrics_middleware))
@@ -349,7 +260,6 @@ async fn run_server(config: Config, storage: Storage) {
        npm = "/npm/",
        cargo = "/cargo/",
        pypi = "/simple/",
        raw = "/raw/",
        "Available endpoints"
    );
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use axum::{
    body::Body,
    extract::MatchedPath,

@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Migration between storage backends
//!
//! Supports migrating artifacts from one storage backend to another
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! OpenAPI documentation and Swagger UI
//!
//! Functions in this module are stubs used only for generating OpenAPI documentation.
@@ -18,7 +15,7 @@ use crate::AppState;
#[openapi(
    info(
        title = "Nora",
        version = "0.2.12",
        version = "0.1.0",
        description = "Multi-protocol package registry supporting Docker, Maven, npm, Cargo, and PyPI",
        license(name = "MIT"),
        contact(name = "DevITWay", url = "https://github.com/getnora-io/nora")
@@ -28,8 +25,6 @@ use crate::AppState;
    ),
    tags(
        (name = "health", description = "Health check endpoints"),
        (name = "metrics", description = "Prometheus metrics"),
        (name = "dashboard", description = "Dashboard & Metrics API"),
        (name = "docker", description = "Docker Registry v2 API"),
        (name = "maven", description = "Maven Repository API"),
        (name = "npm", description = "npm Registry API"),
@@ -41,30 +36,16 @@ use crate::AppState;
        // Health
        crate::openapi::health_check,
        crate::openapi::readiness_check,
        // Metrics
        crate::openapi::prometheus_metrics,
        // Dashboard
        crate::openapi::dashboard_metrics,
        // Docker - Read
        // Docker
        crate::openapi::docker_version,
        crate::openapi::docker_catalog,
        crate::openapi::docker_tags,
        crate::openapi::docker_manifest_get,
        crate::openapi::docker_blob_head,
        crate::openapi::docker_blob_get,
        // Docker - Write
        crate::openapi::docker_manifest_put,
        crate::openapi::docker_blob_upload_start,
        crate::openapi::docker_blob_upload_patch,
        crate::openapi::docker_blob_upload_put,
        crate::openapi::docker_manifest,
        crate::openapi::docker_blob,
        // Maven
        crate::openapi::maven_artifact_get,
        crate::openapi::maven_artifact_put,
        crate::openapi::maven_artifact,
        // npm
        crate::openapi::npm_package,
        // Cargo
        crate::openapi::cargo_metadata,
        crate::openapi::cargo_download,
        // PyPI
        crate::openapi::pypi_simple,
        crate::openapi::pypi_package,
@@ -78,11 +59,6 @@ use crate::AppState;
        HealthResponse,
        StorageHealth,
        RegistriesHealth,
        DashboardResponse,
        GlobalStats,
        RegistryCardStats,
        MountPoint,
        ActivityEntry,
        DockerVersion,
        DockerCatalog,
        DockerTags,
@@ -206,76 +182,8 @@ pub struct ErrorResponse {
    pub error: String,
}

#[derive(Serialize, ToSchema)]
pub struct DashboardResponse {
    /// Global statistics across all registries
    pub global_stats: GlobalStats,
    /// Per-registry statistics
    pub registry_stats: Vec<RegistryCardStats>,
    /// Registry mount points and proxy configuration
    pub mount_points: Vec<MountPoint>,
    /// Recent activity log entries
    pub activity: Vec<ActivityEntry>,
    /// Server uptime in seconds
    pub uptime_seconds: u64,
}

#[derive(Serialize, ToSchema)]
pub struct GlobalStats {
    /// Total downloads across all registries
    pub downloads: u64,
    /// Total uploads across all registries
    pub uploads: u64,
    /// Total artifact count
    pub artifacts: u64,
    /// Cache hit percentage (0-100)
    pub cache_hit_percent: f64,
    /// Total storage used in bytes
    pub storage_bytes: u64,
}

#[derive(Serialize, ToSchema)]
pub struct RegistryCardStats {
    /// Registry name (docker, maven, npm, cargo, pypi)
    pub name: String,
    /// Number of artifacts in this registry
    pub artifact_count: usize,
    /// Download count for this registry
    pub downloads: u64,
    /// Upload count for this registry
    pub uploads: u64,
    /// Storage used by this registry in bytes
    pub size_bytes: u64,
}

#[derive(Serialize, ToSchema)]
pub struct MountPoint {
    /// Registry display name
    pub registry: String,
    /// URL mount path (e.g., /v2/, /maven2/)
    pub mount_path: String,
    /// Upstream proxy URL if configured
    pub proxy_upstream: Option<String>,
}

#[derive(Serialize, ToSchema)]
pub struct ActivityEntry {
    /// ISO 8601 timestamp
    pub timestamp: String,
    /// Action type (Pull, Push, CacheHit, ProxyFetch)
    pub action: String,
    /// Artifact name/identifier
    pub artifact: String,
    /// Registry type
    pub registry: String,
    /// Source (LOCAL, PROXY, CACHE)
    pub source: String,
}

// ============ Path Operations (documentation only) ============

// -------------------- Health --------------------

/// Health check endpoint
#[utoipa::path(
    get,
@@ -300,39 +208,6 @@ pub async fn health_check() {}
)]
pub async fn readiness_check() {}

// -------------------- Metrics --------------------

/// Prometheus metrics endpoint
///
/// Returns metrics in Prometheus text format for scraping.
#[utoipa::path(
    get,
    path = "/metrics",
    tag = "metrics",
    responses(
        (status = 200, description = "Prometheus metrics", content_type = "text/plain")
    )
)]
pub async fn prometheus_metrics() {}

// -------------------- Dashboard --------------------

/// Dashboard metrics and activity
///
/// Returns comprehensive metrics including downloads, uploads, cache statistics,
/// per-registry stats, mount points configuration, and recent activity log.
#[utoipa::path(
    get,
    path = "/api/ui/dashboard",
    tag = "dashboard",
    responses(
        (status = 200, description = "Dashboard metrics", body = DashboardResponse)
    )
)]
pub async fn dashboard_metrics() {}

// -------------------- Docker Registry v2 - Read Operations --------------------

/// Docker Registry version check
#[utoipa::path(
    get,
@@ -362,7 +237,7 @@ pub async fn docker_catalog() {}
    path = "/v2/{name}/tags/list",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name (e.g., 'alpine' or 'library/nginx')")
        ("name" = String, Path, description = "Repository name")
    ),
    responses(
        (status = 200, description = "Tag list", body = DockerTags),
@@ -378,30 +253,14 @@ pub async fn docker_tags() {}
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name"),
        ("reference" = String, Path, description = "Tag or digest (sha256:...)")
        ("reference" = String, Path, description = "Tag or digest")
    ),
    responses(
        (status = 200, description = "Manifest content"),
        (status = 404, description = "Manifest not found")
    )
)]
pub async fn docker_manifest_get() {}

/// Check if blob exists
#[utoipa::path(
    head,
    path = "/v2/{name}/blobs/{digest}",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name"),
        ("digest" = String, Path, description = "Blob digest (sha256:...)")
    ),
    responses(
        (status = 200, description = "Blob exists, Content-Length header contains size"),
        (status = 404, description = "Blob not found")
    )
)]
pub async fn docker_blob_head() {}
pub async fn docker_manifest() {}

/// Get blob
#[utoipa::path(
@@ -417,79 +276,7 @@ pub async fn docker_blob_head() {}
        (status = 404, description = "Blob not found")
    )
)]
pub async fn docker_blob_get() {}

// -------------------- Docker Registry v2 - Write Operations --------------------

/// Push manifest
#[utoipa::path(
    put,
    path = "/v2/{name}/manifests/{reference}",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name"),
        ("reference" = String, Path, description = "Tag or digest")
    ),
    responses(
        (status = 201, description = "Manifest created, Docker-Content-Digest header contains digest"),
        (status = 400, description = "Invalid manifest")
    )
)]
pub async fn docker_manifest_put() {}

/// Start blob upload
///
/// Initiates a resumable blob upload. Returns a Location header with the upload URL.
#[utoipa::path(
    post,
    path = "/v2/{name}/blobs/uploads/",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name")
    ),
    responses(
        (status = 202, description = "Upload started, Location header contains upload URL")
    )
)]
pub async fn docker_blob_upload_start() {}

/// Upload blob chunk (chunked upload)
///
/// Uploads a chunk of data to an in-progress upload session.
#[utoipa::path(
    patch,
    path = "/v2/{name}/blobs/uploads/{uuid}",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name"),
        ("uuid" = String, Path, description = "Upload session UUID")
    ),
    responses(
        (status = 202, description = "Chunk accepted, Range header indicates bytes received")
    )
)]
pub async fn docker_blob_upload_patch() {}

/// Complete blob upload
///
/// Finalizes the blob upload. Can include final chunk data in the body.
#[utoipa::path(
    put,
    path = "/v2/{name}/blobs/uploads/{uuid}",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name"),
        ("uuid" = String, Path, description = "Upload session UUID"),
        ("digest" = String, Query, description = "Expected blob digest (sha256:...)")
    ),
    responses(
        (status = 201, description = "Blob created"),
        (status = 400, description = "Digest mismatch or missing")
    )
)]
pub async fn docker_blob_upload_put() {}

// -------------------- Maven --------------------
pub async fn docker_blob() {}

/// Get Maven artifact
#[utoipa::path(
@@ -504,24 +291,7 @@ pub async fn docker_blob_upload_put() {}
        (status = 404, description = "Artifact not found, trying upstream proxies")
    )
)]
pub async fn maven_artifact_get() {}

/// Upload Maven artifact
#[utoipa::path(
    put,
    path = "/maven2/{path}",
    tag = "maven",
    params(
        ("path" = String, Path, description = "Artifact path")
    ),
    responses(
        (status = 201, description = "Artifact uploaded"),
        (status = 500, description = "Storage error")
    )
)]
pub async fn maven_artifact_put() {}

// -------------------- npm --------------------
pub async fn maven_artifact() {}

/// Get npm package metadata
#[utoipa::path(
@@ -529,7 +299,7 @@ pub async fn maven_artifact_put() {}
    path = "/npm/{name}",
    tag = "npm",
    params(
        ("name" = String, Path, description = "Package name (e.g., 'lodash' or '@scope/package')")
        ("name" = String, Path, description = "Package name")
    ),
    responses(
        (status = 200, description = "Package metadata (JSON)"),
@@ -538,41 +308,6 @@ pub async fn maven_artifact_put() {}
)]
pub async fn npm_package() {}

// -------------------- Cargo --------------------

/// Get Cargo crate metadata
#[utoipa::path(
    get,
    path = "/cargo/api/v1/crates/{crate_name}",
    tag = "cargo",
    params(
        ("crate_name" = String, Path, description = "Crate name")
    ),
    responses(
        (status = 200, description = "Crate metadata (JSON)"),
        (status = 404, description = "Crate not found")
    )
)]
pub async fn cargo_metadata() {}

/// Download Cargo crate
#[utoipa::path(
    get,
    path = "/cargo/api/v1/crates/{crate_name}/{version}/download",
    tag = "cargo",
    params(
        ("crate_name" = String, Path, description = "Crate name"),
        ("version" = String, Path, description = "Crate version")
    ),
    responses(
        (status = 200, description = "Crate file (.crate)"),
        (status = 404, description = "Crate version not found")
    )
)]
pub async fn cargo_download() {}

// -------------------- PyPI --------------------

/// PyPI Simple index
#[utoipa::path(
    get,
@@ -599,8 +334,6 @@ pub async fn pypi_simple() {}
)]
pub async fn pypi_package() {}

// -------------------- Auth / Tokens --------------------

/// Create API token
#[utoipa::path(
    post,
@@ -1,6 +1,4 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

#![allow(dead_code)]
//! Rate limiting configuration and middleware
//!
//! Provides rate limiting to protect against:
@@ -8,109 +6,118 @@
//! - DoS attacks on upload endpoints
//! - General API abuse

use crate::config::RateLimitConfig;
use tower_governor::governor::GovernorConfigBuilder;

/// Rate limit configuration
#[derive(Debug, Clone)]
pub struct RateLimitConfig {
    /// Requests per second for auth endpoints (strict)
    pub auth_rps: u32,
    /// Burst size for auth endpoints
    pub auth_burst: u32,
    /// Requests per second for upload endpoints
    pub upload_rps: u32,
    /// Burst size for upload endpoints
    pub upload_burst: u32,
    /// Requests per second for general endpoints (lenient)
    pub general_rps: u32,
    /// Burst size for general endpoints
    pub general_burst: u32,
}

impl Default for RateLimitConfig {
    fn default() -> Self {
        Self {
            auth_rps: 1,        // 1 req/sec for auth (strict)
            auth_burst: 5,      // Allow burst of 5
            upload_rps: 50,     // 50 req/sec for uploads (Docker needs parallel)
            upload_burst: 100,  // Allow burst of 100
            general_rps: 100,   // 100 req/sec general
            general_burst: 200, // Allow burst of 200
        }
    }
}

/// Create rate limiter layer for auth endpoints (strict protection against brute-force)
pub fn auth_rate_limiter(
    config: &RateLimitConfig,
) -> tower_governor::GovernorLayer<
///
/// Default: 1 request per second, burst of 5
pub fn auth_rate_limiter() -> tower_governor::GovernorLayer<
    tower_governor::key_extractor::PeerIpKeyExtractor,
    governor::middleware::StateInformationMiddleware,
    axum::body::Body,
> {
    let gov_config = GovernorConfigBuilder::default()
        .per_second(config.auth_rps)
        .burst_size(config.auth_burst)
    let config = GovernorConfigBuilder::default()
        .per_second(1)
        .burst_size(5)
        .use_headers()
        .finish()
        .expect("Failed to build auth rate limiter");
        .unwrap();

    tower_governor::GovernorLayer::new(gov_config)
    tower_governor::GovernorLayer::new(config)
}

/// Create rate limiter layer for upload endpoints
///
/// High limits to accommodate Docker client's aggressive parallel layer uploads
pub fn upload_rate_limiter(
    config: &RateLimitConfig,
) -> tower_governor::GovernorLayer<
/// Default: 50 requests per second, burst of 100
/// Higher limits to accommodate Docker client's parallel layer uploads
pub fn upload_rate_limiter() -> tower_governor::GovernorLayer<
    tower_governor::key_extractor::PeerIpKeyExtractor,
    governor::middleware::StateInformationMiddleware,
    axum::body::Body,
> {
    let gov_config = GovernorConfigBuilder::default()
        .per_second(config.upload_rps)
        .burst_size(config.upload_burst)
    let config = GovernorConfigBuilder::default()
        .per_second(50)
        .burst_size(100)
        .use_headers()
        .finish()
        .expect("Failed to build upload rate limiter");
        .unwrap();

    tower_governor::GovernorLayer::new(gov_config)
    tower_governor::GovernorLayer::new(config)
}

/// Create rate limiter layer for general endpoints (lenient)
pub fn general_rate_limiter(
    config: &RateLimitConfig,
) -> tower_governor::GovernorLayer<
///
/// Default: 100 requests per second, burst of 200
pub fn general_rate_limiter() -> tower_governor::GovernorLayer<
    tower_governor::key_extractor::PeerIpKeyExtractor,
    governor::middleware::StateInformationMiddleware,
    axum::body::Body,
> {
    let gov_config = GovernorConfigBuilder::default()
        .per_second(config.general_rps)
        .burst_size(config.general_burst)
    let config = GovernorConfigBuilder::default()
        .per_second(100)
        .burst_size(200)
        .use_headers()
        .finish()
        .expect("Failed to build general rate limiter");
        .unwrap();

    tower_governor::GovernorLayer::new(gov_config)
    tower_governor::GovernorLayer::new(config)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::RateLimitConfig;

    #[test]
    fn test_default_config() {
        let config = RateLimitConfig::default();
        assert_eq!(config.auth_rps, 1);
        assert_eq!(config.auth_burst, 5);
        assert_eq!(config.upload_rps, 200);
        assert_eq!(config.upload_rps, 50);
        assert_eq!(config.general_rps, 100);
    }

    #[test]
    fn test_auth_rate_limiter_creation() {
        let config = RateLimitConfig::default();
        let _limiter = auth_rate_limiter(&config);
        let _limiter = auth_rate_limiter();
    }

    #[test]
    fn test_upload_rate_limiter_creation() {
        let config = RateLimitConfig::default();
        let _limiter = upload_rate_limiter(&config);
        let _limiter = upload_rate_limiter();
    }

    #[test]
    fn test_general_rate_limiter_creation() {
        let config = RateLimitConfig::default();
        let _limiter = general_rate_limiter(&config);
    }

    #[test]
    fn test_custom_config() {
        let config = RateLimitConfig {
            auth_rps: 10,
            auth_burst: 20,
            upload_rps: 500,
            upload_burst: 1000,
            general_rps: 200,
            general_burst: 400,
        };
        let _auth = auth_rate_limiter(&config);
        let _upload = upload_rate_limiter(&config);
        let _general = general_rate_limiter(&config);
        let _limiter = general_rate_limiter();
    }
}
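A minimal sketch of wiring these layers into an axum router, using the zero-argument variants from one side of the diff above; the route path and handler are illustrative assumptions:

use axum::{routing::post, Router};

// Hypothetical wiring: the strict limiter guards the login route only,
// the lenient one wraps the whole app. With tower semantics the outer
// (general) layer runs before the inner (auth) one for /auth/login.
fn app() -> Router {
    let auth_routes = Router::new()
        .route("/auth/login", post(|| async { "ok" }))
        .layer(auth_rate_limiter());

    Router::new()
        .merge(auth_routes)
        .layer(general_rate_limiter())
}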
@@ -1,7 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
    extract::{Path, State},
@@ -41,17 +37,7 @@ async fn download(
        crate_name, version, crate_name, version
    );
    match state.storage.get(&key).await {
        Ok(data) => {
            state.metrics.record_download("cargo");
            state.metrics.record_cache_hit();
            state.activity.push(ActivityEntry::new(
                ActionType::Pull,
                format!("{}@{}", crate_name, version),
                "cargo",
                "LOCAL",
            ));
            (StatusCode::OK, data).into_response()
        }
        Ok(data) => (StatusCode::OK, data).into_response(),
        Err(_) => StatusCode::NOT_FOUND.into_response(),
    }
}
@@ -1,9 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use crate::activity_log::{ActionType, ActivityEntry};
use crate::registry::docker_auth::DockerAuth;
use crate::storage::Storage;
use crate::validation::{validate_digest, validate_docker_name, validate_docker_reference};
use crate::AppState;
use axum::{
@@ -11,106 +5,31 @@ use axum::{
    extract::{Path, State},
    http::{header, HeaderName, StatusCode},
    response::{IntoResponse, Response},
    routing::{get, head, patch, put},
    routing::{get, head, put},
    Json, Router,
};
use parking_lot::RwLock;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

/// Metadata for a Docker image stored alongside manifests
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct ImageMetadata {
    pub push_timestamp: u64,
    pub last_pulled: u64,
    pub downloads: u64,
    pub size_bytes: u64,
    pub os: String,
    pub arch: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub variant: Option<String>,
    pub layers: Vec<LayerInfo>,
}

/// Information about a single layer in a Docker image
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LayerInfo {
    pub digest: String,
    pub size: u64,
}

/// In-progress upload sessions for chunked uploads
/// Maps UUID -> accumulated data
static UPLOAD_SESSIONS: std::sync::LazyLock<RwLock<HashMap<String, Vec<u8>>>> =
    std::sync::LazyLock::new(|| RwLock::new(HashMap::new()));

pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
        .route("/v2/", get(check))
        .route("/v2/_catalog", get(catalog))
        // Single-segment name routes (e.g., /v2/alpine/...)
        .route("/v2/{name}/blobs/{digest}", head(check_blob))
        .route("/v2/{name}/blobs/{digest}", get(download_blob))
        .route(
            "/v2/{name}/blobs/uploads/",
            axum::routing::post(start_upload),
        )
        .route(
            "/v2/{name}/blobs/uploads/{uuid}",
            patch(patch_blob).put(upload_blob),
        )
        .route("/v2/{name}/blobs/uploads/{uuid}", put(upload_blob))
        .route("/v2/{name}/manifests/{reference}", get(get_manifest))
        .route("/v2/{name}/manifests/{reference}", put(put_manifest))
        .route("/v2/{name}/tags/list", get(list_tags))
        // Two-segment name routes (e.g., /v2/library/alpine/...)
        .route("/v2/{ns}/{name}/blobs/{digest}", head(check_blob_ns))
        .route("/v2/{ns}/{name}/blobs/{digest}", get(download_blob_ns))
        .route(
            "/v2/{ns}/{name}/blobs/uploads/",
            axum::routing::post(start_upload_ns),
        )
        .route(
            "/v2/{ns}/{name}/blobs/uploads/{uuid}",
            patch(patch_blob_ns).put(upload_blob_ns),
        )
        .route(
            "/v2/{ns}/{name}/manifests/{reference}",
            get(get_manifest_ns),
        )
        .route(
            "/v2/{ns}/{name}/manifests/{reference}",
            put(put_manifest_ns),
        )
        .route("/v2/{ns}/{name}/tags/list", get(list_tags_ns))
}

async fn check() -> (StatusCode, Json<Value>) {
    (StatusCode::OK, Json(json!({})))
}

/// List all repositories in the registry
async fn catalog(State(state): State<Arc<AppState>>) -> Json<Value> {
    let keys = state.storage.list("docker/").await;

    // Extract unique repository names from paths like "docker/{name}/manifests/..."
    let mut repos: Vec<String> = keys
        .iter()
        .filter_map(|k| {
            k.strip_prefix("docker/")
                .and_then(|rest| rest.split('/').next())
                .map(String::from)
        })
        .collect();

    repos.sort();
    repos.dedup();

    Json(json!({ "repositories": repos }))
}
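The name extraction in catalog() is pure string work, so it is easy to pin down in isolation; a small test sketch, assuming storage keys of the form "docker/{name}/...":

#[test]
fn catalog_extracts_unique_sorted_repo_names() {
    let keys = vec![
        "docker/alpine/manifests/latest.json".to_string(),
        "docker/alpine/blobs/sha256:abc".to_string(),
        "docker/nginx/manifests/1.27.json".to_string(),
    ];
    // Same pipeline as catalog(): strip the prefix, keep the first path
    // segment, then sort and dedup.
    let mut repos: Vec<String> = keys
        .iter()
        .filter_map(|k| {
            k.strip_prefix("docker/")
                .and_then(|rest| rest.split('/').next())
                .map(String::from)
        })
        .collect();
    repos.sort();
    repos.dedup();
    assert_eq!(repos, vec!["alpine", "nginx"]);
}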
async fn check_blob(
    State(state): State<Arc<AppState>>,
    Path((name, digest)): Path<(String, String)>,
@@ -145,66 +64,15 @@ async fn download_blob(
    }

    let key = format!("docker/{}/blobs/{}", name, digest);

    // Try local storage first
    if let Ok(data) = state.storage.get(&key).await {
        state.metrics.record_download("docker");
        state.metrics.record_cache_hit();
        state.activity.push(ActivityEntry::new(
            ActionType::Pull,
            format!("{}@{}", name, &digest[..19.min(digest.len())]),
            "docker",
            "LOCAL",
        ));
        return (
    match state.storage.get(&key).await {
        Ok(data) => (
            StatusCode::OK,
            [(header::CONTENT_TYPE, "application/octet-stream")],
            data,
        )
            .into_response();
            .into_response(),
        Err(_) => StatusCode::NOT_FOUND.into_response(),
    }

    // Try upstream proxies
    for upstream in &state.config.docker.upstreams {
        if let Ok(data) = fetch_blob_from_upstream(
            &state.http_client,
            &upstream.url,
            &name,
            &digest,
            &state.docker_auth,
            state.config.docker.proxy_timeout,
        )
        .await
        {
            state.metrics.record_download("docker");
            state.metrics.record_cache_miss();
            state.activity.push(ActivityEntry::new(
                ActionType::ProxyFetch,
                format!("{}@{}", name, &digest[..19.min(digest.len())]),
                "docker",
                "PROXY",
            ));

            // Cache in storage (fire and forget)
            let storage = state.storage.clone();
            let key_clone = key.clone();
            let data_clone = data.clone();
            tokio::spawn(async move {
                let _ = storage.put(&key_clone, &data_clone).await;
            });

            state.repo_index.invalidate("docker");

            return (
                StatusCode::OK,
                [(header::CONTENT_TYPE, "application/octet-stream")],
                Bytes::from(data),
            )
                .into_response();
        }
    }

    StatusCode::NOT_FOUND.into_response()
}

async fn start_upload(Path(name): Path<String>) -> Response {
@@ -224,46 +92,9 @@ async fn start_upload(Path(name): Path<String>) -> Response {
        .into_response()
}

/// PATCH handler for chunked blob uploads
/// Docker client sends data chunks via PATCH, then finalizes with PUT
async fn patch_blob(Path((name, uuid)): Path<(String, String)>, body: Bytes) -> Response {
    if let Err(e) = validate_docker_name(&name) {
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }

    // Append data to the upload session and get total size
    let total_size = {
        let mut sessions = UPLOAD_SESSIONS.write();
        let session = sessions.entry(uuid.clone()).or_default();
        session.extend_from_slice(&body);
        session.len()
    };

    let location = format!("/v2/{}/blobs/uploads/{}", name, uuid);
    // Range header indicates bytes 0 to (total_size - 1) have been received
    let range = if total_size > 0 {
        format!("0-{}", total_size - 1)
    } else {
        "0-0".to_string()
    };

    (
        StatusCode::ACCEPTED,
        [
            (header::LOCATION, location),
            (header::RANGE, range),
            (HeaderName::from_static("docker-upload-uuid"), uuid),
        ],
    )
        .into_response()
}
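The session bookkeeping in patch_blob is append-and-report; a sketch of the invariant it maintains, assuming the same UPLOAD_SESSIONS shape (UUID keys, accumulated bytes):

#[test]
fn chunked_session_accumulates_and_reports_range() {
    use parking_lot::RwLock;
    use std::collections::HashMap;

    let sessions: RwLock<HashMap<String, Vec<u8>>> = RwLock::new(HashMap::new());
    let uuid = "11111111-2222-3333-4444-555555555555".to_string();

    // Two PATCH chunks arrive for the same upload UUID.
    for chunk in [&b"hello "[..], &b"world"[..]] {
        let mut s = sessions.write();
        s.entry(uuid.clone()).or_default().extend_from_slice(chunk);
    }

    let total = sessions.read()[&uuid].len();
    // The Range header reports the inclusive byte range received so far.
    assert_eq!(format!("0-{}", total - 1), "0-10");
}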
/// PUT handler for completing blob uploads
/// Handles both monolithic uploads (body contains all data) and
/// chunked upload finalization (body may be empty, data in session)
async fn upload_blob(
    State(state): State<Arc<AppState>>,
    Path((name, uuid)): Path<(String, String)>,
    Path((name, _uuid)): Path<(String, String)>,
    axum::extract::Query(params): axum::extract::Query<std::collections::HashMap<String, String>>,
    body: Bytes,
) -> Response {
@@ -280,32 +111,9 @@ async fn upload_blob(
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }

    // Get data from chunked session if exists, otherwise use body directly
    let data = {
        let mut sessions = UPLOAD_SESSIONS.write();
        if let Some(mut session_data) = sessions.remove(&uuid) {
            // Chunked upload: append any final body data and use session
            if !body.is_empty() {
                session_data.extend_from_slice(&body);
            }
            session_data
        } else {
            // Monolithic upload: use body directly
            body.to_vec()
        }
    };

    let key = format!("docker/{}/blobs/{}", name, digest);
    match state.storage.put(&key, &data).await {
    match state.storage.put(&key, &body).await {
        Ok(()) => {
            state.metrics.record_upload("docker");
            state.activity.push(ActivityEntry::new(
                ActionType::Push,
                format!("{}@{}", name, &digest[..19.min(digest.len())]),
                "docker",
                "LOCAL",
            ));
            state.repo_index.invalidate("docker");
            let location = format!("/v2/{}/blobs/{}", name, digest);
            (StatusCode::CREATED, [(header::LOCATION, location)]).into_response()
        }
@@ -325,114 +133,18 @@ async fn get_manifest(
    }

    let key = format!("docker/{}/manifests/{}.json", name, reference);

    // Try local storage first
    if let Ok(data) = state.storage.get(&key).await {
        state.metrics.record_download("docker");
        state.metrics.record_cache_hit();
        state.activity.push(ActivityEntry::new(
            ActionType::Pull,
            format!("{}:{}", name, reference),
            "docker",
            "LOCAL",
        ));

        // Calculate digest for Docker-Content-Digest header
        use sha2::Digest;
        let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));

        // Detect manifest media type from content
        let content_type = detect_manifest_media_type(&data);

        // Update metadata (downloads, last_pulled) in background
        let meta_key = format!("docker/{}/manifests/{}.meta.json", name, reference);
        let storage_clone = state.storage.clone();
        tokio::spawn(update_metadata_on_pull(storage_clone, meta_key));

        return (
    match state.storage.get(&key).await {
        Ok(data) => (
            StatusCode::OK,
            [
                (header::CONTENT_TYPE, content_type),
                (HeaderName::from_static("docker-content-digest"), digest),
            ],
            [(
                header::CONTENT_TYPE,
                "application/vnd.docker.distribution.manifest.v2+json",
            )],
            data,
        )
            .into_response();
            .into_response(),
        Err(_) => StatusCode::NOT_FOUND.into_response(),
    }

    // Try upstream proxies
    tracing::debug!(
        upstreams_count = state.config.docker.upstreams.len(),
        "Trying upstream proxies"
    );
    for upstream in &state.config.docker.upstreams {
        tracing::debug!(upstream_url = %upstream.url, "Trying upstream");
        if let Ok((data, content_type)) = fetch_manifest_from_upstream(
            &state.http_client,
            &upstream.url,
            &name,
            &reference,
            &state.docker_auth,
            state.config.docker.proxy_timeout,
        )
        .await
        {
            state.metrics.record_download("docker");
            state.metrics.record_cache_miss();
            state.activity.push(ActivityEntry::new(
                ActionType::ProxyFetch,
                format!("{}:{}", name, reference),
                "docker",
                "PROXY",
            ));

            // Calculate digest for Docker-Content-Digest header
            use sha2::Digest;
            let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));

            // Cache manifest and create metadata (fire and forget)
            let storage = state.storage.clone();
            let key_clone = key.clone();
            let data_clone = data.clone();
            let name_clone = name.clone();
            let reference_clone = reference.clone();
            let digest_clone = digest.clone();
            tokio::spawn(async move {
                // Store manifest by tag and digest
                let _ = storage.put(&key_clone, &data_clone).await;
                let digest_key = format!("docker/{}/manifests/{}.json", name_clone, digest_clone);
                let _ = storage.put(&digest_key, &data_clone).await;

                // Extract and save metadata
                let metadata = extract_metadata(&data_clone, &storage, &name_clone).await;
                if let Ok(meta_json) = serde_json::to_vec(&metadata) {
                    let meta_key = format!(
                        "docker/{}/manifests/{}.meta.json",
                        name_clone, reference_clone
                    );
                    let _ = storage.put(&meta_key, &meta_json).await;

                    let digest_meta_key =
                        format!("docker/{}/manifests/{}.meta.json", name_clone, digest_clone);
                    let _ = storage.put(&digest_meta_key, &meta_json).await;
                }
            });

            state.repo_index.invalidate("docker");

            return (
                StatusCode::OK,
                [
                    (header::CONTENT_TYPE, content_type),
                    (HeaderName::from_static("docker-content-digest"), digest),
                ],
                Bytes::from(data),
            )
                .into_response();
        }
    }

    StatusCode::NOT_FOUND.into_response()
}
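Both manifest paths derive the Docker-Content-Digest header the same way: SHA-256 over the exact manifest bytes. A quick check of the format, assuming the sha2 crate as used above:

#[test]
fn content_digest_is_sha256_over_raw_bytes() {
    use sha2::Digest;

    let manifest = br#"{"schemaVersion":2}"#;
    let digest = format!("sha256:{:x}", sha2::Sha256::digest(manifest));

    // "sha256:" prefix plus 64 lowercase hex characters.
    assert!(digest.starts_with("sha256:"));
    assert_eq!(digest.len(), "sha256:".len() + 64);
}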
async fn put_manifest(
@@ -447,51 +159,23 @@ async fn put_manifest(
        return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
    }

    // Calculate digest
    use sha2::Digest;
    let digest = format!("sha256:{:x}", sha2::Sha256::digest(&body));

    // Store by tag/reference
    let key = format!("docker/{}/manifests/{}.json", name, reference);
    if state.storage.put(&key, &body).await.is_err() {
        return StatusCode::INTERNAL_SERVER_ERROR.into_response();
    match state.storage.put(&key, &body).await {
        Ok(()) => {
            use sha2::Digest;
            let digest = format!("sha256:{:x}", sha2::Sha256::digest(&body));
            let location = format!("/v2/{}/manifests/{}", name, reference);
            (
                StatusCode::CREATED,
                [
                    (header::LOCATION, location),
                    (HeaderName::from_static("docker-content-digest"), digest),
                ],
            )
                .into_response()
        }
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
    }

    // Also store by digest for direct digest lookups
    let digest_key = format!("docker/{}/manifests/{}.json", name, digest);
    if state.storage.put(&digest_key, &body).await.is_err() {
        return StatusCode::INTERNAL_SERVER_ERROR.into_response();
    }

    // Extract and save metadata
    let metadata = extract_metadata(&body, &state.storage, &name).await;
    let meta_key = format!("docker/{}/manifests/{}.meta.json", name, reference);
    if let Ok(meta_json) = serde_json::to_vec(&metadata) {
        let _ = state.storage.put(&meta_key, &meta_json).await;

        // Also save metadata by digest
        let digest_meta_key = format!("docker/{}/manifests/{}.meta.json", name, digest);
        let _ = state.storage.put(&digest_meta_key, &meta_json).await;
    }

    state.metrics.record_upload("docker");
    state.activity.push(ActivityEntry::new(
        ActionType::Push,
        format!("{}:{}", name, reference),
        "docker",
        "LOCAL",
    ));
    state.repo_index.invalidate("docker");

    let location = format!("/v2/{}/manifests/{}", name, reference);
    (
        StatusCode::CREATED,
        [
            (header::LOCATION, location),
            (HeaderName::from_static("docker-content-digest"), digest),
        ],
    )
        .into_response()
}

async fn list_tags(State(state): State<Arc<AppState>>, Path(name): Path<String>) -> Response {
@@ -511,396 +195,3 @@ async fn list_tags(State(state): State<Arc<AppState>>, Path(name): Path<String>)
        .collect();
    (StatusCode::OK, Json(json!({"name": name, "tags": tags}))).into_response()
}

// ============================================================================
// Namespace handlers (for two-segment names like library/alpine)
// These combine ns/name into a single name and delegate to the main handlers
// ============================================================================

async fn check_blob_ns(
    state: State<Arc<AppState>>,
    Path((ns, name, digest)): Path<(String, String, String)>,
) -> Response {
    let full_name = format!("{}/{}", ns, name);
    check_blob(state, Path((full_name, digest))).await
}

async fn download_blob_ns(
    state: State<Arc<AppState>>,
    Path((ns, name, digest)): Path<(String, String, String)>,
) -> Response {
    let full_name = format!("{}/{}", ns, name);
    download_blob(state, Path((full_name, digest))).await
}

async fn start_upload_ns(Path((ns, name)): Path<(String, String)>) -> Response {
    let full_name = format!("{}/{}", ns, name);
    start_upload(Path(full_name)).await
}

async fn patch_blob_ns(
    Path((ns, name, uuid)): Path<(String, String, String)>,
    body: Bytes,
) -> Response {
    let full_name = format!("{}/{}", ns, name);
    patch_blob(Path((full_name, uuid)), body).await
}

async fn upload_blob_ns(
    state: State<Arc<AppState>>,
    Path((ns, name, uuid)): Path<(String, String, String)>,
    query: axum::extract::Query<std::collections::HashMap<String, String>>,
    body: Bytes,
) -> Response {
    let full_name = format!("{}/{}", ns, name);
    upload_blob(state, Path((full_name, uuid)), query, body).await
}

async fn get_manifest_ns(
    state: State<Arc<AppState>>,
    Path((ns, name, reference)): Path<(String, String, String)>,
) -> Response {
    let full_name = format!("{}/{}", ns, name);
    get_manifest(state, Path((full_name, reference))).await
}

async fn put_manifest_ns(
    state: State<Arc<AppState>>,
    Path((ns, name, reference)): Path<(String, String, String)>,
    body: Bytes,
) -> Response {
    let full_name = format!("{}/{}", ns, name);
    put_manifest(state, Path((full_name, reference)), body).await
}

async fn list_tags_ns(
    state: State<Arc<AppState>>,
    Path((ns, name)): Path<(String, String)>,
) -> Response {
    let full_name = format!("{}/{}", ns, name);
    list_tags(state, Path(full_name)).await
}
/// Fetch a blob from an upstream Docker registry
async fn fetch_blob_from_upstream(
    client: &reqwest::Client,
    upstream_url: &str,
    name: &str,
    digest: &str,
    docker_auth: &DockerAuth,
    timeout: u64,
) -> Result<Vec<u8>, ()> {
    let url = format!(
        "{}/v2/{}/blobs/{}",
        upstream_url.trim_end_matches('/'),
        name,
        digest
    );

    // First try without auth
    let response = client
        .get(&url)
        .timeout(Duration::from_secs(timeout))
        .send()
        .await
        .map_err(|_| ())?;

    let response = if response.status() == reqwest::StatusCode::UNAUTHORIZED {
        // Get Www-Authenticate header and fetch token
        let www_auth = response
            .headers()
            .get("www-authenticate")
            .and_then(|v| v.to_str().ok())
            .map(String::from);

        if let Some(token) = docker_auth
            .get_token(upstream_url, name, www_auth.as_deref())
            .await
        {
            client
                .get(&url)
                .header("Authorization", format!("Bearer {}", token))
                .send()
                .await
                .map_err(|_| ())?
        } else {
            return Err(());
        }
    } else {
        response
    };

    if !response.status().is_success() {
        return Err(());
    }

    response.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
}

/// Fetch a manifest from an upstream Docker registry
/// Returns (manifest_bytes, content_type)
async fn fetch_manifest_from_upstream(
    client: &reqwest::Client,
    upstream_url: &str,
    name: &str,
    reference: &str,
    docker_auth: &DockerAuth,
    timeout: u64,
) -> Result<(Vec<u8>, String), ()> {
    let url = format!(
        "{}/v2/{}/manifests/{}",
        upstream_url.trim_end_matches('/'),
        name,
        reference
    );

    tracing::debug!(url = %url, "Fetching manifest from upstream");

    // Request with Accept header for manifest types
    let accept_header = "application/vnd.docker.distribution.manifest.v2+json, \
        application/vnd.docker.distribution.manifest.list.v2+json, \
        application/vnd.oci.image.manifest.v1+json, \
        application/vnd.oci.image.index.v1+json";

    // First try without auth
    let response = client
        .get(&url)
        .timeout(Duration::from_secs(timeout))
        .header("Accept", accept_header)
        .send()
        .await
        .map_err(|e| {
            tracing::error!(error = %e, url = %url, "Failed to send request to upstream");
        })?;

    tracing::debug!(status = %response.status(), "Initial upstream response");

    let response = if response.status() == reqwest::StatusCode::UNAUTHORIZED {
        // Get Www-Authenticate header and fetch token
        let www_auth = response
            .headers()
            .get("www-authenticate")
            .and_then(|v| v.to_str().ok())
            .map(String::from);

        tracing::debug!(www_auth = ?www_auth, "Got 401, fetching token");

        if let Some(token) = docker_auth
            .get_token(upstream_url, name, www_auth.as_deref())
            .await
        {
            tracing::debug!("Token acquired, retrying with auth");
            client
                .get(&url)
                .header("Accept", accept_header)
                .header("Authorization", format!("Bearer {}", token))
                .send()
                .await
                .map_err(|e| {
                    tracing::error!(error = %e, "Failed to send authenticated request");
                })?
        } else {
            tracing::error!("Failed to acquire token");
            return Err(());
        }
    } else {
        response
    };

    tracing::debug!(status = %response.status(), "Final upstream response");

    if !response.status().is_success() {
        tracing::warn!(status = %response.status(), "Upstream returned non-success status");
        return Err(());
    }

    let content_type = response
        .headers()
        .get("content-type")
        .and_then(|v| v.to_str().ok())
        .unwrap_or("application/vnd.docker.distribution.manifest.v2+json")
        .to_string();

    let bytes = response.bytes().await.map_err(|_| ())?;

    Ok((bytes.to_vec(), content_type))
}
/// Detect manifest media type from its JSON content
fn detect_manifest_media_type(data: &[u8]) -> String {
    // Try to parse as JSON and extract mediaType
    if let Ok(json) = serde_json::from_slice::<Value>(data) {
        if let Some(media_type) = json.get("mediaType").and_then(|v| v.as_str()) {
            return media_type.to_string();
        }

        // Check schemaVersion for older manifests
        if let Some(schema_version) = json.get("schemaVersion").and_then(|v| v.as_u64()) {
            if schema_version == 1 {
                return "application/vnd.docker.distribution.manifest.v1+json".to_string();
            }
            // schemaVersion 2 without mediaType is likely docker manifest v2
            if json.get("config").is_some() {
                return "application/vnd.docker.distribution.manifest.v2+json".to_string();
            }
            // If it has "manifests" array, it's an index/list
            if json.get("manifests").is_some() {
                return "application/vnd.oci.image.index.v1+json".to_string();
            }
        }
    }

    // Default fallback
    "application/vnd.docker.distribution.manifest.v2+json".to_string()
}
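Since detection falls through several shape checks, a table-style test makes the precedence concrete; the JSON bodies here are minimal made-up examples, not real manifests:

#[test]
fn media_type_detection_precedence() {
    // An explicit mediaType wins outright.
    let explicit = br#"{"mediaType":"application/vnd.oci.image.manifest.v1+json"}"#;
    assert_eq!(
        detect_manifest_media_type(explicit),
        "application/vnd.oci.image.manifest.v1+json"
    );

    // schemaVersion 1 is treated as a legacy v1 manifest.
    let v1 = br#"{"schemaVersion":1}"#;
    assert_eq!(
        detect_manifest_media_type(v1),
        "application/vnd.docker.distribution.manifest.v1+json"
    );

    // schemaVersion 2 with a "config" object is assumed to be Docker v2.
    let v2 = br#"{"schemaVersion":2,"config":{}}"#;
    assert_eq!(
        detect_manifest_media_type(v2),
        "application/vnd.docker.distribution.manifest.v2+json"
    );
}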
/// Extract metadata from a Docker manifest
/// Handles both single-arch manifests and multi-arch indexes
async fn extract_metadata(manifest: &[u8], storage: &Storage, name: &str) -> ImageMetadata {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();

    let mut metadata = ImageMetadata {
        push_timestamp: now,
        last_pulled: 0,
        downloads: 0,
        ..Default::default()
    };

    let Ok(json) = serde_json::from_slice::<Value>(manifest) else {
        return metadata;
    };

    // Check if this is a manifest list/index (multi-arch)
    if json.get("manifests").is_some() {
        // For multi-arch, extract info from the first platform manifest
        if let Some(manifests) = json.get("manifests").and_then(|m| m.as_array()) {
            // Sum sizes from all platform manifests
            let total_size: u64 = manifests
                .iter()
                .filter_map(|m| m.get("size").and_then(|s| s.as_u64()))
                .sum();
            metadata.size_bytes = total_size;

            // Get OS/arch from first platform (usually linux/amd64)
            if let Some(first) = manifests.first() {
                if let Some(platform) = first.get("platform") {
                    metadata.os = platform
                        .get("os")
                        .and_then(|v| v.as_str())
                        .unwrap_or("multi-arch")
                        .to_string();
                    metadata.arch = platform
                        .get("architecture")
                        .and_then(|v| v.as_str())
                        .unwrap_or("multi")
                        .to_string();
                    metadata.variant = platform
                        .get("variant")
                        .and_then(|v| v.as_str())
                        .map(String::from);
                }
            }
        }
        return metadata;
    }

    // Single-arch manifest - extract layers
    if let Some(layers) = json.get("layers").and_then(|l| l.as_array()) {
        let mut total_size: u64 = 0;
        for layer in layers {
            let digest = layer
                .get("digest")
                .and_then(|d| d.as_str())
                .unwrap_or("")
                .to_string();
            let size = layer.get("size").and_then(|s| s.as_u64()).unwrap_or(0);
            total_size += size;
            metadata.layers.push(LayerInfo { digest, size });
        }
        metadata.size_bytes = total_size;
    }

    // Try to get OS/arch from config blob
    if let Some(config) = json.get("config") {
        if let Some(config_digest) = config.get("digest").and_then(|d| d.as_str()) {
            let (os, arch, variant) = get_config_info(storage, name, config_digest).await;
            metadata.os = os;
            metadata.arch = arch;
            metadata.variant = variant;
        }
    }

    // If we couldn't get OS/arch, set defaults
    if metadata.os.is_empty() {
        metadata.os = "unknown".to_string();
    }
    if metadata.arch.is_empty() {
        metadata.arch = "unknown".to_string();
    }

    metadata
}
/// Get OS/arch information from a config blob
async fn get_config_info(
    storage: &Storage,
    name: &str,
    config_digest: &str,
) -> (String, String, Option<String>) {
    let key = format!("docker/{}/blobs/{}", name, config_digest);

    let Ok(data) = storage.get(&key).await else {
        return ("unknown".to_string(), "unknown".to_string(), None);
    };

    let Ok(config) = serde_json::from_slice::<Value>(&data) else {
        return ("unknown".to_string(), "unknown".to_string(), None);
    };

    let os = config
        .get("os")
        .and_then(|v| v.as_str())
        .unwrap_or("unknown")
        .to_string();

    let arch = config
        .get("architecture")
        .and_then(|v| v.as_str())
        .unwrap_or("unknown")
        .to_string();

    let variant = config
        .get("variant")
        .and_then(|v| v.as_str())
        .map(String::from);

    (os, arch, variant)
}

/// Update metadata when a manifest is pulled
/// Increments download counter and updates last_pulled timestamp
async fn update_metadata_on_pull(storage: Storage, meta_key: String) {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();

    // Try to read existing metadata
    let mut metadata = if let Ok(data) = storage.get(&meta_key).await {
        serde_json::from_slice::<ImageMetadata>(&data).unwrap_or_default()
    } else {
        ImageMetadata::default()
    };

    // Update pull stats
    metadata.downloads += 1;
    metadata.last_pulled = now;

    // Save back
    if let Ok(json) = serde_json::to_vec(&metadata) {
        let _ = storage.put(&meta_key, &json).await;
    }
}
@@ -1,195 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use parking_lot::RwLock;
use std::collections::HashMap;
use std::time::{Duration, Instant};

/// Cached Docker registry token
struct CachedToken {
    token: String,
    expires_at: Instant,
}

/// Docker registry authentication handler
/// Manages Bearer token acquisition and caching for upstream registries
pub struct DockerAuth {
    tokens: RwLock<HashMap<String, CachedToken>>,
    client: reqwest::Client,
}

impl DockerAuth {
    pub fn new(timeout: u64) -> Self {
        Self {
            tokens: RwLock::new(HashMap::new()),
            client: reqwest::Client::builder()
                .timeout(Duration::from_secs(timeout))
                .build()
                .unwrap_or_default(),
        }
    }

    /// Get a valid token for the given registry and repository scope
    /// Returns cached token if still valid, otherwise fetches a new one
    pub async fn get_token(
        &self,
        registry_url: &str,
        name: &str,
        www_authenticate: Option<&str>,
    ) -> Option<String> {
        let cache_key = format!("{}:{}", registry_url, name);

        // Check cache first
        {
            let tokens = self.tokens.read();
            if let Some(cached) = tokens.get(&cache_key) {
                if cached.expires_at > Instant::now() {
                    return Some(cached.token.clone());
                }
            }
        }

        // Need to fetch a new token
        let www_auth = www_authenticate?;
        let token = self.fetch_token(www_auth, name).await?;

        // Cache the token (default 5 minute expiry)
        {
            let mut tokens = self.tokens.write();
            tokens.insert(
                cache_key,
                CachedToken {
                    token: token.clone(),
                    expires_at: Instant::now() + Duration::from_secs(300),
                },
            );
        }

        Some(token)
    }
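    // A sketch of the intended call pattern, with the challenge value taken
    // from the doc-comment example below; a real caller would read it from an
    // actual 401 response rather than hard-coding it:
    //
    //     let challenge = r#"Bearer realm="https://auth.docker.io/token",service="registry.docker.io""#;
    //     // The first call hits the token endpoint; a repeat call for the
    //     // same (registry, repo) pair within the five-minute cache window
    //     // is answered from memory without a network round trip.
    //     let token = auth
    //         .get_token("https://registry-1.docker.io", "library/alpine", Some(challenge))
    //         .await;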
    /// Parse Www-Authenticate header and fetch token from auth server
    /// Format: Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/alpine:pull"
    async fn fetch_token(&self, www_authenticate: &str, name: &str) -> Option<String> {
        let params = parse_www_authenticate(www_authenticate)?;

        let realm = params.get("realm")?;
        let service = params.get("service").map(|s| s.as_str()).unwrap_or("");

        // Build token request URL
        let scope = format!("repository:{}:pull", name);
        let url = format!("{}?service={}&scope={}", realm, service, scope);

        tracing::debug!(url = %url, "Fetching auth token");

        let response = self.client.get(&url).send().await.ok()?;

        if !response.status().is_success() {
            tracing::warn!(status = %response.status(), "Token request failed");
            return None;
        }

        let json: serde_json::Value = response.json().await.ok()?;

        // Docker Hub returns "token", some registries return "access_token"
        json.get("token")
            .or_else(|| json.get("access_token"))
            .and_then(|v| v.as_str())
            .map(String::from)
    }

    /// Make an authenticated request to an upstream registry
    pub async fn fetch_with_auth(
        &self,
        url: &str,
        registry_url: &str,
        name: &str,
    ) -> Result<reqwest::Response, ()> {
        // First try without auth
        let response = self.client.get(url).send().await.map_err(|_| ())?;

        if response.status() == reqwest::StatusCode::UNAUTHORIZED {
            // Extract Www-Authenticate header
            let www_auth = response
                .headers()
                .get("www-authenticate")
                .and_then(|v| v.to_str().ok())
                .map(String::from);

            // Get token and retry
            if let Some(token) = self
                .get_token(registry_url, name, www_auth.as_deref())
                .await
            {
                return self
                    .client
                    .get(url)
                    .header("Authorization", format!("Bearer {}", token))
                    .send()
                    .await
                    .map_err(|_| ());
            }

            return Err(());
        }

        Ok(response)
    }
}

impl Default for DockerAuth {
    fn default() -> Self {
        Self::new(60)
    }
}

/// Parse Www-Authenticate header into key-value pairs
/// Example: Bearer realm="https://auth.docker.io/token",service="registry.docker.io"
fn parse_www_authenticate(header: &str) -> Option<HashMap<String, String>> {
    let header = header
        .strip_prefix("Bearer ")
        .or_else(|| header.strip_prefix("bearer "))?;

    let mut params = HashMap::new();

    for part in header.split(',') {
        let part = part.trim();
        if let Some((key, value)) = part.split_once('=') {
            let value = value.trim_matches('"');
            params.insert(key.to_string(), value.to_string());
        }
    }

    Some(params)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_www_authenticate() {
        let header = r#"Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/alpine:pull""#;
        let params = parse_www_authenticate(header).unwrap();

        assert_eq!(
            params.get("realm"),
            Some(&"https://auth.docker.io/token".to_string())
        );
        assert_eq!(
            params.get("service"),
            Some(&"registry.docker.io".to_string())
        );
    }

    #[test]
    fn test_parse_www_authenticate_lowercase() {
        let header = r#"bearer realm="https://ghcr.io/token",service="ghcr.io""#;
        let params = parse_www_authenticate(header).unwrap();

        assert_eq!(
            params.get("realm"),
            Some(&"https://ghcr.io/token".to_string())
        );
    }
}
@@ -1,7 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
    body::Bytes,
@@ -23,42 +19,18 @@ pub fn routes() -> Router<Arc<AppState>> {
}

async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    let key = format!("maven/{}", path);

    let artifact_name = path
        .split('/')
        .rev()
        .take(3)
        .collect::<Vec<_>>()
        .into_iter()
        .rev()
        .collect::<Vec<_>>()
        .join("/");

    // Try local storage first
    if let Ok(data) = state.storage.get(&key).await {
        state.metrics.record_download("maven");
        state.metrics.record_cache_hit();
        state.activity.push(ActivityEntry::new(
            ActionType::CacheHit,
            artifact_name,
            "maven",
            "CACHE",
        ));
        return with_content_type(&path, data).into_response();
    }

    // Try proxy servers
    for proxy_url in &state.config.maven.proxies {
        let url = format!("{}/{}", proxy_url.trim_end_matches('/'), path);

        match fetch_from_proxy(&state.http_client, &url, state.config.maven.proxy_timeout).await {
        match fetch_from_proxy(&url, state.config.maven.proxy_timeout).await {
            Ok(data) => {
                state.metrics.record_download("maven");
                state.metrics.record_cache_miss();
                state.activity.push(ActivityEntry::new(
                    ActionType::ProxyFetch,
                    artifact_name,
                    "maven",
                    "PROXY",
                ));

                // Cache in local storage (fire and forget)
                let storage = state.storage.clone();
                let key_clone = key.clone();
                let data_clone = data.clone();
@@ -66,8 +38,6 @@ async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>)
                    let _ = storage.put(&key_clone, &data_clone).await;
                });

                state.repo_index.invalidate("maven");

                return with_content_type(&path, data.into()).into_response();
            }
            Err(_) => continue,
@@ -83,45 +53,20 @@ async fn upload(
    body: Bytes,
) -> StatusCode {
    let key = format!("maven/{}", path);

    let artifact_name = path
        .split('/')
        .rev()
        .take(3)
        .collect::<Vec<_>>()
        .into_iter()
        .rev()
        .collect::<Vec<_>>()
        .join("/");

    match state.storage.put(&key, &body).await {
        Ok(()) => {
            state.metrics.record_upload("maven");
            state.activity.push(ActivityEntry::new(
                ActionType::Push,
                artifact_name,
                "maven",
                "LOCAL",
            ));
            state.repo_index.invalidate("maven");
            StatusCode::CREATED
        }
        Ok(()) => StatusCode::CREATED,
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
    }
}

async fn fetch_from_proxy(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
) -> Result<Vec<u8>, ()> {
    let response = client
        .get(url)
async fn fetch_from_proxy(url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(timeout_secs))
        .send()
        .await
        .build()
        .map_err(|_| ())?;

    let response = client.get(url).send().await.map_err(|_| ())?;

    if !response.status().is_success() {
        return Err(());
    }
@@ -1,18 +1,11 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

mod cargo_registry;
pub mod docker;
pub mod docker_auth;
mod docker;
mod maven;
mod npm;
mod pypi;
mod raw;

pub use cargo_registry::routes as cargo_routes;
pub use docker::routes as docker_routes;
pub use docker_auth::DockerAuth;
pub use maven::routes as maven_routes;
pub use npm::routes as npm_routes;
pub use pypi::routes as pypi_routes;
pub use raw::routes as raw_routes;
@@ -1,7 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
    body::Bytes,
@@ -19,6 +15,7 @@ pub fn routes() -> Router<Arc<AppState>> {
}

async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    // Determine if this is a tarball request or metadata request
    let is_tarball = path.contains("/-/");

    let key = if is_tarball {
@@ -32,43 +29,23 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
        format!("npm/{}/metadata.json", path)
    };

    let package_name = if is_tarball {
        path.split("/-/").next().unwrap_or(&path).to_string()
    } else {
        path.clone()
    };

    // Try local storage first
    if let Ok(data) = state.storage.get(&key).await {
        if is_tarball {
            state.metrics.record_download("npm");
            state.metrics.record_cache_hit();
            state.activity.push(ActivityEntry::new(
                ActionType::CacheHit,
                package_name,
                "npm",
                "CACHE",
            ));
        }
        return with_content_type(is_tarball, data).into_response();
    }

    // Try proxy if configured
    if let Some(proxy_url) = &state.config.npm.proxy {
        let url = format!("{}/{}", proxy_url.trim_end_matches('/'), path);

        if let Ok(data) =
            fetch_from_proxy(&state.http_client, &url, state.config.npm.proxy_timeout).await
        {
            if is_tarball {
                state.metrics.record_download("npm");
                state.metrics.record_cache_miss();
                state.activity.push(ActivityEntry::new(
                    ActionType::ProxyFetch,
                    package_name,
                    "npm",
                    "PROXY",
                ));
            }
        let url = if is_tarball {
            // Tarball URL: https://registry.npmjs.org/package/-/package-version.tgz
            format!("{}/{}", proxy_url.trim_end_matches('/'), path)
        } else {
            // Metadata URL: https://registry.npmjs.org/package
            format!("{}/{}", proxy_url.trim_end_matches('/'), path)
        };

        if let Ok(data) = fetch_from_proxy(&url, state.config.npm.proxy_timeout).await {
            // Cache in local storage (fire and forget)
            let storage = state.storage.clone();
            let key_clone = key.clone();
            let data_clone = data.clone();
@@ -76,10 +53,6 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
                let _ = storage.put(&key_clone, &data_clone).await;
            });

            if is_tarball {
                state.repo_index.invalidate("npm");
            }

            return with_content_type(is_tarball, data.into()).into_response();
        }
    }
@@ -87,18 +60,14 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
    StatusCode::NOT_FOUND.into_response()
}

async fn fetch_from_proxy(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
) -> Result<Vec<u8>, ()> {
    let response = client
        .get(url)
async fn fetch_from_proxy(url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(timeout_secs))
        .send()
        .await
        .build()
        .map_err(|_| ())?;

    let response = client.get(url).send().await.map_err(|_| ())?;

    if !response.status().is_success() {
        return Err(());
    }
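The tarball-versus-metadata split above hinges on npm's URL convention, where tarballs live under a "/-/" segment; a small sketch of the key derivation, using a made-up package path and assuming the (elided) tarball branch mirrors the request path:

#[test]
fn npm_storage_key_follows_url_shape() {
    // Tarball request: assumed to be stored under the request path verbatim.
    let tarball_path = "lodash/-/lodash-4.17.21.tgz";
    assert!(tarball_path.contains("/-/"));
    assert_eq!(
        format!("npm/{}", tarball_path),
        "npm/lodash/-/lodash-4.17.21.tgz"
    );

    // Metadata request: the package document goes under metadata.json.
    let meta_path = "lodash";
    assert!(!meta_path.contains("/-/"));
    assert_eq!(
        format!("npm/{}/metadata.json", meta_path),
        "npm/lodash/metadata.json"
    );
}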
@@ -1,328 +1,35 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
    extract::{Path, State},
    http::{header, StatusCode},
    response::{Html, IntoResponse, Response},
    extract::State,
    http::StatusCode,
    response::{Html, IntoResponse},
    routing::get,
    Router,
};
use std::sync::Arc;
use std::time::Duration;

pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
        .route("/simple/", get(list_packages))
        .route("/simple/{name}/", get(package_versions))
        .route("/simple/{name}/{filename}", get(download_file))
    Router::new().route("/simple/", get(list_packages))
}

/// List all packages (Simple API index)
async fn list_packages(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let keys = state.storage.list("pypi/").await;
    let mut packages = std::collections::HashSet::new();

    for key in keys {
        if let Some(pkg) = key.strip_prefix("pypi/").and_then(|k| k.split('/').next()) {
            if !pkg.is_empty() {
                packages.insert(pkg.to_string());
            }
            packages.insert(pkg.to_string());
        }
    }

    let mut html = String::from(
        "<!DOCTYPE html>\n<html><head><title>Simple Index</title></head><body><h1>Simple Index</h1>\n",
    );
    let mut html = String::from("<html><body><h1>Simple Index</h1>");
    let mut pkg_list: Vec<_> = packages.into_iter().collect();
    pkg_list.sort();

    for pkg in pkg_list {
        html.push_str(&format!("<a href=\"/simple/{}/\">{}</a><br>\n", pkg, pkg));
        html.push_str(&format!("<a href=\"/simple/{}/\">{}</a><br>", pkg, pkg));
    }
    html.push_str("</body></html>");

    (StatusCode::OK, Html(html))
}

/// List versions/files for a specific package
async fn package_versions(
    State(state): State<Arc<AppState>>,
    Path(name): Path<String>,
) -> Response {
    // Normalize package name (PEP 503)
    let normalized = normalize_name(&name);

    // Try to get local files first
    let prefix = format!("pypi/{}/", normalized);
    let keys = state.storage.list(&prefix).await;

    if !keys.is_empty() {
        // We have local files
        let mut html = format!(
            "<!DOCTYPE html>\n<html><head><title>Links for {}</title></head><body><h1>Links for {}</h1>\n",
            name, name
        );

        for key in &keys {
            if let Some(filename) = key.strip_prefix(&prefix) {
                if !filename.is_empty() {
                    html.push_str(&format!(
                        "<a href=\"/simple/{}/{}\">{}</a><br>\n",
                        normalized, filename, filename
                    ));
                }
            }
        }
        html.push_str("</body></html>");

        return (StatusCode::OK, Html(html)).into_response();
    }

    // Try proxy if configured
    if let Some(proxy_url) = &state.config.pypi.proxy {
        let url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);

        if let Ok(html) =
            fetch_package_page(&state.http_client, &url, state.config.pypi.proxy_timeout).await
        {
            // Rewrite URLs in the HTML to point to our registry
            let rewritten = rewrite_pypi_links(&html, &normalized);
            return (StatusCode::OK, Html(rewritten)).into_response();
        }
    }

    StatusCode::NOT_FOUND.into_response()
}

/// Download a specific file
async fn download_file(
    State(state): State<Arc<AppState>>,
    Path((name, filename)): Path<(String, String)>,
) -> Response {
    let normalized = normalize_name(&name);
    let key = format!("pypi/{}/{}", normalized, filename);

    // Try local storage first
    if let Ok(data) = state.storage.get(&key).await {
        state.metrics.record_download("pypi");
        state.metrics.record_cache_hit();
        state.activity.push(ActivityEntry::new(
            ActionType::CacheHit,
            format!("{}/{}", name, filename),
            "pypi",
            "CACHE",
        ));

        let content_type = if filename.ends_with(".whl") {
            "application/zip"
        } else if filename.ends_with(".tar.gz") || filename.ends_with(".tgz") {
            "application/gzip"
        } else {
            "application/octet-stream"
        };

        return (StatusCode::OK, [(header::CONTENT_TYPE, content_type)], data).into_response();
    }

    // Try proxy if configured
    if let Some(proxy_url) = &state.config.pypi.proxy {
        // First, fetch the package page to find the actual download URL
        let page_url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);

        if let Ok(html) = fetch_package_page(
            &state.http_client,
            &page_url,
            state.config.pypi.proxy_timeout,
        )
        .await
        {
            // Find the URL for this specific file
            if let Some(file_url) = find_file_url(&html, &filename) {
                if let Ok(data) = fetch_file(
                    &state.http_client,
                    &file_url,
                    state.config.pypi.proxy_timeout,
                )
                .await
                {
                    state.metrics.record_download("pypi");
                    state.metrics.record_cache_miss();
                    state.activity.push(ActivityEntry::new(
                        ActionType::ProxyFetch,
                        format!("{}/{}", name, filename),
                        "pypi",
                        "PROXY",
                    ));

                    // Cache in local storage
                    let storage = state.storage.clone();
                    let key_clone = key.clone();
                    let data_clone = data.clone();
                    tokio::spawn(async move {
                        let _ = storage.put(&key_clone, &data_clone).await;
                    });

                    state.repo_index.invalidate("pypi");

                    let content_type = if filename.ends_with(".whl") {
                        "application/zip"
                    } else if filename.ends_with(".tar.gz") || filename.ends_with(".tgz") {
                        "application/gzip"
                    } else {
                        "application/octet-stream"
                    };

                    return (StatusCode::OK, [(header::CONTENT_TYPE, content_type)], data)
                        .into_response();
                }
            }
        }
    }

    StatusCode::NOT_FOUND.into_response()
}

/// Normalize package name according to PEP 503
fn normalize_name(name: &str) -> String {
    name.to_lowercase().replace(['-', '_', '.'], "-")
}
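The normalization above lowercases and maps the three PEP 503 separator characters to "-"; note it maps character-for-character rather than collapsing runs, which full PEP 503 would. A quick sketch:

#[test]
fn pep503_normalization_examples() {
    assert_eq!(normalize_name("Pillow"), "pillow");
    assert_eq!(normalize_name("ruamel.yaml"), "ruamel-yaml");
    assert_eq!(normalize_name("Foo_Bar"), "foo-bar");
    // A separator run is mapped one-for-one, not collapsed to a single dash:
    assert_eq!(normalize_name("foo._bar"), "foo--bar");
}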
/// Fetch package page from upstream
async fn fetch_package_page(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
) -> Result<String, ()> {
    let response = client
        .get(url)
        .timeout(Duration::from_secs(timeout_secs))
        .header("Accept", "text/html")
        .send()
        .await
        .map_err(|_| ())?;

    if !response.status().is_success() {
        return Err(());
    }

    response.text().await.map_err(|_| ())
}

/// Fetch file from upstream
async fn fetch_file(client: &reqwest::Client, url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
    let response = client
        .get(url)
        .timeout(Duration::from_secs(timeout_secs))
        .send()
        .await
        .map_err(|_| ())?;

    if !response.status().is_success() {
        return Err(());
    }

    response.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
}

/// Rewrite PyPI links to point to our registry
fn rewrite_pypi_links(html: &str, package_name: &str) -> String {
    // Simple regex-free approach: find href="..." and rewrite
    let mut result = String::with_capacity(html.len());
    let mut remaining = html;

    while let Some(href_start) = remaining.find("href=\"") {
        result.push_str(&remaining[..href_start + 6]);
        remaining = &remaining[href_start + 6..];

        if let Some(href_end) = remaining.find('"') {
            let url = &remaining[..href_end];

            // Extract filename from URL
            if let Some(filename) = extract_filename(url) {
                // Rewrite to our local URL
                result.push_str(&format!("/simple/{}/{}", package_name, filename));
            } else {
                result.push_str(url);
            }

            remaining = &remaining[href_end..];
        }
    }
    result.push_str(remaining);

    // Remove data-core-metadata and data-dist-info-metadata attributes
    // as we don't serve .metadata files (PEP 658)
    let result = remove_attribute(&result, "data-core-metadata");
    remove_attribute(&result, "data-dist-info-metadata")
}
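A sketch of what the rewrite does to one anchor, using a made-up pythonhosted URL; the upstream host and hash fragment are replaced by a local Simple-API path:

#[test]
fn rewrites_upstream_href_to_local_simple_url() {
    let html = r#"<a href="https://files.pythonhosted.org/packages/ab/cd/requests-2.31.0.tar.gz#sha256=deadbeef">requests-2.31.0.tar.gz</a>"#;
    let out = rewrite_pypi_links(html, "requests");
    // The href now points at this registry's download route.
    assert!(out.contains(r#"href="/simple/requests/requests-2.31.0.tar.gz""#));
}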
/// Remove an HTML attribute from all tags
|
||||
fn remove_attribute(html: &str, attr_name: &str) -> String {
|
||||
let mut result = String::with_capacity(html.len());
|
||||
let mut remaining = html;
|
||||
let pattern = format!(" {}=\"", attr_name);
|
||||
|
||||
while let Some(attr_start) = remaining.find(&pattern) {
|
||||
result.push_str(&remaining[..attr_start]);
|
||||
remaining = &remaining[attr_start + pattern.len()..];
|
||||
|
||||
// Skip the attribute value
|
||||
if let Some(attr_end) = remaining.find('"') {
|
||||
remaining = &remaining[attr_end + 1..];
|
||||
}
|
||||
}
|
||||
result.push_str(remaining);
|
||||
result
|
||||
}
|
||||
|
||||
/// Extract filename from PyPI download URL
|
||||
fn extract_filename(url: &str) -> Option<&str> {
|
||||
// PyPI URLs look like:
|
||||
// https://files.pythonhosted.org/packages/.../package-1.0.0.tar.gz#sha256=...
|
||||
// or just the filename directly
|
||||
|
||||
// Remove hash fragment
|
||||
let url = url.split('#').next()?;
|
||||
|
||||
// Get the last path component
|
||||
let filename = url.rsplit('/').next()?;
|
||||
|
||||
// Must be a valid package file
|
||||
if filename.ends_with(".tar.gz")
|
||||
|| filename.ends_with(".tgz")
|
||||
|| filename.ends_with(".whl")
|
||||
|| filename.ends_with(".zip")
|
||||
|| filename.ends_with(".egg")
|
||||
{
|
||||
Some(filename)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
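// Hedged examples for extract_filename (URLs invented for illustration): the
// hash fragment is dropped and only recognized package suffixes survive.
#[cfg(test)]
mod extract_filename_sketch {
    use super::extract_filename;

    #[test]
    fn strips_fragment_and_filters_suffixes() {
        let url = "https://files.pythonhosted.org/packages/a/b/pkg-1.0.0.tar.gz#sha256=abc";
        assert_eq!(extract_filename(url), Some("pkg-1.0.0.tar.gz"));
        assert_eq!(extract_filename("https://example.invalid/index.html"), None);
    }
}
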
/// Find the download URL for a specific file in the HTML
fn find_file_url(html: &str, target_filename: &str) -> Option<String> {
    let mut remaining = html;

    while let Some(href_start) = remaining.find("href=\"") {
        remaining = &remaining[href_start + 6..];

        if let Some(href_end) = remaining.find('"') {
            let url = &remaining[..href_end];

            if let Some(filename) = extract_filename(url) {
                if filename == target_filename {
                    // Remove hash fragment for actual download
                    return Some(url.split('#').next().unwrap_or(url).to_string());
                }
            }

            remaining = &remaining[href_end..];
        }
    }

    None
}

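// Hedged illustration of rewrite_pypi_links on a made-up "simple" page
// fragment: upstream download links become local /simple/ URLs.
#[cfg(test)]
mod rewrite_links_sketch {
    use super::rewrite_pypi_links;

    #[test]
    fn rewrites_upstream_links_to_local() {
        let html = r#"<a href="https://files.pythonhosted.org/packages/a/b/requests-2.31.0.tar.gz#sha256=abc">requests-2.31.0.tar.gz</a>"#;
        let out = rewrite_pypi_links(html, "requests");
        assert!(out.contains(r#"href="/simple/requests/requests-2.31.0.tar.gz""#));
    }
}
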
@@ -1,136 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use crate::activity_log::{ActionType, ActivityEntry};
use crate::AppState;
use axum::{
    body::Bytes,
    extract::{Path, State},
    http::{header, StatusCode},
    response::{IntoResponse, Response},
    routing::get,
    Router,
};
use std::sync::Arc;

pub fn routes() -> Router<Arc<AppState>> {
    Router::new().route(
        "/raw/{*path}",
        get(download)
            .put(upload)
            .delete(delete_file)
            .head(check_exists),
    )
}

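// Hedged usage sketch for the raw endpoint (host, port, and paths invented);
// the status codes follow the handlers below.
//
//   curl -T build.tar.gz http://localhost:8080/raw/builds/build.tar.gz  # PUT    -> 201
//   curl http://localhost:8080/raw/builds/build.tar.gz                  # GET    -> 200 + body
//   curl -I http://localhost:8080/raw/builds/build.tar.gz               # HEAD   -> 200 + length/type
//   curl -X DELETE http://localhost:8080/raw/builds/build.tar.gz        # DELETE -> 204
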
async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }

    let key = format!("raw/{}", path);
    match state.storage.get(&key).await {
        Ok(data) => {
            state.metrics.record_download("raw");
            state
                .activity
                .push(ActivityEntry::new(ActionType::Pull, path, "raw", "LOCAL"));

            // Guess content type from extension
            let content_type = guess_content_type(&key);
            (StatusCode::OK, [(header::CONTENT_TYPE, content_type)], data).into_response()
        }
        Err(_) => StatusCode::NOT_FOUND.into_response(),
    }
}

async fn upload(
    State(state): State<Arc<AppState>>,
    Path(path): Path<String>,
    body: Bytes,
) -> Response {
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }

    // Check file size limit
    if body.len() as u64 > state.config.raw.max_file_size {
        return (
            StatusCode::PAYLOAD_TOO_LARGE,
            format!(
                "File too large. Max size: {} bytes",
                state.config.raw.max_file_size
            ),
        )
            .into_response();
    }

    let key = format!("raw/{}", path);
    match state.storage.put(&key, &body).await {
        Ok(()) => {
            state.metrics.record_upload("raw");
            state
                .activity
                .push(ActivityEntry::new(ActionType::Push, path, "raw", "LOCAL"));
            StatusCode::CREATED.into_response()
        }
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
    }
}

async fn delete_file(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }

    let key = format!("raw/{}", path);
    match state.storage.delete(&key).await {
        Ok(()) => StatusCode::NO_CONTENT.into_response(),
        Err(crate::storage::StorageError::NotFound) => StatusCode::NOT_FOUND.into_response(),
        Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
    }
}

async fn check_exists(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    if !state.config.raw.enabled {
        return StatusCode::NOT_FOUND.into_response();
    }

    let key = format!("raw/{}", path);
    match state.storage.stat(&key).await {
        Some(meta) => (
            StatusCode::OK,
            [
                (header::CONTENT_LENGTH, meta.size.to_string()),
                (header::CONTENT_TYPE, guess_content_type(&key).to_string()),
            ],
        )
            .into_response(),
        None => StatusCode::NOT_FOUND.into_response(),
    }
}

fn guess_content_type(path: &str) -> &'static str {
    let ext = path.rsplit('.').next().unwrap_or("");
    match ext.to_lowercase().as_str() {
        "json" => "application/json",
        "xml" => "application/xml",
        "html" | "htm" => "text/html",
        "css" => "text/css",
        "js" => "application/javascript",
        "txt" => "text/plain",
        "md" => "text/markdown",
        "yaml" | "yml" => "application/x-yaml",
        "toml" => "application/toml",
        "tar" => "application/x-tar",
        "gz" | "gzip" => "application/gzip",
        "zip" => "application/zip",
        "png" => "image/png",
        "jpg" | "jpeg" => "image/jpeg",
        "gif" => "image/gif",
        "svg" => "image/svg+xml",
        "pdf" => "application/pdf",
        "wasm" => "application/wasm",
        _ => "application/octet-stream",
    }
}
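
// Hedged examples for guess_content_type (paths invented): the extension is
// whatever follows the last dot, and unknown extensions fall through to the
// octet-stream default.
#[cfg(test)]
mod content_type_sketch {
    use super::guess_content_type;

    #[test]
    fn maps_common_extensions() {
        assert_eq!(guess_content_type("raw/docs/readme.md"), "text/markdown");
        assert_eq!(guess_content_type("raw/archive.tar"), "application/x-tar");
        assert_eq!(guess_content_type("raw/blob.unknown"), "application/octet-stream");
    }
}
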
@@ -1,351 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! In-memory repository index with lazy rebuild on invalidation.
//!
//! Design (optimized for efficiency):
//! - Rebuild happens ONLY on write operations, not TTL
//! - Double-checked locking prevents duplicate rebuilds
//! - Arc<Vec> for zero-cost reads
//! - Single rebuild at a time per registry (rebuild_lock)

use crate::storage::Storage;
use crate::ui::components::format_timestamp;
use parking_lot::RwLock;
use serde::Serialize;
use std::collections::HashMap;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use tokio::sync::Mutex as AsyncMutex;
use tracing::info;

/// Repository info for UI display
#[derive(Debug, Clone, Serialize)]
pub struct RepoInfo {
    pub name: String,
    pub versions: usize,
    pub size: u64,
    pub updated: String,
}

/// Index for a single registry type
pub struct RegistryIndex {
    data: RwLock<Arc<Vec<RepoInfo>>>,
    dirty: AtomicBool,
    rebuild_lock: AsyncMutex<()>,
}

impl RegistryIndex {
    pub fn new() -> Self {
        Self {
            data: RwLock::new(Arc::new(Vec::new())),
            dirty: AtomicBool::new(true),
            rebuild_lock: AsyncMutex::new(()),
        }
    }

    /// Mark index as needing rebuild
    pub fn invalidate(&self) {
        self.dirty.store(true, Ordering::Release);
    }

    fn is_dirty(&self) -> bool {
        self.dirty.load(Ordering::Acquire)
    }

    fn get_cached(&self) -> Arc<Vec<RepoInfo>> {
        Arc::clone(&self.data.read())
    }

    fn set(&self, data: Vec<RepoInfo>) {
        *self.data.write() = Arc::new(data);
        self.dirty.store(false, Ordering::Release);
    }

    pub fn count(&self) -> usize {
        self.data.read().len()
    }
}

impl Default for RegistryIndex {
    fn default() -> Self {
        Self::new()
    }
}

/// Main repository index for all registries
pub struct RepoIndex {
    pub docker: RegistryIndex,
    pub maven: RegistryIndex,
    pub npm: RegistryIndex,
    pub cargo: RegistryIndex,
    pub pypi: RegistryIndex,
}

impl RepoIndex {
    pub fn new() -> Self {
        Self {
            docker: RegistryIndex::new(),
            maven: RegistryIndex::new(),
            npm: RegistryIndex::new(),
            cargo: RegistryIndex::new(),
            pypi: RegistryIndex::new(),
        }
    }

    /// Invalidate a specific registry index
    pub fn invalidate(&self, registry: &str) {
        match registry {
            "docker" => self.docker.invalidate(),
            "maven" => self.maven.invalidate(),
            "npm" => self.npm.invalidate(),
            "cargo" => self.cargo.invalidate(),
            "pypi" => self.pypi.invalidate(),
            _ => {}
        }
    }

    /// Get index with double-checked locking (prevents race condition)
    pub async fn get(&self, registry: &str, storage: &Storage) -> Arc<Vec<RepoInfo>> {
        let index = match registry {
            "docker" => &self.docker,
            "maven" => &self.maven,
            "npm" => &self.npm,
            "cargo" => &self.cargo,
            "pypi" => &self.pypi,
            _ => return Arc::new(Vec::new()),
        };

        // Fast path: not dirty, return cached
        if !index.is_dirty() {
            return index.get_cached();
        }

        // Slow path: acquire rebuild lock (only one task rebuilds)
        let _guard = index.rebuild_lock.lock().await;

        // Double-check under lock (another task may have rebuilt)
        if index.is_dirty() {
            let data = match registry {
                "docker" => build_docker_index(storage).await,
                "maven" => build_maven_index(storage).await,
                "npm" => build_npm_index(storage).await,
                "cargo" => build_cargo_index(storage).await,
                "pypi" => build_pypi_index(storage).await,
                _ => Vec::new(),
            };
            info!(registry = registry, count = data.len(), "Index rebuilt");
            index.set(data);
        }

        index.get_cached()
    }

    /// Get counts for stats (no rebuild, just current state)
    pub fn counts(&self) -> (usize, usize, usize, usize, usize) {
        (
            self.docker.count(),
            self.maven.count(),
            self.npm.count(),
            self.cargo.count(),
            self.pypi.count(),
        )
    }
}

impl Default for RepoIndex {
    fn default() -> Self {
        Self::new()
    }
}

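// Hedged lifecycle sketch (handler context invented): writers mark the index
// dirty, the next reader pays for one rebuild, and later readers get a cheap
// Arc clone of the cached Vec until the next invalidation.
//
//   state.repo_index.invalidate("npm");                              // after an upload
//   let first = state.repo_index.get("npm", &state.storage).await;   // rebuilds once
//   let second = state.repo_index.get("npm", &state.storage).await;  // cached, no I/O
//   assert!(Arc::ptr_eq(&first, &second));
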
// ============================================================================
// Index builders
// ============================================================================

async fn build_docker_index(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("docker/").await;
    let mut repos: HashMap<String, (usize, u64, u64)> = HashMap::new();

    for key in &keys {
        if key.ends_with(".meta.json") {
            continue;
        }

        if let Some(rest) = key.strip_prefix("docker/") {
            let parts: Vec<_> = rest.split('/').collect();
            if parts.len() >= 3 && parts[1] == "manifests" && key.ends_with(".json") {
                let name = parts[0].to_string();
                let entry = repos.entry(name).or_insert((0, 0, 0));
                entry.0 += 1;

                if let Ok(data) = storage.get(key).await {
                    if let Ok(m) = serde_json::from_slice::<serde_json::Value>(&data) {
                        let cfg = m
                            .get("config")
                            .and_then(|c| c.get("size"))
                            .and_then(|s| s.as_u64())
                            .unwrap_or(0);
                        let layers: u64 = m
                            .get("layers")
                            .and_then(|l| l.as_array())
                            .map(|arr| {
                                arr.iter()
                                    .filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
                                    .sum()
                            })
                            .unwrap_or(0);
                        entry.1 += cfg + layers;
                    }
                }

                if let Some(meta) = storage.stat(key).await {
                    if meta.modified > entry.2 {
                        entry.2 = meta.modified;
                    }
                }
            }
        }
    }

    to_sorted_vec(repos)
}

async fn build_maven_index(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("maven/").await;
    let mut repos: HashMap<String, (usize, u64, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("maven/") {
            let parts: Vec<_> = rest.split('/').collect();
            if parts.len() >= 2 {
                let path = parts[..parts.len() - 1].join("/");
                let entry = repos.entry(path).or_insert((0, 0, 0));
                entry.0 += 1;

                if let Some(meta) = storage.stat(key).await {
                    entry.1 += meta.size;
                    if meta.modified > entry.2 {
                        entry.2 = meta.modified;
                    }
                }
            }
        }
    }

    to_sorted_vec(repos)
}

async fn build_npm_index(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("npm/").await;
    let mut packages: HashMap<String, (usize, u64, u64)> = HashMap::new();

    // Count tarballs directly; cheaper than parsing each metadata.json
    for key in &keys {
        if let Some(rest) = key.strip_prefix("npm/") {
            // Pattern: npm/{package}/tarballs/{file}.tgz
            if rest.contains("/tarballs/") && key.ends_with(".tgz") {
                let parts: Vec<_> = rest.split('/').collect();
                if !parts.is_empty() {
                    let name = parts[0].to_string();
                    let entry = packages.entry(name).or_insert((0, 0, 0));
                    entry.0 += 1;

                    if let Some(meta) = storage.stat(key).await {
                        entry.1 += meta.size;
                        if meta.modified > entry.2 {
                            entry.2 = meta.modified;
                        }
                    }
                }
            }
        }
    }

    to_sorted_vec(packages)
}

async fn build_cargo_index(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("cargo/").await;
    let mut crates: HashMap<String, (usize, u64, u64)> = HashMap::new();

    for key in &keys {
        if key.ends_with(".crate") {
            if let Some(rest) = key.strip_prefix("cargo/") {
                let parts: Vec<_> = rest.split('/').collect();
                if !parts.is_empty() {
                    let name = parts[0].to_string();
                    let entry = crates.entry(name).or_insert((0, 0, 0));
                    entry.0 += 1;

                    if let Some(meta) = storage.stat(key).await {
                        entry.1 += meta.size;
                        if meta.modified > entry.2 {
                            entry.2 = meta.modified;
                        }
                    }
                }
            }
        }
    }

    to_sorted_vec(crates)
}

async fn build_pypi_index(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("pypi/").await;
    let mut packages: HashMap<String, (usize, u64, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("pypi/") {
            let parts: Vec<_> = rest.split('/').collect();
            if parts.len() >= 2 {
                let name = parts[0].to_string();
                let entry = packages.entry(name).or_insert((0, 0, 0));
                entry.0 += 1;

                if let Some(meta) = storage.stat(key).await {
                    entry.1 += meta.size;
                    if meta.modified > entry.2 {
                        entry.2 = meta.modified;
                    }
                }
            }
        }
    }

    to_sorted_vec(packages)
}

/// Convert HashMap to sorted Vec<RepoInfo>
fn to_sorted_vec(map: HashMap<String, (usize, u64, u64)>) -> Vec<RepoInfo> {
    let mut result: Vec<_> = map
        .into_iter()
        .map(|(name, (versions, size, modified))| RepoInfo {
            name,
            versions,
            size,
            updated: if modified > 0 {
                format_timestamp(modified)
            } else {
                "N/A".to_string()
            },
        })
        .collect();

    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}

/// Pagination helper
pub fn paginate<T: Clone>(data: &[T], page: usize, limit: usize) -> (Vec<T>, usize) {
    let total = data.len();
    let start = page.saturating_sub(1) * limit;

    if start >= total {
        return (Vec::new(), total);
    }

    let end = (start + limit).min(total);
    (data[start..end].to_vec(), total)
}
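
// Hedged usage sketch for paginate(): pages are 1-based, and a page past the
// end yields an empty slice while still reporting the true total.
#[cfg(test)]
mod paginate_sketch {
    use super::paginate;

    #[test]
    fn pages_are_one_based() {
        let data: Vec<u32> = (0..10).collect();
        let (page2, total) = paginate(&data, 2, 4);
        assert_eq!((page2, total), (vec![4, 5, 6, 7], 10));
        let (past_end, total) = paginate(&data, 4, 4);
        assert!(past_end.is_empty());
        assert_eq!(total, 10);
    }
}
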
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Request ID middleware for request tracking and correlation
//!
//! Generates a unique ID for each request that can be used for:

@@ -1,130 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Environment variables secrets provider
//!
//! Reads secrets from environment variables. This is the default provider
//! following 12-Factor App principles.

use std::env;

use super::{SecretsError, SecretsProvider};
use crate::secrets::protected::ProtectedString;
use async_trait::async_trait;

/// Environment variables secrets provider
///
/// Reads secrets from environment variables.
/// Optionally clears variables after reading for extra security.
#[derive(Debug, Clone)]
pub struct EnvProvider {
    /// Clear environment variables after reading
    clear_after_read: bool,
}

impl EnvProvider {
    /// Create a new environment provider
    pub fn new() -> Self {
        Self {
            clear_after_read: false,
        }
    }

    /// Create a provider that clears env vars after reading
    ///
    /// This prevents secrets from being visible in `/proc/<pid>/environ`
    pub fn with_clear_after_read(mut self) -> Self {
        self.clear_after_read = true;
        self
    }
}

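// Hedged usage sketch (variable name invented): with clear-after-read the
// secret survives only inside the ProtectedString, not in the process env.
//
//   let provider = EnvProvider::new().with_clear_after_read();
//   let token = provider.get_secret("NORA_ADMIN_TOKEN").await?;
//   assert!(std::env::var("NORA_ADMIN_TOKEN").is_err());
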
impl Default for EnvProvider {
    fn default() -> Self {
        Self::new()
    }
}

#[async_trait]
impl SecretsProvider for EnvProvider {
    async fn get_secret(&self, key: &str) -> Result<ProtectedString, SecretsError> {
        let value = env::var(key).map_err(|_| SecretsError::NotFound(key.to_string()))?;

        if self.clear_after_read {
            env::remove_var(key);
        }

        Ok(ProtectedString::new(value))
    }

    async fn get_secret_optional(&self, key: &str) -> Option<ProtectedString> {
        env::var(key).ok().map(|v| {
            if self.clear_after_read {
                env::remove_var(key);
            }
            ProtectedString::new(v)
        })
    }

    fn provider_name(&self) -> &'static str {
        "env"
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_get_secret_exists() {
        env::set_var("TEST_SECRET_123", "secret-value");
        let provider = EnvProvider::new();
        let secret = provider.get_secret("TEST_SECRET_123").await.unwrap();
        assert_eq!(secret.expose(), "secret-value");
        env::remove_var("TEST_SECRET_123");
    }

    #[tokio::test]
    async fn test_get_secret_not_found() {
        let provider = EnvProvider::new();
        let result = provider.get_secret("NONEXISTENT_VAR_XYZ").await;
        assert!(matches!(result, Err(SecretsError::NotFound(_))));
    }

    #[tokio::test]
    async fn test_get_secret_optional_exists() {
        env::set_var("TEST_OPTIONAL_123", "optional-value");
        let provider = EnvProvider::new();
        let secret = provider.get_secret_optional("TEST_OPTIONAL_123").await;
        assert!(secret.is_some());
        assert_eq!(secret.unwrap().expose(), "optional-value");
        env::remove_var("TEST_OPTIONAL_123");
    }

    #[tokio::test]
    async fn test_get_secret_optional_not_found() {
        let provider = EnvProvider::new();
        let secret = provider
            .get_secret_optional("NONEXISTENT_OPTIONAL_XYZ")
            .await;
        assert!(secret.is_none());
    }

    #[tokio::test]
    async fn test_clear_after_read() {
        env::set_var("TEST_CLEAR_123", "to-be-cleared");
        let provider = EnvProvider::new().with_clear_after_read();

        let secret = provider.get_secret("TEST_CLEAR_123").await.unwrap();
        assert_eq!(secret.expose(), "to-be-cleared");

        // Variable should be cleared
        assert!(env::var("TEST_CLEAR_123").is_err());
    }

    #[test]
    fn test_provider_name() {
        let provider = EnvProvider::new();
        assert_eq!(provider.provider_name(), "env");
    }
}
@@ -1,169 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

#![allow(dead_code)] // Foundational code for future S3/Vault integration

//! Secrets management for NORA
//!
//! Provides a trait-based architecture for secrets providers:
//! - `env` - Environment variables (default, 12-Factor App)
//! - `aws-secrets` - AWS Secrets Manager (v0.4.0+)
//! - `vault` - HashiCorp Vault (v0.5.0+)
//! - `k8s` - Kubernetes Secrets (v0.4.0+)
//!
//! # Example
//!
//! ```rust,ignore
//! use nora::secrets::{create_secrets_provider, SecretsConfig};
//!
//! let config = SecretsConfig::default(); // Uses ENV provider
//! let provider = create_secrets_provider(&config)?;
//!
//! let api_key = provider.get_secret("API_KEY").await?;
//! println!("Got secret (redacted): {:?}", api_key);
//! ```

mod env;
pub mod protected;

pub use env::EnvProvider;
#[allow(unused_imports)]
pub use protected::{ProtectedString, S3Credentials};

use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use thiserror::Error;

/// Secrets provider error
#[derive(Debug, Error)]
pub enum SecretsError {
    #[error("Secret not found: {0}")]
    NotFound(String),

    #[error("Provider error: {0}")]
    Provider(String),

    #[error("Configuration error: {0}")]
    Config(String),

    #[error("Unsupported provider: {0}")]
    UnsupportedProvider(String),
}

/// Secrets provider trait
///
/// Implement this trait to add new secrets backends.
#[async_trait]
pub trait SecretsProvider: Send + Sync {
    /// Get a secret by key (required)
    async fn get_secret(&self, key: &str) -> Result<ProtectedString, SecretsError>;

    /// Get a secret by key (optional, returns None if not found)
    async fn get_secret_optional(&self, key: &str) -> Option<ProtectedString> {
        self.get_secret(key).await.ok()
    }

    /// Get provider name for logging
    fn provider_name(&self) -> &'static str;
}

/// Secrets configuration
///
/// # Example config.toml
///
/// ```toml
/// [secrets]
/// provider = "env"
/// clear_env = false
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SecretsConfig {
    /// Provider type: "env", "aws-secrets", "vault", "k8s"
    #[serde(default = "default_provider")]
    pub provider: String,

    /// Clear environment variables after reading (for env provider)
    #[serde(default)]
    pub clear_env: bool,
}

fn default_provider() -> String {
    "env".to_string()
}

impl Default for SecretsConfig {
    fn default() -> Self {
        Self {
            provider: default_provider(),
            clear_env: false,
        }
    }
}

/// Create a secrets provider based on configuration
///
/// Currently supports:
/// - `env` - Environment variables (default)
///
/// Future versions will add:
/// - `aws-secrets` - AWS Secrets Manager
/// - `vault` - HashiCorp Vault
/// - `k8s` - Kubernetes Secrets
pub fn create_secrets_provider(
    config: &SecretsConfig,
) -> Result<Box<dyn SecretsProvider>, SecretsError> {
    match config.provider.as_str() {
        "env" => {
            let mut provider = EnvProvider::new();
            if config.clear_env {
                provider = provider.with_clear_after_read();
            }
            Ok(Box::new(provider))
        }
        // Future providers:
        // "aws-secrets" => { ... }
        // "vault" => { ... }
        // "k8s" => { ... }
        other => Err(SecretsError::UnsupportedProvider(other.to_string())),
    }
}

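// Hedged extension sketch (FileProvider is invented, not part of NORA): a new
// backend only needs the SecretsProvider trait plus a match arm above.
//
//   struct FileProvider { dir: std::path::PathBuf }
//
//   #[async_trait]
//   impl SecretsProvider for FileProvider {
//       async fn get_secret(&self, key: &str) -> Result<ProtectedString, SecretsError> {
//           let value = tokio::fs::read_to_string(self.dir.join(key))
//               .await
//               .map_err(|_| SecretsError::NotFound(key.to_string()))?;
//           Ok(ProtectedString::new(value.trim_end().to_string()))
//       }
//
//       fn provider_name(&self) -> &'static str {
//           "file"
//       }
//   }
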
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_default_config() {
        let config = SecretsConfig::default();
        assert_eq!(config.provider, "env");
        assert!(!config.clear_env);
    }

    #[test]
    fn test_create_env_provider() {
        let config = SecretsConfig::default();
        let provider = create_secrets_provider(&config).unwrap();
        assert_eq!(provider.provider_name(), "env");
    }

    #[test]
    fn test_create_unsupported_provider() {
        let config = SecretsConfig {
            provider: "unknown".to_string(),
            clear_env: false,
        };
        let result = create_secrets_provider(&config);
        assert!(matches!(result, Err(SecretsError::UnsupportedProvider(_))));
    }

    #[test]
    fn test_config_from_toml() {
        let toml = r#"
            provider = "env"
            clear_env = true
        "#;
        let config: SecretsConfig = toml::from_str(toml).unwrap();
        assert_eq!(config.provider, "env");
        assert!(config.clear_env);
    }
}
@@ -1,155 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Protected secret types with memory safety
//!
//! Secrets are automatically zeroed on drop and redacted in Debug output.

use std::fmt;
use zeroize::{Zeroize, Zeroizing};

/// A protected secret string that is zeroed on drop
///
/// - Implements Zeroize: memory is overwritten with zeros when dropped
/// - Debug shows `***REDACTED***` instead of actual value
/// - Clone creates a new protected copy
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct ProtectedString {
    inner: String,
}

impl ProtectedString {
    /// Create a new protected string
    pub fn new(value: String) -> Self {
        Self { inner: value }
    }

    /// Get the secret value (use sparingly!)
    pub fn expose(&self) -> &str {
        &self.inner
    }

    /// Consume and return the inner value
    ///
    /// The clone is needed because `self` implements Drop and cannot be moved
    /// out of; the original buffer is zeroed when `self` drops here.
    pub fn into_inner(self) -> Zeroizing<String> {
        Zeroizing::new(self.inner.clone())
    }

    /// Check if the secret is empty
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }
}

impl fmt::Debug for ProtectedString {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ProtectedString")
            .field("value", &"***REDACTED***")
            .finish()
    }
}

impl fmt::Display for ProtectedString {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "***REDACTED***")
    }
}

impl From<String> for ProtectedString {
    fn from(value: String) -> Self {
        Self::new(value)
    }
}

impl From<&str> for ProtectedString {
    fn from(value: &str) -> Self {
        Self::new(value.to_string())
    }
}

/// S3 credentials with protected secrets
#[derive(Clone, Zeroize)]
#[zeroize(drop)]
pub struct S3Credentials {
    #[zeroize(skip)] // access_key_id is not sensitive
    pub access_key_id: String,
    pub secret_access_key: ProtectedString,
    pub region: Option<String>,
}

impl S3Credentials {
    pub fn new(access_key_id: String, secret_access_key: String) -> Self {
        Self {
            access_key_id,
            secret_access_key: ProtectedString::new(secret_access_key),
            region: None,
        }
    }

    pub fn with_region(mut self, region: String) -> Self {
        self.region = Some(region);
        self
    }
}

impl fmt::Debug for S3Credentials {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("S3Credentials")
            .field("access_key_id", &self.access_key_id)
            .field("secret_access_key", &"***REDACTED***")
            .field("region", &self.region)
            .finish()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_protected_string_redacted_debug() {
        let secret = ProtectedString::new("super-secret-value".to_string());
        let debug_output = format!("{:?}", secret);
        assert!(debug_output.contains("REDACTED"));
        assert!(!debug_output.contains("super-secret-value"));
    }

    #[test]
    fn test_protected_string_redacted_display() {
        let secret = ProtectedString::new("super-secret-value".to_string());
        let display_output = format!("{}", secret);
        assert_eq!(display_output, "***REDACTED***");
    }

    #[test]
    fn test_protected_string_expose() {
        let secret = ProtectedString::new("my-secret".to_string());
        assert_eq!(secret.expose(), "my-secret");
    }

    #[test]
    fn test_s3_credentials_redacted_debug() {
        let creds = S3Credentials::new(
            "AKIAIOSFODNN7EXAMPLE".to_string(),
            "wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY".to_string(),
        );
        let debug_output = format!("{:?}", creds);
        assert!(debug_output.contains("AKIAIOSFODNN7EXAMPLE"));
        assert!(!debug_output.contains("wJalrXUtnFEMI"));
        assert!(debug_output.contains("REDACTED"));
    }

    #[test]
    fn test_protected_string_from_str() {
        let secret: ProtectedString = "test".into();
        assert_eq!(secret.expose(), "test");
    }

    #[test]
    fn test_protected_string_is_empty() {
        let empty = ProtectedString::new(String::new());
        let non_empty = ProtectedString::new("secret".to_string());
        assert!(empty.is_empty());
        assert!(!non_empty.is_empty());
    }
}
@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use async_trait::async_trait;
use axum::body::Bytes;
use std::path::PathBuf;
@@ -88,20 +85,6 @@ impl StorageBackend for LocalStorage {
        Ok(Bytes::from(buffer))
    }

    async fn delete(&self, key: &str) -> Result<()> {
        let path = self.key_to_path(key);

        if !path.exists() {
            return Err(StorageError::NotFound);
        }

        fs::remove_file(&path)
            .await
            .map_err(|e| StorageError::Io(e.to_string()))?;

        Ok(())
    }

    async fn list(&self, prefix: &str) -> Vec<String> {
        let base = self.base_path.clone();
        let prefix = prefix.to_string();

@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

mod local;
mod s3;

@@ -42,7 +39,6 @@ pub type Result<T> = std::result::Result<T, StorageError>;
pub trait StorageBackend: Send + Sync {
    async fn put(&self, key: &str, data: &[u8]) -> Result<()>;
    async fn get(&self, key: &str) -> Result<Bytes>;
    async fn delete(&self, key: &str) -> Result<()>;
    async fn list(&self, prefix: &str) -> Vec<String>;
    async fn stat(&self, key: &str) -> Option<FileMeta>;
    async fn health_check(&self) -> bool;
@@ -62,17 +58,9 @@ impl Storage {
        }
    }

    pub fn new_s3(
        s3_url: &str,
        bucket: &str,
        region: &str,
        access_key: Option<&str>,
        secret_key: Option<&str>,
    ) -> Self {
    pub fn new_s3(s3_url: &str, bucket: &str) -> Self {
        Self {
            inner: Arc::new(S3Storage::new(
                s3_url, bucket, region, access_key, secret_key,
            )),
            inner: Arc::new(S3Storage::new(s3_url, bucket)),
        }
    }

@@ -86,11 +74,6 @@ impl Storage {
        self.inner.get(key).await
    }

    pub async fn delete(&self, key: &str) -> Result<()> {
        validate_storage_key(key)?;
        self.inner.delete(key).await
    }

    pub async fn list(&self, prefix: &str) -> Vec<String> {
        // Empty prefix is valid for listing all
        if !prefix.is_empty() && validate_storage_key(prefix).is_err() {

@@ -1,146 +1,24 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use async_trait::async_trait;
use axum::body::Bytes;
use chrono::Utc;
use hmac::{Hmac, Mac};
use sha2::{Digest, Sha256};

use super::{FileMeta, Result, StorageBackend, StorageError};

type HmacSha256 = Hmac<Sha256>;

/// S3-compatible storage backend (MinIO, AWS S3)
pub struct S3Storage {
    s3_url: String,
    bucket: String,
    region: String,
    access_key: Option<String>,
    secret_key: Option<String>,
    client: reqwest::Client,
}

impl S3Storage {
    /// Create new S3 storage with optional credentials
    pub fn new(
        s3_url: &str,
        bucket: &str,
        region: &str,
        access_key: Option<&str>,
        secret_key: Option<&str>,
    ) -> Self {
    pub fn new(s3_url: &str, bucket: &str) -> Self {
        Self {
            s3_url: s3_url.trim_end_matches('/').to_string(),
            s3_url: s3_url.to_string(),
            bucket: bucket.to_string(),
            region: region.to_string(),
            access_key: access_key.map(String::from),
            secret_key: secret_key.map(String::from),
            client: reqwest::Client::new(),
        }
    }

    /// Sign a request using AWS Signature v4
    fn sign_request(
        &self,
        method: &str,
        path: &str,
        payload_hash: &str,
        timestamp: &str,
        date: &str,
    ) -> Option<String> {
        let (access_key, secret_key) = match (&self.access_key, &self.secret_key) {
            (Some(ak), Some(sk)) => (ak.as_str(), sk.as_str()),
            _ => return None,
        };

        // Parse host from URL
        let host = self
            .s3_url
            .trim_start_matches("http://")
            .trim_start_matches("https://");

        // Canonical request
        // URI must be URL-encoded (except /)
        let encoded_path = uri_encode(path);
        let canonical_uri = format!("/{}/{}", self.bucket, encoded_path);
        let canonical_query = "";
        let canonical_headers = format!(
            "host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
            host, payload_hash, timestamp
        );
        let signed_headers = "host;x-amz-content-sha256;x-amz-date";

        // AWS Signature v4 canonical request format:
        // HTTPMethod\nCanonicalURI\nCanonicalQueryString\nCanonicalHeaders\n\nSignedHeaders\nHashedPayload
        // Note: CanonicalHeaders already ends with \n, plus blank line before SignedHeaders
        let canonical_request = format!(
            "{}\n{}\n{}\n{}\n{}\n{}",
            method, canonical_uri, canonical_query, canonical_headers, signed_headers, payload_hash
        );

        let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));

        // String to sign
        let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
        let string_to_sign = format!(
            "AWS4-HMAC-SHA256\n{}\n{}\n{}",
            timestamp, credential_scope, canonical_request_hash
        );

        // Calculate signature
        let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
        let k_region = hmac_sha256(&k_date, self.region.as_bytes());
        let k_service = hmac_sha256(&k_region, b"s3");
        let k_signing = hmac_sha256(&k_service, b"aws4_request");
        let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));

        // Authorization header
        Some(format!(
            "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
            access_key, credential_scope, signed_headers, signature
        ))
    }

    /// Make a signed request
    async fn signed_request(
        &self,
        method: reqwest::Method,
        key: &str,
        body: Option<&[u8]>,
    ) -> std::result::Result<reqwest::Response, StorageError> {
        let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
        let now = Utc::now();
        let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
        let date = now.format("%Y%m%d").to_string();

        let payload_hash = match body {
            Some(data) => hex::encode(Sha256::digest(data)),
            None => hex::encode(Sha256::digest(b"")),
        };

        let mut request = self
            .client
            .request(method.clone(), &url)
            .header("x-amz-date", &timestamp)
            .header("x-amz-content-sha256", &payload_hash);

        if let Some(auth) =
            self.sign_request(method.as_str(), key, &payload_hash, &timestamp, &date)
        {
            request = request.header("Authorization", auth);
        }

        if let Some(data) = body {
            request = request.body(data.to_vec());
        }

        request
            .send()
            .await
            .map_err(|e| StorageError::Network(e.to_string()))
    }

    fn parse_s3_keys(xml: &str, prefix: &str) -> Vec<String> {
        xml.split("<Key>")
            .filter_map(|part| part.split("</Key>").next())
@@ -150,34 +28,17 @@ impl S3Storage {
    }
}

/// URL-encode a string for S3 canonical URI (encode all except A-Za-z0-9-_.~/)
fn uri_encode(s: &str) -> String {
    let mut result = String::with_capacity(s.len() * 3);
    for c in s.chars() {
        match c {
            'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '_' | '.' | '~' | '/' => result.push(c),
            _ => {
                for b in c.to_string().as_bytes() {
                    result.push_str(&format!("%{:02X}", b));
                }
            }
        }
    }
    result
}

fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
    let mut mac = HmacSha256::new_from_slice(key).expect("HMAC can take key of any size");
    mac.update(data);
    mac.finalize().into_bytes().to_vec()
}

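// Hedged check of the key-derivation chain used in sign_request above,
// against the signing-key example published in AWS's Signature Version 4
// documentation (date 20150830, region us-east-1, service "iam").
#[cfg(test)]
mod sigv4_key_sketch {
    use super::hmac_sha256;

    #[test]
    fn derives_documented_signing_key() {
        let secret = "wJalrXUtnFEMI/K7MDENG+bPxRfiCYEXAMPLEKEY";
        let k_date = hmac_sha256(format!("AWS4{}", secret).as_bytes(), b"20150830");
        let k_region = hmac_sha256(&k_date, b"us-east-1");
        let k_service = hmac_sha256(&k_region, b"iam");
        let k_signing = hmac_sha256(&k_service, b"aws4_request");
        assert_eq!(
            hex::encode(k_signing),
            "c4afb1cc5771d871763a393e44b703571b55cc28424d1a5e86da6ed3c154a4b9"
        );
    }
}
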
#[async_trait]
impl StorageBackend for S3Storage {
    async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
        let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
        let response = self
            .signed_request(reqwest::Method::PUT, key, Some(data))
            .await?;
            .client
            .put(&url)
            .body(data.to_vec())
            .send()
            .await
            .map_err(|e| StorageError::Network(e.to_string()))?;

        if response.status().is_success() {
            Ok(())
@@ -190,7 +51,13 @@ impl StorageBackend for S3Storage {
    }

    async fn get(&self, key: &str) -> Result<Bytes> {
        let response = self.signed_request(reqwest::Method::GET, key, None).await?;
        let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
        let response = self
            .client
            .get(&url)
            .send()
            .await
            .map_err(|e| StorageError::Network(e.to_string()))?;

        if response.status().is_success() {
            response
@@ -207,77 +74,9 @@ impl StorageBackend for S3Storage {
        }
    }

    async fn delete(&self, key: &str) -> Result<()> {
        let response = self
            .signed_request(reqwest::Method::DELETE, key, None)
            .await?;

        if response.status().is_success() || response.status().as_u16() == 204 {
            Ok(())
        } else if response.status().as_u16() == 404 {
            Err(StorageError::NotFound)
        } else {
            Err(StorageError::Network(format!(
                "DELETE failed: {}",
                response.status()
            )))
        }
    }

    async fn list(&self, prefix: &str) -> Vec<String> {
        // For listing, we need to make a request to the bucket
        let url = format!("{}/{}", self.s3_url, self.bucket);
        let now = Utc::now();
        let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
        let date = now.format("%Y%m%d").to_string();
        let payload_hash = hex::encode(Sha256::digest(b""));

        let host = self
            .s3_url
            .trim_start_matches("http://")
            .trim_start_matches("https://");

        let mut request = self
            .client
            .get(&url)
            .header("x-amz-date", &timestamp)
            .header("x-amz-content-sha256", &payload_hash);

        // Sign for bucket listing (different path)
        if let (Some(access_key), Some(secret_key)) = (&self.access_key, &self.secret_key) {
            let canonical_uri = format!("/{}", self.bucket);
            let canonical_headers = format!(
                "host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
                host, payload_hash, timestamp
            );
            let signed_headers = "host;x-amz-content-sha256;x-amz-date";

            let canonical_request = format!(
                "GET\n{}\n\n{}\n{}\n{}",
                canonical_uri, canonical_headers, signed_headers, payload_hash
            );

            let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
            let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
            let string_to_sign = format!(
                "AWS4-HMAC-SHA256\n{}\n{}\n{}",
                timestamp, credential_scope, canonical_request_hash
            );

            let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
            let k_region = hmac_sha256(&k_date, self.region.as_bytes());
            let k_service = hmac_sha256(&k_region, b"s3");
            let k_signing = hmac_sha256(&k_service, b"aws4_request");
            let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));

            let auth = format!(
                "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
                access_key, credential_scope, signed_headers, signature
            );
            request = request.header("Authorization", auth);
        }

        match request.send().await {
        match self.client.get(&url).send().await {
            Ok(response) if response.status().is_success() => {
                if let Ok(xml) = response.text().await {
                    Self::parse_s3_keys(&xml, prefix)
@@ -290,22 +89,18 @@ impl StorageBackend for S3Storage {
    }

    async fn stat(&self, key: &str) -> Option<FileMeta> {
        let response = self
            .signed_request(reqwest::Method::HEAD, key, None)
            .await
            .ok()?;

        let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
        let response = self.client.head(&url).send().await.ok()?;
        if !response.status().is_success() {
            return None;
        }

        let size = response
            .headers()
            .get("content-length")
            .and_then(|v| v.to_str().ok())
            .and_then(|v| v.parse().ok())
            .unwrap_or(0);

        // S3 sends a Last-Modified header; fall back to 0 if it is missing or unparsable
        let modified = response
            .headers()
            .get("last-modified")
@@ -317,63 +112,12 @@ impl StorageBackend for S3Storage {
                    .as_secs()
            })
            .unwrap_or(0);

        Some(FileMeta { size, modified })
    }

    async fn health_check(&self) -> bool {
        // Try HEAD on the bucket
        let url = format!("{}/{}", self.s3_url, self.bucket);
        let now = Utc::now();
        let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
        let date = now.format("%Y%m%d").to_string();
        let payload_hash = hex::encode(Sha256::digest(b""));

        let host = self
            .s3_url
            .trim_start_matches("http://")
            .trim_start_matches("https://");

        let mut request = self
            .client
            .head(&url)
            .header("x-amz-date", &timestamp)
            .header("x-amz-content-sha256", &payload_hash);

        if let (Some(access_key), Some(secret_key)) = (&self.access_key, &self.secret_key) {
            let canonical_uri = format!("/{}", self.bucket);
            let canonical_headers = format!(
                "host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
                host, payload_hash, timestamp
            );
            let signed_headers = "host;x-amz-content-sha256;x-amz-date";

            let canonical_request = format!(
                "HEAD\n{}\n\n{}\n{}\n{}",
                canonical_uri, canonical_headers, signed_headers, payload_hash
            );

            let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
            let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
            let string_to_sign = format!(
                "AWS4-HMAC-SHA256\n{}\n{}\n{}",
                timestamp, credential_scope, canonical_request_hash
            );

            let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
            let k_region = hmac_sha256(&k_date, self.region.as_bytes());
            let k_service = hmac_sha256(&k_region, b"s3");
            let k_signing = hmac_sha256(&k_service, b"aws4_request");
            let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));

            let auth = format!(
                "AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
                access_key, credential_scope, signed_headers, signature
            );
            request = request.header("Authorization", auth);
        }

        match request.send().await {
        match self.client.head(&url).send().await {
            Ok(response) => response.status().is_success() || response.status().as_u16() == 404,
            Err(_) => false,
        }
@@ -387,28 +131,173 @@ impl StorageBackend for S3Storage {
#[cfg(test)]
mod tests {
    use super::*;
    use wiremock::matchers::{method, path};
    use wiremock::{Mock, MockServer, ResponseTemplate};

    #[test]
    fn test_backend_name() {
        let storage = S3Storage::new(
            "http://localhost:9000",
            "test-bucket",
            "us-east-1",
            Some("access"),
            Some("secret"),
        );
        assert_eq!(storage.backend_name(), "s3");
    #[tokio::test]
    async fn test_put_success() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("PUT"))
            .and(path("/test-bucket/test-key"))
            .respond_with(ResponseTemplate::new(200))
            .mount(&mock_server)
            .await;

        let result = storage.put("test-key", b"data").await;
        assert!(result.is_ok());
    }

    #[tokio::test]
    async fn test_put_failure() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("PUT"))
            .and(path("/test-bucket/test-key"))
            .respond_with(ResponseTemplate::new(500))
            .mount(&mock_server)
            .await;

        let result = storage.put("test-key", b"data").await;
        assert!(matches!(result, Err(StorageError::Network(_))));
    }

    #[tokio::test]
    async fn test_get_success() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("GET"))
            .and(path("/test-bucket/test-key"))
            .respond_with(ResponseTemplate::new(200).set_body_bytes(b"test data".to_vec()))
            .mount(&mock_server)
            .await;

        let data = storage.get("test-key").await.unwrap();
        assert_eq!(&*data, b"test data");
    }

    #[tokio::test]
    async fn test_get_not_found() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("GET"))
            .and(path("/test-bucket/missing"))
            .respond_with(ResponseTemplate::new(404))
            .mount(&mock_server)
            .await;

        let result = storage.get("missing").await;
        assert!(matches!(result, Err(StorageError::NotFound)));
    }

    #[tokio::test]
    async fn test_list() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        let xml_response = r#"<?xml version="1.0"?>
<ListBucketResult>
<Key>docker/image1</Key>
<Key>docker/image2</Key>
<Key>maven/artifact</Key>
</ListBucketResult>"#;

        Mock::given(method("GET"))
            .and(path("/test-bucket"))
            .respond_with(ResponseTemplate::new(200).set_body_string(xml_response))
            .mount(&mock_server)
            .await;

        let keys = storage.list("docker/").await;
        assert_eq!(keys.len(), 2);
        assert!(keys.iter().all(|k| k.starts_with("docker/")));
    }

    #[tokio::test]
    async fn test_stat_success() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("HEAD"))
            .and(path("/test-bucket/test-key"))
            .respond_with(
                ResponseTemplate::new(200)
                    .insert_header("content-length", "1234")
                    .insert_header("last-modified", "Sun, 06 Nov 1994 08:49:37 GMT"),
            )
            .mount(&mock_server)
            .await;

        let meta = storage.stat("test-key").await.unwrap();
        assert_eq!(meta.size, 1234);
        assert!(meta.modified > 0);
    }

    #[tokio::test]
    async fn test_stat_not_found() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("HEAD"))
            .and(path("/test-bucket/missing"))
            .respond_with(ResponseTemplate::new(404))
            .mount(&mock_server)
            .await;

        let meta = storage.stat("missing").await;
        assert!(meta.is_none());
    }

    #[tokio::test]
    async fn test_health_check_healthy() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("HEAD"))
            .and(path("/test-bucket"))
            .respond_with(ResponseTemplate::new(200))
            .mount(&mock_server)
            .await;

        assert!(storage.health_check().await);
    }

    #[tokio::test]
    async fn test_health_check_bucket_not_found_is_ok() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("HEAD"))
            .and(path("/test-bucket"))
            .respond_with(ResponseTemplate::new(404))
            .mount(&mock_server)
            .await;

        // 404 is OK for health check (bucket may be empty)
        assert!(storage.health_check().await);
    }

    #[tokio::test]
    async fn test_health_check_server_error() {
        let mock_server = MockServer::start().await;
        let storage = S3Storage::new(&mock_server.uri(), "test-bucket");

        Mock::given(method("HEAD"))
            .and(path("/test-bucket"))
            .respond_with(ResponseTemplate::new(500))
            .mount(&mock_server)
            .await;

        assert!(!storage.health_check().await);
    }

    #[test]
    fn test_s3_storage_creation_anonymous() {
        let storage = S3Storage::new(
            "http://localhost:9000",
            "test-bucket",
            "us-east-1",
            None,
            None,
        );
    fn test_backend_name() {
        let storage = S3Storage::new("http://localhost:9000", "bucket");
        assert_eq!(storage.backend_name(), "s3");
    }

@@ -418,10 +307,4 @@ mod tests {
        let keys = S3Storage::parse_s3_keys(xml, "docker/");
        assert_eq!(keys, vec!["docker/a", "docker/b"]);
    }

    #[test]
    fn test_hmac_sha256() {
        let result = hmac_sha256(b"key", b"data");
        assert!(!result.is_empty());
    }
}

@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::fs;

@@ -1,10 +1,5 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use super::components::{format_size, format_timestamp, html_escape};
use super::templates::encode_uri_component;
use crate::activity_log::ActivityEntry;
use crate::repo_index::RepoInfo;
use crate::AppState;
use crate::Storage;
use axum::{
@@ -13,7 +8,6 @@ use axum::{
};
use serde::{Deserialize, Serialize};
use std::collections::{HashMap, HashSet};
use std::sync::atomic::Ordering;
use std::sync::Arc;

#[derive(Serialize)]
@@ -25,17 +19,19 @@ pub struct RegistryStats {
    pub pypi: usize,
}

#[derive(Serialize, Clone)]
pub struct RepoInfo {
    pub name: String,
    pub versions: usize,
    pub size: u64,
    pub updated: String,
}

#[derive(Serialize)]
pub struct TagInfo {
    pub name: String,
    pub size: u64,
    pub created: String,
    pub downloads: u64,
    pub last_pulled: Option<String>,
    pub os: String,
    pub arch: String,
    pub layers_count: usize,
    pub pull_command: String,
}

#[derive(Serialize)]
@@ -71,174 +67,26 @@ pub struct SearchQuery {
    pub q: Option<String>,
}

#[derive(Serialize)]
pub struct DashboardResponse {
    pub global_stats: GlobalStats,
    pub registry_stats: Vec<RegistryCardStats>,
    pub mount_points: Vec<MountPoint>,
    pub activity: Vec<ActivityEntry>,
    pub uptime_seconds: u64,
}

#[derive(Serialize)]
pub struct GlobalStats {
    pub downloads: u64,
    pub uploads: u64,
    pub artifacts: u64,
    pub cache_hit_percent: f64,
    pub storage_bytes: u64,
}

#[derive(Serialize)]
pub struct RegistryCardStats {
    pub name: String,
    pub artifact_count: usize,
    pub downloads: u64,
    pub uploads: u64,
    pub size_bytes: u64,
}

#[derive(Serialize)]
pub struct MountPoint {
    pub registry: String,
    pub mount_path: String,
    pub proxy_upstream: Option<String>,
}

// ============ API Handlers ============

pub async fn api_stats(State(state): State<Arc<AppState>>) -> Json<RegistryStats> {
    // Trigger index rebuild if needed, then get counts
    let _ = state.repo_index.get("docker", &state.storage).await;
    let _ = state.repo_index.get("maven", &state.storage).await;
    let _ = state.repo_index.get("npm", &state.storage).await;
    let _ = state.repo_index.get("cargo", &state.storage).await;
    let _ = state.repo_index.get("pypi", &state.storage).await;

    let (docker, maven, npm, cargo, pypi) = state.repo_index.counts();
    Json(RegistryStats {
        docker,
        maven,
        npm,
        cargo,
        pypi,
    })
}
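
// Hedged sketch of the JSON shape api_stats returns (counts invented):
//
//   { "docker": 12, "maven": 4, "npm": 9, "cargo": 2, "pypi": 7 }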
|
||||
pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<DashboardResponse> {
|
||||
// Get indexes (will rebuild if dirty)
|
||||
let docker_repos = state.repo_index.get("docker", &state.storage).await;
|
||||
let maven_repos = state.repo_index.get("maven", &state.storage).await;
|
||||
let npm_repos = state.repo_index.get("npm", &state.storage).await;
|
||||
let cargo_repos = state.repo_index.get("cargo", &state.storage).await;
|
||||
let pypi_repos = state.repo_index.get("pypi", &state.storage).await;
|
||||
|
||||
// Calculate sizes from cached index
|
||||
let docker_size: u64 = docker_repos.iter().map(|r| r.size).sum();
|
||||
let maven_size: u64 = maven_repos.iter().map(|r| r.size).sum();
|
||||
let npm_size: u64 = npm_repos.iter().map(|r| r.size).sum();
|
||||
let cargo_size: u64 = cargo_repos.iter().map(|r| r.size).sum();
|
||||
let pypi_size: u64 = pypi_repos.iter().map(|r| r.size).sum();
|
||||
let total_storage = docker_size + maven_size + npm_size + cargo_size + pypi_size;
|
||||
|
||||
let total_artifacts = docker_repos.len()
|
||||
+ maven_repos.len()
|
||||
+ npm_repos.len()
|
||||
+ cargo_repos.len()
|
||||
+ pypi_repos.len();
|
||||
|
||||
let global_stats = GlobalStats {
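        // Relaxed ordering is enough here: the counters are independent and are read only for display.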
        downloads: state.metrics.downloads.load(Ordering::Relaxed),
        uploads: state.metrics.uploads.load(Ordering::Relaxed),
        artifacts: total_artifacts as u64,
        cache_hit_percent: state.metrics.cache_hit_rate(),
        storage_bytes: total_storage,
    };

    let registry_card_stats = vec![
        RegistryCardStats {
            name: "docker".to_string(),
            artifact_count: docker_repos.len(),
            downloads: state.metrics.get_registry_downloads("docker"),
            uploads: state.metrics.get_registry_uploads("docker"),
            size_bytes: docker_size,
        },
        RegistryCardStats {
            name: "maven".to_string(),
            artifact_count: maven_repos.len(),
            downloads: state.metrics.get_registry_downloads("maven"),
            uploads: state.metrics.get_registry_uploads("maven"),
            size_bytes: maven_size,
        },
        RegistryCardStats {
            name: "npm".to_string(),
            artifact_count: npm_repos.len(),
            downloads: state.metrics.get_registry_downloads("npm"),
            uploads: 0,
            size_bytes: npm_size,
        },
        RegistryCardStats {
            name: "cargo".to_string(),
            artifact_count: cargo_repos.len(),
            downloads: state.metrics.get_registry_downloads("cargo"),
            uploads: 0,
            size_bytes: cargo_size,
        },
        RegistryCardStats {
            name: "pypi".to_string(),
            artifact_count: pypi_repos.len(),
            downloads: state.metrics.get_registry_downloads("pypi"),
            uploads: 0,
            size_bytes: pypi_size,
        },
    ];

    let mount_points = vec![
        MountPoint {
            registry: "Docker".to_string(),
            mount_path: "/v2/".to_string(),
            proxy_upstream: None,
        },
        MountPoint {
            registry: "Maven".to_string(),
            mount_path: "/maven2/".to_string(),
            proxy_upstream: state.config.maven.proxies.first().cloned(),
        },
        MountPoint {
            registry: "npm".to_string(),
            mount_path: "/npm/".to_string(),
            proxy_upstream: state.config.npm.proxy.clone(),
        },
        MountPoint {
            registry: "Cargo".to_string(),
            mount_path: "/cargo/".to_string(),
            proxy_upstream: None,
        },
        MountPoint {
            registry: "PyPI".to_string(),
            mount_path: "/simple/".to_string(),
            proxy_upstream: state.config.pypi.proxy.clone(),
        },
    ];

    let activity = state.activity.recent(20);
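    // 20 mirrors the "Last 20 events" caption shown in the activity-log panel.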
    let uptime_seconds = state.start_time.elapsed().as_secs();

    Json(DashboardResponse {
        global_stats,
        registry_stats: registry_card_stats,
        mount_points,
        activity,
        uptime_seconds,
    })
    let stats = get_registry_stats(&state.storage).await;
    Json(stats)
}

pub async fn api_list(
    State(state): State<Arc<AppState>>,
    Path(registry_type): Path<String>,
) -> Json<Vec<RepoInfo>> {
    let repos = state.repo_index.get(&registry_type, &state.storage).await;
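    // The index hands back a shared snapshot; clone the Vec so the JSON response owns its data.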
    Json((*repos).clone())
    let repos = match registry_type.as_str() {
        "docker" => get_docker_repos(&state.storage).await,
        "maven" => get_maven_repos(&state.storage).await,
        "npm" => get_npm_packages(&state.storage).await,
        "cargo" => get_cargo_crates(&state.storage).await,
        "pypi" => get_pypi_packages(&state.storage).await,
        _ => vec![],
    };
    Json(repos)
}

pub async fn api_detail(
@@ -247,7 +95,7 @@ pub async fn api_detail(
) -> Json<serde_json::Value> {
    match registry_type.as_str() {
        "docker" => {
            let detail = get_docker_detail(&state, &name).await;
            let detail = get_docker_detail(&state.storage, &name).await;
            Json(serde_json::to_value(detail).unwrap_or_default())
        }
        "npm" => {
@@ -269,13 +117,20 @@ pub async fn api_search(
) -> axum::response::Html<String> {
    let query = params.q.unwrap_or_default().to_lowercase();

    let repos = state.repo_index.get(&registry_type, &state.storage).await;
    let repos = match registry_type.as_str() {
        "docker" => get_docker_repos(&state.storage).await,
        "maven" => get_maven_repos(&state.storage).await,
        "npm" => get_npm_packages(&state.storage).await,
        "cargo" => get_cargo_crates(&state.storage).await,
        "pypi" => get_pypi_packages(&state.storage).await,
        _ => vec![],
    };

    let filtered: Vec<&RepoInfo> = if query.is_empty() {
        repos.iter().collect()
    let filtered: Vec<_> = if query.is_empty() {
        repos
    } else {
        repos
            .iter()
            .into_iter()
            .filter(|r| r.name.to_lowercase().contains(&query))
            .collect()
    };
@@ -320,9 +175,7 @@ pub async fn api_search(
}

// ============ Data Fetching Functions ============
// NOTE: Legacy functions below - kept for reference, will be removed in future cleanup

#[allow(dead_code)]
pub async fn get_registry_stats(storage: &Storage) -> RegistryStats {
    let all_keys = storage.list("").await;

@@ -374,18 +227,12 @@ pub async fn get_registry_stats(storage: &Storage) -> RegistryStats {
    }
}

#[allow(dead_code)]
pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("docker/").await;

    let mut repos: HashMap<String, (RepoInfo, u64)> = HashMap::new(); // (info, latest_modified)

    for key in &keys {
        // Skip .meta.json files
        if key.ends_with(".meta.json") {
            continue;
        }

        if let Some(rest) = key.strip_prefix("docker/") {
            let parts: Vec<_> = rest.split('/').collect();
            if parts.len() >= 3 {
@@ -402,35 +249,10 @@ pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
                    )
                });

                if parts[1] == "manifests" && key.ends_with(".json") {
                if parts[1] == "manifests" {
                    entry.0.versions += 1;

                    // Parse manifest to get actual image size (config + layers)
                    if let Ok(manifest_data) = storage.get(key).await {
                        if let Ok(manifest) =
                            serde_json::from_slice::<serde_json::Value>(&manifest_data)
                        {
                            let config_size = manifest
                                .get("config")
                                .and_then(|c| c.get("size"))
                                .and_then(|s| s.as_u64())
                                .unwrap_or(0);
                            let layers_size: u64 = manifest
                                .get("layers")
                                .and_then(|l| l.as_array())
                                .map(|layers| {
                                    layers
                                        .iter()
                                        .filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
                                        .sum()
                                })
                                .unwrap_or(0);
                            entry.0.size += config_size + layers_size;
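                            // Sizes are summed per manifest, so layers shared between tags count once per tag.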
                        }
                    }

                    // Update timestamp
                    if let Some(meta) = storage.stat(key).await {
                        entry.0.size += meta.size;
                        if meta.modified > entry.1 {
                            entry.1 = meta.modified;
                            entry.0.updated = format_timestamp(meta.modified);
@@ -446,106 +268,25 @@ pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
    result
}

pub async fn get_docker_detail(state: &AppState, name: &str) -> DockerDetail {
pub async fn get_docker_detail(storage: &Storage, name: &str) -> DockerDetail {
    let prefix = format!("docker/{}/manifests/", name);
    let keys = state.storage.list(&prefix).await;

    // Build public URL for pull commands
    let registry_host =
        state.config.server.public_url.clone().unwrap_or_else(|| {
            format!("{}:{}", state.config.server.host, state.config.server.port)
        });
    let keys = storage.list(&prefix).await;

    let mut tags = Vec::new();
    for key in &keys {
        // Skip .meta.json files
        if key.ends_with(".meta.json") {
            continue;
        }

        if let Some(tag_name) = key
            .strip_prefix(&prefix)
            .and_then(|s| s.strip_suffix(".json"))
        {
            // Load metadata from .meta.json file
            let meta_key = format!("{}.meta.json", key.trim_end_matches(".json"));
            let metadata = if let Ok(meta_data) = state.storage.get(&meta_key).await {
                serde_json::from_slice::<crate::registry::docker::ImageMetadata>(&meta_data)
                    .unwrap_or_default()
            let (size, created) = if let Some(meta) = storage.stat(key).await {
                (meta.size, format_timestamp(meta.modified))
            } else {
                crate::registry::docker::ImageMetadata::default()
                (0, "N/A".to_string())
            };

            // Get file stats for created timestamp if metadata doesn't have push_timestamp
            let created = if metadata.push_timestamp > 0 {
                format_timestamp(metadata.push_timestamp)
            } else if let Some(file_meta) = state.storage.stat(key).await {
                format_timestamp(file_meta.modified)
            } else {
                "N/A".to_string()
            };

            // Calculate size from manifest layers (config + layers)
            let size = if metadata.size_bytes > 0 {
                metadata.size_bytes
            } else {
                // Parse manifest to get actual image size
                if let Ok(manifest_data) = state.storage.get(key).await {
                    if let Ok(manifest) =
                        serde_json::from_slice::<serde_json::Value>(&manifest_data)
                    {
                        let config_size = manifest
                            .get("config")
                            .and_then(|c| c.get("size"))
                            .and_then(|s| s.as_u64())
                            .unwrap_or(0);
                        let layers_size: u64 = manifest
                            .get("layers")
                            .and_then(|l| l.as_array())
                            .map(|layers| {
                                layers
                                    .iter()
                                    .filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
                                    .sum()
                            })
                            .unwrap_or(0);
                        config_size + layers_size
                    } else {
                        0
                    }
                } else {
                    0
                }
            };

            // Format last_pulled
            let last_pulled = if metadata.last_pulled > 0 {
                Some(format_timestamp(metadata.last_pulled))
            } else {
                None
            };

            // Build pull command
            let pull_command = format!("docker pull {}/{}:{}", registry_host, name, tag_name);

            tags.push(TagInfo {
                name: tag_name.to_string(),
                size,
                created,
                downloads: metadata.downloads,
                last_pulled,
                os: if metadata.os.is_empty() {
                    "unknown".to_string()
                } else {
                    metadata.os
                },
                arch: if metadata.arch.is_empty() {
                    "unknown".to_string()
                } else {
                    metadata.arch
                },
                layers_count: metadata.layers.len(),
                pull_command,
            });
        }
    }
@@ -553,7 +294,6 @@ pub async fn get_docker_detail(state: &AppState, name: &str) -> DockerDetail {
    DockerDetail { tags }
}

#[allow(dead_code)]
pub async fn get_maven_repos(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("maven/").await;

@@ -613,125 +353,75 @@ pub async fn get_maven_detail(storage: &Storage, path: &str) -> MavenDetail {
    MavenDetail { artifacts }
}

#[allow(dead_code)]
pub async fn get_npm_packages(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("npm/").await;

    let mut packages: HashMap<String, RepoInfo> = HashMap::new();
    let mut packages: HashMap<String, (RepoInfo, u64)> = HashMap::new();

    // Find all metadata.json files
    for key in &keys {
        if key.ends_with("/metadata.json") {
            if let Some(name) = key
                .strip_prefix("npm/")
                .and_then(|s| s.strip_suffix("/metadata.json"))
            {
                // Parse metadata to get version count and info
                if let Ok(data) = storage.get(key).await {
                    if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
                        let versions_count = metadata
                            .get("versions")
                            .and_then(|v| v.as_object())
                            .map(|v| v.len())
                            .unwrap_or(0);
        if let Some(rest) = key.strip_prefix("npm/") {
            let parts: Vec<_> = rest.split('/').collect();
            if !parts.is_empty() {
                let name = parts[0].to_string();
                let entry = packages.entry(name.clone()).or_insert_with(|| {
                    (
                        RepoInfo {
                            name,
                            versions: 0,
                            size: 0,
                            updated: "N/A".to_string(),
                        },
                        0,
                    )
                });

                        // Calculate total size from dist.unpackedSize or estimate
                        let total_size: u64 = metadata
                            .get("versions")
                            .and_then(|v| v.as_object())
                            .map(|versions| {
                                versions
                                    .values()
                                    .filter_map(|v| {
                                        v.get("dist")
                                            .and_then(|d| d.get("unpackedSize"))
                                            .and_then(|s| s.as_u64())
                                    })
                                    .sum()
                            })
                            .unwrap_or(0);

                        // Get latest version time for "updated"
                        let updated = metadata
                            .get("time")
                            .and_then(|t| t.get("modified"))
                            .and_then(|m| m.as_str())
                            .map(|s| s[..10].to_string()) // Take just date part
                            .unwrap_or_else(|| "N/A".to_string());

                        packages.insert(
                            name.to_string(),
                            RepoInfo {
                                name: name.to_string(),
                                versions: versions_count,
                                size: total_size,
                                updated,
                            },
                        );
                if parts.len() >= 3 && parts[1] == "tarballs" {
                    entry.0.versions += 1;
                    if let Some(meta) = storage.stat(key).await {
                        entry.0.size += meta.size;
                        if meta.modified > entry.1 {
                            entry.1 = meta.modified;
                            entry.0.updated = format_timestamp(meta.modified);
                        }
                    }
                }
            }
        }
    }

    let mut result: Vec<_> = packages.into_values().collect();
    let mut result: Vec<_> = packages.into_values().map(|(r, _)| r).collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}

pub async fn get_npm_detail(storage: &Storage, name: &str) -> PackageDetail {
    let metadata_key = format!("npm/{}/metadata.json", name);
    let prefix = format!("npm/{}/tarballs/", name);
    let keys = storage.list(&prefix).await;

    let mut versions = Vec::new();

    // Parse metadata.json for version info
    if let Ok(data) = storage.get(&metadata_key).await {
        if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
            if let Some(versions_obj) = metadata.get("versions").and_then(|v| v.as_object()) {
                let time_obj = metadata.get("time").and_then(|t| t.as_object());

                for (version, info) in versions_obj {
                    let size = info
                        .get("dist")
                        .and_then(|d| d.get("unpackedSize"))
                        .and_then(|s| s.as_u64())
                        .unwrap_or(0);

                    let published = time_obj
                        .and_then(|t| t.get(version))
                        .and_then(|p| p.as_str())
                        .map(|s| s[..10].to_string())
                        .unwrap_or_else(|| "N/A".to_string());

                    versions.push(VersionInfo {
                        version: version.clone(),
                        size,
                        published,
                    });
                }
    for key in &keys {
        if let Some(tarball) = key.strip_prefix(&prefix) {
            if let Some(version) = tarball
                .strip_prefix(&format!("{}-", name))
                .and_then(|s| s.strip_suffix(".tgz"))
            {
                let (size, published) = if let Some(meta) = storage.stat(key).await {
                    (meta.size, format_timestamp(meta.modified))
                } else {
                    (0, "N/A".to_string())
                };
                versions.push(VersionInfo {
                    version: version.to_string(),
                    size,
                    published,
                });
            }
        }
    }

    // Sort by version (semver-like, newest first)
    versions.sort_by(|a, b| {
        let a_parts: Vec<u32> = a
            .version
            .split('.')
            .filter_map(|s| s.parse().ok())
            .collect();
        let b_parts: Vec<u32> = b
            .version
            .split('.')
            .filter_map(|s| s.parse().ok())
            .collect();
        b_parts.cmp(&a_parts)
    });
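    // Note: the numeric compare drops non-numeric segments, so a pre-release
    // like "1.0.0-beta" sorts as if it were "1.0".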

    PackageDetail { versions }
}

#[allow(dead_code)]
pub async fn get_cargo_crates(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("cargo/").await;

@@ -799,7 +489,6 @@ pub async fn get_cargo_detail(storage: &Storage, name: &str) -> PackageDetail {
    PackageDetail { versions }
}

#[allow(dead_code)]
pub async fn get_pypi_packages(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("pypi/").await;


@@ -1,23 +1,8 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use super::i18n::{get_translations, Lang, Translations};

/// Application version from Cargo.toml
const VERSION: &str = env!("CARGO_PKG_VERSION");

/// Dark theme layout wrapper for dashboard
pub fn layout_dark(
    title: &str,
    content: &str,
    active_page: Option<&str>,
    extra_scripts: &str,
    lang: Lang,
) -> String {
    let t = get_translations(lang);
/// Main layout wrapper with header and sidebar
pub fn layout(title: &str, content: &str, active_page: Option<&str>) -> String {
    format!(
        r##"<!DOCTYPE html>
<html lang="{}">
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
@@ -29,7 +14,7 @@ pub fn layout_dark(
        .sidebar-open {{ overflow: hidden; }}
    </style>
</head>
<body class="bg-[#0f172a] min-h-screen">
<body class="bg-slate-100 min-h-screen">
    <div class="flex h-screen overflow-hidden">
        <!-- Mobile sidebar overlay -->
        <div id="sidebar-overlay" class="fixed inset-0 bg-black/50 z-40 hidden md:hidden" onclick="toggleSidebar()"></div>
@@ -65,424 +50,17 @@ pub fn layout_dark(
                document.body.classList.add('sidebar-open');
            }}
        }}

        function setLang(lang) {{
            document.cookie = 'nora_lang=' + lang + ';path=/;max-age=31536000';
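            // The cookie keeps the choice for one year; reloading lets the server re-render in the new language.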
            window.location.reload();
        }}
    </script>
    {}
</body>
</html>"##,
        lang.code(),
        html_escape(title),
        sidebar_dark(active_page, t),
        header_dark(lang),
        content,
        extra_scripts
        sidebar(active_page),
        header(),
        content
    )
}

/// Dark theme sidebar
fn sidebar_dark(active_page: Option<&str>, t: &Translations) -> String {
    let active = active_page.unwrap_or("");

let docker_icon = r#"<path fill="currentColor" d="M13.983 11.078h2.119a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.119a.185.185 0 00-.185.185v1.888c0 .102.083.185.185.185m-2.954-5.43h2.118a.186.186 0 00.186-.186V3.574a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.186m0 2.716h2.118a.187.187 0 00.186-.186V6.29a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.887c0 .102.082.185.185.186m-2.93 0h2.12a.186.186 0 00.184-.186V6.29a.185.185 0 00-.185-.185H8.1a.185.185 0 00-.185.185v1.887c0 .102.083.185.185.186m-2.964 0h2.119a.186.186 0 00.185-.186V6.29a.185.185 0 00-.185-.185H5.136a.186.186 0 00-.186.185v1.887c0 .102.084.185.186.186m5.893 2.715h2.118a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.185m-2.93 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.083.185.185.185m-2.964 0h2.119a.185.185 0 00.185-.185V9.006a.185.185 0 00-.185-.186h-2.12a.186.186 0 00-.185.186v1.887c0 .102.084.185.186.185m-2.92 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.082.185.185.185M23.763 9.89c-.065-.051-.672-.51-1.954-.51-.338.001-.676.03-1.01.087-.248-1.7-1.653-2.53-1.716-2.566l-.344-.199-.226.327c-.284.438-.49.922-.612 1.43-.23.97-.09 1.882.403 2.661-.595.332-1.55.413-1.744.42H.751a.751.751 0 00-.75.748 11.376 11.376 0 00.692 4.062c.545 1.428 1.355 2.48 2.41 3.124 1.18.723 3.1 1.137 5.275 1.137.983.003 1.963-.086 2.93-.266a12.248 12.248 0 003.823-1.389c.98-.567 1.86-1.288 2.61-2.136 1.252-1.418 1.998-2.997 2.553-4.4h.221c1.372 0 2.215-.549 2.68-1.009.309-.293.55-.65.707-1.046l.098-.288Z"/>"#;
let maven_icon = r#"<path fill="currentColor" d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 17.93c-3.95-.49-7-3.85-7-7.93 0-.62.08-1.21.21-1.79L9 15v1c0 1.1.9 2 2 2v1.93zm6.9-2.54c-.26-.81-1-1.39-1.9-1.39h-1v-3c0-.55-.45-1-1-1H8v-2h2c.55 0 1-.45 1-1V7h2c1.1 0 2-.9 2-2v-.41c2.93 1.19 5 4.06 5 7.41 0 2.08-.8 3.97-2.1 5.39z"/>"#;
let npm_icon = r#"<path fill="currentColor" d="M0 7.334v8h6.666v1.332H12v-1.332h12v-8H0zm6.666 6.664H5.334v-4H3.999v4H1.335V8.667h5.331v5.331zm4 0v1.336H8.001V8.667h5.334v5.332h-2.669v-.001zm12.001 0h-1.33v-4h-1.336v4h-1.335v-4h-1.33v4h-2.671V8.667h8.002v5.331zM10.665 10H12v2.667h-1.335V10z"/>"#;
let cargo_icon = r#"<path fill="currentColor" d="M20 8h-3V4H3c-1.1 0-2 .9-2 2v11h2c0 1.66 1.34 3 3 3s3-1.34 3-3h6c0 1.66 1.34 3 3 3s3-1.34 3-3h2v-5l-3-4zM6 18.5c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5zm13.5-9l1.96 2.5H17V9.5h2.5zm-1.5 9c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"/>"#;
let pypi_icon = r#"<path fill="currentColor" d="M14.25.18l.9.2.73.26.59.3.45.32.34.34.25.34.16.33.1.3.04.26.02.2-.01.13V8.5l-.05.63-.13.55-.21.46-.26.38-.3.31-.33.25-.35.19-.35.14-.33.1-.3.07-.26.04-.21.02H8.83l-.69.05-.59.14-.5.22-.41.27-.33.32-.27.35-.2.36-.15.37-.1.35-.07.32-.04.27-.02.21v3.06H3.23l-.21-.03-.28-.07-.32-.12-.35-.18-.36-.26-.36-.36-.35-.46-.32-.59-.28-.73-.21-.88-.14-1.05L0 11.97l.06-1.22.16-1.04.24-.87.32-.71.36-.57.4-.44.42-.33.42-.24.4-.16.36-.1.32-.05.24-.01h.16l.06.01h8.16v-.83H6.24l-.01-2.75-.02-.37.05-.34.11-.31.17-.28.25-.26.31-.23.38-.2.44-.18.51-.15.58-.12.64-.1.71-.06.77-.04.84-.02 1.27.05 1.07.13zm-6.3 1.98l-.23.33-.08.41.08.41.23.34.33.22.41.09.41-.09.33-.22.23-.34.08-.41-.08-.41-.23-.33-.33-.22-.41-.09-.41.09-.33.22zM21.1 6.11l.28.06.32.12.35.18.36.27.36.35.35.47.32.59.28.73.21.88.14 1.04.05 1.23-.06 1.23-.16 1.04-.24.86-.32.71-.36.57-.4.45-.42.33-.42.24-.4.16-.36.09-.32.05-.24.02-.16-.01h-8.22v.82h5.84l.01 2.76.02.36-.05.34-.11.31-.17.29-.25.25-.31.24-.38.2-.44.17-.51.15-.58.13-.64.09-.71.07-.77.04-.84.01-1.27-.04-1.07-.14-.9-.2-.73-.25-.59-.3-.45-.33-.34-.34-.25-.34-.16-.33-.1-.3-.04-.25-.02-.2.01-.13v-5.34l.05-.64.13-.54.21-.46.26-.38.3-.32.33-.24.35-.2.35-.14.33-.1.3-.06.26-.04.21-.02.13-.01h5.84l.69-.05.59-.14.5-.21.41-.28.33-.32.27-.35.2-.36.15-.36.1-.35.07-.32.04-.28.02-.21V6.07h2.09l.14.01.21.03zm-6.47 14.25l-.23.33-.08.41.08.41.23.33.33.23.41.08.41-.08.33-.23.23-.33.08-.41-.08-.41-.23-.33-.33-.23-.41-.08-.41.08-.33.23z"/>"#;
    // Dashboard label is translated, registry names stay as-is
    let dashboard_label = t.nav_dashboard;

    let nav_items = [
        (
            "dashboard",
            "/ui/",
            dashboard_label,
            r#"<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M3 12l2-2m0 0l7-7 7 7M5 10v10a1 1 0 001 1h3m10-11l2 2m-2-2v10a1 1 0 01-1 1h-3m-6 0a1 1 0 001-1v-4a1 1 0 011-1h2a1 1 0 011 1v4a1 1 0 001 1m-6 0h6"/>"#,
            true,
        ),
        ("docker", "/ui/docker", "Docker", docker_icon, false),
        ("maven", "/ui/maven", "Maven", maven_icon, false),
        ("npm", "/ui/npm", "npm", npm_icon, false),
        ("cargo", "/ui/cargo", "Cargo", cargo_icon, false),
        ("pypi", "/ui/pypi", "PyPI", pypi_icon, false),
    ];

    let nav_html: String = nav_items.iter().map(|(id, href, label, icon_path, is_stroke)| {
        let is_active = active == *id;
        let active_class = if is_active {
            "bg-slate-700 text-white"
        } else {
            "text-slate-300 hover:bg-slate-700 hover:text-white"
        };

        let (fill_attr, stroke_attr) = if *is_stroke {
            ("none", r#" stroke="currentColor""#)
        } else {
            ("currentColor", "")
        };

        format!(r##"
            <a href="{}" class="flex items-center px-4 py-3 text-sm font-medium rounded-lg transition-colors {}">
                <svg class="w-5 h-5 mr-3" fill="{}"{} viewBox="0 0 24 24">
                    {}
                </svg>
                {}
            </a>
        "##, href, active_class, fill_attr, stroke_attr, icon_path, label)
    }).collect();

    format!(
        r#"
    <div id="sidebar" class="fixed md:static inset-y-0 left-0 z-50 w-64 bg-slate-800 text-white flex flex-col transform -translate-x-full md:translate-x-0 transition-transform duration-200 ease-in-out">
        <div class="h-16 flex items-center justify-between px-6 border-b border-slate-700">
            <div class="flex items-center">
                <span class="text-xl font-bold tracking-tight">N<span class="inline-block w-4 h-4 rounded-full border-2 border-current align-middle mx-px"></span>RA</span>
            </div>
            <button onclick="toggleSidebar()" class="md:hidden p-1 rounded-lg hover:bg-slate-700">
                <svg class="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M6 18L18 6M6 6l12 12"/>
                </svg>
            </button>
        </div>
        <nav class="flex-1 px-4 py-6 space-y-1 overflow-y-auto">
            {}
            <div class="text-xs font-semibold text-slate-400 uppercase tracking-wider px-4 mt-6 mb-3">
                {}
            </div>
        </nav>
        <div class="px-4 py-4 border-t border-slate-700">
            <div class="text-xs text-slate-400">
                Nora v{}
            </div>
        </div>
    </div>
    "#,
        nav_html, t.nav_registries, VERSION
    )
}

/// Dark theme header with language switcher
fn header_dark(lang: Lang) -> String {
    let (en_class, ru_class) = match lang {
        Lang::En => (
            "text-white font-semibold",
            "text-slate-400 hover:text-slate-200",
        ),
        Lang::Ru => (
            "text-slate-400 hover:text-slate-200",
            "text-white font-semibold",
        ),
    };

    format!(
        r##"
    <header class="h-16 bg-[#1e293b] border-b border-slate-700 flex items-center justify-between px-4 md:px-6">
        <div class="flex items-center">
            <button onclick="toggleSidebar()" class="md:hidden p-2 -ml-2 mr-2 rounded-lg hover:bg-slate-700">
                <svg class="w-6 h-6 text-slate-300" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M4 6h16M4 12h16M4 18h16"/>
                </svg>
            </button>
            <div class="md:hidden flex items-center">
                <span class="font-bold text-slate-200 tracking-tight">N<span class="inline-block w-4 h-4 rounded-full border-2 border-current align-middle mx-px"></span>RA</span>
            </div>
        </div>
        <div class="flex items-center space-x-2 md:space-x-4">
            <!-- Language switcher -->
            <div class="flex items-center border border-slate-600 rounded-lg overflow-hidden text-sm">
                <button onclick="setLang('en')" class="px-3 py-1.5 {} transition-colors">EN</button>
                <span class="text-slate-600">|</span>
                <button onclick="setLang('ru')" class="px-3 py-1.5 {} transition-colors">RU</button>
            </div>
            <a href="https://github.com/getnora-io/nora" target="_blank" class="p-2 text-slate-400 hover:text-slate-200 hover:bg-slate-700 rounded-lg">
                <svg class="w-5 h-5" fill="currentColor" viewBox="0 0 24 24">
<path fill-rule="evenodd" d="M12 2C6.477 2 2 6.484 2 12.017c0 4.425 2.865 8.18 6.839 9.504.5.092.682-.217.682-.483 0-.237-.008-.868-.013-1.703-2.782.605-3.369-1.343-3.369-1.343-.454-1.158-1.11-1.466-1.11-1.466-.908-.62.069-.608.069-.608 1.003.07 1.531 1.032 1.531 1.032.892 1.53 2.341 1.088 2.91.832.092-.647.35-1.088.636-1.338-2.22-.253-4.555-1.113-4.555-4.951 0-1.093.39-1.988 1.029-2.688-.103-.253-.446-1.272.098-2.65 0 0 .84-.27 2.75 1.026A9.564 9.564 0 0112 6.844c.85.004 1.705.115 2.504.337 1.909-1.296 2.747-1.027 2.747-1.027.546 1.379.202 2.398.1 2.651.64.7 1.028 1.595 1.028 2.688 0 3.848-2.339 4.695-4.566 4.943.359.309.678.92.678 1.855 0 1.338-.012 2.419-.012 2.747 0 .268.18.58.688.482A10.019 10.019 0 0022 12.017C22 6.484 17.522 2 12 2z" clip-rule="evenodd"/>
                </svg>
            </a>
            <a href="/api-docs" class="p-2 text-slate-400 hover:text-slate-200 hover:bg-slate-700 rounded-lg" title="API Docs">
                <svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                    <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M9 12h6m-6 4h6m2 5H7a2 2 0 01-2-2V5a2 2 0 012-2h5.586a1 1 0 01.707.293l5.414 5.414a1 1 0 01.293.707V19a2 2 0 01-2 2z"/>
                </svg>
            </a>
        </div>
    </header>
    "##,
        en_class, ru_class
    )
}

/// Render global stats row (5-column grid)
pub fn render_global_stats(
    downloads: u64,
    uploads: u64,
    artifacts: u64,
    cache_hit_percent: f64,
    storage_bytes: u64,
    lang: Lang,
) -> String {
    let t = get_translations(lang);
    format!(
        r##"
    <div class="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-5 gap-4 mb-6">
        <div class="bg-[#1e293b] rounded-lg p-4 border border-slate-700">
            <div class="text-slate-400 text-sm mb-1">{}</div>
            <div id="stat-downloads" class="text-2xl font-bold text-slate-200">{}</div>
        </div>
        <div class="bg-[#1e293b] rounded-lg p-4 border border-slate-700">
            <div class="text-slate-400 text-sm mb-1">{}</div>
            <div id="stat-uploads" class="text-2xl font-bold text-slate-200">{}</div>
        </div>
        <div class="bg-[#1e293b] rounded-lg p-4 border border-slate-700">
            <div class="text-slate-400 text-sm mb-1">{}</div>
            <div id="stat-artifacts" class="text-2xl font-bold text-slate-200">{}</div>
        </div>
        <div class="bg-[#1e293b] rounded-lg p-4 border border-slate-700">
            <div class="text-slate-400 text-sm mb-1">{}</div>
            <div id="stat-cache-hit" class="text-2xl font-bold text-slate-200">{:.1}%</div>
        </div>
        <div class="bg-[#1e293b] rounded-lg p-4 border border-slate-700">
            <div class="text-slate-400 text-sm mb-1">{}</div>
            <div id="stat-storage" class="text-2xl font-bold text-slate-200">{}</div>
        </div>
    </div>
    "##,
        t.stat_downloads,
        downloads,
        t.stat_uploads,
        uploads,
        t.stat_artifacts,
        artifacts,
        t.stat_cache_hit,
        cache_hit_percent,
        t.stat_storage,
        format_size(storage_bytes)
    )
}

/// Render registry card with extended metrics
#[allow(clippy::too_many_arguments)]
pub fn render_registry_card(
    name: &str,
    icon_path: &str,
    artifact_count: usize,
    downloads: u64,
    uploads: u64,
    size_bytes: u64,
    href: &str,
    t: &Translations,
) -> String {
    format!(
        r##"
    <a href="{}" id="registry-{}" class="block bg-[#1e293b] rounded-lg border border-slate-700 p-4 md:p-6 hover:border-blue-400 transition-all">
        <div class="flex items-center justify-between mb-3">
            <svg class="w-8 h-8 text-slate-400" fill="currentColor" viewBox="0 0 24 24">
                {}
            </svg>
            <span class="text-xs font-medium text-green-400 bg-green-400/10 px-2 py-1 rounded-full">{}</span>
        </div>
        <div class="text-lg font-semibold text-slate-200 mb-2">{}</div>
        <div class="grid grid-cols-2 gap-2 text-sm">
            <div>
                <span class="text-slate-500">{}</span>
                <div class="text-slate-300 font-medium">{}</div>
            </div>
            <div>
                <span class="text-slate-500">{}</span>
                <div class="text-slate-300 font-medium">{}</div>
            </div>
            <div>
                <span class="text-slate-500">{}</span>
                <div class="text-slate-300 font-medium">{}</div>
            </div>
            <div>
                <span class="text-slate-500">{}</span>
                <div class="text-slate-300 font-medium">{}</div>
            </div>
        </div>
    </a>
    "##,
        href,
        name.to_lowercase(),
        icon_path,
        t.active,
        name,
        t.artifacts,
        artifact_count,
        t.size,
        format_size(size_bytes),
        t.downloads,
        downloads,
        t.uploads,
        uploads
    )
}

/// Render mount points table
pub fn render_mount_points_table(
    mount_points: &[(String, String, Option<String>)],
    t: &Translations,
) -> String {
    let rows: String = mount_points
        .iter()
        .map(|(registry, mount_path, proxy)| {
            let proxy_display = proxy.as_deref().unwrap_or("-");
            format!(
                r##"
                <tr class="border-b border-slate-700">
                    <td class="py-3 text-slate-300">{}</td>
                    <td class="py-3 font-mono text-blue-400">{}</td>
                    <td class="py-3 text-slate-400">{}</td>
                </tr>
                "##,
                registry, mount_path, proxy_display
            )
        })
        .collect();

    format!(
        r##"
    <div class="bg-[#1e293b] rounded-lg border border-slate-700 overflow-hidden">
        <div class="px-4 py-3 border-b border-slate-700">
            <h3 class="text-slate-200 font-semibold">{}</h3>
        </div>
        <div class="overflow-auto max-h-80">
            <table class="w-full">
                <thead class="sticky top-0 bg-slate-800">
                    <tr class="text-left text-xs text-slate-500 uppercase border-b border-slate-700">
                        <th class="px-4 py-2">{}</th>
                        <th class="px-4 py-2">{}</th>
                        <th class="px-4 py-2">{}</th>
                    </tr>
                </thead>
                <tbody class="px-4">
                    {}
                </tbody>
            </table>
        </div>
    </div>
    "##,
        t.mount_points, t.registry, t.mount_path, t.proxy_upstream, rows
    )
}

/// Render a single activity log row
pub fn render_activity_row(
    timestamp: &str,
    action: &str,
    artifact: &str,
    registry: &str,
    source: &str,
) -> String {
    let action_color = match action {
        "PULL" => "text-blue-400",
        "PUSH" => "text-green-400",
        "CACHE" => "text-yellow-400",
        "PROXY" => "text-purple-400",
        _ => "text-slate-400",
    };

    format!(
        r##"
        <tr class="border-b border-slate-700/50 text-sm">
            <td class="py-2 text-slate-500">{}</td>
            <td class="py-2 font-medium {}"><span class="px-2 py-0.5 bg-slate-700 rounded">{}</span></td>
            <td class="py-2 text-slate-300 font-mono text-xs">{}</td>
            <td class="py-2 text-slate-400">{}</td>
            <td class="py-2 text-slate-500">{}</td>
        </tr>
        "##,
        timestamp,
        action_color,
        action,
        html_escape(artifact),
        registry,
        source
    )
}
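
// Example: render_activity_row("2 mins ago", "PUSH", "myapp:1.0", "docker", "10.0.0.5")
// yields a row with a green PUSH badge (values here are illustrative).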

/// Render the activity log container
pub fn render_activity_log(rows: &str, t: &Translations) -> String {
    format!(
        r##"
    <div class="bg-[#1e293b] rounded-lg border border-slate-700 overflow-hidden">
        <div class="px-4 py-3 border-b border-slate-700 flex items-center justify-between">
            <h3 class="text-slate-200 font-semibold">{}</h3>
            <span class="text-xs text-slate-500">{}</span>
        </div>
        <div class="overflow-auto max-h-80">
            <table class="w-full" id="activity-log">
                <thead class="sticky top-0 bg-slate-800">
                    <tr class="text-left text-xs text-slate-500 uppercase border-b border-slate-700">
                        <th class="px-4 py-2">{}</th>
                        <th class="px-4 py-2">{}</th>
                        <th class="px-4 py-2">{}</th>
                        <th class="px-4 py-2">{}</th>
                        <th class="px-4 py-2">{}</th>
                    </tr>
                </thead>
                <tbody class="px-4">
                    {}
                </tbody>
            </table>
        </div>
    </div>
    "##,
        t.recent_activity,
        t.last_n_events,
        t.time,
        t.action,
        t.artifact,
        t.registry,
        t.source,
        rows
    )
}

/// Render the polling script for auto-refresh
pub fn render_polling_script() -> String {
    r##"
    <script>
    setInterval(async () => {
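        // Poll the dashboard API every 5 seconds and patch the stat cells in place (no page reload).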
        try {
            const data = await fetch('/api/ui/dashboard').then(r => r.json());

            // Update global stats
            document.getElementById('stat-downloads').textContent = data.global_stats.downloads;
            document.getElementById('stat-uploads').textContent = data.global_stats.uploads;
            document.getElementById('stat-artifacts').textContent = data.global_stats.artifacts;
            document.getElementById('stat-cache-hit').textContent = data.global_stats.cache_hit_percent.toFixed(1) + '%';

            // Format storage size
            const bytes = data.global_stats.storage_bytes;
            let sizeStr;
            if (bytes >= 1073741824) sizeStr = (bytes / 1073741824).toFixed(1) + ' GB';
            else if (bytes >= 1048576) sizeStr = (bytes / 1048576).toFixed(1) + ' MB';
            else if (bytes >= 1024) sizeStr = (bytes / 1024).toFixed(1) + ' KB';
            else sizeStr = bytes + ' B';
            document.getElementById('stat-storage').textContent = sizeStr;

            // Update uptime
            const uptime = document.getElementById('uptime');
            if (uptime) {
                const secs = data.uptime_seconds;
                const hours = Math.floor(secs / 3600);
                const mins = Math.floor((secs % 3600) / 60);
                uptime.textContent = hours + 'h ' + mins + 'm';
            }
        } catch (e) {
            console.error('Dashboard poll failed:', e);
        }
    }, 5000);
    </script>
    "##.to_string()
}

/// Sidebar navigation component (light theme, unused)
#[allow(dead_code)]
/// Sidebar navigation component
fn sidebar(active_page: Option<&str>) -> String {
    let active = active_page.unwrap_or("");

@@ -490,7 +68,7 @@ fn sidebar(active_page: Option<&str>) -> String {
let docker_icon = r#"<path fill="currentColor" d="M13.983 11.078h2.119a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.119a.185.185 0 00-.185.185v1.888c0 .102.083.185.185.185m-2.954-5.43h2.118a.186.186 0 00.186-.186V3.574a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.186m0 2.716h2.118a.187.187 0 00.186-.186V6.29a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.887c0 .102.082.185.185.186m-2.93 0h2.12a.186.186 0 00.184-.186V6.29a.185.185 0 00-.185-.185H8.1a.185.185 0 00-.185.185v1.887c0 .102.083.185.185.186m-2.964 0h2.119a.186.186 0 00.185-.186V6.29a.185.185 0 00-.185-.185H5.136a.186.186 0 00-.186.185v1.887c0 .102.084.185.186.186m5.893 2.715h2.118a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.185m-2.93 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.083.185.185.185m-2.964 0h2.119a.185.185 0 00.185-.185V9.006a.185.185 0 00-.185-.186h-2.12a.186.186 0 00-.185.186v1.887c0 .102.084.185.186.185m-2.92 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.082.185.185.185M23.763 9.89c-.065-.051-.672-.51-1.954-.51-.338.001-.676.03-1.01.087-.248-1.7-1.653-2.53-1.716-2.566l-.344-.199-.226.327c-.284.438-.49.922-.612 1.43-.23.97-.09 1.882.403 2.661-.595.332-1.55.413-1.744.42H.751a.751.751 0 00-.75.748 11.376 11.376 0 00.692 4.062c.545 1.428 1.355 2.48 2.41 3.124 1.18.723 3.1 1.137 5.275 1.137.983.003 1.963-.086 2.93-.266a12.248 12.248 0 003.823-1.389c.98-.567 1.86-1.288 2.61-2.136 1.252-1.418 1.998-2.997 2.553-4.4h.221c1.372 0 2.215-.549 2.68-1.009.309-.293.55-.65.707-1.046l.098-.288Z"/>"#;
let maven_icon = r#"<path fill="currentColor" d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 17.93c-3.95-.49-7-3.85-7-7.93 0-.62.08-1.21.21-1.79L9 15v1c0 1.1.9 2 2 2v1.93zm6.9-2.54c-.26-.81-1-1.39-1.9-1.39h-1v-3c0-.55-.45-1-1-1H8v-2h2c.55 0 1-.45 1-1V7h2c1.1 0 2-.9 2-2v-.41c2.93 1.19 5 4.06 5 7.41 0 2.08-.8 3.97-2.1 5.39z"/>"#;
let npm_icon = r#"<path fill="currentColor" d="M0 7.334v8h6.666v1.332H12v-1.332h12v-8H0zm6.666 6.664H5.334v-4H3.999v4H1.335V8.667h5.331v5.331zm4 0v1.336H8.001V8.667h5.334v5.332h-2.669v-.001zm12.001 0h-1.33v-4h-1.336v4h-1.335v-4h-1.33v4h-2.671V8.667h8.002v5.331zM10.665 10H12v2.667h-1.335V10z"/>"#;
let cargo_icon = r#"<path fill="currentColor" d="M20 8h-3V4H3c-1.1 0-2 .9-2 2v11h2c0 1.66 1.34 3 3 3s3-1.34 3-3h6c0 1.66 1.34 3 3 3s3-1.34 3-3h2v-5l-3-4zM6 18.5c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5zm13.5-9l1.96 2.5H17V9.5h2.5zm-1.5 9c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"/>"#;
let cargo_icon = r#"<path fill="currentColor" d="M23.834 8.101a13.912 13.912 0 0 1-13.643 11.72 10.105 10.105 0 0 1-1.994-.12 6.111 6.111 0 0 1-5.082-5.761 5.934 5.934 0 0 1 11.867-.084c.025.983-.401 1.846-1.277 1.871-.936 0-1.374-.668-1.374-1.567v-2.5a1.531 1.531 0 0 0-1.52-1.533H8.715a3.648 3.648 0 1 0 2.695 6.08l.073-.11.074.121a2.58 2.58 0 0 0 2.2 1.048 2.909 2.909 0 0 0 2.695-3.04 7.912 7.912 0 0 0-.217-1.933 7.404 7.404 0 0 0-14.64 1.603 7.497 7.497 0 0 0 7.308 7.405 12.822 12.822 0 0 0 2.14-.12 11.927 11.927 0 0 0 9.98-10.023.117.117 0 0 0-.043-.117.115.115 0 0 0-.084-.023l-.09.024a.116.116 0 0 1-.147-.085.116.116 0 0 1 .054-.133zm-14.49 7.072a2.162 2.162 0 1 1 0-4.324 2.162 2.162 0 0 1 0 4.324z"/>"#;
let pypi_icon = r#"<path fill="currentColor" d="M14.25.18l.9.2.73.26.59.3.45.32.34.34.25.34.16.33.1.3.04.26.02.2-.01.13V8.5l-.05.63-.13.55-.21.46-.26.38-.3.31-.33.25-.35.19-.35.14-.33.1-.3.07-.26.04-.21.02H8.83l-.69.05-.59.14-.5.22-.41.27-.33.32-.27.35-.2.36-.15.37-.1.35-.07.32-.04.27-.02.21v3.06H3.23l-.21-.03-.28-.07-.32-.12-.35-.18-.36-.26-.36-.36-.35-.46-.32-.59-.28-.73-.21-.88-.14-1.05L0 11.97l.06-1.22.16-1.04.24-.87.32-.71.36-.57.4-.44.42-.33.42-.24.4-.16.36-.1.32-.05.24-.01h.16l.06.01h8.16v-.83H6.24l-.01-2.75-.02-.37.05-.34.11-.31.17-.28.25-.26.31-.23.38-.2.44-.18.51-.15.58-.12.64-.1.71-.06.77-.04.84-.02 1.27.05 1.07.13zm-6.3 1.98l-.23.33-.08.41.08.41.23.34.33.22.41.09.41-.09.33-.22.23-.34.08-.41-.08-.41-.23-.33-.33-.22-.41-.09-.41.09-.33.22zM21.1 6.11l.28.06.32.12.35.18.36.27.36.35.35.47.32.59.28.73.21.88.14 1.04.05 1.23-.06 1.23-.16 1.04-.24.86-.32.71-.36.57-.4.45-.42.33-.42.24-.4.16-.36.09-.32.05-.24.02-.16-.01h-8.22v.82h5.84l.01 2.76.02.36-.05.34-.11.31-.17.29-.25.25-.31.24-.38.2-.44.17-.51.15-.58.13-.64.09-.71.07-.77.04-.84.01-1.27-.04-1.07-.14-.9-.2-.73-.25-.59-.3-.45-.33-.34-.34-.25-.34-.16-.33-.1-.3-.04-.25-.02-.2.01-.13v-5.34l.05-.64.13-.54.21-.46.26-.38.3-.32.33-.24.35-.2.35-.14.33-.1.3-.06.26-.04.21-.02.13-.01h5.84l.69-.05.59-.14.5-.21.41-.28.33-.32.27-.35.2-.36.15-.36.1-.35.07-.32.04-.28.02-.21V6.07h2.09l.14.01.21.03zm-6.47 14.25l-.23.33-.08.41.08.41.23.33.33.23.41.08.41-.08.33-.23.23-.33.08-.41-.08-.41-.23-.33-.33-.23-.41-.08-.41.08-.33.23z"/>"#;
    let nav_items = [
@@ -564,19 +142,17 @@ fn sidebar(active_page: Option<&str>) -> String {
        <!-- Footer -->
        <div class="px-4 py-4 border-t border-slate-700">
            <div class="text-xs text-slate-400">
                Nora v{}
                Nora v0.2.0
            </div>
        </div>
    </div>
    "#,
        super::logo::LOGO_BASE64,
        nav_html,
        VERSION
        nav_html
    )
}

/// Header component (light theme, unused)
#[allow(dead_code)]
/// Header component
fn header() -> String {
    r##"
    <header class="h-16 bg-white border-b border-slate-200 flex items-center justify-between px-4 md:px-6">
@@ -613,12 +189,11 @@ pub mod icons {
pub const DOCKER: &str = r#"<path fill="currentColor" d="M13.983 11.078h2.119a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.119a.185.185 0 00-.185.185v1.888c0 .102.083.185.185.185m-2.954-5.43h2.118a.186.186 0 00.186-.186V3.574a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.186m0 2.716h2.118a.187.187 0 00.186-.186V6.29a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.887c0 .102.082.185.185.186m-2.93 0h2.12a.186.186 0 00.184-.186V6.29a.185.185 0 00-.185-.185H8.1a.185.185 0 00-.185.185v1.887c0 .102.083.185.185.186m-2.964 0h2.119a.186.186 0 00.185-.186V6.29a.185.185 0 00-.185-.185H5.136a.186.186 0 00-.186.185v1.887c0 .102.084.185.186.186m5.893 2.715h2.118a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.185m-2.93 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.083.185.185.185m-2.964 0h2.119a.185.185 0 00.185-.185V9.006a.185.185 0 00-.185-.186h-2.12a.186.186 0 00-.185.186v1.887c0 .102.084.185.186.185m-2.92 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.082.185.185.185M23.763 9.89c-.065-.051-.672-.51-1.954-.51-.338.001-.676.03-1.01.087-.248-1.7-1.653-2.53-1.716-2.566l-.344-.199-.226.327c-.284.438-.49.922-.612 1.43-.23.97-.09 1.882.403 2.661-.595.332-1.55.413-1.744.42H.751a.751.751 0 00-.75.748 11.376 11.376 0 00.692 4.062c.545 1.428 1.355 2.48 2.41 3.124 1.18.723 3.1 1.137 5.275 1.137.983.003 1.963-.086 2.93-.266a12.248 12.248 0 003.823-1.389c.98-.567 1.86-1.288 2.61-2.136 1.252-1.418 1.998-2.997 2.553-4.4h.221c1.372 0 2.215-.549 2.68-1.009.309-.293.55-.65.707-1.046l.098-.288Z"/>"#;
pub const MAVEN: &str = r#"<path fill="currentColor" d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 17.93c-3.95-.49-7-3.85-7-7.93 0-.62.08-1.21.21-1.79L9 15v1c0 1.1.9 2 2 2v1.93zm6.9-2.54c-.26-.81-1-1.39-1.9-1.39h-1v-3c0-.55-.45-1-1-1H8v-2h2c.55 0 1-.45 1-1V7h2c1.1 0 2-.9 2-2v-.41c2.93 1.19 5 4.06 5 7.41 0 2.08-.8 3.97-2.1 5.39z"/>"#;
pub const NPM: &str = r#"<path fill="currentColor" d="M0 7.334v8h6.666v1.332H12v-1.332h12v-8H0zm6.666 6.664H5.334v-4H3.999v4H1.335V8.667h5.331v5.331zm4 0v1.336H8.001V8.667h5.334v5.332h-2.669v-.001zm12.001 0h-1.33v-4h-1.336v4h-1.335v-4h-1.33v4h-2.671V8.667h8.002v5.331zM10.665 10H12v2.667h-1.335V10z"/>"#;
pub const CARGO: &str = r#"<path fill="currentColor" d="M20 8h-3V4H3c-1.1 0-2 .9-2 2v11h2c0 1.66 1.34 3 3 3s3-1.34 3-3h6c0 1.66 1.34 3 3 3s3-1.34 3-3h2v-5l-3-4zM6 18.5c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5zm13.5-9l1.96 2.5H17V9.5h2.5zm-1.5 9c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"/>"#;
pub const CARGO: &str = r#"<path fill="currentColor" d="M23.834 8.101a13.912 13.912 0 0 1-13.643 11.72 10.105 10.105 0 0 1-1.994-.12 6.111 6.111 0 0 1-5.082-5.761 5.934 5.934 0 0 1 11.867-.084c.025.983-.401 1.846-1.277 1.871-.936 0-1.374-.668-1.374-1.567v-2.5a1.531 1.531 0 0 0-1.52-1.533H8.715a3.648 3.648 0 1 0 2.695 6.08l.073-.11.074.121a2.58 2.58 0 0 0 2.2 1.048 2.909 2.909 0 0 0 2.695-3.04 7.912 7.912 0 0 0-.217-1.933 7.404 7.404 0 0 0-14.64 1.603 7.497 7.497 0 0 0 7.308 7.405 12.822 12.822 0 0 0 2.14-.12 11.927 11.927 0 0 0 9.98-10.023.117.117 0 0 0-.043-.117.115.115 0 0 0-.084-.023l-.09.024a.116.116 0 0 1-.147-.085.116.116 0 0 1 .054-.133zm-14.49 7.072a2.162 2.162 0 1 1 0-4.324 2.162 2.162 0 0 1 0 4.324z"/>"#;
pub const PYPI: &str = r#"<path fill="currentColor" d="M14.25.18l.9.2.73.26.59.3.45.32.34.34.25.34.16.33.1.3.04.26.02.2-.01.13V8.5l-.05.63-.13.55-.21.46-.26.38-.3.31-.33.25-.35.19-.35.14-.33.1-.3.07-.26.04-.21.02H8.83l-.69.05-.59.14-.5.22-.41.27-.33.32-.27.35-.2.36-.15.37-.1.35-.07.32-.04.27-.02.21v3.06H3.23l-.21-.03-.28-.07-.32-.12-.35-.18-.36-.26-.36-.36-.35-.46-.32-.59-.28-.73-.21-.88-.14-1.05L0 11.97l.06-1.22.16-1.04.24-.87.32-.71.36-.57.4-.44.42-.33.42-.24.4-.16.36-.1.32-.05.24-.01h.16l.06.01h8.16v-.83H6.24l-.01-2.75-.02-.37.05-.34.11-.31.17-.28.25-.26.31-.23.38-.2.44-.18.51-.15.58-.12.64-.1.71-.06.77-.04.84-.02 1.27.05 1.07.13zm-6.3 1.98l-.23.33-.08.41.08.41.23.34.33.22.41.09.41-.09.33-.22.23-.34.08-.41-.08-.41-.23-.33-.33-.22-.41-.09-.41.09-.33.22zM21.1 6.11l.28.06.32.12.35.18.36.27.36.35.35.47.32.59.28.73.21.88.14 1.04.05 1.23-.06 1.23-.16 1.04-.24.86-.32.71-.36.57-.4.45-.42.33-.42.24-.4.16-.36.09-.32.05-.24.02-.16-.01h-8.22v.82h5.84l.01 2.76.02.36-.05.34-.11.31-.17.29-.25.25-.31.24-.38.2-.44.17-.51.15-.58.13-.64.09-.71.07-.77.04-.84.01-1.27-.04-1.07-.14-.9-.2-.73-.25-.59-.3-.45-.33-.34-.34-.25-.34-.16-.33-.1-.3-.04-.25-.02-.2.01-.13v-5.34l.05-.64.13-.54.21-.46.26-.38.3-.32.33-.24.35-.2.35-.14.33-.1.3-.06.26-.04.21-.02.13-.01h5.84l.69-.05.59-.14.5-.21.41-.28.33-.32.27-.35.2-.36.15-.36.1-.35.07-.32.04-.28.02-.21V6.07h2.09l.14.01.21.03zm-6.47 14.25l-.23.33-.08.41.08.41.23.33.33.23.41.08.41-.08.33-.23.23-.33.08-.41-.08-.41-.23-.33-.33-.23-.41-.08-.41.08-.33.23z"/>"#;
}

/// Stat card for dashboard with SVG icon (used in light theme pages)
#[allow(dead_code)]
/// Stat card for dashboard with SVG icon
pub fn stat_card(name: &str, icon_path: &str, count: usize, href: &str, unit: &str) -> String {
    format!(
        r##"
@@ -664,57 +239,6 @@ pub fn html_escape(s: &str) -> String {
        .replace('\'', "&#39;")
}

/// Render the "bragging" footer with NORA stats
pub fn render_bragging_footer(lang: Lang) -> String {
    let t = get_translations(lang);
    format!(
        r##"
    <div class="mt-8 bg-gradient-to-r from-slate-800 to-slate-900 rounded-lg border border-slate-700 p-6">
        <div class="text-center mb-4">
            <span class="text-slate-400 text-sm uppercase tracking-wider">{}</span>
        </div>
        <div class="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-6 gap-4 text-center">
            <div class="p-3">
                <div class="text-2xl font-bold text-blue-400">34 MB</div>
                <div class="text-xs text-slate-500 mt-1">{}</div>
            </div>
            <div class="p-3">
                <div class="text-2xl font-bold text-green-400">&lt;1s</div>
                <div class="text-xs text-slate-500 mt-1">{}</div>
            </div>
            <div class="p-3">
                <div class="text-2xl font-bold text-purple-400">~30 MB</div>
                <div class="text-xs text-slate-500 mt-1">{}</div>
            </div>
            <div class="p-3">
                <div class="text-2xl font-bold text-yellow-400">5</div>
                <div class="text-xs text-slate-500 mt-1">{}</div>
            </div>
            <div class="p-3">
                <div class="text-2xl font-bold text-pink-400">{}</div>
                <div class="text-xs text-slate-500 mt-1">amd64 / arm64</div>
            </div>
            <div class="p-3">
                <div class="text-2xl font-bold text-cyan-400">{}</div>
                <div class="text-xs text-slate-500 mt-1">Config</div>
            </div>
        </div>
        <div class="text-center mt-4">
            <span class="text-slate-500 text-xs">{}</span>
        </div>
    </div>
    "##,
        t.built_for_speed,
        t.docker_image,
        t.cold_start,
        t.memory,
        t.registries_count,
        t.multi_arch,
        t.zero_config,
        t.tagline
    )
}

/// Format Unix timestamp as relative time
pub fn format_timestamp(ts: u64) -> String {
    if ts == 0 {

@@ -1,275 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

/// Internationalization support for the UI
use serde::{Deserialize, Serialize};

#[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Lang {
    #[default]
    En,
    Ru,
}

impl Lang {
    pub fn from_str(s: &str) -> Self {
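        // Accepts a few aliases; anything unrecognized falls back to English.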
        match s.to_lowercase().as_str() {
            "ru" | "rus" | "russian" => Lang::Ru,
            _ => Lang::En,
        }
    }

    pub fn code(&self) -> &'static str {
        match self {
            Lang::En => "en",
            Lang::Ru => "ru",
        }
    }
}

/// All translatable strings
#[allow(dead_code)]
pub struct Translations {
    // Navigation
    pub nav_dashboard: &'static str,
    pub nav_registries: &'static str,

    // Dashboard
    pub dashboard_title: &'static str,
    pub dashboard_subtitle: &'static str,
    pub uptime: &'static str,

    // Stats
    pub stat_downloads: &'static str,
    pub stat_uploads: &'static str,
    pub stat_artifacts: &'static str,
    pub stat_cache_hit: &'static str,
    pub stat_storage: &'static str,

    // Registry cards
    pub active: &'static str,
    pub artifacts: &'static str,
    pub size: &'static str,
    pub downloads: &'static str,
    pub uploads: &'static str,

    // Mount points
    pub mount_points: &'static str,
    pub registry: &'static str,
    pub mount_path: &'static str,
    pub proxy_upstream: &'static str,

    // Activity
    pub recent_activity: &'static str,
    pub last_n_events: &'static str,
    pub time: &'static str,
    pub action: &'static str,
    pub artifact: &'static str,
    pub source: &'static str,
    pub no_activity: &'static str,

    // Relative time
    pub just_now: &'static str,
    pub min_ago: &'static str,
    pub mins_ago: &'static str,
    pub hour_ago: &'static str,
    pub hours_ago: &'static str,
    pub day_ago: &'static str,
    pub days_ago: &'static str,

    // Registry pages
    pub repositories: &'static str,
    pub search_placeholder: &'static str,
    pub no_repos_found: &'static str,
    pub push_first_artifact: &'static str,
    pub name: &'static str,
    pub tags: &'static str,
    pub versions: &'static str,
    pub updated: &'static str,

    // Detail pages
    pub pull_command: &'static str,
    pub install_command: &'static str,
    pub maven_dependency: &'static str,
    pub total: &'static str,
    pub created: &'static str,
    pub published: &'static str,
    pub filename: &'static str,
    pub files: &'static str,

    // Bragging footer
    pub built_for_speed: &'static str,
    pub docker_image: &'static str,
    pub cold_start: &'static str,
    pub memory: &'static str,
    pub registries_count: &'static str,
    pub multi_arch: &'static str,
    pub zero_config: &'static str,
    pub tagline: &'static str,
}

pub fn get_translations(lang: Lang) -> &'static Translations {
    match lang {
        Lang::En => &TRANSLATIONS_EN,
        Lang::Ru => &TRANSLATIONS_RU,
    }
}
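
// Illustrative usage (the assert is hypothetical; the strings come from the tables below):
// let t = get_translations(Lang::from_str("ru"));
// assert_eq!(t.nav_dashboard, "Панель");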
|
||||
|
||||
pub static TRANSLATIONS_EN: Translations = Translations {
|
||||
// Navigation
|
||||
nav_dashboard: "Dashboard",
|
||||
nav_registries: "Registries",
|
||||
|
||||
// Dashboard
|
||||
dashboard_title: "Dashboard",
|
||||
dashboard_subtitle: "Overview of all registries",
|
||||
uptime: "Uptime",
|
||||
|
||||
// Stats
|
||||
stat_downloads: "Downloads",
|
||||
stat_uploads: "Uploads",
|
||||
stat_artifacts: "Artifacts",
|
||||
stat_cache_hit: "Cache Hit",
|
||||
stat_storage: "Storage",
|
||||
|
||||
// Registry cards
|
||||
active: "ACTIVE",
|
||||
artifacts: "Artifacts",
|
||||
size: "Size",
|
||||
downloads: "Downloads",
|
||||
uploads: "Uploads",
|
||||
|
||||
// Mount points
|
||||
mount_points: "Mount Points",
|
||||
registry: "Registry",
|
||||
mount_path: "Mount Path",
|
||||
proxy_upstream: "Proxy Upstream",
|
||||
|
||||
// Activity
|
||||
recent_activity: "Recent Activity",
|
||||
last_n_events: "Last 20 events",
|
||||
time: "Time",
|
||||
action: "Action",
|
||||
artifact: "Artifact",
|
||||
source: "Source",
|
||||
no_activity: "No recent activity",
|
||||
|
||||
// Relative time
|
||||
just_now: "just now",
|
||||
min_ago: "min ago",
|
||||
mins_ago: "mins ago",
|
||||
hour_ago: "hour ago",
|
||||
hours_ago: "hours ago",
|
||||
day_ago: "day ago",
|
||||
days_ago: "days ago",
|
||||
|
||||
// Registry pages
|
||||
repositories: "repositories",
|
||||
search_placeholder: "Search repositories...",
|
||||
no_repos_found: "No repositories found",
|
||||
push_first_artifact: "Push your first artifact to see it here",
|
||||
name: "Name",
|
||||
tags: "Tags",
|
||||
versions: "Versions",
|
||||
updated: "Updated",
|
||||
|
||||
// Detail pages
|
||||
pull_command: "Pull Command",
|
||||
install_command: "Install Command",
|
||||
maven_dependency: "Maven Dependency",
|
||||
total: "total",
|
||||
created: "Created",
|
||||
published: "Published",
|
||||
filename: "Filename",
|
||||
files: "files",
|
||||
|
||||
// Bragging footer
|
||||
built_for_speed: "Built for speed",
|
||||
docker_image: "Docker Image",
|
||||
cold_start: "Cold Start",
|
||||
memory: "Memory",
|
||||
registries_count: "Registries",
|
||||
multi_arch: "Multi-arch",
|
||||
zero_config: "Zero",
|
||||
tagline: "Pure Rust. Single binary. OCI compatible.",
|
||||
};
|
||||
|
||||
pub static TRANSLATIONS_RU: Translations = Translations {
|
||||
// Navigation
|
||||
nav_dashboard: "Панель",
|
||||
nav_registries: "Реестры",
|
||||
|
||||
// Dashboard
|
||||
dashboard_title: "Панель управления",
|
||||
dashboard_subtitle: "Обзор всех реестров",
|
||||
uptime: "Аптайм",
|
||||
|
||||
// Stats
|
||||
stat_downloads: "Загрузки",
|
||||
stat_uploads: "Публикации",
|
||||
stat_artifacts: "Артефакты",
|
||||
stat_cache_hit: "Кэш",
|
||||
stat_storage: "Хранилище",
|
||||
|
||||
// Registry cards
|
||||
active: "АКТИВЕН",
|
||||
artifacts: "Артефакты",
|
||||
size: "Размер",
|
||||
downloads: "Загрузки",
|
||||
uploads: "Публикации",
|
||||
|
||||
// Mount points
|
||||
mount_points: "Точки монтирования",
|
||||
registry: "Реестр",
|
||||
mount_path: "Путь",
|
||||
proxy_upstream: "Прокси",
|
||||
|
||||
// Activity
|
||||
recent_activity: "Последняя активность",
|
||||
last_n_events: "Последние 20 событий",
|
||||
time: "Время",
|
||||
action: "Действие",
|
||||
artifact: "Артефакт",
|
||||
source: "Источник",
|
||||
no_activity: "Нет активности",
|
||||
|
||||
// Relative time
|
||||
just_now: "только что",
|
||||
min_ago: "мин назад",
|
||||
mins_ago: "мин назад",
|
||||
hour_ago: "час назад",
|
||||
hours_ago: "ч назад",
|
||||
day_ago: "день назад",
|
||||
days_ago: "дн назад",
|
||||
|
||||
// Registry pages
|
||||
repositories: "репозиториев",
|
||||
search_placeholder: "Поиск репозиториев...",
|
||||
no_repos_found: "Репозитории не найдены",
|
||||
push_first_artifact: "Загрузите первый артефакт, чтобы увидеть его здесь",
|
||||
name: "Название",
|
||||
tags: "Теги",
|
||||
versions: "Версии",
|
||||
updated: "Обновлено",
|
||||
|
||||
// Detail pages
|
||||
pull_command: "Команда загрузки",
|
||||
install_command: "Команда установки",
|
||||
maven_dependency: "Maven зависимость",
|
||||
total: "всего",
|
||||
created: "Создан",
|
||||
published: "Опубликован",
|
||||
filename: "Файл",
|
||||
files: "файлов",
|
||||
|
||||
// Bragging footer
|
||||
built_for_speed: "Создан для скорости",
|
||||
docker_image: "Docker образ",
|
||||
cold_start: "Холодный старт",
|
||||
memory: "Память",
|
||||
registries_count: "Реестров",
|
||||
multi_arch: "Мульти-арх",
|
||||
zero_config: "Без",
|
||||
tagline: "Чистый Rust. Один бинарник. OCI совместимый.",
|
||||
};
|
||||
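For orientation, a minimal usage sketch (not part of the diff; the field values are taken verbatim from the statics above, and `Lang` is the crate's own enum with a lenient `from_str`):

    // Illustrative call sites — values match TRANSLATIONS_RU / TRANSLATIONS_EN above.
    let t = get_translations(Lang::Ru);
    assert_eq!(t.nav_dashboard, "Панель");
    assert_eq!(get_translations(Lang::En).tagline, "Pure Rust. Single binary. OCI compatible.");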
File diff suppressed because one or more lines are too long
@@ -1,16 +1,11 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

mod api;
pub mod components;
pub mod i18n;
mod components;
mod logo;
mod templates;

use crate::repo_index::paginate;
use crate::AppState;
use axum::{
    extract::{Path, Query, State},
    extract::{Path, State},
    response::{Html, IntoResponse, Redirect},
    routing::get,
    Router,
@@ -18,59 +13,8 @@ use axum::{
use std::sync::Arc;

use api::*;
use i18n::Lang;
use templates::*;

#[derive(Debug, serde::Deserialize)]
struct LangQuery {
    lang: Option<String>,
}

#[derive(Debug, serde::Deserialize)]
struct ListQuery {
    lang: Option<String>,
    page: Option<usize>,
    limit: Option<usize>,
}

const DEFAULT_PAGE_SIZE: usize = 50;

fn extract_lang(query: &Query<LangQuery>, cookie_header: Option<&str>) -> Lang {
    // Priority: query param > cookie > default
    if let Some(ref lang) = query.lang {
        return Lang::from_str(lang);
    }

    // Try cookie
    if let Some(cookies) = cookie_header {
        for part in cookies.split(';') {
            let part = part.trim();
            if let Some(value) = part.strip_prefix("nora_lang=") {
                return Lang::from_str(value);
            }
        }
    }

    Lang::default()
}

fn extract_lang_from_list(query: &ListQuery, cookie_header: Option<&str>) -> Lang {
    if let Some(ref lang) = query.lang {
        return Lang::from_str(lang);
    }

    if let Some(cookies) = cookie_header {
        for part in cookies.split(';') {
            let part = part.trim();
            if let Some(value) = part.strip_prefix("nora_lang=") {
                return Lang::from_str(value);
            }
        }
    }

    Lang::default()
}

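Both extractors above duplicate the cookie scan; a possible consolidation, sketched here against the same `Lang` API (the helper name is invented, not in the diff):

    // Hypothetical shared helper both functions could delegate to.
    fn lang_from_parts(query_lang: Option<&str>, cookie_header: Option<&str>) -> Lang {
        if let Some(lang) = query_lang {
            return Lang::from_str(lang);
        }
        if let Some(cookies) = cookie_header {
            for part in cookies.split(';') {
                if let Some(value) = part.trim().strip_prefix("nora_lang=") {
                    return Lang::from_str(value);
                }
            }
        }
        Lang::default()
    }
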
pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
        // UI Pages
@@ -89,212 +33,83 @@ pub fn routes() -> Router<Arc<AppState>> {
        .route("/ui/pypi/{name}", get(pypi_detail))
        // API endpoints for HTMX
        .route("/api/ui/stats", get(api_stats))
        .route("/api/ui/dashboard", get(api_dashboard))
        .route("/api/ui/{registry_type}/list", get(api_list))
        .route("/api/ui/{registry_type}/{name}", get(api_detail))
        .route("/api/ui/{registry_type}/search", get(api_search))
}

// Dashboard page
async fn dashboard(
    State(state): State<Arc<AppState>>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let response = api_dashboard(State(state)).await.0;
    Html(render_dashboard(&response, lang))
async fn dashboard(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let stats = get_registry_stats(&state.storage).await;
    Html(render_dashboard(&stats))
}

// Docker pages
async fn docker_list(
    State(state): State<Arc<AppState>>,
    Query(query): Query<ListQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
    let page = query.page.unwrap_or(1).max(1);
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);

    let all_repos = state.repo_index.get("docker", &state.storage).await;
    let (repos, total) = paginate(&all_repos, page, limit);

    Html(render_registry_list_paginated(
        "docker",
        "Docker Registry",
        &repos,
        page,
        limit,
        total,
        lang,
    ))
async fn docker_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let repos = get_docker_repos(&state.storage).await;
    Html(render_registry_list("docker", "Docker Registry", &repos))
}

async fn docker_detail(
    State(state): State<Arc<AppState>>,
    Path(name): Path<String>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let detail = get_docker_detail(&state, &name).await;
    Html(render_docker_detail(&name, &detail, lang))
    let detail = get_docker_detail(&state.storage, &name).await;
    Html(render_docker_detail(&name, &detail))
}

// Maven pages
async fn maven_list(
    State(state): State<Arc<AppState>>,
    Query(query): Query<ListQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
    let page = query.page.unwrap_or(1).max(1);
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);

    let all_repos = state.repo_index.get("maven", &state.storage).await;
    let (repos, total) = paginate(&all_repos, page, limit);

    Html(render_registry_list_paginated(
        "maven",
        "Maven Repository",
        &repos,
        page,
        limit,
        total,
        lang,
    ))
async fn maven_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let repos = get_maven_repos(&state.storage).await;
    Html(render_registry_list("maven", "Maven Repository", &repos))
}

async fn maven_detail(
    State(state): State<Arc<AppState>>,
    Path(path): Path<String>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let detail = get_maven_detail(&state.storage, &path).await;
    Html(render_maven_detail(&path, &detail, lang))
    Html(render_maven_detail(&path, &detail))
}

// npm pages
async fn npm_list(
    State(state): State<Arc<AppState>>,
    Query(query): Query<ListQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
    let page = query.page.unwrap_or(1).max(1);
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);

    let all_packages = state.repo_index.get("npm", &state.storage).await;
    let (packages, total) = paginate(&all_packages, page, limit);

    Html(render_registry_list_paginated(
        "npm",
        "npm Registry",
        &packages,
        page,
        limit,
        total,
        lang,
    ))
async fn npm_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let packages = get_npm_packages(&state.storage).await;
    Html(render_registry_list("npm", "npm Registry", &packages))
}

async fn npm_detail(
    State(state): State<Arc<AppState>>,
    Path(name): Path<String>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let detail = get_npm_detail(&state.storage, &name).await;
    Html(render_package_detail("npm", &name, &detail, lang))
    Html(render_package_detail("npm", &name, &detail))
}

// Cargo pages
async fn cargo_list(
    State(state): State<Arc<AppState>>,
    Query(query): Query<ListQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
    let page = query.page.unwrap_or(1).max(1);
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);

    let all_crates = state.repo_index.get("cargo", &state.storage).await;
    let (crates, total) = paginate(&all_crates, page, limit);

    Html(render_registry_list_paginated(
        "cargo",
        "Cargo Registry",
        &crates,
        page,
        limit,
        total,
        lang,
    ))
async fn cargo_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let crates = get_cargo_crates(&state.storage).await;
    Html(render_registry_list("cargo", "Cargo Registry", &crates))
}

async fn cargo_detail(
    State(state): State<Arc<AppState>>,
    Path(name): Path<String>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let detail = get_cargo_detail(&state.storage, &name).await;
    Html(render_package_detail("cargo", &name, &detail, lang))
    Html(render_package_detail("cargo", &name, &detail))
}

// PyPI pages
async fn pypi_list(
    State(state): State<Arc<AppState>>,
    Query(query): Query<ListQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
    let page = query.page.unwrap_or(1).max(1);
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);

    let all_packages = state.repo_index.get("pypi", &state.storage).await;
    let (packages, total) = paginate(&all_packages, page, limit);

    Html(render_registry_list_paginated(
        "pypi",
        "PyPI Repository",
        &packages,
        page,
        limit,
        total,
        lang,
    ))
async fn pypi_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
    let packages = get_pypi_packages(&state.storage).await;
    Html(render_registry_list("pypi", "PyPI Repository", &packages))
}

async fn pypi_detail(
    State(state): State<Arc<AppState>>,
    Path(name): Path<String>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let detail = get_pypi_detail(&state.storage, &name).await;
    Html(render_package_detail("pypi", &name, &detail, lang))
    Html(render_package_detail("pypi", &name, &detail))
}

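Every list handler above clamps `page` to at least 1 and `limit` to at most 100, then slices via `paginate` from `crate::repo_index`, which returns the page slice plus the total count. A plausible shape of that helper, as a sketch only (the crate's actual body is not shown in this compare):

    fn paginate<T: Clone>(items: &[T], page: usize, limit: usize) -> (Vec<T>, usize) {
        let total = items.len();
        // page is 1-based at the call sites; clamp the window to the slice bounds.
        let start = page.saturating_sub(1).saturating_mul(limit).min(total);
        let end = (start + limit).min(total);
        (items[start..end].to_vec(), total)
    }
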
@@ -1,309 +1,95 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use super::api::{DashboardResponse, DockerDetail, MavenDetail, PackageDetail};
use super::api::{DockerDetail, MavenDetail, PackageDetail, RegistryStats, RepoInfo};
use super::components::*;
use super::i18n::{get_translations, Lang};
use crate::repo_index::RepoInfo;

/// Renders the main dashboard page with dark theme
pub fn render_dashboard(data: &DashboardResponse, lang: Lang) -> String {
    let t = get_translations(lang);
    // Render global stats
    let global_stats = render_global_stats(
        data.global_stats.downloads,
        data.global_stats.uploads,
        data.global_stats.artifacts,
        data.global_stats.cache_hit_percent,
        data.global_stats.storage_bytes,
        lang,
    );

    // Render registry cards
    let registry_cards: String = data
        .registry_stats
        .iter()
        .map(|r| {
            let icon = match r.name.as_str() {
                "docker" => icons::DOCKER,
                "maven" => icons::MAVEN,
                "npm" => icons::NPM,
                "cargo" => icons::CARGO,
                "pypi" => icons::PYPI,
                _ => icons::DOCKER,
            };
            let display_name = match r.name.as_str() {
                "docker" => "Docker",
                "maven" => "Maven",
                "npm" => "npm",
                "cargo" => "Cargo",
                "pypi" => "PyPI",
                _ => &r.name,
            };
            render_registry_card(
                display_name,
                icon,
                r.artifact_count,
                r.downloads,
                r.uploads,
                r.size_bytes,
                &format!("/ui/{}", r.name),
                t,
            )
        })
        .collect();

    // Render mount points
    let mount_data: Vec<(String, String, Option<String>)> = data
        .mount_points
        .iter()
        .map(|m| {
            (
                m.registry.clone(),
                m.mount_path.clone(),
                m.proxy_upstream.clone(),
            )
        })
        .collect();
    let mount_points = render_mount_points_table(&mount_data, t);

    // Render activity log
    let activity_rows: String = if data.activity.is_empty() {
        format!(
            r##"<tr><td colspan="5" class="py-8 text-center text-slate-500">{}</td></tr>"##,
            t.no_activity
        )
    } else {
        data.activity
            .iter()
            .map(|entry| {
                let time_ago = format_relative_time(&entry.timestamp);
                render_activity_row(
                    &time_ago,
                    &entry.action.to_string(),
                    &entry.artifact,
                    &entry.registry,
                    &entry.source,
                )
            })
            .collect()
    };
    let activity_log = render_activity_log(&activity_rows, t);

    // Format uptime
    let hours = data.uptime_seconds / 3600;
    let mins = (data.uptime_seconds % 3600) / 60;
    let uptime_str = format!("{}h {}m", hours, mins);

    // Render bragging footer
    let bragging_footer = render_bragging_footer(lang);

/// Renders the main dashboard page
pub fn render_dashboard(stats: &RegistryStats) -> String {
    let content = format!(
        r##"
<div class="mb-6">
    <div class="flex items-center justify-between">
        <div>
            <h1 class="text-2xl font-bold text-slate-200 mb-1">{}</h1>
            <p class="text-slate-400">{}</p>
        </div>
        <div class="text-right">
            <div class="text-sm text-slate-500">{}</div>
            <div id="uptime" class="text-lg font-semibold text-slate-300">{}</div>
        </div>
<div class="mb-8">
    <h1 class="text-2xl font-bold text-slate-800 mb-2">Dashboard</h1>
    <p class="text-slate-500">Overview of all registries</p>
</div>

<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-5 gap-6 mb-8">
    {}
    {}
    {}
    {}
    {}
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-4">Quick Links</h2>
    <div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
        <a href="/ui/docker" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <svg class="w-8 h-8 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
            <div>
                <div class="font-medium text-slate-700">Docker Registry</div>
                <div class="text-sm text-slate-500">API: /v2/</div>
            </div>
        </a>
        <a href="/ui/maven" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <svg class="w-8 h-8 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
            <div>
                <div class="font-medium text-slate-700">Maven Repository</div>
                <div class="text-sm text-slate-500">API: /maven2/</div>
            </div>
        </a>
        <a href="/ui/npm" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <svg class="w-8 h-8 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
            <div>
                <div class="font-medium text-slate-700">npm Registry</div>
                <div class="text-sm text-slate-500">API: /npm/</div>
            </div>
        </a>
        <a href="/ui/cargo" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <svg class="w-8 h-8 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
            <div>
                <div class="font-medium text-slate-700">Cargo Registry</div>
                <div class="text-sm text-slate-500">API: /cargo/</div>
            </div>
        </a>
        <a href="/ui/pypi" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <svg class="w-8 h-8 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
            <div>
                <div class="font-medium text-slate-700">PyPI Repository</div>
                <div class="text-sm text-slate-500">API: /simple/</div>
            </div>
        </a>
    </div>
</div>

{}

<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-5 gap-4 mb-6">
    {}
</div>

<div class="grid grid-cols-1 lg:grid-cols-2 gap-6 mb-6">
    {}
    {}
</div>

{}
"##,
        t.dashboard_title,
        t.dashboard_subtitle,
        t.uptime,
        uptime_str,
        global_stats,
        registry_cards,
        mount_points,
        activity_log,
        bragging_footer,
        stat_card(
            "Docker",
            icons::DOCKER,
            stats.docker,
            "/ui/docker",
            "images"
        ),
        stat_card("Maven", icons::MAVEN, stats.maven, "/ui/maven", "artifacts"),
        stat_card("npm", icons::NPM, stats.npm, "/ui/npm", "packages"),
        stat_card("Cargo", icons::CARGO, stats.cargo, "/ui/cargo", "crates"),
        stat_card("PyPI", icons::PYPI, stats.pypi, "/ui/pypi", "packages"),
        // Quick Links icons
        icons::DOCKER,
        icons::MAVEN,
        icons::NPM,
        icons::CARGO,
        icons::PYPI,
    );

    let polling_script = render_polling_script();
    layout_dark(
        t.dashboard_title,
        &content,
        Some("dashboard"),
        &polling_script,
        lang,
    )
}

/// Format timestamp as relative time (e.g., "2 min ago")
fn format_relative_time(timestamp: &chrono::DateTime<chrono::Utc>) -> String {
    let now = chrono::Utc::now();
    let diff = now.signed_duration_since(*timestamp);

    if diff.num_seconds() < 60 {
        "just now".to_string()
    } else if diff.num_minutes() < 60 {
        let mins = diff.num_minutes();
        format!("{} min{} ago", mins, if mins == 1 { "" } else { "s" })
    } else if diff.num_hours() < 24 {
        let hours = diff.num_hours();
        format!("{} hour{} ago", hours, if hours == 1 { "" } else { "s" })
    } else {
        let days = diff.num_days();
        format!("{} day{} ago", days, if days == 1 { "" } else { "s" })
    }
    layout("Dashboard", &content, Some("dashboard"))
}

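Note that `format_relative_time` hardcodes English pluralization even though the `Translations` table defines `just_now`, `min_ago`, `mins_ago`, and friends. A localized variant would thread `Lang` through — a sketch under that assumption, not code from this diff:

    fn format_relative_time_i18n(ts: &chrono::DateTime<chrono::Utc>, lang: Lang) -> String {
        let t = get_translations(lang);
        let diff = chrono::Utc::now().signed_duration_since(*ts);
        if diff.num_seconds() < 60 {
            t.just_now.to_string()
        } else if diff.num_minutes() < 60 {
            let m = diff.num_minutes();
            format!("{} {}", m, if m == 1 { t.min_ago } else { t.mins_ago })
        } else if diff.num_hours() < 24 {
            let h = diff.num_hours();
            format!("{} {}", h, if h == 1 { t.hour_ago } else { t.hours_ago })
        } else {
            let d = diff.num_days();
            format!("{} {}", d, if d == 1 { t.day_ago } else { t.days_ago })
        }
    }
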
/// Renders a registry list page (docker, maven, npm, cargo, pypi)
#[allow(dead_code)]
pub fn render_registry_list(
    registry_type: &str,
    title: &str,
    repos: &[RepoInfo],
    lang: Lang,
) -> String {
    let t = get_translations(lang);
pub fn render_registry_list(registry_type: &str, title: &str, repos: &[RepoInfo]) -> String {
    let icon = get_registry_icon(registry_type);

    let table_rows = if repos.is_empty() {
        format!(
            r##"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
            <div class="text-4xl mb-2">📭</div>
            <div>{}</div>
            <div class="text-sm mt-1">{}</div>
        </td></tr>"##,
            t.no_repos_found, t.push_first_artifact
        )
    } else {
        repos
            .iter()
            .map(|repo| {
                let detail_url =
                    format!("/ui/{}/{}", registry_type, encode_uri_component(&repo.name));
                format!(
                    r##"
<tr class="hover:bg-slate-700 cursor-pointer" onclick="window.location='{}'">
    <td class="px-6 py-4">
        <a href="{}" class="text-blue-400 hover:text-blue-300 font-medium">{}</a>
    </td>
    <td class="px-6 py-4 text-slate-400">{}</td>
    <td class="px-6 py-4 text-slate-400">{}</td>
    <td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"##,
                    detail_url,
                    detail_url,
                    html_escape(&repo.name),
                    repo.versions,
                    format_size(repo.size),
                    &repo.updated
                )
            })
            .collect::<Vec<_>>()
            .join("")
    };

    let version_label = match registry_type {
        "docker" => t.tags,
        _ => t.versions,
    };

    let content = format!(
        r##"
<div class="mb-6 flex items-center justify-between">
    <div class="flex items-center">
        <svg class="w-10 h-10 mr-3 text-slate-400" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <div>
            <h1 class="text-2xl font-bold text-slate-200">{}</h1>
            <p class="text-slate-500">{} {}</p>
        </div>
    </div>
    <div class="flex items-center gap-4">
        <div class="relative">
            <input type="text"
                placeholder="{}"
                class="pl-10 pr-4 py-2 bg-slate-800 border border-slate-600 text-slate-200 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent placeholder-slate-500"
                hx-get="/api/ui/{}/search"
                hx-trigger="keyup changed delay:300ms"
                hx-target="#repo-table-body"
                name="q">
            <svg class="absolute left-3 top-2.5 h-5 w-5 text-slate-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M21 21l-6-6m2-5a7 7 0 11-14 0 7 7 0 0114 0z"/>
            </svg>
        </div>
    </div>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 overflow-hidden">
    <table class="w-full">
        <thead class="bg-slate-800 border-b border-slate-700">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
            </tr>
        </thead>
        <tbody id="repo-table-body" class="divide-y divide-slate-700">
            {}
        </tbody>
    </table>
</div>
"##,
        icon,
        title,
        repos.len(),
        t.repositories,
        t.search_placeholder,
        registry_type,
        t.name,
        version_label,
        t.size,
        t.updated,
        table_rows
    );

    layout_dark(title, &content, Some(registry_type), "", lang)
}

/// Renders a registry list page with pagination
pub fn render_registry_list_paginated(
    registry_type: &str,
    title: &str,
    repos: &[RepoInfo],
    page: usize,
    limit: usize,
    total: usize,
    lang: Lang,
) -> String {
    let t = get_translations(lang);
    let icon = get_registry_icon(registry_type);

    let table_rows = if repos.is_empty() && page == 1 {
        format!(
            r##"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
            <div class="text-4xl mb-2">📭</div>
            <div>{}</div>
            <div class="text-sm mt-1">{}</div>
        </td></tr>"##,
            t.no_repos_found, t.push_first_artifact
        )
    } else if repos.is_empty() {
        r##"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
            <div class="text-4xl mb-2">📭</div>
            <div>No more items on this page</div>
            <div>No repositories found</div>
            <div class="text-sm mt-1">Push your first artifact to see it here</div>
        </td></tr>"##
            .to_string()
    } else {
@@ -314,12 +100,12 @@ pub fn render_registry_list_paginated(
                format!("/ui/{}/{}", registry_type, encode_uri_component(&repo.name));
                format!(
                    r##"
<tr class="hover:bg-slate-700 cursor-pointer" onclick="window.location='{}'">
<tr class="hover:bg-slate-50 cursor-pointer" onclick="window.location='{}'">
    <td class="px-6 py-4">
        <a href="{}" class="text-blue-400 hover:text-blue-300 font-medium">{}</a>
        <a href="{}" class="text-blue-600 hover:text-blue-800 font-medium">{}</a>
    </td>
    <td class="px-6 py-4 text-slate-400">{}</td>
    <td class="px-6 py-4 text-slate-400">{}</td>
    <td class="px-6 py-4 text-slate-600">{}</td>
    <td class="px-6 py-4 text-slate-600">{}</td>
    <td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"##,
@@ -336,165 +122,66 @@ pub fn render_registry_list_paginated(
    };

    let version_label = match registry_type {
        "docker" => t.tags,
        _ => t.versions,
    };

    // Pagination
    let total_pages = total.div_ceil(limit);
    let start_item = if total == 0 {
        0
    } else {
        (page - 1) * limit + 1
    };
    let end_item = (start_item + repos.len()).saturating_sub(1);

    let pagination = if total_pages > 1 {
        let mut pages_html = String::new();

        // Previous button
        if page > 1 {
            pages_html.push_str(&format!(
                r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded bg-slate-700 hover:bg-slate-600 text-slate-300">←</a>"##,
                registry_type, page - 1, limit
            ));
        } else {
            pages_html.push_str(r##"<span class="px-3 py-1 rounded bg-slate-800 text-slate-600 cursor-not-allowed">←</span>"##);
        }

        // Page numbers (show max 7 pages around current)
        let start_page = if page <= 4 { 1 } else { page - 3 };
        let end_page = (start_page + 6).min(total_pages);

        if start_page > 1 {
            pages_html.push_str(&format!(
                r##"<a href="/ui/{}?page=1&limit={}" class="px-3 py-1 rounded hover:bg-slate-700 text-slate-400">1</a>"##,
                registry_type, limit
            ));
            if start_page > 2 {
                pages_html.push_str(r##"<span class="px-2 text-slate-600">...</span>"##);
            }
        }

        for p in start_page..=end_page {
            if p == page {
                pages_html.push_str(&format!(
                    r##"<span class="px-3 py-1 rounded bg-blue-600 text-white font-medium">{}</span>"##,
                    p
                ));
            } else {
                pages_html.push_str(&format!(
                    r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded hover:bg-slate-700 text-slate-400">{}</a>"##,
                    registry_type, p, limit, p
                ));
            }
        }

        if end_page < total_pages {
            if end_page < total_pages - 1 {
                pages_html.push_str(r##"<span class="px-2 text-slate-600">...</span>"##);
            }
            pages_html.push_str(&format!(
                r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded hover:bg-slate-700 text-slate-400">{}</a>"##,
                registry_type, total_pages, limit, total_pages
            ));
        }

        // Next button
        if page < total_pages {
            pages_html.push_str(&format!(
                r##"<a href="/ui/{}?page={}&limit={}" class="px-3 py-1 rounded bg-slate-700 hover:bg-slate-600 text-slate-300">→</a>"##,
                registry_type, page + 1, limit
            ));
        } else {
            pages_html.push_str(r##"<span class="px-3 py-1 rounded bg-slate-800 text-slate-600 cursor-not-allowed">→</span>"##);
        }

        format!(
            r##"
<div class="mt-4 flex items-center justify-between">
    <div class="text-sm text-slate-500">
        Showing {}-{} of {} items
    </div>
    <div class="flex items-center gap-1">
        {}
    </div>
</div>
"##,
            start_item, end_item, total, pages_html
        )
    } else if total > 0 {
        format!(
            r##"<div class="mt-4 text-sm text-slate-500">Showing all {} items</div>"##,
            total
        )
    } else {
        String::new()
        "docker" => "Tags",
        "maven" => "Versions",
        _ => "Versions",
    };

    let content = format!(
        r##"
<div class="mb-6 flex items-center justify-between">
    <div class="flex items-center">
        <svg class="w-10 h-10 mr-3 text-slate-400" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <svg class="w-10 h-10 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <div>
            <h1 class="text-2xl font-bold text-slate-200">{}</h1>
            <p class="text-slate-500">{} {}</p>
            <h1 class="text-2xl font-bold text-slate-800">{}</h1>
            <p class="text-slate-500">{} repositories</p>
        </div>
    </div>
    <div class="flex items-center gap-4">
        <div class="relative">
            <input type="text"
                placeholder="{}"
                class="pl-10 pr-4 py-2 bg-slate-800 border border-slate-600 text-slate-200 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent placeholder-slate-500"
                placeholder="Search repositories..."
                class="pl-10 pr-4 py-2 border border-slate-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent"
                hx-get="/api/ui/{}/search"
                hx-trigger="keyup changed delay:300ms"
                hx-target="#repo-table-body"
                name="q">
            <svg class="absolute left-3 top-2.5 h-5 w-5 text-slate-500" fill="none" stroke="currentColor" viewBox="0 0 24 24">
            <svg class="absolute left-3 top-2.5 h-5 w-5 text-slate-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M21 21l-6-6m2-5a7 7 0 11-14 0 7 7 0 0114 0z"/>
            </svg>
        </div>
    </div>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 overflow-hidden">
<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
    <table class="w-full">
        <thead class="bg-slate-800 border-b border-slate-700">
        <thead class="bg-slate-50 border-b border-slate-200">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Name</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">{}</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Updated</th>
            </tr>
        </thead>
        <tbody id="repo-table-body" class="divide-y divide-slate-700">
        <tbody id="repo-table-body" class="divide-y divide-slate-200">
            {}
        </tbody>
    </table>
</div>
{}
"##,
        icon,
        title,
        total,
        t.repositories,
        t.search_placeholder,
        repos.len(),
        registry_type,
        t.name,
        version_label,
        t.size,
        t.updated,
        table_rows,
        pagination
        table_rows
    );

    layout_dark(title, &content, Some(registry_type), "", lang)
    layout(title, &content, Some(registry_type))
}

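A quick worked check of the 7-page window in `render_registry_list_paginated` (illustrative numbers, derived directly from the code above):

    let (page, total_pages) = (10usize, 12usize);
    let start_page = if page <= 4 { 1 } else { page - 3 }; // 7
    let end_page = (start_page + 6).min(total_pages);      // 12
    assert_eq!((start_page, end_page), (7, 12));
    // Rendered bar: ← 1 ... 7 8 9 [10] 11 12 →
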
/// Renders Docker image detail page
pub fn render_docker_detail(name: &str, detail: &DockerDetail, lang: Lang) -> String {
    let _t = get_translations(lang);
pub fn render_docker_detail(name: &str, detail: &DockerDetail) -> String {
    let tags_rows = if detail.tags.is_empty() {
        r##"<tr><td colspan="3" class="px-6 py-8 text-center text-slate-500">No tags found</td></tr>"##.to_string()
    } else {
@@ -504,11 +191,11 @@ pub fn render_docker_detail(name: &str, detail: &DockerDetail, lang: Lang) -> St
            .map(|tag| {
                format!(
                    r##"
<tr class="hover:bg-slate-700">
<tr class="hover:bg-slate-50">
    <td class="px-6 py-4">
        <span class="font-mono text-sm bg-slate-700 text-slate-200 px-2 py-1 rounded">{}</span>
        <span class="font-mono text-sm bg-slate-100 px-2 py-1 rounded">{}</span>
    </td>
    <td class="px-6 py-4 text-slate-400">{}</td>
    <td class="px-6 py-4 text-slate-600">{}</td>
    <td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"##,
@@ -527,18 +214,18 @@ pub fn render_docker_detail(name: &str, detail: &DockerDetail, lang: Lang) -> St
        r##"
<div class="mb-6">
    <div class="flex items-center mb-2">
        <a href="/ui/docker" class="text-blue-400 hover:text-blue-300">Docker Registry</a>
        <span class="mx-2 text-slate-500">/</span>
        <span class="text-slate-200 font-medium">{}</span>
        <a href="/ui/docker" class="text-blue-600 hover:text-blue-800">Docker Registry</a>
        <span class="mx-2 text-slate-400">/</span>
        <span class="text-slate-800 font-medium">{}</span>
    </div>
    <div class="flex items-center">
        <svg class="w-10 h-10 mr-3 text-slate-400" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <h1 class="text-2xl font-bold text-slate-200">{}</h1>
        <svg class="w-10 h-10 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <h1 class="text-2xl font-bold text-slate-800">{}</h1>
    </div>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-200 mb-3">Pull Command</h2>
<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-3">Pull Command</h2>
    <div class="flex items-center bg-slate-900 text-green-400 rounded-lg p-4 font-mono text-sm">
        <code class="flex-1">{}</code>
        <button onclick="navigator.clipboard.writeText('{}')" class="ml-4 text-slate-400 hover:text-white transition-colors" title="Copy to clipboard">
@@ -549,19 +236,19 @@ pub fn render_docker_detail(name: &str, detail: &DockerDetail, lang: Lang) -> St
    </div>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-700">
        <h2 class="text-lg font-semibold text-slate-200">Tags ({} total)</h2>
<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-200">
        <h2 class="text-lg font-semibold text-slate-800">Tags ({} total)</h2>
    </div>
    <table class="w-full">
        <thead class="bg-slate-800 border-b border-slate-700">
        <thead class="bg-slate-50 border-b border-slate-200">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Tag</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Created</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Tag</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Created</th>
            </tr>
        </thead>
        <tbody class="divide-y divide-slate-700">
        <tbody class="divide-y divide-slate-200">
            {}
        </tbody>
    </table>
@@ -576,23 +263,11 @@ pub fn render_docker_detail(name: &str, detail: &DockerDetail, lang: Lang) -> St
        tags_rows
    );

    layout_dark(
        &format!("{} - Docker", name),
        &content,
        Some("docker"),
        "",
        lang,
    )
    layout(&format!("{} - Docker", name), &content, Some("docker"))
}

/// Renders package detail page (npm, cargo, pypi)
pub fn render_package_detail(
    registry_type: &str,
    name: &str,
    detail: &PackageDetail,
    lang: Lang,
) -> String {
    let _t = get_translations(lang);
pub fn render_package_detail(registry_type: &str, name: &str, detail: &PackageDetail) -> String {
    let icon = get_registry_icon(registry_type);
    let registry_title = get_registry_title(registry_type);

@@ -605,11 +280,11 @@ pub fn render_package_detail(
            .map(|v| {
                format!(
                    r##"
<tr class="hover:bg-slate-700">
<tr class="hover:bg-slate-50">
    <td class="px-6 py-4">
        <span class="font-mono text-sm bg-slate-700 text-slate-200 px-2 py-1 rounded">{}</span>
        <span class="font-mono text-sm bg-slate-100 px-2 py-1 rounded">{}</span>
    </td>
    <td class="px-6 py-4 text-slate-400">{}</td>
    <td class="px-6 py-4 text-slate-600">{}</td>
    <td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"##,
@@ -636,18 +311,18 @@ pub fn render_package_detail(
        r##"
<div class="mb-6">
    <div class="flex items-center mb-2">
        <a href="/ui/{}" class="text-blue-400 hover:text-blue-300">{}</a>
        <span class="mx-2 text-slate-500">/</span>
        <span class="text-slate-200 font-medium">{}</span>
        <a href="/ui/{}" class="text-blue-600 hover:text-blue-800">{}</a>
        <span class="mx-2 text-slate-400">/</span>
        <span class="text-slate-800 font-medium">{}</span>
    </div>
    <div class="flex items-center">
        <svg class="w-10 h-10 mr-3 text-slate-400" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <h1 class="text-2xl font-bold text-slate-200">{}</h1>
        <svg class="w-10 h-10 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <h1 class="text-2xl font-bold text-slate-800">{}</h1>
    </div>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-200 mb-3">Install Command</h2>
<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-3">Install Command</h2>
    <div class="flex items-center bg-slate-900 text-green-400 rounded-lg p-4 font-mono text-sm">
        <code class="flex-1">{}</code>
        <button onclick="navigator.clipboard.writeText('{}')" class="ml-4 text-slate-400 hover:text-white transition-colors" title="Copy to clipboard">
@@ -658,19 +333,19 @@ pub fn render_package_detail(
    </div>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-700">
        <h2 class="text-lg font-semibold text-slate-200">Versions ({} total)</h2>
<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-200">
        <h2 class="text-lg font-semibold text-slate-800">Versions ({} total)</h2>
    </div>
    <table class="w-full">
        <thead class="bg-slate-800 border-b border-slate-700">
        <thead class="bg-slate-50 border-b border-slate-200">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Version</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Published</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Version</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Published</th>
            </tr>
        </thead>
        <tbody class="divide-y divide-slate-700">
        <tbody class="divide-y divide-slate-200">
            {}
        </tbody>
    </table>
@@ -687,29 +362,26 @@ pub fn render_package_detail(
        versions_rows
    );

    layout_dark(
    layout(
        &format!("{} - {}", name, registry_title),
        &content,
        Some(registry_type),
        "",
        lang,
    )
}

/// Renders Maven artifact detail page
pub fn render_maven_detail(path: &str, detail: &MavenDetail, lang: Lang) -> String {
    let _t = get_translations(lang);
pub fn render_maven_detail(path: &str, detail: &MavenDetail) -> String {
    let artifact_rows = if detail.artifacts.is_empty() {
        r##"<tr><td colspan="2" class="px-6 py-8 text-center text-slate-500">No artifacts found</td></tr>"##.to_string()
    } else {
        detail.artifacts.iter().map(|a| {
            let download_url = format!("/maven2/{}/{}", path, a.filename);
            format!(r##"
<tr class="hover:bg-slate-700">
<tr class="hover:bg-slate-50">
    <td class="px-6 py-4">
        <a href="{}" class="text-blue-400 hover:text-blue-300 font-mono text-sm">{}</a>
        <a href="{}" class="text-blue-600 hover:text-blue-800 font-mono text-sm">{}</a>
    </td>
    <td class="px-6 py-4 text-slate-400">{}</td>
    <td class="px-6 py-4 text-slate-600">{}</td>
</tr>
"##, download_url, html_escape(&a.filename), format_size(a.size))
        }).collect::<Vec<_>>().join("")
@@ -738,33 +410,33 @@ pub fn render_maven_detail(path: &str, detail: &MavenDetail, lang: Lang) -> Stri
        r##"
<div class="mb-6">
    <div class="flex items-center mb-2">
        <a href="/ui/maven" class="text-blue-400 hover:text-blue-300">Maven Repository</a>
        <span class="mx-2 text-slate-500">/</span>
        <span class="text-slate-200 font-medium">{}</span>
        <a href="/ui/maven" class="text-blue-600 hover:text-blue-800">Maven Repository</a>
        <span class="mx-2 text-slate-400">/</span>
        <span class="text-slate-800 font-medium">{}</span>
    </div>
    <div class="flex items-center">
        <svg class="w-10 h-10 mr-3 text-slate-400" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <h1 class="text-2xl font-bold text-slate-200">{}</h1>
        <svg class="w-10 h-10 mr-3 text-slate-600" fill="currentColor" viewBox="0 0 24 24">{}</svg>
        <h1 class="text-2xl font-bold text-slate-800">{}</h1>
    </div>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-200 mb-3">Maven Dependency</h2>
<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-3">Maven Dependency</h2>
    <pre class="bg-slate-900 text-green-400 rounded-lg p-4 font-mono text-sm overflow-x-auto">{}</pre>
</div>

<div class="bg-[#1e293b] rounded-lg shadow-sm border border-slate-700 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-700">
        <h2 class="text-lg font-semibold text-slate-200">Artifacts ({} files)</h2>
<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-200">
        <h2 class="text-lg font-semibold text-slate-800">Artifacts ({} files)</h2>
    </div>
    <table class="w-full">
        <thead class="bg-slate-800 border-b border-slate-700">
        <thead class="bg-slate-50 border-b border-slate-200">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Filename</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-400 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Filename</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
            </tr>
        </thead>
        <tbody class="divide-y divide-slate-700">
        <tbody class="divide-y divide-slate-200">
            {}
        </tbody>
    </table>
@@ -778,13 +450,7 @@ pub fn render_maven_detail(path: &str, detail: &MavenDetail, lang: Lang) -> Stri
        artifact_rows
    );

    layout_dark(
        &format!("{} - Maven", path),
        &content,
        Some("maven"),
        "",
        lang,
    )
    layout(&format!("{} - Maven", path), &content, Some("maven"))
}

/// Returns SVG icon path for the registry type

@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

#![allow(dead_code)]
//! Input validation for artifact registry paths and identifiers
//!

@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use serde::{Deserialize, Serialize};
use std::fs;


@@ -1,6 +1,3 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

mod config;

use axum::extract::DefaultBodyLimit;
