mirror of
https://github.com/getnora-io/nora.git
synced 2026-04-13 02:40:31 +00:00
Compare commits
37 Commits
| Author | SHA1 | Date | Message |
|---|---|---|---|
| f82e252e39 | |||
| 7763b85b94 | |||
| 47a3690384 | |||
| a9125e6287 | |||
| 3f0b84c831 | |||
| ce30c5b57d | |||
| f76c6d6075 | |||
| e6bd9b6ead | |||
| cf55a19acf | |||
| e33da13dc7 | |||
| bbdefff07c | |||
| b29a0309d4 | |||
| 38003db6f8 | |||
| dab3ee805e | |||
| ac4020d34f | |||
| 5fc4237ac5 | |||
| ee4e01467a | |||
| 3265e217e7 | |||
| cf9feee5b2 | |||
| 0a97b00278 | |||
| d162e96841 | |||
| 4aa7529aa4 | |||
| 411bc75e5e | |||
| d2fec9ad15 | |||
| 00910dd69e | |||
| 4332b74636 | |||
| 86130a80ce | |||
| 2f86b4852a | |||
| 08eea07cfe | |||
| a13d7b8cfc | |||
| f1cda800a2 | |||
| da219dc794 | |||
| 1152308f6c | |||
| 6c53b2da84 | |||
| c7098a4aed | |||
| 937266a4c7 | |||
| 00fbd20112 |
7
.gitignore
vendored
7
.gitignore
vendored
@@ -5,3 +5,10 @@ data/
|
||||
.env.*
|
||||
*.log
|
||||
internal config
|
||||
|
||||
# Internal files
|
||||
SESSION*.md
|
||||
TODO.md
|
||||
ROADMAP*.md
|
||||
docs-site/
|
||||
docs/
|
||||
|
||||
226
CHANGELOG.md
226
CHANGELOG.md
@@ -4,18 +4,61 @@ All notable changes to NORA will be documented in this file.
|
||||
|
||||
---
|
||||
|
||||
## [0.3.0] - 2026-01-30
|
||||
## [0.2.18] - 2026-01-31
|
||||
|
||||
### Changed
|
||||
- Logo styling refinements
|
||||
|
||||
---
|
||||
|
||||
## [0.2.17] - 2026-01-31
|
||||
|
||||
### Added
|
||||
- Copyright headers to all source files (Volkov Pavel | DevITWay)
|
||||
- SPDX-License-Identifier: MIT in all .rs files
|
||||
|
||||
---
|
||||
|
||||
## [0.2.16] - 2026-01-31
|
||||
|
||||
### Changed
|
||||
- N○RA branding: stylized O logo across dashboard
|
||||
- Fixed O letter alignment in logo
|
||||
|
||||
---
|
||||
|
||||
## [0.2.15] - 2026-01-31
|
||||
|
||||
### Fixed
|
||||
- Code formatting (cargo fmt)
|
||||
|
||||
---
|
||||
|
||||
## [0.2.14] - 2026-01-31
|
||||
|
||||
### Fixed
|
||||
- Docker dashboard now shows actual image size from manifest layers (config + layers sum)
|
||||
- Previously showed only manifest file size (~500 B instead of actual image size)
|
||||
|
||||
---
|
||||
|
||||
## [0.2.13] - 2026-01-31
|
||||
|
||||
### Fixed
|
||||
- npm dashboard now shows correct version count and package sizes
|
||||
- Parses metadata.json for versions, dist.unpackedSize, and time.modified
|
||||
- Previously showed 0 versions / 0 B for all packages
|
||||
|
||||
---
|
||||
|
||||
## [0.2.12] - 2026-01-30
|
||||
|
||||
### Added
|
||||
|
||||
#### Configurable Rate Limiting
|
||||
- Rate limits now configurable via `config.toml` and environment variables
|
||||
- New config section `[rate_limit]` with 6 parameters:
|
||||
- `auth_rps` / `auth_burst` - Authentication endpoints (brute-force protection)
|
||||
- `upload_rps` / `upload_burst` - Upload endpoints (Docker push, etc.)
|
||||
- `general_rps` / `general_burst` - General API endpoints
|
||||
- New config section `[rate_limit]` with parameters: `auth_rps`, `auth_burst`, `upload_rps`, `upload_burst`, `general_rps`, `general_burst`
|
||||
- Environment variables: `NORA_RATE_LIMIT_{AUTH|UPLOAD|GENERAL}_{RPS|BURST}`
|
||||
- Rate limit configuration logged at startup
|
||||
|
||||
#### Secrets Provider Architecture
|
||||
- Trait-based secrets management (`SecretsProvider` trait)
|
||||
@@ -23,14 +66,78 @@ All notable changes to NORA will be documented in this file.
|
||||
- Protected secrets with `zeroize` (memory zeroed on drop)
|
||||
- Redacted Debug impl prevents secret leakage in logs
|
||||
- New config section `[secrets]` with `provider` and `clear_env` options
|
||||
- Foundation for future AWS Secrets Manager, Vault, K8s integration
|
||||
|
||||
#### Docker Image Metadata
|
||||
- Support for image metadata retrieval
|
||||
|
||||
#### Documentation
|
||||
- Bilingual onboarding guide (EN/RU)
|
||||
|
||||
---
|
||||
|
||||
## [0.2.11] - 2026-01-26
|
||||
|
||||
### Added
|
||||
- Internationalization (i18n) support
|
||||
- PyPI registry proxy
|
||||
- UI improvements
|
||||
|
||||
---
|
||||
|
||||
## [0.2.10] - 2026-01-26
|
||||
|
||||
### Changed
|
||||
- Rate limiting functions now accept `&RateLimitConfig` parameter
|
||||
- Improved error messages with `.expect()` instead of `.unwrap()`
|
||||
- Dark theme applied to all UI pages
|
||||
|
||||
---
|
||||
|
||||
## [0.2.9] - 2026-01-26
|
||||
|
||||
### Changed
|
||||
- Version bump release
|
||||
|
||||
---
|
||||
|
||||
## [0.2.8] - 2026-01-26
|
||||
|
||||
### Added
|
||||
- Dashboard endpoint added to OpenAPI documentation
|
||||
|
||||
---
|
||||
|
||||
## [0.2.7] - 2026-01-26
|
||||
|
||||
### Added
|
||||
- Dynamic version display in UI sidebar
|
||||
|
||||
---
|
||||
|
||||
## [0.2.6] - 2026-01-26
|
||||
|
||||
### Added
|
||||
|
||||
#### Dashboard Metrics
|
||||
- Global stats panel: downloads, uploads, artifacts, cache hit rate, storage
|
||||
- Extended registry cards with artifact count, size, counters
|
||||
- Activity log (last 20 events)
|
||||
|
||||
#### UI
|
||||
- Dark theme (bg: #0f172a, cards: #1e293b)
|
||||
|
||||
---
|
||||
|
||||
## [0.2.5] - 2026-01-26
|
||||
|
||||
### Fixed
|
||||
- Rate limiting was hardcoded in v0.2.0, now user-configurable
|
||||
- Docker push/pull: added PATCH endpoint for chunked uploads
|
||||
|
||||
---
|
||||
|
||||
## [0.2.4] - 2026-01-26
|
||||
|
||||
### Fixed
|
||||
- Rate limiting: health/metrics endpoints now exempt
|
||||
- Increased upload rate limits for Docker parallel requests
|
||||
|
||||
---
|
||||
|
||||
@@ -82,7 +189,6 @@ All notable changes to NORA will be documented in this file.
|
||||
- JSON error responses with request_id support
|
||||
|
||||
### Changed
|
||||
|
||||
- `StorageError` now uses `thiserror` derive macro
|
||||
- `TokenError` now uses `thiserror` derive macro
|
||||
- Storage wrapper validates keys before delegating to backend
|
||||
@@ -90,7 +196,6 @@ All notable changes to NORA will be documented in this file.
|
||||
- Body size limit set to 100MB default via `DefaultBodyLimit`
|
||||
|
||||
### Dependencies Added
|
||||
|
||||
- `thiserror = "2"` - typed error handling
|
||||
- `tower_governor = "0.8"` - rate limiting
|
||||
- `governor = "0.10"` - rate limiting backend
|
||||
@@ -98,7 +203,6 @@ All notable changes to NORA will be documented in this file.
|
||||
- `wiremock = "0.6"` (dev) - HTTP mocking for S3 tests
|
||||
|
||||
### Files Added
|
||||
|
||||
- `src/validation.rs` - input validation module
|
||||
- `src/migrate.rs` - storage migration module
|
||||
- `src/error.rs` - application error types
|
||||
@@ -110,7 +214,6 @@ All notable changes to NORA will be documented in this file.
|
||||
## [0.1.0] - 2026-01-24
|
||||
|
||||
### Added
|
||||
|
||||
- Multi-protocol support: Docker Registry v2, Maven, npm, Cargo, PyPI
|
||||
- Web UI dashboard
|
||||
- Swagger UI (`/api-docs`)
|
||||
@@ -125,7 +228,6 @@ All notable changes to NORA will be documented in this file.
|
||||
- Graceful shutdown (SIGTERM/SIGINT)
|
||||
- Backup/restore commands
|
||||
|
||||
---
|
||||
---
|
||||
|
||||
# Журнал изменений (RU)
|
||||
@@ -134,6 +236,96 @@ All notable changes to NORA will be documented in this file.
|
||||
|
||||
---
|
||||
|
||||
## [0.2.12] - 2026-01-30
|
||||
|
||||
### Добавлено
|
||||
|
||||
#### Настраиваемый Rate Limiting
|
||||
- Rate limits настраиваются через `config.toml` и переменные окружения
|
||||
- Новая секция `[rate_limit]` с параметрами: `auth_rps`, `auth_burst`, `upload_rps`, `upload_burst`, `general_rps`, `general_burst`
|
||||
- Переменные окружения: `NORA_RATE_LIMIT_{AUTH|UPLOAD|GENERAL}_{RPS|BURST}`
|
||||
|
||||
#### Архитектура Secrets Provider
|
||||
- Trait-based управление секретами (`SecretsProvider` trait)
|
||||
- ENV provider по умолчанию (12-Factor App паттерн)
|
||||
- Защищённые секреты с `zeroize` (память обнуляется при drop)
|
||||
- Redacted Debug impl предотвращает утечку секретов в логи
|
||||
- Новая секция `[secrets]` с опциями `provider` и `clear_env`
|
||||
|
||||
#### Docker Image Metadata
|
||||
- Поддержка получения метаданных образов
|
||||
|
||||
#### Документация
|
||||
- Двуязычный onboarding guide (EN/RU)
|
||||
|
||||
---
|
||||
|
||||
## [0.2.11] - 2026-01-26
|
||||
|
||||
### Добавлено
|
||||
- Поддержка интернационализации (i18n)
|
||||
- PyPI registry proxy
|
||||
- Улучшения UI
|
||||
|
||||
---
|
||||
|
||||
## [0.2.10] - 2026-01-26
|
||||
|
||||
### Изменено
|
||||
- Тёмная тема применена ко всем страницам UI
|
||||
|
||||
---
|
||||
|
||||
## [0.2.9] - 2026-01-26
|
||||
|
||||
### Изменено
|
||||
- Релиз с обновлением версии
|
||||
|
||||
---
|
||||
|
||||
## [0.2.8] - 2026-01-26
|
||||
|
||||
### Добавлено
|
||||
- Dashboard endpoint добавлен в OpenAPI документацию
|
||||
|
||||
---
|
||||
|
||||
## [0.2.7] - 2026-01-26
|
||||
|
||||
### Добавлено
|
||||
- Динамическое отображение версии в сайдбаре UI
|
||||
|
||||
---
|
||||
|
||||
## [0.2.6] - 2026-01-26
|
||||
|
||||
### Добавлено
|
||||
|
||||
#### Dashboard Metrics
|
||||
- Глобальная панель статистики: downloads, uploads, artifacts, cache hit rate, storage
|
||||
- Расширенные карточки реестров с количеством артефактов, размером, счётчиками
|
||||
- Лог активности (последние 20 событий)
|
||||
|
||||
#### UI
|
||||
- Тёмная тема (bg: #0f172a, cards: #1e293b)
|
||||
|
||||
---
|
||||
|
||||
## [0.2.5] - 2026-01-26
|
||||
|
||||
### Исправлено
|
||||
- Docker push/pull: добавлен PATCH endpoint для chunked uploads
|
||||
|
||||
---
|
||||
|
||||
## [0.2.4] - 2026-01-26
|
||||
|
||||
### Исправлено
|
||||
- Rate limiting: health/metrics endpoints теперь исключены
|
||||
- Увеличены лимиты upload для параллельных Docker запросов
|
||||
|
||||
---
|
||||
|
||||
## [0.2.0] - 2026-01-25
|
||||
|
||||
### Добавлено
|
||||
@@ -182,7 +374,6 @@ All notable changes to NORA will be documented in this file.
|
||||
- JSON-ответы об ошибках с поддержкой request_id
|
||||
|
||||
### Изменено
|
||||
|
||||
- `StorageError` теперь использует макрос `thiserror`
|
||||
- `TokenError` теперь использует макрос `thiserror`
|
||||
- Storage wrapper валидирует ключи перед делегированием backend
|
||||
@@ -190,7 +381,6 @@ All notable changes to NORA will be documented in this file.
|
||||
- Лимит размера body установлен в 100MB через `DefaultBodyLimit`
|
||||
|
||||
### Добавлены зависимости
|
||||
|
||||
- `thiserror = "2"` - типизированная обработка ошибок
|
||||
- `tower_governor = "0.8"` - rate limiting
|
||||
- `governor = "0.10"` - backend для rate limiting
|
||||
@@ -198,7 +388,6 @@ All notable changes to NORA will be documented in this file.
|
||||
- `wiremock = "0.6"` (dev) - HTTP-мокирование для S3 тестов
|
||||
|
||||
### Добавлены файлы
|
||||
|
||||
- `src/validation.rs` - модуль валидации ввода
|
||||
- `src/migrate.rs` - модуль миграции хранилища
|
||||
- `src/error.rs` - типы ошибок приложения
|
||||
@@ -210,7 +399,6 @@ All notable changes to NORA will be documented in this file.
|
||||
## [0.1.0] - 2026-01-24
|
||||
|
||||
### Добавлено
|
||||
|
||||
- Мульти-протокольная поддержка: Docker Registry v2, Maven, npm, Cargo, PyPI
|
||||
- Web UI дашборд
|
||||
- Swagger UI (`/api-docs`)
|
||||
|
||||
128
CONTRIBUTING.md
128
CONTRIBUTING.md
@@ -1,100 +1,68 @@
|
||||
# Contributing to NORA
|
||||
|
||||
Thanks for your interest in contributing to NORA!
|
||||
Thank you for your interest in contributing to NORA!
|
||||
|
||||
## Getting Started
|
||||
|
||||
1. **Fork** the repository
|
||||
2. **Clone** your fork:
|
||||
```bash
|
||||
git clone https://github.com/your-username/nora.git
|
||||
cd nora
|
||||
```
|
||||
3. **Create a branch**:
|
||||
```bash
|
||||
git checkout -b feature/your-feature-name
|
||||
```
|
||||
1. Fork the repository
|
||||
2. Clone your fork: `git clone https://github.com/YOUR_USERNAME/nora.git`
|
||||
3. Create a branch: `git checkout -b feature/your-feature`
|
||||
|
||||
## Development Setup
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Rust 1.75+ (`rustup update`)
|
||||
- Docker (for testing)
|
||||
- Git
|
||||
|
||||
### Build
|
||||
|
||||
```bash
|
||||
# Install Rust (if needed)
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
|
||||
|
||||
# Build
|
||||
cargo build
|
||||
```
|
||||
|
||||
### Run
|
||||
|
||||
```bash
|
||||
cargo run --bin nora
|
||||
```
|
||||
|
||||
### Test
|
||||
|
||||
```bash
|
||||
# Run tests
|
||||
cargo test
|
||||
cargo clippy
|
||||
cargo fmt --check
|
||||
|
||||
# Run locally
|
||||
cargo run --bin nora -- serve
|
||||
```
|
||||
|
||||
## Making Changes
|
||||
|
||||
1. **Write code** following Rust conventions
|
||||
2. **Add tests** for new features
|
||||
3. **Update docs** if needed
|
||||
4. **Run checks**:
|
||||
```bash
|
||||
cargo fmt
|
||||
cargo clippy -- -D warnings
|
||||
cargo test
|
||||
```
|
||||
|
||||
## Commit Messages
|
||||
|
||||
Follow [Conventional Commits](https://www.conventionalcommits.org/):
|
||||
|
||||
- `feat:` - New feature
|
||||
- `fix:` - Bug fix
|
||||
- `docs:` - Documentation
|
||||
- `test:` - Tests
|
||||
- `refactor:` - Code refactoring
|
||||
- `chore:` - Maintenance
|
||||
|
||||
Example:
|
||||
```bash
|
||||
git commit -m "feat: add S3 storage migration"
|
||||
```
|
||||
|
||||
## Pull Request Process
|
||||
|
||||
1. **Push** to your fork:
|
||||
```bash
|
||||
git push origin feature/your-feature-name
|
||||
```
|
||||
|
||||
2. **Open a Pull Request** on GitHub
|
||||
|
||||
3. **Wait for review** - maintainers will review your PR
|
||||
|
||||
## Code Style
|
||||
|
||||
- Follow Rust conventions
|
||||
- Use `cargo fmt` for formatting
|
||||
- Pass `cargo clippy` with no warnings
|
||||
- Write meaningful commit messages
|
||||
- Run `cargo fmt` before committing
|
||||
- Run `cargo clippy` and fix warnings
|
||||
- Follow Rust naming conventions
|
||||
|
||||
## Questions?
|
||||
## Pull Request Process
|
||||
|
||||
- Open an [Issue](https://github.com/getnora-io/nora/issues)
|
||||
- Ask in [Discussions](https://github.com/getnora-io/nora/discussions)
|
||||
- Reach out on [Telegram](https://t.me/DevITWay)
|
||||
1. Update documentation if needed
|
||||
2. Add tests for new features
|
||||
3. Ensure all tests pass: `cargo test`
|
||||
4. Ensure code is formatted: `cargo fmt --check`
|
||||
5. Ensure no clippy warnings: `cargo clippy`
|
||||
|
||||
---
|
||||
## Commit Messages
|
||||
|
||||
Built with love by the NORA community
|
||||
Use conventional commits:
|
||||
|
||||
- `feat:` - new feature
|
||||
- `fix:` - bug fix
|
||||
- `docs:` - documentation
|
||||
- `style:` - formatting
|
||||
- `refactor:` - code refactoring
|
||||
- `test:` - adding tests
|
||||
- `chore:` - maintenance
|
||||
|
||||
Example: `feat: add OAuth2 authentication`
|
||||
|
||||
## Reporting Issues
|
||||
|
||||
- Use GitHub Issues
|
||||
- Include steps to reproduce
|
||||
- Include NORA version and OS
|
||||
|
||||
## License
|
||||
|
||||
By contributing, you agree that your contributions will be licensed under the MIT License.
|
||||
|
||||
## Contact
|
||||
|
||||
- Telegram: [@DevITWay](https://t.me/DevITWay)
|
||||
- GitHub Issues: [getnora-io/nora](https://github.com/getnora-io/nora/issues)
|
||||
|
||||
58
Cargo.lock
generated
58
Cargo.lock
generated
@@ -240,9 +240,9 @@ checksum = "b35204fbdc0b3f4446b89fc1ac2cf84a8a68971995d0bf2e925ec7cd960f9cb3"
|
||||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.54"
|
||||
version = "1.2.55"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6354c81bbfd62d9cfa9cb3c773c2b7b2a3a482d569de977fd0e961f6e7c00583"
|
||||
checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29"
|
||||
dependencies = [
|
||||
"find-msvc-tools",
|
||||
"shlex",
|
||||
@@ -286,9 +286,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap"
|
||||
version = "4.5.54"
|
||||
version = "4.5.56"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c6e6ff9dcd79cff5cd969a17a545d79e84ab086e444102a591e288a8aa3ce394"
|
||||
checksum = "a75ca66430e33a14957acc24c5077b503e7d374151b2b4b3a10c83b4ceb4be0e"
|
||||
dependencies = [
|
||||
"clap_builder",
|
||||
"clap_derive",
|
||||
@@ -296,9 +296,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap_builder"
|
||||
version = "4.5.54"
|
||||
version = "4.5.56"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fa42cf4d2b7a41bc8f663a7cab4031ebafa1bf3875705bfaf8466dc60ab52c00"
|
||||
checksum = "793207c7fa6300a0608d1080b858e5fdbe713cdc1c8db9fb17777d8a13e63df0"
|
||||
dependencies = [
|
||||
"anstream",
|
||||
"anstyle",
|
||||
@@ -308,9 +308,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "clap_derive"
|
||||
version = "4.5.49"
|
||||
version = "4.5.55"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671"
|
||||
checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5"
|
||||
dependencies = [
|
||||
"heck",
|
||||
"proc-macro2",
|
||||
@@ -434,6 +434,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
|
||||
dependencies = [
|
||||
"block-buffer",
|
||||
"crypto-common",
|
||||
"subtle",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -488,9 +489,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "find-msvc-tools"
|
||||
version = "0.1.8"
|
||||
version = "0.1.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8591b0bcc8a98a64310a2fae1bb3e9b8564dd10e381e6e28010fde8e8e8568db"
|
||||
checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582"
|
||||
|
||||
[[package]]
|
||||
name = "flate2"
|
||||
@@ -737,6 +738,21 @@ version = "0.5.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
|
||||
|
||||
[[package]]
|
||||
name = "hex"
|
||||
version = "0.4.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
|
||||
|
||||
[[package]]
|
||||
name = "hmac"
|
||||
version = "0.12.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
|
||||
dependencies = [
|
||||
"digest",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "http"
|
||||
version = "1.4.0"
|
||||
@@ -861,9 +877,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "iana-time-zone"
|
||||
version = "0.1.64"
|
||||
version = "0.1.65"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb"
|
||||
checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470"
|
||||
dependencies = [
|
||||
"android_system_properties",
|
||||
"core-foundation-sys",
|
||||
@@ -1209,6 +1225,8 @@ dependencies = [
|
||||
"clap",
|
||||
"flate2",
|
||||
"governor",
|
||||
"hex",
|
||||
"hmac",
|
||||
"httpdate",
|
||||
"indicatif",
|
||||
"lazy_static",
|
||||
@@ -2162,9 +2180,9 @@ checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
|
||||
|
||||
[[package]]
|
||||
name = "tonic"
|
||||
version = "0.14.2"
|
||||
version = "0.14.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203"
|
||||
checksum = "a286e33f82f8a1ee2df63f4fa35c0becf4a85a0cb03091a15fd7bf0b402dc94a"
|
||||
dependencies = [
|
||||
"async-trait",
|
||||
"axum",
|
||||
@@ -2912,18 +2930,18 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.33"
|
||||
version = "0.8.37"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd"
|
||||
checksum = "7456cf00f0685ad319c5b1693f291a650eaf345e941d082fc4e03df8a03996ac"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.33"
|
||||
version = "0.8.37"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1"
|
||||
checksum = "1328722bbf2115db7e19d69ebcc15e795719e2d66b60827c6a69a117365e37a0"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@@ -3026,9 +3044,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3"
|
||||
|
||||
[[package]]
|
||||
name = "zmij"
|
||||
version = "1.0.16"
|
||||
version = "1.0.17"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dfcd145825aace48cff44a8844de64bf75feec3080e0aa5cdbde72961ae51a65"
|
||||
checksum = "02aae0f83f69aafc94776e879363e9771d7ecbffe2c7fbb6c14c5e00dfe88439"
|
||||
|
||||
[[package]]
|
||||
name = "zopfli"
|
||||
|
||||
@@ -7,7 +7,7 @@ members = [
|
||||
]
|
||||
|
||||
[workspace.package]
|
||||
version = "0.2.12"
|
||||
version = "0.2.18"
|
||||
edition = "2021"
|
||||
license = "MIT"
|
||||
authors = ["DevITWay <devitway@gmail.com>"]
|
||||
@@ -24,3 +24,5 @@ tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
|
||||
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
|
||||
sha2 = "0.10"
|
||||
async-trait = "0.1"
|
||||
hmac = "0.12"
|
||||
hex = "0.4"
|
||||
|
||||
2
LICENSE
2
LICENSE
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2026 DevITWay
|
||||
Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# NORA
|
||||
# 🐿️ N○RA
|
||||
|
||||
[](LICENSE)
|
||||
[](https://t.me/DevITWay)
|
||||
@@ -40,7 +40,7 @@ Fast. Organized. Feel at Home.
|
||||
### Docker (Recommended)
|
||||
|
||||
```bash
|
||||
docker run -d -p 4000:4000 -v nora-data:/data getnora/nora
|
||||
docker run -d -p 4000:4000 -v nora-data:/data ghcr.io/getnora-io/nora:latest
|
||||
```
|
||||
|
||||
### From Source
|
||||
@@ -191,4 +191,4 @@ Copyright (c) 2026 DevITWay
|
||||
|
||||
---
|
||||
|
||||
**NORA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)
|
||||
**🐿️ N○RA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)
|
||||
|
||||
53
SECURITY.md
Normal file
53
SECURITY.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 0.2.x | :white_check_mark: |
|
||||
| < 0.2 | :x: |
|
||||
|
||||
## Reporting a Vulnerability
|
||||
|
||||
**Please do not report security vulnerabilities through public GitHub issues.**
|
||||
|
||||
Instead, please report them via:
|
||||
|
||||
1. **Email:** devitway@gmail.com
|
||||
2. **Telegram:** [@DevITWay](https://t.me/DevITWay) (private message)
|
||||
|
||||
### What to Include
|
||||
|
||||
- Type of vulnerability
|
||||
- Steps to reproduce
|
||||
- Potential impact
|
||||
- Suggested fix (if any)
|
||||
|
||||
### Response Timeline
|
||||
|
||||
- **Initial response:** within 48 hours
|
||||
- **Status update:** within 7 days
|
||||
- **Fix timeline:** depends on severity
|
||||
|
||||
### Severity Levels
|
||||
|
||||
| Severity | Description | Response |
|
||||
|----------|-------------|----------|
|
||||
| Critical | Remote code execution, auth bypass | Immediate fix |
|
||||
| High | Data exposure, privilege escalation | Fix within 7 days |
|
||||
| Medium | Limited impact vulnerabilities | Fix in next release |
|
||||
| Low | Minor issues | Scheduled fix |
|
||||
|
||||
## Security Best Practices
|
||||
|
||||
When deploying NORA:
|
||||
|
||||
1. **Enable authentication** - Set `NORA_AUTH_ENABLED=true`
|
||||
2. **Use HTTPS** - Put NORA behind a reverse proxy with TLS
|
||||
3. **Limit network access** - Use firewall rules
|
||||
4. **Regular updates** - Keep NORA updated to latest version
|
||||
5. **Secure credentials** - Use strong passwords, rotate tokens
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities.
|
||||
152
SESSION_NOTES.md
152
SESSION_NOTES.md
@@ -1,152 +0,0 @@
|
||||
# NORA Development Session Notes
|
||||
|
||||
---
|
||||
|
||||
## 2026-01-26 - Dashboard Expansion
|
||||
|
||||
### Iteration 1: Planning & Exploration
|
||||
- Received detailed implementation plan for dashboard expansion
|
||||
- Explored codebase structure using Task agent
|
||||
- Identified key files to modify:
|
||||
- `main.rs` - AppState
|
||||
- `ui/api.rs`, `ui/mod.rs`, `ui/components.rs`, `ui/templates.rs`
|
||||
- `registry/docker.rs`, `npm.rs`, `maven.rs`, `cargo_registry.rs`
|
||||
|
||||
### Iteration 2: Infrastructure (Phase 1)
|
||||
- Created `src/dashboard_metrics.rs`:
|
||||
- `DashboardMetrics` struct with AtomicU64 counters
|
||||
- Per-registry tracking (docker, npm, maven, cargo, pypi)
|
||||
- `record_download()`, `record_upload()`, `record_cache_hit/miss()`
|
||||
- `cache_hit_rate()` calculation
|
||||
|
||||
- Created `src/activity_log.rs`:
|
||||
- `ActionType` enum: Pull, Push, CacheHit, ProxyFetch
|
||||
- `ActivityEntry` struct with timestamp, action, artifact, registry, source
|
||||
- `ActivityLog` with RwLock<VecDeque> (bounded to 50 entries)
|
||||
|
||||
### Iteration 3: AppState Update (Phase 2)
|
||||
- Updated `main.rs`:
|
||||
- Added `mod activity_log` and `mod dashboard_metrics`
|
||||
- Extended `AppState` with `metrics: DashboardMetrics` and `activity: ActivityLog`
|
||||
- Initialized in `run_server()`
|
||||
|
||||
### Iteration 4: API Endpoint (Phase 3)
|
||||
- Updated `ui/api.rs`:
|
||||
- Added structs: `DashboardResponse`, `GlobalStats`, `RegistryCardStats`, `MountPoint`
|
||||
- Implemented `api_dashboard()` - aggregates all metrics, storage stats, activity
|
||||
|
||||
- Updated `ui/mod.rs`:
|
||||
- Added route `/api/ui/dashboard`
|
||||
- Modified `dashboard()` handler to use new response
|
||||
|
||||
### Iteration 5: Dark Theme UI (Phase 4)
|
||||
- Updated `ui/components.rs` with ~400 new lines:
|
||||
- `layout_dark()` - dark theme wrapper (#0f172a background)
|
||||
- `sidebar_dark()`, `header_dark()` - dark theme navigation
|
||||
- `render_global_stats()` - 5-column stats grid
|
||||
- `render_registry_card()` - extended card with metrics
|
||||
- `render_mount_points_table()` - registry paths and proxies
|
||||
- `render_activity_row()`, `render_activity_log()` - activity display
|
||||
- `render_polling_script()` - 5-second auto-refresh JS
|
||||
|
||||
### Iteration 6: Dashboard Template (Phase 5)
|
||||
- Updated `ui/templates.rs`:
|
||||
- Refactored `render_dashboard()` to accept `DashboardResponse`
|
||||
- Added uptime display, global stats, registry cards grid
|
||||
- Added mount points table and activity log
|
||||
- Added `format_relative_time()` helper
|
||||
|
||||
### Iteration 7: Registry Instrumentation (Phase 6)
|
||||
- `registry/docker.rs`:
|
||||
- `download_blob()` - record download + cache hit + activity
|
||||
- `get_manifest()` - record download + cache hit + activity
|
||||
- `upload_blob()` - record upload + activity
|
||||
- `put_manifest()` - record upload + activity
|
||||
|
||||
- `registry/npm.rs`:
|
||||
- Cache hit tracking for local storage
|
||||
- Cache miss + proxy fetch tracking
|
||||
|
||||
- `registry/maven.rs`:
|
||||
- `download()` - cache hit/miss + activity
|
||||
- `upload()` - record upload + activity
|
||||
|
||||
- `registry/cargo_registry.rs`:
|
||||
- `download()` - record download + activity
|
||||
|
||||
### Iteration 8: Build & Test
|
||||
- `cargo build` - compiled successfully with minor warnings
|
||||
- Fixed warnings:
|
||||
- Removed unused `RegistryStats` import
|
||||
- Added `#[allow(dead_code)]` to `stat_card()`
|
||||
- `cargo test` - all 75 tests passed
|
||||
|
||||
### Iteration 9: Server Testing
|
||||
- Started server: `cargo run --release --bin nora`
|
||||
- Tested endpoints:
|
||||
```
|
||||
GET /health - OK
|
||||
GET /api/ui/dashboard - returns full metrics JSON
|
||||
GET /ui/ - dark theme dashboard HTML
|
||||
GET /v2/test/manifests/v1 - triggered Docker metrics
|
||||
GET /npm/lodash/-/lodash-4.17.21.tgz - triggered npm proxy metrics
|
||||
```
|
||||
- Verified metrics tracking:
|
||||
- Downloads: 3 (2 Docker + 1 npm)
|
||||
- Cache hit rate: 66.67%
|
||||
- Activity log populated with Pull, ProxyFetch events
|
||||
|
||||
### Iteration 10: Git Commit & Push
|
||||
- Staged 11 files (2 new, 9 modified)
|
||||
- Commit: `93f9655 Add dashboard metrics, activity log, and dark theme`
|
||||
- Pushed to `origin/main`
|
||||
|
||||
### Iteration 11: Documentation
|
||||
- Updated `TODO.md` with v0.2.1 section
|
||||
- Created this `SESSION_NOTES.md`
|
||||
|
||||
---
|
||||
|
||||
### Key Decisions Made
|
||||
1. **In-memory metrics** - AtomicU64 for thread-safety, reset on restart
|
||||
2. **Bounded activity log** - 50 entries max, oldest evicted
|
||||
3. **Polling over WebSocket** - simpler, 5-second interval sufficient
|
||||
4. **Dark theme only for dashboard** - registry list pages keep light theme
|
||||
|
||||
### Files Changed Summary
|
||||
```
|
||||
New:
|
||||
nora-registry/src/activity_log.rs
|
||||
nora-registry/src/dashboard_metrics.rs
|
||||
|
||||
Modified:
|
||||
nora-registry/src/main.rs (+8 lines)
|
||||
nora-registry/src/registry/cargo_registry.rs (+13 lines)
|
||||
nora-registry/src/registry/docker.rs (+47 lines)
|
||||
nora-registry/src/registry/maven.rs (+36 lines)
|
||||
nora-registry/src/registry/npm.rs (+29 lines)
|
||||
nora-registry/src/ui/api.rs (+154 lines)
|
||||
nora-registry/src/ui/components.rs (+394 lines)
|
||||
nora-registry/src/ui/mod.rs (+5 lines)
|
||||
nora-registry/src/ui/templates.rs (+180/-79 lines)
|
||||
|
||||
Total: ~1004 insertions, 79 deletions
|
||||
```
|
||||
|
||||
### Useful Commands
|
||||
```bash
|
||||
# Start server
|
||||
cargo run --release --bin nora
|
||||
|
||||
# Test dashboard
|
||||
curl http://127.0.0.1:4000/api/ui/dashboard
|
||||
|
||||
# View UI
|
||||
open http://127.0.0.1:4000/ui/
|
||||
|
||||
# Trigger metrics
|
||||
curl http://127.0.0.1:4000/v2/test/manifests/v1
|
||||
curl http://127.0.0.1:4000/npm/lodash/-/lodash-4.17.21.tgz -o /dev/null
|
||||
```
|
||||
|
||||
---
|
||||
503
TODO.md
503
TODO.md
@@ -1,503 +0,0 @@
|
||||
# NORA Roadmap / TODO
|
||||
|
||||
## v0.2.0 - DONE
|
||||
- [x] Unit tests (75 tests passing)
|
||||
- [x] Input validation (path traversal protection)
|
||||
- [x] Rate limiting (brute-force protection)
|
||||
- [x] Request ID tracking
|
||||
- [x] Migrate command (local <-> S3)
|
||||
- [x] Error handling (thiserror)
|
||||
- [x] SVG brand icons
|
||||
|
||||
---
|
||||
|
||||
## v0.2.1 - Dashboard Expansion (2026-01-26) - DONE
|
||||
|
||||
### Commit: 93f9655
|
||||
|
||||
### New Files
|
||||
- `nora-registry/src/dashboard_metrics.rs` - AtomicU64 counters for metrics
|
||||
- `nora-registry/src/activity_log.rs` - Bounded activity log (50 entries)
|
||||
|
||||
### Modified Files
|
||||
- `nora-registry/src/main.rs` - Added modules, updated AppState
|
||||
- `nora-registry/src/ui/api.rs` - Added DashboardResponse, api_dashboard()
|
||||
- `nora-registry/src/ui/mod.rs` - Added /api/ui/dashboard route
|
||||
- `nora-registry/src/ui/components.rs` - Dark theme components
|
||||
- `nora-registry/src/ui/templates.rs` - New render_dashboard()
|
||||
- `nora-registry/src/registry/docker.rs` - Instrumented handlers
|
||||
- `nora-registry/src/registry/npm.rs` - Instrumented with cache tracking
|
||||
- `nora-registry/src/registry/maven.rs` - Instrumented download/upload
|
||||
- `nora-registry/src/registry/cargo_registry.rs` - Instrumented download
|
||||
|
||||
### Features Implemented
|
||||
- [x] Global stats panel (downloads, uploads, artifacts, cache hit %, storage)
|
||||
- [x] Per-registry metrics (Docker, Maven, npm, Cargo, PyPI)
|
||||
- [x] Mount points table with proxy upstreams
|
||||
- [x] Activity log (last 20 events)
|
||||
- [x] Dark theme (#0f172a background, #1e293b cards)
|
||||
- [x] Auto-refresh polling (5 seconds)
|
||||
- [x] Cache hit/miss tracking
|
||||
|
||||
### API Endpoints
|
||||
- `GET /api/ui/dashboard` - Full dashboard data as JSON
|
||||
|
||||
### Dark Theme Colors
|
||||
```
|
||||
Background: #0f172a (slate-950)
|
||||
Cards: #1e293b (slate-800)
|
||||
Borders: slate-700
|
||||
Text primary: slate-200
|
||||
Text secondary: slate-400
|
||||
Accent: blue-400
|
||||
```
|
||||
|
||||
### Testing Commands
|
||||
```bash
|
||||
# Test dashboard API
|
||||
curl http://127.0.0.1:4000/api/ui/dashboard
|
||||
|
||||
# Test Docker pull (triggers metrics)
|
||||
curl http://127.0.0.1:4000/v2/test/manifests/v1
|
||||
|
||||
# Test npm proxy (triggers cache miss)
|
||||
curl http://127.0.0.1:4000/npm/lodash/-/lodash-4.17.21.tgz -o /dev/null
|
||||
```
|
||||
|
||||
### Future Improvements (Dashboard)
|
||||
- [ ] Add PyPI download instrumentation
|
||||
- [ ] Persist metrics to disk (currently reset on restart)
|
||||
- [ ] Add WebSocket for real-time updates (instead of polling)
|
||||
- [ ] Add graphs/charts for metrics over time
|
||||
- [ ] Add user/client tracking in activity log
|
||||
- [ ] Dark/light theme toggle
|
||||
|
||||
---
|
||||
|
||||
## v0.3.0 - OIDC / Workload Identity Federation
|
||||
|
||||
### Killer Feature: OIDC for CI/CD
|
||||
Zero-secret authentication for GitHub Actions, GitLab CI, etc.
|
||||
|
||||
**Goal:** Replace manual `ROBOT_TOKEN` rotation with federated identity.
|
||||
|
||||
```yaml
|
||||
# GitHub Actions example
|
||||
permissions:
|
||||
id-token: write
|
||||
steps:
|
||||
- name: Login to NORA
|
||||
uses: nora/login-action@v1
|
||||
```
|
||||
|
||||
### Config Structure (draft)
|
||||
|
||||
```toml
|
||||
[auth.oidc]
|
||||
enabled = true
|
||||
|
||||
# GitHub Actions
|
||||
[[auth.oidc.providers]]
|
||||
name = "github-actions"
|
||||
issuer = "https://token.actions.githubusercontent.com"
|
||||
audience = "https://nora.example.com"
|
||||
|
||||
[[auth.oidc.providers.rules]]
|
||||
# Claim matching (supports glob)
|
||||
match = { repository = "my-org/*", ref = "refs/heads/main" }
|
||||
# Granted permissions
|
||||
permissions = ["push:my-org/*", "pull:*"]
|
||||
|
||||
[[auth.oidc.providers.rules]]
|
||||
match = { repository = "my-org/*", ref = "refs/heads/*" }
|
||||
permissions = ["pull:*"]
|
||||
|
||||
# GitLab CI
|
||||
[[auth.oidc.providers]]
|
||||
name = "gitlab-ci"
|
||||
issuer = "https://gitlab.com"
|
||||
audience = "https://nora.example.com"
|
||||
|
||||
[[auth.oidc.providers.rules]]
|
||||
match = { project_path = "my-group/*" }
|
||||
permissions = ["push:my-group/*", "pull:*"]
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
- [ ] JWT validation library (jsonwebtoken crate)
|
||||
- [ ] OIDC discovery (/.well-known/openid-configuration)
|
||||
- [ ] JWKS fetching and caching
|
||||
- [ ] Claims extraction and glob matching
|
||||
- [ ] Permission resolution from rules
|
||||
- [ ] Token exchange endpoint (POST /auth/oidc/token)
|
||||
- [ ] GitHub Action: `nora/login-action`
|
||||
|
||||
---
|
||||
|
||||
## v0.4.0 - Transparent Docker Hub Proxy
|
||||
|
||||
### Pain Point
|
||||
Harbor forces tag changes: `docker pull my-harbor/proxy-cache/library/nginx`
|
||||
This breaks Helm charts hardcoded to `nginx`.
|
||||
|
||||
### Goal
|
||||
Transparent pull-through cache:
|
||||
```bash
|
||||
docker pull nora.example.com/nginx # -> proxies to Docker Hub
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
- [ ] Registry v2 API interception
|
||||
- [ ] Upstream registry configuration
|
||||
- [ ] Cache layer management
|
||||
- [ ] Rate limit handling (Docker Hub limits)
|
||||
|
||||
---
|
||||
|
||||
## v0.5.0 - Repo-level RBAC
|
||||
|
||||
### Challenge
|
||||
Per-repository permissions need fast lookup (100 layers per push).
|
||||
|
||||
### Solution
|
||||
Glob patterns for 90% of cases:
|
||||
```toml
|
||||
[[auth.rules]]
|
||||
subject = "team-frontend"
|
||||
permissions = ["push:frontend/*", "pull:*"]
|
||||
|
||||
[[auth.rules]]
|
||||
subject = "ci-bot"
|
||||
permissions = ["push:*/release-*", "pull:*"]
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
- [ ] In-memory permission cache
|
||||
- [ ] Glob pattern matcher (globset crate)
|
||||
- [ ] Permission inheritance (org -> project -> repo)
|
||||
|
||||
---
|
||||
|
||||
## Target Audience
|
||||
|
||||
1. DevOps engineers tired of Java/Go monsters
|
||||
2. Edge/IoT installations (Raspberry Pi, branch offices)
|
||||
3. Educational platforms (student labs)
|
||||
4. CI/CD pipelines (GitHub Actions, GitLab CI)
|
||||
|
||||
## Competitive Advantages
|
||||
|
||||
| Feature | NORA | Harbor | Nexus |
|
||||
|---------|------|--------|-------|
|
||||
| Memory | <100MB | 2GB+ | 4GB+ |
|
||||
| OIDC for CI | v0.3.0 | No | No |
|
||||
| Transparent proxy | v0.4.0 | No (tag rewrite) | Partial |
|
||||
| Single binary | Yes | No (microservices) | No (Java) |
|
||||
| Zero-config upgrade | Yes | Complex | Complex |
|
||||
|
||||
---
|
||||
|
||||
## v0.6.0 - Online Garbage Collection
|
||||
|
||||
### Pain Point
|
||||
Harbor GC blocks registry for hours. Can't push during cleanup.
|
||||
|
||||
### Goal
|
||||
Non-blocking garbage collection with zero downtime.
|
||||
|
||||
### Implementation Tasks
|
||||
- [ ] Mark-and-sweep without locking
|
||||
- [ ] Background blob cleanup
|
||||
- [ ] Progress reporting via API/CLI
|
||||
- [ ] `nora gc --dry-run` preview
|
||||
|
||||
---
|
||||
|
||||
## v0.7.0 - Retention Policies
|
||||
|
||||
### Pain Point
|
||||
"Keep last 10 tags" sounds simple, works poorly everywhere.
|
||||
|
||||
### Goal
|
||||
Declarative retention rules in config:
|
||||
|
||||
```toml
|
||||
[[retention]]
|
||||
match = "*/dev-*"
|
||||
keep_last = 5
|
||||
|
||||
[[retention]]
|
||||
match = "*/release-*"
|
||||
keep_last = 20
|
||||
older_than = "90d"
|
||||
|
||||
[[retention]]
|
||||
match = "**/pr-*"
|
||||
older_than = "7d"
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
- [ ] Glob pattern matching for repos/tags
|
||||
- [ ] Age-based and count-based rules
|
||||
- [ ] Dry-run mode
|
||||
- [ ] Scheduled execution (cron-style)
|
||||
|
||||
---
|
||||
|
||||
## v0.8.0 - Multi-tenancy & Quotas
|
||||
|
||||
### Pain Point
|
||||
Harbor projects have quotas but configuration is painful. Nexus has no real isolation.
|
||||
|
||||
### Goal
|
||||
Simple namespaces with limits:
|
||||
|
||||
```toml
|
||||
[[tenants]]
|
||||
name = "team-frontend"
|
||||
storage_quota = "50GB"
|
||||
rate_limit = { push = 100, pull = 1000 } # per hour
|
||||
|
||||
[[tenants]]
|
||||
name = "team-backend"
|
||||
storage_quota = "100GB"
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
- [ ] Tenant isolation (namespace prefix)
|
||||
- [ ] Storage quota tracking
|
||||
- [ ] Per-tenant rate limiting
|
||||
- [ ] Usage reporting API
|
||||
|
||||
---
|
||||
|
||||
## v0.9.0 - Smart Replication
|
||||
|
||||
### Pain Point
|
||||
Harbor replication rules are complex, errors silently swallowed.
|
||||
|
||||
### Goal
|
||||
Simple CLI-driven replication with clear feedback:
|
||||
|
||||
```bash
|
||||
nora replicate --to remote-dc --filter "prod/*" --dry-run
|
||||
nora replicate --from gcr.io/my-project/* --to local/imported/
|
||||
```
|
||||
|
||||
### Implementation Tasks
|
||||
- [ ] Push-based replication to remote NORA
|
||||
- [ ] Pull-based import from external registries (Docker Hub, GCR, ECR, Quay)
|
||||
- [ ] Filter by glob patterns
|
||||
- [ ] Progress bar and detailed logs
|
||||
- [ ] Retry logic with exponential backoff
|
||||
|
||||
---
|
||||
|
||||
## v1.0.0 - Production Ready
|
||||
|
||||
### Features to polish
|
||||
- [ ] Full CLI (`nora images ls`, `nora tag`, `nora delete`)
|
||||
- [ ] Webhooks with filters and retry logic
|
||||
- [ ] Enhanced Prometheus metrics (per-repo stats, cache hit ratio, bandwidth per tenant)
|
||||
- [ ] TUI dashboard (optional)
|
||||
- [ ] Helm chart for Kubernetes deployment
|
||||
- [ ] Official Docker image on ghcr.io
|
||||
|
||||
---
|
||||
|
||||
## Future Ideas (v1.x+)
|
||||
|
||||
### Cold Storage Tiering
|
||||
Auto-move old tags to S3 Glacier:
|
||||
```toml
|
||||
[[storage.tiering]]
|
||||
match = "*"
|
||||
older_than = "180d"
|
||||
move_to = "s3-glacier"
|
||||
```
|
||||
|
||||
### Vulnerability Scanning Integration
|
||||
Not built-in (use Trivy), but:
|
||||
- [ ] Webhook on push -> trigger external scan
|
||||
- [ ] Store scan results as OCI artifacts
|
||||
- [ ] Block pull if critical CVEs (policy)
|
||||
|
||||
### Image Signing (Cosign/Notation)
|
||||
- [ ] Signature storage (OCI artifacts)
|
||||
- [ ] Policy enforcement (reject unsigned)
|
||||
|
||||
### P2P Distribution (Dragonfly/Kraken style)
|
||||
For large clusters pulling same image simultaneously.
|
||||
|
||||
---
|
||||
|
||||
---
|
||||
|
||||
## Architecture / DDD
|
||||
|
||||
### Current State (v0.2.0)
|
||||
Monolithic structure, all in `nora-registry/src/`:
|
||||
```
|
||||
src/
|
||||
├── main.rs # CLI + server setup
|
||||
├── auth.rs # htpasswd + basic auth
|
||||
├── tokens.rs # API tokens
|
||||
├── storage/ # Storage backends (local, s3)
|
||||
├── registry/ # Protocol handlers (docker, maven, npm, cargo, pypi)
|
||||
├── ui/ # Web dashboard
|
||||
└── ...
|
||||
```
|
||||
|
||||
### Target Architecture (v1.0+)
|
||||
|
||||
#### Domain-Driven Design Boundaries
|
||||
|
||||
```
|
||||
nora/
|
||||
├── nora-core/ # Domain layer (no dependencies)
|
||||
│ ├── src/
|
||||
│ │ ├── artifact.rs # Artifact, Digest, Tag, Manifest
|
||||
│ │ ├── repository.rs # Repository, Namespace
|
||||
│ │ ├── identity.rs # User, ServiceAccount, Token
|
||||
│ │ ├── policy.rs # Permission, Rule, Quota
|
||||
│ │ └── events.rs # DomainEvent (ArtifactPushed, etc.)
|
||||
│
|
||||
├── nora-auth/ # Authentication bounded context
|
||||
│ ├── src/
|
||||
│ │ ├── htpasswd.rs # Basic auth provider
|
||||
│ │ ├── oidc.rs # OIDC/JWT provider
|
||||
│ │ ├── token.rs # API token provider
|
||||
│ │ └── rbac.rs # Permission resolver
|
||||
│
|
||||
├── nora-storage/ # Storage bounded context
|
||||
│ ├── src/
|
||||
│ │ ├── backend.rs # StorageBackend trait
|
||||
│ │ ├── local.rs # Filesystem
|
||||
│ │ ├── s3.rs # S3-compatible
|
||||
│ │ ├── tiered.rs # Hot/cold tiering
|
||||
│ │ └── gc.rs # Garbage collection
|
||||
│
|
||||
├── nora-registry/ # Application layer (HTTP API)
|
||||
│ ├── src/
|
||||
│ │ ├── api/
|
||||
│ │ │ ├── oci.rs # OCI Distribution API (/v2/)
|
||||
│ │ │ ├── maven.rs # Maven repository
|
||||
│ │ │ ├── npm.rs # npm registry
|
||||
│ │ │ ├── cargo.rs # Cargo registry
|
||||
│ │ │ └── pypi.rs # PyPI (simple API)
|
||||
│ │ ├── proxy/ # Upstream proxy/cache
|
||||
│ │ ├── webhook/ # Event webhooks
|
||||
│ │ └── ui/ # Web dashboard
|
||||
│
|
||||
├── nora-cli/ # CLI application
|
||||
│ ├── src/
|
||||
│ │ ├── commands/
|
||||
│ │ │ ├── serve.rs
|
||||
│ │ │ ├── images.rs # nora images ls/delete/tag
|
||||
│ │ │ ├── gc.rs # nora gc
|
||||
│ │ │ ├── backup.rs # nora backup/restore
|
||||
│ │ │ ├── migrate.rs # nora migrate
|
||||
│ │ │ └── replicate.rs
|
||||
│ │ └── tui/ # Optional TUI dashboard
|
||||
│
|
||||
└── nora-sdk/ # Client SDK (for nora/login-action)
|
||||
└── src/
|
||||
├── client.rs # HTTP client
|
||||
└── oidc.rs # Token exchange
|
||||
```
|
||||
|
||||
#### Key Principles
|
||||
|
||||
1. **Hexagonal Architecture**
|
||||
- Core domain has no external dependencies
|
||||
- Ports (traits) define boundaries
|
||||
- Adapters implement ports (S3, filesystem, OIDC providers)
|
||||
|
||||
2. **Event-Driven**
|
||||
- Domain events: `ArtifactPushed`, `ArtifactDeleted`, `TagCreated`
|
||||
- Webhooks subscribe to events
|
||||
- Async processing for GC, replication
|
||||
|
||||
3. **CQRS-lite**
|
||||
- Commands: Push, Delete, CreateToken
|
||||
- Queries: List, Get, Search
|
||||
- Separate read/write paths for hot endpoints
|
||||
|
||||
4. **Configuration as Code**
|
||||
- All policies in `nora.toml`
|
||||
- No database for config (file-based)
|
||||
- GitOps friendly
|
||||
|
||||
#### Trait Boundaries (Ports)
|
||||
|
||||
```rust
|
||||
// nora-core/src/ports.rs
|
||||
|
||||
#[async_trait]
|
||||
pub trait ArtifactStore {
|
||||
async fn push_blob(&self, digest: &Digest, data: Bytes) -> Result<()>;
|
||||
async fn get_blob(&self, digest: &Digest) -> Result<Bytes>;
|
||||
async fn push_manifest(&self, repo: &Repository, tag: &Tag, manifest: &Manifest) -> Result<()>;
|
||||
async fn get_manifest(&self, repo: &Repository, reference: &Reference) -> Result<Manifest>;
|
||||
async fn list_tags(&self, repo: &Repository) -> Result<Vec<Tag>>;
|
||||
async fn delete(&self, repo: &Repository, reference: &Reference) -> Result<()>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait IdentityProvider {
|
||||
async fn authenticate(&self, credentials: &Credentials) -> Result<Identity>;
|
||||
async fn authorize(&self, identity: &Identity, action: &Action, resource: &Resource) -> Result<bool>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait EventPublisher {
|
||||
async fn publish(&self, event: DomainEvent) -> Result<()>;
|
||||
}
|
||||
```
|
||||
|
||||
#### Migration Path
|
||||
|
||||
| Phase | Action |
|
||||
|-------|--------|
|
||||
| v0.3 | Extract `nora-auth` crate (OIDC work) |
|
||||
| v0.4 | Extract `nora-core` domain types |
|
||||
| v0.5 | Extract `nora-storage` with trait boundaries |
|
||||
| v0.6+ | Refactor registry handlers to use ports |
|
||||
| v1.0 | Full hexagonal architecture |
|
||||
|
||||
### Technical Debt to Address
|
||||
|
||||
- [ ] Remove `unwrap()` in non-test code (started in e9984cf)
|
||||
- [ ] Add tracing spans to all handlers
|
||||
- [ ] Consistent error types across modules
|
||||
- [ ] Extract hardcoded limits to config
|
||||
- [ ] Add OpenTelemetry support (traces, not just metrics)
|
||||
|
||||
### Performance Requirements
|
||||
|
||||
| Metric | Target |
|
||||
|--------|--------|
|
||||
| Memory (idle) | <50MB |
|
||||
| Memory (under load) | <100MB |
|
||||
| Startup time | <1s |
|
||||
| Blob throughput | Wire speed (no processing overhead) |
|
||||
| Manifest latency | <10ms p99 |
|
||||
| Auth check | <1ms (cached) |
|
||||
|
||||
### Security Requirements
|
||||
|
||||
- [ ] No secrets in logs (already redacting)
|
||||
- [ ] TLS termination (or trust reverse proxy)
|
||||
- [ ] Content-addressable storage (immutable blobs)
|
||||
- [ ] Audit log for all mutations
|
||||
- [ ] SBOM generation for NORA itself
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- S3 storage: already implemented
|
||||
- Web UI: minimalist read-only dashboard (done)
|
||||
- TUI: consider for v1.0
|
||||
- Vulnerability scanning: out of scope (use Trivy externally)
|
||||
- Image signing: out of scope for now (use cosign externally)
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
#[derive(Parser)]
|
||||
|
||||
@@ -24,6 +24,8 @@ tracing-subscriber.workspace = true
|
||||
reqwest.workspace = true
|
||||
sha2.workspace = true
|
||||
async-trait.workspace = true
|
||||
hmac.workspace = true
|
||||
hex.workspace = true
|
||||
toml = "0.8"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
bcrypt = "0.17"
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use parking_lot::RwLock;
|
||||
use serde::Serialize;
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use axum::{
|
||||
body::Body,
|
||||
extract::State,
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Backup and restore functionality for Nora
|
||||
//!
|
||||
//! Exports all artifacts to a tar.gz file and restores from backups.
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::env;
|
||||
use std::fs;
|
||||
@@ -53,6 +56,19 @@ pub struct StorageConfig {
|
||||
pub s3_url: String,
|
||||
#[serde(default = "default_bucket")]
|
||||
pub bucket: String,
|
||||
/// S3 access key (optional, uses anonymous access if not set)
|
||||
#[serde(default)]
|
||||
pub s3_access_key: Option<String>,
|
||||
/// S3 secret key (optional, uses anonymous access if not set)
|
||||
#[serde(default)]
|
||||
pub s3_secret_key: Option<String>,
|
||||
/// S3 region (default: us-east-1)
|
||||
#[serde(default = "default_s3_region")]
|
||||
pub s3_region: String,
|
||||
}
|
||||
|
||||
fn default_s3_region() -> String {
|
||||
"us-east-1".to_string()
|
||||
}
|
||||
|
||||
fn default_storage_path() -> String {
|
||||
@@ -325,6 +341,15 @@ impl Config {
|
||||
if let Ok(val) = env::var("NORA_STORAGE_BUCKET") {
|
||||
self.storage.bucket = val;
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_STORAGE_S3_ACCESS_KEY") {
|
||||
self.storage.s3_access_key = if val.is_empty() { None } else { Some(val) };
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_STORAGE_S3_SECRET_KEY") {
|
||||
self.storage.s3_secret_key = if val.is_empty() { None } else { Some(val) };
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_STORAGE_S3_REGION") {
|
||||
self.storage.s3_region = val;
|
||||
}
|
||||
|
||||
// Auth config
|
||||
if let Ok(val) = env::var("NORA_AUTH_ENABLED") {
|
||||
@@ -455,6 +480,9 @@ impl Default for Config {
|
||||
path: String::from("data/storage"),
|
||||
s3_url: String::from("http://127.0.0.1:3000"),
|
||||
bucket: String::from("registry"),
|
||||
s3_access_key: None,
|
||||
s3_secret_key: None,
|
||||
s3_region: String::from("us-east-1"),
|
||||
},
|
||||
maven: MavenConfig::default(),
|
||||
npm: NpmConfig::default(),
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::Instant;
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
#![allow(dead_code)]
|
||||
//! Application error handling with HTTP response conversion
|
||||
//!
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use axum::{extract::State, http::StatusCode, response::Json, routing::get, Router};
|
||||
use serde::Serialize;
|
||||
use std::sync::Arc;
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
mod activity_log;
|
||||
mod auth;
|
||||
mod backup;
|
||||
@@ -104,10 +107,18 @@ async fn main() {
|
||||
info!(
|
||||
s3_url = %config.storage.s3_url,
|
||||
bucket = %config.storage.bucket,
|
||||
region = %config.storage.s3_region,
|
||||
has_credentials = config.storage.s3_access_key.is_some(),
|
||||
"Using S3 storage"
|
||||
);
|
||||
}
|
||||
Storage::new_s3(&config.storage.s3_url, &config.storage.bucket)
|
||||
Storage::new_s3(
|
||||
&config.storage.s3_url,
|
||||
&config.storage.bucket,
|
||||
&config.storage.s3_region,
|
||||
config.storage.s3_access_key.as_deref(),
|
||||
config.storage.s3_secret_key.as_deref(),
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
@@ -131,7 +142,13 @@ async fn main() {
|
||||
Some(Commands::Migrate { from, to, dry_run }) => {
|
||||
let source = match from.as_str() {
|
||||
"local" => Storage::new_local(&config.storage.path),
|
||||
"s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
|
||||
"s3" => Storage::new_s3(
|
||||
&config.storage.s3_url,
|
||||
&config.storage.bucket,
|
||||
&config.storage.s3_region,
|
||||
config.storage.s3_access_key.as_deref(),
|
||||
config.storage.s3_secret_key.as_deref(),
|
||||
),
|
||||
_ => {
|
||||
error!("Invalid source: '{}'. Use 'local' or 's3'", from);
|
||||
std::process::exit(1);
|
||||
@@ -140,7 +157,13 @@ async fn main() {
|
||||
|
||||
let dest = match to.as_str() {
|
||||
"local" => Storage::new_local(&config.storage.path),
|
||||
"s3" => Storage::new_s3(&config.storage.s3_url, &config.storage.bucket),
|
||||
"s3" => Storage::new_s3(
|
||||
&config.storage.s3_url,
|
||||
&config.storage.bucket,
|
||||
&config.storage.s3_region,
|
||||
config.storage.s3_access_key.as_deref(),
|
||||
config.storage.s3_secret_key.as_deref(),
|
||||
),
|
||||
_ => {
|
||||
error!("Invalid destination: '{}'. Use 'local' or 's3'", to);
|
||||
std::process::exit(1);
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use axum::{
|
||||
body::Body,
|
||||
extract::MatchedPath,
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Migration between storage backends
|
||||
//!
|
||||
//! Supports migrating artifacts from one storage backend to another
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! OpenAPI documentation and Swagger UI
|
||||
//!
|
||||
//! Functions in this module are stubs used only for generating OpenAPI documentation.
|
||||
@@ -15,7 +18,7 @@ use crate::AppState;
|
||||
#[openapi(
|
||||
info(
|
||||
title = "Nora",
|
||||
version = "0.2.10",
|
||||
version = "0.2.12",
|
||||
description = "Multi-protocol package registry supporting Docker, Maven, npm, Cargo, and PyPI",
|
||||
license(name = "MIT"),
|
||||
contact(name = "DevITWay", url = "https://github.com/getnora-io/nora")
|
||||
@@ -25,6 +28,7 @@ use crate::AppState;
|
||||
),
|
||||
tags(
|
||||
(name = "health", description = "Health check endpoints"),
|
||||
(name = "metrics", description = "Prometheus metrics"),
|
||||
(name = "dashboard", description = "Dashboard & Metrics API"),
|
||||
(name = "docker", description = "Docker Registry v2 API"),
|
||||
(name = "maven", description = "Maven Repository API"),
|
||||
@@ -37,18 +41,30 @@ use crate::AppState;
|
||||
// Health
|
||||
crate::openapi::health_check,
|
||||
crate::openapi::readiness_check,
|
||||
// Metrics
|
||||
crate::openapi::prometheus_metrics,
|
||||
// Dashboard
|
||||
crate::openapi::dashboard_metrics,
|
||||
// Docker
|
||||
// Docker - Read
|
||||
crate::openapi::docker_version,
|
||||
crate::openapi::docker_catalog,
|
||||
crate::openapi::docker_tags,
|
||||
crate::openapi::docker_manifest,
|
||||
crate::openapi::docker_blob,
|
||||
crate::openapi::docker_manifest_get,
|
||||
crate::openapi::docker_blob_head,
|
||||
crate::openapi::docker_blob_get,
|
||||
// Docker - Write
|
||||
crate::openapi::docker_manifest_put,
|
||||
crate::openapi::docker_blob_upload_start,
|
||||
crate::openapi::docker_blob_upload_patch,
|
||||
crate::openapi::docker_blob_upload_put,
|
||||
// Maven
|
||||
crate::openapi::maven_artifact,
|
||||
crate::openapi::maven_artifact_get,
|
||||
crate::openapi::maven_artifact_put,
|
||||
// npm
|
||||
crate::openapi::npm_package,
|
||||
// Cargo
|
||||
crate::openapi::cargo_metadata,
|
||||
crate::openapi::cargo_download,
|
||||
// PyPI
|
||||
crate::openapi::pypi_simple,
|
||||
crate::openapi::pypi_package,
|
||||
@@ -258,6 +274,8 @@ pub struct ActivityEntry {
|
||||
|
||||
// ============ Path Operations (documentation only) ============
|
||||
|
||||
// -------------------- Health --------------------
|
||||
|
||||
/// Health check endpoint
|
||||
#[utoipa::path(
|
||||
get,
|
||||
@@ -282,6 +300,23 @@ pub async fn health_check() {}
|
||||
)]
|
||||
pub async fn readiness_check() {}
|
||||
|
||||
// -------------------- Metrics --------------------
|
||||
|
||||
/// Prometheus metrics endpoint
|
||||
///
|
||||
/// Returns metrics in Prometheus text format for scraping.
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/metrics",
|
||||
tag = "metrics",
|
||||
responses(
|
||||
(status = 200, description = "Prometheus metrics", content_type = "text/plain")
|
||||
)
|
||||
)]
|
||||
pub async fn prometheus_metrics() {}
|
||||
|
||||
// -------------------- Dashboard --------------------
|
||||
|
||||
/// Dashboard metrics and activity
|
||||
///
|
||||
/// Returns comprehensive metrics including downloads, uploads, cache statistics,
|
||||
@@ -296,6 +331,8 @@ pub async fn readiness_check() {}
|
||||
)]
|
||||
pub async fn dashboard_metrics() {}
|
||||
|
||||
// -------------------- Docker Registry v2 - Read Operations --------------------
|
||||
|
||||
/// Docker Registry version check
|
||||
#[utoipa::path(
|
||||
get,
|
||||
@@ -325,7 +362,7 @@ pub async fn docker_catalog() {}
|
||||
path = "/v2/{name}/tags/list",
|
||||
tag = "docker",
|
||||
params(
|
||||
("name" = String, Path, description = "Repository name")
|
||||
("name" = String, Path, description = "Repository name (e.g., 'alpine' or 'library/nginx')")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Tag list", body = DockerTags),
|
||||
@@ -341,14 +378,30 @@ pub async fn docker_tags() {}
|
||||
tag = "docker",
|
||||
params(
|
||||
("name" = String, Path, description = "Repository name"),
|
||||
("reference" = String, Path, description = "Tag or digest")
|
||||
("reference" = String, Path, description = "Tag or digest (sha256:...)")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Manifest content"),
|
||||
(status = 404, description = "Manifest not found")
|
||||
)
|
||||
)]
|
||||
pub async fn docker_manifest() {}
|
||||
pub async fn docker_manifest_get() {}
|
||||
|
||||
/// Check if blob exists
|
||||
#[utoipa::path(
|
||||
head,
|
||||
path = "/v2/{name}/blobs/{digest}",
|
||||
tag = "docker",
|
||||
params(
|
||||
("name" = String, Path, description = "Repository name"),
|
||||
("digest" = String, Path, description = "Blob digest (sha256:...)")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Blob exists, Content-Length header contains size"),
|
||||
(status = 404, description = "Blob not found")
|
||||
)
|
||||
)]
|
||||
pub async fn docker_blob_head() {}
|
||||
|
||||
/// Get blob
|
||||
#[utoipa::path(
|
||||
@@ -364,7 +417,79 @@ pub async fn docker_manifest() {}
|
||||
(status = 404, description = "Blob not found")
|
||||
)
|
||||
)]
|
||||
pub async fn docker_blob() {}
|
||||
pub async fn docker_blob_get() {}
|
||||
|
||||
// -------------------- Docker Registry v2 - Write Operations --------------------
|
||||
|
||||
/// Push manifest
|
||||
#[utoipa::path(
|
||||
put,
|
||||
path = "/v2/{name}/manifests/{reference}",
|
||||
tag = "docker",
|
||||
params(
|
||||
("name" = String, Path, description = "Repository name"),
|
||||
("reference" = String, Path, description = "Tag or digest")
|
||||
),
|
||||
responses(
|
||||
(status = 201, description = "Manifest created, Docker-Content-Digest header contains digest"),
|
||||
(status = 400, description = "Invalid manifest")
|
||||
)
|
||||
)]
|
||||
pub async fn docker_manifest_put() {}
|
||||
|
||||
/// Start blob upload
|
||||
///
|
||||
/// Initiates a resumable blob upload. Returns a Location header with the upload URL.
|
||||
#[utoipa::path(
|
||||
post,
|
||||
path = "/v2/{name}/blobs/uploads/",
|
||||
tag = "docker",
|
||||
params(
|
||||
("name" = String, Path, description = "Repository name")
|
||||
),
|
||||
responses(
|
||||
(status = 202, description = "Upload started, Location header contains upload URL")
|
||||
)
|
||||
)]
|
||||
pub async fn docker_blob_upload_start() {}
|
||||
|
||||
/// Upload blob chunk (chunked upload)
|
||||
///
|
||||
/// Uploads a chunk of data to an in-progress upload session.
|
||||
#[utoipa::path(
|
||||
patch,
|
||||
path = "/v2/{name}/blobs/uploads/{uuid}",
|
||||
tag = "docker",
|
||||
params(
|
||||
("name" = String, Path, description = "Repository name"),
|
||||
("uuid" = String, Path, description = "Upload session UUID")
|
||||
),
|
||||
responses(
|
||||
(status = 202, description = "Chunk accepted, Range header indicates bytes received")
|
||||
)
|
||||
)]
|
||||
pub async fn docker_blob_upload_patch() {}
|
||||
|
||||
/// Complete blob upload
|
||||
///
|
||||
/// Finalizes the blob upload. Can include final chunk data in the body.
|
||||
#[utoipa::path(
|
||||
put,
|
||||
path = "/v2/{name}/blobs/uploads/{uuid}",
|
||||
tag = "docker",
|
||||
params(
|
||||
("name" = String, Path, description = "Repository name"),
|
||||
("uuid" = String, Path, description = "Upload session UUID"),
|
||||
("digest" = String, Query, description = "Expected blob digest (sha256:...)")
|
||||
),
|
||||
responses(
|
||||
(status = 201, description = "Blob created"),
|
||||
(status = 400, description = "Digest mismatch or missing")
|
||||
)
|
||||
)]
|
||||
pub async fn docker_blob_upload_put() {}
|
||||
|
||||
// -------------------- Maven --------------------
|
||||
|
||||
/// Get Maven artifact
|
||||
#[utoipa::path(
|
||||
@@ -379,7 +504,24 @@ pub async fn docker_blob() {}
|
||||
(status = 404, description = "Artifact not found, trying upstream proxies")
|
||||
)
|
||||
)]
|
||||
pub async fn maven_artifact() {}
|
||||
pub async fn maven_artifact_get() {}
|
||||
|
||||
/// Upload Maven artifact
|
||||
#[utoipa::path(
|
||||
put,
|
||||
path = "/maven2/{path}",
|
||||
tag = "maven",
|
||||
params(
|
||||
("path" = String, Path, description = "Artifact path")
|
||||
),
|
||||
responses(
|
||||
(status = 201, description = "Artifact uploaded"),
|
||||
(status = 500, description = "Storage error")
|
||||
)
|
||||
)]
|
||||
pub async fn maven_artifact_put() {}
|
||||
|
||||
// -------------------- npm --------------------
|
||||
|
||||
/// Get npm package metadata
|
||||
#[utoipa::path(
|
||||
@@ -387,7 +529,7 @@ pub async fn maven_artifact() {}
|
||||
path = "/npm/{name}",
|
||||
tag = "npm",
|
||||
params(
|
||||
("name" = String, Path, description = "Package name")
|
||||
("name" = String, Path, description = "Package name (e.g., 'lodash' or '@scope/package')")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Package metadata (JSON)"),
|
||||
@@ -396,6 +538,41 @@ pub async fn maven_artifact() {}
|
||||
)]
|
||||
pub async fn npm_package() {}
|
||||
|
||||
// -------------------- Cargo --------------------
|
||||
|
||||
/// Get Cargo crate metadata
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/cargo/api/v1/crates/{crate_name}",
|
||||
tag = "cargo",
|
||||
params(
|
||||
("crate_name" = String, Path, description = "Crate name")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Crate metadata (JSON)"),
|
||||
(status = 404, description = "Crate not found")
|
||||
)
|
||||
)]
|
||||
pub async fn cargo_metadata() {}
|
||||
|
||||
/// Download Cargo crate
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/cargo/api/v1/crates/{crate_name}/{version}/download",
|
||||
tag = "cargo",
|
||||
params(
|
||||
("crate_name" = String, Path, description = "Crate name"),
|
||||
("version" = String, Path, description = "Crate version")
|
||||
),
|
||||
responses(
|
||||
(status = 200, description = "Crate file (.crate)"),
|
||||
(status = 404, description = "Crate version not found")
|
||||
)
|
||||
)]
|
||||
pub async fn cargo_download() {}
|
||||
|
||||
// -------------------- PyPI --------------------
|
||||
|
||||
/// PyPI Simple index
|
||||
#[utoipa::path(
|
||||
get,
|
||||
@@ -422,6 +599,8 @@ pub async fn pypi_simple() {}
|
||||
)]
|
||||
pub async fn pypi_package() {}
|
||||
|
||||
// -------------------- Auth / Tokens --------------------
|
||||
|
||||
/// Create API token
|
||||
#[utoipa::path(
|
||||
post,
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Rate limiting configuration and middleware
|
||||
//!
|
||||
//! Provides rate limiting to protect against:
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use crate::activity_log::{ActionType, ActivityEntry};
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use crate::activity_log::{ActionType, ActivityEntry};
|
||||
use crate::registry::docker_auth::DockerAuth;
|
||||
use crate::storage::Storage;
|
||||
@@ -47,6 +50,8 @@ static UPLOAD_SESSIONS: std::sync::LazyLock<RwLock<HashMap<String, Vec<u8>>>> =
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new()
|
||||
.route("/v2/", get(check))
|
||||
.route("/v2/_catalog", get(catalog))
|
||||
// Single-segment name routes (e.g., /v2/alpine/...)
|
||||
.route("/v2/{name}/blobs/{digest}", head(check_blob))
|
||||
.route("/v2/{name}/blobs/{digest}", get(download_blob))
|
||||
.route(
|
||||
@@ -60,12 +65,52 @@ pub fn routes() -> Router<Arc<AppState>> {
|
||||
.route("/v2/{name}/manifests/{reference}", get(get_manifest))
|
||||
.route("/v2/{name}/manifests/{reference}", put(put_manifest))
|
||||
.route("/v2/{name}/tags/list", get(list_tags))
|
||||
// Two-segment name routes (e.g., /v2/library/alpine/...)
|
||||
.route("/v2/{ns}/{name}/blobs/{digest}", head(check_blob_ns))
|
||||
.route("/v2/{ns}/{name}/blobs/{digest}", get(download_blob_ns))
|
||||
.route(
|
||||
"/v2/{ns}/{name}/blobs/uploads/",
|
||||
axum::routing::post(start_upload_ns),
|
||||
)
|
||||
.route(
|
||||
"/v2/{ns}/{name}/blobs/uploads/{uuid}",
|
||||
patch(patch_blob_ns).put(upload_blob_ns),
|
||||
)
|
||||
.route(
|
||||
"/v2/{ns}/{name}/manifests/{reference}",
|
||||
get(get_manifest_ns),
|
||||
)
|
||||
.route(
|
||||
"/v2/{ns}/{name}/manifests/{reference}",
|
||||
put(put_manifest_ns),
|
||||
)
|
||||
.route("/v2/{ns}/{name}/tags/list", get(list_tags_ns))
|
||||
}
|
||||
|
||||
async fn check() -> (StatusCode, Json<Value>) {
|
||||
(StatusCode::OK, Json(json!({})))
|
||||
}
|
||||
|
||||
/// List all repositories in the registry
|
||||
async fn catalog(State(state): State<Arc<AppState>>) -> Json<Value> {
|
||||
let keys = state.storage.list("docker/").await;
|
||||
|
||||
// Extract unique repository names from paths like "docker/{name}/manifests/..."
|
||||
let mut repos: Vec<String> = keys
|
||||
.iter()
|
||||
.filter_map(|k| {
|
||||
k.strip_prefix("docker/")
|
||||
.and_then(|rest| rest.split('/').next())
|
||||
.map(String::from)
|
||||
})
|
||||
.collect();
|
||||
|
||||
repos.sort();
|
||||
repos.dedup();
|
||||
|
||||
Json(json!({ "repositories": repos }))
|
||||
}
|
||||
|
||||
async fn check_blob(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((name, digest)): Path<(String, String)>,
|
||||
@@ -312,7 +357,12 @@ async fn get_manifest(
|
||||
}
|
||||
|
||||
// Try upstream proxies
|
||||
tracing::debug!(
|
||||
upstreams_count = state.config.docker.upstreams.len(),
|
||||
"Trying upstream proxies"
|
||||
);
|
||||
for upstream in &state.config.docker.upstreams {
|
||||
tracing::debug!(upstream_url = %upstream.url, "Trying upstream");
|
||||
if let Ok((data, content_type)) = fetch_manifest_from_upstream(
|
||||
&upstream.url,
|
||||
&name,
|
||||
@@ -454,6 +504,75 @@ async fn list_tags(State(state): State<Arc<AppState>>, Path(name): Path<String>)
|
||||
(StatusCode::OK, Json(json!({"name": name, "tags": tags}))).into_response()
|
||||
}
|
||||
|
||||
// ============================================================================
|
||||
// Namespace handlers (for two-segment names like library/alpine)
|
||||
// These combine ns/name into a single name and delegate to the main handlers
|
||||
// ============================================================================
|
||||
|
||||
async fn check_blob_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name, digest)): Path<(String, String, String)>,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
check_blob(state, Path((full_name, digest))).await
|
||||
}
|
||||
|
||||
async fn download_blob_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name, digest)): Path<(String, String, String)>,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
download_blob(state, Path((full_name, digest))).await
|
||||
}
|
||||
|
||||
async fn start_upload_ns(Path((ns, name)): Path<(String, String)>) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
start_upload(Path(full_name)).await
|
||||
}
|
||||
|
||||
async fn patch_blob_ns(
|
||||
Path((ns, name, uuid)): Path<(String, String, String)>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
patch_blob(Path((full_name, uuid)), body).await
|
||||
}
|
||||
|
||||
async fn upload_blob_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name, uuid)): Path<(String, String, String)>,
|
||||
query: axum::extract::Query<std::collections::HashMap<String, String>>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
upload_blob(state, Path((full_name, uuid)), query, body).await
|
||||
}
|
||||
|
||||
async fn get_manifest_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name, reference)): Path<(String, String, String)>,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
get_manifest(state, Path((full_name, reference))).await
|
||||
}
|
||||
|
||||
async fn put_manifest_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name, reference)): Path<(String, String, String)>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
put_manifest(state, Path((full_name, reference)), body).await
|
||||
}
|
||||
|
||||
async fn list_tags_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
list_tags(state, Path(full_name)).await
|
||||
}
|
||||
|
||||
/// Fetch a blob from an upstream Docker registry
|
||||
async fn fetch_blob_from_upstream(
|
||||
upstream_url: &str,
|
||||
@@ -525,10 +644,14 @@ async fn fetch_manifest_from_upstream(
|
||||
reference
|
||||
);
|
||||
|
||||
tracing::debug!(url = %url, "Fetching manifest from upstream");
|
||||
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(timeout))
|
||||
.build()
|
||||
.map_err(|_| ())?;
|
||||
.map_err(|e| {
|
||||
tracing::error!(error = %e, "Failed to build HTTP client");
|
||||
})?;
|
||||
|
||||
// Request with Accept header for manifest types
|
||||
let accept_header = "application/vnd.docker.distribution.manifest.v2+json, \
|
||||
@@ -542,7 +665,11 @@ async fn fetch_manifest_from_upstream(
|
||||
.header("Accept", accept_header)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|_| ())?;
|
||||
.map_err(|e| {
|
||||
tracing::error!(error = %e, url = %url, "Failed to send request to upstream");
|
||||
})?;
|
||||
|
||||
tracing::debug!(status = %response.status(), "Initial upstream response");
|
||||
|
||||
let response = if response.status() == reqwest::StatusCode::UNAUTHORIZED {
|
||||
// Get Www-Authenticate header and fetch token
|
||||
@@ -552,25 +679,34 @@ async fn fetch_manifest_from_upstream(
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.map(String::from);
|
||||
|
||||
tracing::debug!(www_auth = ?www_auth, "Got 401, fetching token");
|
||||
|
||||
if let Some(token) = docker_auth
|
||||
.get_token(upstream_url, name, www_auth.as_deref())
|
||||
.await
|
||||
{
|
||||
tracing::debug!("Token acquired, retrying with auth");
|
||||
client
|
||||
.get(&url)
|
||||
.header("Accept", accept_header)
|
||||
.header("Authorization", format!("Bearer {}", token))
|
||||
.send()
|
||||
.await
|
||||
.map_err(|_| ())?
|
||||
.map_err(|e| {
|
||||
tracing::error!(error = %e, "Failed to send authenticated request");
|
||||
})?
|
||||
} else {
|
||||
tracing::error!("Failed to acquire token");
|
||||
return Err(());
|
||||
}
|
||||
} else {
|
||||
response
|
||||
};
|
||||
|
||||
tracing::debug!(status = %response.status(), "Final upstream response");
|
||||
|
||||
if !response.status().is_success() {
|
||||
tracing::warn!(status = %response.status(), "Upstream returned non-success status");
|
||||
return Err(());
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use parking_lot::RwLock;
|
||||
use std::collections::HashMap;
|
||||
use std::time::{Duration, Instant};
|
||||
@@ -77,9 +80,12 @@ impl DockerAuth {
|
||||
let scope = format!("repository:{}:pull", name);
|
||||
let url = format!("{}?service={}&scope={}", realm, service, scope);
|
||||
|
||||
tracing::debug!(url = %url, "Fetching auth token");
|
||||
|
||||
let response = self.client.get(&url).send().await.ok()?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
tracing::warn!(status = %response.status(), "Token request failed");
|
||||
return None;
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use crate::activity_log::{ActionType, ActivityEntry};
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
mod cargo_registry;
|
||||
pub mod docker;
|
||||
pub mod docker_auth;
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use crate::activity_log::{ActionType, ActivityEntry};
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use crate::activity_log::{ActionType, ActivityEntry};
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use crate::activity_log::{ActionType, ActivityEntry};
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Request ID middleware for request tracking and correlation
|
||||
//!
|
||||
//! Generates a unique ID for each request that can be used for:
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Environment variables secrets provider
|
||||
//!
|
||||
//! Reads secrets from environment variables. This is the default provider
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
#![allow(dead_code)] // Foundational code for future S3/Vault integration
|
||||
|
||||
//! Secrets management for NORA
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
//! Protected secret types with memory safety
|
||||
//!
|
||||
//! Secrets are automatically zeroed on drop and redacted in Debug output.
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use async_trait::async_trait;
|
||||
use axum::body::Bytes;
|
||||
use std::path::PathBuf;
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
mod local;
|
||||
mod s3;
|
||||
|
||||
@@ -59,9 +62,17 @@ impl Storage {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_s3(s3_url: &str, bucket: &str) -> Self {
|
||||
pub fn new_s3(
|
||||
s3_url: &str,
|
||||
bucket: &str,
|
||||
region: &str,
|
||||
access_key: Option<&str>,
|
||||
secret_key: Option<&str>,
|
||||
) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(S3Storage::new(s3_url, bucket)),
|
||||
inner: Arc::new(S3Storage::new(
|
||||
s3_url, bucket, region, access_key, secret_key,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,24 +1,146 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use async_trait::async_trait;
|
||||
use axum::body::Bytes;
|
||||
use chrono::Utc;
|
||||
use hmac::{Hmac, Mac};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
use super::{FileMeta, Result, StorageBackend, StorageError};
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
/// S3-compatible storage backend (MinIO, AWS S3)
|
||||
pub struct S3Storage {
|
||||
s3_url: String,
|
||||
bucket: String,
|
||||
region: String,
|
||||
access_key: Option<String>,
|
||||
secret_key: Option<String>,
|
||||
client: reqwest::Client,
|
||||
}
|
||||
|
||||
impl S3Storage {
|
||||
pub fn new(s3_url: &str, bucket: &str) -> Self {
|
||||
/// Create new S3 storage with optional credentials
|
||||
pub fn new(
|
||||
s3_url: &str,
|
||||
bucket: &str,
|
||||
region: &str,
|
||||
access_key: Option<&str>,
|
||||
secret_key: Option<&str>,
|
||||
) -> Self {
|
||||
Self {
|
||||
s3_url: s3_url.to_string(),
|
||||
s3_url: s3_url.trim_end_matches('/').to_string(),
|
||||
bucket: bucket.to_string(),
|
||||
region: region.to_string(),
|
||||
access_key: access_key.map(String::from),
|
||||
secret_key: secret_key.map(String::from),
|
||||
client: reqwest::Client::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Sign a request using AWS Signature v4
|
||||
fn sign_request(
|
||||
&self,
|
||||
method: &str,
|
||||
path: &str,
|
||||
payload_hash: &str,
|
||||
timestamp: &str,
|
||||
date: &str,
|
||||
) -> Option<String> {
|
||||
let (access_key, secret_key) = match (&self.access_key, &self.secret_key) {
|
||||
(Some(ak), Some(sk)) => (ak.as_str(), sk.as_str()),
|
||||
_ => return None,
|
||||
};
|
||||
|
||||
// Parse host from URL
|
||||
let host = self
|
||||
.s3_url
|
||||
.trim_start_matches("http://")
|
||||
.trim_start_matches("https://");
|
||||
|
||||
// Canonical request
|
||||
// URI must be URL-encoded (except /)
|
||||
let encoded_path = uri_encode(path);
|
||||
let canonical_uri = format!("/{}/{}", self.bucket, encoded_path);
|
||||
let canonical_query = "";
|
||||
let canonical_headers = format!(
|
||||
"host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
|
||||
host, payload_hash, timestamp
|
||||
);
|
||||
let signed_headers = "host;x-amz-content-sha256;x-amz-date";
|
||||
|
||||
// AWS Signature v4 canonical request format:
|
||||
// HTTPMethod\nCanonicalURI\nCanonicalQueryString\nCanonicalHeaders\n\nSignedHeaders\nHashedPayload
|
||||
// Note: CanonicalHeaders already ends with \n, plus blank line before SignedHeaders
|
||||
let canonical_request = format!(
|
||||
"{}\n{}\n{}\n{}\n{}\n{}",
|
||||
method, canonical_uri, canonical_query, canonical_headers, signed_headers, payload_hash
|
||||
);
|
||||
|
||||
let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
|
||||
|
||||
// String to sign
|
||||
let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
|
||||
let string_to_sign = format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
timestamp, credential_scope, canonical_request_hash
|
||||
);
|
||||
|
||||
// Calculate signature
|
||||
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
|
||||
let k_region = hmac_sha256(&k_date, self.region.as_bytes());
|
||||
let k_service = hmac_sha256(&k_region, b"s3");
|
||||
let k_signing = hmac_sha256(&k_service, b"aws4_request");
|
||||
let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));
|
||||
|
||||
// Authorization header
|
||||
Some(format!(
|
||||
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
|
||||
access_key, credential_scope, signed_headers, signature
|
||||
))
|
||||
}
|
||||
|
||||
/// Make a signed request
|
||||
async fn signed_request(
|
||||
&self,
|
||||
method: reqwest::Method,
|
||||
key: &str,
|
||||
body: Option<&[u8]>,
|
||||
) -> std::result::Result<reqwest::Response, StorageError> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let now = Utc::now();
|
||||
let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
|
||||
let date = now.format("%Y%m%d").to_string();
|
||||
|
||||
let payload_hash = match body {
|
||||
Some(data) => hex::encode(Sha256::digest(data)),
|
||||
None => hex::encode(Sha256::digest(b"")),
|
||||
};
|
||||
|
||||
let mut request = self
|
||||
.client
|
||||
.request(method.clone(), &url)
|
||||
.header("x-amz-date", ×tamp)
|
||||
.header("x-amz-content-sha256", &payload_hash);
|
||||
|
||||
if let Some(auth) =
|
||||
self.sign_request(method.as_str(), key, &payload_hash, ×tamp, &date)
|
||||
{
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
if let Some(data) = body {
|
||||
request = request.body(data.to_vec());
|
||||
}
|
||||
|
||||
request
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| StorageError::Network(e.to_string()))
|
||||
}
|
||||
|
||||
fn parse_s3_keys(xml: &str, prefix: &str) -> Vec<String> {
|
||||
xml.split("<Key>")
|
||||
.filter_map(|part| part.split("</Key>").next())
|
||||
@@ -28,17 +150,34 @@ impl S3Storage {
|
||||
}
|
||||
}
|
||||
|
||||
/// URL-encode a string for S3 canonical URI (encode all except A-Za-z0-9-_.~/)
|
||||
fn uri_encode(s: &str) -> String {
|
||||
let mut result = String::with_capacity(s.len() * 3);
|
||||
for c in s.chars() {
|
||||
match c {
|
||||
'A'..='Z' | 'a'..='z' | '0'..='9' | '-' | '_' | '.' | '~' | '/' => result.push(c),
|
||||
_ => {
|
||||
for b in c.to_string().as_bytes() {
|
||||
result.push_str(&format!("%{:02X}", b));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
fn hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8> {
|
||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC can take key of any size");
|
||||
mac.update(data);
|
||||
mac.finalize().into_bytes().to_vec()
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl StorageBackend for S3Storage {
|
||||
async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let response = self
|
||||
.client
|
||||
.put(&url)
|
||||
.body(data.to_vec())
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| StorageError::Network(e.to_string()))?;
|
||||
.signed_request(reqwest::Method::PUT, key, Some(data))
|
||||
.await?;
|
||||
|
||||
if response.status().is_success() {
|
||||
Ok(())
|
||||
@@ -51,13 +190,7 @@ impl StorageBackend for S3Storage {
|
||||
}
|
||||
|
||||
async fn get(&self, key: &str) -> Result<Bytes> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let response = self
|
||||
.client
|
||||
.get(&url)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| StorageError::Network(e.to_string()))?;
|
||||
let response = self.signed_request(reqwest::Method::GET, key, None).await?;
|
||||
|
||||
if response.status().is_success() {
|
||||
response
|
||||
@@ -75,13 +208,9 @@ impl StorageBackend for S3Storage {
|
||||
}
|
||||
|
||||
async fn delete(&self, key: &str) -> Result<()> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let response = self
|
||||
.client
|
||||
.delete(&url)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| StorageError::Network(e.to_string()))?;
|
||||
.signed_request(reqwest::Method::DELETE, key, None)
|
||||
.await?;
|
||||
|
||||
if response.status().is_success() || response.status().as_u16() == 204 {
|
||||
Ok(())
|
||||
@@ -96,8 +225,59 @@ impl StorageBackend for S3Storage {
|
||||
}
|
||||
|
||||
async fn list(&self, prefix: &str) -> Vec<String> {
|
||||
// For listing, we need to make a request to the bucket
|
||||
let url = format!("{}/{}", self.s3_url, self.bucket);
|
||||
match self.client.get(&url).send().await {
|
||||
let now = Utc::now();
|
||||
let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
|
||||
let date = now.format("%Y%m%d").to_string();
|
||||
let payload_hash = hex::encode(Sha256::digest(b""));
|
||||
|
||||
let host = self
|
||||
.s3_url
|
||||
.trim_start_matches("http://")
|
||||
.trim_start_matches("https://");
|
||||
|
||||
let mut request = self
|
||||
.client
|
||||
.get(&url)
|
||||
.header("x-amz-date", ×tamp)
|
||||
.header("x-amz-content-sha256", &payload_hash);
|
||||
|
||||
// Sign for bucket listing (different path)
|
||||
if let (Some(access_key), Some(secret_key)) = (&self.access_key, &self.secret_key) {
|
||||
let canonical_uri = format!("/{}", self.bucket);
|
||||
let canonical_headers = format!(
|
||||
"host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
|
||||
host, payload_hash, timestamp
|
||||
);
|
||||
let signed_headers = "host;x-amz-content-sha256;x-amz-date";
|
||||
|
||||
let canonical_request = format!(
|
||||
"GET\n{}\n\n{}\n{}\n{}",
|
||||
canonical_uri, canonical_headers, signed_headers, payload_hash
|
||||
);
|
||||
|
||||
let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
|
||||
let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
|
||||
let string_to_sign = format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
timestamp, credential_scope, canonical_request_hash
|
||||
);
|
||||
|
||||
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
|
||||
let k_region = hmac_sha256(&k_date, self.region.as_bytes());
|
||||
let k_service = hmac_sha256(&k_region, b"s3");
|
||||
let k_signing = hmac_sha256(&k_service, b"aws4_request");
|
||||
let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));
|
||||
|
||||
let auth = format!(
|
||||
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
|
||||
access_key, credential_scope, signed_headers, signature
|
||||
);
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
match request.send().await {
|
||||
Ok(response) if response.status().is_success() => {
|
||||
if let Ok(xml) = response.text().await {
|
||||
Self::parse_s3_keys(&xml, prefix)
|
||||
@@ -110,18 +290,22 @@ impl StorageBackend for S3Storage {
|
||||
}
|
||||
|
||||
async fn stat(&self, key: &str) -> Option<FileMeta> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let response = self.client.head(&url).send().await.ok()?;
|
||||
let response = self
|
||||
.signed_request(reqwest::Method::HEAD, key, None)
|
||||
.await
|
||||
.ok()?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return None;
|
||||
}
|
||||
|
||||
let size = response
|
||||
.headers()
|
||||
.get("content-length")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.and_then(|v| v.parse().ok())
|
||||
.unwrap_or(0);
|
||||
// S3 uses Last-Modified header, but for simplicity use current time if unavailable
|
||||
|
||||
let modified = response
|
||||
.headers()
|
||||
.get("last-modified")
|
||||
@@ -133,12 +317,63 @@ impl StorageBackend for S3Storage {
|
||||
.as_secs()
|
||||
})
|
||||
.unwrap_or(0);
|
||||
|
||||
Some(FileMeta { size, modified })
|
||||
}
|
||||
|
||||
async fn health_check(&self) -> bool {
|
||||
// Try HEAD on the bucket
|
||||
let url = format!("{}/{}", self.s3_url, self.bucket);
|
||||
match self.client.head(&url).send().await {
|
||||
let now = Utc::now();
|
||||
let timestamp = now.format("%Y%m%dT%H%M%SZ").to_string();
|
||||
let date = now.format("%Y%m%d").to_string();
|
||||
let payload_hash = hex::encode(Sha256::digest(b""));
|
||||
|
||||
let host = self
|
||||
.s3_url
|
||||
.trim_start_matches("http://")
|
||||
.trim_start_matches("https://");
|
||||
|
||||
let mut request = self
|
||||
.client
|
||||
.head(&url)
|
||||
.header("x-amz-date", ×tamp)
|
||||
.header("x-amz-content-sha256", &payload_hash);
|
||||
|
||||
if let (Some(access_key), Some(secret_key)) = (&self.access_key, &self.secret_key) {
|
||||
let canonical_uri = format!("/{}", self.bucket);
|
||||
let canonical_headers = format!(
|
||||
"host:{}\nx-amz-content-sha256:{}\nx-amz-date:{}\n",
|
||||
host, payload_hash, timestamp
|
||||
);
|
||||
let signed_headers = "host;x-amz-content-sha256;x-amz-date";
|
||||
|
||||
let canonical_request = format!(
|
||||
"HEAD\n{}\n\n{}\n{}\n{}",
|
||||
canonical_uri, canonical_headers, signed_headers, payload_hash
|
||||
);
|
||||
|
||||
let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
|
||||
let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
|
||||
let string_to_sign = format!(
|
||||
"AWS4-HMAC-SHA256\n{}\n{}\n{}",
|
||||
timestamp, credential_scope, canonical_request_hash
|
||||
);
|
||||
|
||||
let k_date = hmac_sha256(format!("AWS4{}", secret_key).as_bytes(), date.as_bytes());
|
||||
let k_region = hmac_sha256(&k_date, self.region.as_bytes());
|
||||
let k_service = hmac_sha256(&k_region, b"s3");
|
||||
let k_signing = hmac_sha256(&k_service, b"aws4_request");
|
||||
let signature = hex::encode(hmac_sha256(&k_signing, string_to_sign.as_bytes()));
|
||||
|
||||
let auth = format!(
|
||||
"AWS4-HMAC-SHA256 Credential={}/{}, SignedHeaders={}, Signature={}",
|
||||
access_key, credential_scope, signed_headers, signature
|
||||
);
|
||||
request = request.header("Authorization", auth);
|
||||
}
|
||||
|
||||
match request.send().await {
|
||||
Ok(response) => response.status().is_success() || response.status().as_u16() == 404,
|
||||
Err(_) => false,
|
||||
}
|
||||
@@ -152,173 +387,28 @@ impl StorageBackend for S3Storage {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use wiremock::matchers::{method, path};
|
||||
use wiremock::{Mock, MockServer, ResponseTemplate};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_put_success() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("PUT"))
|
||||
.and(path("/test-bucket/test-key"))
|
||||
.respond_with(ResponseTemplate::new(200))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
let result = storage.put("test-key", b"data").await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_put_failure() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("PUT"))
|
||||
.and(path("/test-bucket/test-key"))
|
||||
.respond_with(ResponseTemplate::new(500))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
let result = storage.put("test-key", b"data").await;
|
||||
assert!(matches!(result, Err(StorageError::Network(_))));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_success() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("GET"))
|
||||
.and(path("/test-bucket/test-key"))
|
||||
.respond_with(ResponseTemplate::new(200).set_body_bytes(b"test data".to_vec()))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
let data = storage.get("test-key").await.unwrap();
|
||||
assert_eq!(&*data, b"test data");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_not_found() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("GET"))
|
||||
.and(path("/test-bucket/missing"))
|
||||
.respond_with(ResponseTemplate::new(404))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
let result = storage.get("missing").await;
|
||||
assert!(matches!(result, Err(StorageError::NotFound)));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_list() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
let xml_response = r#"<?xml version="1.0"?>
|
||||
<ListBucketResult>
|
||||
<Key>docker/image1</Key>
|
||||
<Key>docker/image2</Key>
|
||||
<Key>maven/artifact</Key>
|
||||
</ListBucketResult>"#;
|
||||
|
||||
Mock::given(method("GET"))
|
||||
.and(path("/test-bucket"))
|
||||
.respond_with(ResponseTemplate::new(200).set_body_string(xml_response))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
let keys = storage.list("docker/").await;
|
||||
assert_eq!(keys.len(), 2);
|
||||
assert!(keys.iter().all(|k| k.starts_with("docker/")));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stat_success() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("HEAD"))
|
||||
.and(path("/test-bucket/test-key"))
|
||||
.respond_with(
|
||||
ResponseTemplate::new(200)
|
||||
.insert_header("content-length", "1234")
|
||||
.insert_header("last-modified", "Sun, 06 Nov 1994 08:49:37 GMT"),
|
||||
)
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
let meta = storage.stat("test-key").await.unwrap();
|
||||
assert_eq!(meta.size, 1234);
|
||||
assert!(meta.modified > 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_stat_not_found() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("HEAD"))
|
||||
.and(path("/test-bucket/missing"))
|
||||
.respond_with(ResponseTemplate::new(404))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
let meta = storage.stat("missing").await;
|
||||
assert!(meta.is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_health_check_healthy() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("HEAD"))
|
||||
.and(path("/test-bucket"))
|
||||
.respond_with(ResponseTemplate::new(200))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
assert!(storage.health_check().await);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_health_check_bucket_not_found_is_ok() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("HEAD"))
|
||||
.and(path("/test-bucket"))
|
||||
.respond_with(ResponseTemplate::new(404))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
// 404 is OK for health check (bucket may be empty)
|
||||
assert!(storage.health_check().await);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_health_check_server_error() {
|
||||
let mock_server = MockServer::start().await;
|
||||
let storage = S3Storage::new(&mock_server.uri(), "test-bucket");
|
||||
|
||||
Mock::given(method("HEAD"))
|
||||
.and(path("/test-bucket"))
|
||||
.respond_with(ResponseTemplate::new(500))
|
||||
.mount(&mock_server)
|
||||
.await;
|
||||
|
||||
assert!(!storage.health_check().await);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_backend_name() {
|
||||
let storage = S3Storage::new("http://localhost:9000", "bucket");
|
||||
let storage = S3Storage::new(
|
||||
"http://localhost:9000",
|
||||
"test-bucket",
|
||||
"us-east-1",
|
||||
Some("access"),
|
||||
Some("secret"),
|
||||
);
|
||||
assert_eq!(storage.backend_name(), "s3");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_s3_storage_creation_anonymous() {
|
||||
let storage = S3Storage::new(
|
||||
"http://localhost:9000",
|
||||
"test-bucket",
|
||||
"us-east-1",
|
||||
None,
|
||||
None,
|
||||
);
|
||||
assert_eq!(storage.backend_name(), "s3");
|
||||
}
|
||||
|
||||
@@ -328,4 +418,10 @@ mod tests {
|
||||
let keys = S3Storage::parse_s3_keys(xml, "docker/");
|
||||
assert_eq!(keys, vec!["docker/a", "docker/b"]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_hmac_sha256() {
|
||||
let result = hmac_sha256(b"key", b"data");
|
||||
assert!(!result.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::fs;
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use super::components::{format_size, format_timestamp, html_escape};
|
||||
use super::templates::encode_uri_component;
|
||||
use crate::activity_log::ActivityEntry;
|
||||
@@ -396,6 +399,11 @@ pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
|
||||
let mut repos: HashMap<String, (RepoInfo, u64)> = HashMap::new(); // (info, latest_modified)
|
||||
|
||||
for key in &keys {
|
||||
// Skip .meta.json files
|
||||
if key.ends_with(".meta.json") {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(rest) = key.strip_prefix("docker/") {
|
||||
let parts: Vec<_> = rest.split('/').collect();
|
||||
if parts.len() >= 3 {
|
||||
@@ -412,10 +420,35 @@ pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
|
||||
)
|
||||
});
|
||||
|
||||
if parts[1] == "manifests" {
|
||||
if parts[1] == "manifests" && key.ends_with(".json") {
|
||||
entry.0.versions += 1;
|
||||
|
||||
// Parse manifest to get actual image size (config + layers)
|
||||
if let Ok(manifest_data) = storage.get(key).await {
|
||||
if let Ok(manifest) =
|
||||
serde_json::from_slice::<serde_json::Value>(&manifest_data)
|
||||
{
|
||||
let config_size = manifest
|
||||
.get("config")
|
||||
.and_then(|c| c.get("size"))
|
||||
.and_then(|s| s.as_u64())
|
||||
.unwrap_or(0);
|
||||
let layers_size: u64 = manifest
|
||||
.get("layers")
|
||||
.and_then(|l| l.as_array())
|
||||
.map(|layers| {
|
||||
layers
|
||||
.iter()
|
||||
.filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
|
||||
.sum()
|
||||
})
|
||||
.unwrap_or(0);
|
||||
entry.0.size += config_size + layers_size;
|
||||
}
|
||||
}
|
||||
|
||||
// Update timestamp
|
||||
if let Some(meta) = storage.stat(key).await {
|
||||
entry.0.size += meta.size;
|
||||
if meta.modified > entry.1 {
|
||||
entry.1 = meta.modified;
|
||||
entry.0.updated = format_timestamp(meta.modified);
|
||||
@@ -470,11 +503,37 @@ pub async fn get_docker_detail(state: &AppState, name: &str) -> DockerDetail {
|
||||
"N/A".to_string()
|
||||
};
|
||||
|
||||
// Use size from metadata if available, otherwise from file
|
||||
// Calculate size from manifest layers (config + layers)
|
||||
let size = if metadata.size_bytes > 0 {
|
||||
metadata.size_bytes
|
||||
} else {
|
||||
state.storage.stat(key).await.map(|m| m.size).unwrap_or(0)
|
||||
// Parse manifest to get actual image size
|
||||
if let Ok(manifest_data) = state.storage.get(key).await {
|
||||
if let Ok(manifest) =
|
||||
serde_json::from_slice::<serde_json::Value>(&manifest_data)
|
||||
{
|
||||
let config_size = manifest
|
||||
.get("config")
|
||||
.and_then(|c| c.get("size"))
|
||||
.and_then(|s| s.as_u64())
|
||||
.unwrap_or(0);
|
||||
let layers_size: u64 = manifest
|
||||
.get("layers")
|
||||
.and_then(|l| l.as_array())
|
||||
.map(|layers| {
|
||||
layers
|
||||
.iter()
|
||||
.filter_map(|l| l.get("size").and_then(|s| s.as_u64()))
|
||||
.sum()
|
||||
})
|
||||
.unwrap_or(0);
|
||||
config_size + layers_size
|
||||
} else {
|
||||
0
|
||||
}
|
||||
} else {
|
||||
0
|
||||
}
|
||||
};
|
||||
|
||||
// Format last_pulled
|
||||
@@ -574,68 +633,116 @@ pub async fn get_maven_detail(storage: &Storage, path: &str) -> MavenDetail {
|
||||
pub async fn get_npm_packages(storage: &Storage) -> Vec<RepoInfo> {
|
||||
let keys = storage.list("npm/").await;
|
||||
|
||||
let mut packages: HashMap<String, (RepoInfo, u64)> = HashMap::new();
|
||||
let mut packages: HashMap<String, RepoInfo> = HashMap::new();
|
||||
|
||||
// Find all metadata.json files
|
||||
for key in &keys {
|
||||
if let Some(rest) = key.strip_prefix("npm/") {
|
||||
let parts: Vec<_> = rest.split('/').collect();
|
||||
if !parts.is_empty() {
|
||||
let name = parts[0].to_string();
|
||||
let entry = packages.entry(name.clone()).or_insert_with(|| {
|
||||
(
|
||||
if key.ends_with("/metadata.json") {
|
||||
if let Some(name) = key
|
||||
.strip_prefix("npm/")
|
||||
.and_then(|s| s.strip_suffix("/metadata.json"))
|
||||
{
|
||||
// Parse metadata to get version count and info
|
||||
if let Ok(data) = storage.get(key).await {
|
||||
if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
|
||||
let versions_count = metadata
|
||||
.get("versions")
|
||||
.and_then(|v| v.as_object())
|
||||
.map(|v| v.len())
|
||||
.unwrap_or(0);
|
||||
|
||||
// Calculate total size from dist.unpackedSize or estimate
|
||||
let total_size: u64 = metadata
|
||||
.get("versions")
|
||||
.and_then(|v| v.as_object())
|
||||
.map(|versions| {
|
||||
versions
|
||||
.values()
|
||||
.filter_map(|v| {
|
||||
v.get("dist")
|
||||
.and_then(|d| d.get("unpackedSize"))
|
||||
.and_then(|s| s.as_u64())
|
||||
})
|
||||
.sum()
|
||||
})
|
||||
.unwrap_or(0);
|
||||
|
||||
// Get latest version time for "updated"
|
||||
let updated = metadata
|
||||
.get("time")
|
||||
.and_then(|t| t.get("modified"))
|
||||
.and_then(|m| m.as_str())
|
||||
.map(|s| s[..10].to_string()) // Take just date part
|
||||
.unwrap_or_else(|| "N/A".to_string());
|
||||
|
||||
packages.insert(
|
||||
name.to_string(),
|
||||
RepoInfo {
|
||||
name,
|
||||
versions: 0,
|
||||
size: 0,
|
||||
updated: "N/A".to_string(),
|
||||
name: name.to_string(),
|
||||
versions: versions_count,
|
||||
size: total_size,
|
||||
updated,
|
||||
},
|
||||
0,
|
||||
)
|
||||
});
|
||||
|
||||
if parts.len() >= 3 && parts[1] == "tarballs" {
|
||||
entry.0.versions += 1;
|
||||
if let Some(meta) = storage.stat(key).await {
|
||||
entry.0.size += meta.size;
|
||||
if meta.modified > entry.1 {
|
||||
entry.1 = meta.modified;
|
||||
entry.0.updated = format_timestamp(meta.modified);
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut result: Vec<_> = packages.into_values().map(|(r, _)| r).collect();
|
||||
let mut result: Vec<_> = packages.into_values().collect();
|
||||
result.sort_by(|a, b| a.name.cmp(&b.name));
|
||||
result
|
||||
}
|
||||
|
||||
pub async fn get_npm_detail(storage: &Storage, name: &str) -> PackageDetail {
|
||||
let prefix = format!("npm/{}/tarballs/", name);
|
||||
let keys = storage.list(&prefix).await;
|
||||
let metadata_key = format!("npm/{}/metadata.json", name);
|
||||
|
||||
let mut versions = Vec::new();
|
||||
for key in &keys {
|
||||
if let Some(tarball) = key.strip_prefix(&prefix) {
|
||||
if let Some(version) = tarball
|
||||
.strip_prefix(&format!("{}-", name))
|
||||
.and_then(|s| s.strip_suffix(".tgz"))
|
||||
{
|
||||
let (size, published) = if let Some(meta) = storage.stat(key).await {
|
||||
(meta.size, format_timestamp(meta.modified))
|
||||
} else {
|
||||
(0, "N/A".to_string())
|
||||
};
|
||||
|
||||
// Parse metadata.json for version info
|
||||
if let Ok(data) = storage.get(&metadata_key).await {
|
||||
if let Ok(metadata) = serde_json::from_slice::<serde_json::Value>(&data) {
|
||||
if let Some(versions_obj) = metadata.get("versions").and_then(|v| v.as_object()) {
|
||||
let time_obj = metadata.get("time").and_then(|t| t.as_object());
|
||||
|
||||
for (version, info) in versions_obj {
|
||||
let size = info
|
||||
.get("dist")
|
||||
.and_then(|d| d.get("unpackedSize"))
|
||||
.and_then(|s| s.as_u64())
|
||||
.unwrap_or(0);
|
||||
|
||||
let published = time_obj
|
||||
.and_then(|t| t.get(version))
|
||||
.and_then(|p| p.as_str())
|
||||
.map(|s| s[..10].to_string())
|
||||
.unwrap_or_else(|| "N/A".to_string());
|
||||
|
||||
versions.push(VersionInfo {
|
||||
version: version.to_string(),
|
||||
version: version.clone(),
|
||||
size,
|
||||
published,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by version (semver-like, newest first)
|
||||
versions.sort_by(|a, b| {
|
||||
let a_parts: Vec<u32> = a
|
||||
.version
|
||||
.split('.')
|
||||
.filter_map(|s| s.parse().ok())
|
||||
.collect();
|
||||
let b_parts: Vec<u32> = b
|
||||
.version
|
||||
.split('.')
|
||||
.filter_map(|s| s.parse().ok())
|
||||
.collect();
|
||||
b_parts.cmp(&a_parts)
|
||||
});
|
||||
|
||||
PackageDetail { versions }
|
||||
}
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use super::i18n::{get_translations, Lang, Translations};
|
||||
|
||||
/// Application version from Cargo.toml
|
||||
@@ -137,7 +140,7 @@ fn sidebar_dark(active_page: Option<&str>, t: &Translations) -> String {
|
||||
<div id="sidebar" class="fixed md:static inset-y-0 left-0 z-50 w-64 bg-slate-800 text-white flex flex-col transform -translate-x-full md:translate-x-0 transition-transform duration-200 ease-in-out">
|
||||
<div class="h-16 flex items-center justify-between px-6 border-b border-slate-700">
|
||||
<div class="flex items-center">
|
||||
<span class="text-2xl font-bold tracking-tight">N<span class="inline-block w-5 h-5 rounded-full border-2 border-current align-middle relative -top-0.5 mx-0.5"></span>RA</span>
|
||||
<span class="text-xl font-bold tracking-tight">N<span class="inline-block w-4 h-4 rounded-full border-2 border-current align-middle mx-px"></span>RA</span>
|
||||
</div>
|
||||
<button onclick="toggleSidebar()" class="md:hidden p-1 rounded-lg hover:bg-slate-700">
|
||||
<svg class="w-6 h-6" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
/// Internationalization support for the UI
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
mod api;
|
||||
mod components;
|
||||
pub mod i18n;
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use super::api::{DashboardResponse, DockerDetail, MavenDetail, PackageDetail, RepoInfo};
|
||||
use super::components::*;
|
||||
use super::i18n::{get_translations, Lang};
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
#![allow(dead_code)]
|
||||
//! Input validation for artifact registry paths and identifiers
|
||||
//!
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
// Copyright (c) 2026 Volkov Pavel | DevITWay
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
mod config;
|
||||
|
||||
use axum::extract::DefaultBodyLimit;
|
||||
|
||||
Reference in New Issue
Block a user