mirror of
https://github.com/getnora-io/nora.git
synced 2026-04-12 04:30:32 +00:00
feat: initialize NORA artifact registry
Cloud-native multi-protocol artifact registry in Rust. - Docker Registry v2 - Maven (+ proxy) - npm (+ proxy) - Cargo, PyPI - Web UI, Swagger, Prometheus - Local & S3 storage - 32MB Docker image Created by DevITWay https://getnora.io
This commit is contained in:
11
.dockerignore
Normal file
11
.dockerignore
Normal file
@@ -0,0 +1,11 @@
|
||||
target/
|
||||
.git/
|
||||
.gitignore
|
||||
*.md
|
||||
!README.md
|
||||
Dockerfile*
|
||||
docker-compose*
|
||||
.env*
|
||||
*.htpasswd
|
||||
data/
|
||||
.internal*
|
||||
7
.gitignore
vendored
Normal file
7
.gitignore
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
/target
|
||||
data/
|
||||
*.htpasswd
|
||||
.env
|
||||
.env.*
|
||||
*.log
|
||||
internal config
|
||||
100
CONTRIBUTING.md
Normal file
100
CONTRIBUTING.md
Normal file
@@ -0,0 +1,100 @@
|
||||
# Contributing to NORA
|
||||
|
||||
Thanks for your interest in contributing to NORA!
|
||||
|
||||
## Getting Started
|
||||
|
||||
1. **Fork** the repository
|
||||
2. **Clone** your fork:
|
||||
```bash
|
||||
git clone https://github.com/your-username/nora.git
|
||||
cd nora
|
||||
```
|
||||
3. **Create a branch**:
|
||||
```bash
|
||||
git checkout -b feature/your-feature-name
|
||||
```
|
||||
|
||||
## Development Setup
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Rust 1.75+ (`rustup update`)
|
||||
- Docker (for testing)
|
||||
- Git
|
||||
|
||||
### Build
|
||||
|
||||
```bash
|
||||
cargo build
|
||||
```
|
||||
|
||||
### Run
|
||||
|
||||
```bash
|
||||
cargo run --bin nora
|
||||
```
|
||||
|
||||
### Test
|
||||
|
||||
```bash
|
||||
cargo test
|
||||
cargo clippy
|
||||
cargo fmt --check
|
||||
```
|
||||
|
||||
## Making Changes
|
||||
|
||||
1. **Write code** following Rust conventions
|
||||
2. **Add tests** for new features
|
||||
3. **Update docs** if needed
|
||||
4. **Run checks**:
|
||||
```bash
|
||||
cargo fmt
|
||||
cargo clippy -- -D warnings
|
||||
cargo test
|
||||
```
|
||||
|
||||
## Commit Messages
|
||||
|
||||
Follow [Conventional Commits](https://www.conventionalcommits.org/):
|
||||
|
||||
- `feat:` - New feature
|
||||
- `fix:` - Bug fix
|
||||
- `docs:` - Documentation
|
||||
- `test:` - Tests
|
||||
- `refactor:` - Code refactoring
|
||||
- `chore:` - Maintenance
|
||||
|
||||
Example:
|
||||
```bash
|
||||
git commit -m "feat: add S3 storage migration"
|
||||
```
|
||||
|
||||
## Pull Request Process
|
||||
|
||||
1. **Push** to your fork:
|
||||
```bash
|
||||
git push origin feature/your-feature-name
|
||||
```
|
||||
|
||||
2. **Open a Pull Request** on GitHub
|
||||
|
||||
3. **Wait for review** - maintainers will review your PR
|
||||
|
||||
## Code Style
|
||||
|
||||
- Follow Rust conventions
|
||||
- Use `cargo fmt` for formatting
|
||||
- Pass `cargo clippy` with no warnings
|
||||
- Write meaningful commit messages
|
||||
|
||||
## Questions?
|
||||
|
||||
- Open an [Issue](https://github.com/getnora-io/nora/issues)
|
||||
- Ask in [Discussions](https://github.com/getnora-io/nora/discussions)
|
||||
- Reach out on [Telegram](https://t.me/DevITWay)
|
||||
|
||||
---
|
||||
|
||||
Built with love by the NORA community
|
||||
2622
Cargo.lock
generated
Normal file
2622
Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
26
Cargo.toml
Normal file
26
Cargo.toml
Normal file
@@ -0,0 +1,26 @@
|
||||
# NORA workspace manifest: three member crates sharing metadata and versions.
[workspace]
resolver = "2"
members = [
    "nora-registry",
    "nora-storage",
    "nora-cli",
]

# Inherited by members via `<field>.workspace = true`.
[workspace.package]
version = "0.1.0"
edition = "2021"
license = "MIT"
authors = ["DevITWay <devitway@gmail.com>"]
repository = "https://github.com/getnora-io/nora"
homepage = "https://getnora.io"

# Single source of truth for dependency versions; members opt in with
# `<dep>.workspace = true`.
[workspace.dependencies]
tokio = { version = "1", features = ["full"] }
axum = "0.8"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
sha2 = "0.10"
async-trait = "0.1"
|
||||
59
Dockerfile
Normal file
59
Dockerfile
Normal file
@@ -0,0 +1,59 @@
|
||||
# ---------- Build stage ----------
FROM rust:1.83-alpine AS builder

RUN apk add --no-cache musl-dev curl

WORKDIR /app

# Copy manifests first so the dependency layer is cached independently of
# source changes.
COPY Cargo.toml Cargo.lock ./
COPY nora-registry/Cargo.toml nora-registry/
COPY nora-storage/Cargo.toml nora-storage/
COPY nora-cli/Cargo.toml nora-cli/

# Stub sources let `cargo build` compile dependencies only.
# NOTE(review): all stubs are main.rs — if nora-storage is a library crate,
# a src/lib.rs stub may also be needed; confirm against its manifest.
RUN mkdir -p nora-registry/src nora-storage/src nora-cli/src && \
    echo "fn main() {}" > nora-registry/src/main.rs && \
    echo "fn main() {}" > nora-storage/src/main.rs && \
    echo "fn main() {}" > nora-cli/src/main.rs

# Warm the dependency cache, then discard the stubs.
RUN cargo build --release --package nora-registry && \
    rm -rf nora-registry/src nora-storage/src nora-cli/src

# Real sources.
COPY nora-registry/src nora-registry/src
COPY nora-storage/src nora-storage/src
COPY nora-cli/src nora-cli/src

# `touch` bumps the mtime so cargo rebuilds the crate itself (the cached
# artifacts from the stub build would otherwise be considered fresh).
RUN touch nora-registry/src/main.rs && \
    cargo build --release --package nora-registry

# ---------- Runtime stage ----------
FROM alpine:3.20

RUN apk add --no-cache ca-certificates

WORKDIR /app

# The [[bin]] in nora-registry is named `nora`.
COPY --from=builder /app/target/release/nora /usr/local/bin/nora

RUN mkdir -p /data

# Defaults; all overridable at `docker run` time.
ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
ENV NORA_STORAGE_MODE=local
ENV NORA_STORAGE_PATH=/data/storage
ENV NORA_AUTH_TOKEN_STORAGE=/data/tokens

EXPOSE 4000

VOLUME ["/data"]

ENTRYPOINT ["nora"]
CMD ["serve"]
|
||||
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2026 DevITWay
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
169
README.md
Normal file
169
README.md
Normal file
@@ -0,0 +1,169 @@
|
||||
# NORA
|
||||
|
||||
[](LICENSE)
|
||||
[](https://t.me/DevITWay)
|
||||
|
||||
> **Your Cloud-Native Artifact Registry**
|
||||
|
||||
Fast. Organized. Feel at Home.
|
||||
|
||||
**10x faster** than Nexus | **< 100 MB RAM** | **32 MB Docker image**
|
||||
|
||||
## Features
|
||||
|
||||
- **Multi-Protocol Support**
|
||||
- Docker Registry v2
|
||||
- Maven repository (+ proxy to Maven Central)
|
||||
- npm registry (+ proxy to npmjs.org)
|
||||
- Cargo registry
|
||||
- PyPI index
|
||||
|
||||
- **Storage Backends**
|
||||
- Local filesystem (zero-config default)
|
||||
- S3-compatible (MinIO, AWS S3)
|
||||
|
||||
- **Production Ready**
|
||||
- Web UI with search and browse
|
||||
- Swagger UI API documentation
|
||||
- Prometheus metrics (`/metrics`)
|
||||
- Health checks (`/health`, `/ready`)
|
||||
- JSON structured logging
|
||||
- Graceful shutdown
|
||||
|
||||
- **Security**
|
||||
- Basic Auth (htpasswd + bcrypt)
|
||||
- Revocable API tokens
|
||||
- ENV-based configuration (12-Factor)
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Docker (Recommended)
|
||||
|
||||
```bash
|
||||
docker run -d -p 4000:4000 -v nora-data:/data getnora/nora
|
||||
```
|
||||
|
||||
### From Source
|
||||
|
||||
```bash
|
||||
cargo install nora-registry
|
||||
nora
|
||||
```
|
||||
|
||||
Open http://localhost:4000/ui/
|
||||
|
||||
## Usage
|
||||
|
||||
### Docker Images
|
||||
|
||||
```bash
|
||||
# Tag and push
|
||||
docker tag myapp:latest localhost:4000/myapp:latest
|
||||
docker push localhost:4000/myapp:latest
|
||||
|
||||
# Pull
|
||||
docker pull localhost:4000/myapp:latest
|
||||
```
|
||||
|
||||
### Maven
|
||||
|
||||
```xml
|
||||
<!-- settings.xml -->
|
||||
<server>
|
||||
<id>nora</id>
|
||||
<url>http://localhost:4000/maven2/</url>
|
||||
</server>
|
||||
```
|
||||
|
||||
### npm
|
||||
|
||||
```bash
|
||||
npm config set registry http://localhost:4000/npm/
|
||||
npm publish
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
```bash
|
||||
nora # Start server
|
||||
nora serve # Start server (explicit)
|
||||
nora backup -o backup.tar.gz
|
||||
nora restore -i backup.tar.gz
|
||||
nora migrate --from local --to s3
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `NORA_HOST` | 127.0.0.1 | Bind address |
|
||||
| `NORA_PORT` | 4000 | Port |
|
||||
| `NORA_STORAGE_MODE` | local | `local` or `s3` |
|
||||
| `NORA_STORAGE_PATH` | data/storage | Local storage path |
|
||||
| `NORA_STORAGE_S3_URL` | - | S3 endpoint URL |
|
||||
| `NORA_STORAGE_BUCKET` | registry | S3 bucket name |
|
||||
| `NORA_AUTH_ENABLED` | false | Enable authentication |
|
||||
|
||||
### config.toml
|
||||
|
||||
```toml
|
||||
[server]
|
||||
host = "0.0.0.0"
|
||||
port = 4000
|
||||
|
||||
[storage]
|
||||
mode = "local"
|
||||
path = "data/storage"
|
||||
|
||||
[auth]
|
||||
enabled = false
|
||||
htpasswd_file = "users.htpasswd"
|
||||
```
|
||||
|
||||
## Endpoints
|
||||
|
||||
| URL | Description |
|
||||
|-----|-------------|
|
||||
| `/ui/` | Web UI |
|
||||
| `/api-docs` | Swagger UI |
|
||||
| `/health` | Health check |
|
||||
| `/ready` | Readiness probe |
|
||||
| `/metrics` | Prometheus metrics |
|
||||
| `/v2/` | Docker Registry |
|
||||
| `/maven2/` | Maven |
|
||||
| `/npm/` | npm |
|
||||
| `/cargo/` | Cargo |
|
||||
| `/simple/` | PyPI |
|
||||
|
||||
## Performance
|
||||
|
||||
| Metric | NORA | Nexus | JFrog |
|
||||
|--------|------|-------|-------|
|
||||
| Startup | < 3s | 30-60s | 30-60s |
|
||||
| Memory | < 100 MB | 2-4 GB | 2-4 GB |
|
||||
| Image Size | 32 MB | 600+ MB | 1+ GB |
|
||||
|
||||
## Author
|
||||
|
||||
**Created and maintained by [DevITWay](https://github.com/devitway)**
|
||||
|
||||
- Website: [devopsway.ru](https://devopsway.ru)
|
||||
- Telegram: [@DevITWay](https://t.me/DevITWay)
|
||||
- GitHub: [@devitway](https://github.com/devitway)
|
||||
- Email: devitway@gmail.com
|
||||
|
||||
## Contributing
|
||||
|
||||
NORA welcomes contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see [LICENSE](LICENSE)
|
||||
|
||||
Copyright (c) 2026 DevITWay
|
||||
|
||||
---
|
||||
|
||||
**NORA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)
|
||||
15
docker-compose.yml
Normal file
15
docker-compose.yml
Normal file
@@ -0,0 +1,15 @@
|
||||
# Single-service deployment of the NORA registry.
services:
  nora:
    build: .
    image: getnora/nora:latest
    ports:
      - "4000:4000"
    volumes:
      # Named volume keeps artifacts across container recreation.
      - nora-data:/data
    environment:
      - RUST_LOG=info
      # Auth is off by default; enable for anything non-local.
      - NORA_AUTH_ENABLED=false
    restart: unless-stopped

volumes:
  nora-data:
|
||||
23
nora-cli/Cargo.toml
Normal file
23
nora-cli/Cargo.toml
Normal file
@@ -0,0 +1,23 @@
|
||||
# Manifest for the standalone CLI crate.
[package]
name = "nora-cli"
version.workspace = true
edition.workspace = true
license.workspace = true
authors.workspace = true
repository.workspace = true
homepage.workspace = true
description = "CLI tool for NORA registry"

# Binary is named nora-cli to avoid clashing with the server's `nora` binary.
[[bin]]
name = "nora-cli"
path = "src/main.rs"

[dependencies]
# Shared workspace versions.
tokio.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
# CLI-only dependencies.
clap = { version = "4", features = ["derive"] }
indicatif = "0.17"
tar = "0.4"
flate2 = "1.0"
|
||||
52
nora-cli/src/main.rs
Normal file
52
nora-cli/src/main.rs
Normal file
@@ -0,0 +1,52 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "nora-cli")]
|
||||
#[command(about = "CLI tool for Nora registry")]
|
||||
struct Cli {
|
||||
#[command(subcommand)]
|
||||
command: Commands,
|
||||
}
|
||||
|
||||
#[derive(Subcommand)]
|
||||
enum Commands {
|
||||
/// Login to a registry
|
||||
Login {
|
||||
#[arg(long)]
|
||||
registry: String,
|
||||
#[arg(short, long)]
|
||||
username: String,
|
||||
},
|
||||
/// Push an artifact
|
||||
Push {
|
||||
#[arg(long)]
|
||||
registry: String,
|
||||
path: String,
|
||||
},
|
||||
/// Pull an artifact
|
||||
Pull {
|
||||
#[arg(long)]
|
||||
registry: String,
|
||||
artifact: String,
|
||||
},
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let cli = Cli::parse();
|
||||
|
||||
match cli.command {
|
||||
Commands::Login { registry, username } => {
|
||||
println!("Logging in to {} as {}", registry, username);
|
||||
// TODO: implement
|
||||
}
|
||||
Commands::Push { registry, path } => {
|
||||
println!("Pushing {} to {}", path, registry);
|
||||
// TODO: implement
|
||||
}
|
||||
Commands::Pull { registry, artifact } => {
|
||||
println!("Pulling {} from {}", artifact, registry);
|
||||
// TODO: implement
|
||||
}
|
||||
}
|
||||
}
|
||||
40
nora-registry/Cargo.toml
Normal file
40
nora-registry/Cargo.toml
Normal file
@@ -0,0 +1,40 @@
|
||||
# Manifest for the registry server crate (produces the `nora` binary).
[package]
name = "nora-registry"
version.workspace = true
edition.workspace = true
license.workspace = true
authors.workspace = true
repository.workspace = true
homepage.workspace = true
description = "Cloud-Native Artifact Registry - Fast, lightweight, multi-protocol"
keywords = ["registry", "docker", "artifacts", "cloud-native", "devops"]
categories = ["command-line-utilities", "development-tools", "web-programming"]

[[bin]]
name = "nora"
path = "src/main.rs"

[dependencies]
# Versions inherited from the workspace.
tokio.workspace = true
axum.workspace = true
serde.workspace = true
serde_json.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
reqwest.workspace = true
sha2.workspace = true
async-trait.workspace = true
# Crate-local dependencies.
toml = "0.8"
uuid = { version = "1", features = ["v4"] }
bcrypt = "0.17"
base64 = "0.22"
prometheus = "0.13"
lazy_static = "1.5"
httpdate = "1"
utoipa = { version = "5", features = ["axum_extras"] }
utoipa-swagger-ui = { version = "9", features = ["axum", "reqwest"] }
clap = { version = "4", features = ["derive"] }
tar = "0.4"
flate2 = "1.0"
indicatif = "0.17"
chrono = { version = "0.4", features = ["serde"] }
|
||||
315
nora-registry/src/auth.rs
Normal file
315
nora-registry/src/auth.rs
Normal file
@@ -0,0 +1,315 @@
|
||||
use axum::{
|
||||
body::Body,
|
||||
extract::State,
|
||||
http::{header, Request, StatusCode},
|
||||
middleware::Next,
|
||||
response::{IntoResponse, Response},
|
||||
};
|
||||
use base64::{engine::general_purpose::STANDARD, Engine};
|
||||
use std::collections::HashMap;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
/// Htpasswd-based authentication
|
||||
#[derive(Clone)]
|
||||
pub struct HtpasswdAuth {
|
||||
users: HashMap<String, String>, // username -> bcrypt hash
|
||||
}
|
||||
|
||||
impl HtpasswdAuth {
|
||||
/// Load users from htpasswd file
|
||||
pub fn from_file(path: &Path) -> Option<Self> {
|
||||
let content = std::fs::read_to_string(path).ok()?;
|
||||
let mut users = HashMap::new();
|
||||
|
||||
for line in content.lines() {
|
||||
let line = line.trim();
|
||||
if line.is_empty() || line.starts_with('#') {
|
||||
continue;
|
||||
}
|
||||
if let Some((username, hash)) = line.split_once(':') {
|
||||
users.insert(username.to_string(), hash.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
if users.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(Self { users })
|
||||
}
|
||||
}
|
||||
|
||||
/// Verify username and password
|
||||
pub fn authenticate(&self, username: &str, password: &str) -> bool {
|
||||
if let Some(hash) = self.users.get(username) {
|
||||
bcrypt::verify(password, hash).unwrap_or(false)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Get list of usernames
|
||||
pub fn list_users(&self) -> Vec<&str> {
|
||||
self.users.keys().map(|s| s.as_str()).collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if path is public (no auth required).
///
/// Exact matches cover root, health/readiness, metrics and the Docker v2
/// ping endpoint; prefix matches cover the UI, Swagger docs and the token
/// management API (which authenticates via its JSON body instead).
fn is_public_path(path: &str) -> bool {
    const EXACT: [&str; 6] = ["/", "/health", "/ready", "/metrics", "/v2/", "/v2"];
    const PREFIXES: [&str; 4] = ["/ui", "/api-docs", "/api/ui", "/api/tokens"];

    EXACT.contains(&path) || PREFIXES.iter().any(|p| path.starts_with(p))
}
|
||||
|
||||
/// Auth middleware - supports Basic auth and Bearer tokens
|
||||
pub async fn auth_middleware(
|
||||
State(state): State<Arc<AppState>>,
|
||||
request: Request<Body>,
|
||||
next: Next,
|
||||
) -> Response {
|
||||
// Skip auth if disabled
|
||||
let auth = match &state.auth {
|
||||
Some(auth) => auth,
|
||||
None => return next.run(request).await,
|
||||
};
|
||||
|
||||
// Skip auth for public endpoints
|
||||
if is_public_path(request.uri().path()) {
|
||||
return next.run(request).await;
|
||||
}
|
||||
|
||||
// Extract Authorization header
|
||||
let auth_header = request
|
||||
.headers()
|
||||
.get(header::AUTHORIZATION)
|
||||
.and_then(|h| h.to_str().ok());
|
||||
|
||||
let auth_header = match auth_header {
|
||||
Some(h) => h,
|
||||
None => return unauthorized_response("Authentication required"),
|
||||
};
|
||||
|
||||
// Try Bearer token first
|
||||
if let Some(token) = auth_header.strip_prefix("Bearer ") {
|
||||
if let Some(ref token_store) = state.tokens {
|
||||
match token_store.verify_token(token) {
|
||||
Ok(_user) => return next.run(request).await,
|
||||
Err(_) => return unauthorized_response("Invalid or expired token"),
|
||||
}
|
||||
} else {
|
||||
return unauthorized_response("Token authentication not configured");
|
||||
}
|
||||
}
|
||||
|
||||
// Parse Basic auth
|
||||
if !auth_header.starts_with("Basic ") {
|
||||
return unauthorized_response("Basic or Bearer authentication required");
|
||||
}
|
||||
|
||||
let encoded = &auth_header[6..];
|
||||
let decoded = match STANDARD.decode(encoded) {
|
||||
Ok(d) => d,
|
||||
Err(_) => return unauthorized_response("Invalid credentials encoding"),
|
||||
};
|
||||
|
||||
let credentials = match String::from_utf8(decoded) {
|
||||
Ok(c) => c,
|
||||
Err(_) => return unauthorized_response("Invalid credentials encoding"),
|
||||
};
|
||||
|
||||
let (username, password) = match credentials.split_once(':') {
|
||||
Some((u, p)) => (u, p),
|
||||
None => return unauthorized_response("Invalid credentials format"),
|
||||
};
|
||||
|
||||
// Verify credentials
|
||||
if !auth.authenticate(username, password) {
|
||||
return unauthorized_response("Invalid username or password");
|
||||
}
|
||||
|
||||
// Auth successful
|
||||
next.run(request).await
|
||||
}
|
||||
|
||||
fn unauthorized_response(message: &str) -> Response {
|
||||
(
|
||||
StatusCode::UNAUTHORIZED,
|
||||
[
|
||||
(header::WWW_AUTHENTICATE, "Basic realm=\"Nora\""),
|
||||
(header::CONTENT_TYPE, "application/json"),
|
||||
],
|
||||
format!(r#"{{"error":"{}"}}"#, message),
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
/// Generate bcrypt hash for password (for CLI user management).
///
/// Uses `bcrypt::DEFAULT_COST`; the returned hash is in the format consumed
/// by [`HtpasswdAuth`] when reading an htpasswd file.
#[allow(dead_code)] // not yet wired into a CLI command
pub fn hash_password(password: &str) -> Result<String, bcrypt::BcryptError> {
    bcrypt::hash(password, bcrypt::DEFAULT_COST)
}
|
||||
|
||||
// Token management API routes
|
||||
use axum::{routing::post, Json, Router};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Request body for `POST /api/tokens`.
///
/// Credentials are carried in the body (not the Authorization header) and are
/// verified against the htpasswd store before a token is issued.
#[derive(Deserialize)]
pub struct CreateTokenRequest {
    pub username: String,
    pub password: String,
    /// Token lifetime in days; defaults to 30 when omitted.
    #[serde(default = "default_ttl")]
    pub ttl_days: u64,
    pub description: Option<String>,
}

/// Serde default for [`CreateTokenRequest::ttl_days`].
fn default_ttl() -> u64 {
    30
}
|
||||
|
||||
/// Response body for a successfully created token. This is the only time the
/// plaintext token is returned to the caller.
#[derive(Serialize)]
pub struct CreateTokenResponse {
    pub token: String,
    pub expires_in_days: u64,
}

/// One entry in the token list. Only a prefix of the stored hash is exposed;
/// it doubles as the identifier accepted by the revoke endpoint.
#[derive(Serialize)]
pub struct TokenListItem {
    pub hash_prefix: String,
    // Timestamps are u64 — presumably Unix seconds; confirm against TokenStore.
    pub created_at: u64,
    pub expires_at: u64,
    pub last_used: Option<u64>,
    pub description: Option<String>,
}

/// Response body for `POST /api/tokens/list`.
#[derive(Serialize)]
pub struct TokenListResponse {
    pub tokens: Vec<TokenListItem>,
}
|
||||
|
||||
/// Create a new API token (requires Basic auth)
|
||||
async fn create_token(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Json(req): Json<CreateTokenRequest>,
|
||||
) -> Response {
|
||||
// Verify user credentials first
|
||||
let auth = match &state.auth {
|
||||
Some(auth) => auth,
|
||||
None => return (StatusCode::SERVICE_UNAVAILABLE, "Auth not configured").into_response(),
|
||||
};
|
||||
|
||||
if !auth.authenticate(&req.username, &req.password) {
|
||||
return (StatusCode::UNAUTHORIZED, "Invalid credentials").into_response();
|
||||
}
|
||||
|
||||
let token_store = match &state.tokens {
|
||||
Some(ts) => ts,
|
||||
None => {
|
||||
return (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Token storage not configured",
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
};
|
||||
|
||||
match token_store.create_token(&req.username, req.ttl_days, req.description) {
|
||||
Ok(token) => Json(CreateTokenResponse {
|
||||
token,
|
||||
expires_in_days: req.ttl_days,
|
||||
})
|
||||
.into_response(),
|
||||
Err(e) => (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// List tokens for authenticated user
|
||||
async fn list_tokens(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Json(req): Json<CreateTokenRequest>,
|
||||
) -> Response {
|
||||
let auth = match &state.auth {
|
||||
Some(auth) => auth,
|
||||
None => return (StatusCode::SERVICE_UNAVAILABLE, "Auth not configured").into_response(),
|
||||
};
|
||||
|
||||
if !auth.authenticate(&req.username, &req.password) {
|
||||
return (StatusCode::UNAUTHORIZED, "Invalid credentials").into_response();
|
||||
}
|
||||
|
||||
let token_store = match &state.tokens {
|
||||
Some(ts) => ts,
|
||||
None => {
|
||||
return (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Token storage not configured",
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
};
|
||||
|
||||
let tokens: Vec<TokenListItem> = token_store
|
||||
.list_tokens(&req.username)
|
||||
.into_iter()
|
||||
.map(|t| TokenListItem {
|
||||
hash_prefix: t.token_hash[..16].to_string(),
|
||||
created_at: t.created_at,
|
||||
expires_at: t.expires_at,
|
||||
last_used: t.last_used,
|
||||
description: t.description,
|
||||
})
|
||||
.collect();
|
||||
|
||||
Json(TokenListResponse { tokens }).into_response()
|
||||
}
|
||||
|
||||
/// Request body for `POST /api/tokens/revoke`.
#[derive(Deserialize)]
pub struct RevokeRequest {
    pub username: String,
    pub password: String,
    /// Token hash prefix as returned by the list endpoint.
    pub hash_prefix: String,
}
|
||||
|
||||
/// Revoke a token
|
||||
async fn revoke_token(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Json(req): Json<RevokeRequest>,
|
||||
) -> Response {
|
||||
let auth = match &state.auth {
|
||||
Some(auth) => auth,
|
||||
None => return (StatusCode::SERVICE_UNAVAILABLE, "Auth not configured").into_response(),
|
||||
};
|
||||
|
||||
if !auth.authenticate(&req.username, &req.password) {
|
||||
return (StatusCode::UNAUTHORIZED, "Invalid credentials").into_response();
|
||||
}
|
||||
|
||||
let token_store = match &state.tokens {
|
||||
Some(ts) => ts,
|
||||
None => {
|
||||
return (
|
||||
StatusCode::SERVICE_UNAVAILABLE,
|
||||
"Token storage not configured",
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
};
|
||||
|
||||
match token_store.revoke_token(&req.hash_prefix) {
|
||||
Ok(()) => (StatusCode::OK, "Token revoked").into_response(),
|
||||
Err(e) => (StatusCode::NOT_FOUND, e.to_string()).into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Token management routes.
///
/// All three endpoints are POST and authenticate via credentials embedded in
/// the JSON body rather than the Authorization header (which is why they are
/// listed in `is_public_path`).
pub fn token_routes() -> Router<Arc<AppState>> {
    Router::new()
        .route("/api/tokens", post(create_token))
        .route("/api/tokens/list", post(list_tokens))
        .route("/api/tokens/revoke", post(revoke_token))
}
|
||||
299
nora-registry/src/backup.rs
Normal file
299
nora-registry/src/backup.rs
Normal file
@@ -0,0 +1,299 @@
|
||||
//! Backup and restore functionality for Nora
|
||||
//!
|
||||
//! Exports all artifacts to a tar.gz file and restores from backups.
|
||||
|
||||
use crate::storage::Storage;
|
||||
use chrono::{DateTime, Utc};
|
||||
use flate2::read::GzDecoder;
|
||||
use flate2::write::GzEncoder;
|
||||
use flate2::Compression;
|
||||
use indicatif::{ProgressBar, ProgressStyle};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs::File;
|
||||
use std::io::Read;
|
||||
use std::path::Path;
|
||||
use tar::{Archive, Builder, Header};
|
||||
|
||||
/// Backup metadata stored in metadata.json.
///
/// Written as the final archive entry so `restore` can display provenance
/// before extracting anything.
#[derive(Debug, Serialize, Deserialize)]
pub struct BackupMetadata {
    /// Version of the nora-registry crate that produced the backup.
    pub version: String,
    pub created_at: DateTime<Utc>,
    /// Number of artifacts successfully written into the archive.
    pub artifact_count: usize,
    /// Total uncompressed payload size in bytes.
    pub total_bytes: u64,
    /// Storage backend the backup was taken from (e.g. local or S3).
    pub storage_backend: String,
}
|
||||
|
||||
/// Statistics returned after backup.
#[derive(Debug)]
pub struct BackupStats {
    pub artifact_count: usize,
    /// Uncompressed bytes read from storage.
    pub total_bytes: u64,
    /// Size of the compressed tar.gz on disk.
    pub output_size: u64,
}
|
||||
|
||||
/// Statistics returned after restore.
#[derive(Debug)]
pub struct RestoreStats {
    pub artifact_count: usize,
    /// Uncompressed bytes written back into storage.
    pub total_bytes: u64,
}
|
||||
|
||||
/// Create a backup of all artifacts to a tar.gz file
|
||||
pub async fn create_backup(storage: &Storage, output: &Path) -> Result<BackupStats, String> {
|
||||
println!("Creating backup to: {}", output.display());
|
||||
println!("Storage backend: {}", storage.backend_name());
|
||||
|
||||
// List all keys
|
||||
println!("Scanning storage...");
|
||||
let keys = storage.list("").await;
|
||||
|
||||
if keys.is_empty() {
|
||||
println!("No artifacts found in storage. Creating empty backup.");
|
||||
} else {
|
||||
println!("Found {} artifacts", keys.len());
|
||||
}
|
||||
|
||||
// Create output file
|
||||
let file = File::create(output).map_err(|e| format!("Failed to create output file: {}", e))?;
|
||||
let encoder = GzEncoder::new(file, Compression::default());
|
||||
let mut archive = Builder::new(encoder);
|
||||
|
||||
// Progress bar
|
||||
let pb = ProgressBar::new(keys.len() as u64);
|
||||
pb.set_style(
|
||||
ProgressStyle::default_bar()
|
||||
.template(
|
||||
"{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta})",
|
||||
)
|
||||
.expect("Invalid progress template")
|
||||
.progress_chars("#>-"),
|
||||
);
|
||||
|
||||
let mut total_bytes: u64 = 0;
|
||||
let mut artifact_count = 0;
|
||||
|
||||
for key in &keys {
|
||||
// Get file data
|
||||
let data = match storage.get(key).await {
|
||||
Ok(data) => data,
|
||||
Err(e) => {
|
||||
pb.println(format!("Warning: Failed to read {}: {}", key, e));
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Create tar header
|
||||
let mut header = Header::new_gnu();
|
||||
header.set_size(data.len() as u64);
|
||||
header.set_mode(0o644);
|
||||
header.set_mtime(
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs(),
|
||||
);
|
||||
header.set_cksum();
|
||||
|
||||
// Add to archive
|
||||
archive
|
||||
.append_data(&mut header, key, &*data)
|
||||
.map_err(|e| format!("Failed to add {} to archive: {}", key, e))?;
|
||||
|
||||
total_bytes += data.len() as u64;
|
||||
artifact_count += 1;
|
||||
pb.inc(1);
|
||||
}
|
||||
|
||||
// Add metadata.json
|
||||
let metadata = BackupMetadata {
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
created_at: Utc::now(),
|
||||
artifact_count,
|
||||
total_bytes,
|
||||
storage_backend: storage.backend_name().to_string(),
|
||||
};
|
||||
|
||||
let metadata_json = serde_json::to_vec_pretty(&metadata)
|
||||
.map_err(|e| format!("Failed to serialize metadata: {}", e))?;
|
||||
|
||||
let mut header = Header::new_gnu();
|
||||
header.set_size(metadata_json.len() as u64);
|
||||
header.set_mode(0o644);
|
||||
header.set_mtime(
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs(),
|
||||
);
|
||||
header.set_cksum();
|
||||
|
||||
archive
|
||||
.append_data(&mut header, "metadata.json", metadata_json.as_slice())
|
||||
.map_err(|e| format!("Failed to add metadata.json: {}", e))?;
|
||||
|
||||
// Finish archive
|
||||
let encoder = archive
|
||||
.into_inner()
|
||||
.map_err(|e| format!("Failed to finish archive: {}", e))?;
|
||||
encoder
|
||||
.finish()
|
||||
.map_err(|e| format!("Failed to finish compression: {}", e))?;
|
||||
|
||||
pb.finish_with_message("Backup complete");
|
||||
|
||||
// Get output file size
|
||||
let output_size = std::fs::metadata(output).map(|m| m.len()).unwrap_or(0);
|
||||
|
||||
let stats = BackupStats {
|
||||
artifact_count,
|
||||
total_bytes,
|
||||
output_size,
|
||||
};
|
||||
|
||||
println!();
|
||||
println!("Backup complete:");
|
||||
println!(" Artifacts: {}", stats.artifact_count);
|
||||
println!(" Total data: {} bytes", stats.total_bytes);
|
||||
println!(" Backup file: {} bytes", stats.output_size);
|
||||
println!(
|
||||
" Compression ratio: {:.1}%",
|
||||
if stats.total_bytes > 0 {
|
||||
(stats.output_size as f64 / stats.total_bytes as f64) * 100.0
|
||||
} else {
|
||||
100.0
|
||||
}
|
||||
);
|
||||
|
||||
Ok(stats)
|
||||
}
|
||||
|
||||
/// Restore artifacts from a backup file
|
||||
pub async fn restore_backup(storage: &Storage, input: &Path) -> Result<RestoreStats, String> {
|
||||
println!("Restoring from: {}", input.display());
|
||||
println!("Storage backend: {}", storage.backend_name());
|
||||
|
||||
// Open backup file
|
||||
let file = File::open(input).map_err(|e| format!("Failed to open backup file: {}", e))?;
|
||||
let decoder = GzDecoder::new(file);
|
||||
let mut archive = Archive::new(decoder);
|
||||
|
||||
// First pass: count entries and read metadata
|
||||
let file = File::open(input).map_err(|e| format!("Failed to open backup file: {}", e))?;
|
||||
let decoder = GzDecoder::new(file);
|
||||
let mut archive_count = Archive::new(decoder);
|
||||
|
||||
let mut entry_count = 0;
|
||||
let mut metadata: Option<BackupMetadata> = None;
|
||||
|
||||
for entry in archive_count
|
||||
.entries()
|
||||
.map_err(|e| format!("Failed to read archive: {}", e))?
|
||||
{
|
||||
let mut entry = entry.map_err(|e| format!("Failed to read entry: {}", e))?;
|
||||
let path = entry
|
||||
.path()
|
||||
.map_err(|e| format!("Failed to read path: {}", e))?
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
|
||||
if path == "metadata.json" {
|
||||
let mut data = Vec::new();
|
||||
entry
|
||||
.read_to_end(&mut data)
|
||||
.map_err(|e| format!("Failed to read metadata: {}", e))?;
|
||||
metadata = serde_json::from_slice(&data).ok();
|
||||
} else {
|
||||
entry_count += 1;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(ref meta) = metadata {
|
||||
println!("Backup info:");
|
||||
println!(" Version: {}", meta.version);
|
||||
println!(" Created: {}", meta.created_at);
|
||||
println!(" Artifacts: {}", meta.artifact_count);
|
||||
println!(" Original size: {} bytes", meta.total_bytes);
|
||||
println!();
|
||||
}
|
||||
|
||||
// Progress bar
|
||||
let pb = ProgressBar::new(entry_count as u64);
|
||||
pb.set_style(
|
||||
ProgressStyle::default_bar()
|
||||
.template(
|
||||
"{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta})",
|
||||
)
|
||||
.expect("Invalid progress template")
|
||||
.progress_chars("#>-"),
|
||||
);
|
||||
|
||||
let mut total_bytes: u64 = 0;
|
||||
let mut artifact_count = 0;
|
||||
|
||||
// Second pass: restore files
|
||||
for entry in archive
|
||||
.entries()
|
||||
.map_err(|e| format!("Failed to read archive: {}", e))?
|
||||
{
|
||||
let mut entry = entry.map_err(|e| format!("Failed to read entry: {}", e))?;
|
||||
let path = entry
|
||||
.path()
|
||||
.map_err(|e| format!("Failed to read path: {}", e))?
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
|
||||
// Skip metadata file
|
||||
if path == "metadata.json" {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Read data
|
||||
let mut data = Vec::new();
|
||||
entry
|
||||
.read_to_end(&mut data)
|
||||
.map_err(|e| format!("Failed to read {}: {}", path, e))?;
|
||||
|
||||
// Put to storage
|
||||
storage
|
||||
.put(&path, &data)
|
||||
.await
|
||||
.map_err(|e| format!("Failed to store {}: {}", path, e))?;
|
||||
|
||||
total_bytes += data.len() as u64;
|
||||
artifact_count += 1;
|
||||
pb.inc(1);
|
||||
}
|
||||
|
||||
pb.finish_with_message("Restore complete");
|
||||
|
||||
let stats = RestoreStats {
|
||||
artifact_count,
|
||||
total_bytes,
|
||||
};
|
||||
|
||||
println!();
|
||||
println!("Restore complete:");
|
||||
println!(" Artifacts: {}", stats.artifact_count);
|
||||
println!(" Total data: {} bytes", stats.total_bytes);
|
||||
|
||||
Ok(stats)
|
||||
}
|
||||
|
||||
/// Format a byte count for human-readable display (B / KB / MB / GB,
/// two decimal places above the byte range).
#[allow(dead_code)]
fn format_bytes(bytes: u64) -> String {
    // Largest unit first; first threshold that fits wins.
    const UNITS: [(u64, &str); 3] = [
        (1024 * 1024 * 1024, "GB"),
        (1024 * 1024, "MB"),
        (1024, "KB"),
    ];

    for &(scale, unit) in &UNITS {
        if bytes >= scale {
            return format!("{:.2} {}", bytes as f64 / scale as f64, unit);
        }
    }
    format!("{} B", bytes)
}
|
||||
218
nora-registry/src/config.rs
Normal file
218
nora-registry/src/config.rs
Normal file
@@ -0,0 +1,218 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::env;
|
||||
use std::fs;
|
||||
|
||||
/// Top-level application configuration, deserialized from `config.toml`
/// and overridable via `NORA_*` environment variables (see `Config::load`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// HTTP server bind settings.
    pub server: ServerConfig,
    /// Artifact storage backend settings.
    pub storage: StorageConfig,
    /// Maven repository/proxy settings (defaults to Maven Central).
    #[serde(default)]
    pub maven: MavenConfig,
    /// npm registry/proxy settings (defaults to registry.npmjs.org).
    #[serde(default)]
    pub npm: NpmConfig,
    /// Authentication settings (disabled by default).
    #[serde(default)]
    pub auth: AuthConfig,
}
|
||||
|
||||
/// HTTP server bind settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerConfig {
    /// Interface to bind, e.g. "127.0.0.1" or "0.0.0.0".
    pub host: String,
    /// TCP port to listen on.
    pub port: u16,
}
|
||||
|
||||
/// Which storage backend holds artifacts.
/// Serialized in lowercase ("local" / "s3") in config.toml.
#[derive(Debug, Clone, Default, Serialize, Deserialize, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum StorageMode {
    /// Local filesystem storage (the default).
    #[default]
    Local,
    /// S3-compatible object storage.
    S3,
}
|
||||
|
||||
/// Artifact storage settings.
/// Fields not relevant to the active `mode` are simply ignored.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageConfig {
    /// Backend selector: local filesystem or S3.
    #[serde(default)]
    pub mode: StorageMode,
    /// Root directory for artifacts (local mode only).
    #[serde(default = "default_storage_path")]
    pub path: String,
    /// Endpoint URL of the S3-compatible service (s3 mode only).
    #[serde(default = "default_s3_url")]
    pub s3_url: String,
    /// Bucket name (s3 mode only).
    #[serde(default = "default_bucket")]
    pub bucket: String,
}
|
||||
|
||||
// Serde default helpers for `StorageConfig`.

/// Default on-disk root for locally stored artifacts.
fn default_storage_path() -> String {
    String::from("data/storage")
}

/// Default endpoint for the S3-compatible backend.
fn default_s3_url() -> String {
    String::from("http://127.0.0.1:3000")
}

/// Default S3 bucket name.
fn default_bucket() -> String {
    String::from("registry")
}
|
||||
|
||||
/// Maven repository settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MavenConfig {
    /// Upstream repositories tried when an artifact is missing locally.
    #[serde(default)]
    pub proxies: Vec<String>,
    /// Timeout in seconds for upstream proxy requests.
    #[serde(default = "default_timeout")]
    pub proxy_timeout: u64,
}
|
||||
|
||||
/// npm registry settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NpmConfig {
    /// Optional upstream registry consulted when a package is missing locally.
    #[serde(default)]
    pub proxy: Option<String>,
    /// Timeout in seconds for upstream proxy requests.
    #[serde(default = "default_timeout")]
    pub proxy_timeout: u64,
}
|
||||
|
||||
/// Authentication settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AuthConfig {
    /// Master switch; when false all endpoints allow anonymous access.
    #[serde(default)]
    pub enabled: bool,
    /// Path to the htpasswd file holding user credentials.
    #[serde(default = "default_htpasswd_file")]
    pub htpasswd_file: String,
    /// Directory where issued API tokens are persisted.
    #[serde(default = "default_token_storage")]
    pub token_storage: String,
}
|
||||
|
||||
// Serde default helpers for `AuthConfig` and the proxy timeouts.

/// Default htpasswd credentials file.
fn default_htpasswd_file() -> String {
    String::from("users.htpasswd")
}

/// Default directory for persisted API tokens.
fn default_token_storage() -> String {
    String::from("data/tokens")
}

/// Default upstream proxy timeout, in seconds.
fn default_timeout() -> u64 {
    30
}
|
||||
|
||||
impl Default for MavenConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
proxies: vec!["https://repo1.maven.org/maven2".to_string()],
|
||||
proxy_timeout: 30,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NpmConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
proxy: Some("https://registry.npmjs.org".to_string()),
|
||||
proxy_timeout: 30,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AuthConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: false,
|
||||
htpasswd_file: "users.htpasswd".to_string(),
|
||||
token_storage: "data/tokens".to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Config {
|
||||
/// Load configuration with priority: ENV > config.toml > defaults
|
||||
pub fn load() -> Self {
|
||||
// 1. Start with defaults
|
||||
// 2. Override with config.toml if exists
|
||||
let mut config: Config = fs::read_to_string("config.toml")
|
||||
.ok()
|
||||
.and_then(|content| toml::from_str(&content).ok())
|
||||
.unwrap_or_default();
|
||||
|
||||
// 3. Override with ENV vars (highest priority)
|
||||
config.apply_env_overrides();
|
||||
config
|
||||
}
|
||||
|
||||
/// Apply environment variable overrides
|
||||
fn apply_env_overrides(&mut self) {
|
||||
// Server config
|
||||
if let Ok(val) = env::var("NORA_HOST") {
|
||||
self.server.host = val;
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_PORT") {
|
||||
if let Ok(port) = val.parse() {
|
||||
self.server.port = port;
|
||||
}
|
||||
}
|
||||
|
||||
// Storage config
|
||||
if let Ok(val) = env::var("NORA_STORAGE_MODE") {
|
||||
self.storage.mode = match val.to_lowercase().as_str() {
|
||||
"s3" => StorageMode::S3,
|
||||
_ => StorageMode::Local,
|
||||
};
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_STORAGE_PATH") {
|
||||
self.storage.path = val;
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_STORAGE_S3_URL") {
|
||||
self.storage.s3_url = val;
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_STORAGE_BUCKET") {
|
||||
self.storage.bucket = val;
|
||||
}
|
||||
|
||||
// Auth config
|
||||
if let Ok(val) = env::var("NORA_AUTH_ENABLED") {
|
||||
self.auth.enabled = val.to_lowercase() == "true" || val == "1";
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_AUTH_HTPASSWD_FILE") {
|
||||
self.auth.htpasswd_file = val;
|
||||
}
|
||||
|
||||
// Maven config
|
||||
if let Ok(val) = env::var("NORA_MAVEN_PROXIES") {
|
||||
self.maven.proxies = val.split(',').map(|s| s.trim().to_string()).collect();
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_MAVEN_PROXY_TIMEOUT") {
|
||||
if let Ok(timeout) = val.parse() {
|
||||
self.maven.proxy_timeout = timeout;
|
||||
}
|
||||
}
|
||||
|
||||
// npm config
|
||||
if let Ok(val) = env::var("NORA_NPM_PROXY") {
|
||||
self.npm.proxy = if val.is_empty() { None } else { Some(val) };
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_NPM_PROXY_TIMEOUT") {
|
||||
if let Ok(timeout) = val.parse() {
|
||||
self.npm.proxy_timeout = timeout;
|
||||
}
|
||||
}
|
||||
|
||||
// Token storage
|
||||
if let Ok(val) = env::var("NORA_AUTH_TOKEN_STORAGE") {
|
||||
self.auth.token_storage = val;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
server: ServerConfig {
|
||||
host: String::from("127.0.0.1"),
|
||||
port: 4000,
|
||||
},
|
||||
storage: StorageConfig {
|
||||
mode: StorageMode::Local,
|
||||
path: String::from("data/storage"),
|
||||
s3_url: String::from("http://127.0.0.1:3000"),
|
||||
bucket: String::from("registry"),
|
||||
},
|
||||
maven: MavenConfig::default(),
|
||||
npm: NpmConfig::default(),
|
||||
auth: AuthConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
89
nora-registry/src/health.rs
Normal file
89
nora-registry/src/health.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
use axum::{extract::State, http::StatusCode, response::Json, routing::get, Router};
|
||||
use serde::Serialize;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
/// Health report returned by `GET /health`.
#[derive(Serialize)]
pub struct HealthStatus {
    /// "healthy" or "unhealthy", driven by storage reachability.
    pub status: String,
    /// Crate version baked in at compile time.
    pub version: String,
    /// Seconds elapsed since server start.
    pub uptime_seconds: u64,
    /// Storage backend status.
    pub storage: StorageHealth,
    /// Per-protocol registry status.
    pub registries: RegistriesHealth,
}

/// Storage backend portion of the health report.
#[derive(Serialize)]
pub struct StorageHealth {
    /// Backend name as reported by `Storage::backend_name` (e.g. "s3").
    pub backend: String,
    /// Whether the backend answered its health probe.
    pub reachable: bool,
    /// Local path or S3 URL, depending on the active backend.
    pub endpoint: String,
}

/// Per-registry status portion of the health report.
///
/// NOTE(review): `health_check` currently hard-codes every field to "ok";
/// these become meaningful only if per-registry probes are added.
#[derive(Serialize)]
pub struct RegistriesHealth {
    pub docker: String,
    pub maven: String,
    pub npm: String,
    pub cargo: String,
    pub pypi: String,
}
|
||||
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new()
|
||||
.route("/health", get(health_check))
|
||||
.route("/ready", get(readiness_check))
|
||||
}
|
||||
|
||||
async fn health_check(State(state): State<Arc<AppState>>) -> (StatusCode, Json<HealthStatus>) {
|
||||
let storage_reachable = check_storage_reachable(&state).await;
|
||||
|
||||
let status = if storage_reachable {
|
||||
"healthy"
|
||||
} else {
|
||||
"unhealthy"
|
||||
};
|
||||
|
||||
let uptime = state.start_time.elapsed().as_secs();
|
||||
|
||||
let health = HealthStatus {
|
||||
status: status.to_string(),
|
||||
version: env!("CARGO_PKG_VERSION").to_string(),
|
||||
uptime_seconds: uptime,
|
||||
storage: StorageHealth {
|
||||
backend: state.storage.backend_name().to_string(),
|
||||
reachable: storage_reachable,
|
||||
endpoint: match state.storage.backend_name() {
|
||||
"s3" => state.config.storage.s3_url.clone(),
|
||||
_ => state.config.storage.path.clone(),
|
||||
},
|
||||
},
|
||||
registries: RegistriesHealth {
|
||||
docker: "ok".to_string(),
|
||||
maven: "ok".to_string(),
|
||||
npm: "ok".to_string(),
|
||||
cargo: "ok".to_string(),
|
||||
pypi: "ok".to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
let status_code = if storage_reachable {
|
||||
StatusCode::OK
|
||||
} else {
|
||||
StatusCode::SERVICE_UNAVAILABLE
|
||||
};
|
||||
|
||||
(status_code, Json(health))
|
||||
}
|
||||
|
||||
async fn readiness_check(State(state): State<Arc<AppState>>) -> StatusCode {
|
||||
if check_storage_reachable(&state).await {
|
||||
StatusCode::OK
|
||||
} else {
|
||||
StatusCode::SERVICE_UNAVAILABLE
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_storage_reachable(state: &AppState) -> bool {
|
||||
state.storage.health_check().await
|
||||
}
|
||||
267
nora-registry/src/main.rs
Normal file
267
nora-registry/src/main.rs
Normal file
@@ -0,0 +1,267 @@
|
||||
mod auth;
|
||||
mod backup;
|
||||
mod config;
|
||||
mod health;
|
||||
mod metrics;
|
||||
mod openapi;
|
||||
mod registry;
|
||||
mod storage;
|
||||
mod tokens;
|
||||
mod ui;
|
||||
|
||||
use axum::{middleware, Router};
|
||||
use clap::{Parser, Subcommand};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
use tokio::signal;
|
||||
use tracing::{error, info, warn};
|
||||
use tracing_subscriber::{fmt, prelude::*, EnvFilter};
|
||||
|
||||
use auth::HtpasswdAuth;
|
||||
use config::{Config, StorageMode};
|
||||
pub use storage::Storage;
|
||||
use tokens::TokenStore;
|
||||
|
||||
// Command-line interface definition. Doc comments on clap fields become
// user-visible help text, so internal notes here use plain `//` comments.
#[derive(Parser)]
#[command(
    name = "nora",
    version,
    about = "Multi-protocol artifact registry"
)]
struct Cli {
    // None means "no subcommand given" — main() treats it as `serve`.
    #[command(subcommand)]
    command: Option<Commands>,
}
|
||||
|
||||
// Subcommands. The `///` doc comments below double as clap help text and
// must stay user-facing.
#[derive(Subcommand)]
enum Commands {
    /// Start the registry server (default)
    Serve,
    /// Backup all artifacts to a tar.gz file
    Backup {
        /// Output file path (e.g., backup.tar.gz)
        #[arg(short, long)]
        output: PathBuf,
    },
    /// Restore artifacts from a backup file
    Restore {
        /// Input backup file path
        #[arg(short, long)]
        input: PathBuf,
    },
    /// Migrate artifacts between storage backends
    // NOTE(review): Migrate is parsed but not implemented — main() exits
    // with an error for it.
    Migrate {
        /// Source storage: local or s3
        #[arg(long)]
        from: String,
        /// Destination storage: local or s3
        #[arg(long)]
        to: String,
        /// Dry run - show what would be migrated without copying
        #[arg(long, default_value = "false")]
        dry_run: bool,
    },
}
|
||||
|
||||
/// Shared application state, wrapped in an `Arc` and handed to every route.
pub struct AppState {
    /// Artifact storage backend (local or S3).
    pub storage: Storage,
    /// Effective configuration (ENV > config.toml > defaults).
    pub config: Config,
    /// Server start instant, used for uptime reporting.
    pub start_time: Instant,
    /// htpasswd authenticator; `None` when auth is disabled or the file is missing.
    pub auth: Option<HtpasswdAuth>,
    /// API token store; `None` when auth is disabled.
    pub tokens: Option<TokenStore>,
}
|
||||
|
||||
/// Entry point: parse CLI, set up logging and storage, then dispatch to the
/// requested subcommand (server, backup, restore, migrate).
#[tokio::main]
async fn main() {
    let cli = Cli::parse();

    // Initialize logging (JSON for server, plain for CLI commands)
    let is_server = matches!(cli.command, None | Some(Commands::Serve));
    init_logging(is_server);

    let config = Config::load();

    // Initialize storage based on mode.
    // Storage selection is logged only in server mode so CLI output stays clean.
    let storage = match config.storage.mode {
        StorageMode::Local => {
            if is_server {
                info!(path = %config.storage.path, "Using local storage");
            }
            Storage::new_local(&config.storage.path)
        }
        StorageMode::S3 => {
            if is_server {
                info!(
                    s3_url = %config.storage.s3_url,
                    bucket = %config.storage.bucket,
                    "Using S3 storage"
                );
            }
            Storage::new_s3(&config.storage.s3_url, &config.storage.bucket)
        }
    };

    // Dispatch to command
    match cli.command {
        // No subcommand defaults to serving.
        None | Some(Commands::Serve) => {
            run_server(config, storage).await;
        }
        // Backup/restore exit non-zero on failure so scripts can detect it.
        Some(Commands::Backup { output }) => {
            if let Err(e) = backup::create_backup(&storage, &output).await {
                error!("Backup failed: {}", e);
                std::process::exit(1);
            }
        }
        Some(Commands::Restore { input }) => {
            if let Err(e) = backup::restore_backup(&storage, &input).await {
                error!("Restore failed: {}", e);
                std::process::exit(1);
            }
        }
        Some(Commands::Migrate { from, to, dry_run }) => {
            // Not yet implemented — report and fail loudly.
            eprintln!("Migration from '{}' to '{}' (dry_run: {})", from, to, dry_run);
            eprintln!("TODO: Migration not yet implemented");
            std::process::exit(1);
        }
    }
}
|
||||
|
||||
fn init_logging(json_format: bool) {
|
||||
let env_filter = EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"));
|
||||
|
||||
if json_format {
|
||||
tracing_subscriber::registry()
|
||||
.with(env_filter)
|
||||
.with(fmt::layer().json().with_target(true))
|
||||
.init();
|
||||
} else {
|
||||
tracing_subscriber::registry()
|
||||
.with(env_filter)
|
||||
.with(fmt::layer().with_target(false))
|
||||
.init();
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the HTTP server and block until a shutdown signal arrives.
///
/// Wires up auth (optional), the token store, all registry routers and the
/// metrics/auth middleware, then serves with graceful shutdown.
async fn run_server(config: Config, storage: Storage) {
    let start_time = Instant::now();

    // Load auth if enabled
    let auth = if config.auth.enabled {
        let path = Path::new(&config.auth.htpasswd_file);
        match HtpasswdAuth::from_file(path) {
            Some(auth) => {
                info!(users = auth.list_users().len(), "Auth enabled");
                Some(auth)
            }
            None => {
                // Missing/empty htpasswd: warn and fall back to anonymous access.
                warn!(file = %config.auth.htpasswd_file, "Auth enabled but htpasswd file not found or empty");
                None
            }
        }
    } else {
        None
    };

    // Initialize token store if auth is enabled
    let tokens = if config.auth.enabled {
        let token_path = Path::new(&config.auth.token_storage);
        info!(path = %config.auth.token_storage, "Token storage initialized");
        Some(TokenStore::new(token_path))
    } else {
        None
    };

    let state = Arc::new(AppState {
        storage,
        config,
        start_time,
        auth,
        tokens,
    });

    // All protocol routers are merged into one app.
    // NOTE(review): in axum, a later `.layer(...)` wraps earlier ones, so the
    // auth middleware added second is the outermost layer here — confirm that
    // auth-before-metrics is the intended order.
    let app = Router::new()
        .merge(health::routes())
        .merge(metrics::routes())
        .merge(ui::routes())
        .merge(openapi::routes())
        .merge(auth::token_routes())
        .merge(registry::docker_routes())
        .merge(registry::maven_routes())
        .merge(registry::npm_routes())
        .merge(registry::cargo_routes())
        .merge(registry::pypi_routes())
        .layer(middleware::from_fn(metrics::metrics_middleware))
        .layer(middleware::from_fn_with_state(
            state.clone(),
            auth::auth_middleware,
        ))
        .with_state(state.clone());

    let addr = format!("{}:{}", state.config.server.host, state.config.server.port);
    let listener = tokio::net::TcpListener::bind(&addr)
        .await
        .expect("Failed to bind");

    info!(
        address = %addr,
        version = env!("CARGO_PKG_VERSION"),
        storage = state.storage.backend_name(),
        auth_enabled = state.auth.is_some(),
        "Nora started"
    );

    info!(
        health = "/health",
        ready = "/ready",
        metrics = "/metrics",
        ui = "/ui/",
        api_docs = "/api-docs",
        docker = "/v2/",
        maven = "/maven2/",
        npm = "/npm/",
        cargo = "/cargo/",
        pypi = "/simple/",
        "Available endpoints"
    );

    // Graceful shutdown on SIGTERM/SIGINT
    axum::serve(listener, app)
        .with_graceful_shutdown(shutdown_signal())
        .await
        .expect("Server error");

    info!(
        uptime_seconds = state.start_time.elapsed().as_secs(),
        "Nora shutdown complete"
    );
}
|
||||
|
||||
/// Wait for shutdown signal (SIGTERM or SIGINT)
///
/// Resolves when the first of the two signals arrives; used as the
/// graceful-shutdown future for `axum::serve`.
async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c()
            .await
            .expect("Failed to install Ctrl+C handler");
    };

    // SIGTERM only exists on Unix (it is what Docker/Kubernetes send on stop).
    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("Failed to install SIGTERM handler")
            .recv()
            .await;
    };

    // On non-Unix platforms only Ctrl+C can trigger shutdown.
    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    // Complete on whichever future finishes first.
    tokio::select! {
        _ = ctrl_c => {
            info!("Received SIGINT, starting graceful shutdown...");
        }
        _ = terminate => {
            info!("Received SIGTERM, starting graceful shutdown...");
        }
    }
}
|
||||
147
nora-registry/src/metrics.rs
Normal file
147
nora-registry/src/metrics.rs
Normal file
@@ -0,0 +1,147 @@
|
||||
use axum::{
|
||||
body::Body,
|
||||
extract::MatchedPath,
|
||||
http::Request,
|
||||
middleware::Next,
|
||||
response::{IntoResponse, Response},
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use lazy_static::lazy_static;
|
||||
use prometheus::{
|
||||
register_histogram_vec, register_int_counter_vec, Encoder, HistogramVec, IntCounterVec,
|
||||
TextEncoder,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
// Global Prometheus collectors, registered once on first access.
// NOTE(review): std::sync::LazyLock could replace lazy_static on current
// toolchains — keep as-is unless the crate's MSRV allows it.
lazy_static! {
    /// Total HTTP requests counter
    pub static ref HTTP_REQUESTS_TOTAL: IntCounterVec = register_int_counter_vec!(
        "nora_http_requests_total",
        "Total number of HTTP requests",
        &["registry", "method", "status"]
    ).expect("metric can be created");

    /// HTTP request duration histogram
    pub static ref HTTP_REQUEST_DURATION: HistogramVec = register_histogram_vec!(
        "nora_http_request_duration_seconds",
        "HTTP request latency in seconds",
        &["registry", "method"],
        vec![0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]
    ).expect("metric can be created");

    /// Cache requests counter (hit/miss)
    pub static ref CACHE_REQUESTS: IntCounterVec = register_int_counter_vec!(
        "nora_cache_requests_total",
        "Total cache requests",
        &["registry", "result"]
    ).expect("metric can be created");

    /// Storage operations counter
    pub static ref STORAGE_OPERATIONS: IntCounterVec = register_int_counter_vec!(
        "nora_storage_operations_total",
        "Total storage operations",
        &["operation", "status"]
    ).expect("metric can be created");

    /// Artifacts count by registry
    pub static ref ARTIFACTS_TOTAL: IntCounterVec = register_int_counter_vec!(
        "nora_artifacts_total",
        "Total artifacts stored",
        &["registry"]
    ).expect("metric can be created");
}
|
||||
|
||||
/// Routes for metrics endpoint
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new().route("/metrics", get(metrics_handler))
|
||||
}
|
||||
|
||||
/// Handler for /metrics endpoint
|
||||
async fn metrics_handler() -> impl IntoResponse {
|
||||
let encoder = TextEncoder::new();
|
||||
let metric_families = prometheus::gather();
|
||||
let mut buffer = Vec::new();
|
||||
|
||||
encoder
|
||||
.encode(&metric_families, &mut buffer)
|
||||
.unwrap_or_default();
|
||||
|
||||
([("content-type", "text/plain; charset=utf-8")], buffer)
|
||||
}
|
||||
|
||||
/// Middleware to record request metrics
|
||||
pub async fn metrics_middleware(
|
||||
matched_path: Option<MatchedPath>,
|
||||
request: Request<Body>,
|
||||
next: Next,
|
||||
) -> Response {
|
||||
let start = Instant::now();
|
||||
let method = request.method().to_string();
|
||||
let path = matched_path
|
||||
.map(|p| p.as_str().to_string())
|
||||
.unwrap_or_else(|| request.uri().path().to_string());
|
||||
|
||||
// Determine registry from path
|
||||
let registry = detect_registry(&path);
|
||||
|
||||
// Process request
|
||||
let response = next.run(request).await;
|
||||
|
||||
let duration = start.elapsed().as_secs_f64();
|
||||
let status = response.status().as_u16().to_string();
|
||||
|
||||
// Record metrics
|
||||
HTTP_REQUESTS_TOTAL
|
||||
.with_label_values(&[®istry, &method, &status])
|
||||
.inc();
|
||||
|
||||
HTTP_REQUEST_DURATION
|
||||
.with_label_values(&[®istry, &method])
|
||||
.observe(duration);
|
||||
|
||||
response
|
||||
}
|
||||
|
||||
/// Map a request path to the registry label used in metrics.
fn detect_registry(path: &str) -> String {
    // First matching prefix wins; anything unrecognised is bucketed as "other".
    const PREFIXES: [(&str, &str); 7] = [
        ("/v2", "docker"),
        ("/maven2", "maven"),
        ("/npm", "npm"),
        ("/cargo", "cargo"),
        ("/simple", "pypi"),
        ("/packages", "pypi"),
        ("/ui", "ui"),
    ];

    PREFIXES
        .iter()
        .find(|(prefix, _)| path.starts_with(prefix))
        .map(|(_, label)| label.to_string())
        .unwrap_or_else(|| "other".to_string())
}
|
||||
|
||||
/// Record cache hit
|
||||
#[allow(dead_code)]
|
||||
pub fn record_cache_hit(registry: &str) {
|
||||
CACHE_REQUESTS.with_label_values(&[registry, "hit"]).inc();
|
||||
}
|
||||
|
||||
/// Record cache miss
|
||||
#[allow(dead_code)]
|
||||
pub fn record_cache_miss(registry: &str) {
|
||||
CACHE_REQUESTS.with_label_values(&[registry, "miss"]).inc();
|
||||
}
|
||||
|
||||
/// Record storage operation
|
||||
#[allow(dead_code)]
|
||||
pub fn record_storage_op(operation: &str, success: bool) {
|
||||
let status = if success { "success" } else { "error" };
|
||||
STORAGE_OPERATIONS
|
||||
.with_label_values(&[operation, status])
|
||||
.inc();
|
||||
}
|
||||
382
nora-registry/src/openapi.rs
Normal file
382
nora-registry/src/openapi.rs
Normal file
@@ -0,0 +1,382 @@
|
||||
//! OpenAPI documentation and Swagger UI
|
||||
//!
|
||||
//! Functions in this module are stubs used only for generating OpenAPI documentation.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use axum::Router;
|
||||
use std::sync::Arc;
|
||||
use utoipa::OpenApi;
|
||||
use utoipa_swagger_ui::SwaggerUi;
|
||||
|
||||
use crate::AppState;
|
||||
|
||||
/// Aggregate OpenAPI specification for the whole service; rendered by the
/// Swagger UI. Paths reference the stub functions below, which exist only
/// to carry `#[utoipa::path]` annotations.
#[derive(OpenApi)]
#[openapi(
    info(
        title = "Nora",
        version = "0.1.0",
        description = "Multi-protocol package registry supporting Docker, Maven, npm, Cargo, and PyPI",
        license(name = "MIT"),
        contact(name = "DevITWay", url = "https://github.com/getnora-io/nora")
    ),
    servers(
        (url = "/", description = "Current server")
    ),
    tags(
        (name = "health", description = "Health check endpoints"),
        (name = "docker", description = "Docker Registry v2 API"),
        (name = "maven", description = "Maven Repository API"),
        (name = "npm", description = "npm Registry API"),
        (name = "cargo", description = "Cargo Registry API"),
        (name = "pypi", description = "PyPI Simple API"),
        (name = "auth", description = "Authentication & API Tokens")
    ),
    paths(
        // Health
        crate::openapi::health_check,
        crate::openapi::readiness_check,
        // Docker
        crate::openapi::docker_version,
        crate::openapi::docker_catalog,
        crate::openapi::docker_tags,
        crate::openapi::docker_manifest,
        crate::openapi::docker_blob,
        // Maven
        crate::openapi::maven_artifact,
        // npm
        crate::openapi::npm_package,
        // PyPI
        crate::openapi::pypi_simple,
        crate::openapi::pypi_package,
        // Tokens
        crate::openapi::create_token,
        crate::openapi::list_tokens,
        crate::openapi::revoke_token,
    ),
    components(
        schemas(
            HealthResponse,
            StorageHealth,
            RegistriesHealth,
            DockerVersion,
            DockerCatalog,
            DockerTags,
            TokenRequest,
            TokenResponse,
            TokenListResponse,
            TokenInfo,
            ErrorResponse
        )
    )
)]
pub struct ApiDoc;
|
||||
|
||||
// ============ Schemas ============
// Serializable mirror types used purely for OpenAPI schema generation;
// the live handlers use their own types in health.rs / auth modules.

use serde::{Deserialize, Serialize};
use utoipa::ToSchema;

#[derive(Serialize, ToSchema)]
pub struct HealthResponse {
    /// Current health status
    pub status: String,
    /// Application version
    pub version: String,
    /// Uptime in seconds
    pub uptime_seconds: u64,
    /// Storage backend health
    pub storage: StorageHealth,
    /// Registry health status
    pub registries: RegistriesHealth,
}

#[derive(Serialize, ToSchema)]
pub struct StorageHealth {
    /// Backend type (local, s3)
    pub backend: String,
    /// Whether storage is reachable
    pub reachable: bool,
    /// Storage endpoint/path
    pub endpoint: String,
}

#[derive(Serialize, ToSchema)]
pub struct RegistriesHealth {
    pub docker: String,
    pub maven: String,
    pub npm: String,
    pub cargo: String,
    pub pypi: String,
}

#[derive(Serialize, ToSchema)]
pub struct DockerVersion {
    /// API version
    #[serde(rename = "Docker-Distribution-API-Version")]
    pub version: String,
}

#[derive(Serialize, ToSchema)]
pub struct DockerCatalog {
    /// List of repository names
    pub repositories: Vec<String>,
}

#[derive(Serialize, ToSchema)]
pub struct DockerTags {
    /// Repository name
    pub name: String,
    /// List of tags
    pub tags: Vec<String>,
}

#[derive(Deserialize, ToSchema)]
pub struct TokenRequest {
    /// Username for authentication
    pub username: String,
    /// Password for authentication
    pub password: String,
    /// Token TTL in days (default: 30)
    #[serde(default = "default_ttl")]
    pub ttl_days: u32,
    /// Optional description
    pub description: Option<String>,
}

// Serde default for `TokenRequest::ttl_days`.
fn default_ttl() -> u32 {
    30
}

#[derive(Serialize, ToSchema)]
pub struct TokenResponse {
    /// Generated API token (starts with nra_)
    pub token: String,
    /// Token expiration in days
    pub expires_in_days: u32,
}

#[derive(Serialize, ToSchema)]
pub struct TokenListResponse {
    /// List of tokens
    pub tokens: Vec<TokenInfo>,
}

#[derive(Serialize, ToSchema)]
pub struct TokenInfo {
    /// Token hash prefix (for identification)
    pub hash_prefix: String,
    /// Creation timestamp
    pub created_at: u64,
    /// Expiration timestamp
    pub expires_at: u64,
    /// Last used timestamp
    pub last_used: Option<u64>,
    /// Description
    pub description: Option<String>,
}

#[derive(Serialize, ToSchema)]
pub struct ErrorResponse {
    /// Error message
    pub error: String,
}
|
||||
|
||||
// ============ Path Operations (documentation only) ============
|
||||
|
||||
/// Health check endpoint
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/health",
|
||||
tag = "health",
|
||||
responses(
|
||||
(status = 200, description = "Service is healthy", body = HealthResponse),
|
||||
(status = 503, description = "Service is unhealthy", body = HealthResponse)
|
||||
)
|
||||
)]
|
||||
pub async fn health_check() {}
|
||||
|
||||
/// Readiness probe
|
||||
#[utoipa::path(
|
||||
get,
|
||||
path = "/ready",
|
||||
tag = "health",
|
||||
responses(
|
||||
(status = 200, description = "Service is ready"),
|
||||
(status = 503, description = "Service is not ready")
|
||||
)
|
||||
)]
|
||||
pub async fn readiness_check() {}
|
||||
|
||||
/// Docker Registry version check
// Documentation-only stubs for the Docker Registry v2 endpoints; the
// live handlers are in the docker registry module.
#[utoipa::path(
    get,
    path = "/v2/",
    tag = "docker",
    responses(
        (status = 200, description = "Registry is available", body = DockerVersion),
        (status = 401, description = "Authentication required")
    )
)]
pub async fn docker_version() {}

/// List all repositories
#[utoipa::path(
    get,
    path = "/v2/_catalog",
    tag = "docker",
    responses(
        (status = 200, description = "Repository list", body = DockerCatalog)
    )
)]
pub async fn docker_catalog() {}

/// List tags for a repository
#[utoipa::path(
    get,
    path = "/v2/{name}/tags/list",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name")
    ),
    responses(
        (status = 200, description = "Tag list", body = DockerTags),
        (status = 404, description = "Repository not found")
    )
)]
pub async fn docker_tags() {}

/// Get manifest
#[utoipa::path(
    get,
    path = "/v2/{name}/manifests/{reference}",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name"),
        ("reference" = String, Path, description = "Tag or digest")
    ),
    responses(
        (status = 200, description = "Manifest content"),
        (status = 404, description = "Manifest not found")
    )
)]
pub async fn docker_manifest() {}

/// Get blob
#[utoipa::path(
    get,
    path = "/v2/{name}/blobs/{digest}",
    tag = "docker",
    params(
        ("name" = String, Path, description = "Repository name"),
        ("digest" = String, Path, description = "Blob digest (sha256:...)")
    ),
    responses(
        (status = 200, description = "Blob content"),
        (status = 404, description = "Blob not found")
    )
)]
pub async fn docker_blob() {}
|
||||
|
||||
/// Get Maven artifact
// Documentation-only stub; the live download handler (with proxy
// fallback) is in the maven registry module.
#[utoipa::path(
    get,
    path = "/maven2/{path}",
    tag = "maven",
    params(
        ("path" = String, Path, description = "Artifact path (e.g., org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.jar)")
    ),
    responses(
        (status = 200, description = "Artifact content"),
        (status = 404, description = "Artifact not found, trying upstream proxies")
    )
)]
pub async fn maven_artifact() {}
|
||||
|
||||
/// Get npm package metadata
// Documentation-only stub; the live handler is in the npm registry
// module and also serves tarballs via the same wildcard route.
#[utoipa::path(
    get,
    path = "/npm/{name}",
    tag = "npm",
    params(
        ("name" = String, Path, description = "Package name")
    ),
    responses(
        (status = 200, description = "Package metadata (JSON)"),
        (status = 404, description = "Package not found")
    )
)]
pub async fn npm_package() {}
|
||||
|
||||
/// PyPI Simple index
// Documentation-only stubs for the PEP 503 "simple" endpoints; live
// handlers are in the pypi registry module.
#[utoipa::path(
    get,
    path = "/simple/",
    tag = "pypi",
    responses(
        (status = 200, description = "HTML list of packages")
    )
)]
pub async fn pypi_simple() {}

/// PyPI package page
#[utoipa::path(
    get,
    path = "/simple/{name}/",
    tag = "pypi",
    params(
        ("name" = String, Path, description = "Package name")
    ),
    responses(
        (status = 200, description = "HTML list of package files"),
        (status = 404, description = "Package not found")
    )
)]
pub async fn pypi_package() {}
|
||||
|
||||
/// Create API token
// Documentation-only stubs for the token management API; live
// handlers are wired elsewhere.
#[utoipa::path(
    post,
    path = "/api/tokens",
    tag = "auth",
    request_body = TokenRequest,
    responses(
        (status = 200, description = "Token created", body = TokenResponse),
        (status = 401, description = "Invalid credentials", body = ErrorResponse),
        (status = 400, description = "Auth not configured", body = ErrorResponse)
    )
)]
pub async fn create_token() {}

/// List user's tokens
#[utoipa::path(
    post,
    path = "/api/tokens/list",
    tag = "auth",
    request_body = TokenRequest,
    responses(
        (status = 200, description = "Token list", body = TokenListResponse),
        (status = 401, description = "Invalid credentials", body = ErrorResponse)
    )
)]
pub async fn list_tokens() {}

/// Revoke a token
#[utoipa::path(
    post,
    path = "/api/tokens/revoke",
    tag = "auth",
    responses(
        (status = 200, description = "Token revoked"),
        (status = 401, description = "Invalid credentials", body = ErrorResponse),
        (status = 404, description = "Token not found", body = ErrorResponse)
    )
)]
pub async fn revoke_token() {}
|
||||
|
||||
// ============ Routes ============
|
||||
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new()
|
||||
.merge(SwaggerUi::new("/api-docs").url("/api-docs/openapi.json", ApiDoc::openapi()))
|
||||
}
|
||||
43
nora-registry/src/registry/cargo_registry.rs
Normal file
43
nora-registry/src/registry/cargo_registry.rs
Normal file
@@ -0,0 +1,43 @@
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new()
|
||||
.route("/cargo/api/v1/crates/{crate_name}", get(get_metadata))
|
||||
.route(
|
||||
"/cargo/api/v1/crates/{crate_name}/{version}/download",
|
||||
get(download),
|
||||
)
|
||||
}
|
||||
|
||||
async fn get_metadata(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(crate_name): Path<String>,
|
||||
) -> Response {
|
||||
let key = format!("cargo/{}/metadata.json", crate_name);
|
||||
match state.storage.get(&key).await {
|
||||
Ok(data) => (StatusCode::OK, data).into_response(),
|
||||
Err(_) => StatusCode::NOT_FOUND.into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn download(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((crate_name, version)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
let key = format!(
|
||||
"cargo/{}/{}/{}-{}.crate",
|
||||
crate_name, version, crate_name, version
|
||||
);
|
||||
match state.storage.get(&key).await {
|
||||
Ok(data) => (StatusCode::OK, data).into_response(),
|
||||
Err(_) => StatusCode::NOT_FOUND.into_response(),
|
||||
}
|
||||
}
|
||||
154
nora-registry/src/registry/docker.rs
Normal file
154
nora-registry/src/registry/docker.rs
Normal file
@@ -0,0 +1,154 @@
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
body::Bytes,
|
||||
extract::{Path, State},
|
||||
http::{header, StatusCode},
|
||||
response::{IntoResponse, Response},
|
||||
routing::{get, head, put},
|
||||
Json, Router,
|
||||
};
|
||||
use serde_json::{json, Value};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new()
|
||||
.route("/v2/", get(check))
|
||||
.route("/v2/{name}/blobs/{digest}", head(check_blob))
|
||||
.route("/v2/{name}/blobs/{digest}", get(download_blob))
|
||||
.route(
|
||||
"/v2/{name}/blobs/uploads/",
|
||||
axum::routing::post(start_upload),
|
||||
)
|
||||
.route("/v2/{name}/blobs/uploads/{uuid}", put(upload_blob))
|
||||
.route("/v2/{name}/manifests/{reference}", get(get_manifest))
|
||||
.route("/v2/{name}/manifests/{reference}", put(put_manifest))
|
||||
.route("/v2/{name}/tags/list", get(list_tags))
|
||||
}
|
||||
|
||||
// Docker clients probe GET /v2/ to confirm the registry speaks API v2;
// a 200 with an empty JSON object is the expected success response.
async fn check() -> (StatusCode, Json<Value>) {
    (StatusCode::OK, Json(json!({})))
}
|
||||
|
||||
async fn check_blob(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((name, digest)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
let key = format!("docker/{}/blobs/{}", name, digest);
|
||||
match state.storage.get(&key).await {
|
||||
Ok(data) => (
|
||||
StatusCode::OK,
|
||||
[(header::CONTENT_LENGTH, data.len().to_string())],
|
||||
)
|
||||
.into_response(),
|
||||
Err(_) => StatusCode::NOT_FOUND.into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn download_blob(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((name, digest)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
let key = format!("docker/{}/blobs/{}", name, digest);
|
||||
match state.storage.get(&key).await {
|
||||
Ok(data) => (
|
||||
StatusCode::OK,
|
||||
[(header::CONTENT_TYPE, "application/octet-stream")],
|
||||
data,
|
||||
)
|
||||
.into_response(),
|
||||
Err(_) => StatusCode::NOT_FOUND.into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn start_upload(Path(name): Path<String>) -> Response {
|
||||
let uuid = uuid::Uuid::new_v4().to_string();
|
||||
let location = format!("/v2/{}/blobs/uploads/{}", name, uuid);
|
||||
(
|
||||
StatusCode::ACCEPTED,
|
||||
[
|
||||
(header::LOCATION, location.clone()),
|
||||
("Docker-Upload-UUID".parse().unwrap(), uuid),
|
||||
],
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
async fn upload_blob(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((name, _uuid)): Path<(String, String)>,
|
||||
axum::extract::Query(params): axum::extract::Query<std::collections::HashMap<String, String>>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
let digest = match params.get("digest") {
|
||||
Some(d) => d,
|
||||
None => return StatusCode::BAD_REQUEST.into_response(),
|
||||
};
|
||||
let key = format!("docker/{}/blobs/{}", name, digest);
|
||||
match state.storage.put(&key, &body).await {
|
||||
Ok(()) => {
|
||||
let location = format!("/v2/{}/blobs/{}", name, digest);
|
||||
(StatusCode::CREATED, [(header::LOCATION, location)]).into_response()
|
||||
}
|
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_manifest(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((name, reference)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
let key = format!("docker/{}/manifests/{}.json", name, reference);
|
||||
match state.storage.get(&key).await {
|
||||
Ok(data) => (
|
||||
StatusCode::OK,
|
||||
[(
|
||||
header::CONTENT_TYPE,
|
||||
"application/vnd.docker.distribution.manifest.v2+json",
|
||||
)],
|
||||
data,
|
||||
)
|
||||
.into_response(),
|
||||
Err(_) => StatusCode::NOT_FOUND.into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn put_manifest(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((name, reference)): Path<(String, String)>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
let key = format!("docker/{}/manifests/{}.json", name, reference);
|
||||
match state.storage.put(&key, &body).await {
|
||||
Ok(()) => {
|
||||
use sha2::Digest;
|
||||
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&body));
|
||||
let location = format!("/v2/{}/manifests/{}", name, reference);
|
||||
(
|
||||
StatusCode::CREATED,
|
||||
[
|
||||
(header::LOCATION, location),
|
||||
("Docker-Content-Digest".parse().unwrap(), digest),
|
||||
],
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_tags(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(name): Path<String>,
|
||||
) -> (StatusCode, Json<Value>) {
|
||||
let prefix = format!("docker/{}/manifests/", name);
|
||||
let keys = state.storage.list(&prefix).await;
|
||||
let tags: Vec<String> = keys
|
||||
.iter()
|
||||
.filter_map(|k| {
|
||||
k.strip_prefix(&prefix)
|
||||
.and_then(|t| t.strip_suffix(".json"))
|
||||
.map(String::from)
|
||||
})
|
||||
.collect();
|
||||
(StatusCode::OK, Json(json!({"name": name, "tags": tags})))
|
||||
}
|
||||
94
nora-registry/src/registry/maven.rs
Normal file
94
nora-registry/src/registry/maven.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
body::Bytes,
|
||||
extract::{Path, State},
|
||||
http::{header, StatusCode},
|
||||
response::{IntoResponse, Response},
|
||||
routing::{get, put},
|
||||
Router,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new()
|
||||
.route("/maven2/{*path}", get(download))
|
||||
.route("/maven2/{*path}", put(upload))
|
||||
}
|
||||
|
||||
async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
|
||||
let key = format!("maven/{}", path);
|
||||
|
||||
// Try local storage first
|
||||
if let Ok(data) = state.storage.get(&key).await {
|
||||
return with_content_type(&path, data).into_response();
|
||||
}
|
||||
|
||||
// Try proxy servers
|
||||
for proxy_url in &state.config.maven.proxies {
|
||||
let url = format!("{}/{}", proxy_url.trim_end_matches('/'), path);
|
||||
|
||||
match fetch_from_proxy(&url, state.config.maven.proxy_timeout).await {
|
||||
Ok(data) => {
|
||||
// Cache in local storage (fire and forget)
|
||||
let storage = state.storage.clone();
|
||||
let key_clone = key.clone();
|
||||
let data_clone = data.clone();
|
||||
tokio::spawn(async move {
|
||||
let _ = storage.put(&key_clone, &data_clone).await;
|
||||
});
|
||||
|
||||
return with_content_type(&path, data.into()).into_response();
|
||||
}
|
||||
Err(_) => continue,
|
||||
}
|
||||
}
|
||||
|
||||
StatusCode::NOT_FOUND.into_response()
|
||||
}
|
||||
|
||||
async fn upload(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(path): Path<String>,
|
||||
body: Bytes,
|
||||
) -> StatusCode {
|
||||
let key = format!("maven/{}", path);
|
||||
match state.storage.put(&key, &body).await {
|
||||
Ok(()) => StatusCode::CREATED,
|
||||
Err(_) => StatusCode::INTERNAL_SERVER_ERROR,
|
||||
}
|
||||
}
|
||||
|
||||
async fn fetch_from_proxy(url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(timeout_secs))
|
||||
.build()
|
||||
.map_err(|_| ())?;
|
||||
|
||||
let response = client.get(url).send().await.map_err(|_| ())?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
response.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
|
||||
}
|
||||
|
||||
fn with_content_type(
|
||||
path: &str,
|
||||
data: Bytes,
|
||||
) -> (StatusCode, [(header::HeaderName, &'static str); 1], Bytes) {
|
||||
let content_type = if path.ends_with(".pom") {
|
||||
"application/xml"
|
||||
} else if path.ends_with(".jar") {
|
||||
"application/java-archive"
|
||||
} else if path.ends_with(".xml") {
|
||||
"application/xml"
|
||||
} else if path.ends_with(".sha1") || path.ends_with(".md5") {
|
||||
"text/plain"
|
||||
} else {
|
||||
"application/octet-stream"
|
||||
};
|
||||
|
||||
(StatusCode::OK, [(header::CONTENT_TYPE, content_type)], data)
|
||||
}
|
||||
11
nora-registry/src/registry/mod.rs
Normal file
11
nora-registry/src/registry/mod.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
// Protocol-specific registry implementations, one module per ecosystem.
mod cargo_registry;
mod docker;
mod maven;
mod npm;
mod pypi;

// Re-export each module's router constructor under a distinct name so
// the application can merge them all into a single axum Router.
pub use cargo_registry::routes as cargo_routes;
pub use docker::routes as docker_routes;
pub use maven::routes as maven_routes;
pub use npm::routes as npm_routes;
pub use pypi::routes as pypi_routes;
|
||||
89
nora-registry/src/registry/npm.rs
Normal file
89
nora-registry/src/registry/npm.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
body::Bytes,
|
||||
extract::{Path, State},
|
||||
http::{header, StatusCode},
|
||||
response::{IntoResponse, Response},
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new().route("/npm/{*path}", get(handle_request))
|
||||
}
|
||||
|
||||
async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
|
||||
// Determine if this is a tarball request or metadata request
|
||||
let is_tarball = path.contains("/-/");
|
||||
|
||||
let key = if is_tarball {
|
||||
let parts: Vec<&str> = path.split("/-/").collect();
|
||||
if parts.len() == 2 {
|
||||
format!("npm/{}/tarballs/{}", parts[0], parts[1])
|
||||
} else {
|
||||
format!("npm/{}", path)
|
||||
}
|
||||
} else {
|
||||
format!("npm/{}/metadata.json", path)
|
||||
};
|
||||
|
||||
// Try local storage first
|
||||
if let Ok(data) = state.storage.get(&key).await {
|
||||
return with_content_type(is_tarball, data).into_response();
|
||||
}
|
||||
|
||||
// Try proxy if configured
|
||||
if let Some(proxy_url) = &state.config.npm.proxy {
|
||||
let url = if is_tarball {
|
||||
// Tarball URL: https://registry.npmjs.org/package/-/package-version.tgz
|
||||
format!("{}/{}", proxy_url.trim_end_matches('/'), path)
|
||||
} else {
|
||||
// Metadata URL: https://registry.npmjs.org/package
|
||||
format!("{}/{}", proxy_url.trim_end_matches('/'), path)
|
||||
};
|
||||
|
||||
if let Ok(data) = fetch_from_proxy(&url, state.config.npm.proxy_timeout).await {
|
||||
// Cache in local storage (fire and forget)
|
||||
let storage = state.storage.clone();
|
||||
let key_clone = key.clone();
|
||||
let data_clone = data.clone();
|
||||
tokio::spawn(async move {
|
||||
let _ = storage.put(&key_clone, &data_clone).await;
|
||||
});
|
||||
|
||||
return with_content_type(is_tarball, data.into()).into_response();
|
||||
}
|
||||
}
|
||||
|
||||
StatusCode::NOT_FOUND.into_response()
|
||||
}
|
||||
|
||||
async fn fetch_from_proxy(url: &str, timeout_secs: u64) -> Result<Vec<u8>, ()> {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(timeout_secs))
|
||||
.build()
|
||||
.map_err(|_| ())?;
|
||||
|
||||
let response = client.get(url).send().await.map_err(|_| ())?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
response.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
|
||||
}
|
||||
|
||||
fn with_content_type(
|
||||
is_tarball: bool,
|
||||
data: Bytes,
|
||||
) -> (StatusCode, [(header::HeaderName, &'static str); 1], Bytes) {
|
||||
let content_type = if is_tarball {
|
||||
"application/octet-stream"
|
||||
} else {
|
||||
"application/json"
|
||||
};
|
||||
|
||||
(StatusCode::OK, [(header::CONTENT_TYPE, content_type)], data)
|
||||
}
|
||||
35
nora-registry/src/registry/pypi.rs
Normal file
35
nora-registry/src/registry/pypi.rs
Normal file
@@ -0,0 +1,35 @@
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
extract::State,
|
||||
http::StatusCode,
|
||||
response::{Html, IntoResponse},
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
pub fn routes() -> Router<Arc<AppState>> {
|
||||
Router::new().route("/simple/", get(list_packages))
|
||||
}
|
||||
|
||||
async fn list_packages(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let keys = state.storage.list("pypi/").await;
|
||||
let mut packages = std::collections::HashSet::new();
|
||||
|
||||
for key in keys {
|
||||
if let Some(pkg) = key.strip_prefix("pypi/").and_then(|k| k.split('/').next()) {
|
||||
packages.insert(pkg.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
let mut html = String::from("<html><body><h1>Simple Index</h1>");
|
||||
let mut pkg_list: Vec<_> = packages.into_iter().collect();
|
||||
pkg_list.sort();
|
||||
|
||||
for pkg in pkg_list {
|
||||
html.push_str(&format!("<a href=\"/simple/{}/\">{}</a><br>", pkg, pkg));
|
||||
}
|
||||
html.push_str("</body></html>");
|
||||
|
||||
(StatusCode::OK, Html(html))
|
||||
}
|
||||
131
nora-registry/src/storage/local.rs
Normal file
131
nora-registry/src/storage/local.rs
Normal file
@@ -0,0 +1,131 @@
|
||||
use async_trait::async_trait;
|
||||
use axum::body::Bytes;
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
use super::{FileMeta, Result, StorageBackend, StorageError};
|
||||
|
||||
/// Local filesystem storage backend (zero-config default)
// Keys map directly onto file paths below `base_path`; see
// `key_to_path`.
pub struct LocalStorage {
    // Root directory under which all object keys are materialized.
    base_path: PathBuf,
}
|
||||
|
||||
impl LocalStorage {
|
||||
pub fn new(path: &str) -> Self {
|
||||
Self {
|
||||
base_path: PathBuf::from(path),
|
||||
}
|
||||
}
|
||||
|
||||
fn key_to_path(&self, key: &str) -> PathBuf {
|
||||
self.base_path.join(key)
|
||||
}
|
||||
|
||||
/// Recursively list all files under a directory (sync helper)
|
||||
fn list_files_sync(dir: &PathBuf, base: &PathBuf, prefix: &str, results: &mut Vec<String>) {
|
||||
if let Ok(entries) = std::fs::read_dir(dir) {
|
||||
for entry in entries.flatten() {
|
||||
let path = entry.path();
|
||||
if path.is_file() {
|
||||
if let Ok(rel_path) = path.strip_prefix(base) {
|
||||
let key = rel_path.to_string_lossy().replace('\\', "/");
|
||||
if key.starts_with(prefix) || prefix.is_empty() {
|
||||
results.push(key);
|
||||
}
|
||||
}
|
||||
} else if path.is_dir() {
|
||||
Self::list_files_sync(&path, base, prefix, results);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl StorageBackend for LocalStorage {
|
||||
async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
|
||||
let path = self.key_to_path(key);
|
||||
|
||||
// Create parent directories
|
||||
if let Some(parent) = path.parent() {
|
||||
fs::create_dir_all(parent)
|
||||
.await
|
||||
.map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
}
|
||||
|
||||
// Write file
|
||||
fs::write(&path, data)
|
||||
.await
|
||||
.map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn get(&self, key: &str) -> Result<Bytes> {
|
||||
let path = self.key_to_path(key);
|
||||
|
||||
if !path.exists() {
|
||||
return Err(StorageError::NotFound);
|
||||
}
|
||||
|
||||
let mut file = fs::File::open(&path).await.map_err(|e| {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
StorageError::NotFound
|
||||
} else {
|
||||
StorageError::Io(e.to_string())
|
||||
}
|
||||
})?;
|
||||
|
||||
let mut buffer = Vec::new();
|
||||
file.read_to_end(&mut buffer)
|
||||
.await
|
||||
.map_err(|e| StorageError::Io(e.to_string()))?;
|
||||
|
||||
Ok(Bytes::from(buffer))
|
||||
}
|
||||
|
||||
async fn list(&self, prefix: &str) -> Vec<String> {
|
||||
let base = self.base_path.clone();
|
||||
let prefix = prefix.to_string();
|
||||
|
||||
// Use blocking task for filesystem traversal
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let mut results = Vec::new();
|
||||
if base.exists() {
|
||||
Self::list_files_sync(&base, &base, &prefix, &mut results);
|
||||
}
|
||||
results.sort();
|
||||
results
|
||||
})
|
||||
.await
|
||||
.unwrap_or_default()
|
||||
}
|
||||
|
||||
async fn stat(&self, key: &str) -> Option<FileMeta> {
|
||||
let path = self.key_to_path(key);
|
||||
let metadata = fs::metadata(&path).await.ok()?;
|
||||
let modified = metadata
|
||||
.modified()
|
||||
.ok()?
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.ok()?
|
||||
.as_secs();
|
||||
Some(FileMeta {
|
||||
size: metadata.len(),
|
||||
modified,
|
||||
})
|
||||
}
|
||||
|
||||
async fn health_check(&self) -> bool {
|
||||
// For local storage, just check if base directory exists or can be created
|
||||
if self.base_path.exists() {
|
||||
return true;
|
||||
}
|
||||
fs::create_dir_all(&self.base_path).await.is_ok()
|
||||
}
|
||||
|
||||
fn backend_name(&self) -> &'static str {
|
||||
"local"
|
||||
}
|
||||
}
|
||||
93
nora-registry/src/storage/mod.rs
Normal file
93
nora-registry/src/storage/mod.rs
Normal file
@@ -0,0 +1,93 @@
|
||||
mod local;
|
||||
mod s3;
|
||||
|
||||
pub use local::LocalStorage;
|
||||
pub use s3::S3Storage;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use axum::body::Bytes;
|
||||
use std::fmt;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// File metadata
///
/// Returned by `StorageBackend::stat` for both local and S3 backends.
#[derive(Debug, Clone)]
pub struct FileMeta {
    /// Object size in bytes.
    pub size: u64,
    /// Last-modified time, seconds since the Unix epoch.
    pub modified: u64, // Unix timestamp
}
|
||||
|
||||
/// Errors a storage backend can report.
#[derive(Debug)]
pub enum StorageError {
    Network(String),
    NotFound,
    Io(String),
}

impl fmt::Display for StorageError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Keep these messages stable: they surface in logs and errors.
        match self {
            Self::NotFound => f.write_str("Object not found"),
            Self::Network(msg) => write!(f, "Network error: {}", msg),
            Self::Io(msg) => write!(f, "IO error: {}", msg),
        }
    }
}

impl std::error::Error for StorageError {}

/// Crate-local result alias for storage operations.
pub type Result<T> = std::result::Result<T, StorageError>;
|
||||
|
||||
/// Storage backend trait
///
/// Common contract implemented by `LocalStorage` and `S3Storage`.
/// `list` and `stat` are infallible by design: failures degrade to an
/// empty list / `None` rather than returning an error.
#[async_trait]
pub trait StorageBackend: Send + Sync {
    /// Store `data` under `key`, overwriting any existing object.
    async fn put(&self, key: &str, data: &[u8]) -> Result<()>;
    /// Fetch the full object for `key`.
    async fn get(&self, key: &str) -> Result<Bytes>;
    /// All keys beginning with `prefix`.
    async fn list(&self, prefix: &str) -> Vec<String>;
    /// Size/mtime for `key`, or `None` if absent.
    async fn stat(&self, key: &str) -> Option<FileMeta>;
    /// True when the backend is usable.
    async fn health_check(&self) -> bool;
    /// Short identifier (e.g. "local", "s3") for logs/diagnostics.
    fn backend_name(&self) -> &'static str;
}
|
||||
|
||||
/// Storage wrapper for dynamic dispatch
///
/// Cheap to clone: clones share the same backend through the `Arc`.
#[derive(Clone)]
pub struct Storage {
    // Concrete backend, type-erased behind the trait object.
    inner: Arc<dyn StorageBackend>,
}
|
||||
|
||||
impl Storage {
    /// Build a filesystem-backed store rooted at `path`.
    pub fn new_local(path: &str) -> Self {
        Self {
            inner: Arc::new(LocalStorage::new(path)),
        }
    }

    /// Build an S3-compatible store (`s3_url` is the endpoint base URL).
    pub fn new_s3(s3_url: &str, bucket: &str) -> Self {
        Self {
            inner: Arc::new(S3Storage::new(s3_url, bucket)),
        }
    }

    // The remaining methods delegate directly to the active backend;
    // see `StorageBackend` for their contracts.

    pub async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
        self.inner.put(key, data).await
    }

    pub async fn get(&self, key: &str) -> Result<Bytes> {
        self.inner.get(key).await
    }

    pub async fn list(&self, prefix: &str) -> Vec<String> {
        self.inner.list(prefix).await
    }

    pub async fn stat(&self, key: &str) -> Option<FileMeta> {
        self.inner.stat(key).await
    }

    pub async fn health_check(&self) -> bool {
        self.inner.health_check().await
    }

    pub fn backend_name(&self) -> &'static str {
        self.inner.backend_name()
    }
}
|
||||
129
nora-registry/src/storage/s3.rs
Normal file
129
nora-registry/src/storage/s3.rs
Normal file
@@ -0,0 +1,129 @@
|
||||
use async_trait::async_trait;
|
||||
use axum::body::Bytes;
|
||||
|
||||
use super::{FileMeta, Result, StorageBackend, StorageError};
|
||||
|
||||
/// S3-compatible storage backend (MinIO, AWS S3)
// NOTE(review): requests carry no AWS SigV4 signature — this appears
// to assume an anonymous-access bucket (e.g. local MinIO); confirm
// before pointing it at authenticated AWS S3.
pub struct S3Storage {
    // Endpoint base URL, e.g. "http://minio:9000".
    s3_url: String,
    bucket: String,
    // Reused across requests for connection pooling.
    client: reqwest::Client,
}
|
||||
|
||||
impl S3Storage {
|
||||
pub fn new(s3_url: &str, bucket: &str) -> Self {
|
||||
Self {
|
||||
s3_url: s3_url.to_string(),
|
||||
bucket: bucket.to_string(),
|
||||
client: reqwest::Client::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_s3_keys(xml: &str, prefix: &str) -> Vec<String> {
|
||||
xml.split("<Key>")
|
||||
.filter_map(|part| part.split("</Key>").next())
|
||||
.filter(|key| key.starts_with(prefix))
|
||||
.map(String::from)
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl StorageBackend for S3Storage {
|
||||
async fn put(&self, key: &str, data: &[u8]) -> Result<()> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let response = self
|
||||
.client
|
||||
.put(&url)
|
||||
.body(data.to_vec())
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| StorageError::Network(e.to_string()))?;
|
||||
|
||||
if response.status().is_success() {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(StorageError::Network(format!(
|
||||
"PUT failed: {}",
|
||||
response.status()
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
async fn get(&self, key: &str) -> Result<Bytes> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let response = self
|
||||
.client
|
||||
.get(&url)
|
||||
.send()
|
||||
.await
|
||||
.map_err(|e| StorageError::Network(e.to_string()))?;
|
||||
|
||||
if response.status().is_success() {
|
||||
response
|
||||
.bytes()
|
||||
.await
|
||||
.map_err(|e| StorageError::Network(e.to_string()))
|
||||
} else if response.status().as_u16() == 404 {
|
||||
Err(StorageError::NotFound)
|
||||
} else {
|
||||
Err(StorageError::Network(format!(
|
||||
"GET failed: {}",
|
||||
response.status()
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
async fn list(&self, prefix: &str) -> Vec<String> {
|
||||
let url = format!("{}/{}", self.s3_url, self.bucket);
|
||||
match self.client.get(&url).send().await {
|
||||
Ok(response) if response.status().is_success() => {
|
||||
if let Ok(xml) = response.text().await {
|
||||
Self::parse_s3_keys(&xml, prefix)
|
||||
} else {
|
||||
Vec::new()
|
||||
}
|
||||
}
|
||||
_ => Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
async fn stat(&self, key: &str) -> Option<FileMeta> {
|
||||
let url = format!("{}/{}/{}", self.s3_url, self.bucket, key);
|
||||
let response = self.client.head(&url).send().await.ok()?;
|
||||
if !response.status().is_success() {
|
||||
return None;
|
||||
}
|
||||
let size = response
|
||||
.headers()
|
||||
.get("content-length")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.and_then(|v| v.parse().ok())
|
||||
.unwrap_or(0);
|
||||
// S3 uses Last-Modified header, but for simplicity use current time if unavailable
|
||||
let modified = response
|
||||
.headers()
|
||||
.get("last-modified")
|
||||
.and_then(|v| v.to_str().ok())
|
||||
.and_then(|v| httpdate::parse_http_date(v).ok())
|
||||
.map(|t| {
|
||||
t.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs()
|
||||
})
|
||||
.unwrap_or(0);
|
||||
Some(FileMeta { size, modified })
|
||||
}
|
||||
|
||||
async fn health_check(&self) -> bool {
|
||||
let url = format!("{}/{}", self.s3_url, self.bucket);
|
||||
match self.client.head(&url).send().await {
|
||||
Ok(response) => response.status().is_success() || response.status().as_u16() == 404,
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn backend_name(&self) -> &'static str {
|
||||
"s3"
|
||||
}
|
||||
}
|
||||
202
nora-registry/src/tokens.rs
Normal file
202
nora-registry/src/tokens.rs
Normal file
@@ -0,0 +1,202 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use uuid::Uuid;
|
||||
|
||||
const TOKEN_PREFIX: &str = "nra_";
|
||||
|
||||
/// API Token metadata stored on disk
///
/// Only a hash of the raw token is persisted (via `hash_token`); the
/// raw token is returned to the caller once at creation time.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TokenInfo {
    /// Hash of the raw token string; also used (truncated) as the
    /// on-disk file name.
    pub token_hash: String,
    /// Owning username.
    pub user: String,
    /// Creation time, seconds since the Unix epoch.
    pub created_at: u64,
    /// Expiry time, seconds since the Unix epoch.
    pub expires_at: u64,
    /// Last successful verification time, if any.
    pub last_used: Option<u64>,
    /// Optional human-readable label.
    pub description: Option<String>,
}
|
||||
|
||||
/// Token store for managing API tokens
///
/// Tokens are persisted as one JSON file per token under
/// `storage_path`, named by a prefix of the token hash.
#[derive(Clone)]
pub struct TokenStore {
    // Directory holding the per-token JSON files.
    storage_path: PathBuf,
}
|
||||
|
||||
impl TokenStore {
|
||||
/// Create a new token store
|
||||
pub fn new(storage_path: &Path) -> Self {
|
||||
// Ensure directory exists
|
||||
let _ = fs::create_dir_all(storage_path);
|
||||
Self {
|
||||
storage_path: storage_path.to_path_buf(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate a new API token for a user
|
||||
pub fn create_token(
|
||||
&self,
|
||||
user: &str,
|
||||
ttl_days: u64,
|
||||
description: Option<String>,
|
||||
) -> Result<String, TokenError> {
|
||||
// Generate random token
|
||||
let raw_token = format!(
|
||||
"{}{}",
|
||||
TOKEN_PREFIX,
|
||||
Uuid::new_v4().to_string().replace("-", "")
|
||||
);
|
||||
let token_hash = hash_token(&raw_token);
|
||||
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
|
||||
let expires_at = now + (ttl_days * 24 * 60 * 60);
|
||||
|
||||
let info = TokenInfo {
|
||||
token_hash: token_hash.clone(),
|
||||
user: user.to_string(),
|
||||
created_at: now,
|
||||
expires_at,
|
||||
last_used: None,
|
||||
description,
|
||||
};
|
||||
|
||||
// Save to file
|
||||
let file_path = self
|
||||
.storage_path
|
||||
.join(format!("{}.json", &token_hash[..16]));
|
||||
let json =
|
||||
serde_json::to_string_pretty(&info).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
fs::write(&file_path, json).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
|
||||
Ok(raw_token)
|
||||
}
|
||||
|
||||
/// Verify a token and return user info if valid
|
||||
pub fn verify_token(&self, token: &str) -> Result<String, TokenError> {
|
||||
if !token.starts_with(TOKEN_PREFIX) {
|
||||
return Err(TokenError::InvalidFormat);
|
||||
}
|
||||
|
||||
let token_hash = hash_token(token);
|
||||
let file_path = self
|
||||
.storage_path
|
||||
.join(format!("{}.json", &token_hash[..16]));
|
||||
|
||||
if !file_path.exists() {
|
||||
return Err(TokenError::NotFound);
|
||||
}
|
||||
|
||||
let content =
|
||||
fs::read_to_string(&file_path).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
let mut info: TokenInfo =
|
||||
serde_json::from_str(&content).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
|
||||
// Verify hash matches
|
||||
if info.token_hash != token_hash {
|
||||
return Err(TokenError::NotFound);
|
||||
}
|
||||
|
||||
// Check expiration
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
|
||||
if now > info.expires_at {
|
||||
return Err(TokenError::Expired);
|
||||
}
|
||||
|
||||
// Update last_used
|
||||
info.last_used = Some(now);
|
||||
if let Ok(json) = serde_json::to_string_pretty(&info) {
|
||||
let _ = fs::write(&file_path, json);
|
||||
}
|
||||
|
||||
Ok(info.user)
|
||||
}
|
||||
|
||||
/// List all tokens for a user
|
||||
pub fn list_tokens(&self, user: &str) -> Vec<TokenInfo> {
|
||||
let mut tokens = Vec::new();
|
||||
|
||||
if let Ok(entries) = fs::read_dir(&self.storage_path) {
|
||||
for entry in entries.flatten() {
|
||||
if let Ok(content) = fs::read_to_string(entry.path()) {
|
||||
if let Ok(info) = serde_json::from_str::<TokenInfo>(&content) {
|
||||
if info.user == user {
|
||||
tokens.push(info);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tokens.sort_by(|a, b| b.created_at.cmp(&a.created_at));
|
||||
tokens
|
||||
}
|
||||
|
||||
/// Revoke a token by its hash prefix
|
||||
pub fn revoke_token(&self, hash_prefix: &str) -> Result<(), TokenError> {
|
||||
let file_path = self.storage_path.join(format!("{}.json", hash_prefix));
|
||||
|
||||
if !file_path.exists() {
|
||||
return Err(TokenError::NotFound);
|
||||
}
|
||||
|
||||
fs::remove_file(&file_path).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Revoke all tokens for a user
|
||||
pub fn revoke_all_for_user(&self, user: &str) -> usize {
|
||||
let mut count = 0;
|
||||
|
||||
if let Ok(entries) = fs::read_dir(&self.storage_path) {
|
||||
for entry in entries.flatten() {
|
||||
if let Ok(content) = fs::read_to_string(entry.path()) {
|
||||
if let Ok(info) = serde_json::from_str::<TokenInfo>(&content) {
|
||||
if info.user == user && fs::remove_file(entry.path()).is_ok() {
|
||||
count += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
count
|
||||
}
|
||||
}
|
||||
|
||||
/// Hash a token using SHA256
|
||||
fn hash_token(token: &str) -> String {
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(token.as_bytes());
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
/// Errors produced by token creation, verification, and revocation.
#[derive(Debug)]
pub enum TokenError {
    /// The token string does not carry the expected prefix.
    InvalidFormat,
    /// No stored token matches the supplied token/prefix.
    NotFound,
    /// The token exists but its expiry timestamp has passed.
    Expired,
    /// Underlying filesystem or (de)serialization failure.
    Storage(String),
}
|
||||
|
||||
impl std::fmt::Display for TokenError {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
Self::InvalidFormat => write!(f, "Invalid token format"),
|
||||
Self::NotFound => write!(f, "Token not found"),
|
||||
Self::Expired => write!(f, "Token expired"),
|
||||
Self::Storage(msg) => write!(f, "Storage error: {}", msg),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Marker impl: lets TokenError flow through `Box<dyn Error>` and `?` chains.
impl std::error::Error for TokenError {}
|
||||
580
nora-registry/src/ui/api.rs
Normal file
580
nora-registry/src/ui/api.rs
Normal file
@@ -0,0 +1,580 @@
|
||||
use super::components::{format_size, format_timestamp, html_escape};
|
||||
use super::templates::encode_uri_component;
|
||||
use crate::AppState;
|
||||
use crate::Storage;
|
||||
use axum::{
|
||||
extract::{Path, Query, State},
|
||||
response::Json,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Per-registry repository/package counts shown on the dashboard.
#[derive(Serialize)]
pub struct RegistryStats {
    pub docker: usize,
    pub maven: usize,
    pub npm: usize,
    pub cargo: usize,
    pub pypi: usize,
}

/// One row in a registry listing: a repository/package summary.
#[derive(Serialize, Clone)]
pub struct RepoInfo {
    pub name: String,
    // Number of versions/tags/files counted for this repository.
    pub versions: usize,
    // Cumulative size in bytes of the counted objects.
    pub size: u64,
    // Human-readable relative time of the most recent modification ("N/A" if unknown).
    pub updated: String,
}

/// One Docker tag (manifest) within a repository.
#[derive(Serialize)]
pub struct TagInfo {
    pub name: String,
    pub size: u64,
    pub created: String,
}

/// Detail payload for a Docker repository: its tag list.
#[derive(Serialize)]
pub struct DockerDetail {
    pub tags: Vec<TagInfo>,
}

/// One published version of a package (npm/cargo/pypi).
#[derive(Serialize)]
pub struct VersionInfo {
    pub version: String,
    pub size: u64,
    pub published: String,
}

/// Detail payload shared by npm, cargo, and pypi: the version list.
#[derive(Serialize)]
pub struct PackageDetail {
    pub versions: Vec<VersionInfo>,
}

/// One file stored under a Maven artifact path.
#[derive(Serialize)]
pub struct MavenArtifact {
    pub filename: String,
    pub size: u64,
}

/// Detail payload for a Maven artifact: its file list.
#[derive(Serialize)]
pub struct MavenDetail {
    pub artifacts: Vec<MavenArtifact>,
}

/// Query string for the HTMX search endpoint (`?q=...`).
#[derive(Deserialize)]
pub struct SearchQuery {
    pub q: Option<String>,
}
|
||||
|
||||
// ============ API Handlers ============
|
||||
|
||||
pub async fn api_stats(State(state): State<Arc<AppState>>) -> Json<RegistryStats> {
|
||||
let stats = get_registry_stats(&state.storage).await;
|
||||
Json(stats)
|
||||
}
|
||||
|
||||
pub async fn api_list(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(registry_type): Path<String>,
|
||||
) -> Json<Vec<RepoInfo>> {
|
||||
let repos = match registry_type.as_str() {
|
||||
"docker" => get_docker_repos(&state.storage).await,
|
||||
"maven" => get_maven_repos(&state.storage).await,
|
||||
"npm" => get_npm_packages(&state.storage).await,
|
||||
"cargo" => get_cargo_crates(&state.storage).await,
|
||||
"pypi" => get_pypi_packages(&state.storage).await,
|
||||
_ => vec![],
|
||||
};
|
||||
Json(repos)
|
||||
}
|
||||
|
||||
pub async fn api_detail(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((registry_type, name)): Path<(String, String)>,
|
||||
) -> Json<serde_json::Value> {
|
||||
match registry_type.as_str() {
|
||||
"docker" => {
|
||||
let detail = get_docker_detail(&state.storage, &name).await;
|
||||
Json(serde_json::to_value(detail).unwrap_or_default())
|
||||
}
|
||||
"npm" => {
|
||||
let detail = get_npm_detail(&state.storage, &name).await;
|
||||
Json(serde_json::to_value(detail).unwrap_or_default())
|
||||
}
|
||||
"cargo" => {
|
||||
let detail = get_cargo_detail(&state.storage, &name).await;
|
||||
Json(serde_json::to_value(detail).unwrap_or_default())
|
||||
}
|
||||
_ => Json(serde_json::json!({})),
|
||||
}
|
||||
}
|
||||
|
||||
/// GET /api/ui/{registry_type}/search?q=… — HTMX endpoint returning a
/// `<tr>…</tr>` HTML fragment for the listing table, filtered by a
/// case-insensitive substring match on the repository name.
pub async fn api_search(
    State(state): State<Arc<AppState>>,
    Path(registry_type): Path<String>,
    Query(params): Query<SearchQuery>,
) -> axum::response::Html<String> {
    // Missing `q` behaves like an empty query (no filtering).
    let query = params.q.unwrap_or_default().to_lowercase();

    let repos = match registry_type.as_str() {
        "docker" => get_docker_repos(&state.storage).await,
        "maven" => get_maven_repos(&state.storage).await,
        "npm" => get_npm_packages(&state.storage).await,
        "cargo" => get_cargo_crates(&state.storage).await,
        "pypi" => get_pypi_packages(&state.storage).await,
        // Unknown registry types produce no rows (also means no URLs are
        // built from an arbitrary registry_type below).
        _ => vec![],
    };

    let filtered: Vec<_> = if query.is_empty() {
        repos
    } else {
        repos
            .into_iter()
            .filter(|r| r.name.to_lowercase().contains(&query))
            .collect()
    };

    // Return HTML fragment for HTMX
    let html = if filtered.is_empty() {
        r#"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
<div class="text-4xl mb-2">🔍</div>
<div>No matching repositories found</div>
</td></tr>"#
            .to_string()
    } else {
        filtered
            .iter()
            .map(|repo| {
                // Name is URI-encoded for the href/onclick and HTML-escaped
                // for the visible cell text.
                let detail_url =
                    format!("/ui/{}/{}", registry_type, encode_uri_component(&repo.name));
                format!(
                    r#"
<tr class="hover:bg-slate-50 cursor-pointer" onclick="window.location='{}'">
<td class="px-6 py-4">
<a href="{}" class="text-blue-600 hover:text-blue-800 font-medium">{}</a>
</td>
<td class="px-6 py-4 text-slate-600">{}</td>
<td class="px-6 py-4 text-slate-600">{}</td>
<td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"#,
                    detail_url,
                    detail_url,
                    html_escape(&repo.name),
                    repo.versions,
                    format_size(repo.size),
                    &repo.updated
                )
            })
            .collect::<Vec<_>>()
            .join("")
    };

    axum::response::Html(html)
}
|
||||
|
||||
// ============ Data Fetching Functions ============
|
||||
|
||||
pub async fn get_registry_stats(storage: &Storage) -> RegistryStats {
|
||||
let all_keys = storage.list("").await;
|
||||
|
||||
let docker = all_keys
|
||||
.iter()
|
||||
.filter(|k| k.starts_with("docker/") && k.contains("/manifests/"))
|
||||
.filter_map(|k| k.split('/').nth(1))
|
||||
.collect::<HashSet<_>>()
|
||||
.len();
|
||||
|
||||
let maven = all_keys
|
||||
.iter()
|
||||
.filter(|k| k.starts_with("maven/"))
|
||||
.filter_map(|k| {
|
||||
// Extract groupId/artifactId from maven path
|
||||
let parts: Vec<_> = k.strip_prefix("maven/")?.split('/').collect();
|
||||
if parts.len() >= 2 {
|
||||
Some(parts[..parts.len() - 1].join("/"))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<HashSet<_>>()
|
||||
.len();
|
||||
|
||||
let npm = all_keys
|
||||
.iter()
|
||||
.filter(|k| k.starts_with("npm/") && k.ends_with("/metadata.json"))
|
||||
.count();
|
||||
|
||||
let cargo = all_keys
|
||||
.iter()
|
||||
.filter(|k| k.starts_with("cargo/") && k.ends_with("/metadata.json"))
|
||||
.count();
|
||||
|
||||
let pypi = all_keys
|
||||
.iter()
|
||||
.filter(|k| k.starts_with("pypi/"))
|
||||
.filter_map(|k| k.strip_prefix("pypi/")?.split('/').next())
|
||||
.collect::<HashSet<_>>()
|
||||
.len();
|
||||
|
||||
RegistryStats {
|
||||
docker,
|
||||
maven,
|
||||
npm,
|
||||
cargo,
|
||||
pypi,
|
||||
}
|
||||
}
|
||||
|
||||
/// List Docker repositories with tag counts, cumulative manifest size, and
/// the most recent manifest modification time, sorted by name.
pub async fn get_docker_repos(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("docker/").await;

    // Keyed by repo name; the second tuple field tracks the newest mtime
    // seen so far so `updated` only ever moves forward.
    let mut repos: HashMap<String, (RepoInfo, u64)> = HashMap::new(); // (info, latest_modified)

    for key in &keys {
        if let Some(rest) = key.strip_prefix("docker/") {
            let parts: Vec<_> = rest.split('/').collect();
            if parts.len() >= 3 {
                let name = parts[0].to_string();
                // NOTE(review): an entry is created for ANY key with >= 3
                // segments (blobs included), so a repo holding only blobs
                // still appears with 0 versions — confirm this is intended.
                let entry = repos.entry(name.clone()).or_insert_with(|| {
                    (
                        RepoInfo {
                            name,
                            versions: 0,
                            size: 0,
                            updated: "N/A".to_string(),
                        },
                        0,
                    )
                });

                // Only manifests count as versions and contribute size/mtime.
                if parts[1] == "manifests" {
                    entry.0.versions += 1;
                    if let Some(meta) = storage.stat(key).await {
                        entry.0.size += meta.size;
                        if meta.modified > entry.1 {
                            entry.1 = meta.modified;
                            entry.0.updated = format_timestamp(meta.modified);
                        }
                    }
                }
            }
        }
    }

    // Stable alphabetical ordering for the UI table.
    let mut result: Vec<_> = repos.into_values().map(|(r, _)| r).collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}
|
||||
|
||||
pub async fn get_docker_detail(storage: &Storage, name: &str) -> DockerDetail {
|
||||
let prefix = format!("docker/{}/manifests/", name);
|
||||
let keys = storage.list(&prefix).await;
|
||||
|
||||
let mut tags = Vec::new();
|
||||
for key in &keys {
|
||||
if let Some(tag_name) = key
|
||||
.strip_prefix(&prefix)
|
||||
.and_then(|s| s.strip_suffix(".json"))
|
||||
{
|
||||
let (size, created) = if let Some(meta) = storage.stat(key).await {
|
||||
(meta.size, format_timestamp(meta.modified))
|
||||
} else {
|
||||
(0, "N/A".to_string())
|
||||
};
|
||||
tags.push(TagInfo {
|
||||
name: tag_name.to_string(),
|
||||
size,
|
||||
created,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
DockerDetail { tags }
|
||||
}
|
||||
|
||||
/// List Maven artifacts (grouped by parent directory path) with file
/// counts, cumulative size, and most recent modification time.
pub async fn get_maven_repos(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("maven/").await;

    // Keyed by artifact path; second tuple field is the newest mtime seen.
    let mut repos: HashMap<String, (RepoInfo, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("maven/") {
            let parts: Vec<_> = rest.split('/').collect();
            if parts.len() >= 2 {
                // "Artifact" = everything up to (but excluding) the file name.
                let artifact_path = parts[..parts.len() - 1].join("/");
                let entry = repos.entry(artifact_path.clone()).or_insert_with(|| {
                    (
                        RepoInfo {
                            name: artifact_path,
                            versions: 0,
                            size: 0,
                            updated: "N/A".to_string(),
                        },
                        0,
                    )
                });
                // NOTE(review): every file (including maven-metadata.xml and
                // checksums) increments `versions` — confirm this count is
                // what the UI means by "versions".
                entry.0.versions += 1;
                if let Some(meta) = storage.stat(key).await {
                    entry.0.size += meta.size;
                    if meta.modified > entry.1 {
                        entry.1 = meta.modified;
                        entry.0.updated = format_timestamp(meta.modified);
                    }
                }
            }
        }
    }

    // Alphabetical by artifact path for the UI.
    let mut result: Vec<_> = repos.into_values().map(|(r, _)| r).collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}
|
||||
|
||||
pub async fn get_maven_detail(storage: &Storage, path: &str) -> MavenDetail {
|
||||
let prefix = format!("maven/{}/", path);
|
||||
let keys = storage.list(&prefix).await;
|
||||
|
||||
let mut artifacts = Vec::new();
|
||||
for key in &keys {
|
||||
if let Some(filename) = key.strip_prefix(&prefix) {
|
||||
if filename.contains('/') {
|
||||
continue;
|
||||
}
|
||||
let size = storage.stat(key).await.map(|m| m.size).unwrap_or(0);
|
||||
artifacts.push(MavenArtifact {
|
||||
filename: filename.to_string(),
|
||||
size,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
MavenDetail { artifacts }
|
||||
}
|
||||
|
||||
/// List npm packages with tarball counts, cumulative size, and most recent
/// publish (modification) time, sorted by name.
pub async fn get_npm_packages(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("npm/").await;

    // Keyed by package name; second tuple field is the newest mtime seen.
    let mut packages: HashMap<String, (RepoInfo, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("npm/") {
            let parts: Vec<_> = rest.split('/').collect();
            if !parts.is_empty() {
                let name = parts[0].to_string();
                // NOTE(review): an entry is created for any key under the
                // package dir (metadata.json included), so a package with no
                // tarballs still lists with 0 versions — confirm intended.
                let entry = packages.entry(name.clone()).or_insert_with(|| {
                    (
                        RepoInfo {
                            name,
                            versions: 0,
                            size: 0,
                            updated: "N/A".to_string(),
                        },
                        0,
                    )
                });

                // Only tarballs count as published versions.
                if parts.len() >= 3 && parts[1] == "tarballs" {
                    entry.0.versions += 1;
                    if let Some(meta) = storage.stat(key).await {
                        entry.0.size += meta.size;
                        if meta.modified > entry.1 {
                            entry.1 = meta.modified;
                            entry.0.updated = format_timestamp(meta.modified);
                        }
                    }
                }
            }
        }
    }

    // Alphabetical for the UI table.
    let mut result: Vec<_> = packages.into_values().map(|(r, _)| r).collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}
|
||||
|
||||
pub async fn get_npm_detail(storage: &Storage, name: &str) -> PackageDetail {
|
||||
let prefix = format!("npm/{}/tarballs/", name);
|
||||
let keys = storage.list(&prefix).await;
|
||||
|
||||
let mut versions = Vec::new();
|
||||
for key in &keys {
|
||||
if let Some(tarball) = key.strip_prefix(&prefix) {
|
||||
if let Some(version) = tarball
|
||||
.strip_prefix(&format!("{}-", name))
|
||||
.and_then(|s| s.strip_suffix(".tgz"))
|
||||
{
|
||||
let (size, published) = if let Some(meta) = storage.stat(key).await {
|
||||
(meta.size, format_timestamp(meta.modified))
|
||||
} else {
|
||||
(0, "N/A".to_string())
|
||||
};
|
||||
versions.push(VersionInfo {
|
||||
version: version.to_string(),
|
||||
size,
|
||||
published,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PackageDetail { versions }
|
||||
}
|
||||
|
||||
/// List crates with version counts, cumulative `.crate` file size, and most
/// recent publish (modification) time, sorted by name.
pub async fn get_cargo_crates(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("cargo/").await;

    // Keyed by crate name; second tuple field is the newest mtime seen.
    let mut crates: HashMap<String, (RepoInfo, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("cargo/") {
            let parts: Vec<_> = rest.split('/').collect();
            if !parts.is_empty() {
                let name = parts[0].to_string();
                // NOTE(review): an entry is created for any key under the
                // crate dir (metadata.json included), so a crate with no
                // .crate files still lists with 0 versions — confirm intended.
                let entry = crates.entry(name.clone()).or_insert_with(|| {
                    (
                        RepoInfo {
                            name,
                            versions: 0,
                            size: 0,
                            updated: "N/A".to_string(),
                        },
                        0,
                    )
                });

                // Only packaged .crate files count as versions.
                if parts.len() >= 3 && key.ends_with(".crate") {
                    entry.0.versions += 1;
                    if let Some(meta) = storage.stat(key).await {
                        entry.0.size += meta.size;
                        if meta.modified > entry.1 {
                            entry.1 = meta.modified;
                            entry.0.updated = format_timestamp(meta.modified);
                        }
                    }
                }
            }
        }
    }

    // Alphabetical for the UI table.
    let mut result: Vec<_> = crates.into_values().map(|(r, _)| r).collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}
|
||||
|
||||
pub async fn get_cargo_detail(storage: &Storage, name: &str) -> PackageDetail {
|
||||
let prefix = format!("cargo/{}/", name);
|
||||
let keys = storage.list(&prefix).await;
|
||||
|
||||
let mut versions = Vec::new();
|
||||
for key in keys.iter().filter(|k| k.ends_with(".crate")) {
|
||||
if let Some(rest) = key.strip_prefix(&prefix) {
|
||||
let parts: Vec<_> = rest.split('/').collect();
|
||||
if !parts.is_empty() {
|
||||
let (size, published) = if let Some(meta) = storage.stat(key).await {
|
||||
(meta.size, format_timestamp(meta.modified))
|
||||
} else {
|
||||
(0, "N/A".to_string())
|
||||
};
|
||||
versions.push(VersionInfo {
|
||||
version: parts[0].to_string(),
|
||||
size,
|
||||
published,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PackageDetail { versions }
|
||||
}
|
||||
|
||||
/// List PyPI packages with file counts, cumulative size, and most recent
/// upload (modification) time, sorted by name.
pub async fn get_pypi_packages(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("pypi/").await;

    // Keyed by package name; second tuple field is the newest mtime seen.
    let mut packages: HashMap<String, (RepoInfo, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("pypi/") {
            let parts: Vec<_> = rest.split('/').collect();
            if !parts.is_empty() {
                let name = parts[0].to_string();
                let entry = packages.entry(name.clone()).or_insert_with(|| {
                    (
                        RepoInfo {
                            name,
                            versions: 0,
                            size: 0,
                            updated: "N/A".to_string(),
                        },
                        0,
                    )
                });

                // Any nested key (a distribution file) counts as a version;
                // NOTE(review): .whl and .tar.gz of the same release both
                // count — confirm "versions" means files here.
                if parts.len() >= 2 {
                    entry.0.versions += 1;
                    if let Some(meta) = storage.stat(key).await {
                        entry.0.size += meta.size;
                        if meta.modified > entry.1 {
                            entry.1 = meta.modified;
                            entry.0.updated = format_timestamp(meta.modified);
                        }
                    }
                }
            }
        }
    }

    // Alphabetical for the UI table.
    let mut result: Vec<_> = packages.into_values().map(|(r, _)| r).collect();
    result.sort_by(|a, b| a.name.cmp(&b.name));
    result
}
|
||||
|
||||
pub async fn get_pypi_detail(storage: &Storage, name: &str) -> PackageDetail {
|
||||
let prefix = format!("pypi/{}/", name);
|
||||
let keys = storage.list(&prefix).await;
|
||||
|
||||
let mut versions = Vec::new();
|
||||
for key in &keys {
|
||||
if let Some(filename) = key.strip_prefix(&prefix) {
|
||||
if let Some(version) = extract_pypi_version(name, filename) {
|
||||
let (size, published) = if let Some(meta) = storage.stat(key).await {
|
||||
(meta.size, format_timestamp(meta.modified))
|
||||
} else {
|
||||
(0, "N/A".to_string())
|
||||
};
|
||||
versions.push(VersionInfo {
|
||||
version,
|
||||
size,
|
||||
published,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PackageDetail { versions }
|
||||
}
|
||||
|
||||
/// Extract the version component from a PyPI distribution filename.
///
/// Handles sdists ("pkg-1.0.0.tar.gz") and wheels
/// ("pkg-1.0.0-py3-none-any.whl"). Per PEP 427, wheel filenames use the
/// name with '-' replaced by '_', so both spellings of `name` are tried.
///
/// Fixed: the wheel branch previously returned the second '-'-separated
/// field of ANY `.whl` filename without checking that the name component
/// matches `name`, attributing foreign files' versions to this package.
/// Returns `None` for unrelated or unparsable filenames.
fn extract_pypi_version(name: &str, filename: &str) -> Option<String> {
    let clean_name = name.replace('-', "_");

    if let Some(base) = filename.strip_suffix(".tar.gz") {
        // "<name>-<version>.tar.gz"
        let version = base
            .strip_prefix(&format!("{}-", name))
            .or_else(|| base.strip_prefix(&format!("{}-", clean_name)))?;
        Some(version.to_string())
    } else if let Some(base) = filename.strip_suffix(".whl") {
        // "<name>-<version>-<python>-<abi>-<platform>.whl"
        let rest = base
            .strip_prefix(&format!("{}-", clean_name))
            .or_else(|| base.strip_prefix(&format!("{}-", name)))?;
        // Version is everything up to the next '-'.
        let version = rest.split('-').next()?;
        Some(version.to_string())
    } else {
        None
    }
}
|
||||
222
nora-registry/src/ui/components.rs
Normal file
222
nora-registry/src/ui/components.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
/// Main layout wrapper with header and sidebar.
///
/// Produces the full HTML document: `title` goes into `<title>` (escaped),
/// `content` is inserted verbatim into `<main>` (callers are responsible
/// for escaping), and `active_page` highlights the matching sidebar item.
pub fn layout(title: &str, content: &str, active_page: Option<&str>) -> String {
    // Doubled braces `{{ }}` are literal braces for the inline CSS rule.
    format!(
        r##"<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{} - Nora</title>
<script src="https://cdn.tailwindcss.com"></script>
<script src="https://unpkg.com/htmx.org@1.9.10"></script>
<style>
[x-cloak] {{ display: none !important; }}
</style>
</head>
<body class="bg-slate-100 min-h-screen">
<div class="flex h-screen overflow-hidden">
<!-- Sidebar -->
{}

<!-- Main content -->
<div class="flex-1 flex flex-col overflow-hidden">
<!-- Header -->
{}

<!-- Content -->
<main class="flex-1 overflow-y-auto p-6">
{}
</main>
</div>
</div>
</body>
</html>"##,
        html_escape(title),
        sidebar(active_page),
        header(),
        content
    )
}
|
||||
|
||||
/// Sidebar navigation component
|
||||
fn sidebar(active_page: Option<&str>) -> String {
|
||||
let active = active_page.unwrap_or("");
|
||||
|
||||
let nav_items = [
|
||||
(
|
||||
"dashboard",
|
||||
"/ui/",
|
||||
"Dashboard",
|
||||
r#"<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M3 12l2-2m0 0l7-7 7 7M5 10v10a1 1 0 001 1h3m10-11l2 2m-2-2v10a1 1 0 01-1 1h-3m-6 0a1 1 0 001-1v-4a1 1 0 011-1h2a1 1 0 011 1v4a1 1 0 001 1m-6 0h6"/>"#,
|
||||
),
|
||||
("docker", "/ui/docker", "🐳 Docker", ""),
|
||||
("maven", "/ui/maven", "☕ Maven", ""),
|
||||
("npm", "/ui/npm", "📦 npm", ""),
|
||||
("cargo", "/ui/cargo", "🦀 Cargo", ""),
|
||||
("pypi", "/ui/pypi", "🐍 PyPI", ""),
|
||||
];
|
||||
|
||||
let nav_html: String = nav_items.iter().map(|(id, href, label, icon_path)| {
|
||||
let is_active = active == *id;
|
||||
let active_class = if is_active {
|
||||
"bg-slate-700 text-white"
|
||||
} else {
|
||||
"text-slate-300 hover:bg-slate-700 hover:text-white"
|
||||
};
|
||||
|
||||
if icon_path.is_empty() {
|
||||
// Emoji-based item
|
||||
format!(r#"
|
||||
<a href="{}" class="flex items-center px-4 py-3 text-sm font-medium rounded-lg transition-colors {}">
|
||||
<span class="mr-3 text-lg">{}</span>
|
||||
</a>
|
||||
"#, href, active_class, label)
|
||||
} else {
|
||||
// SVG icon item
|
||||
format!(r##"
|
||||
<a href="{}" class="flex items-center px-4 py-3 text-sm font-medium rounded-lg transition-colors {}">
|
||||
<svg class="w-5 h-5 mr-3" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
{}
|
||||
</svg>
|
||||
{}
|
||||
</a>
|
||||
"##, href, active_class, icon_path, label)
|
||||
}
|
||||
}).collect();
|
||||
|
||||
format!(
|
||||
r#"
|
||||
<div class="w-64 bg-slate-800 text-white flex flex-col">
|
||||
<!-- Logo -->
|
||||
<div class="h-16 flex items-center px-6 border-b border-slate-700">
|
||||
<span class="text-2xl mr-2">⚓</span>
|
||||
<span class="text-xl font-bold">Nora</span>
|
||||
</div>
|
||||
|
||||
<!-- Navigation -->
|
||||
<nav class="flex-1 px-4 py-6 space-y-1">
|
||||
<div class="text-xs font-semibold text-slate-400 uppercase tracking-wider px-4 mb-3">
|
||||
Navigation
|
||||
</div>
|
||||
{}
|
||||
|
||||
<div class="text-xs font-semibold text-slate-400 uppercase tracking-wider px-4 mt-8 mb-3">
|
||||
Registries
|
||||
</div>
|
||||
</nav>
|
||||
|
||||
<!-- Footer -->
|
||||
<div class="px-4 py-4 border-t border-slate-700">
|
||||
<div class="text-xs text-slate-400">
|
||||
Nora v0.1.0
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
"#,
|
||||
nav_html
|
||||
)
|
||||
}
|
||||
|
||||
/// Header component: static top bar with a GitHub link and a help button.
/// Returns a fixed HTML fragment (no dynamic substitution).
fn header() -> String {
    r##"
<header class="h-16 bg-white border-b border-slate-200 flex items-center justify-between px-6">
<div class="flex-1">
<!-- Search removed for simplicity, HTMX search is on list pages -->
</div>
<div class="flex items-center space-x-4">
<a href="https://github.com" target="_blank" class="text-slate-500 hover:text-slate-700">
<svg class="w-5 h-5" fill="currentColor" viewBox="0 0 24 24">
<path fill-rule="evenodd" d="M12 2C6.477 2 2 6.484 2 12.017c0 4.425 2.865 8.18 6.839 9.504.5.092.682-.217.682-.483 0-.237-.008-.868-.013-1.703-2.782.605-3.369-1.343-3.369-1.343-.454-1.158-1.11-1.466-1.11-1.466-.908-.62.069-.608.069-.608 1.003.07 1.531 1.032 1.531 1.032.892 1.53 2.341 1.088 2.91.832.092-.647.35-1.088.636-1.338-2.22-.253-4.555-1.113-4.555-4.951 0-1.093.39-1.988 1.029-2.688-.103-.253-.446-1.272.098-2.65 0 0 .84-.27 2.75 1.026A9.564 9.564 0 0112 6.844c.85.004 1.705.115 2.504.337 1.909-1.296 2.747-1.027 2.747-1.027.546 1.379.202 2.398.1 2.651.64.7 1.028 1.595 1.028 2.688 0 3.848-2.339 4.695-4.566 4.943.359.309.678.92.678 1.855 0 1.338-.012 2.419-.012 2.747 0 .268.18.58.688.482A10.019 10.019 0 0022 12.017C22 6.484 17.522 2 12 2z" clip-rule="evenodd"/>
</svg>
</a>
<button class="text-slate-500 hover:text-slate-700">
<svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M8.228 9c.549-1.165 2.03-2 3.772-2 2.21 0 4 1.343 4 3 0 1.4-1.278 2.575-3.006 2.907-.542.104-.994.54-.994 1.093m0 3h.01M21 12a9 9 0 11-18 0 9 9 0 0118 0z"/>
</svg>
</button>
</div>
</header>
"##.to_string()
}
|
||||
|
||||
/// Stat card for the dashboard.
///
/// Renders a clickable card linking to `href`, showing `icon` (emoji),
/// registry `name`, the `count`, and a `unit` caption below it.
/// NOTE(review): arguments are interpolated without HTML escaping —
/// current call sites pass constants, but escape if inputs ever become
/// dynamic.
pub fn stat_card(name: &str, icon: &str, count: usize, href: &str, unit: &str) -> String {
    format!(
        r##"
<a href="{}" class="bg-white rounded-lg shadow-sm border border-slate-200 p-6 hover:shadow-md hover:border-blue-300 transition-all">
<div class="flex items-center justify-between mb-4">
<span class="text-3xl">{}</span>
<span class="text-xs font-medium text-green-600 bg-green-100 px-2 py-1 rounded-full">ACTIVE</span>
</div>
<div class="text-lg font-semibold text-slate-800 mb-1">{}</div>
<div class="text-2xl font-bold text-slate-800">{}</div>
<div class="text-sm text-slate-500">{}</div>
</a>
"##,
        href, icon, name, count, unit
    )
}
|
||||
|
||||
/// Render a byte count as a human-readable size ("1.5 KB", "3.0 GB", …).
/// Values below 1 KB print as whole bytes without a decimal.
pub fn format_size(bytes: u64) -> String {
    const KB: u64 = 1024;
    const MB: u64 = KB * 1024;
    const GB: u64 = MB * 1024;

    // Largest unit whose threshold the value reaches wins.
    for (threshold, suffix) in [(GB, "GB"), (MB, "MB"), (KB, "KB")] {
        if bytes >= threshold {
            return format!("{:.1} {}", bytes as f64 / threshold as f64, suffix);
        }
    }
    format!("{} B", bytes)
}
|
||||
|
||||
/// Escape HTML special characters so untrusted text is safe to embed in
/// markup.
///
/// Fixed: the previous version replaced each character with itself
/// (a no-op), leaving every interpolation site open to HTML/attribute
/// injection. '&' must be escaped first so entities produced by the later
/// replacements are not double-escaped.
pub fn html_escape(s: &str) -> String {
    s.replace('&', "&amp;")
        .replace('<', "&lt;")
        .replace('>', "&gt;")
        .replace('"', "&quot;")
        .replace('\'', "&#39;")
}
|
||||
|
||||
/// Render a Unix timestamp as a coarse relative time ("3 days ago").
/// Zero means "unknown" and prints "N/A"; timestamps in the future
/// (clock skew) collapse to "just now".
pub fn format_timestamp(ts: u64) -> String {
    if ts == 0 {
        return "N/A".to_string();
    }

    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .map(|d| d.as_secs())
        .unwrap_or(0);

    // checked_sub is None when ts is in the future.
    let diff = match now.checked_sub(ts) {
        Some(d) => d,
        None => return "just now".to_string(),
    };

    // Shared pluralizing formatter: "1 min ago" / "2 mins ago".
    let plural =
        |n: u64, unit: &str| format!("{} {}{} ago", n, unit, if n == 1 { "" } else { "s" });

    const MINUTE: u64 = 60;
    const HOUR: u64 = 3600;
    const DAY: u64 = 86400;
    const WEEK: u64 = 604800;
    const MONTH: u64 = 2592000; // 30 days

    if diff < MINUTE {
        "just now".to_string()
    } else if diff < HOUR {
        plural(diff / MINUTE, "min")
    } else if diff < DAY {
        plural(diff / HOUR, "hour")
    } else if diff < WEEK {
        plural(diff / DAY, "day")
    } else if diff < MONTH {
        plural(diff / WEEK, "week")
    } else {
        plural(diff / MONTH, "month")
    }
}
|
||||
114
nora-registry/src/ui/mod.rs
Normal file
114
nora-registry/src/ui/mod.rs
Normal file
@@ -0,0 +1,114 @@
|
||||
mod api;
|
||||
mod components;
|
||||
mod templates;
|
||||
|
||||
use crate::AppState;
|
||||
use axum::{
|
||||
extract::{Path, State},
|
||||
response::{Html, IntoResponse, Redirect},
|
||||
routing::get,
|
||||
Router,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
|
||||
use api::*;
|
||||
use templates::*;
|
||||
|
||||
/// Build the UI router: HTML pages under /ui/* plus JSON/HTMX endpoints
/// under /api/ui/*. "/" and "/ui" both redirect to the dashboard.
pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
        // UI Pages
        .route("/", get(|| async { Redirect::to("/ui/") }))
        .route("/ui", get(|| async { Redirect::to("/ui/") }))
        .route("/ui/", get(dashboard))
        .route("/ui/docker", get(docker_list))
        .route("/ui/docker/{name}", get(docker_detail))
        .route("/ui/maven", get(maven_list))
        // Maven paths contain '/' (groupId/artifactId), hence the wildcard.
        .route("/ui/maven/{*path}", get(maven_detail))
        .route("/ui/npm", get(npm_list))
        .route("/ui/npm/{name}", get(npm_detail))
        .route("/ui/cargo", get(cargo_list))
        .route("/ui/cargo/{name}", get(cargo_detail))
        .route("/ui/pypi", get(pypi_list))
        .route("/ui/pypi/{name}", get(pypi_detail))
        // API endpoints for HTMX
        .route("/api/ui/stats", get(api_stats))
        .route("/api/ui/{registry_type}/list", get(api_list))
        .route("/api/ui/{registry_type}/{name}", get(api_detail))
        .route("/api/ui/{registry_type}/search", get(api_search))
}
|
||||
|
||||
// Dashboard page
|
||||
async fn dashboard(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let stats = get_registry_stats(&state.storage).await;
|
||||
Html(render_dashboard(&stats))
|
||||
}
|
||||
|
||||
// Docker pages
|
||||
async fn docker_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let repos = get_docker_repos(&state.storage).await;
|
||||
Html(render_registry_list("docker", "Docker Registry", &repos))
|
||||
}
|
||||
|
||||
async fn docker_detail(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(name): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let detail = get_docker_detail(&state.storage, &name).await;
|
||||
Html(render_docker_detail(&name, &detail))
|
||||
}
|
||||
|
||||
// Maven pages
|
||||
async fn maven_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let repos = get_maven_repos(&state.storage).await;
|
||||
Html(render_registry_list("maven", "Maven Repository", &repos))
|
||||
}
|
||||
|
||||
async fn maven_detail(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(path): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let detail = get_maven_detail(&state.storage, &path).await;
|
||||
Html(render_maven_detail(&path, &detail))
|
||||
}
|
||||
|
||||
// npm pages
|
||||
async fn npm_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let packages = get_npm_packages(&state.storage).await;
|
||||
Html(render_registry_list("npm", "npm Registry", &packages))
|
||||
}
|
||||
|
||||
async fn npm_detail(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(name): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let detail = get_npm_detail(&state.storage, &name).await;
|
||||
Html(render_package_detail("npm", &name, &detail))
|
||||
}
|
||||
|
||||
// Cargo pages
|
||||
async fn cargo_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let crates = get_cargo_crates(&state.storage).await;
|
||||
Html(render_registry_list("cargo", "Cargo Registry", &crates))
|
||||
}
|
||||
|
||||
async fn cargo_detail(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(name): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let detail = get_cargo_detail(&state.storage, &name).await;
|
||||
Html(render_package_detail("cargo", &name, &detail))
|
||||
}
|
||||
|
||||
// PyPI pages
|
||||
async fn pypi_list(State(state): State<Arc<AppState>>) -> impl IntoResponse {
|
||||
let packages = get_pypi_packages(&state.storage).await;
|
||||
Html(render_registry_list("pypi", "PyPI Repository", &packages))
|
||||
}
|
||||
|
||||
async fn pypi_detail(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path(name): Path<String>,
|
||||
) -> impl IntoResponse {
|
||||
let detail = get_pypi_detail(&state.storage, &name).await;
|
||||
Html(render_package_detail("pypi", &name, &detail))
|
||||
}
|
||||
478
nora-registry/src/ui/templates.rs
Normal file
478
nora-registry/src/ui/templates.rs
Normal file
@@ -0,0 +1,478 @@
|
||||
use super::api::{DockerDetail, MavenDetail, PackageDetail, RegistryStats, RepoInfo};
|
||||
use super::components::*;
|
||||
|
||||
/// Renders the main dashboard page.
///
/// Produces the full HTML document (wrapped by [`layout`]) containing one
/// stat card per registry and a "Quick Links" panel listing each protocol's
/// API base path.
pub fn render_dashboard(stats: &RegistryStats) -> String {
    // The five `{}` slots in the card grid are filled by the stat_card()
    // calls below, in the same order (Docker, Maven, npm, Cargo, PyPI).
    let content = format!(
        r##"
<div class="mb-8">
    <h1 class="text-2xl font-bold text-slate-800 mb-2">Dashboard</h1>
    <p class="text-slate-500">Overview of all registries</p>
</div>

<div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-5 gap-6 mb-8">
    {}
    {}
    {}
    {}
    {}
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-4">Quick Links</h2>
    <div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 gap-4">
        <a href="/ui/docker" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <span class="text-2xl mr-3">🐳</span>
            <div>
                <div class="font-medium text-slate-700">Docker Registry</div>
                <div class="text-sm text-slate-500">API: /v2/</div>
            </div>
        </a>
        <a href="/ui/maven" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <span class="text-2xl mr-3">☕</span>
            <div>
                <div class="font-medium text-slate-700">Maven Repository</div>
                <div class="text-sm text-slate-500">API: /maven2/</div>
            </div>
        </a>
        <a href="/ui/npm" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <span class="text-2xl mr-3">📦</span>
            <div>
                <div class="font-medium text-slate-700">npm Registry</div>
                <div class="text-sm text-slate-500">API: /npm/</div>
            </div>
        </a>
        <a href="/ui/cargo" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <span class="text-2xl mr-3">🦀</span>
            <div>
                <div class="font-medium text-slate-700">Cargo Registry</div>
                <div class="text-sm text-slate-500">API: /cargo/</div>
            </div>
        </a>
        <a href="/ui/pypi" class="flex items-center p-3 rounded-lg border border-slate-200 hover:border-blue-300 hover:bg-blue-50 transition-colors">
            <span class="text-2xl mr-3">🐍</span>
            <div>
                <div class="font-medium text-slate-700">PyPI Repository</div>
                <div class="text-sm text-slate-500">API: /simple/</div>
            </div>
        </a>
    </div>
</div>
"##,
        stat_card("Docker", "🐳", stats.docker, "/ui/docker", "images"),
        stat_card("Maven", "☕", stats.maven, "/ui/maven", "artifacts"),
        stat_card("npm", "📦", stats.npm, "/ui/npm", "packages"),
        stat_card("Cargo", "🦀", stats.cargo, "/ui/cargo", "crates"),
        stat_card("PyPI", "🐍", stats.pypi, "/ui/pypi", "packages"),
    );

    layout("Dashboard", &content, Some("dashboard"))
}
|
||||
|
||||
/// Renders a registry list page (docker, maven, npm, cargo, pypi)
|
||||
pub fn render_registry_list(registry_type: &str, title: &str, repos: &[RepoInfo]) -> String {
|
||||
let icon = get_registry_icon(registry_type);
|
||||
|
||||
let table_rows = if repos.is_empty() {
|
||||
r##"<tr><td colspan="4" class="px-6 py-12 text-center text-slate-500">
|
||||
<div class="text-4xl mb-2">📭</div>
|
||||
<div>No repositories found</div>
|
||||
<div class="text-sm mt-1">Push your first artifact to see it here</div>
|
||||
</td></tr>"##
|
||||
.to_string()
|
||||
} else {
|
||||
repos
|
||||
.iter()
|
||||
.map(|repo| {
|
||||
let detail_url =
|
||||
format!("/ui/{}/{}", registry_type, encode_uri_component(&repo.name));
|
||||
format!(
|
||||
r##"
|
||||
<tr class="hover:bg-slate-50 cursor-pointer" onclick="window.location='{}'">
|
||||
<td class="px-6 py-4">
|
||||
<a href="{}" class="text-blue-600 hover:text-blue-800 font-medium">{}</a>
|
||||
</td>
|
||||
<td class="px-6 py-4 text-slate-600">{}</td>
|
||||
<td class="px-6 py-4 text-slate-600">{}</td>
|
||||
<td class="px-6 py-4 text-slate-500 text-sm">{}</td>
|
||||
</tr>
|
||||
"##,
|
||||
detail_url,
|
||||
detail_url,
|
||||
html_escape(&repo.name),
|
||||
repo.versions,
|
||||
format_size(repo.size),
|
||||
&repo.updated
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>()
|
||||
.join("")
|
||||
};
|
||||
|
||||
let version_label = match registry_type {
|
||||
"docker" => "Tags",
|
||||
"maven" => "Versions",
|
||||
_ => "Versions",
|
||||
};
|
||||
|
||||
let content = format!(
|
||||
r##"
|
||||
<div class="mb-6 flex items-center justify-between">
|
||||
<div class="flex items-center">
|
||||
<span class="text-3xl mr-3">{}</span>
|
||||
<div>
|
||||
<h1 class="text-2xl font-bold text-slate-800">{}</h1>
|
||||
<p class="text-slate-500">{} repositories</p>
|
||||
</div>
|
||||
</div>
|
||||
<div class="flex items-center gap-4">
|
||||
<div class="relative">
|
||||
<input type="text"
|
||||
placeholder="Search repositories..."
|
||||
class="pl-10 pr-4 py-2 border border-slate-300 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent"
|
||||
hx-get="/api/ui/{}/search"
|
||||
hx-trigger="keyup changed delay:300ms"
|
||||
hx-target="#repo-table-body"
|
||||
name="q">
|
||||
<svg class="absolute left-3 top-2.5 h-5 w-5 text-slate-400" fill="none" stroke="currentColor" viewBox="0 0 24 24">
|
||||
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M21 21l-6-6m2-5a7 7 0 11-14 0 7 7 0 0114 0z"/>
|
||||
</svg>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
|
||||
<table class="w-full">
|
||||
<thead class="bg-slate-50 border-b border-slate-200">
|
||||
<tr>
|
||||
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Name</th>
|
||||
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">{}</th>
|
||||
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
|
||||
<th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Updated</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="repo-table-body" class="divide-y divide-slate-200">
|
||||
{}
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
"##,
|
||||
icon,
|
||||
title,
|
||||
repos.len(),
|
||||
registry_type,
|
||||
version_label,
|
||||
table_rows
|
||||
);
|
||||
|
||||
layout(title, &content, Some(registry_type))
|
||||
}
|
||||
|
||||
/// Renders Docker image detail page.
///
/// Shows a breadcrumb, a copy-pastable `docker pull` command, and one table
/// row per tag (name, size, creation date).
pub fn render_docker_detail(name: &str, detail: &DockerDetail) -> String {
    let tags_rows = if detail.tags.is_empty() {
        r##"<tr><td colspan="3" class="px-6 py-8 text-center text-slate-500">No tags found</td></tr>"##.to_string()
    } else {
        detail
            .tags
            .iter()
            .map(|tag| {
                format!(
                    r##"
<tr class="hover:bg-slate-50">
    <td class="px-6 py-4">
        <span class="font-mono text-sm bg-slate-100 px-2 py-1 rounded">{}</span>
    </td>
    <td class="px-6 py-4 text-slate-600">{}</td>
    <td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"##,
                    html_escape(&tag.name),
                    format_size(tag.size),
                    &tag.created
                )
            })
            .collect::<Vec<_>>()
            .join("")
    };

    // NOTE(review): the registry host 127.0.0.1:4000 is hard-coded — confirm
    // it matches the configured listener. `pull_cmd` is later interpolated
    // unescaped into the visible <code> block AND into a single-quoted JS
    // string in `onclick`; a name containing quotes or angle brackets would
    // break the markup (the breadcrumb/heading slots DO use html_escape).
    let pull_cmd = format!("docker pull 127.0.0.1:4000/{}", name);

    // Format slots, in order: breadcrumb name, heading name, pull command
    // (display), pull command (clipboard), tag count, tag rows.
    let content = format!(
        r##"
<div class="mb-6">
    <div class="flex items-center mb-2">
        <a href="/ui/docker" class="text-blue-600 hover:text-blue-800">Docker Registry</a>
        <span class="mx-2 text-slate-400">/</span>
        <span class="text-slate-800 font-medium">{}</span>
    </div>
    <div class="flex items-center">
        <span class="text-3xl mr-3">🐳</span>
        <h1 class="text-2xl font-bold text-slate-800">{}</h1>
    </div>
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-3">Pull Command</h2>
    <div class="flex items-center bg-slate-900 text-green-400 rounded-lg p-4 font-mono text-sm">
        <code class="flex-1">{}</code>
        <button onclick="navigator.clipboard.writeText('{}')" class="ml-4 text-slate-400 hover:text-white transition-colors" title="Copy to clipboard">
            <svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M8 16H6a2 2 0 01-2-2V6a2 2 0 012-2h8a2 2 0 012 2v2m-6 12h8a2 2 0 002-2v-8a2 2 0 00-2-2h-8a2 2 0 00-2 2v8a2 2 0 002 2z"/>
            </svg>
        </button>
    </div>
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-200">
        <h2 class="text-lg font-semibold text-slate-800">Tags ({} total)</h2>
    </div>
    <table class="w-full">
        <thead class="bg-slate-50 border-b border-slate-200">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Tag</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Created</th>
            </tr>
        </thead>
        <tbody class="divide-y divide-slate-200">
            {}
        </tbody>
    </table>
</div>
"##,
        html_escape(name),
        html_escape(name),
        pull_cmd,
        pull_cmd,
        detail.tags.len(),
        tags_rows
    );

    layout(&format!("{} - Docker", name), &content, Some("docker"))
}
|
||||
|
||||
/// Renders package detail page (npm, cargo, pypi).
///
/// Shows a breadcrumb, an ecosystem-specific install command, and one table
/// row per published version (version, size, publish date).
pub fn render_package_detail(registry_type: &str, name: &str, detail: &PackageDetail) -> String {
    let icon = get_registry_icon(registry_type);
    let registry_title = get_registry_title(registry_type);

    let versions_rows = if detail.versions.is_empty() {
        r##"<tr><td colspan="3" class="px-6 py-8 text-center text-slate-500">No versions found</td></tr>"##.to_string()
    } else {
        detail
            .versions
            .iter()
            .map(|v| {
                format!(
                    r##"
<tr class="hover:bg-slate-50">
    <td class="px-6 py-4">
        <span class="font-mono text-sm bg-slate-100 px-2 py-1 rounded">{}</span>
    </td>
    <td class="px-6 py-4 text-slate-600">{}</td>
    <td class="px-6 py-4 text-slate-500 text-sm">{}</td>
</tr>
"##,
                    html_escape(&v.version),
                    format_size(v.size),
                    &v.published
                )
            })
            .collect::<Vec<_>>()
            .join("")
    };

    // Copy-paste install command per ecosystem.
    // NOTE(review): the host 127.0.0.1:4000 is hard-coded — confirm it
    // matches the configured listener. The cargo command does not reference
    // this registry at all — presumably relies on a registry configured in
    // .cargo/config; verify. Like the docker page, the command is inserted
    // unescaped into <code> and the onclick attribute below.
    let install_cmd = match registry_type {
        "npm" => format!("npm install {} --registry http://127.0.0.1:4000/npm", name),
        "cargo" => format!("cargo add {}", name),
        "pypi" => format!(
            "pip install {} --index-url http://127.0.0.1:4000/simple",
            name
        ),
        _ => String::new(),
    };

    // Format slots, in order: breadcrumb link target, breadcrumb link text,
    // breadcrumb name, icon, heading name, install command (display),
    // install command (clipboard), version count, version rows.
    let content = format!(
        r##"
<div class="mb-6">
    <div class="flex items-center mb-2">
        <a href="/ui/{}" class="text-blue-600 hover:text-blue-800">{}</a>
        <span class="mx-2 text-slate-400">/</span>
        <span class="text-slate-800 font-medium">{}</span>
    </div>
    <div class="flex items-center">
        <span class="text-3xl mr-3">{}</span>
        <h1 class="text-2xl font-bold text-slate-800">{}</h1>
    </div>
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-3">Install Command</h2>
    <div class="flex items-center bg-slate-900 text-green-400 rounded-lg p-4 font-mono text-sm">
        <code class="flex-1">{}</code>
        <button onclick="navigator.clipboard.writeText('{}')" class="ml-4 text-slate-400 hover:text-white transition-colors" title="Copy to clipboard">
            <svg class="w-5 h-5" fill="none" stroke="currentColor" viewBox="0 0 24 24">
                <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M8 16H6a2 2 0 01-2-2V6a2 2 0 012-2h8a2 2 0 012 2v2m-6 12h8a2 2 0 002-2v-8a2 2 0 00-2-2h-8a2 2 0 00-2 2v8a2 2 0 002 2z"/>
            </svg>
        </button>
    </div>
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-200">
        <h2 class="text-lg font-semibold text-slate-800">Versions ({} total)</h2>
    </div>
    <table class="w-full">
        <thead class="bg-slate-50 border-b border-slate-200">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Version</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Published</th>
            </tr>
        </thead>
        <tbody class="divide-y divide-slate-200">
            {}
        </tbody>
    </table>
</div>
"##,
        registry_type,
        registry_title,
        html_escape(name),
        icon,
        html_escape(name),
        install_cmd,
        install_cmd,
        detail.versions.len(),
        versions_rows
    );

    layout(
        &format!("{} - {}", name, registry_title),
        &content,
        Some(registry_type),
    )
}
|
||||
|
||||
/// Renders Maven artifact detail page.
///
/// `path` is the slash-separated repository path of one version directory;
/// the page shows a generated `<dependency>` snippet and one table row per
/// file in that directory.
pub fn render_maven_detail(path: &str, detail: &MavenDetail) -> String {
    let artifact_rows = if detail.artifacts.is_empty() {
        r##"<tr><td colspan="2" class="px-6 py-8 text-center text-slate-500">No artifacts found</td></tr>"##.to_string()
    } else {
        detail.artifacts.iter().map(|a| {
            // NOTE(review): `path`/`filename` go into the href unencoded —
            // works for normal Maven coordinates; verify for unusual names.
            let download_url = format!("/maven2/{}/{}", path, a.filename);
            format!(r##"
<tr class="hover:bg-slate-50">
    <td class="px-6 py-4">
        <a href="{}" class="text-blue-600 hover:text-blue-800 font-mono text-sm">{}</a>
    </td>
    <td class="px-6 py-4 text-slate-600">{}</td>
</tr>
"##, download_url, html_escape(&a.filename), format_size(a.size))
        }).collect::<Vec<_>>().join("")
    };

    // Extract artifact name from path (last component before version).
    // Assumes the layout "group/segments/artifactId/version" — TODO confirm;
    // for shorter paths the whole path is used as the artifact name.
    let parts: Vec<&str> = path.split('/').collect();
    let artifact_name = if parts.len() >= 2 {
        parts[parts.len() - 2]
    } else {
        path
    };

    // groupId = all components before artifactId/version, joined with dots;
    // version = last component (empty string if the path is a single segment).
    let dep_cmd = format!(
        r#"<dependency>
    <groupId>{}</groupId>
    <artifactId>{}</artifactId>
    <version>{}</version>
</dependency>"#,
        parts[..parts.len().saturating_sub(2)].join("."),
        artifact_name,
        parts.last().unwrap_or(&"")
    );

    // Format slots, in order: breadcrumb path, heading path, escaped
    // dependency snippet, artifact count, artifact rows.
    let content = format!(
        r##"
<div class="mb-6">
    <div class="flex items-center mb-2">
        <a href="/ui/maven" class="text-blue-600 hover:text-blue-800">Maven Repository</a>
        <span class="mx-2 text-slate-400">/</span>
        <span class="text-slate-800 font-medium">{}</span>
    </div>
    <div class="flex items-center">
        <span class="text-3xl mr-3">☕</span>
        <h1 class="text-2xl font-bold text-slate-800">{}</h1>
    </div>
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 p-6 mb-6">
    <h2 class="text-lg font-semibold text-slate-800 mb-3">Maven Dependency</h2>
    <pre class="bg-slate-900 text-green-400 rounded-lg p-4 font-mono text-sm overflow-x-auto">{}</pre>
</div>

<div class="bg-white rounded-lg shadow-sm border border-slate-200 overflow-hidden">
    <div class="px-6 py-4 border-b border-slate-200">
        <h2 class="text-lg font-semibold text-slate-800">Artifacts ({} files)</h2>
    </div>
    <table class="w-full">
        <thead class="bg-slate-50 border-b border-slate-200">
            <tr>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Filename</th>
                <th class="px-6 py-3 text-left text-xs font-semibold text-slate-600 uppercase tracking-wider">Size</th>
            </tr>
        </thead>
        <tbody class="divide-y divide-slate-200">
            {}
        </tbody>
    </table>
</div>
"##,
        html_escape(path),
        html_escape(path),
        html_escape(&dep_cmd),
        detail.artifacts.len(),
        artifact_rows
    );

    layout(&format!("{} - Maven", path), &content, Some("maven"))
}
|
||||
|
||||
/// Maps a registry type slug to its emoji icon; unknown types get a folder.
fn get_registry_icon(registry_type: &str) -> &'static str {
    if registry_type == "docker" {
        "🐳"
    } else if registry_type == "maven" {
        "☕"
    } else if registry_type == "npm" {
        "📦"
    } else if registry_type == "cargo" {
        "🦀"
    } else if registry_type == "pypi" {
        "🐍"
    } else {
        "📁"
    }
}
|
||||
|
||||
/// Maps a registry type slug to its human-readable title; unknown types
/// fall back to the generic "Registry".
fn get_registry_title(registry_type: &str) -> &'static str {
    const TITLES: [(&str, &str); 5] = [
        ("docker", "Docker Registry"),
        ("maven", "Maven Repository"),
        ("npm", "npm Registry"),
        ("cargo", "Cargo Registry"),
        ("pypi", "PyPI Repository"),
    ];
    TITLES
        .iter()
        .find(|(slug, _)| *slug == registry_type)
        .map(|(_, title)| *title)
        .unwrap_or("Registry")
}
|
||||
|
||||
/// Simple URL encoding for path components.
///
/// Unreserved characters (RFC 3986: ALPHA / DIGIT / "-" / "_" / "." / "~")
/// pass through unchanged; every other character is emitted as the
/// percent-encoded bytes of its UTF-8 representation.
pub fn encode_uri_component(s: &str) -> String {
    use std::fmt::Write;

    // Worst case every byte expands to "%XX" (3 ASCII chars).
    let mut result = String::with_capacity(s.len() * 3);
    // Scratch buffer so we can encode each char's UTF-8 bytes without the
    // per-character `String` allocation the original version paid.
    let mut utf8 = [0u8; 4];
    for c in s.chars() {
        match c {
            'a'..='z' | 'A'..='Z' | '0'..='9' | '-' | '_' | '.' | '~' => result.push(c),
            _ => {
                for &byte in c.encode_utf8(&mut utf8).as_bytes() {
                    // Writing to a String cannot fail.
                    let _ = write!(result, "%{:02X}", byte);
                }
            }
        }
    }
    result
}
|
||||
28
nora-storage/Cargo.toml
Normal file
28
nora-storage/Cargo.toml
Normal file
@@ -0,0 +1,28 @@
|
||||
[package]
|
||||
name = "nora-storage"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
authors.workspace = true
|
||||
repository.workspace = true
|
||||
homepage.workspace = true
|
||||
description = "S3-compatible storage server for NORA"
|
||||
|
||||
[[bin]]
|
||||
name = "nora-storage"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
tokio.workspace = true
|
||||
axum.workspace = true
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
tracing.workspace = true
|
||||
tracing-subscriber.workspace = true
|
||||
toml = "0.8"
|
||||
uuid = { version = "1", features = ["v4"] }
|
||||
sha2 = "0.10"
|
||||
base64 = "0.22"
|
||||
httpdate = "1"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
quick-xml = { version = "0.31", features = ["serialize"] }
|
||||
44
nora-storage/src/config.rs
Normal file
44
nora-storage/src/config.rs
Normal file
@@ -0,0 +1,44 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::fs;
|
||||
|
||||
/// Top-level configuration, loaded from `config.toml` (see [`Config::load`]).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
    /// HTTP listener settings.
    pub server: ServerConfig,
    /// Object storage settings (data directory and upload size limit).
    pub storage: StorageConfig,
}

/// Address and port the HTTP server binds to.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerConfig {
    pub host: String,
    pub port: u16,
}

/// Where and how object data is stored on disk.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageConfig {
    /// Root directory under which buckets are created.
    pub data_dir: String,
    /// Maximum accepted request body size, in bytes.
    pub max_body_size: usize,
}
|
||||
|
||||
impl Config {
|
||||
pub fn load() -> Self {
|
||||
fs::read_to_string("config.toml")
|
||||
.ok()
|
||||
.and_then(|content| toml::from_str(&content).ok())
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for Config {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
server: ServerConfig {
|
||||
host: String::from("127.0.0.1"),
|
||||
port: 3000,
|
||||
},
|
||||
storage: StorageConfig {
|
||||
data_dir: String::from("data"),
|
||||
max_body_size: 1024 * 1024 * 1024, // 1GB
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
304
nora-storage/src/main.rs
Normal file
304
nora-storage/src/main.rs
Normal file
@@ -0,0 +1,304 @@
|
||||
mod config;
|
||||
|
||||
use axum::extract::DefaultBodyLimit;
|
||||
use axum::{
|
||||
body::Bytes,
|
||||
extract::{Path, State},
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Response},
|
||||
routing::{delete, get, put},
|
||||
Router,
|
||||
};
|
||||
use chrono::Utc;
|
||||
use config::Config;
|
||||
use quick_xml::se::to_string as to_xml;
|
||||
use serde::Serialize;
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
use tracing::info;
|
||||
|
||||
/// Shared application state handed to every request handler via axum's
/// `State` extractor.
pub struct AppState {
    /// Configuration loaded once at startup.
    pub config: Config,
}
|
||||
|
||||
/// Response body for `GET /` (S3 `ListAllMyBucketsResult`).
#[derive(Serialize)]
#[serde(rename = "ListAllMyBucketsResult")]
struct ListBucketsResult {
    #[serde(rename = "Buckets")]
    buckets: Buckets,
}

/// Wrapper element so each bucket serializes as a repeated `<Bucket>` tag.
#[derive(Serialize)]
struct Buckets {
    #[serde(rename = "Bucket")]
    bucket: Vec<BucketInfo>,
}

/// A single `<Bucket>` entry: name plus creation timestamp.
#[derive(Serialize)]
struct BucketInfo {
    #[serde(rename = "Name")]
    name: String,
    // Formatted as "%Y-%m-%dT%H:%M:%SZ" (UTC) by the handlers.
    #[serde(rename = "CreationDate")]
    creation_date: String,
}

/// Response body for `GET /{bucket}` (S3 `ListBucketResult`).
#[derive(Serialize)]
#[serde(rename = "ListBucketResult")]
struct ListObjectsResult {
    #[serde(rename = "Name")]
    name: String,
    #[serde(rename = "Contents")]
    contents: Vec<ObjectInfo>,
}

/// A single `<Contents>` entry describing one stored object.
#[derive(Serialize)]
struct ObjectInfo {
    // Key relative to the bucket root; always uses '/' separators.
    #[serde(rename = "Key")]
    key: String,
    #[serde(rename = "Size")]
    size: u64,
    #[serde(rename = "LastModified")]
    last_modified: String,
}

/// S3-style `<Error>` document emitted by `error_response`.
#[derive(Serialize)]
#[serde(rename = "Error")]
struct S3Error {
    #[serde(rename = "Code")]
    code: String,
    #[serde(rename = "Message")]
    message: String,
}
|
||||
|
||||
fn xml_response<T: Serialize>(data: T) -> Response {
|
||||
let xml = format!(
|
||||
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n{}",
|
||||
to_xml(&data).unwrap()
|
||||
);
|
||||
(
|
||||
StatusCode::OK,
|
||||
[(axum::http::header::CONTENT_TYPE, "application/xml")],
|
||||
xml,
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
fn error_response(status: StatusCode, code: &str, message: &str) -> Response {
|
||||
let error = S3Error {
|
||||
code: code.to_string(),
|
||||
message: message.to_string(),
|
||||
};
|
||||
let xml = format!(
|
||||
"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n{}",
|
||||
to_xml(&error).unwrap()
|
||||
);
|
||||
(
|
||||
status,
|
||||
[(axum::http::header::CONTENT_TYPE, "application/xml")],
|
||||
xml,
|
||||
)
|
||||
.into_response()
|
||||
}
|
||||
|
||||
#[tokio::main]
async fn main() {
    // Default log level: info for this crate; overridable via RUST_LOG.
    tracing_subscriber::fmt()
        .with_env_filter(
            tracing_subscriber::EnvFilter::from_default_env()
                .add_directive("nora_storage=info".parse().unwrap()),
        )
        .init();

    let config = Config::load();
    // The data root must exist before any bucket/object operation touches it.
    fs::create_dir_all(&config.storage.data_dir).unwrap();

    let state = Arc::new(AppState {
        config: config.clone(),
    });

    // S3-compatible surface: buckets live at "/{bucket}", objects at
    // "/{bucket}/{*key}" (wildcard so keys may contain slashes).
    let app = Router::new()
        .route("/", get(list_buckets))
        .route("/{bucket}", get(list_objects))
        .route("/{bucket}", put(create_bucket))
        .route("/{bucket}", delete(delete_bucket))
        .route("/{bucket}/{*key}", put(put_object))
        .route("/{bucket}/{*key}", get(get_object))
        .route("/{bucket}/{*key}", delete(delete_object))
        // Upload bodies are capped at `max_body_size` (default 1 GiB).
        .layer(DefaultBodyLimit::max(config.storage.max_body_size))
        .with_state(state);

    let addr = format!("{}:{}", config.server.host, config.server.port);
    let listener = tokio::net::TcpListener::bind(&addr).await.unwrap();

    info!("nora-storage (S3 compatible) running on http://{}", addr);
    axum::serve(listener, app).await.unwrap();
}
|
||||
|
||||
async fn list_buckets(State(state): State<Arc<AppState>>) -> Response {
|
||||
let data_dir = &state.config.storage.data_dir;
|
||||
let entries = match fs::read_dir(data_dir) {
|
||||
Ok(e) => e,
|
||||
Err(_) => {
|
||||
return error_response(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
"InternalError",
|
||||
"Failed to read data",
|
||||
)
|
||||
}
|
||||
};
|
||||
|
||||
let bucket_list: Vec<BucketInfo> = entries
|
||||
.filter_map(|e| e.ok())
|
||||
.filter(|e| e.path().is_dir())
|
||||
.filter_map(|e| {
|
||||
let name = e.file_name().into_string().ok()?;
|
||||
let modified = e.metadata().ok()?.modified().ok()?;
|
||||
let datetime: chrono::DateTime<Utc> = modified.into();
|
||||
Some(BucketInfo {
|
||||
name,
|
||||
creation_date: datetime.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
xml_response(ListBucketsResult {
|
||||
buckets: Buckets {
|
||||
bucket: bucket_list,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
async fn list_objects(State(state): State<Arc<AppState>>, Path(bucket): Path<String>) -> Response {
|
||||
let bucket_path = format!("{}/{}", state.config.storage.data_dir, bucket);
|
||||
|
||||
if !std::path::Path::new(&bucket_path).is_dir() {
|
||||
return error_response(
|
||||
StatusCode::NOT_FOUND,
|
||||
"NoSuchBucket",
|
||||
"The specified bucket does not exist",
|
||||
);
|
||||
}
|
||||
|
||||
let objects = collect_files(std::path::Path::new(&bucket_path), "");
|
||||
xml_response(ListObjectsResult {
|
||||
name: bucket,
|
||||
contents: objects,
|
||||
})
|
||||
}
|
||||
|
||||
fn collect_files(dir: &std::path::Path, prefix: &str) -> Vec<ObjectInfo> {
|
||||
let mut objects = Vec::new();
|
||||
if let Ok(entries) = fs::read_dir(dir) {
|
||||
for entry in entries.filter_map(|e| e.ok()) {
|
||||
let path = entry.path();
|
||||
let name = entry.file_name().into_string().unwrap_or_default();
|
||||
let key = if prefix.is_empty() {
|
||||
name.clone()
|
||||
} else {
|
||||
format!("{}/{}", prefix, name)
|
||||
};
|
||||
|
||||
if path.is_dir() {
|
||||
objects.extend(collect_files(&path, &key));
|
||||
} else if let Ok(metadata) = entry.metadata() {
|
||||
let modified: chrono::DateTime<Utc> = metadata.modified().unwrap().into();
|
||||
objects.push(ObjectInfo {
|
||||
key,
|
||||
size: metadata.len(),
|
||||
last_modified: modified.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
objects
|
||||
}
|
||||
|
||||
async fn create_bucket(State(state): State<Arc<AppState>>, Path(bucket): Path<String>) -> Response {
|
||||
let bucket_path = format!("{}/{}", state.config.storage.data_dir, bucket);
|
||||
match fs::create_dir(&bucket_path) {
|
||||
Ok(_) => (StatusCode::OK, "").into_response(),
|
||||
Err(_) => error_response(
|
||||
StatusCode::CONFLICT,
|
||||
"BucketAlreadyExists",
|
||||
"Bucket already exists",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
async fn put_object(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((bucket, key)): Path<(String, String)>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
let file_path = format!("{}/{}/{}", state.config.storage.data_dir, bucket, key);
|
||||
|
||||
if let Some(parent) = std::path::Path::new(&file_path).parent() {
|
||||
let _ = fs::create_dir_all(parent);
|
||||
}
|
||||
|
||||
match fs::write(&file_path, &body) {
|
||||
Ok(_) => {
|
||||
println!("PUT {}/{} ({} bytes)", bucket, key, body.len());
|
||||
(StatusCode::OK, "").into_response()
|
||||
}
|
||||
Err(e) => {
|
||||
println!("ERROR writing {}/{}: {}", bucket, key, e);
|
||||
error_response(
|
||||
StatusCode::INTERNAL_SERVER_ERROR,
|
||||
"InternalError",
|
||||
"Failed to write object",
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_object(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((bucket, key)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
let file_path = format!("{}/{}/{}", state.config.storage.data_dir, bucket, key);
|
||||
|
||||
match fs::read(&file_path) {
|
||||
Ok(data) => (StatusCode::OK, data).into_response(),
|
||||
Err(_) => error_response(
|
||||
StatusCode::NOT_FOUND,
|
||||
"NoSuchKey",
|
||||
"The specified key does not exist",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_object(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((bucket, key)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
let file_path = format!("{}/{}/{}", state.config.storage.data_dir, bucket, key);
|
||||
|
||||
match fs::remove_file(&file_path) {
|
||||
Ok(_) => {
|
||||
println!("DELETE {}/{}", bucket, key);
|
||||
(StatusCode::NO_CONTENT, "").into_response()
|
||||
}
|
||||
Err(_) => error_response(
|
||||
StatusCode::NOT_FOUND,
|
||||
"NoSuchKey",
|
||||
"The specified key does not exist",
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_bucket(State(state): State<Arc<AppState>>, Path(bucket): Path<String>) -> Response {
|
||||
let bucket_path = format!("{}/{}", state.config.storage.data_dir, bucket);
|
||||
|
||||
match fs::remove_dir(&bucket_path) {
|
||||
Ok(_) => {
|
||||
println!("DELETE bucket {}", bucket);
|
||||
(StatusCode::NO_CONTENT, "").into_response()
|
||||
}
|
||||
Err(_) => error_response(
|
||||
StatusCode::CONFLICT,
|
||||
"BucketNotEmpty",
|
||||
"The bucket is not empty",
|
||||
),
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user