Mirror of https://github.com/getnora-io/nora.git, synced 2026-04-13 10:50:32 +00:00.

Compare commits (63 commits)
| SHA1 |
|---|
| 206bc06927 |
| 32a0d97b2a |
| 6fa5dfd534 |
| 26e1e12e64 |
| 29516f4ea3 |
| 28ff719508 |
| d260ff8b5e |
| 578cdd7dd6 |
| 186855e892 |
| 78dd91795d |
| c1f6430aa9 |
| 52e59a8272 |
| 8b1b9c8401 |
| 62027c44dc |
| 68365dfe98 |
| 59cdd4530b |
| 1cc5c8cc86 |
| e2919b83de |
| c035561fd2 |
| 1a38902b0c |
| 3b9b2ee0a0 |
| b7cb458edf |
| e1a1d80a77 |
| b50dd6386e |
| 6b5a397862 |
| 6b4d627fa2 |
| 659e7730de |
| d0441f31d1 |
| 1956401932 |
| e415f0f1ce |
| aa86633a04 |
| 31afa1f70b |
| f36abd82ef |
| ea6a86b0f1 |
| 638f99d8dc |
| c55307a3af |
| cc416f3adf |
| 30aedac238 |
| 34e85acd6e |
| 41eefdd90d |
| 94ca418155 |
| e72648a6c4 |
| 18e93d23a9 |
| db05adb060 |
| a57de6690e |
| d3439ae33d |
| b3b74b8b2d |
| d41b55fa3a |
| 5a68bfd695 |
| 9c8fee5a5d |
| bbff337b4c |
| a73335c549 |
| ad6aba46b2 |
| 095270d113 |
| 769f5fb01d |
| 53884e143b |
| 0eb26f24f7 |
| fa962b2d6e |
| a1da4fff1e |
| 868c4feca7 |
| 5b4cba1392 |
| ad890be56a |
| 3b9ea37b0e |
2  .github/CODEOWNERS (vendored)
@@ -1,2 +0,0 @@
# Default owner for everything
* @devitway
42  .github/ISSUE_TEMPLATE/bug_report.yml (vendored)
@@ -1,57 +1,39 @@
name: Bug Report
description: Report a bug in NORA
labels: [bug]
description: Report a bug or unexpected behavior
labels: ["bug"]
body:
  - type: textarea
    id: description
    attributes:
      label: Description
      description: What happened?
      description: What happened? What did you expect?
    validations:
      required: true
  - type: textarea
    id: expected
    id: steps
    attributes:
      label: Expected Behavior
      description: What did you expect to happen?
  - type: textarea
    id: reproduce
    attributes:
      label: Steps to Reproduce
      description: How can we reproduce this?
    validations:
      required: true
      label: Steps to reproduce
      description: How can we reproduce the issue?
  - type: input
    id: version
    attributes:
      label: NORA Version
      description: Output of 'nora --version' or Docker tag
      placeholder: v0.3.0
    validations:
      required: true
      label: NORA version
      placeholder: "0.2.32"
  - type: dropdown
    id: registry
    id: protocol
    attributes:
      label: Registry Protocol
      label: Registry protocol
      options:
        - Docker/OCI
        - Docker
        - npm
        - Maven
        - PyPI
        - Cargo
        - Go
        - Raw
        - UI/Dashboard
        - Other
  - type: dropdown
    id: storage
    attributes:
      label: Storage Backend
      options:
        - Local filesystem
        - S3-compatible
  - type: textarea
    id: logs
    attributes:
      label: Relevant Logs
      label: Logs / error output
      render: shell
23  .github/ISSUE_TEMPLATE/feature_request.yml (vendored)
@@ -1,6 +1,6 @@
name: Feature Request
description: Suggest a new feature for NORA
labels: [enhancement]
description: Suggest a new feature or improvement
labels: ["enhancement"]
body:
  - type: textarea
    id: problem
@@ -12,26 +12,19 @@ body:
  - type: textarea
    id: solution
    attributes:
      label: Proposed Solution
      label: Proposed solution
      description: How would you like it to work?
    validations:
      required: true
  - type: textarea
    id: alternatives
    attributes:
      label: Alternatives Considered
      description: Other approaches you've thought about
  - type: dropdown
    id: registry
    id: protocol
    attributes:
      label: Related Registry
      label: Related protocol
      options:
        - Docker/OCI
        - Docker
        - npm
        - Maven
        - PyPI
        - Cargo
        - Go
        - Raw
        - CLI
        - UI/Dashboard
        - Core/General
        - General
16  .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -1,16 +0,0 @@
## Summary

<!-- What does this PR do? -->

## Changes

<!-- List key changes -->

## Checklist

- [ ] `cargo test` passes
- [ ] `cargo clippy` passes
- [ ] `cargo fmt --check` passes
- [ ] No `unwrap()` in production code
- [ ] New public API has documentation
- [ ] CHANGELOG updated (if user-facing change)
3  .github/actionlint.yaml (vendored)
@@ -1,3 +0,0 @@
self-hosted-runner:
  labels:
    - nora
BIN  .github/assets/dashboard.gif (vendored)
Binary file not shown. Before: 124 KiB.
49  .github/workflows/ci.yml (vendored)
@@ -30,55 +30,6 @@ jobs:
      - name: Run tests
        run: cargo test --package nora-registry

  lint-workflows:
    name: Lint Workflows
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6
      - name: Install actionlint
        run: bash <(curl -s https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash)
      - name: Run actionlint
        run: ./actionlint -ignore "shellcheck reported issue" -ignore "SC[0-9]"

  coverage:
    name: Coverage
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/main' && github.event_name == 'push'
    permissions:
      contents: read
    steps:
      - uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6

      - name: Install Rust
        uses: dtolnay/rust-toolchain@631a55b12751854ce901bb631d5902ceb48146f7 # stable

      - name: Cache cargo
        uses: Swatinem/rust-cache@42dc69e1aa15d09112580998cf2ef0119e2e91ae # v2

      - name: Install tarpaulin
        run: cargo install cargo-tarpaulin --locked

      - name: Run coverage
        run: |
          cargo tarpaulin --config tarpaulin.toml 2>&1 | tee /tmp/tarpaulin.log
          COVERAGE=$(python3 -c "import json; d=json.load(open('coverage/tarpaulin-report.json')); print(f\"{d['coverage']:.1f}\")")
          echo "COVERAGE=$COVERAGE" >> $GITHUB_ENV
          echo "Coverage: $COVERAGE%"

      - name: Update coverage badge
        uses: schneegans/dynamic-badges-action@0e50b8bad39e7e1afd3e4e9c2b7dd145fad07501 # v1.8.0
        with:
          auth: ${{ secrets.GIST_TOKEN }}
          gistID: ${{ vars.COVERAGE_GIST_ID }}
          filename: nora-coverage.json
          label: coverage
          message: ${{ env.COVERAGE }}%
          valColorRange: ${{ env.COVERAGE }}
          minColorRange: 0
          maxColorRange: 100

  security:
    name: Security
    runs-on: ubuntu-latest
70  .github/workflows/release.yml (vendored)
@@ -59,6 +59,7 @@ jobs:
        uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6
        with:
          images: |
            ${{ env.NORA }}/${{ env.IMAGE_NAME }}
            ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}}
@@ -74,6 +75,8 @@ jobs:
          push: true
          tags: ${{ steps.meta-alpine.outputs.tags }}
          labels: ${{ steps.meta-alpine.outputs.labels }}
          cache-from: type=registry,ref=${{ env.NORA }}/${{ env.IMAGE_NAME }}-cache:alpine,ignore-error=true
          cache-to: type=registry,ref=${{ env.NORA }}/${{ env.IMAGE_NAME }}-cache:alpine,mode=max

      # ── RED OS ───────────────────────────────────────────────────────────────
      - name: Extract metadata (redos)
@@ -81,6 +84,7 @@ jobs:
        uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6
        with:
          images: |
            ${{ env.NORA }}/${{ env.IMAGE_NAME }}
            ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          flavor: suffix=-redos,onlatest=true
          tags: |
@@ -97,6 +101,8 @@ jobs:
          push: true
          tags: ${{ steps.meta-redos.outputs.tags }}
          labels: ${{ steps.meta-redos.outputs.labels }}
          cache-from: type=registry,ref=${{ env.NORA }}/${{ env.IMAGE_NAME }}-cache:redos,ignore-error=true
          cache-to: type=registry,ref=${{ env.NORA }}/${{ env.IMAGE_NAME }}-cache:redos,mode=max

      # ── Astra Linux SE ───────────────────────────────────────────────────────
      - name: Extract metadata (astra)
@@ -104,6 +110,7 @@ jobs:
        uses: docker/metadata-action@030e881283bb7a6894de51c315a6bfe6a94e05cf # v6
        with:
          images: |
            ${{ env.NORA }}/${{ env.IMAGE_NAME }}
            ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          flavor: suffix=-astra,onlatest=true
          tags: |
@@ -120,10 +127,12 @@ jobs:
          push: true
          tags: ${{ steps.meta-astra.outputs.tags }}
          labels: ${{ steps.meta-astra.outputs.labels }}
          cache-from: type=registry,ref=${{ env.NORA }}/${{ env.IMAGE_NAME }}-cache:astra,ignore-error=true
          cache-to: type=registry,ref=${{ env.NORA }}/${{ env.IMAGE_NAME }}-cache:astra,mode=max

      # ── Smoke test ──────────────────────────────────────────────────────────
      - name: Install cosign
        uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v3
        uses: sigstore/cosign-installer@c56c2d3e59e4281cc41dea2217323ba5694b171e # v3

      - name: Sign Docker images (keyless Sigstore)
        run: |
@@ -136,7 +145,8 @@ jobs:
      - name: Smoke test — verify alpine image starts and responds
        run: |
          docker rm -f nora-smoke 2>/dev/null || true
          docker run --rm -d --name nora-smoke -p 5555:4000 -e NORA_HOST=0.0.0.0 ghcr.io/${{ github.repository }}:${{ steps.meta-alpine.outputs.version }}
          docker run --rm -d --name nora-smoke -p 5555:4000 -e NORA_HOST=0.0.0.0 \
            ${{ env.NORA }}/${{ env.IMAGE_NAME }}:latest
          for i in $(seq 1 10); do
            curl -sf http://localhost:5555/health && break || sleep 2
          done
@@ -172,11 +182,11 @@ jobs:
        uses: aquasecurity/trivy-action@57a97c7e7821a5776cebc9bb87c984fa69cba8f1 # 0.35.0
        with:
          scan-type: image
          image-ref: ghcr.io/${{ github.repository }}:${{ steps.ver.outputs.tag }}${{ matrix.suffix }}
          image-ref: ${{ env.NORA }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}${{ matrix.suffix }}
          format: sarif
          output: trivy-image-${{ matrix.name }}.sarif
          severity: HIGH,CRITICAL
          exit-code: 0
          exit-code: 1

      - name: Upload Trivy image results to GitHub Security tab
        uses: github/codeql-action/upload-sarif@a60c4df7a135c7317c1e9ddf9b5a9b07a910dda9 # v4
@@ -215,59 +225,25 @@ jobs:
          echo "Binary size: $(du -sh nora-linux-amd64 | cut -f1)"
          cat nora-linux-amd64.sha256

      - name: Generate SLSA provenance
        uses: slsa-framework/slsa-github-generator/.github/actions/generate-builder@f7dd8c54c2067bafc12ca7a55595d5ee9b75204a # v2.1.0
        id: provenance-generate
        continue-on-error: true

      - name: Upload provenance attestation
        if: always()
        run: |
          # Generate provenance using gh attestation (built-in GitHub feature)
          gh attestation create ./nora-linux-amd64 --repo ${{ github.repository }} --signer-workflow ${{ github.server_url }}/${{ github.repository }}/.github/workflows/release.yml 2>/dev/null || echo "WARNING: attestation failed, continuing without provenance"
          # Also create a simple provenance file for scorecard
          cat > nora-v${{ github.ref_name }}.provenance.json << 'PROVEOF'
          {
            "_type": "https://in-toto.io/Statement/v0.1",
            "predicateType": "https://slsa.dev/provenance/v0.2",
            "subject": [{"name": "nora-linux-amd64"}],
            "predicate": {
              "builder": {"id": "${{ github.server_url }}/${{ github.repository }}/.github/workflows/release.yml"},
              "buildType": "https://github.com/slsa-framework/slsa-github-generator/generic@v2",
              "invocation": {
                "configSource": {
                  "uri": "${{ github.server_url }}/${{ github.repository }}",
                  "digest": {"sha1": "${{ github.sha }}"},
                  "entryPoint": ".github/workflows/release.yml"
                }
              },
              "metadata": {
                "buildInvocationID": "${{ github.run_id }}",
                "completeness": {"parameters": true, "environment": false, "materials": false}
              }
            }
          }
          PROVEOF
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Generate SBOM (SPDX)
        uses: anchore/sbom-action@57aae528053a48a3f6235f2d9461b05fbcb7366d # v0
        with:
          image: ${{ env.NORA }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}
          format: spdx-json
          output-file: nora-${{ github.ref_name }}.sbom.spdx.json

      - name: Generate SBOM (CycloneDX)
        uses: anchore/sbom-action@57aae528053a48a3f6235f2d9461b05fbcb7366d # v0
        with:
          image: ${{ env.NORA }}/${{ env.IMAGE_NAME }}:${{ steps.ver.outputs.tag }}
          format: cyclonedx-json
          output-file: nora-${{ github.ref_name }}.sbom.cdx.json

      - name: Install cosign
        uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v3
        uses: sigstore/cosign-installer@c56c2d3e59e4281cc41dea2217323ba5694b171e # v3

      - name: Sign binary with cosign (keyless Sigstore)
        run: cosign sign-blob --yes --bundle nora-linux-amd64.bundle ./nora-linux-amd64
        run: cosign sign-blob --yes --output-signature nora-linux-amd64.sig --output-certificate nora-linux-amd64.pem ./nora-linux-amd64

      - name: Create Release
        uses: softprops/action-gh-release@153bb8e04406b158c6c84fc1615b65b24149a1fe # v2
@@ -276,10 +252,10 @@ jobs:
          files: |
            nora-linux-amd64
            nora-linux-amd64.sha256
            nora-linux-amd64.bundle
            nora-linux-amd64.sig
            nora-linux-amd64.pem
            nora-${{ github.ref_name }}.sbom.spdx.json
            nora-${{ github.ref_name }}.sbom.cdx.json
            nora-${{ github.ref_name }}.provenance.json
          body: |
            ## Install

@@ -299,17 +275,17 @@ jobs:

            **Alpine (standard):**
            ```bash
            docker pull ghcr.io/${{ github.repository }}:${{ steps.ver.outputs.tag }}
            docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}
            ```

            **RED OS:**
            ```bash
            docker pull ghcr.io/${{ github.repository }}:${{ steps.ver.outputs.tag }}-redos
            docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}-redos
            ```

            **Astra Linux SE:**
            ```bash
            docker pull ghcr.io/${{ github.repository }}:${{ steps.ver.outputs.tag }}-astra
            docker pull ghcr.io/${{ github.repository }}:${{ github.ref_name }}-astra
            ```

            ## Changelog
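The release step above now ships a detached signature (`.sig`) and certificate (`.pem`) instead of a single `.bundle`. A minimal sketch of how a consumer might verify the released binary with cosign v2 — the identity regexp and issuer below are assumptions based on the workflow path shown above, not values confirmed by the repo:

```bash
# Hypothetical keyless verification of a release artifact.
# --certificate-identity-regexp matches the signing workflow; adjust to the real repo/ref.
cosign verify-blob \
  --certificate nora-linux-amd64.pem \
  --signature nora-linux-amd64.sig \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  --certificate-identity-regexp 'github\.com/getnora-io/nora/\.github/workflows/release\.yml' \
  ./nora-linux-amd64
```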
28  .gitignore (vendored)
@@ -4,24 +4,24 @@ data/
.env
.env.*
*.log
internal config

# Backup files
*.bak

# Internal files
SESSION*.md
TODO.md
docs-site/
docs/
*.txt

## Internal files
.internal/
examples/

# Generated by CI
*.cdx.json

# Playwright / Node
node_modules/
package-lock.json
/tmp/

# Working files (never commit)
SESSION_*.md
TODO.md
FEEDBACK.txt
*.session.txt
*-this-session-*.txt
nora-review.sh
coverage/
target/criterion/
# Dead crates (kept in repo for reference but excluded from workspace)
# nora-cli/ and nora-storage/ remain in git but are not built
.gitleaks.toml
@@ -3,11 +3,31 @@

title = "NORA gitleaks rules"

# Internal infrastructure — private IPs and domains
[[rules]]
id = "private-network"
description = "Private network addresses and internal domains"
regex = '''(10\.25\.1\.\d+|10\.0\.\d+\.\d+)'''
tags = ["network"]
[rules.allowlist]
regexTarget = "match"
regexes = ['''10\.0\.0\.0''']

[[rules]]
id = "internal-domains"
description = "Internal domain names"
regex = '''[a-z0-9]+\.(lab|internal|local)\b'''
tags = ["network"]

[[rules]]
id = "tailscale-hostnames"
description = "Tailscale MagicDNS hostnames"
regex = '''[a-z0-9]+\.tail[a-z0-9]+\.ts\.net'''
tags = ["network"]

[allowlist]
description = "Global allowlist for false positives"
description = "Allowlist for false positives"
paths = [
  '''\.gitleaks\.toml$''',
  '''\.gitignore$''',
]
regexTarget = "match"
# Test placeholder tokens (e.g. nra_00112233...)
regexes = ['''nra_0{2}[0-9a-f]{30}''']
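These rules are consumed by the gitleaks CLI; a minimal local run against the working tree, assuming gitleaks v8:

```bash
# Scan the current repo with the project's custom rules.
gitleaks detect --config .gitleaks.toml --source . --verbose
```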
130  CHANGELOG.md
@@ -1,134 +1,4 @@
# Changelog

## [0.4.0] - 2026-04-05

### Added
- **Docker image mirroring** — nora mirror docker fetches manifests and blobs from upstream registries (Docker Hub, ghcr.io, etc.) and pushes into NORA (#41)
- **yarn.lock support** — nora mirror yarn parses v1 format with scoped packages and dedup (#44)
- **--json output for mirror** — nora mirror npm --json outputs structured JSON for CI/CD pipelines (#43)
- **Storage size in /health** — total_size_bytes field in health endpoint response (#42)
- 499 total tests (up from 466), 61.5% code coverage (up from 43%)

### Changed
- fetch_blob_from_upstream and fetch_manifest_from_upstream are now pub for reuse in mirror module

### Fixed
- tarpaulin exclude-files paths corrected to workspace-relative (coverage jumped from 29% to 61%) (#92)
- Env var naming unified across all registries (#39, #90)
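For the new total_size_bytes field, a quick check against a running instance might look like this — the field name comes from the entry above, but the exact JSON shape of the /health response is an assumption:

```bash
# Hypothetical: assumes total_size_bytes sits at the top level of the response.
curl -s http://localhost:4000/health | jq '.total_size_bytes'
```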
## [0.3.1] - 2026-04-05

### Added
- **Token verification cache** — in-memory with 5min TTL, eliminates repeated Argon2id on every request
- **Property-based tests** (proptest) for Docker/OCI manifest parsers (#84)
- 466 total tests, 43% code coverage (up from 22%) (#87)
- MSRV declared in Cargo.toml (#84)

### Changed
- Upload sessions moved from global static to AppState
- Blocking I/O replaced with async in hot paths
- Production docker-compose includes Caddy reverse proxy
- clippy.toml added for consistent lint rules

### Fixed
- Proxy request deduplication — concurrent requests coalesced (#83)
- Multi-registry GC now handles all 7 registry types (#83)
- TOCTOU race condition in credential validation (#83)
- Config validation at startup — fail fast with clear errors (#73)
- Raw registry in dashboard sidebar, footer stats updated (#64)
- tarpaulin.toml config format (#88)

### Security
- sha2 0.10→0.11, hmac 0.12→0.13 (#75)
- Credential hygiene — cleared from memory after use (#83)
- cosign-installer 3.8.0→4.1.1 (#71)

### Documentation
- Development Setup in CONTRIBUTING.md (#76)
- Roadmap consolidated into README (#65, #66)
- Helm OCI docs and logging env vars documented

## [0.3.0] - 2026-03-21

### Added
- **Go module proxy** — full GOPROXY protocol support (list, info, mod, zip, latest) (#59)
- **Upstream proxy retry** with configurable timeout and backoff (#56)
- **Maven proxy-only mode** — proxy Maven artifacts without local storage (#56)
- **Anonymous read mode** docs — Go proxy section in README (#62)
- Integration tests: Docker push/pull, npm install, upstream timeout (#57)
- Go proxy and Raw registry integration tests in smoke suite (#72)
- Config validation at startup — clear errors instead of runtime panics
- Dockerfile HEALTHCHECK for standalone deployments (#72)
- rust-toolchain.toml for reproducible builds (#72)

### Changed
- **Token hashing migrated from SHA-256 to Argon2id** — existing tokens auto-migrate on first use (#55)
- UI: Raw registry in sidebar, footer stats updated (32MB, 7 registries) (#64)
- README restructured: roadmap in README, removed stale ROADMAP.md (#65, #66)

### Fixed
- Remove all unwrap() from production code — proper error handling throughout (#72)
- Add `#![forbid(unsafe_code)]` — no unsafe code allowed at crate level (#72)
- Add input validation to Cargo registry endpoints (#72)
- Improve expect() messages with descriptive context (#72)
- Remove 7 unnecessary clone() calls (#72)
- Restore .gitleaks.toml lost during merge (#58)
- Update SECURITY.md — add 0.3.x to supported versions (#72)

### Security
- Update rustls-webpki 0.103.9 → 0.103.10 (RUSTSEC-2026-0049)
- Argon2id token hashing replaces SHA-256 (#55)
- `#![forbid(unsafe_code)]` enforced (#72)
- Zero unwrap() in production code (#72)

## [0.2.35] - 2026-03-20

### Added
- **Anonymous read mode** (`NORA_AUTH_ANONYMOUS_READ=true`): allow pull/download without credentials while requiring auth for push. Use case: public demo registries, read-only mirrors. A sketch of such a deployment follows this entry.

### Fixed
- Pin slsa-github-generator and codeql-action by SHA instead of tag
- Replace anonymous tuple with named struct in activity grouping (readability)
- Replace unwrap() with if-let pattern in activity grouping (safety)
- Add warning message on SLSA attestation failure instead of silent suppression
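A sketch of the anonymous-read deployment described above, using the env var and image documented in this repo:

```bash
# Pulls/downloads need no credentials; pushes still require auth.
docker run -d -p 4000:4000 \
  -e NORA_AUTH_ANONYMOUS_READ=true \
  -v nora-data:/data \
  ghcr.io/getnora-io/nora:latest
```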
## [0.2.34] - 2026-03-20

### Fixed
- **UI**: Group consecutive identical activity entries — repeated cache hits show as "artifact (x4)" instead of 4 identical rows
- **UI**: Fix table cell padding in Mount Points and Activity tables — th/td alignment now consistent
- **Security**: Update tar crate 0.4.44 → 0.4.45 (CVE-2026-33055 PAX size header bypass, CVE-2026-33056 symlink chmod traversal)

### Added
- 82 new unit tests across 7 modules (activity_log, audit, config, dashboard_metrics, error, metrics, repo_index)
- Test coverage badge in README (12.55% → 21.56%)
- Dashboard GIF (EN/RU crossfade) in README
- 7 missing environment variables added to docs (NORA_PUBLIC_URL, S3 credentials, NPM_METADATA_TTL, Raw config)

### Changed
- README restructured: tagline + docker run + GIF first, badges moved to Security section
- Remove hardcoded OpenSSF Scorecard version from README

## [0.2.33] - 2026-03-19

### Security
- Verify blob digest (SHA256) on upload — reject mismatches with DIGEST_INVALID error
- Reject sha512 digests (only sha256 supported for blob uploads)
- Add upload session limits: max 100 concurrent, 2GB per session, 30min TTL (configurable via NORA_MAX_UPLOAD_SESSIONS, NORA_MAX_UPLOAD_SESSION_SIZE_MB)
- Bind upload sessions to repository name (prevent session fixation attacks)
- Add security headers: Content-Security-Policy, X-Frame-Options, X-Content-Type-Options, Referrer-Policy
- Run containers as non-root user (USER nora) in all Dockerfiles

### Fixed
- Filter .meta.json from Docker tag list (fixes ArgoCD Image Updater tag recursion)
- Fix catalog endpoint to show namespaced images correctly (library/alpine instead of library)

### Added
- CodeQL workflow for SAST analysis
- SLSA provenance attestation for release artifacts

### Changed
- Configurable upload session size for ML models via NORA_MAX_UPLOAD_SESSION_SIZE_MB (default 2048 MB)

## [0.2.32] - 2026-03-18
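The 0.2.33 session limits are tunable through the env vars named above; for example, raising the per-session cap for large ML models (the values here are illustrative, only the variable names and the 2048 MB default come from the changelog):

```bash
# NORA_MAX_UPLOAD_SESSION_SIZE_MB defaults to 2048 per the changelog entry.
docker run -d -p 4000:4000 \
  -e NORA_MAX_UPLOAD_SESSIONS=50 \
  -e NORA_MAX_UPLOAD_SESSION_SIZE_MB=8192 \
  -v nora-data:/data \
  ghcr.io/getnora-io/nora:latest
```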
CONTRIBUTING.md
@@ -2,34 +2,6 @@

Thank you for your interest in contributing to NORA!

## Developer Certificate of Origin (DCO)

By submitting a pull request, you agree to the [Developer Certificate of Origin](https://developercertificate.org/).
Your contribution will be licensed under the [MIT License](LICENSE).

You confirm that you have the right to submit the code and that it does not violate any third-party rights.

## Project Governance

NORA uses a **Benevolent Dictator** governance model:

- **Maintainer:** [@devitway](https://github.com/devitway) — final decisions on features, releases, and architecture
- **Contributors:** anyone who submits issues, PRs, or docs improvements
- **Decision process:** proposals via GitHub Issues → discussion → maintainer decision
- **Release authority:** maintainer only

### Roles and Responsibilities

| Role | Person | Responsibilities |
|------|--------|-----------------|
| Maintainer | @devitway | Code review, releases, roadmap, security response |
| Contributor | anyone | Issues, PRs, documentation, testing |
| Dependabot | automated | Dependency updates |

### Continuity

The GitHub organization [getnora-io](https://github.com/getnora-io) has multiple admin accounts to ensure project continuity. Source code is MIT-licensed, enabling anyone to fork and continue the project.

## Getting Started

1. Fork the repository
@@ -38,59 +10,24 @@ The GitHub organization [getnora-io](https://github.com/getnora-io) has multiple

## Development Setup

### Prerequisites

- **Rust** stable (1.85+) — install via [rustup](https://rustup.rs/)
- **Docker** (optional) — for integration tests (docker push/pull)
- **Node.js** 18+ (optional) — for npm integration tests

### Build and Test

```bash
# Install Rust (if needed)
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

# Build
cargo build --package nora-registry

# Run unit tests (important: use --lib --bin to skip fuzz targets)
# Run tests (important: always use --lib --bin nora to skip fuzz targets)
cargo test --lib --bin nora

# Run clippy (must pass with zero warnings)
# Run clippy
cargo clippy --package nora-registry -- -D warnings

# Format check
cargo fmt --check
```
# Format
cargo fmt

### Run Locally

```bash
# Start with defaults (port 4000, local storage in ./data/)
# Run locally
cargo run --bin nora -- serve

# Custom port and storage
NORA_PORT=5000 NORA_STORAGE_PATH=/tmp/nora-data cargo run --bin nora -- serve

# Test health
curl http://localhost:4000/health
```

### Integration / Smoke Tests

```bash
# Build release binary first
cargo build --release

# Run full smoke suite (starts NORA, tests all 7 protocols, stops)
bash tests/smoke.sh
```

### Fuzz Testing

```bash
# Install cargo-fuzz (one-time)
cargo install cargo-fuzz

# Run fuzz target (Ctrl+C to stop)
cargo +nightly fuzz run fuzz_validation -- -max_total_time=60
```

## Before Submitting a PR
448  Cargo.lock (generated)

Generated lockfile counterpart of the manifest changes in this compare (checksums, source URLs, and per-package dependency lists elided). The visible package-level changes:

- nora-registry 0.4.0 → 0.2.33; its dependency list drops argon2, criterion, and proptest, and moves from sha2 0.11.0 back to sha2 0.10.9.
- Version changes: hmac 0.13.0 → 0.12.1; rustls-webpki 0.103.10 → 0.103.9; tar 0.4.45 → 0.4.44; uuid 1.23.0 → 1.22.0.
- Duplicate-version entries removed in favor of the older line: sha2 0.11.0, digest 0.11.2, block-buffer 0.12.0, crypto-common 0.2.1, cpufeatures 0.3.0, rand_core 0.6.4.
- [[package]] entries removed (Argon2 stack and bench/test dependencies): anes, argon2, base64ct, bit-set, bit-vec, cast, ciborium, ciborium-io, ciborium-ll, cmov, const-oid, criterion, criterion-plot, crossbeam-deque, crossbeam-epoch, crossbeam-utils, crunchy, ctutils, either, half, hybrid-array, is-terminal, itertools, oorandom, password-hash, plotters, plotters-backend, plotters-svg, proptest, quick-error, rand_xorshift, rayon, rayon-core, rusty-fork, tinytemplate, unarray, wait-timeout.
Cargo.toml
@@ -6,9 +6,8 @@ members = [
]

[workspace.package]
version = "0.4.0"
version = "0.2.33"
edition = "2021"
rust-version = "1.75"
license = "MIT"
authors = ["DevITWay <devitway@gmail.com>"]
repository = "https://github.com/getnora-io/nora"
@@ -22,7 +21,7 @@ serde_json = "1"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter", "json"] }
reqwest = { version = "0.12", default-features = false, features = ["json", "rustls-tls"] }
sha2 = "0.11"
sha2 = "0.10"
async-trait = "0.1"
hmac = "0.13"
hmac = "0.12"
hex = "0.4"
Dockerfile
@@ -21,8 +21,5 @@ VOLUME ["/data"]

USER nora

HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
    CMD wget -q --spider http://localhost:4000/health || exit 1

ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]
Dockerfile (Astra Linux SE variant)
@@ -1,15 +1,20 @@
# syntax=docker/dockerfile:1.4
# NORA on Astra Linux SE base (Debian-based, FSTEC-certified)
# Binary is pre-built by CI and passed via context
FROM debian:bookworm-slim
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with Astra Linux SE (FSTEC certified)
# To switch to official base: replace FROM scratch with
#   FROM registry.astralinux.ru/library/alse:latest
#   RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/*

RUN apt-get update \
    && apt-get install -y --no-install-recommends ca-certificates curl \
    && rm -rf /var/lib/apt/lists/* \
    && groupadd -r nora && useradd -r -g nora -d /data -s /usr/sbin/nologin nora \
    && mkdir -p /data && chown nora:nora /data
FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs
RUN apk add --no-cache ca-certificates \
    && addgroup -S -g 10001 nora && adduser -S -u 10001 -G nora nora

COPY --chown=nora:nora nora /usr/local/bin/nora
FROM scratch

COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=certs /etc/passwd /etc/passwd
COPY --from=certs /etc/group /etc/group
COPY nora /usr/local/bin/nora

ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
@@ -24,8 +29,5 @@ VOLUME ["/data"]

USER nora

HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
    CMD curl -sf http://localhost:4000/health || exit 1

ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]
Dockerfile (RED OS variant)
@@ -1,14 +1,20 @@
# syntax=docker/dockerfile:1.4
# NORA on RED OS base (RPM-based, FSTEC-certified)
# Binary is pre-built by CI and passed via context
FROM registry.access.redhat.com/ubi9/ubi-minimal:9.4
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with RED OS (FSTEC certified)
# To switch to official base: replace FROM scratch with
#   FROM registry.red-soft.ru/redos/redos:8
#   RUN dnf install -y ca-certificates && dnf clean all

RUN microdnf install -y ca-certificates shadow-utils \
    && microdnf clean all \
    && groupadd -r nora && useradd -r -g nora -d /data -s /sbin/nologin nora \
    && mkdir -p /data && chown nora:nora /data
FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs
RUN apk add --no-cache ca-certificates \
    && addgroup -S -g 10001 nora && adduser -S -u 10001 -G nora nora

COPY --chown=nora:nora nora /usr/local/bin/nora
FROM scratch

COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=certs /etc/passwd /etc/passwd
COPY --from=certs /etc/group /etc/group
COPY nora /usr/local/bin/nora

ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
@@ -23,8 +29,5 @@ VOLUME ["/data"]

USER nora

HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
    CMD curl -sf http://localhost:4000/health || exit 1

ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]
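Both variants expect the nora binary pre-built by CI in the build context. A hypothetical local equivalent — the Dockerfile file name and location are assumptions, not paths confirmed by the repo:

```bash
# Build the release binary, then wrap it in the scratch-based image.
cargo build --release --package nora-registry
cp target/release/nora .
docker build -f Dockerfile.redos -t nora:redos .   # file name illustrative
```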
210
README.md
210
README.md
@@ -1,44 +1,52 @@

# NORA

**The artifact registry that grows with you.** Starts with `docker run`, scales to enterprise.

```bash
docker run -d -p 4000:4000 -v nora-data:/data ghcr.io/getnora-io/nora:latest
```

Open [http://localhost:4000/ui/](http://localhost:4000/ui/) — your registry is ready.

<p align="center">
  <img src=".github/assets/dashboard.gif" alt="NORA Dashboard" width="960" />
</p>

## Why NORA

- **Zero-config** — single 32 MB binary, no database, no dependencies. `docker run` and it works.
- **Production-tested** — Docker (+ Helm OCI), Maven, npm, PyPI, Cargo, Go, Raw. Used in real CI/CD with ArgoCD, Buildx cache, and air-gapped environments.
- **Secure by default** — [OpenSSF Scorecard](https://scorecard.dev/viewer/?uri=github.com/getnora-io/nora), signed releases, SBOM, fuzz testing, 460+ tests.

[![Release](https://img.shields.io/github/v/release/getnora-io/nora)](https://github.com/getnora-io/nora/releases)
[![Docker Pulls](https://img.shields.io/badge/ghcr.io-getnora--io%2Fnora-blue)](https://github.com/getnora-io/nora/pkgs/container/nora)
[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](LICENSE)
[![GitHub release](https://img.shields.io/github/v/release/getnora-io/nora)](https://github.com/getnora-io/nora/releases)
[![CI](https://github.com/getnora-io/nora/workflows/CI/badge.svg)](https://github.com/getnora-io/nora/actions)
[![Docker](https://img.shields.io/badge/docker-ghcr.io-blue)](https://github.com/getnora-io/nora/pkgs/container/nora)
[![GitHub Stars](https://img.shields.io/github/stars/getnora-io/nora?style=social)](https://github.com/getnora-io/nora/stargazers)
[![Rust](https://img.shields.io/badge/rust-1.75+-orange.svg)](https://www.rust-lang.org/)
[![Website](https://img.shields.io/badge/website-getnora.dev-blue)](https://getnora.dev)
[![Telegram](https://img.shields.io/badge/telegram-@getnora-blue)](https://t.me/getnora)
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/getnora-io/nora/badge)](https://scorecard.dev/viewer/?uri=github.com/getnora-io/nora)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/12207/badge)](https://www.bestpractices.dev/projects/12207)

**32 MB** binary | **< 100 MB** RAM | **3s** startup | **7** registries
> **Multi-protocol artifact registry that doesn't suck.**
>
> One binary. All protocols. Stupidly fast.

> Used in production at [DevIT Academy](https://github.com/devitway) since January 2026 for Docker images, Maven artifacts, and npm packages.
**32 MB** binary | **< 100 MB** RAM | **3s** startup | **5** protocols

## Supported Registries
## Features

| Registry | Mount Point | Upstream Proxy | Auth |
|----------|------------|----------------|------|
| Docker Registry v2 | `/v2/` | Docker Hub, GHCR, any OCI, Helm OCI | ✓ |
| Maven | `/maven2/` | Maven Central, custom | proxy-only |
| npm | `/npm/` | npmjs.org, custom | ✓ |
| Cargo | `/cargo/` | — | ✓ |
| PyPI | `/simple/` | pypi.org, custom | ✓ |
| Go Modules | `/go/` | proxy.golang.org, custom | ✓ |
| Raw files | `/raw/` | — | ✓ |
- **Multi-Protocol Support**
  - Docker Registry v2
  - Maven repository (+ proxy to Maven Central)
  - npm registry (+ proxy to npmjs.org)
  - Cargo registry
  - PyPI index

> **Helm charts** work via the Docker/OCI endpoint — `helm push`/`pull` with `--plain-http` or behind TLS reverse proxy.
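
A minimal sketch of that flow, assuming Helm ≥ 3.13 (for `--plain-http`) and a hypothetical chart named `mychart`; the `charts` path prefix is just an example namespace:

```bash
# Package and push a chart to the OCI endpoint over plain HTTP
helm package mychart/
helm push mychart-0.1.0.tgz oci://localhost:4000/charts --plain-http

# Pull it back by name and version
helm pull oci://localhost:4000/charts/mychart --version 0.1.0 --plain-http
```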

- **Storage Backends**
  - Local filesystem (zero-config default)
  - S3-compatible (MinIO, AWS S3)

- **Production Ready**
  - Web UI with search and browse
  - Swagger UI API documentation
  - Prometheus metrics (`/metrics`)
  - Health checks (`/health`, `/ready`)
  - JSON structured logging
  - Graceful shutdown

- **Security**
  - Basic Auth (htpasswd + bcrypt)
  - Revocable API tokens with RBAC
  - Blob digest verification (SHA256)
  - Non-root container images
  - Security headers (CSP, X-Frame-Options, nosniff)
  - Upload session limits (DoS protection)
  - Configurable upload size for ML models (`NORA_MAX_UPLOAD_SESSION_SIZE_MB`)
  - ENV-based configuration (12-Factor)
  - SBOM (SPDX + CycloneDX) in every release
  - See [SECURITY.md](SECURITY.md) for vulnerability reporting

## Quick Start

@@ -48,13 +56,6 @@ Open [http://localhost:4000/ui/](http://localhost:4000/ui/) — your registry is
docker run -d -p 4000:4000 -v nora-data:/data ghcr.io/getnora-io/nora:latest
```

### Binary

```bash
curl -fsSL https://github.com/getnora-io/nora/releases/latest/download/nora-linux-amd64 -o nora
chmod +x nora && ./nora
```

### From Source

```bash
@@ -62,13 +63,18 @@ cargo install nora-registry
nora
```

Open http://localhost:4000/ui/

## Usage

### Docker Images

```bash
# Tag and push
docker tag myapp:latest localhost:4000/myapp:latest
docker push localhost:4000/myapp:latest

# Pull
docker pull localhost:4000/myapp:latest
```

@@ -89,42 +95,31 @@ npm config set registry http://localhost:4000/npm/
npm publish
```

### Go Modules

```bash
GOPROXY=http://localhost:4000/go go get golang.org/x/text@latest
```

## Features

- **Web UI** — dashboard with search, browse, i18n (EN/RU)
- **Proxy & Cache** — transparent proxy to upstream registries with local cache (see the pull-through sketch after this list)
- **Mirror CLI** — offline sync for air-gapped environments (`nora mirror`)
- **Backup & Restore** — `nora backup` / `nora restore`
- **Migration** — `nora migrate --from local --to s3`
- **S3 Storage** — MinIO, AWS S3, any S3-compatible backend
- **Prometheus Metrics** — `/metrics` endpoint
- **Health Checks** — `/health`, `/ready` for Kubernetes probes
- **Swagger UI** — `/api-docs` for API exploration
- **Rate Limiting** — configurable per-endpoint rate limits
- **FSTEC Builds** — Astra Linux SE and RED OS images in every release
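
As referenced in the Proxy & Cache item, a quick sketch of pull-through caching against the default Docker Hub upstream (image and tag are only examples):

```bash
# First pull fetches from the upstream and is cached locally;
# repeat pulls are served from NORA's cache
docker pull localhost:4000/library/alpine:3.20

# Hub official images also resolve without the library/ prefix (auto-prepended since v0.2.32)
docker pull localhost:4000/alpine:3.20
```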

## Authentication

NORA supports Basic Auth (htpasswd) and revocable API tokens with RBAC.

```bash
# Create htpasswd file
htpasswd -cbB users.htpasswd admin yourpassword
### Quick Setup

# Start with auth enabled
```bash
# 1. Create htpasswd file with bcrypt
htpasswd -cbB users.htpasswd admin yourpassword
# Add more users:
htpasswd -bB users.htpasswd ci-user ci-secret

# 2. Start NORA with auth enabled
docker run -d -p 4000:4000 \
  -v nora-data:/data \
  -v ./users.htpasswd:/data/users.htpasswd \
  -e NORA_AUTH_ENABLED=true \
  ghcr.io/getnora-io/nora:latest

# 3. Verify
curl -u admin:yourpassword http://localhost:4000/v2/_catalog
```

### API Tokens (RBAC)

| Role | Pull/Read | Push/Write | Delete/Admin |
|------|-----------|------------|--------------|
| `read` | Yes | No | No |
@@ -133,6 +128,16 @@ docker run -d -p 4000:4000 \

See [Authentication guide](https://getnora.dev/configuration/authentication/) for token management, Docker login, and CI/CD integration.
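
For a rough idea of the token API shape, a sketch that assumes a POST to the `/api/tokens` route; the field names match `CreateTokenRequest` in the source, but the method and exact response shape are assumptions rather than documented contract:

```bash
# Exchange htpasswd credentials for a revocable, role-scoped token
curl -s -X POST http://localhost:4000/api/tokens \
  -H 'Content-Type: application/json' \
  -d '{"username":"admin","password":"yourpassword","ttl_days":90,"role":"write","description":"CI token"}'
# => {"token":"nora_...","expires_in_days":90}
```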

## CLI Commands

```bash
nora          # Start server
nora serve    # Start server (explicit)
nora backup -o backup.tar.gz
nora restore -i backup.tar.gz
nora migrate --from local --to s3
```

## Configuration

### Environment Variables
@@ -144,10 +149,8 @@ See [Authentication guide](https://getnora.dev/configuration/authentication/) fo
| `NORA_STORAGE_MODE` | local | `local` or `s3` |
| `NORA_AUTH_ENABLED` | false | Enable authentication |
| `NORA_DOCKER_UPSTREAMS` | `https://registry-1.docker.io` | Docker upstreams (`url\|user:pass,...`) |
| `NORA_LOG_LEVEL` | info | Log level: trace, debug, info, warn, error |
| `NORA_LOG_FORMAT` | text | Log format: `text` (human) or `json` (structured) |
| `NORA_PUBLIC_URL` | — | Public URL for rewriting artifact links |
See [full configuration reference](https://getnora.dev/configuration/settings/) for all options.

See [full configuration reference](https://getnora.dev/configuration/settings/) for all environment variables including storage, rate limiting, proxy auth, and secrets.
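
As one illustrative combination of the variables above (all values are placeholders):

```bash
docker run -d -p 4000:4000 \
  -v nora-data:/data \
  -e NORA_STORAGE_MODE=local \
  -e NORA_LOG_FORMAT=json \
  -e NORA_PUBLIC_URL=https://registry.example.com \
  ghcr.io/getnora-io/nora:latest
```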

### config.toml

@@ -169,21 +172,9 @@ proxy_timeout = 60

[[docker.upstreams]]
url = "https://registry-1.docker.io"

[go]
proxy = "https://proxy.golang.org"
```

## CLI Commands

```bash
nora          # Start server
nora serve    # Start server (explicit)
nora backup -o backup.tar.gz
nora restore -i backup.tar.gz
nora migrate --from local --to s3
nora mirror   # Sync packages for offline use
```
See [full config reference](https://getnora.dev/configuration/settings/) for rate limiting, secrets, proxy auth, and all options.

## Endpoints

@@ -199,7 +190,6 @@ nora mirror   # Sync packages for offline use
| `/npm/` | npm |
| `/cargo/` | Cargo |
| `/simple/` | PyPI |
| `/go/` | Go Modules |

## TLS / HTTPS

@@ -211,8 +201,21 @@ registry.example.com {
}
```

For internal networks without TLS, configure Docker:

```json
// /etc/docker/daemon.json
{
  "insecure-registries": ["192.168.1.100:4000"]
}
```

See [TLS / HTTPS guide](https://getnora.dev/configuration/tls/) for Nginx, Traefik, and custom CA setup.

## FSTEC-Certified OS Builds

Dedicated builds for Astra Linux SE and RED OS are published as `-astra` and `-redos` tagged images in every [GitHub Release](https://github.com/getnora-io/nora/releases). Both use a `scratch` base with a statically linked binary.

## Performance

| Metric | NORA | Nexus | JFrog |
@@ -221,47 +224,24 @@ See [TLS / HTTPS guide](https://getnora.dev/configuration/tls/) for Nginx, Traef
| Memory | < 100 MB | 2-4 GB | 2-4 GB |
| Image Size | 32 MB | 600+ MB | 1+ GB |

[See how NORA compares to other registries](https://getnora.dev)

## Roadmap

- **Mirror CLI** — offline sync for air-gapped environments
- **OIDC / Workload Identity** — zero-secret auth for GitHub Actions, GitLab CI
- **Online Garbage Collection** — non-blocking cleanup without registry downtime
- **Retention Policies** — declarative rules: keep last N tags, delete older than X days
- **Image Signing** — cosign verification and policy enforcement
- **Image Signing** — cosign/notation verification and policy enforcement
- **Replication** — push/pull sync between NORA instances

See [CHANGELOG.md](CHANGELOG.md) for release history.

## Security & Trust

[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/getnora-io/nora/badge)](https://scorecard.dev/viewer/?uri=github.com/getnora-io/nora)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/12207/badge)](https://www.bestpractices.dev/projects/12207)
[![CI](https://img.shields.io/github/actions/workflow/status/getnora-io/nora/ci.yml?branch=main&label=CI)](https://github.com/getnora-io/nora/actions/workflows/ci.yml)
[![Tests](https://img.shields.io/badge/tests-460%2B-brightgreen)](https://github.com/getnora-io/nora/actions)

- **Signed releases** — every release is signed with [cosign](https://github.com/sigstore/cosign) (verification sketch after this list)
- **SBOM** — SPDX + CycloneDX in every release
- **Fuzz testing** — cargo-fuzz + ClusterFuzzLite
- **Blob verification** — SHA256 digest validation on every upload
- **Non-root containers** — all images run as non-root
- **Security headers** — CSP, X-Frame-Options, nosniff
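
A verification sketch for the signed images, assuming cosign 2.x and keyless signing from GitHub Actions; the certificate identity below is an assumption, so check the release notes for the authoritative command:

```bash
cosign verify ghcr.io/getnora-io/nora:latest \
  --certificate-oidc-issuer https://token.actions.githubusercontent.com \
  --certificate-identity-regexp 'https://github.com/getnora-io/nora/.*'
```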

See [SECURITY.md](SECURITY.md) for vulnerability reporting.

## Author

Created and maintained by [DevITWay](https://github.com/devitway)

[![Docker Pulls](https://img.shields.io/badge/ghcr.io-getnora--io%2Fnora-blue)](https://github.com/getnora-io/nora/pkgs/container/nora)
[![Rust](https://img.shields.io/badge/rust-1.75+-orange.svg)](https://www.rust-lang.org/)
[![Website](https://img.shields.io/badge/website-getnora.dev-blue)](https://getnora.dev)
[![Telegram](https://img.shields.io/badge/telegram-@getnora-blue)](https://t.me/getnora)
[![GitHub Stars](https://img.shields.io/github/stars/getnora-io/nora?style=social)](https://github.com/getnora-io/nora/stargazers)
**Created and maintained by [DevITWay](https://github.com/devitway)**

- Website: [getnora.dev](https://getnora.dev)
- Telegram: [@getnora](https://t.me/getnora)
- Telegram: [@DevITWay](https://t.me/DevITWay)
- GitHub: [@devitway](https://github.com/devitway)
- Email: devitway@gmail.com

## Contributing

@@ -269,6 +249,10 @@ NORA welcomes contributions! See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelin

## License

MIT License — see [LICENSE](LICENSE)
MIT License - see [LICENSE](LICENSE)

Copyright (c) 2026 DevITWay

---

**🐿️ N○RA** - Organized like a chipmunk's stash | Built with Rust by [DevITWay](https://t.me/DevITWay)
38
ROADMAP.md
Normal file
@@ -0,0 +1,38 @@

# Roadmap

> This roadmap reflects current priorities. It may change based on community feedback.

## Recently Completed

- **v0.2.32** — Docker dashboard fix for namespaced images, `library/` auto-prepend for Hub official images
- **v0.2.31** — npm full proxy (URL rewriting, scoped packages, publish, SHA-256 integrity cache, metadata TTL)
- **v0.2.29** — Upstream authentication for all protocols (Docker, Maven, npm, PyPI)

## In Progress

- **`nora mirror`** — Pre-fetch dependencies from lockfiles for air-gapped environments ([#40](https://github.com/getnora-io/nora/issues/40))
  - npm: `package-lock.json` (v1/v2/v3)
  - pip: `requirements.txt`
  - cargo: `Cargo.lock`
  - maven: dependency list

## Next Up

- **Consistent env var naming** — Unify `NORA_*_PROXY` / `NORA_*_UPSTREAMS` across all protocols ([#39](https://github.com/getnora-io/nora/issues/39))
- **Package blocklist** — Deny specific packages or versions via config ([#41](https://github.com/getnora-io/nora/issues/41))
- **Multiple upstreams for npm/PyPI** — matching what Maven already supports
- **v1.0.0 release** — Stable API, production-ready

## Future

- Docker image mirroring ([#42](https://github.com/getnora-io/nora/issues/42))
- Virtual repositories via config (named endpoints with custom search order)
- Path-based ACL (per-namespace write permissions)
- OIDC/LDAP authentication
- HA mode (stateless API + external database)
- Golang modules proxy
- Content trust (Cosign/Notation verification)

## How to Influence

Open an issue or join [Telegram](https://t.me/getnora) to discuss priorities.
@@ -4,7 +4,6 @@

| Version | Supported |
| ------- | ------------------ |
| 0.3.x | :white_check_mark: |
| 0.2.x | :white_check_mark: |
| < 0.2 | :x: |

@@ -51,6 +50,4 @@ When deploying NORA:

## Acknowledgments

We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities in our release notes and CHANGELOG, unless the reporter requests anonymity.

If you have previously reported a vulnerability and would like to be credited, please let us know.
We appreciate responsible disclosure and will acknowledge security researchers who report valid vulnerabilities.

@@ -1,4 +0,0 @@

# NORA clippy configuration
cognitive-complexity-threshold = 25
too-many-arguments-threshold = 7
type-complexity-threshold = 300
@@ -1,33 +0,0 @@

# syntax=docker/dockerfile:1.4
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with Astra Linux SE (FSTEC certified)
# To switch to official base: replace FROM scratch with
# FROM registry.astralinux.ru/library/alse:latest
# RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates && rm -rf /var/lib/apt/lists/*

FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs
RUN apk add --no-cache ca-certificates \
    && addgroup -S -g 10001 nora && adduser -S -u 10001 -G nora nora

FROM scratch

COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=certs /etc/passwd /etc/passwd
COPY --from=certs /etc/group /etc/group
COPY nora /usr/local/bin/nora

ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
ENV NORA_STORAGE_MODE=local
ENV NORA_STORAGE_PATH=/data/storage
ENV NORA_AUTH_TOKEN_STORAGE=/data/tokens

EXPOSE 4000

VOLUME ["/data"]

USER nora

ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]
@@ -1,33 +0,0 @@

# syntax=docker/dockerfile:1.4
# Binary is pre-built by CI (cargo build --release) and passed via context
# Runtime: scratch — compatible with RED OS (FSTEC certified)
# To switch to official base: replace FROM scratch with
# FROM registry.red-soft.ru/redos/redos:8
# RUN dnf install -y ca-certificates && dnf clean all

FROM alpine:3.20@sha256:a4f4213abb84c497377b8544c81b3564f313746700372ec4fe84653e4fb03805 AS certs
RUN apk add --no-cache ca-certificates \
    && addgroup -S -g 10001 nora && adduser -S -u 10001 -G nora nora

FROM scratch

COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY --from=certs /etc/passwd /etc/passwd
COPY --from=certs /etc/group /etc/group
COPY nora /usr/local/bin/nora

ENV RUST_LOG=info
ENV NORA_HOST=0.0.0.0
ENV NORA_PORT=4000
ENV NORA_STORAGE_MODE=local
ENV NORA_STORAGE_PATH=/data/storage
ENV NORA_AUTH_TOKEN_STORAGE=/data/tokens

EXPOSE 4000

VOLUME ["/data"]

USER nora

ENTRYPOINT ["/usr/local/bin/nora"]
CMD ["serve"]
@@ -1,9 +1,12 @@

services:
  nora:
    image: ghcr.io/getnora-io/nora:latest
    build:
      context: ..
      dockerfile: Dockerfile
    restart: unless-stopped
    expose:
      - "4000"
    ports:
      - "4000:4000"
    volumes:
      - nora-data:/data
    environment:
@@ -11,28 +14,6 @@ services:
      - NORA_HOST=0.0.0.0
      - NORA_PORT=4000
      - NORA_AUTH_ENABLED=false
    healthcheck:
      test: ["CMD", "wget", "-q", "--spider", "http://localhost:4000/health"]
      interval: 10s
      timeout: 5s
      start_period: 5s
      retries: 3

  caddy:
    image: caddy:2-alpine
    restart: unless-stopped
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile:ro
      - caddy-data:/data
      - caddy-config:/config
    depends_on:
      nora:
        condition: service_healthy

volumes:
  nora-data:
  caddy-data:
  caddy-config:

@@ -1,5 +1,6 @@

services:
  nora:
    build: .
    image: ghcr.io/getnora-io/nora:latest
    ports:
      - "4000:4000"
@@ -49,18 +49,8 @@ tower_governor = "0.8"
governor = "0.10"
parking_lot = "0.12"
zeroize = { version = "1.8", features = ["derive"] }
argon2 = { version = "0.5", features = ["std", "rand"] }
tower-http = { version = "0.6", features = ["set-header"] }
percent-encoding = "2"

[dev-dependencies]
proptest = "1"
tempfile = "3"
wiremock = "0.6"
criterion = { version = "0.5", features = ["html_reports"] }
tower = { version = "0.5", features = ["util"] }
http-body-util = "0.1"

[[bench]]
name = "parsing"
harness = false
@@ -1,109 +0,0 @@

use criterion::{black_box, criterion_group, criterion_main, Criterion};
use nora_registry::validation::{
    validate_digest, validate_docker_name, validate_docker_reference, validate_storage_key,
};

fn bench_validation(c: &mut Criterion) {
    let mut group = c.benchmark_group("validation");

    group.bench_function("storage_key_short", |b| {
        b.iter(|| validate_storage_key(black_box("docker/alpine/blobs/sha256:abc123")))
    });

    group.bench_function("storage_key_long", |b| {
        let key = "maven/com/example/deep/nested/path/artifact-1.0.0-SNAPSHOT.jar";
        b.iter(|| validate_storage_key(black_box(key)))
    });

    group.bench_function("storage_key_reject", |b| {
        b.iter(|| validate_storage_key(black_box("../etc/passwd")))
    });

    group.bench_function("docker_name_simple", |b| {
        b.iter(|| validate_docker_name(black_box("library/alpine")))
    });

    group.bench_function("docker_name_nested", |b| {
        b.iter(|| validate_docker_name(black_box("my-org/sub/repo-name")))
    });

    group.bench_function("docker_name_reject", |b| {
        b.iter(|| validate_docker_name(black_box("INVALID/NAME")))
    });

    group.bench_function("digest_sha256", |b| {
        b.iter(|| {
            validate_digest(black_box(
                "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            ))
        })
    });

    group.bench_function("digest_reject", |b| {
        b.iter(|| validate_digest(black_box("md5:abc")))
    });

    group.bench_function("reference_tag", |b| {
        b.iter(|| validate_docker_reference(black_box("v1.2.3-alpine")))
    });

    group.bench_function("reference_digest", |b| {
        b.iter(|| {
            validate_docker_reference(black_box(
                "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            ))
        })
    });

    group.finish();
}

fn bench_manifest_detection(c: &mut Criterion) {
    let mut group = c.benchmark_group("manifest_detection");

    let docker_v2 = serde_json::json!({
        "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
        "schemaVersion": 2,
        "config": {"mediaType": "application/vnd.docker.container.image.v1+json", "digest": "sha256:abc"},
        "layers": [{"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip", "digest": "sha256:def", "size": 1000}]
    })
    .to_string();

    let oci_index = serde_json::json!({
        "schemaVersion": 2,
        "manifests": [
            {"digest": "sha256:aaa", "platform": {"os": "linux", "architecture": "amd64"}},
            {"digest": "sha256:bbb", "platform": {"os": "linux", "architecture": "arm64"}}
        ]
    })
    .to_string();

    let minimal = serde_json::json!({"schemaVersion": 2}).to_string();

    group.bench_function("docker_v2_explicit", |b| {
        b.iter(|| {
            nora_registry::docker_fuzz::detect_manifest_media_type(black_box(docker_v2.as_bytes()))
        })
    });

    group.bench_function("oci_index", |b| {
        b.iter(|| {
            nora_registry::docker_fuzz::detect_manifest_media_type(black_box(oci_index.as_bytes()))
        })
    });

    group.bench_function("minimal_json", |b| {
        b.iter(|| {
            nora_registry::docker_fuzz::detect_manifest_media_type(black_box(minimal.as_bytes()))
        })
    });

    group.bench_function("invalid_json", |b| {
        b.iter(|| nora_registry::docker_fuzz::detect_manifest_media_type(black_box(b"not json")))
    });

    group.finish();
}

criterion_group!(benches, bench_validation, bench_manifest_detection);
criterion_main!(benches);
@@ -99,139 +99,3 @@ impl Default for ActivityLog {
        Self::new(50)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_action_type_display() {
        assert_eq!(ActionType::Pull.to_string(), "PULL");
        assert_eq!(ActionType::Push.to_string(), "PUSH");
        assert_eq!(ActionType::CacheHit.to_string(), "CACHE");
        assert_eq!(ActionType::ProxyFetch.to_string(), "PROXY");
    }

    #[test]
    fn test_action_type_equality() {
        assert_eq!(ActionType::Pull, ActionType::Pull);
        assert_ne!(ActionType::Pull, ActionType::Push);
    }

    #[test]
    fn test_activity_entry_new() {
        let entry = ActivityEntry::new(
            ActionType::Pull,
            "nginx:latest".to_string(),
            "docker",
            "LOCAL",
        );
        assert_eq!(entry.action, ActionType::Pull);
        assert_eq!(entry.artifact, "nginx:latest");
        assert_eq!(entry.registry, "docker");
        assert_eq!(entry.source, "LOCAL");
    }

    #[test]
    fn test_activity_log_push_and_len() {
        let log = ActivityLog::new(10);
        assert!(log.is_empty());
        assert_eq!(log.len(), 0);

        log.push(ActivityEntry::new(
            ActionType::Push,
            "test:v1".to_string(),
            "docker",
            "LOCAL",
        ));
        assert!(!log.is_empty());
        assert_eq!(log.len(), 1);
    }

    #[test]
    fn test_activity_log_recent() {
        let log = ActivityLog::new(10);
        for i in 0..5 {
            log.push(ActivityEntry::new(
                ActionType::Pull,
                format!("image:{}", i),
                "docker",
                "LOCAL",
            ));
        }

        let recent = log.recent(3);
        assert_eq!(recent.len(), 3);
        // newest first
        assert_eq!(recent[0].artifact, "image:4");
        assert_eq!(recent[1].artifact, "image:3");
        assert_eq!(recent[2].artifact, "image:2");
    }

    #[test]
    fn test_activity_log_all() {
        let log = ActivityLog::new(10);
        for i in 0..3 {
            log.push(ActivityEntry::new(
                ActionType::Pull,
                format!("pkg:{}", i),
                "npm",
                "PROXY",
            ));
        }

        let all = log.all();
        assert_eq!(all.len(), 3);
        assert_eq!(all[0].artifact, "pkg:2"); // newest first
    }

    #[test]
    fn test_activity_log_bounded_size() {
        let log = ActivityLog::new(3);
        for i in 0..5 {
            log.push(ActivityEntry::new(
                ActionType::Pull,
                format!("item:{}", i),
                "cargo",
                "CACHE",
            ));
        }

        assert_eq!(log.len(), 3);
        let all = log.all();
        // oldest entries should be dropped
        assert_eq!(all[0].artifact, "item:4");
        assert_eq!(all[1].artifact, "item:3");
        assert_eq!(all[2].artifact, "item:2");
    }

    #[test]
    fn test_activity_log_recent_more_than_available() {
        let log = ActivityLog::new(10);
        log.push(ActivityEntry::new(
            ActionType::Push,
            "one".to_string(),
            "maven",
            "LOCAL",
        ));

        let recent = log.recent(100);
        assert_eq!(recent.len(), 1);
    }

    #[test]
    fn test_activity_log_default() {
        let log = ActivityLog::default();
        assert!(log.is_empty());
        // default capacity is 50
        for i in 0..60 {
            log.push(ActivityEntry::new(
                ActionType::Pull,
                format!("x:{}", i),
                "docker",
                "LOCAL",
            ));
        }
        assert_eq!(log.len(), 50);
    }
}
@@ -12,7 +12,6 @@ use serde::Serialize;
use std::fs::{self, OpenOptions};
use std::io::Write;
use std::path::PathBuf;
use std::sync::Arc;
use tracing::{info, warn};

#[derive(Debug, Clone, Serialize)]
@@ -40,7 +39,7 @@ impl AuditEntry {

pub struct AuditLog {
    path: PathBuf,
    writer: Arc<Mutex<Option<fs::File>>>,
    writer: Mutex<Option<fs::File>>,
}

impl AuditLog {
@@ -49,116 +48,26 @@ impl AuditLog {
        let writer = match OpenOptions::new().create(true).append(true).open(&path) {
            Ok(f) => {
                info!(path = %path.display(), "Audit log initialized");
                Arc::new(Mutex::new(Some(f)))
                Mutex::new(Some(f))
            }
            Err(e) => {
                warn!(path = %path.display(), error = %e, "Failed to open audit log, auditing disabled");
                Arc::new(Mutex::new(None))
                Mutex::new(None)
            }
        };
        Self { path, writer }
    }

    pub fn log(&self, entry: AuditEntry) {
        let writer = Arc::clone(&self.writer);
        tokio::task::spawn_blocking(move || {
            if let Some(ref mut file) = *writer.lock() {
                if let Ok(json) = serde_json::to_string(&entry) {
                    let _ = writeln!(file, "{}", json);
                    let _ = file.flush();
                }
        if let Some(ref mut file) = *self.writer.lock() {
            if let Ok(json) = serde_json::to_string(&entry) {
                let _ = writeln!(file, "{}", json);
                let _ = file.flush();
            }
        });
        }
    }

    pub fn path(&self) -> &PathBuf {
        &self.path
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    #[test]
    fn test_audit_entry_new() {
        let entry = AuditEntry::new(
            "push",
            "admin",
            "nginx:latest",
            "docker",
            "uploaded manifest",
        );
        assert_eq!(entry.action, "push");
        assert_eq!(entry.actor, "admin");
        assert_eq!(entry.artifact, "nginx:latest");
        assert_eq!(entry.registry, "docker");
        assert_eq!(entry.detail, "uploaded manifest");
    }

    #[test]
    fn test_audit_log_new_and_path() {
        let tmp = TempDir::new().unwrap();
        let log = AuditLog::new(tmp.path().to_str().unwrap());
        assert!(log.path().ends_with("audit.jsonl"));
    }

    #[tokio::test]
    async fn test_audit_log_write_entry() {
        let tmp = TempDir::new().unwrap();
        let log = AuditLog::new(tmp.path().to_str().unwrap());

        let entry = AuditEntry::new("pull", "user1", "lodash", "npm", "downloaded");
        log.log(entry);

        // spawn_blocking is fire-and-forget; retry until flushed (max 1s)
        let path = log.path().clone();
        let mut content = String::new();
        for _ in 0..20 {
            tokio::time::sleep(std::time::Duration::from_millis(50)).await;
            content = std::fs::read_to_string(&path).unwrap_or_default();
            if content.contains(r#""action":"pull""#) {
                break;
            }
        }

        assert!(content.contains(r#""action":"pull""#));
        assert!(content.contains(r#""actor":"user1""#));
        assert!(content.contains(r#""artifact":"lodash""#));
    }

    #[tokio::test]
    async fn test_audit_log_multiple_entries() {
        let tmp = TempDir::new().unwrap();
        let log = AuditLog::new(tmp.path().to_str().unwrap());

        log.log(AuditEntry::new("push", "admin", "a", "docker", ""));
        log.log(AuditEntry::new("pull", "user", "b", "npm", ""));
        log.log(AuditEntry::new("delete", "admin", "c", "maven", ""));

        // Retry until all 3 entries flushed (max 1s)
        let path = log.path().clone();
        let mut line_count = 0;
        for _ in 0..20 {
            tokio::time::sleep(std::time::Duration::from_millis(50)).await;
            if let Ok(content) = std::fs::read_to_string(&path) {
                line_count = content.lines().count();
                if line_count >= 3 {
                    break;
                }
            }
        }

        assert_eq!(line_count, 3);
    }

    #[test]
    fn test_audit_entry_serialization() {
        let entry = AuditEntry::new("push", "ci", "app:v1", "docker", "ci build");
        let json = serde_json::to_string(&entry).unwrap();
        assert!(json.contains(r#""action":"push""#));
        assert!(json.contains(r#""ts":""#));
    }
}
@@ -94,16 +94,6 @@ pub async fn auth_middleware(
        return next.run(request).await;
    }

    // Allow anonymous read if configured
    let is_read_method = matches!(
        *request.method(),
        axum::http::Method::GET | axum::http::Method::HEAD
    );
    if state.config.auth.anonymous_read && is_read_method {
        // Read requests allowed without auth
        return next.run(request).await;
    }

    // Extract Authorization header
    let auth_header = request
        .headers()
@@ -366,7 +356,6 @@ pub fn token_routes() -> Router<Arc<AppState>> {
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use std::io::Write;
@@ -404,7 +393,7 @@ mod tests {
    fn test_htpasswd_loading_with_comments() {
        let mut file = NamedTempFile::new().unwrap();
        writeln!(file, "# This is a comment").unwrap();
        writeln!(file).unwrap();
        writeln!(file, "").unwrap();
        let hash = bcrypt::hash("secret", 4).unwrap();
        writeln!(file, "admin:{}", hash).unwrap();
        file.flush().unwrap();
@@ -473,185 +462,4 @@ mod tests {
        assert!(hash.starts_with("$2"));
        assert!(bcrypt::verify("test123", &hash).unwrap());
    }

    #[test]
    fn test_is_public_path_health() {
        assert!(is_public_path("/health"));
        assert!(is_public_path("/ready"));
        assert!(is_public_path("/metrics"));
    }

    #[test]
    fn test_is_public_path_v2() {
        assert!(is_public_path("/v2/"));
        assert!(is_public_path("/v2"));
    }

    #[test]
    fn test_is_public_path_ui() {
        assert!(is_public_path("/ui"));
        assert!(is_public_path("/ui/dashboard"));
        assert!(is_public_path("/ui/repos"));
    }

    #[test]
    fn test_is_public_path_api_docs() {
        assert!(is_public_path("/api-docs"));
        assert!(is_public_path("/api-docs/openapi.json"));
        assert!(is_public_path("/api/ui"));
    }

    #[test]
    fn test_is_public_path_tokens() {
        assert!(is_public_path("/api/tokens"));
        assert!(is_public_path("/api/tokens/list"));
        assert!(is_public_path("/api/tokens/revoke"));
    }

    #[test]
    fn test_is_public_path_root() {
        assert!(is_public_path("/"));
    }

    #[test]
    fn test_is_not_public_path_registry() {
        assert!(!is_public_path("/v2/library/alpine/manifests/latest"));
        assert!(!is_public_path("/npm/lodash"));
        assert!(!is_public_path("/maven/com/example"));
        assert!(!is_public_path("/pypi/simple/flask"));
    }

    #[test]
    fn test_is_not_public_path_random() {
        assert!(!is_public_path("/admin"));
        assert!(!is_public_path("/secret"));
        assert!(!is_public_path("/api/data"));
    }

    #[test]
    fn test_default_role_str() {
        assert_eq!(default_role_str(), "read");
    }

    #[test]
    fn test_default_ttl() {
        assert_eq!(default_ttl(), 30);
    }

    #[test]
    fn test_create_token_request_defaults() {
        let json = r#"{"username":"admin","password":"pass"}"#;
        let req: CreateTokenRequest = serde_json::from_str(json).unwrap();
        assert_eq!(req.username, "admin");
        assert_eq!(req.password, "pass");
        assert_eq!(req.ttl_days, 30);
        assert_eq!(req.role, "read");
        assert!(req.description.is_none());
    }

    #[test]
    fn test_create_token_request_custom() {
        let json = r#"{"username":"admin","password":"pass","ttl_days":90,"role":"write","description":"CI token"}"#;
        let req: CreateTokenRequest = serde_json::from_str(json).unwrap();
        assert_eq!(req.ttl_days, 90);
        assert_eq!(req.role, "write");
        assert_eq!(req.description, Some("CI token".to_string()));
    }

    #[test]
    fn test_create_token_response_serialization() {
        let resp = CreateTokenResponse {
            token: "nora_abc123".to_string(),
            expires_in_days: 30,
        };
        let json = serde_json::to_string(&resp).unwrap();
        assert!(json.contains("nora_abc123"));
        assert!(json.contains("30"));
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod integration_tests {
    use crate::test_helpers::*;
    use axum::http::{Method, StatusCode};
    use base64::{engine::general_purpose::STANDARD, Engine};

    #[tokio::test]
    async fn test_auth_disabled_passes_all() {
        let ctx = create_test_context();
        let response = send(&ctx.app, Method::PUT, "/raw/test.txt", b"data".to_vec()).await;
        assert_eq!(response.status(), StatusCode::CREATED);
    }

    #[tokio::test]
    async fn test_auth_public_paths_always_pass() {
        let ctx = create_test_context_with_auth(&[("admin", "secret")]);
        let response = send(&ctx.app, Method::GET, "/health", "").await;
        assert_eq!(response.status(), StatusCode::OK);
        let response = send(&ctx.app, Method::GET, "/ready", "").await;
        assert_eq!(response.status(), StatusCode::OK);
        let response = send(&ctx.app, Method::GET, "/v2/", "").await;
        assert_eq!(response.status(), StatusCode::OK);
    }

    #[tokio::test]
    async fn test_auth_blocks_without_credentials() {
        let ctx = create_test_context_with_auth(&[("admin", "secret")]);
        let response = send(&ctx.app, Method::PUT, "/raw/test.txt", b"data".to_vec()).await;
        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
        assert!(response.headers().contains_key("www-authenticate"));
    }

    #[tokio::test]
    async fn test_auth_basic_works() {
        let ctx = create_test_context_with_auth(&[("admin", "secret")]);
        let header_val = format!("Basic {}", STANDARD.encode("admin:secret"));
        let response = send_with_headers(
            &ctx.app,
            Method::PUT,
            "/raw/test.txt",
            vec![("authorization", &header_val)],
            b"data".to_vec(),
        )
        .await;
        assert_eq!(response.status(), StatusCode::CREATED);
    }

    #[tokio::test]
    async fn test_auth_basic_wrong_password() {
        let ctx = create_test_context_with_auth(&[("admin", "secret")]);
        let header_val = format!("Basic {}", STANDARD.encode("admin:wrong"));
        let response = send_with_headers(
            &ctx.app,
            Method::PUT,
            "/raw/test.txt",
            vec![("authorization", &header_val)],
            b"data".to_vec(),
        )
        .await;
        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
    }

    #[tokio::test]
    async fn test_auth_anonymous_read() {
        let ctx = create_test_context_with_anonymous_read(&[("admin", "secret")]);
        // Upload with auth
        let header_val = format!("Basic {}", STANDARD.encode("admin:secret"));
        let response = send_with_headers(
            &ctx.app,
            Method::PUT,
            "/raw/test.txt",
            vec![("authorization", &header_val)],
            b"data".to_vec(),
        )
        .await;
        assert_eq!(response.status(), StatusCode::CREATED);
        // Read without auth should work
        let response = send(&ctx.app, Method::GET, "/raw/test.txt", "").await;
        assert_eq!(response.status(), StatusCode::OK);
        // Write without auth should fail
        let response = send(&ctx.app, Method::PUT, "/raw/test2.txt", b"data".to_vec()).await;
        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
    }
}
@@ -300,134 +300,3 @@ fn format_bytes(bytes: u64) -> String {
        format!("{} B", bytes)
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[test]
    fn test_format_bytes_zero() {
        assert_eq!(format_bytes(0), "0 B");
    }

    #[test]
    fn test_format_bytes_bytes() {
        assert_eq!(format_bytes(512), "512 B");
        assert_eq!(format_bytes(1023), "1023 B");
    }

    #[test]
    fn test_format_bytes_kilobytes() {
        assert_eq!(format_bytes(1024), "1.00 KB");
        assert_eq!(format_bytes(1536), "1.50 KB");
        assert_eq!(format_bytes(10240), "10.00 KB");
    }

    #[test]
    fn test_format_bytes_megabytes() {
        assert_eq!(format_bytes(1048576), "1.00 MB");
        assert_eq!(format_bytes(5 * 1024 * 1024), "5.00 MB");
    }

    #[test]
    fn test_format_bytes_gigabytes() {
        assert_eq!(format_bytes(1073741824), "1.00 GB");
        assert_eq!(format_bytes(3 * 1024 * 1024 * 1024), "3.00 GB");
    }

    #[test]
    fn test_backup_metadata_serialization() {
        let meta = BackupMetadata {
            version: "0.3.0".to_string(),
            created_at: chrono::Utc::now(),
            artifact_count: 42,
            total_bytes: 1024000,
            storage_backend: "local".to_string(),
        };
        let json = serde_json::to_string(&meta).unwrap();
        assert!(json.contains("\"version\":\"0.3.0\""));
        assert!(json.contains("\"artifact_count\":42"));
        assert!(json.contains("\"storage_backend\":\"local\""));
    }

    #[test]
    fn test_backup_metadata_deserialization() {
        let json = r#"{
            "version": "0.3.0",
            "created_at": "2026-01-01T00:00:00Z",
            "artifact_count": 10,
            "total_bytes": 5000,
            "storage_backend": "s3"
        }"#;
        let meta: BackupMetadata = serde_json::from_str(json).unwrap();
        assert_eq!(meta.version, "0.3.0");
        assert_eq!(meta.artifact_count, 10);
        assert_eq!(meta.total_bytes, 5000);
        assert_eq!(meta.storage_backend, "s3");
    }

    #[test]
    fn test_backup_metadata_roundtrip() {
        let meta = BackupMetadata {
            version: "1.0.0".to_string(),
            created_at: chrono::Utc::now(),
            artifact_count: 100,
            total_bytes: 999999,
            storage_backend: "local".to_string(),
        };
        let json = serde_json::to_value(&meta).unwrap();
        let restored: BackupMetadata = serde_json::from_value(json).unwrap();
        assert_eq!(meta.version, restored.version);
        assert_eq!(meta.artifact_count, restored.artifact_count);
        assert_eq!(meta.total_bytes, restored.total_bytes);
    }

    #[tokio::test]
    async fn test_create_backup_empty_storage() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());
        let output = dir.path().join("backup.tar.gz");

        let stats = create_backup(&storage, &output).await.unwrap();
        assert_eq!(stats.artifact_count, 0);
        assert_eq!(stats.total_bytes, 0);
        assert!(output.exists());
        assert!(stats.output_size > 0); // at least metadata.json
    }

    #[tokio::test]
    async fn test_backup_restore_roundtrip() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());

        // Put some test data
        storage
            .put("maven/com/example/1.0/test.jar", b"test-content")
            .await
            .unwrap();
        storage
            .put("docker/test/blobs/sha256:abc123", b"blob-data")
            .await
            .unwrap();

        // Create backup
        let backup_file = dir.path().join("backup.tar.gz");
        let backup_stats = create_backup(&storage, &backup_file).await.unwrap();
        assert_eq!(backup_stats.artifact_count, 2);

        // Restore to different storage
        let restore_storage = Storage::new_local(dir.path().join("restored").to_str().unwrap());
        let restore_stats = restore_backup(&restore_storage, &backup_file)
            .await
            .unwrap();
        assert_eq!(restore_stats.artifact_count, 2);

        // Verify data
        let data = restore_storage
            .get("maven/com/example/1.0/test.jar")
            .await
            .unwrap();
        assert_eq!(&data[..], b"test-content");
    }
}
@@ -26,8 +26,6 @@ pub struct Config {
    #[serde(default)]
    pub docker: DockerConfig,
    #[serde(default)]
    pub go: GoConfig,
    #[serde(default)]
    pub raw: RawConfig,
    #[serde(default)]
    pub auth: AuthConfig,
@@ -72,10 +70,10 @@ pub struct StorageConfig {
    #[serde(default = "default_bucket")]
    pub bucket: String,
    /// S3 access key (optional, uses anonymous access if not set)
    #[serde(default, skip_serializing)]
    #[serde(default)]
    pub s3_access_key: Option<String>,
    /// S3 secret key (optional, uses anonymous access if not set)
    #[serde(default, skip_serializing)]
    #[serde(default)]
    pub s3_secret_key: Option<String>,
    /// S3 region (default: us-east-1)
    #[serde(default = "default_s3_region")]
@@ -129,48 +127,6 @@ pub struct PypiConfig {
    pub proxy_timeout: u64,
}

/// Go module proxy configuration (GOPROXY protocol)
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct GoConfig {
    /// Upstream Go module proxy URL (default: https://proxy.golang.org)
    #[serde(default = "default_go_proxy")]
    pub proxy: Option<String>,
    #[serde(default)]
    pub proxy_auth: Option<String>, // "user:pass" for basic auth
    #[serde(default = "default_timeout")]
    pub proxy_timeout: u64,
    /// Separate timeout for .zip downloads (default: 120s, zips can be large)
    #[serde(default = "default_go_zip_timeout")]
    pub proxy_timeout_zip: u64,
    /// Maximum module zip size in bytes (default: 100MB)
    #[serde(default = "default_go_max_zip_size")]
    pub max_zip_size: u64,
}

fn default_go_proxy() -> Option<String> {
    Some("https://proxy.golang.org".to_string())
}

fn default_go_zip_timeout() -> u64 {
    120
}

fn default_go_max_zip_size() -> u64 {
    104_857_600 // 100MB
}

impl Default for GoConfig {
    fn default() -> Self {
        Self {
            proxy: default_go_proxy(),
            proxy_auth: None,
            proxy_timeout: 30,
            proxy_timeout_zip: 120,
            max_zip_size: 104_857_600,
        }
    }
}

/// Docker registry configuration with upstream proxy support
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DockerConfig {
@@ -244,9 +200,6 @@ fn default_max_file_size() -> u64 {
pub struct AuthConfig {
    #[serde(default)]
    pub enabled: bool,
    /// Allow anonymous read access (pull/download without auth, push requires auth)
    #[serde(default)]
    pub anonymous_read: bool,
    #[serde(default = "default_htpasswd_file")]
    pub htpasswd_file: String,
    #[serde(default = "default_token_storage")]
@@ -326,7 +279,6 @@ impl Default for AuthConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            anonymous_read: false,
            htpasswd_file: "users.htpasswd".to_string(),
            token_storage: "data/tokens".to_string(),
        }
@@ -414,14 +366,11 @@ impl Config {
    pub fn warn_plaintext_credentials(&self) {
        // Docker upstreams
        for (i, upstream) in self.docker.upstreams.iter().enumerate() {
            if upstream.auth.is_some()
                && std::env::var("NORA_DOCKER_PROXIES").is_err()
                && std::env::var("NORA_DOCKER_UPSTREAMS").is_err()
            {
            if upstream.auth.is_some() && std::env::var("NORA_DOCKER_UPSTREAMS").is_err() {
                tracing::warn!(
                    upstream_index = i,
                    url = %upstream.url,
                    "Docker upstream credentials in config.toml are plaintext — consider NORA_DOCKER_PROXIES env var"
                    "Docker upstream credentials in config.toml are plaintext — consider NORA_DOCKER_UPSTREAMS env var"
                );
            }
        }
@@ -434,10 +383,6 @@ impl Config {
                );
            }
        }
        // Go
        if self.go.proxy_auth.is_some() && std::env::var("NORA_GO_PROXY_AUTH").is_err() {
            tracing::warn!("Go proxy credentials in config.toml are plaintext — consider NORA_GO_PROXY_AUTH env var");
        }
        // npm
        if self.npm.proxy_auth.is_some() && std::env::var("NORA_NPM_PROXY_AUTH").is_err() {
            tracing::warn!("npm proxy credentials in config.toml are plaintext — consider NORA_NPM_PROXY_AUTH env var");
@@ -448,68 +393,6 @@ impl Config {
        }
    }

    /// Validate configuration and return (warnings, errors).
    ///
    /// Warnings are logged but do not prevent startup.
    /// Errors indicate a fatal misconfiguration and should cause a panic.
    pub fn validate(&self) -> (Vec<String>, Vec<String>) {
        let mut warnings = Vec::new();
        let mut errors = Vec::new();

        // 1. Port must not be 0
        if self.server.port == 0 {
            errors.push("server.port must not be 0".to_string());
        }

        // 2. Storage path must not be empty when mode = Local
        if self.storage.mode == StorageMode::Local && self.storage.path.trim().is_empty() {
            errors.push("storage.path must not be empty when storage mode is local".to_string());
        }

        // 3. S3 bucket must not be empty when mode = S3
        if self.storage.mode == StorageMode::S3 && self.storage.bucket.trim().is_empty() {
            errors.push("storage.bucket must not be empty when storage mode is s3".to_string());
        }

        // 4. Rate limit values must be > 0 when rate limiting is enabled
        if self.rate_limit.enabled {
            if self.rate_limit.auth_rps == 0 {
                warnings
                    .push("rate_limit.auth_rps is 0 while rate limiting is enabled".to_string());
            }
            if self.rate_limit.auth_burst == 0 {
                warnings
                    .push("rate_limit.auth_burst is 0 while rate limiting is enabled".to_string());
            }
            if self.rate_limit.upload_rps == 0 {
                warnings
                    .push("rate_limit.upload_rps is 0 while rate limiting is enabled".to_string());
            }
            if self.rate_limit.upload_burst == 0 {
                warnings.push(
                    "rate_limit.upload_burst is 0 while rate limiting is enabled".to_string(),
                );
            }
            if self.rate_limit.general_rps == 0 {
                warnings
                    .push("rate_limit.general_rps is 0 while rate limiting is enabled".to_string());
            }
            if self.rate_limit.general_burst == 0 {
                warnings.push(
                    "rate_limit.general_burst is 0 while rate limiting is enabled".to_string(),
                );
            }
        }

        // 5. Body limit must be > 0
        if self.server.body_limit_mb == 0 {
            warnings
                .push("server.body_limit_mb is 0, no request bodies will be accepted".to_string());
        }

        (warnings, errors)
    }

    /// Load configuration with priority: ENV > config.toml > defaults
    pub fn load() -> Self {
        // 1. Start with defaults
@@ -521,19 +404,6 @@ impl Config {

        // 3. Override with ENV vars (highest priority)
        config.apply_env_overrides();

        // 4. Validate configuration
        let (warnings, errors) = config.validate();
        for w in &warnings {
            tracing::warn!("Config validation: {}", w);
        }
        if !errors.is_empty() {
            for e in &errors {
                tracing::error!("Config validation: {}", e);
            }
            panic!("Fatal configuration errors: {}", errors.join("; "));
        }

        config
    }
@@ -587,9 +457,6 @@ impl Config {
|
||||
if let Ok(val) = env::var("NORA_AUTH_ENABLED") {
|
||||
self.auth.enabled = val.to_lowercase() == "true" || val == "1";
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_AUTH_ANONYMOUS_READ") {
|
||||
self.auth.anonymous_read = val.to_lowercase() == "true" || val == "1";
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_AUTH_HTPASSWD_FILE") {
|
||||
self.auth.htpasswd_file = val;
|
||||
}
|
||||
@@ -659,14 +526,8 @@ impl Config {
|
||||
self.docker.proxy_timeout = timeout;
|
||||
}
|
||||
}
|
||||
// NORA_DOCKER_PROXIES format: "url1,url2" or "url1|auth1,url2|auth2"
|
||||
// Backward compat: NORA_DOCKER_UPSTREAMS still works but is deprecated
|
||||
if let Ok(val) =
|
||||
env::var("NORA_DOCKER_PROXIES").or_else(|_| env::var("NORA_DOCKER_UPSTREAMS"))
|
||||
{
|
||||
if env::var("NORA_DOCKER_PROXIES").is_err() {
|
||||
tracing::warn!("NORA_DOCKER_UPSTREAMS is deprecated, use NORA_DOCKER_PROXIES");
|
||||
}
|
||||
// NORA_DOCKER_UPSTREAMS format: "url1,url2" or "url1|auth1,url2|auth2"
|
||||
if let Ok(val) = env::var("NORA_DOCKER_UPSTREAMS") {
|
||||
self.docker.upstreams = val
|
||||
.split(',')
|
||||
.filter(|s| !s.is_empty())
|
||||
@@ -680,29 +541,6 @@ impl Config {
|
||||
.collect();
|
||||
}
|
||||
|
||||
// Go config
|
||||
if let Ok(val) = env::var("NORA_GO_PROXY") {
|
||||
self.go.proxy = if val.is_empty() { None } else { Some(val) };
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_GO_PROXY_AUTH") {
|
||||
self.go.proxy_auth = if val.is_empty() { None } else { Some(val) };
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_GO_PROXY_TIMEOUT") {
|
||||
if let Ok(timeout) = val.parse() {
|
||||
self.go.proxy_timeout = timeout;
|
||||
}
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_GO_PROXY_TIMEOUT_ZIP") {
|
||||
if let Ok(timeout) = val.parse() {
|
||||
self.go.proxy_timeout_zip = timeout;
|
||||
}
|
||||
}
|
||||
if let Ok(val) = env::var("NORA_GO_MAX_ZIP_SIZE") {
|
||||
if let Ok(size) = val.parse() {
|
||||
self.go.max_zip_size = size;
|
||||
}
|
||||
}
|
||||
|
||||
// Raw config
|
||||
if let Ok(val) = env::var("NORA_RAW_ENABLED") {
|
||||
self.raw.enabled = val.to_lowercase() == "true" || val == "1";
|
||||
@@ -784,7 +622,6 @@ impl Default for Config {
|
||||
maven: MavenConfig::default(),
|
||||
npm: NpmConfig::default(),
|
||||
pypi: PypiConfig::default(),
|
||||
go: GoConfig::default(),
|
||||
docker: DockerConfig::default(),
|
||||
raw: RawConfig::default(),
|
||||
auth: AuthConfig::default(),
|
||||
@@ -795,7 +632,6 @@ impl Default for Config {
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
@@ -830,545 +666,4 @@ mod tests {
|
||||
assert_eq!(config.rate_limit.upload_burst, 1000);
|
||||
assert_eq!(config.rate_limit.auth_burst, 5); // default
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_auth_header() {
|
||||
let header = basic_auth_header("user:pass");
|
||||
assert_eq!(header, "Basic dXNlcjpwYXNz");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_auth_header_empty() {
|
||||
let header = basic_auth_header("");
|
||||
assert!(header.starts_with("Basic "));
|
||||
}

    #[test]
    fn test_config_default() {
        let config = Config::default();
        assert_eq!(config.server.host, "127.0.0.1");
        assert_eq!(config.server.port, 4000);
        assert_eq!(config.server.body_limit_mb, 2048);
        assert!(config.server.public_url.is_none());
        assert_eq!(config.storage.path, "data/storage");
        assert_eq!(config.storage.mode, StorageMode::Local);
        assert_eq!(config.storage.bucket, "registry");
        assert_eq!(config.storage.s3_region, "us-east-1");
        assert!(!config.auth.enabled);
        assert_eq!(config.auth.htpasswd_file, "users.htpasswd");
        assert_eq!(config.auth.token_storage, "data/tokens");
    }

    #[test]
    fn test_maven_config_default() {
        let m = MavenConfig::default();
        assert_eq!(m.proxy_timeout, 30);
        assert_eq!(m.proxies.len(), 1);
        assert_eq!(m.proxies[0].url(), "https://repo1.maven.org/maven2");
        assert!(m.proxies[0].auth().is_none());
    }

    #[test]
    fn test_npm_config_default() {
        let n = NpmConfig::default();
        assert_eq!(n.proxy, Some("https://registry.npmjs.org".to_string()));
        assert!(n.proxy_auth.is_none());
        assert_eq!(n.proxy_timeout, 30);
        assert_eq!(n.metadata_ttl, 300);
    }

    #[test]
    fn test_pypi_config_default() {
        let p = PypiConfig::default();
        assert_eq!(p.proxy, Some("https://pypi.org/simple/".to_string()));
        assert!(p.proxy_auth.is_none());
        assert_eq!(p.proxy_timeout, 30);
    }

    #[test]
    fn test_docker_config_default() {
        let d = DockerConfig::default();
        assert_eq!(d.proxy_timeout, 60);
        assert_eq!(d.upstreams.len(), 1);
        assert_eq!(d.upstreams[0].url, "https://registry-1.docker.io");
        assert!(d.upstreams[0].auth.is_none());
    }

    #[test]
    fn test_raw_config_default() {
        let r = RawConfig::default();
        assert!(r.enabled);
        assert_eq!(r.max_file_size, 104_857_600);
    }

    #[test]
    fn test_auth_config_default() {
        let a = AuthConfig::default();
        assert!(!a.enabled);
        assert!(!a.anonymous_read);
        assert_eq!(a.htpasswd_file, "users.htpasswd");
        assert_eq!(a.token_storage, "data/tokens");
    }

    #[test]
    fn test_auth_anonymous_read_from_toml() {
        let toml = r#"
[server]
host = "127.0.0.1"
port = 4000

[storage]
mode = "local"

[auth]
enabled = true
anonymous_read = true
"#;

        let config: Config = toml::from_str(toml).unwrap();
        assert!(config.auth.enabled);
        assert!(config.auth.anonymous_read);
    }

    #[test]
    fn test_env_override_anonymous_read() {
        let mut config = Config::default();
        std::env::set_var("NORA_AUTH_ANONYMOUS_READ", "true");
        config.apply_env_overrides();
        assert!(config.auth.anonymous_read);
        std::env::remove_var("NORA_AUTH_ANONYMOUS_READ");
    }

    #[test]
    fn test_maven_proxy_entry_simple() {
        let entry = MavenProxyEntry::Simple("https://repo.example.com".to_string());
        assert_eq!(entry.url(), "https://repo.example.com");
        assert!(entry.auth().is_none());
    }

    #[test]
    fn test_maven_proxy_entry_full() {
        let entry = MavenProxyEntry::Full(MavenProxy {
            url: "https://private.repo.com".to_string(),
            auth: Some("user:secret".to_string()),
        });
        assert_eq!(entry.url(), "https://private.repo.com");
        assert_eq!(entry.auth(), Some("user:secret"));
    }

    #[test]
    fn test_maven_proxy_entry_full_no_auth() {
        let entry = MavenProxyEntry::Full(MavenProxy {
            url: "https://repo.com".to_string(),
            auth: None,
        });
        assert_eq!(entry.url(), "https://repo.com");
        assert!(entry.auth().is_none());
    }

    #[test]
    fn test_storage_mode_default() {
        let mode = StorageMode::default();
        assert_eq!(mode, StorageMode::Local);
    }

    #[test]
    fn test_env_override_server() {
        let mut config = Config::default();
        std::env::set_var("NORA_HOST", "0.0.0.0");
        std::env::set_var("NORA_PORT", "8080");
        std::env::set_var("NORA_PUBLIC_URL", "registry.example.com");
        std::env::set_var("NORA_BODY_LIMIT_MB", "4096");
        config.apply_env_overrides();
        assert_eq!(config.server.host, "0.0.0.0");
        assert_eq!(config.server.port, 8080);
        assert_eq!(
            config.server.public_url,
            Some("registry.example.com".to_string())
        );
        assert_eq!(config.server.body_limit_mb, 4096);
        std::env::remove_var("NORA_HOST");
        std::env::remove_var("NORA_PORT");
        std::env::remove_var("NORA_PUBLIC_URL");
        std::env::remove_var("NORA_BODY_LIMIT_MB");
    }

    #[test]
    fn test_env_override_storage() {
        let mut config = Config::default();
        std::env::set_var("NORA_STORAGE_MODE", "s3");
        std::env::set_var("NORA_STORAGE_PATH", "/data/nora");
        std::env::set_var("NORA_STORAGE_BUCKET", "my-bucket");
        std::env::set_var("NORA_STORAGE_S3_REGION", "eu-west-1");
        config.apply_env_overrides();
        assert_eq!(config.storage.mode, StorageMode::S3);
        assert_eq!(config.storage.path, "/data/nora");
        assert_eq!(config.storage.bucket, "my-bucket");
        assert_eq!(config.storage.s3_region, "eu-west-1");
        std::env::remove_var("NORA_STORAGE_MODE");
        std::env::remove_var("NORA_STORAGE_PATH");
        std::env::remove_var("NORA_STORAGE_BUCKET");
        std::env::remove_var("NORA_STORAGE_S3_REGION");
    }

    #[test]
    fn test_env_override_auth() {
        let mut config = Config::default();
        std::env::set_var("NORA_AUTH_ENABLED", "true");
        std::env::set_var("NORA_AUTH_HTPASSWD_FILE", "/etc/nora/users");
        std::env::set_var("NORA_AUTH_TOKEN_STORAGE", "/data/tokens");
        config.apply_env_overrides();
        assert!(config.auth.enabled);
        assert_eq!(config.auth.htpasswd_file, "/etc/nora/users");
        assert_eq!(config.auth.token_storage, "/data/tokens");
        std::env::remove_var("NORA_AUTH_ENABLED");
        std::env::remove_var("NORA_AUTH_HTPASSWD_FILE");
        std::env::remove_var("NORA_AUTH_TOKEN_STORAGE");
    }

    #[test]
    fn test_env_override_maven_proxies() {
        let mut config = Config::default();
        std::env::set_var(
            "NORA_MAVEN_PROXIES",
            "https://repo1.com,https://repo2.com|user:pass",
        );
        config.apply_env_overrides();
        assert_eq!(config.maven.proxies.len(), 2);
        assert_eq!(config.maven.proxies[0].url(), "https://repo1.com");
        assert!(config.maven.proxies[0].auth().is_none());
        assert_eq!(config.maven.proxies[1].url(), "https://repo2.com");
        assert_eq!(config.maven.proxies[1].auth(), Some("user:pass"));
        std::env::remove_var("NORA_MAVEN_PROXIES");
    }
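    // Hypothetical helper showing how the "url,url|auth" value above could be
    // parsed (the real logic lives in apply_env_overrides): entries are
    // comma-separated, and an optional "|user:pass" suffix attaches
    // credentials to that entry only.
    fn parse_proxy_entries_sketch(raw: &str) -> Vec<MavenProxyEntry> {
        raw.split(',')
            .map(str::trim)
            .filter(|s| !s.is_empty())
            .map(|entry| match entry.split_once('|') {
                Some((url, auth)) => MavenProxyEntry::Full(MavenProxy {
                    url: url.to_string(),
                    auth: Some(auth.to_string()),
                }),
                None => MavenProxyEntry::Simple(entry.to_string()),
            })
            .collect()
    }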

    #[test]
    fn test_env_override_npm() {
        let mut config = Config::default();
        std::env::set_var("NORA_NPM_PROXY", "https://npm.company.com");
        std::env::set_var("NORA_NPM_PROXY_AUTH", "user:token");
        std::env::set_var("NORA_NPM_PROXY_TIMEOUT", "60");
        std::env::set_var("NORA_NPM_METADATA_TTL", "600");
        config.apply_env_overrides();
        assert_eq!(
            config.npm.proxy,
            Some("https://npm.company.com".to_string())
        );
        assert_eq!(config.npm.proxy_auth, Some("user:token".to_string()));
        assert_eq!(config.npm.proxy_timeout, 60);
        assert_eq!(config.npm.metadata_ttl, 600);
        std::env::remove_var("NORA_NPM_PROXY");
        std::env::remove_var("NORA_NPM_PROXY_AUTH");
        std::env::remove_var("NORA_NPM_PROXY_TIMEOUT");
        std::env::remove_var("NORA_NPM_METADATA_TTL");
    }

    #[test]
    fn test_env_override_raw() {
        let mut config = Config::default();
        std::env::set_var("NORA_RAW_ENABLED", "false");
        std::env::set_var("NORA_RAW_MAX_FILE_SIZE", "524288000");
        config.apply_env_overrides();
        assert!(!config.raw.enabled);
        assert_eq!(config.raw.max_file_size, 524288000);
        std::env::remove_var("NORA_RAW_ENABLED");
        std::env::remove_var("NORA_RAW_MAX_FILE_SIZE");
    }

    #[test]
    fn test_env_override_rate_limit() {
        let mut config = Config::default();
        std::env::set_var("NORA_RATE_LIMIT_ENABLED", "false");
        std::env::set_var("NORA_RATE_LIMIT_AUTH_RPS", "10");
        std::env::set_var("NORA_RATE_LIMIT_GENERAL_BURST", "500");
        config.apply_env_overrides();
        assert!(!config.rate_limit.enabled);
        assert_eq!(config.rate_limit.auth_rps, 10);
        assert_eq!(config.rate_limit.general_burst, 500);
        std::env::remove_var("NORA_RATE_LIMIT_ENABLED");
        std::env::remove_var("NORA_RATE_LIMIT_AUTH_RPS");
        std::env::remove_var("NORA_RATE_LIMIT_GENERAL_BURST");
    }

    #[test]
    fn test_config_from_toml_full() {
        let toml = r#"
[server]
host = "0.0.0.0"
port = 8080
public_url = "nora.example.com"
body_limit_mb = 4096

[storage]
mode = "s3"
path = "/data"
s3_url = "http://minio:9000"
bucket = "artifacts"
s3_region = "eu-central-1"

[auth]
enabled = true
htpasswd_file = "/etc/nora/users.htpasswd"

[raw]
enabled = false
max_file_size = 500000000
"#;

        let config: Config = toml::from_str(toml).unwrap();
        assert_eq!(config.server.host, "0.0.0.0");
        assert_eq!(config.server.port, 8080);
        assert_eq!(
            config.server.public_url,
            Some("nora.example.com".to_string())
        );
        assert_eq!(config.server.body_limit_mb, 4096);
        assert_eq!(config.storage.mode, StorageMode::S3);
        assert_eq!(config.storage.s3_url, "http://minio:9000");
        assert_eq!(config.storage.bucket, "artifacts");
        assert!(config.auth.enabled);
        assert!(!config.raw.enabled);
        assert_eq!(config.raw.max_file_size, 500000000);
    }

    #[test]
    fn test_config_from_toml_minimal() {
        let toml = r#"
[server]
host = "127.0.0.1"
port = 4000

[storage]
mode = "local"
"#;

        let config: Config = toml::from_str(toml).unwrap();
        // Defaults should be filled
        assert_eq!(config.storage.path, "data/storage");
        assert_eq!(config.maven.proxies.len(), 1);
        assert_eq!(
            config.npm.proxy,
            Some("https://registry.npmjs.org".to_string())
        );
        assert_eq!(config.docker.upstreams.len(), 1);
        assert!(config.raw.enabled);
        assert!(!config.auth.enabled);
    }

    #[test]
    fn test_config_toml_docker_upstreams() {
        let toml = r#"
[server]
host = "127.0.0.1"
port = 4000

[storage]
mode = "local"

[docker]
proxy_timeout = 120

[[docker.upstreams]]
url = "https://mirror.gcr.io"

[[docker.upstreams]]
url = "https://private.registry.io"
auth = "user:pass"
"#;

        let config: Config = toml::from_str(toml).unwrap();
        assert_eq!(config.docker.proxy_timeout, 120);
        assert_eq!(config.docker.upstreams.len(), 2);
        assert!(config.docker.upstreams[0].auth.is_none());
        assert_eq!(
            config.docker.upstreams[1].auth,
            Some("user:pass".to_string())
        );
    }

    #[test]
    fn test_validate_default_config_ok() {
        let config = Config::default();
        let (warnings, errors) = config.validate();
        assert!(
            errors.is_empty(),
            "default config should have no errors: {:?}",
            errors
        );
        assert!(
            warnings.is_empty(),
            "default config should have no warnings: {:?}",
            warnings
        );
    }

    #[test]
    fn test_validate_port_zero() {
        let mut config = Config::default();
        config.server.port = 0;
        let (_, errors) = config.validate();
        assert_eq!(errors.len(), 1);
        assert!(errors[0].contains("port"));
    }

    #[test]
    fn test_validate_empty_storage_path_local() {
        let mut config = Config::default();
        config.storage.mode = StorageMode::Local;
        config.storage.path = String::new();
        let (_, errors) = config.validate();
        assert_eq!(errors.len(), 1);
        assert!(errors[0].contains("storage.path"));
    }

    #[test]
    fn test_validate_whitespace_storage_path_local() {
        let mut config = Config::default();
        config.storage.mode = StorageMode::Local;
        config.storage.path = " ".to_string();
        let (_, errors) = config.validate();
        assert_eq!(errors.len(), 1);
        assert!(errors[0].contains("storage.path"));
    }

    #[test]
    fn test_validate_empty_bucket_s3() {
        let mut config = Config::default();
        config.storage.mode = StorageMode::S3;
        config.storage.bucket = String::new();
        let (_, errors) = config.validate();
        assert_eq!(errors.len(), 1);
        assert!(errors[0].contains("storage.bucket"));
    }

    #[test]
    fn test_validate_empty_storage_path_s3_ok() {
        // Empty path is fine when mode is S3
        let mut config = Config::default();
        config.storage.mode = StorageMode::S3;
        config.storage.path = String::new();
        let (_, errors) = config.validate();
        assert!(errors.is_empty());
    }

    #[test]
    fn test_validate_rate_limit_zero_rps() {
        let mut config = Config::default();
        config.rate_limit.enabled = true;
        config.rate_limit.auth_rps = 0;
        let (warnings, errors) = config.validate();
        assert!(errors.is_empty());
        assert_eq!(warnings.len(), 1);
        assert!(warnings[0].contains("auth_rps"));
    }

    #[test]
    fn test_validate_rate_limit_disabled_zero_ok() {
        // Zero rate limit values are fine when rate limiting is disabled
        let mut config = Config::default();
        config.rate_limit.enabled = false;
        config.rate_limit.auth_rps = 0;
        config.rate_limit.auth_burst = 0;
        let (warnings, errors) = config.validate();
        assert!(errors.is_empty());
        assert!(warnings.is_empty());
    }

    #[test]
    fn test_validate_rate_limit_all_zeros() {
        let mut config = Config::default();
        config.rate_limit.enabled = true;
        config.rate_limit.auth_rps = 0;
        config.rate_limit.auth_burst = 0;
        config.rate_limit.upload_rps = 0;
        config.rate_limit.upload_burst = 0;
        config.rate_limit.general_rps = 0;
        config.rate_limit.general_burst = 0;
        let (warnings, errors) = config.validate();
        assert!(errors.is_empty());
        assert_eq!(warnings.len(), 6);
    }

    #[test]
    fn test_validate_body_limit_zero() {
        let mut config = Config::default();
        config.server.body_limit_mb = 0;
        let (warnings, errors) = config.validate();
        assert!(errors.is_empty());
        assert_eq!(warnings.len(), 1);
        assert!(warnings[0].contains("body_limit_mb"));
    }

    #[test]
    fn test_validate_multiple_errors() {
        let mut config = Config::default();
        config.server.port = 0;
        config.storage.mode = StorageMode::Local;
        config.storage.path = String::new();
        let (_, errors) = config.validate();
        assert_eq!(errors.len(), 2);
    }

    #[test]
    fn test_validate_warnings_and_errors_together() {
        let mut config = Config::default();
        config.server.port = 0;
        config.server.body_limit_mb = 0;
        config.rate_limit.enabled = true;
        config.rate_limit.auth_rps = 0;
        let (warnings, errors) = config.validate();
        assert_eq!(errors.len(), 1);
        assert_eq!(warnings.len(), 2); // body_limit + auth_rps
    }
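    // Sketch of the validate() contract the tests above rely on (hypothetical
    // body; the real checks live elsewhere in config.rs): hard failures go to
    // `errors`, soft misconfigurations to `warnings`.
    fn validate_sketch(config: &Config) -> (Vec<String>, Vec<String>) {
        let mut warnings = Vec::new();
        let mut errors = Vec::new();
        if config.server.port == 0 {
            errors.push("server.port must be non-zero".to_string());
        }
        if config.storage.mode == StorageMode::Local && config.storage.path.trim().is_empty() {
            errors.push("storage.path is required in local mode".to_string());
        }
        if config.server.body_limit_mb == 0 {
            warnings.push("server.body_limit_mb is 0; all uploads will fail".to_string());
        }
        (warnings, errors)
    }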
    #[test]
    fn test_env_override_docker_proxies_and_backward_compat() {
        // Test new NORA_DOCKER_PROXIES name
        std::env::remove_var("NORA_DOCKER_UPSTREAMS");
        std::env::set_var(
            "NORA_DOCKER_PROXIES",
            "https://mirror.gcr.io,https://private.io|token123",
        );
        let mut config = Config::default();
        config.apply_env_overrides();
        assert_eq!(config.docker.upstreams.len(), 2);
        assert_eq!(config.docker.upstreams[0].url, "https://mirror.gcr.io");
        assert!(config.docker.upstreams[0].auth.is_none());
        assert_eq!(config.docker.upstreams[1].url, "https://private.io");
        assert_eq!(
            config.docker.upstreams[1].auth,
            Some("token123".to_string())
        );
        std::env::remove_var("NORA_DOCKER_PROXIES");

        // Test backward compat: old NORA_DOCKER_UPSTREAMS still works
        std::env::remove_var("NORA_DOCKER_PROXIES");
        std::env::set_var("NORA_DOCKER_UPSTREAMS", "https://legacy.io|secret");
        let mut config2 = Config::default();
        config2.apply_env_overrides();
        assert_eq!(config2.docker.upstreams.len(), 1);
        assert_eq!(config2.docker.upstreams[0].url, "https://legacy.io");
        assert_eq!(config2.docker.upstreams[0].auth, Some("secret".to_string()));
        std::env::remove_var("NORA_DOCKER_UPSTREAMS");
    }

    #[test]
    fn test_env_override_go_proxy() {
        let mut config = Config::default();
        std::env::set_var("NORA_GO_PROXY", "https://goproxy.company.com");
        config.apply_env_overrides();
        assert_eq!(
            config.go.proxy,
            Some("https://goproxy.company.com".to_string()),
        );
        std::env::remove_var("NORA_GO_PROXY");
    }

    #[test]
    fn test_env_override_go_proxy_auth() {
        let mut config = Config::default();
        std::env::set_var("NORA_GO_PROXY_AUTH", "user:pass");
        config.apply_env_overrides();
        assert_eq!(config.go.proxy_auth, Some("user:pass".to_string()));
        std::env::remove_var("NORA_GO_PROXY_AUTH");
    }
}

@@ -2,83 +2,52 @@
// SPDX-License-Identifier: MIT

use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Instant;
use tracing::{info, warn};

/// Known registry names for per-registry metrics
const REGISTRIES: &[&str] = &["docker", "maven", "npm", "cargo", "pypi", "raw", "go"];

/// Serializable snapshot of metrics for persistence.
/// Uses HashMap for per-registry counters — adding a new registry only
/// requires adding its name to REGISTRIES (one line).
/// Serializable snapshot of metrics for persistence
#[derive(Serialize, Deserialize, Default)]
struct MetricsSnapshot {
    downloads: u64,
    uploads: u64,
    cache_hits: u64,
    cache_misses: u64,
    #[serde(default)]
    registry_downloads: HashMap<String, u64>,
    #[serde(default)]
    registry_uploads: HashMap<String, u64>,
    docker_downloads: u64,
    docker_uploads: u64,
    npm_downloads: u64,
    maven_downloads: u64,
    maven_uploads: u64,
    cargo_downloads: u64,
    pypi_downloads: u64,
    raw_downloads: u64,
    raw_uploads: u64,
}

/// Thread-safe atomic counter map for per-registry metrics.
struct CounterMap(HashMap<String, AtomicU64>);

impl CounterMap {
    fn new(keys: &[&str]) -> Self {
        let mut map = HashMap::with_capacity(keys.len());
        for &k in keys {
            map.insert(k.to_string(), AtomicU64::new(0));
        }
        Self(map)
    }

    fn inc(&self, key: &str) {
        if let Some(counter) = self.0.get(key) {
            counter.fetch_add(1, Ordering::Relaxed);
        }
    }

    fn get(&self, key: &str) -> u64 {
        self.0
            .get(key)
            .map(|c| c.load(Ordering::Relaxed))
            .unwrap_or(0)
    }

    fn snapshot(&self) -> HashMap<String, u64> {
        self.0
            .iter()
            .map(|(k, v)| (k.clone(), v.load(Ordering::Relaxed)))
            .collect()
    }

    fn load_from(&self, data: &HashMap<String, u64>) {
        for (k, v) in data {
            if let Some(counter) = self.0.get(k.as_str()) {
                counter.store(*v, Ordering::Relaxed);
            }
        }
    }
}
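// Usage sketch for CounterMap (hypothetical demo, not part of the module):
// unknown keys are silently ignored by inc() and read back as 0, so a typo in
// a registry name can never panic or allocate on the request path.
#[cfg(test)]
fn counter_map_demo() {
    let counters = CounterMap::new(REGISTRIES);
    counters.inc("docker");
    counters.inc("docker");
    counters.inc("not-a-registry"); // no-op: key was never registered
    assert_eq!(counters.get("docker"), 2);
    assert_eq!(counters.get("not-a-registry"), 0);
}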

/// Dashboard metrics for tracking registry activity.
/// Global counters are separate fields; per-registry counters use CounterMap.
/// Dashboard metrics for tracking registry activity
/// Uses atomic counters for thread-safe access without locks
pub struct DashboardMetrics {
    // Global counters
    pub downloads: AtomicU64,
    pub uploads: AtomicU64,
    pub cache_hits: AtomicU64,
    pub cache_misses: AtomicU64,

    registry_downloads: CounterMap,
    registry_uploads: CounterMap,
    // Per-registry download counters
    pub docker_downloads: AtomicU64,
    pub docker_uploads: AtomicU64,
    pub npm_downloads: AtomicU64,
    pub maven_downloads: AtomicU64,
    pub maven_uploads: AtomicU64,
    pub cargo_downloads: AtomicU64,
    pub pypi_downloads: AtomicU64,
    pub raw_downloads: AtomicU64,
    pub raw_uploads: AtomicU64,

    pub start_time: Instant,

    /// Path to metrics.json for persistence
    persist_path: Option<PathBuf>,
}

@@ -89,8 +58,15 @@ impl DashboardMetrics {
            uploads: AtomicU64::new(0),
            cache_hits: AtomicU64::new(0),
            cache_misses: AtomicU64::new(0),
            registry_downloads: CounterMap::new(REGISTRIES),
            registry_uploads: CounterMap::new(REGISTRIES),
            docker_downloads: AtomicU64::new(0),
            docker_uploads: AtomicU64::new(0),
            npm_downloads: AtomicU64::new(0),
            maven_downloads: AtomicU64::new(0),
            maven_uploads: AtomicU64::new(0),
            cargo_downloads: AtomicU64::new(0),
            pypi_downloads: AtomicU64::new(0),
            raw_downloads: AtomicU64::new(0),
            raw_uploads: AtomicU64::new(0),
            start_time: Instant::now(),
            persist_path: None,
        }
@@ -100,7 +76,9 @@ impl DashboardMetrics {
    pub fn with_persistence(storage_path: &str) -> Self {
        let path = Path::new(storage_path).join("metrics.json");
        let mut metrics = Self::new();
        metrics.persist_path = Some(path.clone());

        // Load existing metrics if file exists
        if path.exists() {
            match std::fs::read_to_string(&path) {
                Ok(data) => match serde_json::from_str::<MetricsSnapshot>(&data) {
@@ -109,10 +87,15 @@ impl DashboardMetrics {
                        metrics.uploads = AtomicU64::new(snap.uploads);
                        metrics.cache_hits = AtomicU64::new(snap.cache_hits);
                        metrics.cache_misses = AtomicU64::new(snap.cache_misses);
                        metrics
                            .registry_downloads
                            .load_from(&snap.registry_downloads);
                        metrics.registry_uploads.load_from(&snap.registry_uploads);
                        metrics.docker_downloads = AtomicU64::new(snap.docker_downloads);
                        metrics.docker_uploads = AtomicU64::new(snap.docker_uploads);
                        metrics.npm_downloads = AtomicU64::new(snap.npm_downloads);
                        metrics.maven_downloads = AtomicU64::new(snap.maven_downloads);
                        metrics.maven_uploads = AtomicU64::new(snap.maven_uploads);
                        metrics.cargo_downloads = AtomicU64::new(snap.cargo_downloads);
                        metrics.pypi_downloads = AtomicU64::new(snap.pypi_downloads);
                        metrics.raw_downloads = AtomicU64::new(snap.raw_downloads);
                        metrics.raw_uploads = AtomicU64::new(snap.raw_uploads);
                        info!(
                            downloads = snap.downloads,
                            uploads = snap.uploads,
@@ -125,12 +108,11 @@ impl DashboardMetrics {
            }
        }

        metrics.persist_path = Some(path);
        metrics
    }

    /// Save current metrics to disk (async to avoid blocking the runtime)
    pub async fn save(&self) {
    /// Save current metrics to disk
    pub fn save(&self) {
        let Some(path) = &self.persist_path else {
            return;
        };
@@ -139,35 +121,61 @@ impl DashboardMetrics {
            uploads: self.uploads.load(Ordering::Relaxed),
            cache_hits: self.cache_hits.load(Ordering::Relaxed),
            cache_misses: self.cache_misses.load(Ordering::Relaxed),
            registry_downloads: self.registry_downloads.snapshot(),
            registry_uploads: self.registry_uploads.snapshot(),
            docker_downloads: self.docker_downloads.load(Ordering::Relaxed),
            docker_uploads: self.docker_uploads.load(Ordering::Relaxed),
            npm_downloads: self.npm_downloads.load(Ordering::Relaxed),
            maven_downloads: self.maven_downloads.load(Ordering::Relaxed),
            maven_uploads: self.maven_uploads.load(Ordering::Relaxed),
            cargo_downloads: self.cargo_downloads.load(Ordering::Relaxed),
            pypi_downloads: self.pypi_downloads.load(Ordering::Relaxed),
            raw_downloads: self.raw_downloads.load(Ordering::Relaxed),
            raw_uploads: self.raw_uploads.load(Ordering::Relaxed),
        };
        // Atomic write: write to tmp then rename
        let tmp = path.with_extension("json.tmp");
        if let Ok(data) = serde_json::to_string_pretty(&snap) {
            if tokio::fs::write(&tmp, &data).await.is_ok() {
                let _ = tokio::fs::rename(&tmp, path).await;
            if std::fs::write(&tmp, &data).is_ok() {
                let _ = std::fs::rename(&tmp, path);
            }
        }
    }

    /// Record a download event for the specified registry
    pub fn record_download(&self, registry: &str) {
        self.downloads.fetch_add(1, Ordering::Relaxed);
        self.registry_downloads.inc(registry);
        match registry {
            "docker" => self.docker_downloads.fetch_add(1, Ordering::Relaxed),
            "npm" => self.npm_downloads.fetch_add(1, Ordering::Relaxed),
            "maven" => self.maven_downloads.fetch_add(1, Ordering::Relaxed),
            "cargo" => self.cargo_downloads.fetch_add(1, Ordering::Relaxed),
            "pypi" => self.pypi_downloads.fetch_add(1, Ordering::Relaxed),
            "raw" => self.raw_downloads.fetch_add(1, Ordering::Relaxed),
            _ => 0,
        };
    }

    /// Record an upload event for the specified registry
    pub fn record_upload(&self, registry: &str) {
        self.uploads.fetch_add(1, Ordering::Relaxed);
        self.registry_uploads.inc(registry);
        match registry {
            "docker" => self.docker_uploads.fetch_add(1, Ordering::Relaxed),
            "maven" => self.maven_uploads.fetch_add(1, Ordering::Relaxed),
            "raw" => self.raw_uploads.fetch_add(1, Ordering::Relaxed),
            _ => 0,
        };
    }

    /// Record a cache hit
    pub fn record_cache_hit(&self) {
        self.cache_hits.fetch_add(1, Ordering::Relaxed);
    }

    /// Record a cache miss
    pub fn record_cache_miss(&self) {
        self.cache_misses.fetch_add(1, Ordering::Relaxed);
    }

    /// Calculate the cache hit rate as a percentage
    pub fn cache_hit_rate(&self) -> f64 {
        let hits = self.cache_hits.load(Ordering::Relaxed);
        let misses = self.cache_misses.load(Ordering::Relaxed);
@@ -179,12 +187,27 @@ impl DashboardMetrics {
        }
    }

    /// Get download count for a specific registry
    pub fn get_registry_downloads(&self, registry: &str) -> u64 {
        self.registry_downloads.get(registry)
        match registry {
            "docker" => self.docker_downloads.load(Ordering::Relaxed),
            "npm" => self.npm_downloads.load(Ordering::Relaxed),
            "maven" => self.maven_downloads.load(Ordering::Relaxed),
            "cargo" => self.cargo_downloads.load(Ordering::Relaxed),
            "pypi" => self.pypi_downloads.load(Ordering::Relaxed),
            "raw" => self.raw_downloads.load(Ordering::Relaxed),
            _ => 0,
        }
    }

    /// Get upload count for a specific registry
    pub fn get_registry_uploads(&self, registry: &str) -> u64 {
        self.registry_uploads.get(registry)
        match registry {
            "docker" => self.docker_uploads.load(Ordering::Relaxed),
            "maven" => self.maven_uploads.load(Ordering::Relaxed),
            "raw" => self.raw_uploads.load(Ordering::Relaxed),
            _ => 0,
        }
    }
}
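// The cache_hit_rate body is cut off by the hunk above; a plausible sketch of
// the percentage computation (hypothetical), consistent with the tests below
// (0.0 with no traffic, 100.0 for all hits, 50.0 for an even split):
fn cache_hit_rate_sketch(hits: u64, misses: u64) -> f64 {
    let total = hits + misses;
    if total == 0 {
        0.0
    } else {
        (hits as f64 / total as f64) * 100.0
    }
}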

@@ -193,149 +216,3 @@ impl Default for DashboardMetrics {
        Self::new()
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use tempfile::TempDir;

    #[test]
    fn test_new_defaults() {
        let m = DashboardMetrics::new();
        assert_eq!(m.downloads.load(Ordering::Relaxed), 0);
        assert_eq!(m.uploads.load(Ordering::Relaxed), 0);
        assert_eq!(m.cache_hits.load(Ordering::Relaxed), 0);
        assert_eq!(m.cache_misses.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn test_record_download_all_registries() {
        let m = DashboardMetrics::new();
        for reg in &["docker", "npm", "maven", "cargo", "pypi", "raw"] {
            m.record_download(reg);
        }
        assert_eq!(m.downloads.load(Ordering::Relaxed), 6);
        assert_eq!(m.get_registry_downloads("docker"), 1);
        assert_eq!(m.get_registry_downloads("npm"), 1);
        assert_eq!(m.get_registry_downloads("maven"), 1);
        assert_eq!(m.get_registry_downloads("cargo"), 1);
        assert_eq!(m.get_registry_downloads("pypi"), 1);
        assert_eq!(m.get_registry_downloads("raw"), 1);
    }

    #[test]
    fn test_record_download_unknown_registry() {
        let m = DashboardMetrics::new();
        m.record_download("unknown");
        assert_eq!(m.downloads.load(Ordering::Relaxed), 1);
        assert_eq!(m.get_registry_downloads("docker"), 0);
    }

    #[test]
    fn test_record_upload() {
        let m = DashboardMetrics::new();
        m.record_upload("docker");
        m.record_upload("maven");
        m.record_upload("raw");
        assert_eq!(m.uploads.load(Ordering::Relaxed), 3);
        assert_eq!(m.get_registry_uploads("docker"), 1);
        assert_eq!(m.get_registry_uploads("maven"), 1);
        assert_eq!(m.get_registry_uploads("raw"), 1);
    }

    #[test]
    fn test_record_upload_unknown_registry() {
        let m = DashboardMetrics::new();
        m.record_upload("npm");
        assert_eq!(m.uploads.load(Ordering::Relaxed), 1);
    }

    #[test]
    fn test_cache_hit_rate_zero() {
        let m = DashboardMetrics::new();
        assert_eq!(m.cache_hit_rate(), 0.0);
    }

    #[test]
    fn test_cache_hit_rate_all_hits() {
        let m = DashboardMetrics::new();
        m.record_cache_hit();
        m.record_cache_hit();
        assert_eq!(m.cache_hit_rate(), 100.0);
    }

    #[test]
    fn test_cache_hit_rate_mixed() {
        let m = DashboardMetrics::new();
        m.record_cache_hit();
        m.record_cache_miss();
        assert_eq!(m.cache_hit_rate(), 50.0);
    }

    #[test]
    fn test_get_registry_downloads() {
        let m = DashboardMetrics::new();
        m.record_download("docker");
        m.record_download("docker");
        m.record_download("npm");
        assert_eq!(m.get_registry_downloads("docker"), 2);
        assert_eq!(m.get_registry_downloads("npm"), 1);
        assert_eq!(m.get_registry_downloads("cargo"), 0);
        assert_eq!(m.get_registry_downloads("unknown"), 0);
    }

    #[test]
    fn test_get_registry_uploads() {
        let m = DashboardMetrics::new();
        m.record_upload("docker");
        assert_eq!(m.get_registry_uploads("docker"), 1);
        assert_eq!(m.get_registry_uploads("maven"), 0);
        assert_eq!(m.get_registry_uploads("unknown"), 0);
    }

    #[tokio::test]
    async fn test_persistence_save_and_load() {
        let tmp = TempDir::new().unwrap();
        let path = tmp.path().to_str().unwrap();

        {
            let m = DashboardMetrics::with_persistence(path);
            m.record_download("docker");
            m.record_download("docker");
            m.record_upload("maven");
            m.record_cache_hit();
            m.save().await;
        }

        {
            let m = DashboardMetrics::with_persistence(path);
            assert_eq!(m.downloads.load(Ordering::Relaxed), 2);
            assert_eq!(m.uploads.load(Ordering::Relaxed), 1);
            assert_eq!(m.get_registry_downloads("docker"), 2);
            assert_eq!(m.get_registry_uploads("maven"), 1);
            assert_eq!(m.cache_hits.load(Ordering::Relaxed), 1);
        }
    }

    #[test]
    fn test_persistence_missing_file() {
        let tmp = TempDir::new().unwrap();
        let path = tmp.path().to_str().unwrap();
        let m = DashboardMetrics::with_persistence(path);
        assert_eq!(m.downloads.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn test_default() {
        let m = DashboardMetrics::default();
        assert_eq!(m.downloads.load(Ordering::Relaxed), 0);
    }

    #[test]
    fn test_go_registry_supported() {
        let m = DashboardMetrics::new();
        m.record_download("go");
        assert_eq!(m.get_registry_downloads("go"), 1);
    }
}

@@ -51,11 +51,11 @@ struct ErrorResponse {

impl IntoResponse for AppError {
    fn into_response(self) -> Response {
        let (status, message) = match self {
            AppError::NotFound(msg) => (StatusCode::NOT_FOUND, msg),
            AppError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg),
            AppError::Unauthorized(msg) => (StatusCode::UNAUTHORIZED, msg),
            AppError::Internal(msg) => (StatusCode::INTERNAL_SERVER_ERROR, msg),
        let (status, message) = match &self {
            AppError::NotFound(msg) => (StatusCode::NOT_FOUND, msg.clone()),
            AppError::BadRequest(msg) => (StatusCode::BAD_REQUEST, msg.clone()),
            AppError::Unauthorized(msg) => (StatusCode::UNAUTHORIZED, msg.clone()),
            AppError::Internal(msg) => (StatusCode::INTERNAL_SERVER_ERROR, msg.clone()),
            AppError::Storage(e) => match e {
                StorageError::NotFound => (StatusCode::NOT_FOUND, "Resource not found".to_string()),
                StorageError::Validation(v) => (StatusCode::BAD_REQUEST, v.to_string()),
@@ -124,77 +124,4 @@ mod tests {
        let err = AppError::NotFound("image not found".to_string());
        assert_eq!(err.to_string(), "Not found: image not found");
    }

    #[test]
    fn test_error_constructors() {
        let err = AppError::not_found("missing");
        assert!(matches!(err, AppError::NotFound(_)));
        assert_eq!(err.to_string(), "Not found: missing");

        let err = AppError::bad_request("invalid input");
        assert!(matches!(err, AppError::BadRequest(_)));
        assert_eq!(err.to_string(), "Bad request: invalid input");

        let err = AppError::unauthorized("no token");
        assert!(matches!(err, AppError::Unauthorized(_)));
        assert_eq!(err.to_string(), "Unauthorized: no token");

        let err = AppError::internal("db crashed");
        assert!(matches!(err, AppError::Internal(_)));
        assert_eq!(err.to_string(), "Internal error: db crashed");
    }

    #[test]
    fn test_error_display_storage() {
        let err = AppError::Storage(StorageError::NotFound);
        assert!(err.to_string().contains("Storage error"));
    }

    #[test]
    fn test_error_display_validation() {
        let err = AppError::Validation(ValidationError::PathTraversal);
        assert!(err.to_string().contains("Validation error"));
    }

    #[test]
    fn test_error_into_response_not_found() {
        let err = AppError::NotFound("gone".to_string());
        let response = err.into_response();
        assert_eq!(response.status(), StatusCode::NOT_FOUND);
    }

    #[test]
    fn test_error_into_response_bad_request() {
        let err = AppError::BadRequest("bad".to_string());
        let response = err.into_response();
        assert_eq!(response.status(), StatusCode::BAD_REQUEST);
    }

    #[test]
    fn test_error_into_response_unauthorized() {
        let err = AppError::Unauthorized("nope".to_string());
        let response = err.into_response();
        assert_eq!(response.status(), StatusCode::UNAUTHORIZED);
    }

    #[test]
    fn test_error_into_response_internal() {
        let err = AppError::Internal("boom".to_string());
        let response = err.into_response();
        assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR);
    }

    #[test]
    fn test_error_into_response_storage_not_found() {
        let err = AppError::Storage(StorageError::NotFound);
        let response = err.into_response();
        assert_eq!(response.status(), StatusCode::NOT_FOUND);
    }

    #[test]
    fn test_error_into_response_validation() {
        let err = AppError::Validation(ValidationError::EmptyInput);
        let response = err.into_response();
        assert_eq!(response.status(), StatusCode::BAD_REQUEST);
    }
}
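// Sketch of how into_response plausibly finishes after the match above
// (hypothetical; the tail falls outside this hunk): the mapped message is
// wrapped in the ErrorResponse JSON body alongside the status code.
//
//     let body = Json(ErrorResponse { error: message });
//     (status, body).into_response()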

@@ -72,15 +72,10 @@ pub async fn run_gc(storage: &Storage, dry_run: bool) -> GcResult {

async fn collect_all_blobs(storage: &Storage) -> Vec<String> {
    let mut blobs = Vec::new();
    // Collect blobs from all registry types, not just Docker
    for prefix in &[
        "docker/", "maven/", "npm/", "cargo/", "pypi/", "raw/", "go/",
    ] {
        let keys = storage.list(prefix).await;
        for key in keys {
            if key.contains("/blobs/") || key.contains("/tarballs/") {
                blobs.push(key);
            }
    let docker_blobs = storage.list("docker/").await;
    for key in docker_blobs {
        if key.contains("/blobs/") {
            blobs.push(key);
        }
    }
    blobs
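// Hypothetical sketch of the orphan computation run_gc presumably performs on
// the two listings (collect_all_blobs above, collect_referenced_digests
// below): a blob key is orphaned when no referenced digest appears in it.
fn find_orphans_sketch(
    all_blobs: &[String],
    referenced: &std::collections::HashSet<String>,
) -> Vec<String> {
    all_blobs
        .iter()
        .filter(|key| !referenced.iter().any(|digest| key.contains(digest.as_str())))
        .cloned()
        .collect()
}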
@@ -124,198 +119,3 @@ async fn collect_referenced_digests(storage: &Storage) -> HashSet<String> {

    referenced
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    #[test]
    fn test_gc_result_defaults() {
        let result = GcResult {
            total_blobs: 0,
            referenced_blobs: 0,
            orphaned_blobs: 0,
            deleted_blobs: 0,
            orphan_keys: vec![],
        };
        assert_eq!(result.total_blobs, 0);
        assert!(result.orphan_keys.is_empty());
    }

    #[tokio::test]
    async fn test_gc_empty_storage() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());

        let result = run_gc(&storage, true).await;
        assert_eq!(result.total_blobs, 0);
        assert_eq!(result.referenced_blobs, 0);
        assert_eq!(result.orphaned_blobs, 0);
        assert_eq!(result.deleted_blobs, 0);
    }

    #[tokio::test]
    async fn test_gc_no_orphans() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());

        // Create a manifest that references a blob
        let manifest = serde_json::json!({
            "config": {"digest": "sha256:configabc"},
            "layers": [{"digest": "sha256:layer111", "size": 100}]
        });
        storage
            .put(
                "docker/test/manifests/latest.json",
                manifest.to_string().as_bytes(),
            )
            .await
            .unwrap();
        storage
            .put("docker/test/blobs/sha256:configabc", b"config-data")
            .await
            .unwrap();
        storage
            .put("docker/test/blobs/sha256:layer111", b"layer-data")
            .await
            .unwrap();

        let result = run_gc(&storage, true).await;
        assert_eq!(result.total_blobs, 2);
        assert_eq!(result.orphaned_blobs, 0);
    }

    #[tokio::test]
    async fn test_gc_finds_orphans_dry_run() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());

        // Create a manifest referencing only one blob
        let manifest = serde_json::json!({
            "config": {"digest": "sha256:configabc"},
            "layers": [{"digest": "sha256:layer111", "size": 100}]
        });
        storage
            .put(
                "docker/test/manifests/latest.json",
                manifest.to_string().as_bytes(),
            )
            .await
            .unwrap();
        storage
            .put("docker/test/blobs/sha256:configabc", b"config-data")
            .await
            .unwrap();
        storage
            .put("docker/test/blobs/sha256:layer111", b"layer-data")
            .await
            .unwrap();
        // Orphan blob (not referenced)
        storage
            .put("docker/test/blobs/sha256:orphan999", b"orphan-data")
            .await
            .unwrap();

        let result = run_gc(&storage, true).await;
        assert_eq!(result.total_blobs, 3);
        assert_eq!(result.orphaned_blobs, 1);
        assert_eq!(result.deleted_blobs, 0); // dry run
        assert!(result.orphan_keys[0].contains("orphan999"));

        // Verify orphan still exists (dry run)
        assert!(storage
            .get("docker/test/blobs/sha256:orphan999")
            .await
            .is_ok());
    }

    #[tokio::test]
    async fn test_gc_deletes_orphans() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());

        let manifest = serde_json::json!({
            "config": {"digest": "sha256:configabc"},
            "layers": []
        });
        storage
            .put(
                "docker/test/manifests/latest.json",
                manifest.to_string().as_bytes(),
            )
            .await
            .unwrap();
        storage
            .put("docker/test/blobs/sha256:configabc", b"config")
            .await
            .unwrap();
        storage
            .put("docker/test/blobs/sha256:orphan1", b"orphan")
            .await
            .unwrap();

        let result = run_gc(&storage, false).await;
        assert_eq!(result.orphaned_blobs, 1);
        assert_eq!(result.deleted_blobs, 1);

        // Verify orphan is gone
        assert!(storage
            .get("docker/test/blobs/sha256:orphan1")
            .await
            .is_err());
        // Referenced blob still exists
        assert!(storage
            .get("docker/test/blobs/sha256:configabc")
            .await
            .is_ok());
    }

    #[tokio::test]
    async fn test_gc_manifest_list_references() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());

        // Multi-arch manifest list
        let manifest = serde_json::json!({
            "manifests": [
                {"digest": "sha256:platformA", "size": 100},
                {"digest": "sha256:platformB", "size": 200}
            ]
        });
        storage
            .put(
                "docker/multi/manifests/latest.json",
                manifest.to_string().as_bytes(),
            )
            .await
            .unwrap();
        storage
            .put("docker/multi/blobs/sha256:platformA", b"arch-a")
            .await
            .unwrap();
        storage
            .put("docker/multi/blobs/sha256:platformB", b"arch-b")
            .await
            .unwrap();

        let result = run_gc(&storage, true).await;
        assert_eq!(result.orphaned_blobs, 0);
    }

    #[tokio::test]
    async fn test_gc_multi_registry_blobs() {
        let dir = tempfile::tempdir().unwrap();
        let storage = Storage::new_local(dir.path().join("data").to_str().unwrap());

        // npm tarball (not referenced by Docker manifests => orphan candidate)
        storage
            .put("npm/lodash/tarballs/lodash-4.17.21.tgz", b"tarball-data")
            .await
            .unwrap();

        let result = run_gc(&storage, true).await;
        // npm tarballs contain "tarballs/" which matches the filter
        assert_eq!(result.total_blobs, 1);
    }
}

@@ -21,7 +21,6 @@ pub struct StorageHealth {
    pub backend: String,
    pub reachable: bool,
    pub endpoint: String,
    pub total_size_bytes: u64,
}

#[derive(Serialize)]
@@ -41,7 +40,6 @@ pub fn routes() -> Router<Arc<AppState>> {

async fn health_check(State(state): State<Arc<AppState>>) -> (StatusCode, Json<HealthStatus>) {
    let storage_reachable = check_storage_reachable(&state).await;
    let total_size = state.storage.total_size().await;

    let status = if storage_reachable {
        "healthy"
@@ -62,7 +60,6 @@ async fn health_check(State(state): State<Arc<AppState>>) -> (StatusCode, Json<H
                "s3" => state.config.storage.s3_url.clone(),
                _ => state.config.storage.path.clone(),
            },
            total_size_bytes: total_size,
        },
        registries: RegistriesHealth {
            docker: "ok".to_string(),
@@ -93,72 +90,3 @@ async fn readiness_check(State(state): State<Arc<AppState>>) -> StatusCode {

async fn check_storage_reachable(state: &AppState) -> bool {
    state.storage.health_check().await
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use crate::test_helpers::{body_bytes, create_test_context, send};
    use axum::http::{Method, StatusCode};

    #[tokio::test]
    async fn test_health_returns_200() {
        let ctx = create_test_context();
        let response = send(&ctx.app, Method::GET, "/health", "").await;
        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        let body_str = std::str::from_utf8(&body).unwrap();
        assert!(body_str.contains("healthy"));
    }

    #[tokio::test]
    async fn test_health_json_has_version() {
        let ctx = create_test_context();
        let response = send(&ctx.app, Method::GET, "/health", "").await;
        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert!(json.get("version").is_some());
    }

    #[tokio::test]
    async fn test_health_json_has_storage_size() {
        let ctx = create_test_context();

        // Put some data to have non-zero size
        ctx.state
            .storage
            .put("test/artifact", b"hello world")
            .await
            .unwrap();

        let response = send(&ctx.app, Method::GET, "/health", "").await;
        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();

        let storage = json.get("storage").unwrap();
        let size = storage.get("total_size_bytes").unwrap().as_u64().unwrap();
        assert!(
            size > 0,
            "total_size_bytes should be > 0 after storing data"
        );
    }

    #[tokio::test]
    async fn test_health_empty_storage_size_zero() {
        let ctx = create_test_context();
        let response = send(&ctx.app, Method::GET, "/health", "").await;
        let body = body_bytes(response).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();

        let size = json["storage"]["total_size_bytes"].as_u64().unwrap();
        assert_eq!(size, 0, "empty storage should report 0 bytes");
    }

    #[tokio::test]
    async fn test_ready_returns_200() {
        let ctx = create_test_context();
        let response = send(&ctx.app, Method::GET, "/ready", "").await;
        assert_eq!(response.status(), StatusCode::OK);
    }
}

@@ -1,5 +1,3 @@
#![deny(clippy::unwrap_used)]
#![forbid(unsafe_code)]
//! NORA Registry — library interface for fuzzing and testing

pub mod validation;

@@ -1,7 +1,6 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
#![deny(clippy::unwrap_used)]
#![forbid(unsafe_code)]

mod activity_log;
mod audit;
mod auth;
@@ -25,9 +24,6 @@ mod tokens;
mod ui;
mod validation;

#[cfg(test)]
mod test_helpers;

use axum::{extract::DefaultBodyLimit, http::HeaderValue, middleware, Router};
use clap::{Parser, Subcommand};
use std::path::{Path, PathBuf};
@@ -46,9 +42,6 @@ use repo_index::RepoIndex;
pub use storage::Storage;
use tokens::TokenStore;

use parking_lot::RwLock;
use std::collections::HashMap;

#[derive(Parser)]
#[command(name = "nora", version, about = "Multi-protocol artifact registry")]
struct Cli {
@@ -100,9 +93,6 @@ enum Commands {
        /// Max concurrent downloads
        #[arg(long, default_value = "8", global = true)]
        concurrency: usize,
        /// Output results as JSON (for CI pipelines)
        #[arg(long, global = true)]
        json: bool,
    },
}

@@ -118,7 +108,6 @@ pub struct AppState {
    pub docker_auth: registry::DockerAuth,
    pub repo_index: RepoIndex,
    pub http_client: reqwest::Client,
    pub upload_sessions: Arc<RwLock<HashMap<String, registry::docker::UploadSession>>>,
}

#[tokio::main]
@@ -191,9 +180,8 @@ async fn main() {
            format,
            registry,
            concurrency,
            json,
        }) => {
            if let Err(e) = mirror::run_mirror(format, &registry, concurrency, json).await {
            if let Err(e) = mirror::run_mirror(format, &registry, concurrency).await {
                error!("Mirror failed: {}", e);
                std::process::exit(1);
            }
@@ -338,8 +326,7 @@ async fn run_server(config: Config, storage: Storage) {
        .merge(registry::npm_routes())
        .merge(registry::cargo_routes())
        .merge(registry::pypi_routes())
        .merge(registry::raw_routes())
        .merge(registry::go_routes());
        .merge(registry::raw_routes());

    // Routes WITHOUT rate limiting (health, metrics, UI)
    let public_routes = Router::new()
@@ -380,7 +367,6 @@ async fn run_server(config: Config, storage: Storage) {
        docker_auth,
        repo_index: RepoIndex::new(),
        http_client,
        upload_sessions: Arc::new(RwLock::new(HashMap::new())),
    });

    let app = Router::new()
@@ -442,17 +428,13 @@ async fn run_server(config: Config, storage: Storage) {
        "Available endpoints"
    );

    // Background task: persist metrics and flush token last_used every 30 seconds
    // Background task: persist metrics every 30 seconds
    let metrics_state = state.clone();
    tokio::spawn(async move {
        let mut interval = tokio::time::interval(std::time::Duration::from_secs(30));
        loop {
            interval.tick().await;
            metrics_state.metrics.save().await;
            if let Some(ref token_store) = metrics_state.tokens {
                token_store.flush_last_used().await;
            }
            registry::docker::cleanup_expired_sessions(&metrics_state.upload_sessions);
            metrics_state.metrics.save();
        }
    });

@@ -466,7 +448,7 @@ async fn run_server(config: Config, storage: Storage) {
        .expect("Server error");

    // Save metrics on shutdown
    state.metrics.save().await;
    state.metrics.save();

    info!(
        uptime_seconds = state.start_time.elapsed().as_secs(),

@@ -26,7 +26,7 @@ lazy_static! {
        "nora_http_requests_total",
        "Total number of HTTP requests",
        &["registry", "method", "status"]
    ).expect("failed to create HTTP_REQUESTS_TOTAL metric at startup");
    ).expect("metric can be created");

    /// HTTP request duration histogram
    pub static ref HTTP_REQUEST_DURATION: HistogramVec = register_histogram_vec!(
@@ -34,28 +34,28 @@ lazy_static! {
        "HTTP request latency in seconds",
        &["registry", "method"],
        vec![0.001, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0]
    ).expect("failed to create HTTP_REQUEST_DURATION metric at startup");
    ).expect("metric can be created");

    /// Cache requests counter (hit/miss)
    pub static ref CACHE_REQUESTS: IntCounterVec = register_int_counter_vec!(
        "nora_cache_requests_total",
        "Total cache requests",
        &["registry", "result"]
    ).expect("failed to create CACHE_REQUESTS metric at startup");
    ).expect("metric can be created");

    /// Storage operations counter
    pub static ref STORAGE_OPERATIONS: IntCounterVec = register_int_counter_vec!(
        "nora_storage_operations_total",
        "Total storage operations",
        &["operation", "status"]
    ).expect("failed to create STORAGE_OPERATIONS metric at startup");
    ).expect("metric can be created");

    /// Artifacts count by registry
    pub static ref ARTIFACTS_TOTAL: IntCounterVec = register_int_counter_vec!(
        "nora_artifacts_total",
        "Total artifacts stored",
        &["registry"]
    ).expect("failed to create ARTIFACTS_TOTAL metric at startup");
    ).expect("metric can be created");
}

/// Routes for metrics endpoint
@@ -148,85 +148,3 @@ pub fn record_storage_op(operation: &str, success: bool) {
        .with_label_values(&[operation, status])
        .inc();
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_detect_registry_docker() {
        assert_eq!(detect_registry("/v2/nginx/manifests/latest"), "docker");
        assert_eq!(detect_registry("/v2/"), "docker");
        assert_eq!(
            detect_registry("/v2/library/alpine/blobs/sha256:abc"),
            "docker"
        );
    }

    #[test]
    fn test_detect_registry_maven() {
        assert_eq!(detect_registry("/maven2/com/example/artifact"), "maven");
    }

    #[test]
    fn test_detect_registry_npm() {
        assert_eq!(detect_registry("/npm/lodash"), "npm");
        assert_eq!(detect_registry("/npm/@scope/package"), "npm");
    }

    #[test]
    fn test_detect_registry_cargo_path() {
        assert_eq!(detect_registry("/cargo/api/v1/crates"), "cargo");
    }

    #[test]
    fn test_detect_registry_pypi() {
        assert_eq!(detect_registry("/simple/requests/"), "pypi");
        assert_eq!(
            detect_registry("/packages/requests/1.0/requests-1.0.tar.gz"),
            "pypi"
        );
    }

    #[test]
    fn test_detect_registry_ui() {
        assert_eq!(detect_registry("/ui/dashboard"), "ui");
        assert_eq!(detect_registry("/ui"), "ui");
    }

    #[test]
    fn test_detect_registry_other() {
        assert_eq!(detect_registry("/health"), "other");
        assert_eq!(detect_registry("/ready"), "other");
        assert_eq!(detect_registry("/unknown/path"), "other");
    }

    #[test]
    fn test_detect_registry_go_path() {
        assert_eq!(
            detect_registry("/go/github.com/user/repo/@v/v1.0.0.info"),
            "other"
        );
    }

    #[test]
    fn test_record_cache_hit() {
        record_cache_hit("docker");
        // Doesn't panic — metric is recorded
    }

    #[test]
    fn test_record_cache_miss() {
        record_cache_miss("npm");
    }

    #[test]
    fn test_record_storage_op_success() {
        record_storage_op("get", true);
    }

    #[test]
    fn test_record_storage_op_error() {
        record_storage_op("put", false);
    }
}
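// Hypothetical sketch of detect_registry, reverse-engineered from the tests
// above (the real routing table lives in this module): it maps a request path
// prefix to a registry label for the Prometheus counters.
fn detect_registry_sketch(path: &str) -> &'static str {
    if path == "/v2" || path.starts_with("/v2/") {
        "docker"
    } else if path.starts_with("/maven2/") {
        "maven"
    } else if path.starts_with("/npm/") {
        "npm"
    } else if path.starts_with("/cargo/") {
        "cargo"
    } else if path.starts_with("/simple/") || path.starts_with("/packages/") {
        "pypi"
    } else if path == "/ui" || path.starts_with("/ui/") {
        "ui"
    } else {
        "other" // note: /go/... intentionally maps to "other" per the test above
    }
}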
|
||||
|
||||
@@ -138,7 +138,6 @@ pub async fn migrate(
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use tempfile::TempDir;
|
||||
@@ -202,9 +201,16 @@ mod tests {
|
||||
|
||||
src.put("test/file", b"data").await.unwrap();

let stats = migrate(&src, &dst, MigrateOptions { dry_run: true })
    .await
    .unwrap();
let stats = migrate(
    &src,
    &dst,
    MigrateOptions {
        dry_run: true,
        ..Default::default()
    },
)
.await
.unwrap();

assert_eq!(stats.migrated, 1);

@@ -1,610 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Docker image mirroring — fetch images from upstream registries and push to NORA.

use super::{create_progress_bar, MirrorResult};
use crate::registry::docker_auth::DockerAuth;
use reqwest::Client;
use std::time::Duration;

const DEFAULT_REGISTRY: &str = "https://registry-1.docker.io";
const DEFAULT_TIMEOUT: u64 = 120;

/// Parsed Docker image reference
#[derive(Debug, Clone, PartialEq)]
pub struct ImageRef {
    /// Upstream registry (e.g., "registry-1.docker.io", "ghcr.io")
    pub registry: String,
    /// Image name (e.g., "library/alpine", "grafana/grafana")
    pub name: String,
    /// Tag or digest reference (e.g., "3.20", "sha256:abc...")
    pub reference: String,
}

/// Parse an image reference string into structured components.
///
/// Supports formats:
/// - `alpine:3.20` → Docker Hub library/alpine:3.20
/// - `grafana/grafana:latest` → Docker Hub grafana/grafana:latest
/// - `ghcr.io/owner/repo:v1` → ghcr.io owner/repo:v1
/// - `alpine@sha256:abc` → Docker Hub library/alpine@sha256:abc
/// - `alpine` → Docker Hub library/alpine:latest
pub fn parse_image_ref(input: &str) -> ImageRef {
    let input = input.trim();

    // Split off @digest or :tag
    let (name_part, reference) = if let Some(idx) = input.rfind('@') {
        (&input[..idx], &input[idx + 1..])
    } else if let Some(idx) = input.rfind(':') {
        // Make sure colon is not part of a port (e.g., localhost:5000/image)
        let before_colon = &input[..idx];
        if let Some(last_slash) = before_colon.rfind('/') {
            let segment_after_slash = &input[last_slash + 1..];
            if segment_after_slash.contains(':') {
                // Colon in last segment — tag separator
                (&input[..idx], &input[idx + 1..])
            } else {
                // Colon in earlier segment (port) — no tag
                (input, "latest")
            }
        } else {
            (&input[..idx], &input[idx + 1..])
        }
    } else {
        (input, "latest")
    };

    // Determine if first segment is a registry hostname
    let parts: Vec<&str> = name_part.splitn(2, '/').collect();

    let (registry, name) = if parts.len() == 1 {
        // Simple name like "alpine" → Docker Hub library/
        (
            DEFAULT_REGISTRY.to_string(),
            format!("library/{}", parts[0]),
        )
    } else {
        let first = parts[0];
        // A segment is a registry if it contains a dot or colon (hostname/port)
        if first.contains('.') || first.contains(':') {
            let reg = if first.starts_with("http") {
                first.to_string()
            } else {
                format!("https://{}", first)
            };
            (reg, parts[1].to_string())
        } else {
            // Docker Hub with org, e.g., "grafana/grafana"
            (DEFAULT_REGISTRY.to_string(), name_part.to_string())
        }
    };

    ImageRef {
        registry,
        name,
        reference: reference.to_string(),
    }
}
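
// For reference, the mapping this implements (mirroring the unit tests further
// below; shown here purely as an illustration):
//   "alpine"                    -> registry-1.docker.io, "library/alpine", "latest"
//   "grafana/grafana:latest"    -> registry-1.docker.io, "grafana/grafana", "latest"
//   "ghcr.io/owner/repo:v1.0"   -> "https://ghcr.io", "owner/repo", "v1.0"
//   "localhost:5000/myimage:v1" -> "https://localhost:5000", "myimage", "v1"
//   "alpine@sha256:abcdef..."   -> reference = "sha256:abcdef..."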

/// Parse a list of image references from a newline-separated string.
pub fn parse_images_file(content: &str) -> Vec<ImageRef> {
    content
        .lines()
        .map(|l| l.trim())
        .filter(|l| !l.is_empty() && !l.starts_with('#'))
        .map(parse_image_ref)
        .collect()
}

/// Mirror Docker images from upstream registries into NORA.
pub async fn run_docker_mirror(
    client: &Client,
    nora_url: &str,
    images: &[ImageRef],
    concurrency: usize,
) -> Result<MirrorResult, String> {
    let docker_auth = DockerAuth::new(DEFAULT_TIMEOUT);
    let semaphore = std::sync::Arc::new(tokio::sync::Semaphore::new(concurrency));

    let pb = create_progress_bar(images.len() as u64);
    let nora_base = nora_url.trim_end_matches('/');

    let mut total_fetched = 0usize;
    let mut total_failed = 0usize;
    let mut total_bytes = 0u64;

    for image in images {
        let _permit = semaphore.acquire().await.map_err(|e| e.to_string())?;
        pb.set_message(format!("{}:{}", image.name, image.reference));

        match mirror_single_image(client, nora_base, image, &docker_auth).await {
            Ok(bytes) => {
                total_fetched += 1;
                total_bytes += bytes;
            }
            Err(e) => {
                tracing::warn!(
                    image = %format!("{}/{}:{}", image.registry, image.name, image.reference),
                    error = %e,
                    "Failed to mirror image"
                );
                total_failed += 1;
            }
        }
        pb.inc(1);
    }

    pb.finish_with_message("done");

    Ok(MirrorResult {
        total: images.len(),
        fetched: total_fetched,
        failed: total_failed,
        bytes: total_bytes,
    })
}

/// Mirror a single image: fetch manifest + blobs from upstream, push to NORA.
async fn mirror_single_image(
    client: &Client,
    nora_base: &str,
    image: &ImageRef,
    docker_auth: &DockerAuth,
) -> Result<u64, String> {
    let mut bytes = 0u64;

    // 1. Fetch manifest from upstream
    let (manifest_bytes, content_type) = crate::registry::docker::fetch_manifest_from_upstream(
        client,
        &image.registry,
        &image.name,
        &image.reference,
        docker_auth,
        DEFAULT_TIMEOUT,
        None,
    )
    .await
    .map_err(|()| format!("Failed to fetch manifest for {}", image.name))?;

    bytes += manifest_bytes.len() as u64;

    // 2. Parse manifest to find layer digests
    let manifest_json: serde_json::Value = serde_json::from_slice(&manifest_bytes)
        .map_err(|e| format!("Invalid manifest JSON: {}", e))?;

    // Check if this is a manifest list / OCI index
    let manifests_to_process = if is_manifest_list(&content_type, &manifest_json) {
        // Pick linux/amd64 manifest from the list
        resolve_platform_manifest(
            client,
            &image.registry,
            &image.name,
            docker_auth,
            &manifest_json,
        )
        .await?
    } else {
        vec![(
            manifest_bytes.clone(),
            manifest_json.clone(),
            content_type.clone(),
        )]
    };

    for (mf_bytes, mf_json, mf_ct) in &manifests_to_process {
        // 3. Get config digest and layer digests
        let blobs = extract_blob_digests(mf_json);

        // 4. For each blob, check if NORA already has it, otherwise fetch and push
        for digest in &blobs {
            if blob_exists(client, nora_base, &image.name, digest).await {
                tracing::debug!(digest = %digest, "Blob already exists, skipping");
                continue;
            }

            let blob_data = crate::registry::docker::fetch_blob_from_upstream(
                client,
                &image.registry,
                &image.name,
                digest,
                docker_auth,
                DEFAULT_TIMEOUT,
                None,
            )
            .await
            .map_err(|()| format!("Failed to fetch blob {}", digest))?;

            bytes += blob_data.len() as u64;
            push_blob(client, nora_base, &image.name, digest, &blob_data).await?;
        }

        // 5. Push manifest to NORA
        push_manifest(
            client,
            nora_base,
            &image.name,
            &image.reference,
            mf_bytes,
            mf_ct,
        )
        .await?;
    }

    // If this was a manifest list, also push the list itself
    if manifests_to_process.len() > 1 || is_manifest_list(&content_type, &manifest_json) {
        push_manifest(
            client,
            nora_base,
            &image.name,
            &image.reference,
            &manifest_bytes,
            &content_type,
        )
        .await?;
    }

    Ok(bytes)
}

/// Check if a manifest is a manifest list (fat manifest) or OCI index.
fn is_manifest_list(content_type: &str, json: &serde_json::Value) -> bool {
    content_type.contains("manifest.list")
        || content_type.contains("image.index")
        || json.get("manifests").is_some()
}
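
// The content types this matches include (as exercised in the tests below):
//   application/vnd.docker.distribution.manifest.list.v2+json  (Docker fat manifest)
//   application/vnd.oci.image.index.v1+json                    (OCI image index)
// A top-level "manifests" array is also treated as a list, even when the
// Content-Type is a plain single-image manifest type.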

/// From a manifest list, resolve the linux/amd64 platform manifest.
async fn resolve_platform_manifest(
    client: &Client,
    upstream_url: &str,
    name: &str,
    docker_auth: &DockerAuth,
    list_json: &serde_json::Value,
) -> Result<Vec<(Vec<u8>, serde_json::Value, String)>, String> {
    let manifests = list_json
        .get("manifests")
        .and_then(|m| m.as_array())
        .ok_or("Manifest list has no manifests array")?;

    // Find linux/amd64 manifest
    let target = manifests
        .iter()
        .find(|m| {
            let platform = m.get("platform");
            let os = platform
                .and_then(|p| p.get("os"))
                .and_then(|v| v.as_str())
                .unwrap_or("");
            let arch = platform
                .and_then(|p| p.get("architecture"))
                .and_then(|v| v.as_str())
                .unwrap_or("");
            os == "linux" && arch == "amd64"
        })
        .or_else(|| manifests.first())
        .ok_or("No suitable platform manifest found")?;

    let digest = target
        .get("digest")
        .and_then(|d| d.as_str())
        .ok_or("Manifest entry missing digest")?;

    let (mf_bytes, mf_ct) = crate::registry::docker::fetch_manifest_from_upstream(
        client,
        upstream_url,
        name,
        digest,
        docker_auth,
        DEFAULT_TIMEOUT,
        None,
    )
    .await
    .map_err(|()| format!("Failed to fetch platform manifest {}", digest))?;

    let mf_json: serde_json::Value = serde_json::from_slice(&mf_bytes)
        .map_err(|e| format!("Invalid platform manifest: {}", e))?;

    Ok(vec![(mf_bytes, mf_json, mf_ct)])
}
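
// A manifest-list entry it selects has this shape (other fields are ignored):
//   {"digest": "sha256:aaa", "platform": {"os": "linux", "architecture": "amd64"}}
// When no linux/amd64 entry is present, the first entry is used as a fallback.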

/// Extract all blob digests from a manifest (config + layers).
fn extract_blob_digests(manifest: &serde_json::Value) -> Vec<String> {
    let mut digests = Vec::new();

    // Config blob
    if let Some(digest) = manifest
        .get("config")
        .and_then(|c| c.get("digest"))
        .and_then(|d| d.as_str())
    {
        digests.push(digest.to_string());
    }

    // Layer blobs
    if let Some(layers) = manifest.get("layers").and_then(|l| l.as_array()) {
        for layer in layers {
            if let Some(digest) = layer.get("digest").and_then(|d| d.as_str()) {
                digests.push(digest.to_string());
            }
        }
    }

    digests
}
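
// Example input/output (taken from the unit tests below):
//   {"config": {"digest": "sha256:config111"},
//    "layers": [{"digest": "sha256:layer111"}, {"digest": "sha256:layer222"}]}
//   -> ["sha256:config111", "sha256:layer111", "sha256:layer222"]
// The config digest always comes first, then layers in manifest order.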

/// Check if NORA already has a blob via HEAD request.
async fn blob_exists(client: &Client, nora_base: &str, name: &str, digest: &str) -> bool {
    let url = format!("{}/v2/{}/blobs/{}", nora_base, name, digest);
    matches!(
        client
            .head(&url)
            .timeout(Duration::from_secs(10))
            .send()
            .await,
        Ok(r) if r.status().is_success()
    )
}

/// Push a blob to NORA via monolithic upload.
async fn push_blob(
    client: &Client,
    nora_base: &str,
    name: &str,
    digest: &str,
    data: &[u8],
) -> Result<(), String> {
    // Start upload session
    let start_url = format!("{}/v2/{}/blobs/uploads/", nora_base, name);
    let response = client
        .post(&start_url)
        .timeout(Duration::from_secs(30))
        .send()
        .await
        .map_err(|e| format!("Failed to start blob upload: {}", e))?;

    let location = response
        .headers()
        .get("location")
        .and_then(|v| v.to_str().ok())
        .ok_or("Missing Location header from upload start")?
        .to_string();

    // Complete upload with digest
    let upload_url = if location.contains('?') {
        format!("{}&digest={}", location, digest)
    } else {
        format!("{}?digest={}", location, digest)
    };

    // Make absolute URL if relative
    let upload_url = if upload_url.starts_with('/') {
        format!("{}{}", nora_base, upload_url)
    } else {
        upload_url
    };

    let resp = client
        .put(&upload_url)
        .header("Content-Type", "application/octet-stream")
        .body(data.to_vec())
        .timeout(Duration::from_secs(DEFAULT_TIMEOUT))
        .send()
        .await
        .map_err(|e| format!("Failed to upload blob: {}", e))?;

    if !resp.status().is_success() && resp.status().as_u16() != 201 {
        return Err(format!("Blob upload failed with status {}", resp.status()));
    }

    Ok(())
}
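
// The two requests above are the registry spec's monolithic upload flow:
//   POST {nora_base}/v2/<name>/blobs/uploads/           -> 202, Location: <upload-url>
//   PUT  <upload-url>?digest=sha256:<hex>   (blob body) -> 201 Created
// This is a sketch of the happy path only; chunked PATCH uploads are a
// separate code path in the registry handlers and are not used here.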

/// Push a manifest to NORA.
async fn push_manifest(
    client: &Client,
    nora_base: &str,
    name: &str,
    reference: &str,
    data: &[u8],
    content_type: &str,
) -> Result<(), String> {
    let url = format!("{}/v2/{}/manifests/{}", nora_base, name, reference);
    let resp = client
        .put(&url)
        .header("Content-Type", content_type)
        .body(data.to_vec())
        .timeout(Duration::from_secs(30))
        .send()
        .await
        .map_err(|e| format!("Failed to push manifest: {}", e))?;

    if !resp.status().is_success() && resp.status().as_u16() != 201 {
        return Err(format!(
            "Manifest push failed with status {}",
            resp.status()
        ));
    }

    Ok(())
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // --- parse_image_ref tests ---

    #[test]
    fn test_parse_simple_name() {
        let r = parse_image_ref("alpine");
        assert_eq!(r.registry, DEFAULT_REGISTRY);
        assert_eq!(r.name, "library/alpine");
        assert_eq!(r.reference, "latest");
    }

    #[test]
    fn test_parse_name_with_tag() {
        let r = parse_image_ref("alpine:3.20");
        assert_eq!(r.registry, DEFAULT_REGISTRY);
        assert_eq!(r.name, "library/alpine");
        assert_eq!(r.reference, "3.20");
    }

    #[test]
    fn test_parse_org_image() {
        let r = parse_image_ref("grafana/grafana:latest");
        assert_eq!(r.registry, DEFAULT_REGISTRY);
        assert_eq!(r.name, "grafana/grafana");
        assert_eq!(r.reference, "latest");
    }

    #[test]
    fn test_parse_org_image_no_tag() {
        let r = parse_image_ref("grafana/grafana");
        assert_eq!(r.registry, DEFAULT_REGISTRY);
        assert_eq!(r.name, "grafana/grafana");
        assert_eq!(r.reference, "latest");
    }

    #[test]
    fn test_parse_custom_registry() {
        let r = parse_image_ref("ghcr.io/owner/repo:v1.0");
        assert_eq!(r.registry, "https://ghcr.io");
        assert_eq!(r.name, "owner/repo");
        assert_eq!(r.reference, "v1.0");
    }

    #[test]
    fn test_parse_digest_reference() {
        let r = parse_image_ref("alpine@sha256:abcdef1234567890");
        assert_eq!(r.registry, DEFAULT_REGISTRY);
        assert_eq!(r.name, "library/alpine");
        assert_eq!(r.reference, "sha256:abcdef1234567890");
    }

    #[test]
    fn test_parse_registry_with_port() {
        let r = parse_image_ref("localhost:5000/myimage:v1");
        assert_eq!(r.registry, "https://localhost:5000");
        assert_eq!(r.name, "myimage");
        assert_eq!(r.reference, "v1");
    }

    #[test]
    fn test_parse_deep_path() {
        let r = parse_image_ref("ghcr.io/org/sub/image:latest");
        assert_eq!(r.registry, "https://ghcr.io");
        assert_eq!(r.name, "org/sub/image");
        assert_eq!(r.reference, "latest");
    }

    #[test]
    fn test_parse_trimmed() {
        let r = parse_image_ref(" alpine:3.20 ");
        assert_eq!(r.name, "library/alpine");
        assert_eq!(r.reference, "3.20");
    }

    #[test]
    fn test_parse_images_file() {
        let content = "alpine:3.20\n# comment\npostgres:15\n\nnginx:1.25\n";
        let images = parse_images_file(content);
        assert_eq!(images.len(), 3);
        assert_eq!(images[0].name, "library/alpine");
        assert_eq!(images[1].name, "library/postgres");
        assert_eq!(images[2].name, "library/nginx");
    }

    #[test]
    fn test_parse_images_file_empty() {
        let images = parse_images_file("");
        assert!(images.is_empty());
    }

    #[test]
    fn test_parse_images_file_comments_only() {
        let images = parse_images_file("# comment\n# another\n");
        assert!(images.is_empty());
    }

    // --- extract_blob_digests tests ---

    #[test]
    fn test_extract_blob_digests_full_manifest() {
        let manifest = serde_json::json!({
            "config": {
                "digest": "sha256:config111"
            },
            "layers": [
                {"digest": "sha256:layer111"},
                {"digest": "sha256:layer222"}
            ]
        });
        let digests = extract_blob_digests(&manifest);
        assert_eq!(digests.len(), 3);
        assert_eq!(digests[0], "sha256:config111");
        assert_eq!(digests[1], "sha256:layer111");
        assert_eq!(digests[2], "sha256:layer222");
    }

    #[test]
    fn test_extract_blob_digests_no_layers() {
        let manifest = serde_json::json!({
            "config": { "digest": "sha256:config111" }
        });
        let digests = extract_blob_digests(&manifest);
        assert_eq!(digests.len(), 1);
    }

    #[test]
    fn test_extract_blob_digests_empty() {
        let manifest = serde_json::json!({});
        let digests = extract_blob_digests(&manifest);
        assert!(digests.is_empty());
    }

    // --- is_manifest_list tests ---

    #[test]
    fn test_is_manifest_list_by_content_type() {
        let json = serde_json::json!({});
        assert!(is_manifest_list(
            "application/vnd.docker.distribution.manifest.list.v2+json",
            &json
        ));
    }

    #[test]
    fn test_is_manifest_list_oci_index() {
        let json = serde_json::json!({});
        assert!(is_manifest_list(
            "application/vnd.oci.image.index.v1+json",
            &json
        ));
    }

    #[test]
    fn test_is_manifest_list_by_manifests_key() {
        let json = serde_json::json!({
            "manifests": [{"digest": "sha256:abc"}]
        });
        assert!(is_manifest_list(
            "application/vnd.docker.distribution.manifest.v2+json",
            &json
        ));
    }

    #[test]
    fn test_is_not_manifest_list() {
        let json = serde_json::json!({
            "config": {},
            "layers": []
        });
        assert!(!is_manifest_list(
            "application/vnd.docker.distribution.manifest.v2+json",
            &json
        ));
    }
}
@@ -3,7 +3,6 @@

//! `nora mirror` — pre-fetch dependencies through NORA proxy cache.

mod docker;
mod npm;

use clap::Subcommand;
@@ -25,12 +24,6 @@ pub enum MirrorFormat {
        #[arg(long)]
        all_versions: bool,
    },
    /// Mirror npm packages from yarn.lock
    Yarn {
        /// Path to yarn.lock
        #[arg(long)]
        lockfile: PathBuf,
    },
    /// Mirror Python packages
    Pip {
        /// Path to requirements.txt
@@ -49,15 +42,6 @@ pub enum MirrorFormat {
        #[arg(long)]
        lockfile: PathBuf,
    },
    /// Mirror Docker images from upstream registries
    Docker {
        /// Comma-separated image references (e.g., alpine:3.20,postgres:15)
        #[arg(long, conflicts_with = "images_file", value_delimiter = ',')]
        images: Option<Vec<String>>,
        /// Path to file with image references (one per line)
        #[arg(long, conflicts_with = "images")]
        images_file: Option<PathBuf>,
    },
}

#[derive(Debug, Clone, Hash, Eq, PartialEq)]
@@ -66,7 +50,6 @@ pub struct MirrorTarget {
    pub version: String,
}

#[derive(serde::Serialize)]
pub struct MirrorResult {
    pub total: usize,
    pub fetched: usize,
@@ -81,7 +64,7 @@ pub fn create_progress_bar(total: u64) -> ProgressBar {
        .template(
            "{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} ({eta}) {msg}",
        )
        .expect("static progress bar template is valid")
        .unwrap()
        .progress_chars("=>-"),
    );
    pb
@@ -91,7 +74,6 @@ pub async fn run_mirror(
    format: MirrorFormat,
    registry: &str,
    concurrency: usize,
    json_output: bool,
) -> Result<(), String> {
    let client = reqwest::Client::builder()
        .timeout(std::time::Duration::from_secs(120))
@@ -128,27 +110,6 @@ pub async fn run_mirror(
            )
            .await?
        }
        MirrorFormat::Yarn { lockfile } => {
            let content = std::fs::read_to_string(&lockfile)
                .map_err(|e| format!("Cannot read {}: {}", lockfile.display(), e))?;
            let targets = npm::parse_yarn_lock(&content);
            if targets.is_empty() {
                println!("No packages found in {}", lockfile.display());
                MirrorResult {
                    total: 0,
                    fetched: 0,
                    failed: 0,
                    bytes: 0,
                }
            } else {
                println!(
                    "Mirroring {} npm packages from yarn.lock via {}...",
                    targets.len(),
                    registry
                );
                npm::mirror_npm_packages(&client, registry, &targets, concurrency).await?
            }
        }
        MirrorFormat::Pip { lockfile } => {
            mirror_lockfile(&client, registry, "pip", &lockfile).await?
        }
@@ -158,46 +119,15 @@ pub async fn run_mirror(
        MirrorFormat::Maven { lockfile } => {
            mirror_lockfile(&client, registry, "maven", &lockfile).await?
        }
        MirrorFormat::Docker {
            images,
            images_file,
        } => {
            let image_refs = if let Some(file) = images_file {
                let content = std::fs::read_to_string(&file)
                    .map_err(|e| format!("Cannot read {}: {}", file.display(), e))?;
                docker::parse_images_file(&content)
            } else if let Some(imgs) = images {
                imgs.iter().map(|s| docker::parse_image_ref(s)).collect()
            } else {
                return Err("Either --images or --images-file is required".to_string());
            };
            if image_refs.is_empty() {
                return Err("No images specified".to_string());
            }
            println!(
                "Mirroring {} Docker images via {}...",
                image_refs.len(),
                registry
            );
            docker::run_docker_mirror(&client, registry, &image_refs, concurrency).await?
        }
    };

    let elapsed = start.elapsed();

    if json_output {
        println!(
            "{}",
            serde_json::to_string_pretty(&result).unwrap_or_default()
        );
    } else {
        println!("\nMirror complete:");
        println!(" Total: {}", result.total);
        println!(" Fetched: {}", result.fetched);
        println!(" Failed: {}", result.failed);
        println!(" Size: {:.1} MB", result.bytes as f64 / 1_048_576.0);
        println!(" Time: {:.1}s", elapsed.as_secs_f64());
    }
    println!("\nMirror complete:");
    println!(" Total: {}", result.total);
    println!(" Fetched: {}", result.fetched);
    println!(" Failed: {}", result.failed);
    println!(" Size: {:.1} MB", result.bytes as f64 / 1_048_576.0);
    println!(" Time: {:.1}s", elapsed.as_secs_f64());

    if result.failed > 0 {
        Err(format!("{} packages failed to mirror", result.failed))
@@ -290,7 +220,7 @@ fn parse_requirements_txt(content: &str) -> Vec<MirrorTarget> {
        .lines()
        .filter(|l| !l.trim().is_empty() && !l.starts_with('#') && !l.starts_with('-'))
        .filter_map(|line| {
            let line = line.split('#').next().unwrap_or(line).trim();
            let line = line.split('#').next().unwrap().trim();
            if let Some((name, version)) = line.split_once("==") {
                Some(MirrorTarget {
                    name: name.trim().to_string(),
@@ -353,7 +283,6 @@ fn parse_maven_deps(content: &str) -> Vec<MirrorTarget> {
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

@@ -393,149 +322,4 @@ version = \"0.1.0\"
        assert_eq!(targets[0].name, "org.apache.commons:commons-lang3");
        assert_eq!(targets[0].version, "3.12.0");
    }

    #[test]
    fn test_parse_requirements_txt_empty() {
        let targets = parse_requirements_txt("");
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_requirements_txt_comments_only() {
        let content = "# This is a comment\n# Another comment\n\n";
        let targets = parse_requirements_txt(content);
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_requirements_txt_flags() {
        let content = "-r other-requirements.txt\n-i https://pypi.org/simple\nflask==2.0\n";
        let targets = parse_requirements_txt(content);
        assert_eq!(targets.len(), 1);
        assert_eq!(targets[0].name, "flask");
    }

    #[test]
    fn test_parse_requirements_txt_version_specifiers() {
        let content =
            "pkg1>=1.0\npkg2<2.0\npkg3!=1.5\npkg4~=1.0\npkg5==1.0 ; python_version>='3.8'\n";
        let targets = parse_requirements_txt(content);
        assert_eq!(targets.len(), 5);
        assert_eq!(targets[0].name, "pkg1");
        assert_eq!(targets[0].version, "latest");
        assert_eq!(targets[4].name, "pkg5");
        assert_eq!(targets[4].version, "1.0 ; python_version>='3.8'");
    }

    #[test]
    fn test_parse_requirements_txt_inline_comments() {
        let content = "flask==2.0 # web framework\n";
        let targets = parse_requirements_txt(content);
        assert_eq!(targets.len(), 1);
        assert_eq!(targets[0].name, "flask");
        assert_eq!(targets[0].version, "2.0");
    }

    #[test]
    fn test_parse_cargo_lock_empty() {
        let content = "";
        let result = parse_cargo_lock(content);
        let targets = result.unwrap();
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_cargo_lock_no_packages() {
        let content = "[metadata]\nsome = \"value\"\n";
        let targets = parse_cargo_lock(content).unwrap();
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_cargo_lock_git_source() {
        let content = r#"
[[package]]
name = "my-dep"
version = "0.1.0"
source = "git+https://github.com/user/repo#abc123"
"#;
        let targets = parse_cargo_lock(content).unwrap();
        assert!(targets.is_empty()); // git sources filtered out
    }

    #[test]
    fn test_parse_cargo_lock_multiple() {
        let content = r#"
[[package]]
name = "serde"
version = "1.0.197"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "tokio"
version = "1.36.0"
source = "registry+https://github.com/rust-lang/crates.io-index"

[[package]]
name = "local-crate"
version = "0.1.0"
"#;
        let targets = parse_cargo_lock(content).unwrap();
        assert_eq!(targets.len(), 2);
    }

    #[test]
    fn test_parse_maven_deps_empty() {
        let targets = parse_maven_deps("");
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_maven_deps_short_line() {
        let targets = parse_maven_deps("foo:bar\n");
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_maven_deps_multiple() {
        let content = "[INFO] org.slf4j:slf4j-api:jar:2.0.9:compile\n[INFO] com.google.guava:guava:jar:33.0.0-jre:compile\n";
        let targets = parse_maven_deps(content);
        assert_eq!(targets.len(), 2);
        assert_eq!(targets[0].name, "org.slf4j:slf4j-api");
        assert_eq!(targets[1].version, "33.0.0-jre");
    }

    #[test]
    fn test_create_progress_bar() {
        let pb = create_progress_bar(100);
        assert_eq!(pb.length(), Some(100));
    }

    #[test]
    fn test_mirror_result_json_serialization() {
        let result = MirrorResult {
            total: 10,
            fetched: 8,
            failed: 2,
            bytes: 1048576,
        };
        let json = serde_json::to_string_pretty(&result).unwrap();
        let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed["total"], 10);
        assert_eq!(parsed["fetched"], 8);
        assert_eq!(parsed["failed"], 2);
        assert_eq!(parsed["bytes"], 1048576);
    }

    #[test]
    fn test_mirror_result_json_zero_values() {
        let result = MirrorResult {
            total: 0,
            fetched: 0,
            failed: 0,
            bytes: 0,
        };
        let json = serde_json::to_string(&result).unwrap();
        assert!(json.contains("\"total\":0"));
    }
}

@@ -174,7 +174,7 @@ async fn resolve_npm_packages(
}

/// Fetch packages through NORA (triggers proxy cache)
pub async fn mirror_npm_packages(
async fn mirror_npm_packages(
    client: &reqwest::Client,
    registry: &str,
    targets: &[MirrorTarget],
@@ -200,11 +200,7 @@ pub async fn mirror_npm_packages(
    let mut handles = Vec::new();

    for target in targets {
        let permit = sem
            .clone()
            .acquire_owned()
            .await
            .expect("semaphore closed unexpectedly");
        let permit = sem.clone().acquire_owned().await.unwrap();
        let client = client.clone();
        let pb = pb.clone();
        let fetched = fetched.clone();
@@ -250,75 +246,7 @@ pub async fn mirror_npm_packages(
    })
}

/// Parse yarn.lock v1 format
/// Format: "package@version:\n version \"X.Y.Z\"\n resolved \"url\""
pub fn parse_yarn_lock(content: &str) -> Vec<MirrorTarget> {
    let mut targets = Vec::new();
    let mut seen = HashSet::new();
    let mut current_name: Option<String> = None;

    for line in content.lines() {
        let trimmed = line.trim();

        // Skip comments and empty lines
        if trimmed.starts_with('#') || trimmed.is_empty() {
            continue;
        }

        // Package header: "lodash@^4.17.21:" or "@babel/core@^7.0.0, @babel/core@^7.26.0:"
        if !line.starts_with(' ') && !line.starts_with('\t') && trimmed.ends_with(':') {
            let header = trimmed.trim_end_matches(':');
            // Take first entry before comma (all resolve to same version)
            let first = header.split(',').next().unwrap_or(header).trim();
            // Remove quotes if present
            let first = first.trim_matches('"');
            // Extract package name: everything before last @
            if let Some(name) = extract_yarn_package_name(first) {
                current_name = Some(name.to_string());
            } else {
                current_name = None;
            }
            continue;
        }

        // Version line: " version "4.17.21""
        if let Some(ref name) = current_name {
            if trimmed.starts_with("version ") {
                let ver = trimmed.trim_start_matches("version ").trim_matches('"');
                let pair = (name.clone(), ver.to_string());
                if seen.insert(pair.clone()) {
                    targets.push(MirrorTarget {
                        name: pair.0,
                        version: pair.1,
                    });
                }
                current_name = None;
            }
        }
    }

    targets
}
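
// A minimal yarn.lock v1 input and the target it yields (mirroring the unit
// tests below; note that the version line must be indented):
//
//   lodash@^4.17.21:
//     version "4.17.21"
//     resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz"
//
//   -> MirrorTarget { name: "lodash", version: "4.17.21" }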

/// Extract package name from yarn.lock entry like "@babel/core@^7.0.0"
fn extract_yarn_package_name(entry: &str) -> Option<&str> {
    if let Some(rest) = entry.strip_prefix('@') {
        // Scoped: @babel/core@^7.0.0 → find second @
        let after_scope = rest.find('@')?;
        Some(&entry[..after_scope + 1])
    } else {
        // Regular: lodash@^4.17.21 → find first @
        let at = entry.find('@')?;
        if at == 0 {
            None
        } else {
            Some(&entry[..at])
        }
    }
}
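
// Examples (matching the tests below):
//   extract_yarn_package_name("lodash@^4.17.21")    -> Some("lodash")
//   extract_yarn_package_name("@babel/core@^7.0.0") -> Some("@babel/core")
//   extract_yarn_package_name("lodash")             -> None (no version range)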

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

@@ -392,223 +320,4 @@ mod tests {
        assert_eq!(targets.len(), 1); // deduplicated
        assert_eq!(targets[0].name, "debug");
    }

    #[test]
    fn test_extract_package_name_simple() {
        assert_eq!(extract_package_name("node_modules/lodash"), Some("lodash"));
    }

    #[test]
    fn test_extract_package_name_scoped() {
        assert_eq!(
            extract_package_name("node_modules/@babel/core"),
            Some("@babel/core")
        );
    }

    #[test]
    fn test_extract_package_name_nested() {
        assert_eq!(
            extract_package_name("node_modules/foo/node_modules/@scope/bar"),
            Some("@scope/bar")
        );
    }

    #[test]
    fn test_extract_package_name_no_node_modules() {
        assert_eq!(extract_package_name("just/a/path"), None);
    }

    #[test]
    fn test_extract_package_name_empty_after() {
        assert_eq!(extract_package_name("node_modules/"), None);
    }

    #[test]
    fn test_parse_lockfile_v2() {
        let lockfile = serde_json::json!({
            "lockfileVersion": 2,
            "packages": {
                "": {"name": "root"},
                "node_modules/express": {"version": "4.18.2"},
                "node_modules/@types/node": {"version": "20.11.0"}
            }
        });
        let targets = parse_npm_lockfile(&lockfile.to_string()).unwrap();
        assert_eq!(targets.len(), 2);
    }

    #[test]
    fn test_parse_lockfile_empty_packages() {
        let lockfile = serde_json::json!({
            "lockfileVersion": 3,
            "packages": {}
        });
        let targets = parse_npm_lockfile(&lockfile.to_string()).unwrap();
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_lockfile_invalid_json() {
        let result = parse_npm_lockfile("not json at all");
        assert!(result.is_err());
    }

    #[test]
    fn test_parse_lockfile_v1_nested() {
        let lockfile = serde_json::json!({
            "lockfileVersion": 1,
            "dependencies": {
                "express": {
                    "version": "4.18.2",
                    "dependencies": {
                        "accepts": {"version": "1.3.8"}
                    }
                }
            }
        });
        let targets = parse_npm_lockfile(&lockfile.to_string()).unwrap();
        assert_eq!(targets.len(), 2);
    }

    #[test]
    fn test_parse_lockfile_v2_falls_back_to_v1() {
        // v2 with empty packages should fall back to v1 dependencies
        let lockfile = serde_json::json!({
            "lockfileVersion": 2,
            "packages": {},
            "dependencies": {
                "lodash": {"version": "4.17.21"}
            }
        });
        let targets = parse_npm_lockfile(&lockfile.to_string()).unwrap();
        assert_eq!(targets.len(), 1);
        assert_eq!(targets[0].name, "lodash");
    }

    #[test]
    fn test_parse_lockfile_no_version_field() {
        let lockfile = serde_json::json!({
            "packages": {
                "node_modules/something": {"resolved": "https://example.com"}
            }
        });
        let targets = parse_npm_lockfile(&lockfile.to_string()).unwrap();
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_yarn_lock_basic() {
        let content = r#"# yarn lockfile v1

lodash@^4.17.21:
  version "4.17.21"
  resolved "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz"

express@^4.18.0:
  version "4.18.2"
  resolved "https://registry.npmjs.org/express/-/express-4.18.2.tgz"
"#;
        let targets = parse_yarn_lock(content);
        assert_eq!(targets.len(), 2);
        assert_eq!(targets[0].name, "lodash");
        assert_eq!(targets[0].version, "4.17.21");
        assert_eq!(targets[1].name, "express");
        assert_eq!(targets[1].version, "4.18.2");
    }

    #[test]
    fn test_parse_yarn_lock_scoped() {
        let content = r#"
"@babel/core@^7.26.0":
  version "7.26.0"
  resolved "https://registry.npmjs.org/@babel/core/-/core-7.26.0.tgz"
"#;
        let targets = parse_yarn_lock(content);
        assert_eq!(targets.len(), 1);
        assert_eq!(targets[0].name, "@babel/core");
        assert_eq!(targets[0].version, "7.26.0");
    }

    #[test]
    fn test_parse_yarn_lock_multiple_ranges() {
        let content = r#"
debug@2.6.9, debug@^2.2.0:
  version "2.6.9"
  resolved "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz"

debug@^4.1.0, debug@^4.3.4:
  version "4.3.7"
  resolved "https://registry.npmjs.org/debug/-/debug-4.3.7.tgz"
"#;
        let targets = parse_yarn_lock(content);
        assert_eq!(targets.len(), 2);
        assert_eq!(targets[0].name, "debug");
        assert_eq!(targets[0].version, "2.6.9");
        assert_eq!(targets[1].name, "debug");
        assert_eq!(targets[1].version, "4.3.7");
    }

    #[test]
    fn test_parse_yarn_lock_dedup() {
        let content = r#"
lodash@^4.0.0:
  version "4.17.21"

lodash@^4.17.0:
  version "4.17.21"
"#;
        let targets = parse_yarn_lock(content);
        assert_eq!(targets.len(), 1); // same name+version deduped
    }

    #[test]
    fn test_parse_yarn_lock_empty() {
        let targets = parse_yarn_lock(
            "# yarn lockfile v1

",
        );
        assert!(targets.is_empty());
    }

    #[test]
    fn test_parse_yarn_lock_comments_only() {
        let content = "# yarn lockfile v1
# comment
";
        let targets = parse_yarn_lock(content);
        assert!(targets.is_empty());
    }

    #[test]
    fn test_extract_yarn_package_name_simple() {
        assert_eq!(extract_yarn_package_name("lodash@^4.17.21"), Some("lodash"));
    }

    #[test]
    fn test_extract_yarn_package_name_scoped() {
        assert_eq!(
            extract_yarn_package_name("@babel/core@^7.0.0"),
            Some("@babel/core")
        );
    }

    #[test]
    fn test_extract_yarn_package_name_no_at() {
        assert_eq!(extract_yarn_package_name("lodash"), None);
    }

    #[test]
    fn test_parse_yarn_lock_quoted_headers() {
        let content = r#"
"@types/node@^20.0.0":
  version "20.11.5"
  resolved "https://registry.npmjs.org/@types/node/-/node-20.11.5.tgz"
"#;
        let targets = parse_yarn_lock(content);
        assert_eq!(targets.len(), 1);
        assert_eq!(targets[0].name, "@types/node");
        assert_eq!(targets[0].version, "20.11.5");
    }
}

@@ -25,7 +25,7 @@ pub fn auth_rate_limiter(
        .burst_size(config.auth_burst)
        .use_headers()
        .finish()
        .expect("failed to build auth rate limiter: invalid RateLimitConfig");
        .expect("Failed to build auth rate limiter");

    tower_governor::GovernorLayer::new(gov_config)
}
@@ -46,7 +46,7 @@ pub fn upload_rate_limiter(
        .burst_size(config.upload_burst)
        .use_headers()
        .finish()
        .expect("failed to build upload rate limiter: invalid RateLimitConfig");
        .expect("Failed to build upload rate limiter");

    tower_governor::GovernorLayer::new(gov_config)
}
@@ -65,7 +65,7 @@ pub fn general_rate_limiter(
        .burst_size(config.general_burst)
        .use_headers()
        .finish()
        .expect("failed to build general rate limiter: invalid RateLimitConfig");
        .expect("Failed to build general rate limiter");

    tower_governor::GovernorLayer::new(gov_config)
}

@@ -3,7 +3,6 @@

use crate::activity_log::{ActionType, ActivityEntry};
use crate::audit::AuditEntry;
use crate::validation::validate_storage_key;
use crate::AppState;
use axum::{
    extract::{Path, State},
@@ -27,10 +26,6 @@ async fn get_metadata(
    State(state): State<Arc<AppState>>,
    Path(crate_name): Path<String>,
) -> Response {
    // Validate input to prevent path traversal
    if validate_storage_key(&crate_name).is_err() {
        return StatusCode::BAD_REQUEST.into_response();
    }
    let key = format!("cargo/{}/metadata.json", crate_name);
    match state.storage.get(&key).await {
        Ok(data) => (StatusCode::OK, data).into_response(),
@@ -42,10 +37,6 @@ async fn download(
    State(state): State<Arc<AppState>>,
    Path((crate_name, version)): Path<(String, String)>,
) -> Response {
    // Validate inputs to prevent path traversal
    if validate_storage_key(&crate_name).is_err() || validate_storage_key(&version).is_err() {
        return StatusCode::BAD_REQUEST.into_response();
    }
    let key = format!(
        "cargo/{}/{}/{}-{}.crate",
        crate_name, version, crate_name, version
@@ -68,73 +59,3 @@ async fn download(
        Err(_) => StatusCode::NOT_FOUND.into_response(),
    }
}
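
// Illustrative note: where the validate_storage_key guard above is in place, a
// traversal attempt such as GET /cargo/api/v1/crates/../../etc/passwd is turned
// away with 400 Bad Request before any "cargo/..." storage key is built (the
// exact set of rejected names is whatever validate_storage_key enforces).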
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use crate::test_helpers::{body_bytes, create_test_context, send};
|
||||
use axum::http::{Method, StatusCode};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cargo_metadata_not_found() {
|
||||
let ctx = create_test_context();
|
||||
let resp = send(
|
||||
&ctx.app,
|
||||
Method::GET,
|
||||
"/cargo/api/v1/crates/nonexistent",
|
||||
"",
|
||||
)
|
||||
.await;
|
||||
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cargo_metadata_from_storage() {
|
||||
let ctx = create_test_context();
|
||||
let meta = r#"{"name":"test-crate","versions":[]}"#;
|
||||
ctx.state
|
||||
.storage
|
||||
.put("cargo/test-crate/metadata.json", meta.as_bytes())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let resp = send(&ctx.app, Method::GET, "/cargo/api/v1/crates/test-crate", "").await;
|
||||
assert_eq!(resp.status(), StatusCode::OK);
|
||||
let body = body_bytes(resp).await;
|
||||
assert_eq!(&body[..], meta.as_bytes());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cargo_download_not_found() {
|
||||
let ctx = create_test_context();
|
||||
let resp = send(
|
||||
&ctx.app,
|
||||
Method::GET,
|
||||
"/cargo/api/v1/crates/missing/1.0.0/download",
|
||||
"",
|
||||
)
|
||||
.await;
|
||||
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_cargo_download_from_storage() {
|
||||
let ctx = create_test_context();
|
||||
ctx.state
|
||||
.storage
|
||||
.put("cargo/my-crate/1.2.3/my-crate-1.2.3.crate", b"crate-data")
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let resp = send(
|
||||
&ctx.app,
|
||||
Method::GET,
|
||||
"/cargo/api/v1/crates/my-crate/1.2.3/download",
|
||||
"",
|
||||
)
|
||||
.await;
|
||||
assert_eq!(resp.status(), StatusCode::OK);
|
||||
let body = body_bytes(resp).await;
|
||||
assert_eq!(&body[..], b"crate-data");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ pub struct LayerInfo {
|
||||
}
|
||||
|
||||
/// In-progress upload session with metadata
|
||||
pub struct UploadSession {
|
||||
struct UploadSession {
|
||||
data: Vec<u8>,
|
||||
name: String,
|
||||
created_at: std::time::Instant,
|
||||
@@ -75,16 +75,21 @@ fn max_session_size() -> usize {
|
||||
mb.saturating_mul(1024 * 1024)
|
||||
}
|
||||
|
||||
/// Remove expired upload sessions (called by background task)
|
||||
pub fn cleanup_expired_sessions(sessions: &RwLock<HashMap<String, UploadSession>>) {
|
||||
let mut guard = sessions.write();
|
||||
let before = guard.len();
|
||||
guard.retain(|_, s| s.created_at.elapsed() < SESSION_TTL);
|
||||
let removed = before - guard.len();
|
||||
/// In-progress upload sessions for chunked uploads
|
||||
/// Maps UUID -> UploadSession with limits and TTL
|
||||
static UPLOAD_SESSIONS: std::sync::LazyLock<RwLock<HashMap<String, UploadSession>>> =
|
||||
std::sync::LazyLock::new(|| RwLock::new(HashMap::new()));
|
||||
|
||||
/// Remove expired upload sessions (called periodically)
|
||||
fn cleanup_expired_sessions() {
|
||||
let mut sessions = UPLOAD_SESSIONS.write();
|
||||
let before = sessions.len();
|
||||
sessions.retain(|_, s| s.created_at.elapsed() < SESSION_TTL);
|
||||
let removed = before - sessions.len();
|
||||
if removed > 0 {
|
||||
tracing::info!(
|
||||
removed = removed,
|
||||
remaining = guard.len(),
|
||||
remaining = sessions.len(),
|
||||
"Cleaned up expired upload sessions"
|
||||
);
|
||||
}
|
||||
@@ -284,9 +289,7 @@ async fn download_blob(
|
||||
let key_clone = key.clone();
|
||||
let data_clone = data.clone();
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = storage.put(&key_clone, &data_clone).await {
|
||||
tracing::warn!(key = %key_clone, error = %e, "Failed to cache blob in storage");
|
||||
}
|
||||
let _ = storage.put(&key_clone, &data_clone).await;
|
||||
});
|
||||
|
||||
return (
|
||||
@@ -302,14 +305,17 @@ async fn download_blob(
|
||||
StatusCode::NOT_FOUND.into_response()
|
||||
}
|
||||
|
||||
async fn start_upload(State(state): State<Arc<AppState>>, Path(name): Path<String>) -> Response {
|
||||
async fn start_upload(Path(name): Path<String>) -> Response {
|
||||
if let Err(e) = validate_docker_name(&name) {
|
||||
return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
|
||||
}
|
||||
|
||||
// Cleanup expired sessions before checking limits
|
||||
cleanup_expired_sessions();
|
||||
|
||||
// Enforce max concurrent sessions
|
||||
{
|
||||
let sessions = state.upload_sessions.read();
|
||||
let sessions = UPLOAD_SESSIONS.read();
|
||||
let max_sessions = max_upload_sessions();
|
||||
if sessions.len() >= max_sessions {
|
||||
tracing::warn!(
|
||||
@@ -325,7 +331,7 @@ async fn start_upload(State(state): State<Arc<AppState>>, Path(name): Path<Strin
|
||||
|
||||
// Create session with metadata
|
||||
{
|
||||
let mut sessions = state.upload_sessions.write();
|
||||
let mut sessions = UPLOAD_SESSIONS.write();
|
||||
sessions.insert(
|
||||
uuid.clone(),
|
||||
UploadSession {
|
||||
@@ -340,7 +346,7 @@ async fn start_upload(State(state): State<Arc<AppState>>, Path(name): Path<Strin
|
||||
(
|
||||
StatusCode::ACCEPTED,
|
||||
[
|
||||
(header::LOCATION, location),
|
||||
(header::LOCATION, location.clone()),
|
||||
(HeaderName::from_static("docker-upload-uuid"), uuid),
|
||||
],
|
||||
)
|
||||
@@ -349,18 +355,14 @@ async fn start_upload(State(state): State<Arc<AppState>>, Path(name): Path<Strin
|
||||
|
||||
/// PATCH handler for chunked blob uploads
|
||||
/// Docker client sends data chunks via PATCH, then finalizes with PUT
|
||||
async fn patch_blob(
|
||||
State(state): State<Arc<AppState>>,
|
||||
Path((name, uuid)): Path<(String, String)>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
async fn patch_blob(Path((name, uuid)): Path<(String, String)>, body: Bytes) -> Response {
|
||||
if let Err(e) = validate_docker_name(&name) {
|
||||
return (StatusCode::BAD_REQUEST, e.to_string()).into_response();
|
||||
}
|
||||
|
||||
// Append data to the upload session and get total size
|
||||
let total_size = {
|
||||
let mut sessions = state.upload_sessions.write();
|
||||
let mut sessions = UPLOAD_SESSIONS.write();
|
||||
let session = match sessions.get_mut(&uuid) {
|
||||
Some(s) => s,
|
||||
None => {
|
||||
@@ -447,7 +449,7 @@ async fn upload_blob(
|
||||
|
||||
// Get data from chunked session if exists, otherwise use body directly
|
||||
let data = {
|
||||
let mut sessions = state.upload_sessions.write();
|
||||
let mut sessions = UPLOAD_SESSIONS.write();
|
||||
if let Some(session) = sessions.remove(&uuid) {
|
||||
// Verify session belongs to this repository
|
||||
if session.name != name {
|
||||
@@ -486,7 +488,7 @@ async fn upload_blob(
|
||||
// Verify digest matches uploaded content (Docker Distribution Spec)
|
||||
{
|
||||
use sha2::Digest as _;
|
||||
let computed = format!("sha256:{}", hex::encode(sha2::Sha256::digest(&data)));
|
||||
let computed = format!("sha256:{:x}", sha2::Sha256::digest(&data));
|
||||
if computed != *digest {
|
||||
tracing::warn!(
|
||||
expected = %digest,
|
||||
@@ -562,7 +564,7 @@ async fn get_manifest(
|
||||
|
||||
// Calculate digest for Docker-Content-Digest header
|
||||
use sha2::Digest;
|
||||
let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(&data)));
|
||||
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));
|
||||
|
||||
// Detect manifest media type from content
|
||||
let content_type = detect_manifest_media_type(&data);
|
||||
@@ -612,7 +614,7 @@ async fn get_manifest(
|
||||
|
||||
// Calculate digest for Docker-Content-Digest header
|
||||
use sha2::Digest;
|
||||
let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(&data)));
|
||||
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));
|
||||
|
||||
// Cache manifest and create metadata (fire and forget)
|
||||
let storage = state.storage.clone();
|
||||
@@ -682,16 +684,14 @@ async fn get_manifest(
|
||||
));
|
||||
|
||||
use sha2::Digest;
|
||||
let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(&data)));
|
||||
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));
|
||||
|
||||
// Cache under original name for future local hits
|
||||
let storage = state.storage.clone();
|
||||
let key_clone = key.clone();
|
||||
let data_clone = data.clone();
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = storage.put(&key_clone, &data_clone).await {
|
||||
tracing::warn!(key = %key_clone, error = %e, "Failed to cache blob in storage");
|
||||
}
|
||||
let _ = storage.put(&key_clone, &data_clone).await;
|
||||
});
|
||||
|
||||
state.repo_index.invalidate("docker");
|
||||
@@ -726,7 +726,7 @@ async fn put_manifest(
|
||||
|
||||
// Calculate digest
|
||||
use sha2::Digest;
|
||||
let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(&body)));
|
||||
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&body));
|
||||
|
||||
// Store by tag/reference
|
||||
let key = format!("docker/{}/manifests/{}.json", name, reference);
|
||||
@@ -819,7 +819,7 @@ async fn delete_manifest(
|
||||
if is_tag {
|
||||
if let Ok(data) = state.storage.get(&key).await {
|
||||
use sha2::Digest;
|
||||
let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(&data)));
|
||||
let digest = format!("sha256:{:x}", sha2::Sha256::digest(&data));
|
||||
let digest_key = format!("docker/{}/manifests/{}.json", name, digest);
|
||||
let _ = state.storage.delete(&digest_key).await;
|
||||
let digest_meta = format!("docker/{}/manifests/{}.meta.json", name, digest);
|
||||
@@ -921,21 +921,17 @@ async fn download_blob_ns(
|
||||
download_blob(state, Path((full_name, digest))).await
|
||||
}
|
||||
|
||||
async fn start_upload_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name)): Path<(String, String)>,
|
||||
) -> Response {
|
||||
async fn start_upload_ns(Path((ns, name)): Path<(String, String)>) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
start_upload(state, Path(full_name)).await
|
||||
start_upload(Path(full_name)).await
|
||||
}
|
||||
|
||||
async fn patch_blob_ns(
|
||||
state: State<Arc<AppState>>,
|
||||
Path((ns, name, uuid)): Path<(String, String, String)>,
|
||||
body: Bytes,
|
||||
) -> Response {
|
||||
let full_name = format!("{}/{}", ns, name);
|
||||
patch_blob(state, Path((full_name, uuid)), body).await
|
||||
patch_blob(Path((full_name, uuid)), body).await
|
||||
}
|
||||
|
||||
async fn upload_blob_ns(
|
||||
@@ -990,7 +986,7 @@ async fn delete_blob_ns(
|
||||
}
|
||||
|
||||
/// Fetch a blob from an upstream Docker registry
|
||||
pub async fn fetch_blob_from_upstream(
|
||||
async fn fetch_blob_from_upstream(
|
||||
client: &reqwest::Client,
|
||||
upstream_url: &str,
|
||||
name: &str,
|
||||
@@ -1047,7 +1043,7 @@ pub async fn fetch_blob_from_upstream(
|
||||
|
||||
/// Fetch a manifest from an upstream Docker registry
|
||||
/// Returns (manifest_bytes, content_type)
|
||||
pub async fn fetch_manifest_from_upstream(
|
||||
async fn fetch_manifest_from_upstream(
|
||||
client: &reqwest::Client,
|
||||
upstream_url: &str,
|
||||
name: &str,
|
||||
@@ -1322,599 +1318,3 @@ async fn update_metadata_on_pull(storage: Storage, meta_key: String) {
|
||||
let _ = storage.put(&meta_key, &json).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[allow(clippy::unwrap_used)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_image_metadata_default() {
|
||||
let meta = ImageMetadata::default();
|
||||
assert_eq!(meta.push_timestamp, 0);
|
||||
assert_eq!(meta.last_pulled, 0);
|
||||
assert_eq!(meta.downloads, 0);
|
||||
assert_eq!(meta.size_bytes, 0);
|
||||
assert_eq!(meta.os, "");
|
||||
assert_eq!(meta.arch, "");
|
||||
assert!(meta.variant.is_none());
|
||||
assert!(meta.layers.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_image_metadata_serialization() {
|
||||
let meta = ImageMetadata {
|
||||
push_timestamp: 1700000000,
|
||||
last_pulled: 1700001000,
|
||||
downloads: 42,
|
||||
size_bytes: 1024000,
|
||||
os: "linux".to_string(),
|
||||
arch: "amd64".to_string(),
|
||||
variant: None,
|
||||
layers: vec![LayerInfo {
|
||||
digest: "sha256:abc123".to_string(),
|
||||
size: 512000,
|
||||
}],
|
||||
};
|
||||
let json = serde_json::to_string(&meta).unwrap();
|
||||
assert!(json.contains("\"os\":\"linux\""));
|
||||
assert!(json.contains("\"arch\":\"amd64\""));
|
||||
assert!(!json.contains("variant")); // None => skipped
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_image_metadata_with_variant() {
|
||||
let meta = ImageMetadata {
|
||||
variant: Some("v8".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
let json = serde_json::to_string(&meta).unwrap();
|
||||
assert!(json.contains("\"variant\":\"v8\""));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_image_metadata_deserialization() {
|
||||
let json = r#"{
|
||||
"push_timestamp": 1700000000,
|
||||
"last_pulled": 0,
|
||||
"downloads": 5,
|
||||
"size_bytes": 2048,
|
||||
"os": "linux",
|
||||
"arch": "arm64",
|
||||
"variant": "v8",
|
||||
"layers": [
|
||||
{"digest": "sha256:aaa", "size": 1024},
|
||||
{"digest": "sha256:bbb", "size": 1024}
|
||||
]
|
||||
}"#;
|
||||
let meta: ImageMetadata = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(meta.os, "linux");
|
||||
assert_eq!(meta.arch, "arm64");
|
||||
assert_eq!(meta.variant, Some("v8".to_string()));
|
||||
assert_eq!(meta.layers.len(), 2);
|
||||
assert_eq!(meta.layers[0].digest, "sha256:aaa");
|
||||
assert_eq!(meta.layers[1].size, 1024);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layer_info_serialization_roundtrip() {
|
||||
let layer = LayerInfo {
|
||||
digest: "sha256:deadbeef".to_string(),
|
||||
size: 999999,
|
||||
};
|
||||
let json = serde_json::to_value(&layer).unwrap();
|
||||
let restored: LayerInfo = serde_json::from_value(json).unwrap();
|
||||
assert_eq!(layer.digest, restored.digest);
|
||||
assert_eq!(layer.size, restored.size);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cleanup_expired_sessions_empty() {
|
||||
let sessions: RwLock<HashMap<String, UploadSession>> = RwLock::new(HashMap::new());
|
||||
cleanup_expired_sessions(&sessions);
|
||||
assert_eq!(sessions.read().len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_cleanup_expired_sessions_fresh() {
|
||||
let sessions: RwLock<HashMap<String, UploadSession>> = RwLock::new(HashMap::new());
|
||||
sessions.write().insert(
|
||||
"uuid-1".to_string(),
|
||||
UploadSession {
|
||||
data: vec![1, 2, 3],
|
||||
name: "test/image".to_string(),
|
||||
created_at: std::time::Instant::now(),
|
||||
},
|
||||
);
|
||||
cleanup_expired_sessions(&sessions);
|
||||
assert_eq!(sessions.read().len(), 1); // not expired
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_upload_sessions_default() {
|
||||
// Without env var set, should return default
|
||||
let max = max_upload_sessions();
|
||||
assert!(max > 0);
|
||||
assert_eq!(max, DEFAULT_MAX_UPLOAD_SESSIONS);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_max_session_size_default() {
|
||||
let max = max_session_size();
|
||||
assert_eq!(max, DEFAULT_MAX_SESSION_SIZE_MB * 1024 * 1024);
|
||||
}
|
||||
|
||||
// --- detect_manifest_media_type tests ---
|
||||
|
||||
    #[test]
    fn test_detect_manifest_explicit_media_type() {
        let manifest = serde_json::json!({
            "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
            "schemaVersion": 2
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(
            result,
            "application/vnd.docker.distribution.manifest.v2+json"
        );
    }

    #[test]
    fn test_detect_manifest_oci_media_type() {
        let manifest = serde_json::json!({
            "mediaType": "application/vnd.oci.image.manifest.v1+json",
            "schemaVersion": 2
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(result, "application/vnd.oci.image.manifest.v1+json");
    }

    #[test]
    fn test_detect_manifest_schema_v1() {
        let manifest = serde_json::json!({
            "schemaVersion": 1,
            "name": "test/image"
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(
            result,
            "application/vnd.docker.distribution.manifest.v1+json"
        );
    }

    #[test]
    fn test_detect_manifest_docker_v2_from_config() {
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "config": {
                "mediaType": "application/vnd.docker.container.image.v1+json",
                "digest": "sha256:abc"
            }
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(
            result,
            "application/vnd.docker.distribution.manifest.v2+json"
        );
    }

    #[test]
    fn test_detect_manifest_oci_from_config() {
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "config": {
                "mediaType": "application/vnd.oci.image.config.v1+json",
                "digest": "sha256:abc"
            }
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(result, "application/vnd.oci.image.manifest.v1+json");
    }

    #[test]
    fn test_detect_manifest_no_config_media_type() {
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "config": {
                "digest": "sha256:abc"
            }
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(
            result,
            "application/vnd.docker.distribution.manifest.v2+json"
        );
    }

    #[test]
    fn test_detect_manifest_index() {
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "manifests": [
                {"digest": "sha256:aaa", "platform": {"os": "linux", "architecture": "amd64"}}
            ]
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(result, "application/vnd.oci.image.index.v1+json");
    }

    #[test]
    fn test_detect_manifest_invalid_json() {
        let result = detect_manifest_media_type(b"not json at all");
        assert_eq!(
            result,
            "application/vnd.docker.distribution.manifest.v2+json"
        );
    }

    #[test]
    fn test_detect_manifest_empty() {
        let result = detect_manifest_media_type(b"{}");
        assert_eq!(
            result,
            "application/vnd.docker.distribution.manifest.v2+json"
        );
    }

    #[test]
    fn test_detect_manifest_helm_chart() {
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "config": {
                "mediaType": "application/vnd.cncf.helm.config.v1+json",
                "digest": "sha256:abc"
            }
        });
        let result = detect_manifest_media_type(manifest.to_string().as_bytes());
        assert_eq!(result, "application/vnd.oci.image.manifest.v1+json");
    }
}
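
// Taken together, the tests above pin down the detection precedence for
// detect_manifest_media_type: an explicit top-level "mediaType" always wins;
// otherwise a config.mediaType of vnd.oci.* (including non-image configs such
// as Helm charts) yields the OCI image manifest type; a "manifests" array
// yields the OCI index type; "schemaVersion": 1 yields the legacy Docker v1
// type; and anything else (missing config.mediaType, invalid JSON, "{}")
// falls back to the Docker v2 manifest type.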

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod integration_tests {
    use crate::test_helpers::{body_bytes, create_test_context, send};
    use axum::body::Body;
    use axum::http::{header, Method, StatusCode};
    use sha2::Digest;

    #[tokio::test]
    async fn test_docker_v2_check() {
        let ctx = create_test_context();
        let resp = send(&ctx.app, Method::GET, "/v2/", Body::empty()).await;
        assert_eq!(resp.status(), StatusCode::OK);
    }

    #[tokio::test]
    async fn test_docker_catalog_empty() {
        let ctx = create_test_context();
        let resp = send(&ctx.app, Method::GET, "/v2/_catalog", Body::empty()).await;
        assert_eq!(resp.status(), StatusCode::OK);
        let body = body_bytes(resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert!(json["repositories"].as_array().unwrap().is_empty());
    }

    #[tokio::test]
    async fn test_docker_put_get_manifest() {
        let ctx = create_test_context();
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
            "config": {
                "mediaType": "application/vnd.docker.container.image.v1+json",
                "size": 0,
                "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
            },
            "layers": []
        });
        let manifest_bytes = serde_json::to_vec(&manifest).unwrap();

        let put_resp = send(
            &ctx.app,
            Method::PUT,
            "/v2/alpine/manifests/latest",
            Body::from(manifest_bytes.clone()),
        )
        .await;
        assert_eq!(put_resp.status(), StatusCode::CREATED);
        let digest_header = put_resp
            .headers()
            .get("docker-content-digest")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();
        assert!(digest_header.starts_with("sha256:"));

        let get_resp = send(
            &ctx.app,
            Method::GET,
            "/v2/alpine/manifests/latest",
            Body::empty(),
        )
        .await;
        assert_eq!(get_resp.status(), StatusCode::OK);
        let get_digest = get_resp
            .headers()
            .get("docker-content-digest")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();
        assert_eq!(get_digest, digest_header);
        let body = body_bytes(get_resp).await;
        assert_eq!(body.as_ref(), manifest_bytes.as_slice());
    }

    #[tokio::test]
    async fn test_docker_list_tags() {
        let ctx = create_test_context();
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
            "config": {
                "mediaType": "application/vnd.docker.container.image.v1+json",
                "size": 0,
                "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
            },
            "layers": []
        });
        send(
            &ctx.app,
            Method::PUT,
            "/v2/alpine/manifests/latest",
            Body::from(serde_json::to_vec(&manifest).unwrap()),
        )
        .await;

        let list_resp = send(&ctx.app, Method::GET, "/v2/alpine/tags/list", Body::empty()).await;
        assert_eq!(list_resp.status(), StatusCode::OK);
        let body = body_bytes(list_resp).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["name"], "alpine");
        let tags = json["tags"].as_array().unwrap();
        assert!(tags.contains(&serde_json::json!("latest")));
    }

    #[tokio::test]
    async fn test_docker_delete_manifest() {
        let ctx = create_test_context();
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
            "config": {
                "mediaType": "application/vnd.docker.container.image.v1+json",
                "size": 0,
                "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
            },
            "layers": []
        });
        let put_resp = send(
            &ctx.app,
            Method::PUT,
            "/v2/alpine/manifests/latest",
            Body::from(serde_json::to_vec(&manifest).unwrap()),
        )
        .await;
        let digest = put_resp
            .headers()
            .get("docker-content-digest")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();

        let del = send(
            &ctx.app,
            Method::DELETE,
            &format!("/v2/alpine/manifests/{}", digest),
            Body::empty(),
        )
        .await;
        assert_eq!(del.status(), StatusCode::ACCEPTED);
    }
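
    // The next two tests exercise the two OCI blob upload modes: monolithic
    // (a POST to /v2/<name>/blobs/uploads/ opens a session, then a single PUT
    // with ?digest=<sha256> carries the whole blob) and chunked (POST, then
    // one or more PATCH requests with the data, then a finalizing PUT).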

    #[tokio::test]
    async fn test_docker_monolithic_upload() {
        let ctx = create_test_context();
        let blob_data = b"test blob data";
        let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(blob_data)));

        let post_resp = send(
            &ctx.app,
            Method::POST,
            "/v2/alpine/blobs/uploads/",
            Body::empty(),
        )
        .await;
        assert_eq!(post_resp.status(), StatusCode::ACCEPTED);
        let location = post_resp
            .headers()
            .get("location")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();
        let uuid = location.rsplit('/').next().unwrap();

        let put_url = format!("/v2/alpine/blobs/uploads/{}?digest={}", uuid, digest);
        let put_resp = send(&ctx.app, Method::PUT, &put_url, Body::from(&blob_data[..])).await;
        assert_eq!(put_resp.status(), StatusCode::CREATED);
    }

    #[tokio::test]
    async fn test_docker_chunked_upload() {
        let ctx = create_test_context();
        let blob_data = b"test chunked blob";
        let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(blob_data)));

        let post_resp = send(
            &ctx.app,
            Method::POST,
            "/v2/alpine/blobs/uploads/",
            Body::empty(),
        )
        .await;
        assert_eq!(post_resp.status(), StatusCode::ACCEPTED);
        let location = post_resp
            .headers()
            .get("location")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();
        let uuid = location.rsplit('/').next().unwrap();

        let patch_url = format!("/v2/alpine/blobs/uploads/{}", uuid);
        let patch_resp = send(
            &ctx.app,
            Method::PATCH,
            &patch_url,
            Body::from(&blob_data[..]),
        )
        .await;
        assert_eq!(patch_resp.status(), StatusCode::ACCEPTED);

        let put_url = format!("/v2/alpine/blobs/uploads/{}?digest={}", uuid, digest);
        let put_resp = send(&ctx.app, Method::PUT, &put_url, Body::empty()).await;
        assert_eq!(put_resp.status(), StatusCode::CREATED);
    }

    #[tokio::test]
    async fn test_docker_check_blob() {
        let ctx = create_test_context();
        let blob_data = b"test blob for head";
        let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(blob_data)));

        let post_resp = send(
            &ctx.app,
            Method::POST,
            "/v2/alpine/blobs/uploads/",
            Body::empty(),
        )
        .await;
        let location = post_resp
            .headers()
            .get("location")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();
        let uuid = location.rsplit('/').next().unwrap();
        let put_url = format!("/v2/alpine/blobs/uploads/{}?digest={}", uuid, digest);
        send(&ctx.app, Method::PUT, &put_url, Body::from(&blob_data[..])).await;

        let head_url = format!("/v2/alpine/blobs/{}", digest);
        let head_resp = send(&ctx.app, Method::HEAD, &head_url, Body::empty()).await;
        assert_eq!(head_resp.status(), StatusCode::OK);
        let cl = head_resp
            .headers()
            .get(header::CONTENT_LENGTH)
            .unwrap()
            .to_str()
            .unwrap()
            .parse::<usize>()
            .unwrap();
        assert_eq!(cl, blob_data.len());
    }

    #[tokio::test]
    async fn test_docker_download_blob() {
        let ctx = create_test_context();
        let blob_data = b"test blob for download";
        let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(blob_data)));

        let post_resp = send(
            &ctx.app,
            Method::POST,
            "/v2/alpine/blobs/uploads/",
            Body::empty(),
        )
        .await;
        let location = post_resp
            .headers()
            .get("location")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();
        let uuid = location.rsplit('/').next().unwrap();
        let put_url = format!("/v2/alpine/blobs/uploads/{}?digest={}", uuid, digest);
        send(&ctx.app, Method::PUT, &put_url, Body::from(&blob_data[..])).await;

        let get_url = format!("/v2/alpine/blobs/{}", digest);
        let get_resp = send(&ctx.app, Method::GET, &get_url, Body::empty()).await;
        assert_eq!(get_resp.status(), StatusCode::OK);
        let body = body_bytes(get_resp).await;
        assert_eq!(body.as_ref(), &blob_data[..]);
    }

    #[tokio::test]
    async fn test_docker_blob_not_found() {
        let ctx = create_test_context();
        let fake_digest = "sha256:0000000000000000000000000000000000000000000000000000000000000000";
        let head_url = format!("/v2/alpine/blobs/{}", fake_digest);
        let resp = send(&ctx.app, Method::HEAD, &head_url, Body::empty()).await;
        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn test_docker_delete_blob() {
        let ctx = create_test_context();
        let blob_data = b"test blob for delete";
        let digest = format!("sha256:{}", hex::encode(sha2::Sha256::digest(blob_data)));

        let post_resp = send(
            &ctx.app,
            Method::POST,
            "/v2/alpine/blobs/uploads/",
            Body::empty(),
        )
        .await;
        let location = post_resp
            .headers()
            .get("location")
            .unwrap()
            .to_str()
            .unwrap()
            .to_string();
        let uuid = location.rsplit('/').next().unwrap();
        let put_url = format!("/v2/alpine/blobs/uploads/{}?digest={}", uuid, digest);
        send(&ctx.app, Method::PUT, &put_url, Body::from(&blob_data[..])).await;

        let delete_url = format!("/v2/alpine/blobs/{}", digest);
        let delete_resp = send(&ctx.app, Method::DELETE, &delete_url, Body::empty()).await;
        assert_eq!(delete_resp.status(), StatusCode::ACCEPTED);
    }

    #[tokio::test]
    async fn test_docker_namespaced_routes() {
        let ctx = create_test_context();
        let manifest = serde_json::json!({
            "schemaVersion": 2,
            "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
            "config": {
                "mediaType": "application/vnd.docker.container.image.v1+json",
                "size": 0,
                "digest": "sha256:0000000000000000000000000000000000000000000000000000000000000000"
            },
            "layers": []
        });
        let put_resp = send(
            &ctx.app,
            Method::PUT,
            "/v2/library/alpine/manifests/latest",
            Body::from(serde_json::to_vec(&manifest).unwrap()),
        )
        .await;
        assert_eq!(put_resp.status(), StatusCode::CREATED);
        assert!(put_resp
            .headers()
            .get("docker-content-digest")
            .unwrap()
            .to_str()
            .unwrap()
            .starts_with("sha256:"));
    }
}

@@ -139,7 +139,6 @@ fn parse_www_authenticate(header: &str) -> Option<HashMap<String, String>> {
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

@@ -168,152 +167,4 @@ mod tests {
            Some(&"https://ghcr.io/token".to_string())
        );
    }

    #[test]
    fn test_parse_www_authenticate_no_bearer() {
        assert!(parse_www_authenticate("Basic realm=\"test\"").is_none());
    }

    #[test]
    fn test_parse_www_authenticate_empty() {
        assert!(parse_www_authenticate("").is_none());
    }

    #[test]
    fn test_parse_www_authenticate_partial() {
        let header = r#"Bearer realm="https://example.com/token""#;
        let params = parse_www_authenticate(header).unwrap();
        assert_eq!(
            params.get("realm"),
            Some(&"https://example.com/token".to_string())
        );
        assert!(!params.contains_key("service"));
    }

    #[test]
    fn test_docker_auth_default() {
        let auth = DockerAuth::default();
        assert!(auth.tokens.read().is_empty());
    }

    #[test]
    fn test_docker_auth_new() {
        let auth = DockerAuth::new(30);
        assert!(auth.tokens.read().is_empty());
    }

    #[tokio::test]
    async fn test_get_token_no_www_authenticate() {
        let auth = DockerAuth::default();
        let result = auth
            .get_token("https://registry.example.com", "library/test", None, None)
            .await;
        assert!(result.is_none());
    }

    #[tokio::test]
    async fn test_get_token_cache_hit() {
        let auth = DockerAuth::default();
        // Manually insert a cached token
        {
            let mut tokens = auth.tokens.write();
            tokens.insert(
                "https://registry.example.com:library/test".to_string(),
                CachedToken {
                    token: "cached-token-123".to_string(),
                    expires_at: Instant::now() + Duration::from_secs(300),
                },
            );
        }
        let result = auth
            .get_token("https://registry.example.com", "library/test", None, None)
            .await;
        assert_eq!(result, Some("cached-token-123".to_string()));
    }
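
    // Note the cache key format exercised above: tokens are cached per
    // "{registry}:{repository}" pair, so a token fetched for one repository
    // is not reused for another.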

    #[tokio::test]
    async fn test_get_token_cache_expired() {
        let auth = DockerAuth::default();
        {
            let mut tokens = auth.tokens.write();
            tokens.insert(
                "https://registry.example.com:library/test".to_string(),
                CachedToken {
                    token: "expired-token".to_string(),
                    expires_at: Instant::now() - Duration::from_secs(1),
                },
            );
        }
        // Without www_authenticate, returns None (can't fetch new token)
        let result = auth
            .get_token("https://registry.example.com", "library/test", None, None)
            .await;
        assert!(result.is_none());
    }

    #[test]
    fn test_parse_www_authenticate_bearer_only() {
        let params = parse_www_authenticate("Bearer ").unwrap();
        assert!(params.is_empty());
    }

    #[test]
    fn test_parse_www_authenticate_missing_realm() {
        let header = r#"Bearer service="registry.docker.io""#;
        let params = parse_www_authenticate(header).unwrap();
        assert!(params.get("realm").is_none());
        assert_eq!(
            params.get("service"),
            Some(&"registry.docker.io".to_string())
        );
    }

    #[test]
    fn test_parse_www_authenticate_missing_service() {
        let header = r#"Bearer realm="https://auth.docker.io/token""#;
        let params = parse_www_authenticate(header).unwrap();
        assert_eq!(
            params.get("realm"),
            Some(&"https://auth.docker.io/token".to_string())
        );
        assert!(params.get("service").is_none());
    }

    #[test]
    fn test_parse_www_authenticate_malformed_kv() {
        let header = r#"Bearer garbage,realm="https://auth.docker.io/token""#;
        let params = parse_www_authenticate(header).unwrap();
        assert_eq!(
            params.get("realm"),
            Some(&"https://auth.docker.io/token".to_string())
        );
    }

    #[tokio::test]
    async fn test_fetch_token_invalid_url() {
        let auth = DockerAuth::new(1);
        let result = auth
            .get_token(
                "https://registry.example.com",
                "library/test",
                Some(r#"Bearer realm="http://127.0.0.1:1/token",service="test""#),
                None,
            )
            .await;
        assert!(result.is_none());
    }

    #[tokio::test]
    async fn test_fetch_token_missing_realm_in_header() {
        let auth = DockerAuth::default();
        let result = auth
            .get_token(
                "https://registry.example.com",
                "library/test",
                Some(r#"Bearer service="registry.docker.io""#),
                None,
            )
            .await;
        assert!(result.is_none());
    }
}

@@ -1,523 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Go module proxy (GOPROXY protocol).
//!
//! Implements the 5 required endpoints:
//! GET /go/{module}/@v/list — list known versions
//! GET /go/{module}/@v/{ver}.info — version metadata (JSON)
//! GET /go/{module}/@v/{ver}.mod — go.mod file
//! GET /go/{module}/@v/{ver}.zip — module zip archive
//! GET /go/{module}/@latest — latest version info
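//!
//! For example, a client resolving github.com/Azure/sdk requests
//! GET /go/github.com/!azure/sdk/@v/list, since the GOPROXY protocol
//! escapes uppercase letters as '!' followed by the lowercase letter.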

use crate::activity_log::{ActionType, ActivityEntry};
use crate::audit::AuditEntry;
use crate::registry::{proxy_fetch, proxy_fetch_text, ProxyError};
use crate::AppState;
use axum::{
    extract::{Path, State},
    http::{header, HeaderValue, StatusCode},
    response::{IntoResponse, Response},
    routing::get,
    Router,
};
use percent_encoding::percent_decode;
use std::sync::Arc;

pub fn routes() -> Router<Arc<AppState>> {
    Router::new().route("/go/{*path}", get(handle))
}

/// Main handler — parses the wildcard path and dispatches to the right logic.
async fn handle(State(state): State<Arc<AppState>>, Path(path): Path<String>) -> Response {
    // URL-decode the path: Go client sends %21 for !, Axum wildcard may not decode it
    let path = percent_decode(path.as_bytes())
        .decode_utf8()
        .map(|s| s.into_owned())
        .unwrap_or(path);

    tracing::debug!(path = %path, "Go proxy request");

    // Validate path: no traversal, no null bytes
    if !is_safe_path(&path) {
        tracing::debug!(path = %path, "Go proxy: unsafe path");
        return StatusCode::BAD_REQUEST.into_response();
    }

    // Split: "github.com/!azure/sdk/@v/v1.0.0.info" → module + file
    let (module_encoded, file) = match split_go_path(&path) {
        Some(parts) => parts,
        None => {
            tracing::debug!(path = %path, "Go proxy: cannot split path");
            return StatusCode::NOT_FOUND.into_response();
        }
    };

    let storage_key = format!("go/{}", path);
    let content_type = content_type_for(&file);

    // Mutable endpoints: @v/list and @latest can be refreshed from upstream
    let is_mutable = file == "@v/list" || file == "@latest";
    // Immutable: .info, .mod, .zip — once cached, never overwrite
    let is_immutable = !is_mutable;

    // 1. Try local cache (for immutable files, this is authoritative)
    if let Ok(data) = state.storage.get(&storage_key).await {
        state.metrics.record_download("go");
        state.metrics.record_cache_hit();
        state.activity.push(ActivityEntry::new(
            ActionType::CacheHit,
            format_artifact(&module_encoded, &file),
            "go",
            "CACHE",
        ));
        return with_content_type(data.to_vec(), content_type);
    }

    // 2. Try upstream proxy
    let proxy_url = match &state.config.go.proxy {
        Some(url) => url.clone(),
        None => return StatusCode::NOT_FOUND.into_response(),
    };

    // Validate module path encoding (but keep encoded for upstream — proxy.golang.org expects ! encoding)
    if decode_module_path(&module_encoded).is_err() {
        return StatusCode::BAD_REQUEST.into_response();
    }

    let upstream_url = format!(
        "{}/{}",
        proxy_url.trim_end_matches('/'),
        format_upstream_path(&module_encoded, &file)
    );

    // Use longer timeout for .zip files
    let timeout = if file.ends_with(".zip") {
        state.config.go.proxy_timeout_zip
    } else {
        state.config.go.proxy_timeout
    };

    // Fetch: binary for .zip, text for everything else
    let data = if file.ends_with(".zip") {
        proxy_fetch(
            &state.http_client,
            &upstream_url,
            timeout,
            state.config.go.proxy_auth.as_deref(),
        )
        .await
    } else {
        proxy_fetch_text(
            &state.http_client,
            &upstream_url,
            timeout,
            state.config.go.proxy_auth.as_deref(),
            None,
        )
        .await
        .map(|s| s.into_bytes())
    };

    match data {
        Ok(bytes) => {
            // Enforce size limit for .zip
            if file.ends_with(".zip") && bytes.len() as u64 > state.config.go.max_zip_size {
                tracing::warn!(
                    module = module_encoded,
                    size = bytes.len(),
                    limit = state.config.go.max_zip_size,
                    "Go module zip exceeds size limit"
                );
                return StatusCode::PAYLOAD_TOO_LARGE.into_response();
            }

            state.metrics.record_download("go");
            state.metrics.record_cache_miss();
            state.activity.push(ActivityEntry::new(
                ActionType::ProxyFetch,
                format_artifact(&module_encoded, &file),
                "go",
                "PROXY",
            ));
            state
                .audit
                .log(AuditEntry::new("proxy_fetch", "api", "", "go", ""));

            // Background cache: immutable = put_if_absent, mutable = always overwrite
            let storage = state.storage.clone();
            let key = storage_key.clone();
            let data_clone = bytes.clone();
            tokio::spawn(async move {
                if is_immutable {
                    // Only write if not already cached (immutability guarantee)
                    if storage.stat(&key).await.is_none() {
                        let _ = storage.put(&key, &data_clone).await;
                    }
                } else {
                    let _ = storage.put(&key, &data_clone).await;
                }
            });

            state.repo_index.invalidate("go");
            with_content_type(bytes, content_type)
        }
        Err(ProxyError::NotFound) => StatusCode::NOT_FOUND.into_response(),
        Err(e) => {
            tracing::debug!(
                module = module_encoded,
                file = file,
                error = ?e,
                "Go upstream proxy error"
            );
            StatusCode::BAD_GATEWAY.into_response()
        }
    }
}
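
// Request flow, as implemented above: serve from local storage when present
// (authoritative for the immutable .info/.mod/.zip files), otherwise fetch
// from the configured upstream, enforce the zip size limit, record metrics
// and activity, and write the result back to storage in a background task:
// put-if-absent for immutable files, overwrite for @v/list and @latest.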

// ============================================================================
// Module path encoding/decoding
// ============================================================================

/// Decode Go module path: `!x` → `X`
///
/// Go module proxy spec requires uppercase letters to be encoded as `!`
/// followed by the lowercase letter. Raw uppercase in encoded path is invalid.
fn decode_module_path(encoded: &str) -> Result<String, ()> {
    let mut result = String::with_capacity(encoded.len());
    let mut chars = encoded.chars();
    while let Some(c) = chars.next() {
        if c == '!' {
            match chars.next() {
                Some(next) if next.is_ascii_lowercase() => {
                    result.push(next.to_ascii_uppercase());
                }
                _ => return Err(()),
            }
        } else if c.is_ascii_uppercase() {
            // Raw uppercase in encoded path is invalid per spec
            return Err(());
        } else {
            result.push(c);
        }
    }
    Ok(result)
}

/// Encode Go module path: `X` → `!x`
#[cfg(test)]
fn encode_module_path(path: &str) -> String {
    let mut result = String::with_capacity(path.len() + 8);
    for c in path.chars() {
        if c.is_ascii_uppercase() {
            result.push('!');
            result.push(c.to_ascii_lowercase());
        } else {
            result.push(c);
        }
    }
    result
}
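
// The '!' escape exists because module paths become URL and file path
// segments: encoding "Azure" as "!azure" keeps distinct modules distinct
// even on case-insensitive filesystems.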

// ============================================================================
// Path parsing helpers
// ============================================================================

/// Split Go path into (encoded_module, file).
///
/// Examples:
/// "github.com/user/repo/@v/v1.0.0.info" → ("github.com/user/repo", "@v/v1.0.0.info")
/// "github.com/user/repo/v2/@v/list" → ("github.com/user/repo/v2", "@v/list")
/// "github.com/user/repo/@latest" → ("github.com/user/repo", "@latest")
fn split_go_path(path: &str) -> Option<(String, String)> {
    // Try @latest first (it's simpler)
    if let Some(pos) = path.rfind("/@latest") {
        let module = &path[..pos];
        if !module.is_empty() {
            return Some((module.to_string(), "@latest".to_string()));
        }
    }

    // Try @v/ — find the last occurrence (handles /v2/@v/ correctly)
    if let Some(pos) = path.rfind("/@v/") {
        let module = &path[..pos];
        let file = &path[pos + 1..]; // "@v/..."
        if !module.is_empty() && !file.is_empty() {
            return Some((module.to_string(), file.to_string()));
        }
    }

    None
}

/// Path validation: no traversal attacks
fn is_safe_path(path: &str) -> bool {
    !path.contains("..")
        && !path.starts_with('/')
        && !path.contains("//")
        && !path.contains('\0')
        && !path.is_empty()
}

/// Content-Type for Go proxy responses
fn content_type_for(file: &str) -> &'static str {
    if file.ends_with(".info") || file == "@latest" {
        "application/json"
    } else if file.ends_with(".zip") {
        "application/zip"
    } else {
        // .mod, @v/list
        "text/plain; charset=utf-8"
    }
}

/// Build upstream URL path. The module stays in its encoded form, since
/// upstream proxies such as proxy.golang.org expect the `!` encoding
/// (the caller passes `module_encoded`; the original doc said "decoded",
/// which did not match the call site).
fn format_upstream_path(module_encoded: &str, file: &str) -> String {
    format!("{}/{}", module_encoded, file)
}

/// Human-readable artifact name for activity log
fn format_artifact(module: &str, file: &str) -> String {
    if file == "@v/list" || file == "@latest" {
        format!("{} {}", module, file)
    } else if let Some(version_file) = file.strip_prefix("@v/") {
        // "v1.0.0.info" → "module@v1.0.0"
        let version = version_file
            .rsplit_once('.')
            .map(|(v, _ext)| v)
            .unwrap_or(version_file);
        format!("{}@{}", module, version)
    } else {
        format!("{}/{}", module, file)
    }
}

/// Build response with Content-Type header
fn with_content_type(data: Vec<u8>, content_type: &'static str) -> Response {
    (
        StatusCode::OK,
        [(header::CONTENT_TYPE, HeaderValue::from_static(content_type))],
        data,
    )
        .into_response()
}

// ============================================================================
// Tests
// ============================================================================

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

    // ── Encoding/decoding ───────────────────────────────────────────────

    #[test]
    fn test_decode_azure() {
        assert_eq!(
            decode_module_path("github.com/!azure/sdk").unwrap(),
            "github.com/Azure/sdk"
        );
    }

    #[test]
    fn test_decode_multiple_uppercase() {
        assert_eq!(
            decode_module_path("!google!cloud!platform/foo").unwrap(),
            "GoogleCloudPlatform/foo"
        );
    }

    #[test]
    fn test_decode_no_uppercase() {
        assert_eq!(
            decode_module_path("github.com/user/repo").unwrap(),
            "github.com/user/repo"
        );
    }

    #[test]
    fn test_decode_invalid_bang_at_end() {
        assert!(decode_module_path("foo!").is_err());
    }

    #[test]
    fn test_decode_invalid_bang_followed_by_uppercase() {
        assert!(decode_module_path("foo!A").is_err());
    }

    #[test]
    fn test_decode_raw_uppercase_is_invalid() {
        assert!(decode_module_path("github.com/Azure/sdk").is_err());
    }

    #[test]
    fn test_encode_roundtrip() {
        let original = "github.com/Azure/azure-sdk-for-go";
        let encoded = encode_module_path(original);
        assert_eq!(encoded, "github.com/!azure/azure-sdk-for-go");
        assert_eq!(decode_module_path(&encoded).unwrap(), original);
    }

    #[test]
    fn test_encode_no_change() {
        assert_eq!(
            encode_module_path("github.com/user/repo"),
            "github.com/user/repo"
        );
    }

    // ── Path splitting ──────────────────────────────────────────────────

    #[test]
    fn test_split_version_info() {
        let (module, file) = split_go_path("github.com/user/repo/@v/v1.0.0.info").unwrap();
        assert_eq!(module, "github.com/user/repo");
        assert_eq!(file, "@v/v1.0.0.info");
    }

    #[test]
    fn test_split_version_list() {
        let (module, file) = split_go_path("github.com/user/repo/@v/list").unwrap();
        assert_eq!(module, "github.com/user/repo");
        assert_eq!(file, "@v/list");
    }

    #[test]
    fn test_split_latest() {
        let (module, file) = split_go_path("github.com/user/repo/@latest").unwrap();
        assert_eq!(module, "github.com/user/repo");
        assert_eq!(file, "@latest");
    }

    #[test]
    fn test_split_major_version_suffix() {
        let (module, file) = split_go_path("github.com/user/repo/v2/@v/list").unwrap();
        assert_eq!(module, "github.com/user/repo/v2");
        assert_eq!(file, "@v/list");
    }

    #[test]
    fn test_split_incompatible_version() {
        let (module, file) =
            split_go_path("github.com/user/repo/@v/v4.1.2+incompatible.info").unwrap();
        assert_eq!(module, "github.com/user/repo");
        assert_eq!(file, "@v/v4.1.2+incompatible.info");
    }

    #[test]
    fn test_split_pseudo_version() {
        let (module, file) =
            split_go_path("github.com/user/repo/@v/v0.0.0-20210101000000-abcdef123456.info")
                .unwrap();
        assert_eq!(module, "github.com/user/repo");
        assert_eq!(file, "@v/v0.0.0-20210101000000-abcdef123456.info");
    }

    #[test]
    fn test_split_no_at() {
        assert!(split_go_path("github.com/user/repo/v1.0.0").is_none());
    }

    #[test]
    fn test_split_empty_module() {
        assert!(split_go_path("/@v/list").is_none());
    }

    // ── Path safety ─────────────────────────────────────────────────────

    #[test]
    fn test_safe_path_normal() {
        assert!(is_safe_path("github.com/user/repo/@v/list"));
    }

    #[test]
    fn test_reject_traversal() {
        assert!(!is_safe_path("../../etc/passwd"));
    }

    #[test]
    fn test_reject_absolute() {
        assert!(!is_safe_path("/etc/passwd"));
    }

    #[test]
    fn test_reject_double_slash() {
        assert!(!is_safe_path("github.com//evil/@v/list"));
    }

    #[test]
    fn test_reject_null() {
        assert!(!is_safe_path("github.com/\0evil/@v/list"));
    }

    #[test]
    fn test_reject_empty() {
        assert!(!is_safe_path(""));
    }

    // ── Content-Type ────────────────────────────────────────────────────

    #[test]
    fn test_content_type_info() {
        assert_eq!(content_type_for("@v/v1.0.0.info"), "application/json");
    }

    #[test]
    fn test_content_type_latest() {
        assert_eq!(content_type_for("@latest"), "application/json");
    }

    #[test]
    fn test_content_type_zip() {
        assert_eq!(content_type_for("@v/v1.0.0.zip"), "application/zip");
    }

    #[test]
    fn test_content_type_mod() {
        assert_eq!(
            content_type_for("@v/v1.0.0.mod"),
            "text/plain; charset=utf-8"
        );
    }

    #[test]
    fn test_content_type_list() {
        assert_eq!(content_type_for("@v/list"), "text/plain; charset=utf-8");
    }

    // ── Artifact formatting ─────────────────────────────────────────────

    #[test]
    fn test_format_artifact_version() {
        assert_eq!(
            format_artifact("github.com/user/repo", "@v/v1.0.0.info"),
            "github.com/user/repo@v1.0.0"
        );
    }

    #[test]
    fn test_format_artifact_list() {
        assert_eq!(
            format_artifact("github.com/user/repo", "@v/list"),
            "github.com/user/repo @v/list"
        );
    }

    #[test]
    fn test_format_artifact_latest() {
        assert_eq!(
            format_artifact("github.com/user/repo", "@latest"),
            "github.com/user/repo @latest"
        );
    }

    #[test]
    fn test_format_artifact_zip() {
        assert_eq!(
            format_artifact("github.com/user/repo", "@v/v1.0.0.zip"),
            "github.com/user/repo@v1.0.0"
        );
    }
}
@@ -3,7 +3,7 @@

use crate::activity_log::{ActionType, ActivityEntry};
use crate::audit::AuditEntry;
-use crate::registry::proxy_fetch;
+use crate::config::basic_auth_header;
use crate::AppState;
use axum::{
    body::Bytes,
@@ -14,6 +14,7 @@ use axum::{
    Router,
};
use std::sync::Arc;
+use std::time::Duration;

pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
@@ -52,7 +53,7 @@ async fn download(State(state): State<Arc<AppState>>, Path(path): Path<String>)
    for proxy in &state.config.maven.proxies {
        let url = format!("{}/{}", proxy.url().trim_end_matches('/'), path);

-        match proxy_fetch(
+        match fetch_from_proxy(
            &state.http_client,
            &url,
            state.config.maven.proxy_timeout,
@@ -127,6 +128,25 @@ async fn upload(
    }
}

async fn fetch_from_proxy(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
    auth: Option<&str>,
) -> Result<Vec<u8>, ()> {
    let mut request = client.get(url).timeout(Duration::from_secs(timeout_secs));
    if let Some(credentials) = auth {
        request = request.header("Authorization", basic_auth_header(credentials));
    }
    let response = request.send().await.map_err(|_| ())?;

    if !response.status().is_success() {
        return Err(());
    }

    response.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
}
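
// Design note on the helper above: every failure mode (connection error,
// non-2xx status, body read error) collapses into Err(()), so the download
// loop simply falls through to the next configured proxy.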

fn with_content_type(
    path: &str,
    data: Bytes,
@@ -145,148 +165,3 @@ fn with_content_type(

    (StatusCode::OK, [(header::CONTENT_TYPE, content_type)], data)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_content_type_pom() {
        let (status, headers, _) =
            with_content_type("com/example/1.0/example-1.0.pom", Bytes::from("data"));
        assert_eq!(status, StatusCode::OK);
        assert_eq!(headers[0].1, "application/xml");
    }

    #[test]
    fn test_content_type_jar() {
        let (_, headers, _) =
            with_content_type("com/example/1.0/example-1.0.jar", Bytes::from("data"));
        assert_eq!(headers[0].1, "application/java-archive");
    }

    #[test]
    fn test_content_type_xml() {
        let (_, headers, _) =
            with_content_type("com/example/maven-metadata.xml", Bytes::from("data"));
        assert_eq!(headers[0].1, "application/xml");
    }

    #[test]
    fn test_content_type_sha1() {
        let (_, headers, _) =
            with_content_type("com/example/1.0/example-1.0.jar.sha1", Bytes::from("data"));
        assert_eq!(headers[0].1, "text/plain");
    }

    #[test]
    fn test_content_type_md5() {
        let (_, headers, _) =
            with_content_type("com/example/1.0/example-1.0.jar.md5", Bytes::from("data"));
        assert_eq!(headers[0].1, "text/plain");
    }

    #[test]
    fn test_content_type_unknown() {
        let (_, headers, _) = with_content_type("some/random/file.bin", Bytes::from("data"));
        assert_eq!(headers[0].1, "application/octet-stream");
    }

    #[test]
    fn test_content_type_preserves_body() {
        let body = Bytes::from("test-jar-content");
        let (_, _, data) = with_content_type("test.jar", body.clone());
        assert_eq!(data, body);
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod integration_tests {
    use crate::test_helpers::{body_bytes, create_test_context, send};
    use axum::body::Body;
    use axum::http::{header, Method, StatusCode};

    #[tokio::test]
    async fn test_maven_put_get_roundtrip() {
        let ctx = create_test_context();
        let jar_data = b"fake-jar-content";

        let put = send(
            &ctx.app,
            Method::PUT,
            "/maven2/com/example/mylib/1.0/mylib-1.0.jar",
            Body::from(&jar_data[..]),
        )
        .await;
        assert_eq!(put.status(), StatusCode::CREATED);

        let get = send(
            &ctx.app,
            Method::GET,
            "/maven2/com/example/mylib/1.0/mylib-1.0.jar",
            "",
        )
        .await;
        assert_eq!(get.status(), StatusCode::OK);
        let body = body_bytes(get).await;
        assert_eq!(&body[..], jar_data);
    }

    #[tokio::test]
    async fn test_maven_not_found_no_proxy() {
        let ctx = create_test_context();
        let resp = send(
            &ctx.app,
            Method::GET,
            "/maven2/missing/artifact/1.0/artifact-1.0.jar",
            "",
        )
        .await;
        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn test_maven_content_type_pom() {
        let ctx = create_test_context();
        send(
            &ctx.app,
            Method::PUT,
            "/maven2/com/ex/1.0/ex-1.0.pom",
            Body::from("<project/>"),
        )
        .await;

        let get = send(&ctx.app, Method::GET, "/maven2/com/ex/1.0/ex-1.0.pom", "").await;
        assert_eq!(get.status(), StatusCode::OK);
        assert_eq!(
            get.headers().get(header::CONTENT_TYPE).unwrap(),
            "application/xml"
        );
    }

    #[tokio::test]
    async fn test_maven_content_type_jar() {
        let ctx = create_test_context();
        send(
            &ctx.app,
            Method::PUT,
            "/maven2/org/test/app/2.0/app-2.0.jar",
            Body::from("jar-data"),
        )
        .await;

        let get = send(
            &ctx.app,
            Method::GET,
            "/maven2/org/test/app/2.0/app-2.0.jar",
            "",
        )
        .await;
        assert_eq!(get.status(), StatusCode::OK);
        assert_eq!(
            get.headers().get(header::CONTENT_TYPE).unwrap(),
            "application/java-archive"
        );
    }
}

@@ -4,7 +4,6 @@
mod cargo_registry;
pub mod docker;
pub mod docker_auth;
-mod go;
mod maven;
mod npm;
mod pypi;
@@ -13,108 +12,7 @@ mod raw;
pub use cargo_registry::routes as cargo_routes;
pub use docker::routes as docker_routes;
pub use docker_auth::DockerAuth;
-pub use go::routes as go_routes;
pub use maven::routes as maven_routes;
pub use npm::routes as npm_routes;
pub use pypi::routes as pypi_routes;
pub use raw::routes as raw_routes;

use crate::config::basic_auth_header;
use std::time::Duration;

#[derive(Debug)]
#[allow(dead_code)]
pub(crate) enum ProxyError {
    NotFound,
    Upstream(u16),
    Network(String),
}

/// Core fetch logic with retry. Callers provide a response extractor.
async fn proxy_fetch_core<T, F, Fut>(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
    auth: Option<&str>,
    extra_headers: Option<(&str, &str)>,
    extract: F,
) -> Result<T, ProxyError>
where
    F: Fn(reqwest::Response) -> Fut + Copy,
    Fut: std::future::Future<Output = Result<T, reqwest::Error>>,
{
    for attempt in 0..2 {
        let mut request = client.get(url).timeout(Duration::from_secs(timeout_secs));
        if let Some(credentials) = auth {
            request = request.header("Authorization", basic_auth_header(credentials));
        }
        if let Some((key, val)) = extra_headers {
            request = request.header(key, val);
        }

        match request.send().await {
            Ok(response) => {
                if response.status().is_success() {
                    return extract(response)
                        .await
                        .map_err(|e| ProxyError::Network(e.to_string()));
                }
                let status = response.status().as_u16();
                if (400..500).contains(&status) {
                    return Err(ProxyError::NotFound);
                }
                if attempt == 0 {
                    tracing::debug!(url, status, "upstream 5xx, retrying in 1s");
                    tokio::time::sleep(Duration::from_secs(1)).await;
                    continue;
                }
                return Err(ProxyError::Upstream(status));
            }
            Err(e) => {
                if attempt == 0 {
                    tracing::debug!(url, error = %e, "upstream error, retrying in 1s");
                    tokio::time::sleep(Duration::from_secs(1)).await;
                    continue;
                }
                return Err(ProxyError::Network(e.to_string()));
            }
        }
    }
    Err(ProxyError::Network("max retries exceeded".into()))
}

/// Fetch binary content from upstream proxy with timeout and 1 retry.
pub(crate) async fn proxy_fetch(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
    auth: Option<&str>,
) -> Result<Vec<u8>, ProxyError> {
    proxy_fetch_core(client, url, timeout_secs, auth, None, |r| async {
        r.bytes().await.map(|b| b.to_vec())
    })
    .await
}

/// Fetch text content from upstream proxy with timeout and 1 retry.
pub(crate) async fn proxy_fetch_text(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
    auth: Option<&str>,
    extra_headers: Option<(&str, &str)>,
) -> Result<String, ProxyError> {
    proxy_fetch_core(client, url, timeout_secs, auth, extra_headers, |r| r.text()).await
}
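
// Retry behavior in brief: proxy_fetch_core makes at most two attempts with
// a 1s pause between them; 4xx responses fail fast as NotFound (no retry),
// while 5xx responses and transport errors are retried once. Example call
// shape with an extra header, as the PyPI route uses:
//   proxy_fetch_text(&client, &url, timeout, None, Some(("Accept", "text/html"))).await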

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_proxy_fetch_invalid_url() {
        let client = reqwest::Client::new();
        let result = proxy_fetch(&client, "http://127.0.0.1:1/nonexistent", 2, None).await;
        assert!(matches!(result, Err(ProxyError::Network(_))));
    }
}

@@ -3,7 +3,7 @@

use crate::activity_log::{ActionType, ActivityEntry};
use crate::audit::AuditEntry;
-use crate::registry::proxy_fetch;
+use crate::config::basic_auth_header;
use crate::AppState;
use axum::{
    body::Bytes,
@@ -16,6 +16,7 @@ use axum::{
use base64::Engine;
use sha2::Digest;
use std::sync::Arc;
+use std::time::Duration;

pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
@@ -107,7 +108,7 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
        // Tarball: integrity check if hash exists
        let hash_key = format!("{}.sha256", key);
        if let Ok(stored_hash) = state.storage.get(&hash_key).await {
-            let computed = hex::encode(sha2::Sha256::digest(&data));
+            let computed = format!("{:x}", sha2::Sha256::digest(&data));
            let expected = String::from_utf8_lossy(&stored_hash);
            if computed != expected.as_ref() {
                tracing::error!(
@@ -139,7 +140,7 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
    if let Some(proxy_url) = &state.config.npm.proxy {
        let url = format!("{}/{}", proxy_url.trim_end_matches('/'), path);

-        if let Ok(data) = proxy_fetch(
+        if let Ok(data) = fetch_from_proxy(
            &state.http_client,
            &url,
            state.config.npm.proxy_timeout,
@@ -152,7 +153,7 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str

        if is_tarball {
            // Compute and store sha256
-            let hash = hex::encode(sha2::Sha256::digest(&data));
+            let hash = format!("{:x}", sha2::Sha256::digest(&data));
            let hash_key = format!("{}.sha256", key);
            let storage = state.storage.clone();
            tokio::spawn(async move {
@@ -176,7 +177,8 @@ async fn handle_request(State(state): State<Arc<AppState>>, Path(path): Path<Str
        } else {
            // Metadata: rewrite tarball URLs to point to NORA
            let nora_base = nora_base_url(&state);
-            let rewritten = rewrite_tarball_urls(&data, &nora_base, proxy_url).unwrap_or(data);
+            let rewritten = rewrite_tarball_urls(&data, &nora_base, proxy_url)
+                .unwrap_or_else(|_| data.clone());

            data_to_cache = rewritten.clone();
            data_to_serve = rewritten;
@@ -206,7 +208,7 @@ async fn refetch_metadata(state: &Arc<AppState>, path: &str, key: &str) -> Optio
    let proxy_url = state.config.npm.proxy.as_ref()?;
    let url = format!("{}/{}", proxy_url.trim_end_matches('/'), path);

-    let data = proxy_fetch(
+    let data = fetch_from_proxy(
        &state.http_client,
        &url,
        state.config.npm.proxy_timeout,
@@ -216,7 +218,8 @@ async fn refetch_metadata(state: &Arc<AppState>, path: &str, key: &str) -> Optio
    .ok()?;

    let nora_base = nora_base_url(state);
-    let rewritten = rewrite_tarball_urls(&data, &nora_base, proxy_url).unwrap_or(data);
+    let rewritten =
+        rewrite_tarball_urls(&data, &nora_base, proxy_url).unwrap_or_else(|_| data.clone());

    let storage = state.storage.clone();
    let key_clone = key.to_string();
@@ -338,15 +341,13 @@ async fn handle_publish(
        }

        // Store sha256
-        let hash = hex::encode(sha2::Sha256::digest(&tarball_bytes));
+        let hash = format!("{:x}", sha2::Sha256::digest(&tarball_bytes));
        let hash_key = format!("{}.sha256", tarball_key);
        let _ = state.storage.put(&hash_key, hash.as_bytes()).await;
    }
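
    // Integrity scheme used here: each stored tarball "<key>" gets a sibling
    // entry "<key>.sha256" holding the lowercase hex digest; on later cache
    // hits the tarball is re-hashed and compared against that side file
    // before being served.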

    // Merge versions
-    let Some(meta_obj) = metadata.as_object_mut() else {
-        return (StatusCode::INTERNAL_SERVER_ERROR, "invalid metadata format").into_response();
-    };
+    let meta_obj = metadata.as_object_mut().unwrap();
    let stored_versions = meta_obj.entry("versions").or_insert(serde_json::json!({}));
    if let Some(sv) = stored_versions.as_object_mut() {
        for (ver, ver_data) in new_versions {
@@ -418,6 +419,25 @@
// Helpers
// ============================================================================

async fn fetch_from_proxy(
    client: &reqwest::Client,
    url: &str,
    timeout_secs: u64,
    auth: Option<&str>,
) -> Result<Vec<u8>, ()> {
    let mut request = client.get(url).timeout(Duration::from_secs(timeout_secs));
    if let Some(credentials) = auth {
        request = request.header("Authorization", basic_auth_header(credentials));
    }
    let response = request.send().await.map_err(|_| ())?;

    if !response.status().is_success() {
        return Err(());
    }

    response.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
}

fn with_content_type(
    is_tarball: bool,
    data: Bytes,
@@ -432,7 +452,6 @@ fn with_content_type(
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

@@ -556,229 +575,4 @@ mod tests {
        assert!(!is_valid_attachment_name(""));
        assert!(!is_valid_attachment_name("foo\0bar.tgz"));
    }

    #[test]
    fn test_with_content_type_tarball() {
        let data = Bytes::from("tarball-data");
        let (status, headers, body) = with_content_type(true, data.clone());
        assert_eq!(status, StatusCode::OK);
        assert_eq!(headers[0].1, "application/octet-stream");
        assert_eq!(body, data);
    }

    #[test]
    fn test_with_content_type_json() {
        let data = Bytes::from("json-data");
        let (status, headers, body) = with_content_type(false, data.clone());
        assert_eq!(status, StatusCode::OK);
        assert_eq!(headers[0].1, "application/json");
        assert_eq!(body, data);
    }

    #[test]
    fn test_rewrite_tarball_urls_trailing_slash() {
        let metadata = serde_json::json!({
            "name": "test",
            "versions": {
                "1.0.0": {
                    "dist": {
                        "tarball": "https://registry.npmjs.org/test/-/test-1.0.0.tgz"
                    }
                }
            }
        });
        let data = serde_json::to_vec(&metadata).unwrap();
        let result =
            rewrite_tarball_urls(&data, "http://nora:5000/", "https://registry.npmjs.org/")
                .unwrap();
        let json: serde_json::Value = serde_json::from_slice(&result).unwrap();
        let tarball = json["versions"]["1.0.0"]["dist"]["tarball"]
            .as_str()
            .unwrap();
        assert!(tarball.starts_with("http://nora:5000/npm/"));
    }

    #[test]
    fn test_rewrite_tarball_urls_preserves_other_fields() {
        let metadata = serde_json::json!({
            "name": "test",
            "description": "A test package",
            "versions": {
                "1.0.0": {
                    "dist": {
                        "tarball": "https://registry.npmjs.org/test/-/test-1.0.0.tgz",
                        "shasum": "abc123"
                    },
                    "dependencies": {"lodash": "^4.0.0"}
                }
            }
        });
        let data = serde_json::to_vec(&metadata).unwrap();
        let result =
            rewrite_tarball_urls(&data, "http://nora:5000", "https://registry.npmjs.org").unwrap();
        let json: serde_json::Value = serde_json::from_slice(&result).unwrap();
        assert_eq!(json["description"], "A test package");
        assert_eq!(json["versions"]["1.0.0"]["dist"]["shasum"], "abc123");
    }

    #[test]
    fn test_is_valid_attachment_name_valid() {
        assert!(is_valid_attachment_name("package-1.0.0.tgz"));
        assert!(is_valid_attachment_name("@scope-pkg-2.0.tgz"));
        assert!(is_valid_attachment_name("my_pkg.tgz"));
    }

    #[test]
    fn test_is_valid_attachment_name_traversal() {
        assert!(!is_valid_attachment_name("../etc/passwd"));
        assert!(!is_valid_attachment_name("foo/../bar"));
    }

    #[test]
    fn test_is_valid_attachment_name_slash() {
        assert!(!is_valid_attachment_name("path/file.tgz"));
        assert!(!is_valid_attachment_name("path\\file.tgz"));
    }

    #[test]
    fn test_is_valid_attachment_name_null_byte() {
        assert!(!is_valid_attachment_name("file\0.tgz"));
    }

    #[test]
    fn test_is_valid_attachment_name_empty() {
        assert!(!is_valid_attachment_name(""));
    }

    #[test]
    fn test_is_valid_attachment_name_special_chars() {
        assert!(!is_valid_attachment_name("file name.tgz")); // space
        assert!(!is_valid_attachment_name("file;cmd.tgz")); // semicolon
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod integration_tests {
    use crate::test_helpers::{body_bytes, create_test_context, send};
    use axum::body::Body;
    use axum::http::{Method, StatusCode};
    use base64::Engine;

    #[tokio::test]
    async fn test_npm_metadata_from_cache() {
        let ctx = create_test_context();

        let metadata = serde_json::json!({
            "name": "lodash",
            "versions": {
                "4.17.21": { "dist": { "tarball": "http://example.com/lodash.tgz" } }
            }
        });
        let metadata_bytes = serde_json::to_vec(&metadata).unwrap();

        ctx.state
            .storage
            .put("npm/lodash/metadata.json", &metadata_bytes)
            .await
            .unwrap();

        let response = send(&ctx.app, Method::GET, "/npm/lodash", "").await;

        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        let json: serde_json::Value = serde_json::from_slice(&body).unwrap();
        assert_eq!(json["name"], "lodash");
    }

    #[tokio::test]
    async fn test_npm_tarball_from_cache() {
        let ctx = create_test_context();

        let tarball_data = b"fake-tarball-bytes";
        ctx.state
            .storage
            .put("npm/lodash/tarballs/lodash-4.17.21.tgz", tarball_data)
            .await
            .unwrap();

        let response = send(
            &ctx.app,
            Method::GET,
            "/npm/lodash/-/lodash-4.17.21.tgz",
            "",
        )
        .await;

        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        assert_eq!(&body[..], tarball_data);
    }

    #[tokio::test]
    async fn test_npm_not_found_no_proxy() {
        let ctx = create_test_context();

        // No proxy configured, no local data
        let response = send(&ctx.app, Method::GET, "/npm/nonexistent", "").await;

        assert_eq!(response.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn test_npm_publish_basic() {
        let ctx = create_test_context();

        let tarball_data = b"fake-tarball";
        let base64_data = base64::engine::general_purpose::STANDARD.encode(tarball_data);

        let payload = serde_json::json!({
            "name": "mypkg",
            "versions": {
                "1.0.0": { "dist": {} }
            },
            "_attachments": {
                "mypkg-1.0.0.tgz": { "data": base64_data }
            },
            "dist-tags": { "latest": "1.0.0" }
        });

        let body_bytes = serde_json::to_vec(&payload).unwrap();
        let response = send(&ctx.app, Method::PUT, "/npm/mypkg", Body::from(body_bytes)).await;

        assert_eq!(response.status(), StatusCode::CREATED);

        // Verify tarball was stored
        let stored_tarball = ctx
            .state
            .storage
            .get("npm/mypkg/tarballs/mypkg-1.0.0.tgz")
            .await
            .unwrap();
        assert_eq!(&stored_tarball[..], tarball_data);
    }

    #[tokio::test]
    async fn test_npm_publish_name_mismatch() {
        let ctx = create_test_context();

        let tarball_data = b"fake-tarball";
        let base64_data = base64::engine::general_purpose::STANDARD.encode(tarball_data);

        let payload = serde_json::json!({
            "name": "other",
            "versions": {
                "1.0.0": { "dist": {} }
            },
            "_attachments": {
                "other-1.0.0.tgz": { "data": base64_data }
            },
            "dist-tags": { "latest": "1.0.0" }
        });

        let body_bytes = serde_json::to_vec(&payload).unwrap();
        let response = send(&ctx.app, Method::PUT, "/npm/mypkg", Body::from(body_bytes)).await;

        assert_eq!(response.status(), StatusCode::BAD_REQUEST);
    }
}

@@ -3,7 +3,7 @@

use crate::activity_log::{ActionType, ActivityEntry};
use crate::audit::AuditEntry;
-use crate::registry::{proxy_fetch, proxy_fetch_text};
+use crate::config::basic_auth_header;
use crate::AppState;
use axum::{
    extract::{Path, State},
@@ -13,6 +13,7 @@ use axum::{
    Router,
};
use std::sync::Arc;
+use std::time::Duration;

pub fn routes() -> Router<Arc<AppState>> {
    Router::new()
@@ -86,12 +87,11 @@ async fn package_versions(
    if let Some(proxy_url) = &state.config.pypi.proxy {
        let url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);

-        if let Ok(html) = proxy_fetch_text(
+        if let Ok(html) = fetch_package_page(
            &state.http_client,
            &url,
            state.config.pypi.proxy_timeout,
            state.config.pypi.proxy_auth.as_deref(),
-            Some(("Accept", "text/html")),
        )
        .await
        {
@@ -142,18 +142,17 @@ async fn download_file(
    // First, fetch the package page to find the actual download URL
    let page_url = format!("{}/{}/", proxy_url.trim_end_matches('/'), normalized);

-    if let Ok(html) = proxy_fetch_text(
+    if let Ok(html) = fetch_package_page(
        &state.http_client,
        &page_url,
        state.config.pypi.proxy_timeout,
        state.config.pypi.proxy_auth.as_deref(),
-        Some(("Accept", "text/html")),
    )
    .await
    {
        // Find the URL for this specific file
        if let Some(file_url) = find_file_url(&html, &filename) {
-            if let Ok(data) = proxy_fetch(
+            if let Ok(data) = fetch_file(
                &state.http_client,
                &file_url,
                state.config.pypi.proxy_timeout,
@@ -206,6 +205,49 @@ fn normalize_name(name: &str) -> String {
    name.to_lowercase().replace(['-', '_', '.'], "-")
}
|
||||
|
||||
/// Fetch package page from upstream
|
||||
async fn fetch_package_page(
|
||||
client: &reqwest::Client,
|
||||
url: &str,
|
||||
timeout_secs: u64,
|
||||
auth: Option<&str>,
|
||||
) -> Result<String, ()> {
|
||||
let mut request = client
|
||||
.get(url)
|
||||
.timeout(Duration::from_secs(timeout_secs))
|
||||
.header("Accept", "text/html");
|
||||
if let Some(credentials) = auth {
|
||||
request = request.header("Authorization", basic_auth_header(credentials));
|
||||
}
|
||||
let response = request.send().await.map_err(|_| ())?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
response.text().await.map_err(|_| ())
|
||||
}
|
||||
|
||||
/// Fetch file from upstream
|
||||
async fn fetch_file(
|
||||
client: &reqwest::Client,
|
||||
url: &str,
|
||||
timeout_secs: u64,
|
||||
auth: Option<&str>,
|
||||
) -> Result<Vec<u8>, ()> {
|
||||
let mut request = client.get(url).timeout(Duration::from_secs(timeout_secs));
|
||||
if let Some(credentials) = auth {
|
||||
request = request.header("Authorization", basic_auth_header(credentials));
|
||||
}
|
||||
let response = request.send().await.map_err(|_| ())?;
|
||||
|
||||
if !response.status().is_success() {
|
||||
return Err(());
|
||||
}
|
||||
|
||||
response.bytes().await.map(|b| b.to_vec()).map_err(|_| ())
|
||||
}
|
||||
|
||||
/// Rewrite PyPI links to point to our registry
|
||||
fn rewrite_pypi_links(html: &str, package_name: &str) -> String {
|
||||
// Simple regex-free approach: find href="..." and rewrite
|
||||
@@ -305,311 +347,3 @@ fn find_file_url(html: &str, target_filename: &str) -> Option<String> {
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
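// Editor's note (not from this diff): both call sites above follow the same
// two-step proxy flow: fetch the upstream simple-index page, then either
// rewrite its links for clients or resolve one filename to its upstream URL
// and stream that file. Schematically, with names from the code above:
//
//     let html = fetch_package_page(client, &page_url, timeout, auth).await?;
//     let rewritten = rewrite_pypi_links(&html, &normalized);  // index response
//     let file_url = find_file_url(&html, &filename);          // download path
//
// Errors are collapsed to Result<_, ()> so the handlers can fall through to
// a 404 when the upstream is unreachable.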
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use proptest::prelude::*;

    proptest! {
        #[test]
        fn extract_filename_never_panics(s in "\\PC{0,500}") {
            let _ = extract_filename(&s);
        }

        #[test]
        fn extract_filename_valid_tarball(
            name in "[a-z][a-z0-9_-]{0,20}",
            version in "[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}"
        ) {
            let url = format!("https://files.example.com/packages/{}-{}.tar.gz", name, version);
            let result = extract_filename(&url);
            prop_assert!(result.is_some());
            prop_assert!(result.unwrap().ends_with(".tar.gz"));
        }

        #[test]
        fn extract_filename_valid_wheel(
            name in "[a-z][a-z0-9_]{0,20}",
            version in "[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}"
        ) {
            let url = format!("https://files.example.com/{}-{}-py3-none-any.whl", name, version);
            let result = extract_filename(&url);
            prop_assert!(result.is_some());
            prop_assert!(result.unwrap().ends_with(".whl"));
        }

        #[test]
        fn extract_filename_strips_hash(
            name in "[a-z]{1,10}",
            hash in "[a-f0-9]{64}"
        ) {
            let url = format!("https://example.com/{}.tar.gz#sha256={}", name, hash);
            let result = extract_filename(&url);
            prop_assert!(result.is_some());
            let fname = result.unwrap();
            prop_assert!(!fname.contains('#'));
        }

        #[test]
        fn extract_filename_rejects_unknown_ext(
            name in "[a-z]{1,10}",
            ext in "(exe|dll|so|bin|dat)"
        ) {
            let url = format!("https://example.com/{}.{}", name, ext);
            prop_assert!(extract_filename(&url).is_none());
        }
    }

    #[test]
    fn test_normalize_name_lowercase() {
        assert_eq!(normalize_name("Flask"), "flask");
        assert_eq!(normalize_name("REQUESTS"), "requests");
    }

    #[test]
    fn test_normalize_name_separators() {
        assert_eq!(normalize_name("my-package"), "my-package");
        assert_eq!(normalize_name("my_package"), "my-package");
        assert_eq!(normalize_name("my.package"), "my-package");
    }

    #[test]
    fn test_normalize_name_mixed() {
        assert_eq!(
            normalize_name("My_Complex.Package-Name"),
            "my-complex-package-name"
        );
    }

    #[test]
    fn test_normalize_name_empty() {
        assert_eq!(normalize_name(""), "");
    }

    #[test]
    fn test_normalize_name_already_normal() {
        assert_eq!(normalize_name("simple"), "simple");
    }

    #[test]
    fn test_extract_filename_tarball() {
        assert_eq!(
            extract_filename(
                "https://files.pythonhosted.org/packages/aa/bb/flask-2.0.0.tar.gz#sha256=abc123"
            ),
            Some("flask-2.0.0.tar.gz")
        );
    }

    #[test]
    fn test_extract_filename_wheel() {
        assert_eq!(
            extract_filename(
                "https://files.pythonhosted.org/packages/aa/bb/flask-2.0.0-py3-none-any.whl"
            ),
            Some("flask-2.0.0-py3-none-any.whl")
        );
    }

    #[test]
    fn test_extract_filename_tgz() {
        assert_eq!(
            extract_filename("https://example.com/package-1.0.tgz"),
            Some("package-1.0.tgz")
        );
    }

    #[test]
    fn test_extract_filename_zip() {
        assert_eq!(
            extract_filename("https://example.com/package-1.0.zip"),
            Some("package-1.0.zip")
        );
    }

    #[test]
    fn test_extract_filename_egg() {
        assert_eq!(
            extract_filename("https://example.com/package-1.0.egg"),
            Some("package-1.0.egg")
        );
    }

    #[test]
    fn test_extract_filename_unknown_ext() {
        assert_eq!(extract_filename("https://example.com/readme.txt"), None);
    }

    #[test]
    fn test_extract_filename_no_path() {
        assert_eq!(extract_filename(""), None);
    }

    #[test]
    fn test_extract_filename_bare() {
        assert_eq!(
            extract_filename("package-1.0.tar.gz"),
            Some("package-1.0.tar.gz")
        );
    }

    #[test]
    fn test_remove_attribute_present() {
        let html = r#"<a href="url" data-core-metadata="true">link</a>"#;
        let result = remove_attribute(html, "data-core-metadata");
        assert_eq!(result, r#"<a href="url">link</a>"#);
    }

    #[test]
    fn test_remove_attribute_absent() {
        let html = r#"<a href="url">link</a>"#;
        let result = remove_attribute(html, "data-core-metadata");
        assert_eq!(result, html);
    }

    #[test]
    fn test_remove_attribute_multiple() {
        let html =
            r#"<a data-core-metadata="true">one</a><a data-core-metadata="sha256=abc">two</a>"#;
        let result = remove_attribute(html, "data-core-metadata");
        assert_eq!(result, r#"<a>one</a><a>two</a>"#);
    }

    #[test]
    fn test_rewrite_pypi_links_basic() {
        let html = r#"<a href="https://files.pythonhosted.org/packages/aa/bb/flask-2.0.tar.gz#sha256=abc">flask-2.0.tar.gz</a>"#;
        let result = rewrite_pypi_links(html, "flask");
        assert!(result.contains("/simple/flask/flask-2.0.tar.gz"));
    }

    #[test]
    fn test_rewrite_pypi_links_unknown_ext() {
        let html = r#"<a href="https://example.com/readme.txt">readme</a>"#;
        let result = rewrite_pypi_links(html, "test");
        assert!(result.contains("https://example.com/readme.txt"));
    }

    #[test]
    fn test_rewrite_pypi_links_removes_metadata_attrs() {
        let html = r#"<a href="https://example.com/pkg-1.0.whl" data-core-metadata="sha256=abc" data-dist-info-metadata="sha256=def">pkg</a>"#;
        let result = rewrite_pypi_links(html, "pkg");
        assert!(!result.contains("data-core-metadata"));
        assert!(!result.contains("data-dist-info-metadata"));
    }

    #[test]
    fn test_rewrite_pypi_links_empty() {
        assert_eq!(rewrite_pypi_links("", "pkg"), "");
    }

    #[test]
    fn test_find_file_url_found() {
        let html = r#"<a href="https://files.pythonhosted.org/packages/aa/bb/flask-2.0.tar.gz#sha256=abc">flask-2.0.tar.gz</a>"#;
        let result = find_file_url(html, "flask-2.0.tar.gz");
        assert_eq!(
            result,
            Some("https://files.pythonhosted.org/packages/aa/bb/flask-2.0.tar.gz".to_string())
        );
    }

    #[test]
    fn test_find_file_url_not_found() {
        let html = r#"<a href="https://example.com/other-1.0.tar.gz">other</a>"#;
        let result = find_file_url(html, "flask-2.0.tar.gz");
        assert_eq!(result, None);
    }

    #[test]
    fn test_find_file_url_strips_hash() {
        let html = r#"<a href="https://example.com/pkg-1.0.whl#sha256=deadbeef">pkg</a>"#;
        let result = find_file_url(html, "pkg-1.0.whl");
        assert_eq!(result, Some("https://example.com/pkg-1.0.whl".to_string()));
    }
}
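// Editor's note (not from this diff): PEP 503 normalization collapses runs
// of separators (Python: re.sub(r"[-_.]+", "-", name).lower()), while the
// char-by-char replace in normalize_name above maps "my..pkg" to "my--pkg".
// A run-collapsing sketch with no regex dependency (the name is hypothetical):
fn normalize_name_pep503(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut prev_sep = false;
    for ch in name.to_lowercase().chars() {
        if matches!(ch, '-' | '_' | '.') {
            // Emit a single '-' for any run of separators.
            if !prev_sep {
                out.push('-');
            }
            prev_sep = true;
        } else {
            out.push(ch);
            prev_sep = false;
        }
    }
    out
}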
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod integration_tests {
    use crate::test_helpers::{body_bytes, create_test_context, send};
    use axum::http::{Method, StatusCode};

    #[tokio::test]
    async fn test_pypi_list_empty() {
        let ctx = create_test_context();
        let response = send(&ctx.app, Method::GET, "/simple/", "").await;

        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        let html = String::from_utf8_lossy(&body);
        assert!(html.contains("Simple Index"));
    }

    #[tokio::test]
    async fn test_pypi_list_with_packages() {
        let ctx = create_test_context();

        // Pre-populate storage with a package
        ctx.state
            .storage
            .put("pypi/flask/flask-2.0.tar.gz", b"fake-tarball-data")
            .await
            .unwrap();

        let response = send(&ctx.app, Method::GET, "/simple/", "").await;

        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        let html = String::from_utf8_lossy(&body);
        assert!(html.contains("flask"));
    }

    #[tokio::test]
    async fn test_pypi_versions_local() {
        let ctx = create_test_context();

        // Pre-populate storage
        ctx.state
            .storage
            .put("pypi/flask/flask-2.0.tar.gz", b"fake-data")
            .await
            .unwrap();

        let response = send(&ctx.app, Method::GET, "/simple/flask/", "").await;

        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        let html = String::from_utf8_lossy(&body);
        assert!(html.contains("flask-2.0.tar.gz"));
        assert!(html.contains("/simple/flask/flask-2.0.tar.gz"));
    }

    #[tokio::test]
    async fn test_pypi_download_local() {
        let ctx = create_test_context();

        let tarball_data = b"fake-tarball-content";
        ctx.state
            .storage
            .put("pypi/flask/flask-2.0.tar.gz", tarball_data)
            .await
            .unwrap();

        let response = send(&ctx.app, Method::GET, "/simple/flask/flask-2.0.tar.gz", "").await;

        assert_eq!(response.status(), StatusCode::OK);
        let body = body_bytes(response).await;
        assert_eq!(&body[..], tarball_data);
    }

    #[tokio::test]
    async fn test_pypi_not_found_no_proxy() {
        let ctx = create_test_context();

        // No proxy configured, no local data
        let response = send(&ctx.app, Method::GET, "/simple/nonexistent/", "").await;

        assert_eq!(response.status(), StatusCode::NOT_FOUND);
    }
}
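// Editor's note (not from this diff): the /simple/ routes exercised above
// implement the PEP 503 "simple" index that pip consumes, so a client points
// at the registry like this (host and port are hypothetical):
//
//     pip install --index-url http://localhost:8080/simple/ flask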
@@ -141,191 +141,3 @@ fn guess_content_type(path: &str) -> &'static str {
        _ => "application/octet-stream",
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_guess_content_type_json() {
        assert_eq!(guess_content_type("config.json"), "application/json");
    }

    #[test]
    fn test_guess_content_type_xml() {
        assert_eq!(guess_content_type("data.xml"), "application/xml");
    }

    #[test]
    fn test_guess_content_type_html() {
        assert_eq!(guess_content_type("index.html"), "text/html");
        assert_eq!(guess_content_type("page.htm"), "text/html");
    }

    #[test]
    fn test_guess_content_type_css() {
        assert_eq!(guess_content_type("style.css"), "text/css");
    }

    #[test]
    fn test_guess_content_type_js() {
        assert_eq!(guess_content_type("app.js"), "application/javascript");
    }

    #[test]
    fn test_guess_content_type_text() {
        assert_eq!(guess_content_type("readme.txt"), "text/plain");
    }

    #[test]
    fn test_guess_content_type_markdown() {
        assert_eq!(guess_content_type("README.md"), "text/markdown");
    }

    #[test]
    fn test_guess_content_type_yaml() {
        assert_eq!(guess_content_type("config.yaml"), "application/x-yaml");
        assert_eq!(guess_content_type("config.yml"), "application/x-yaml");
    }

    #[test]
    fn test_guess_content_type_toml() {
        assert_eq!(guess_content_type("Cargo.toml"), "application/toml");
    }

    #[test]
    fn test_guess_content_type_archives() {
        assert_eq!(guess_content_type("data.tar"), "application/x-tar");
        assert_eq!(guess_content_type("data.gz"), "application/gzip");
        assert_eq!(guess_content_type("data.gzip"), "application/gzip");
        assert_eq!(guess_content_type("data.zip"), "application/zip");
    }

    #[test]
    fn test_guess_content_type_images() {
        assert_eq!(guess_content_type("logo.png"), "image/png");
        assert_eq!(guess_content_type("photo.jpg"), "image/jpeg");
        assert_eq!(guess_content_type("photo.jpeg"), "image/jpeg");
        assert_eq!(guess_content_type("anim.gif"), "image/gif");
        assert_eq!(guess_content_type("icon.svg"), "image/svg+xml");
    }

    #[test]
    fn test_guess_content_type_special() {
        assert_eq!(guess_content_type("doc.pdf"), "application/pdf");
        assert_eq!(guess_content_type("module.wasm"), "application/wasm");
    }

    #[test]
    fn test_guess_content_type_unknown() {
        assert_eq!(guess_content_type("binary.bin"), "application/octet-stream");
        assert_eq!(guess_content_type("noext"), "application/octet-stream");
    }

    #[test]
    fn test_guess_content_type_case_insensitive() {
        assert_eq!(guess_content_type("FILE.JSON"), "application/json");
        assert_eq!(guess_content_type("IMAGE.PNG"), "image/png");
    }
}
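// Editor's sketch (not from this diff): guess_content_type's match arms are
// collapsed above; the mapping the tests pin down can be reconstructed as
// below. Function name and shape are assumptions; only the extension/MIME
// pairs asserted by the tests are reproduced.
fn guess_content_type_sketch(path: &str) -> &'static str {
    // Case-insensitive on the final extension, per the tests.
    let ext = path.rsplit('.').next().unwrap_or("").to_ascii_lowercase();
    match ext.as_str() {
        "json" => "application/json",
        "xml" => "application/xml",
        "html" | "htm" => "text/html",
        "css" => "text/css",
        "js" => "application/javascript",
        "txt" => "text/plain",
        "md" => "text/markdown",
        "yaml" | "yml" => "application/x-yaml",
        "toml" => "application/toml",
        "tar" => "application/x-tar",
        "gz" | "gzip" => "application/gzip",
        "zip" => "application/zip",
        "png" => "image/png",
        "jpg" | "jpeg" => "image/jpeg",
        "gif" => "image/gif",
        "svg" => "image/svg+xml",
        "pdf" => "application/pdf",
        "wasm" => "application/wasm",
        _ => "application/octet-stream",
    }
}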
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod integration_tests {
    use crate::storage::{Storage, StorageError};
    use crate::test_helpers::{
        body_bytes, create_test_context, create_test_context_with_raw_disabled, send,
    };
    use axum::http::{Method, StatusCode};

    #[tokio::test]
    async fn test_raw_put_get_roundtrip() {
        let ctx = create_test_context();
        let put_resp = send(&ctx.app, Method::PUT, "/raw/test.txt", b"hello".to_vec()).await;
        assert_eq!(put_resp.status(), StatusCode::CREATED);

        let get_resp = send(&ctx.app, Method::GET, "/raw/test.txt", "").await;
        assert_eq!(get_resp.status(), StatusCode::OK);
        let body = body_bytes(get_resp).await;
        assert_eq!(&body[..], b"hello");
    }

    #[tokio::test]
    async fn test_raw_head() {
        let ctx = create_test_context();
        send(
            &ctx.app,
            Method::PUT,
            "/raw/test.txt",
            b"hello world".to_vec(),
        )
        .await;

        let head_resp = send(&ctx.app, Method::HEAD, "/raw/test.txt", "").await;
        assert_eq!(head_resp.status(), StatusCode::OK);
        let cl = head_resp.headers().get("content-length").unwrap();
        assert_eq!(cl.to_str().unwrap(), "11");
    }

    #[tokio::test]
    async fn test_raw_delete() {
        let ctx = create_test_context();
        send(&ctx.app, Method::PUT, "/raw/test.txt", b"data".to_vec()).await;

        let del = send(&ctx.app, Method::DELETE, "/raw/test.txt", "").await;
        assert_eq!(del.status(), StatusCode::NO_CONTENT);

        let get = send(&ctx.app, Method::GET, "/raw/test.txt", "").await;
        assert_eq!(get.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn test_raw_not_found() {
        let ctx = create_test_context();
        let resp = send(&ctx.app, Method::GET, "/raw/missing.txt", "").await;
        assert_eq!(resp.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn test_raw_content_type_json() {
        let ctx = create_test_context();
        send(&ctx.app, Method::PUT, "/raw/file.json", b"{}".to_vec()).await;

        let resp = send(&ctx.app, Method::GET, "/raw/file.json", "").await;
        assert_eq!(resp.status(), StatusCode::OK);
        let ct = resp.headers().get("content-type").unwrap();
        assert_eq!(ct.to_str().unwrap(), "application/json");
    }

    #[tokio::test]
    async fn test_raw_payload_too_large() {
        let ctx = create_test_context();
        let big = vec![0u8; 2 * 1024 * 1024]; // 2 MB > 1 MB limit
        let resp = send(&ctx.app, Method::PUT, "/raw/large.bin", big).await;
        assert_eq!(resp.status(), StatusCode::PAYLOAD_TOO_LARGE);
    }

    #[tokio::test]
    async fn test_raw_disabled() {
        let ctx = create_test_context_with_raw_disabled();
        let get = send(&ctx.app, Method::GET, "/raw/test.txt", "").await;
        assert_eq!(get.status(), StatusCode::NOT_FOUND);
        let put = send(&ctx.app, Method::PUT, "/raw/test.txt", b"data".to_vec()).await;
        assert_eq!(put.status(), StatusCode::NOT_FOUND);
    }

    #[tokio::test]
    async fn test_upload_path_traversal_rejected() {
        let temp_dir = tempfile::TempDir::new().unwrap();
        let storage = Storage::new_local(temp_dir.path().to_str().unwrap());

        let result = storage.put("raw/../../../etc/passwd", b"pwned").await;
        assert!(result.is_err(), "path traversal key must be rejected");
        match result {
            Err(StorageError::Validation(v)) => {
                assert_eq!(format!("{}", v), "Path traversal detected");
            }
            other => panic!("expected Validation(PathTraversal), got {:?}", other),
        }
    }
}
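// Editor's sketch (not from this diff): the traversal test above implies the
// storage layer validates keys before touching the filesystem. One minimal
// shape of that check; the name is hypothetical, only the error message
// matches the test:
fn validate_key_sketch(key: &str) -> Result<(), &'static str> {
    // Reject any ".." path segment; "raw/../../../etc/passwd" trips this.
    if key.split('/').any(|segment| segment == "..") {
        return Err("Path traversal detected");
    }
    Ok(())
}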
@@ -80,8 +80,6 @@ pub struct RepoIndex {
    pub npm: RegistryIndex,
    pub cargo: RegistryIndex,
    pub pypi: RegistryIndex,
    pub go: RegistryIndex,
    pub raw: RegistryIndex,
}

impl RepoIndex {
@@ -92,8 +90,6 @@ impl RepoIndex {
            npm: RegistryIndex::new(),
            cargo: RegistryIndex::new(),
            pypi: RegistryIndex::new(),
            go: RegistryIndex::new(),
            raw: RegistryIndex::new(),
        }
    }

@@ -105,8 +101,6 @@ impl RepoIndex {
            "npm" => self.npm.invalidate(),
            "cargo" => self.cargo.invalidate(),
            "pypi" => self.pypi.invalidate(),
            "go" => self.go.invalidate(),
            "raw" => self.raw.invalidate(),
            _ => {}
        }
    }
@@ -119,8 +113,6 @@ impl RepoIndex {
            "npm" => &self.npm,
            "cargo" => &self.cargo,
            "pypi" => &self.pypi,
            "go" => &self.go,
            "raw" => &self.raw,
            _ => return Arc::new(Vec::new()),
        };

@@ -140,8 +132,6 @@ impl RepoIndex {
            "npm" => build_npm_index(storage).await,
            "cargo" => build_cargo_index(storage).await,
            "pypi" => build_pypi_index(storage).await,
            "go" => build_go_index(storage).await,
            "raw" => build_raw_index(storage).await,
            _ => Vec::new(),
        };
        info!(registry = registry, count = data.len(), "Index rebuilt");
@@ -152,15 +142,13 @@
    }

    /// Get counts for stats (no rebuild, just current state)
    pub fn counts(&self) -> (usize, usize, usize, usize, usize, usize, usize) {
    pub fn counts(&self) -> (usize, usize, usize, usize, usize) {
        (
            self.docker.count(),
            self.maven.count(),
            self.npm.count(),
            self.cargo.count(),
            self.pypi.count(),
            self.go.count(),
            self.raw.count(),
        )
    }
}
@@ -341,57 +329,6 @@ async fn build_pypi_index(storage: &Storage) -> Vec<RepoInfo> {
    to_sorted_vec(packages)
}

async fn build_go_index(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("go/").await;
    let mut modules: HashMap<String, (usize, u64, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("go/") {
            // Pattern: go/{module}/@v/{version}.zip
            // Count .zip files as versions (authoritative artifacts)
            if rest.contains("/@v/") && key.ends_with(".zip") {
                // Extract module path: everything before /@v/
                if let Some(pos) = rest.rfind("/@v/") {
                    let module = &rest[..pos];
                    let entry = modules.entry(module.to_string()).or_insert((0, 0, 0));
                    entry.0 += 1;

                    if let Some(meta) = storage.stat(key).await {
                        entry.1 += meta.size;
                        if meta.modified > entry.2 {
                            entry.2 = meta.modified;
                        }
                    }
                }
            }
        }
    }

    to_sorted_vec(modules)
}

async fn build_raw_index(storage: &Storage) -> Vec<RepoInfo> {
    let keys = storage.list("raw/").await;
    let mut files: HashMap<String, (usize, u64, u64)> = HashMap::new();

    for key in &keys {
        if let Some(rest) = key.strip_prefix("raw/") {
            // Group by top-level directory
            let group = rest.split('/').next().unwrap_or(rest).to_string();
            let entry = files.entry(group).or_insert((0, 0, 0));
            entry.0 += 1;
            if let Some(meta) = storage.stat(key).await {
                entry.1 += meta.size;
                if meta.modified > entry.2 {
                    entry.2 = meta.modified;
                }
            }
        }
    }

    to_sorted_vec(files)
}

/// Convert HashMap to sorted Vec<RepoInfo>
fn to_sorted_vec(map: HashMap<String, (usize, u64, u64)>) -> Vec<RepoInfo> {
    let mut result: Vec<_> = map
@@ -424,165 +361,3 @@ pub fn paginate<T: Clone>(data: &[T], page: usize, limit: usize) -> (Vec<T>, usize) {
    let end = (start + limit).min(total);
    (data[start..end].to_vec(), total)
}
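// Editor's note (not from this diff): RegistryIndex is a lazily rebuilt
// cache: writers call invalidate(), readers rebuild only when is_dirty().
// Minimal usage, restricted to methods that appear in the tests below:
//
//     let idx = RegistryIndex::new();            // starts dirty
//     idx.set(build_raw_index(&storage).await);  // populate, clears dirty
//     let rows = idx.get_cached();               // cheap cached snapshot
//     idx.invalidate();                          // next read triggers a rebuild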
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_paginate_first_page() {
        let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let (page, total) = paginate(&data, 1, 3);
        assert_eq!(page, vec![1, 2, 3]);
        assert_eq!(total, 10);
    }

    #[test]
    fn test_paginate_second_page() {
        let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let (page, total) = paginate(&data, 2, 3);
        assert_eq!(page, vec![4, 5, 6]);
        assert_eq!(total, 10);
    }

    #[test]
    fn test_paginate_last_page_partial() {
        let data = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
        let (page, total) = paginate(&data, 4, 3);
        assert_eq!(page, vec![10]);
        assert_eq!(total, 10);
    }

    #[test]
    fn test_paginate_beyond_range() {
        let data = vec![1, 2, 3];
        let (page, total) = paginate(&data, 5, 3);
        assert!(page.is_empty());
        assert_eq!(total, 3);
    }

    #[test]
    fn test_paginate_empty_data() {
        let data: Vec<i32> = vec![];
        let (page, total) = paginate(&data, 1, 10);
        assert!(page.is_empty());
        assert_eq!(total, 0);
    }

    #[test]
    fn test_paginate_page_zero() {
        // page 0 with saturating_sub becomes 0, so start = 0
        let data = vec![1, 2, 3];
        let (page, _) = paginate(&data, 0, 2);
        assert_eq!(page, vec![1, 2]);
    }

    #[test]
    fn test_paginate_large_limit() {
        let data = vec![1, 2, 3];
        let (page, total) = paginate(&data, 1, 100);
        assert_eq!(page, vec![1, 2, 3]);
        assert_eq!(total, 3);
    }

    #[test]
    fn test_registry_index_new() {
        let idx = RegistryIndex::new();
        assert_eq!(idx.count(), 0);
        assert!(idx.is_dirty());
    }

    #[test]
    fn test_registry_index_invalidate() {
        let idx = RegistryIndex::new();
        // Initially dirty
        assert!(idx.is_dirty());

        // Set data clears dirty
        idx.set(vec![RepoInfo {
            name: "test".to_string(),
            versions: 1,
            size: 100,
            updated: "2026-01-01".to_string(),
        }]);
        assert!(!idx.is_dirty());
        assert_eq!(idx.count(), 1);

        // Invalidate makes it dirty again
        idx.invalidate();
        assert!(idx.is_dirty());
    }

    #[test]
    fn test_registry_index_get_cached() {
        let idx = RegistryIndex::new();
        idx.set(vec![
            RepoInfo {
                name: "a".to_string(),
                versions: 2,
                size: 200,
                updated: "today".to_string(),
            },
            RepoInfo {
                name: "b".to_string(),
                versions: 1,
                size: 100,
                updated: "yesterday".to_string(),
            },
        ]);

        let cached = idx.get_cached();
        assert_eq!(cached.len(), 2);
        assert_eq!(cached[0].name, "a");
    }

    #[test]
    fn test_registry_index_default() {
        let idx = RegistryIndex::default();
        assert_eq!(idx.count(), 0);
    }

    #[test]
    fn test_repo_index_new() {
        let idx = RepoIndex::new();
        let (d, m, n, c, p, g, r) = idx.counts();
        assert_eq!((d, m, n, c, p, g, r), (0, 0, 0, 0, 0, 0, 0));
    }

    #[test]
    fn test_repo_index_invalidate() {
        let idx = RepoIndex::new();
        // Should not panic for any registry
        idx.invalidate("docker");
        idx.invalidate("maven");
        idx.invalidate("npm");
        idx.invalidate("cargo");
        idx.invalidate("pypi");
        idx.invalidate("raw");
        idx.invalidate("unknown"); // should be a no-op
    }

    #[test]
    fn test_repo_index_default() {
        let idx = RepoIndex::default();
        let (d, m, n, c, p, g, r) = idx.counts();
        assert_eq!((d, m, n, c, p, g, r), (0, 0, 0, 0, 0, 0, 0));
    }

    #[test]
    fn test_to_sorted_vec() {
        let mut map = std::collections::HashMap::new();
        map.insert("zebra".to_string(), (3usize, 100u64, 0u64));
        map.insert("alpha".to_string(), (1, 50, 1700000000));

        let result = to_sorted_vec(map);
        assert_eq!(result.len(), 2);
        assert_eq!(result[0].name, "alpha");
        assert_eq!(result[0].versions, 1);
        assert_eq!(result[0].size, 50);
        assert_ne!(result[0].updated, "N/A");
        assert_eq!(result[1].name, "zebra");
        assert_eq!(result[1].versions, 3);
        assert_eq!(result[1].updated, "N/A"); // modified = 0
    }
}
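// Editor's sketch (not from this diff): paginate()'s opening lines are
// collapsed in the hunk above; the page-zero test's saturating_sub comment
// pins down the start offset. One implementation consistent with all eight
// paginate tests (the name is hypothetical):
pub fn paginate_sketch<T: Clone>(data: &[T], page: usize, limit: usize) -> (Vec<T>, usize) {
    let total = data.len();
    // Pages are 1-based; page 0 saturates to the first page.
    let start = page.saturating_sub(1).saturating_mul(limit).min(total);
    let end = (start + limit).min(total);
    (data[start..end].to_vec(), total)
}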
@@ -92,69 +92,4 @@ mod tests {
        let cloned = id.clone();
        assert_eq!(id.0, cloned.0);
    }

    #[test]
    fn test_request_id_debug() {
        let id = RequestId("abc-def".to_string());
        let debug = format!("{:?}", id);
        assert!(debug.contains("abc-def"));
    }

    #[test]
    fn test_request_id_header_name() {
        assert_eq!(REQUEST_ID_HEADER.as_str(), "x-request-id");
    }

    #[test]
    fn test_request_id_deref_string_methods() {
        let id = RequestId("req-12345".to_string());
        assert!(id.starts_with("req-"));
        assert_eq!(id.len(), 9);
    }
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod integration_tests {
    use crate::test_helpers::{create_test_context, send, send_with_headers};
    use axum::http::{Method, StatusCode};

    #[tokio::test]
    async fn test_response_has_request_id() {
        let ctx = create_test_context();
        let response = send(&ctx.app, Method::GET, "/health", "").await;

        assert_eq!(response.status(), StatusCode::OK);
        let request_id = response.headers().get("x-request-id");
        assert!(
            request_id.is_some(),
            "Response must have X-Request-ID header"
        );
        let value = request_id.unwrap().to_str().unwrap();
        assert!(!value.is_empty(), "X-Request-ID must not be empty");
    }

    #[tokio::test]
    async fn test_preserves_incoming_request_id() {
        let ctx = create_test_context();
        let custom_id = "custom-123";

        let response = send_with_headers(
            &ctx.app,
            Method::GET,
            "/health",
            vec![("x-request-id", custom_id)],
            "",
        )
        .await;

        assert_eq!(response.status(), StatusCode::OK);
        let returned_id = response
            .headers()
            .get("x-request-id")
            .unwrap()
            .to_str()
            .unwrap();
        assert_eq!(returned_id, custom_id);
    }
}
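// Editor's sketch (not from this diff): the middleware these tests exercise
// reuses an incoming x-request-id and generates one otherwise, echoing it on
// the response. A minimal standalone version against an axum 0.7-style
// from_fn signature; the signature and uuid use are assumptions, the real
// implementation lives in crate::request_id:
use axum::{extract::Request, middleware::Next, response::Response};

pub async fn request_id_sketch(mut req: Request, next: Next) -> Response {
    let id = req
        .headers()
        .get("x-request-id")
        .and_then(|v| v.to_str().ok())
        .map(str::to_owned)
        .unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
    // to_str() already succeeded (or id is a fresh UUID), so parsing back
    // into a HeaderValue cannot fail here.
    req.headers_mut()
        .insert("x-request-id", id.parse().expect("valid header value"));
    let mut res = next.run(req).await;
    res.headers_mut()
        .insert("x-request-id", id.parse().expect("valid header value"));
    res
}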
@@ -72,7 +72,6 @@ impl SecretsProvider for EnvProvider {
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

@@ -130,7 +130,6 @@ pub fn create_secrets_provider(
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;

@@ -68,6 +68,10 @@ impl StorageBackend for LocalStorage {
    async fn get(&self, key: &str) -> Result<Bytes> {
        let path = self.key_to_path(key);

        if !path.exists() {
            return Err(StorageError::NotFound);
        }

        let mut file = fs::File::open(&path).await.map_err(|e| {
            if e.kind() == std::io::ErrorKind::NotFound {
                StorageError::NotFound
@@ -87,13 +91,13 @@ impl StorageBackend for LocalStorage {
    async fn delete(&self, key: &str) -> Result<()> {
        let path = self.key_to_path(key);

        fs::remove_file(&path).await.map_err(|e| {
            if e.kind() == std::io::ErrorKind::NotFound {
                StorageError::NotFound
            } else {
                StorageError::Io(e.to_string())
            }
        })?;
        if !path.exists() {
            return Err(StorageError::NotFound);
        }

        fs::remove_file(&path)
            .await
            .map_err(|e| StorageError::Io(e.to_string()))?;

        Ok(())
    }
@@ -138,36 +142,12 @@ impl StorageBackend for LocalStorage {
        fs::create_dir_all(&self.base_path).await.is_ok()
    }

    async fn total_size(&self) -> u64 {
        let base = self.base_path.clone();
        tokio::task::spawn_blocking(move || {
            fn dir_size(path: &std::path::Path) -> u64 {
                let mut total = 0u64;
                if let Ok(entries) = std::fs::read_dir(path) {
                    for entry in entries.flatten() {
                        let path = entry.path();
                        if path.is_file() {
                            total += entry.metadata().map(|m| m.len()).unwrap_or(0);
                        } else if path.is_dir() {
                            total += dir_size(&path);
                        }
                    }
                }
                total
            }
            dir_size(&base)
        })
        .await
        .unwrap_or(0)
    }

    fn backend_name(&self) -> &'static str {
        "local"
    }
}
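// Editor's note (not from this diff): the two delete() variants above differ
// in TOCTOU safety. Checking path.exists() before remove_file leaves a window
// in which another task can delete the file first; mapping
// ErrorKind::NotFound from remove_file itself closes it (the same idea the
// tokens code later in this compare labels "TOCTOU fix"). A standalone sketch:
async fn delete_no_toctou(path: &std::path::Path) -> Result<(), StorageError> {
    match tokio::fs::remove_file(path).await {
        Ok(()) => Ok(()),
        // The race collapses into this error arm instead of a pre-check.
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => Err(StorageError::NotFound),
        Err(e) => Err(StorageError::Io(e.to_string())),
    }
}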
#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use tempfile::TempDir;
@@ -274,147 +254,4 @@ mod tests {
        let storage = LocalStorage::new(temp_dir.path().to_str().unwrap());
        assert_eq!(storage.backend_name(), "local");
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_concurrent_writes_same_key() {
        let temp_dir = TempDir::new().unwrap();
        let storage = std::sync::Arc::new(LocalStorage::new(temp_dir.path().to_str().unwrap()));

        let mut handles = Vec::new();
        for i in 0..10u8 {
            let s = storage.clone();
            handles.push(tokio::spawn(async move {
                let data = vec![i; 1024];
                s.put("shared/key", &data).await
            }));
        }

        for h in handles {
            h.await.expect("task panicked").expect("put failed");
        }

        let data = storage.get("shared/key").await.expect("get failed");
        assert_eq!(data.len(), 1024);
        let first = data[0];
        assert!(
            data.iter().all(|&b| b == first),
            "file is corrupted — mixed writers"
        );
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_concurrent_writes_different_keys() {
        let temp_dir = TempDir::new().unwrap();
        let storage = std::sync::Arc::new(LocalStorage::new(temp_dir.path().to_str().unwrap()));

        let mut handles = Vec::new();
        for i in 0..10u32 {
            let s = storage.clone();
            handles.push(tokio::spawn(async move {
                let key = format!("key/{}", i);
                s.put(&key, format!("data-{}", i).as_bytes()).await
            }));
        }

        for h in handles {
            h.await.expect("task panicked").expect("put failed");
        }

        for i in 0..10u32 {
            let key = format!("key/{}", i);
            let data = storage.get(&key).await.expect("get failed");
            assert_eq!(&*data, format!("data-{}", i).as_bytes());
        }
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_concurrent_read_during_write() {
        let temp_dir = TempDir::new().unwrap();
        let storage = std::sync::Arc::new(LocalStorage::new(temp_dir.path().to_str().unwrap()));

        let old_data = vec![0u8; 4096];
        storage.put("rw/key", &old_data).await.expect("seed put");

        let new_data = vec![1u8; 4096];
        let sw = storage.clone();
        let writer = tokio::spawn(async move {
            sw.put("rw/key", &new_data).await.expect("put failed");
        });

        let sr = storage.clone();
        let reader = tokio::spawn(async move {
            match sr.get("rw/key").await {
                Ok(_data) => {
                    // tokio::fs::write is not atomic, so partial reads
                    // (mix of old and new bytes) are expected — not a bug.
                    // We only verify the final state after both tasks complete.
                }
                Err(crate::storage::StorageError::NotFound) => {}
                Err(e) => panic!("unexpected error: {}", e),
            }
        });

        writer.await.expect("writer panicked");
        reader.await.expect("reader panicked");

        let data = storage.get("rw/key").await.expect("final get");
        assert_eq!(&*data, &vec![1u8; 4096]);
    }

    #[tokio::test]
    async fn test_total_size_empty() {
        let temp_dir = TempDir::new().unwrap();
        let storage = LocalStorage::new(temp_dir.path().to_str().unwrap());
        assert_eq!(storage.total_size().await, 0);
    }

    #[tokio::test]
    async fn test_total_size_with_files() {
        let temp_dir = TempDir::new().unwrap();
        let storage = LocalStorage::new(temp_dir.path().to_str().unwrap());

        storage.put("a/file1", b"hello").await.unwrap(); // 5 bytes
        storage.put("b/file2", b"world!").await.unwrap(); // 6 bytes

        let size = storage.total_size().await;
        assert_eq!(size, 11);
    }

    #[tokio::test]
    async fn test_total_size_after_delete() {
        let temp_dir = TempDir::new().unwrap();
        let storage = LocalStorage::new(temp_dir.path().to_str().unwrap());

        storage.put("file1", b"12345").await.unwrap();
        storage.put("file2", b"67890").await.unwrap();
        assert_eq!(storage.total_size().await, 10);

        storage.delete("file1").await.unwrap();
        assert_eq!(storage.total_size().await, 5);
    }

    #[tokio::test(flavor = "multi_thread")]
    async fn test_concurrent_deletes_same_key() {
        let temp_dir = TempDir::new().unwrap();
        let storage = std::sync::Arc::new(LocalStorage::new(temp_dir.path().to_str().unwrap()));

        storage.put("del/key", b"ephemeral").await.expect("put");

        let mut handles = Vec::new();
        for _ in 0..10 {
            let s = storage.clone();
            handles.push(tokio::spawn(async move {
                let _ = s.delete("del/key").await;
            }));
        }

        for h in handles {
            h.await.expect("task panicked");
        }

        assert!(matches!(
            storage.get("del/key").await,
            Err(crate::storage::StorageError::NotFound)
        ));
    }
}
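// Editor's sketch (not from this diff): the read-during-write test above
// tolerates partial reads because tokio::fs::write truncates in place. A
// common way to make puts atomic on POSIX filesystems (not what this code
// does) is write-to-temp-then-rename:
async fn put_atomic_sketch(path: &std::path::Path, data: &[u8]) -> std::io::Result<()> {
    let tmp = path.with_extension("tmp");
    tokio::fs::write(&tmp, data).await?;
    // rename() replaces the target in one step, so readers observe either
    // the old bytes or the new bytes, never a mix.
    tokio::fs::rename(&tmp, path).await
}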
@@ -46,8 +46,6 @@ pub trait StorageBackend: Send + Sync {
    async fn list(&self, prefix: &str) -> Vec<String>;
    async fn stat(&self, key: &str) -> Option<FileMeta>;
    async fn health_check(&self) -> bool;
    /// Total size of all stored artifacts in bytes
    async fn total_size(&self) -> u64;
    fn backend_name(&self) -> &'static str;
}

@@ -112,10 +110,6 @@ impl Storage {
        self.inner.health_check().await
    }

    pub async fn total_size(&self) -> u64 {
        self.inner.total_size().await
    }

    pub fn backend_name(&self) -> &'static str {
        self.inner.backend_name()
    }

@@ -4,7 +4,7 @@
use async_trait::async_trait;
use axum::body::Bytes;
use chrono::Utc;
use hmac::{digest::KeyInit, Hmac, Mac};
use hmac::{Hmac, Mac};
use sha2::{Digest, Sha256};

use super::{FileMeta, Result, StorageBackend, StorageError};
@@ -79,8 +79,7 @@ impl S3Storage {
            method, canonical_uri, canonical_query, canonical_headers, signed_headers, payload_hash
        );

        let canonical_request_hash =
            hex::encode(sha2::Sha256::digest(canonical_request.as_bytes()));
        let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));

        // String to sign
        let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
@@ -258,8 +257,7 @@ impl StorageBackend for S3Storage {
            canonical_uri, canonical_headers, signed_headers, payload_hash
        );

        let canonical_request_hash =
            hex::encode(sha2::Sha256::digest(canonical_request.as_bytes()));
        let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
        let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
        let string_to_sign = format!(
            "AWS4-HMAC-SHA256\n{}\n{}\n{}",
@@ -355,8 +353,7 @@ impl StorageBackend for S3Storage {
            canonical_uri, canonical_headers, signed_headers, payload_hash
        );

        let canonical_request_hash =
            hex::encode(sha2::Sha256::digest(canonical_request.as_bytes()));
        let canonical_request_hash = hex::encode(Sha256::digest(canonical_request.as_bytes()));
        let credential_scope = format!("{}/{}/s3/aws4_request", date, self.region);
        let string_to_sign = format!(
            "AWS4-HMAC-SHA256\n{}\n{}\n{}",
@@ -382,17 +379,6 @@ impl StorageBackend for S3Storage {
        }
    }

    async fn total_size(&self) -> u64 {
        let keys = self.list("").await;
        let mut total = 0u64;
        for key in &keys {
            if let Some(meta) = self.stat(key).await {
                total += meta.size;
            }
        }
        total
    }

    fn backend_name(&self) -> &'static str {
        "s3"
    }
@@ -438,48 +424,4 @@ mod tests {
        let result = hmac_sha256(b"key", b"data");
        assert!(!result.is_empty());
    }

    #[test]
    fn test_uri_encode_safe_chars() {
        assert_eq!(uri_encode("hello"), "hello");
        assert_eq!(uri_encode("foo/bar"), "foo/bar");
        assert_eq!(uri_encode("test-file_v1.0"), "test-file_v1.0");
        assert_eq!(uri_encode("a~b"), "a~b");
    }

    #[test]
    fn test_uri_encode_special_chars() {
        assert_eq!(uri_encode("hello world"), "hello%20world");
        assert_eq!(uri_encode("file name.txt"), "file%20name.txt");
    }

    #[test]
    fn test_uri_encode_query_chars() {
        assert_eq!(uri_encode("key=value"), "key%3Dvalue");
        assert_eq!(uri_encode("a&b"), "a%26b");
        assert_eq!(uri_encode("a+b"), "a%2Bb");
    }

    #[test]
    fn test_uri_encode_empty() {
        assert_eq!(uri_encode(""), "");
    }

    #[test]
    fn test_uri_encode_all_safe_ranges() {
        // A-Z
        assert_eq!(uri_encode("ABCXYZ"), "ABCXYZ");
        // a-z
        assert_eq!(uri_encode("abcxyz"), "abcxyz");
        // 0-9
        assert_eq!(uri_encode("0123456789"), "0123456789");
        // Special safe: - _ . ~ /
        assert_eq!(uri_encode("-_.~/"), "-_.~/");
    }

    #[test]
    fn test_uri_encode_percent() {
        assert_eq!(uri_encode("%"), "%25");
        assert_eq!(uri_encode("100%done"), "100%25done");
    }
}
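// Editor's sketch (not from this diff): the string-to-sign built above is
// signed with a key derived by the standard AWS SigV4 HMAC chain. The
// derivation itself sits outside the hunks shown; this sketch assumes
// hmac_sha256(key: &[u8], data: &[u8]) -> Vec<u8>, consistent with the
// helper exercised in the tests above.
fn sigv4_signing_key(secret: &str, date: &str, region: &str) -> Vec<u8> {
    let k_date = hmac_sha256(format!("AWS4{}", secret).as_bytes(), date.as_bytes());
    let k_region = hmac_sha256(&k_date, region.as_bytes());
    let k_service = hmac_sha256(&k_region, b"s3");
    // The final key signs the "AWS4-HMAC-SHA256\n{date}\n{scope}\n{hash}"
    // string assembled in the code above.
    hmac_sha256(&k_service, b"aws4_request")
}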
@@ -1,255 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

//! Shared test infrastructure for integration tests.
//!
//! Provides `TestContext` that builds a full axum Router backed by a
//! tempdir-based local storage with all upstream proxies disabled.

#![allow(clippy::unwrap_used)] // tests may use .unwrap() freely

use axum::{body::Body, extract::DefaultBodyLimit, http::Request, middleware, Router};
use http_body_util::BodyExt;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Instant;
use tempfile::TempDir;

use crate::activity_log::ActivityLog;
use crate::audit::AuditLog;
use crate::auth::HtpasswdAuth;
use crate::config::*;
use crate::dashboard_metrics::DashboardMetrics;
use crate::registry;
use crate::repo_index::RepoIndex;
use crate::storage::Storage;
use crate::tokens::TokenStore;
use crate::AppState;

use parking_lot::RwLock;

/// Everything a test needs: tempdir (must stay alive), shared state, and the router.
pub struct TestContext {
    pub state: Arc<AppState>,
    pub app: Router,
    pub _tempdir: TempDir,
}

/// Build a test context with auth **disabled** and all proxies off.
pub fn create_test_context() -> TestContext {
    build_context(false, &[], false, |_| {})
}

/// Build a test context with auth **enabled** (bcrypt cost=4 for speed).
pub fn create_test_context_with_auth(users: &[(&str, &str)]) -> TestContext {
    build_context(true, users, false, |_| {})
}

/// Build a test context with auth + anonymous_read.
pub fn create_test_context_with_anonymous_read(users: &[(&str, &str)]) -> TestContext {
    build_context(true, users, true, |_| {})
}

/// Build a test context with raw storage **disabled**.
pub fn create_test_context_with_raw_disabled() -> TestContext {
    build_context(false, &[], false, |cfg| cfg.raw.enabled = false)
}

fn build_context(
    auth_enabled: bool,
    users: &[(&str, &str)],
    anonymous_read: bool,
    customize: impl FnOnce(&mut Config),
) -> TestContext {
    let tempdir = TempDir::new().expect("failed to create tempdir");
    let storage_path = tempdir.path().to_str().unwrap().to_string();

    let mut config = Config {
        server: ServerConfig {
            host: "127.0.0.1".into(),
            port: 0,
            public_url: None,
            body_limit_mb: 2048,
        },
        storage: StorageConfig {
            mode: StorageMode::Local,
            path: storage_path.clone(),
            s3_url: String::new(),
            bucket: String::new(),
            s3_access_key: None,
            s3_secret_key: None,
            s3_region: String::new(),
        },
        maven: MavenConfig {
            proxies: vec![],
            proxy_timeout: 5,
        },
        npm: NpmConfig {
            proxy: None,
            proxy_auth: None,
            proxy_timeout: 5,
            metadata_ttl: 0,
        },
        pypi: PypiConfig {
            proxy: None,
            proxy_auth: None,
            proxy_timeout: 5,
        },
        go: GoConfig {
            proxy: None,
            proxy_auth: None,
            proxy_timeout: 5,
            proxy_timeout_zip: 30,
            max_zip_size: 10_485_760,
        },
        docker: DockerConfig {
            proxy_timeout: 5,
            upstreams: vec![],
        },
        raw: RawConfig {
            enabled: true,
            max_file_size: 1_048_576, // 1 MB
        },
        auth: AuthConfig {
            enabled: auth_enabled,
            anonymous_read,
            htpasswd_file: String::new(),
            token_storage: tempdir.path().join("tokens").to_str().unwrap().to_string(),
        },
        rate_limit: RateLimitConfig {
            enabled: false,
            ..RateLimitConfig::default()
        },
        secrets: SecretsConfig::default(),
    };

    // Apply any custom config tweaks
    customize(&mut config);

    let storage = Storage::new_local(&storage_path);

    let auth = if auth_enabled && !users.is_empty() {
        let htpasswd_path = tempdir.path().join("users.htpasswd");
        let mut content = String::new();
        for (username, password) in users {
            let hash = bcrypt::hash(password, 4).expect("bcrypt hash");
            content.push_str(&format!("{}:{}\n", username, hash));
        }
        std::fs::write(&htpasswd_path, &content).expect("write htpasswd");
        config.auth.htpasswd_file = htpasswd_path.to_str().unwrap().to_string();
        HtpasswdAuth::from_file(&htpasswd_path)
    } else {
        None
    };

    let tokens = if auth_enabled {
        Some(TokenStore::new(tempdir.path().join("tokens").as_path()))
    } else {
        None
    };

    let docker_auth = registry::DockerAuth::new(config.docker.proxy_timeout);

    let state = Arc::new(AppState {
        storage,
        config,
        start_time: Instant::now(),
        auth,
        tokens,
        metrics: DashboardMetrics::new(),
        activity: ActivityLog::new(50),
        audit: AuditLog::new(&storage_path),
        docker_auth,
        repo_index: RepoIndex::new(),
        http_client: reqwest::Client::new(),
        upload_sessions: Arc::new(RwLock::new(HashMap::new())),
    });

    // Build router identical to run_server() but without TcpListener / rate-limiting
    let registry_routes = Router::new()
        .merge(registry::docker_routes())
        .merge(registry::maven_routes())
        .merge(registry::npm_routes())
        .merge(registry::cargo_routes())
        .merge(registry::pypi_routes())
        .merge(registry::raw_routes())
        .merge(registry::go_routes());

    let public_routes = Router::new().merge(crate::health::routes());

    let app_routes = Router::new()
        .merge(crate::auth::token_routes())
        .merge(registry_routes);

    let app = Router::new()
        .merge(public_routes)
        .merge(app_routes)
        .layer(DefaultBodyLimit::max(
            state.config.server.body_limit_mb * 1024 * 1024,
        ))
        .layer(middleware::from_fn(
            crate::request_id::request_id_middleware,
        ))
        .layer(middleware::from_fn_with_state(
            state.clone(),
            crate::auth::auth_middleware,
        ))
        .with_state(state.clone());

    TestContext {
        state,
        app,
        _tempdir: tempdir,
    }
}
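// Editor's note (not from this diff): every integration test in this compare
// leans on these helpers. Typical shape, mirroring the tests shown earlier
// (imports are the same axum/test_helpers items those tests use):
//
//     #[tokio::test]
//     async fn example() {
//         let ctx = create_test_context(); // tempdir-backed state + router
//         let resp = send(&ctx.app, Method::GET, "/health", "").await;
//         assert_eq!(resp.status(), StatusCode::OK);
//     }
//
// Keeping `_tempdir` inside TestContext ties the storage directory's
// lifetime to the test: dropping the context deletes the tempdir.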
// ---------------------------------------------------------------------------
// Convenience helpers
// ---------------------------------------------------------------------------

/// Send a request through the router and return the response.
pub async fn send(
    app: &Router,
    method: axum::http::Method,
    uri: &str,
    body: impl Into<Body>,
) -> axum::http::Response<Body> {
    use tower::ServiceExt;

    let request = Request::builder()
        .method(method)
        .uri(uri)
        .body(body.into())
        .unwrap();

    app.clone().oneshot(request).await.unwrap()
}

/// Send a request with custom headers.
pub async fn send_with_headers(
    app: &Router,
    method: axum::http::Method,
    uri: &str,
    headers: Vec<(&str, &str)>,
    body: impl Into<Body>,
) -> axum::http::Response<Body> {
    use tower::ServiceExt;

    let mut builder = Request::builder().method(method).uri(uri);
    for (k, v) in headers {
        builder = builder.header(k, v);
    }
    let request = builder.body(body.into()).unwrap();

    app.clone().oneshot(request).await.unwrap()
}

/// Read the full response body into bytes.
pub async fn body_bytes(response: axum::http::Response<Body>) -> axum::body::Bytes {
    response
        .into_body()
        .collect()
        .await
        .expect("failed to read body")
        .to_bytes()
}
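// Editor's note (not from this diff): send() clones the Router because
// tower::ServiceExt::oneshot consumes the service it drives; Router's Clone
// is cheap (shared internals), so each request gets a fresh handle while the
// test keeps reusing ctx.app.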
@@ -1,36 +1,14 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT

use argon2::{
    password_hash::{rand_core::OsRng, PasswordHash, PasswordHasher, PasswordVerifier, SaltString},
    Argon2,
};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::fs;
use std::os::unix::fs::PermissionsExt;
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
use thiserror::Error;
use uuid::Uuid;

use parking_lot::RwLock;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};

/// TTL for cached token verifications (avoids Argon2 per request)
const CACHE_TTL: Duration = Duration::from_secs(300);

/// Cached verification result
#[derive(Clone)]
struct CachedToken {
    user: String,
    role: Role,
    expires_at: u64,
    cached_at: Instant,
}

const TOKEN_PREFIX: &str = "nra_";

/// Access role for API tokens
@@ -83,25 +61,15 @@ fn default_role() -> Role {
#[derive(Clone)]
pub struct TokenStore {
    storage_path: PathBuf,
    /// In-memory cache: SHA256(token) -> verified result (avoids Argon2 per request)
    cache: Arc<RwLock<HashMap<String, CachedToken>>>,
    /// Pending last_used updates: file_id_prefix -> timestamp (flushed periodically)
    pending_last_used: Arc<RwLock<HashMap<String, u64>>>,
}

impl TokenStore {
    /// Create a new token store
    pub fn new(storage_path: &Path) -> Self {
        // Ensure directory exists with restricted permissions
        // Ensure directory exists
        let _ = fs::create_dir_all(storage_path);
        #[cfg(unix)]
        {
            let _ = fs::set_permissions(storage_path, fs::Permissions::from_mode(0o700));
        }
        Self {
            storage_path: storage_path.to_path_buf(),
            cache: Arc::new(RwLock::new(HashMap::new())),
            pending_last_used: Arc::new(RwLock::new(HashMap::new())),
        }
    }

@@ -119,9 +87,7 @@ impl TokenStore {
|
||||
TOKEN_PREFIX,
|
||||
Uuid::new_v4().to_string().replace("-", "")
|
||||
);
|
||||
let token_hash = hash_token_argon2(&raw_token)?;
|
||||
// Use SHA256 of token as filename (deterministic, for lookup)
|
||||
let file_id = sha256_hex(&raw_token);
|
||||
let token_hash = hash_token(&raw_token);
|
||||
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
@@ -131,7 +97,7 @@ impl TokenStore {
|
||||
let expires_at = now + (ttl_days * 24 * 60 * 60);
|
||||
|
||||
let info = TokenInfo {
|
||||
token_hash,
|
||||
token_hash: token_hash.clone(),
|
||||
user: user.to_string(),
|
||||
created_at: now,
|
||||
expires_at,
|
||||
@@ -140,84 +106,39 @@ impl TokenStore {
|
||||
role,
|
||||
};
|
||||
|
||||
// Save to file with restricted permissions
|
||||
let file_path = self.storage_path.join(format!("{}.json", &file_id[..16]));
|
||||
// Save to file
|
||||
let file_path = self
|
||||
.storage_path
|
||||
.join(format!("{}.json", &token_hash[..16]));
|
||||
let json =
|
||||
serde_json::to_string_pretty(&info).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
fs::write(&file_path, &json).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
set_file_permissions_600(&file_path);
|
||||
fs::write(&file_path, json).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
|
||||
Ok(raw_token)
|
||||
}
|
||||
|
||||
/// Verify a token and return user info if valid.
|
||||
///
|
||||
/// Uses an in-memory cache to avoid Argon2 verification on every request.
|
||||
/// The `last_used` timestamp is updated in batch via `flush_last_used()`.
|
||||
/// Verify a token and return user info if valid
|
||||
pub fn verify_token(&self, token: &str) -> Result<(String, Role), TokenError> {
|
||||
if !token.starts_with(TOKEN_PREFIX) {
|
||||
return Err(TokenError::InvalidFormat);
|
||||
}
|
||||
|
||||
let cache_key = sha256_hex(token);
|
||||
let token_hash = hash_token(token);
|
||||
let file_path = self
|
||||
.storage_path
|
||||
.join(format!("{}.json", &token_hash[..16]));
|
||||
|
||||
// Fast path: check in-memory cache
|
||||
{
|
||||
let cache = self.cache.read();
|
||||
if let Some(cached) = cache.get(&cache_key) {
|
||||
if cached.cached_at.elapsed() < CACHE_TTL {
|
||||
let now = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.unwrap_or_default()
|
||||
.as_secs();
|
||||
if now > cached.expires_at {
|
||||
return Err(TokenError::Expired);
|
||||
}
|
||||
// Schedule deferred last_used update
|
||||
self.pending_last_used
|
||||
.write()
|
||||
.insert(cache_key[..16].to_string(), now);
|
||||
return Ok((cached.user.clone(), cached.role.clone()));
|
||||
}
|
||||
}
|
||||
if !file_path.exists() {
|
||||
return Err(TokenError::NotFound);
|
||||
}
|
||||
|
||||
// Slow path: read from disk and verify Argon2
|
||||
let file_path = self.storage_path.join(format!("{}.json", &cache_key[..16]));
|
||||
|
||||
let content = match fs::read_to_string(&file_path) {
|
||||
Ok(c) => c,
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
|
||||
return Err(TokenError::NotFound);
|
||||
}
|
||||
Err(e) => return Err(TokenError::Storage(e.to_string())),
|
||||
};
|
||||
|
||||
let content =
|
||||
fs::read_to_string(&file_path).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
let mut info: TokenInfo =
|
||||
serde_json::from_str(&content).map_err(|e| TokenError::Storage(e.to_string()))?;
|
||||
|
||||
// Verify hash: try Argon2id first, fall back to legacy SHA256
|
||||
let hash_valid = if info.token_hash.starts_with("$argon2") {
|
||||
verify_token_argon2(token, &info.token_hash)
|
||||
} else {
|
||||
// Legacy SHA256 hash (no salt) — verify and migrate
|
||||
let legacy_hash = sha256_hex(token);
|
||||
if info.token_hash == legacy_hash {
|
||||
// Migrate to Argon2id
|
||||
if let Ok(new_hash) = hash_token_argon2(token) {
|
||||
info.token_hash = new_hash;
|
||||
if let Ok(json) = serde_json::to_string_pretty(&info) {
|
||||
let _ = fs::write(&file_path, &json);
|
||||
set_file_permissions_600(&file_path);
|
||||
}
|
||||
}
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
};
|
||||
|
||||
if !hash_valid {
|
||||
// Verify hash matches
|
||||
if info.token_hash != token_hash {
|
||||
return Err(TokenError::NotFound);
|
||||
}
|
||||
|
||||
@@ -231,21 +152,11 @@ impl TokenStore {
            return Err(TokenError::Expired);
        }

        // Populate cache
        self.cache.write().insert(
            cache_key[..16].to_string(),
            CachedToken {
                user: info.user.clone(),
                role: info.role.clone(),
                expires_at: info.expires_at,
                cached_at: Instant::now(),
            },
        );

        // Schedule deferred last_used update
        self.pending_last_used
            .write()
            .insert(cache_key[..16].to_string(), now);
        // Update last_used
        info.last_used = Some(now);
        if let Ok(json) = serde_json::to_string_pretty(&info) {
            let _ = fs::write(&file_path, json);
        }

        Ok((info.user, info.role))
    }
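The fast path above is a read-through cache with a TTL check. For reference, that pattern in isolation: a minimal sketch, assuming parking_lot's RwLock (consistent with the unwrap-free .read()/.write() calls in the hunk); the type and field names here are illustrative, not the module's actual definitions.

    use parking_lot::RwLock;
    use std::collections::HashMap;
    use std::time::{Duration, Instant};

    const CACHE_TTL: Duration = Duration::from_secs(60); // illustrative TTL

    struct Cached<V> {
        value: V,
        cached_at: Instant,
    }

    struct TtlCache<V> {
        inner: RwLock<HashMap<String, Cached<V>>>,
    }

    impl<V: Clone> TtlCache<V> {
        /// Return the value only if it is still within the TTL window.
        fn get_fresh(&self, key: &str) -> Option<V> {
            let map = self.inner.read();
            map.get(key)
                .filter(|c| c.cached_at.elapsed() < CACHE_TTL)
                .map(|c| c.value.clone())
        }

        /// Insert or refresh an entry, stamping it with the current instant.
        fn put(&self, key: String, value: V) {
            self.inner
                .write()
                .insert(key, Cached { value, cached_at: Instant::now() });
        }
    }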
@@ -270,56 +181,17 @@ impl TokenStore {
        tokens
    }

    /// Flush pending last_used timestamps to disk (async to avoid blocking runtime).
    /// Called periodically by a background task (every 30s).
    pub async fn flush_last_used(&self) {
        let pending: HashMap<String, u64> = {
            let mut map = self.pending_last_used.write();
            std::mem::take(&mut *map)
        };

        if pending.is_empty() {
            return;
        }

        for (file_prefix, timestamp) in &pending {
            let file_path = self.storage_path.join(format!("{}.json", file_prefix));
            let content = match tokio::fs::read_to_string(&file_path).await {
                Ok(c) => c,
                Err(_) => continue,
            };
            let mut info: TokenInfo = match serde_json::from_str(&content) {
                Ok(i) => i,
                Err(_) => continue,
            };
            info.last_used = Some(*timestamp);
            if let Ok(json) = serde_json::to_string_pretty(&info) {
                let _ = tokio::fs::write(&file_path, &json).await;
                set_file_permissions_600(&file_path);
            }
        }

        tracing::debug!(count = pending.len(), "Flushed pending last_used updates");
    }

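For completeness, the periodic driver that the doc comment above refers to might look like this: a sketch assuming a tokio runtime and an Arc-wrapped store; the wiring function name is hypothetical.

    use std::sync::Arc;
    use std::time::Duration;

    // Hypothetical wiring: spawn the 30s flush loop alongside the server.
    fn spawn_flush_task(store: Arc<TokenStore>) {
        tokio::spawn(async move {
            let mut ticker = tokio::time::interval(Duration::from_secs(30));
            loop {
                ticker.tick().await;
                store.flush_last_used().await;
            }
        });
    }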
    /// Remove a token from the in-memory cache (called on revoke)
    fn invalidate_cache(&self, hash_prefix: &str) {
        self.cache.write().remove(hash_prefix);
    }

    /// Revoke a token by its hash prefix
    pub fn revoke_token(&self, hash_prefix: &str) -> Result<(), TokenError> {
        let file_path = self.storage_path.join(format!("{}.json", hash_prefix));

        // TOCTOU fix: try remove directly
        match fs::remove_file(&file_path) {
            Ok(()) => {
                self.invalidate_cache(hash_prefix);
                Ok(())
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => Err(TokenError::NotFound),
            Err(e) => Err(TokenError::Storage(e.to_string())),
        if !file_path.exists() {
            return Err(TokenError::NotFound);
        }

        fs::remove_file(&file_path).map_err(|e| TokenError::Storage(e.to_string()))?;

        Ok(())
    }

    /// Revoke all tokens for a user
@@ -342,39 +214,11 @@ impl TokenStore {
    }
}

/// Hash a token using Argon2id with random salt
fn hash_token_argon2(token: &str) -> Result<String, TokenError> {
    let salt = SaltString::generate(&mut OsRng);
    let argon2 = Argon2::default();
    argon2
        .hash_password(token.as_bytes(), &salt)
        .map(|h| h.to_string())
        .map_err(|e| TokenError::Storage(format!("hash error: {e}")))
}

/// Verify a token against an Argon2id hash
fn verify_token_argon2(token: &str, hash: &str) -> bool {
    match PasswordHash::new(hash) {
        Ok(parsed) => Argon2::default()
            .verify_password(token.as_bytes(), &parsed)
            .is_ok(),
        Err(_) => false,
    }
}

/// SHA256 hex digest (used for file naming and legacy hash verification)
fn sha256_hex(input: &str) -> String {
/// Hash a token using SHA256
fn hash_token(token: &str) -> String {
    let mut hasher = Sha256::new();
    hasher.update(input.as_bytes());
    hex::encode(hasher.finalize())
}

/// Set file permissions to 600 (owner read/write only)
fn set_file_permissions_600(path: &Path) {
    #[cfg(unix)]
    {
        let _ = fs::set_permissions(path, fs::Permissions::from_mode(0o600));
    }
    hasher.update(token.as_bytes());
    format!("{:x}", hasher.finalize())
}

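A quick roundtrip through the two Argon2 helpers above, as a sanity check. This is a sketch that assumes the argon2 crate's PasswordHasher/PasswordVerifier traits are in scope, as the calls used by these functions imply; the token value is made up.

    fn argon2_roundtrip_example() {
        let token = "nra_example"; // illustrative token value
        let phc = hash_token_argon2(token).expect("hashing should succeed");
        assert!(phc.starts_with("$argon2"));          // PHC-format string
        assert!(verify_token_argon2(token, &phc));    // correct token verifies
        assert!(!verify_token_argon2("wrong", &phc)); // wrong token is rejected
    }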
#[derive(Debug, Error)]
@@ -393,7 +237,6 @@ pub enum TokenError {
}

#[cfg(test)]
#[allow(clippy::unwrap_used)]
mod tests {
    use super::*;
    use tempfile::TempDir;
@@ -411,19 +254,6 @@ mod tests {
        assert_eq!(token.len(), 4 + 32); // prefix + uuid without dashes
    }

    #[test]
    fn test_token_hash_is_argon2() {
        let temp_dir = TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());

        let _token = store
            .create_token("testuser", 30, None, Role::Write)
            .unwrap();

        let tokens = store.list_tokens("testuser");
        assert!(tokens[0].token_hash.starts_with("$argon2"));
    }

    #[test]
    fn test_verify_valid_token() {
        let temp_dir = TempDir::new().unwrap();
@@ -461,80 +291,24 @@ mod tests {
        let temp_dir = TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());

        // Create token and manually set it as expired
        let token = store
            .create_token("testuser", 1, None, Role::Write)
            .unwrap();
        let file_id = sha256_hex(&token);
        let file_path = temp_dir.path().join(format!("{}.json", &file_id[..16]));
        let token_hash = hash_token(&token);
        let file_path = temp_dir.path().join(format!("{}.json", &token_hash[..16]));

        // Read and modify the token to be expired
        let content = std::fs::read_to_string(&file_path).unwrap();
        let mut info: TokenInfo = serde_json::from_str(&content).unwrap();
        info.expires_at = 0;
        info.expires_at = 0; // Set to epoch (definitely expired)
        std::fs::write(&file_path, serde_json::to_string(&info).unwrap()).unwrap();

        // Token should now be expired
        let result = store.verify_token(&token);
        assert!(matches!(result, Err(TokenError::Expired)));
    }

    #[test]
    fn test_legacy_sha256_migration() {
        let temp_dir = TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());

        // Simulate a legacy token with SHA256 hash
        let raw_token = "nra_00112233445566778899aabbccddeeff";
        let legacy_hash = sha256_hex(raw_token);
        let file_id = sha256_hex(raw_token);

        let now = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();

        let info = TokenInfo {
            token_hash: legacy_hash.clone(),
            user: "legacyuser".to_string(),
            created_at: now,
            expires_at: now + 86400,
            last_used: None,
            description: None,
            role: Role::Read,
        };

        let file_path = temp_dir.path().join(format!("{}.json", &file_id[..16]));
        fs::write(&file_path, serde_json::to_string_pretty(&info).unwrap()).unwrap();

        // Verify should work with legacy hash
        let (user, role) = store.verify_token(raw_token).unwrap();
        assert_eq!(user, "legacyuser");
        assert_eq!(role, Role::Read);

        // After verification, hash should be migrated to Argon2id
        let content = fs::read_to_string(&file_path).unwrap();
        let updated: TokenInfo = serde_json::from_str(&content).unwrap();
        assert!(updated.token_hash.starts_with("$argon2"));
    }

    #[test]
    fn test_file_permissions() {
        let temp_dir = TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());

        let token = store
            .create_token("testuser", 30, None, Role::Write)
            .unwrap();

        let file_id = sha256_hex(&token);
        let file_path = temp_dir.path().join(format!("{}.json", &file_id[..16]));

        #[cfg(unix)]
        {
            let metadata = fs::metadata(&file_path).unwrap();
            let mode = metadata.permissions().mode() & 0o777;
            assert_eq!(mode, 0o600);
        }
    }

    #[test]
    fn test_list_tokens() {
        let temp_dir = TempDir::new().unwrap();
@@ -562,13 +336,16 @@ mod tests {
        let token = store
            .create_token("testuser", 30, None, Role::Write)
            .unwrap();
        let file_id = sha256_hex(&token);
        let hash_prefix = &file_id[..16];
        let token_hash = hash_token(&token);
        let hash_prefix = &token_hash[..16];

        // Verify token works
        assert!(store.verify_token(&token).is_ok());

        // Revoke
        store.revoke_token(hash_prefix).unwrap();

        // Verify token no longer works
        let result = store.verify_token(&token);
        assert!(matches!(result, Err(TokenError::NotFound)));
    }
@@ -598,8 +375,8 @@ mod tests {
        assert_eq!(store.list_tokens("user2").len(), 1);
    }

    #[tokio::test]
    async fn test_token_updates_last_used() {
    #[test]
    fn test_token_updates_last_used() {
        let temp_dir = TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());

@@ -607,57 +384,14 @@ mod tests {
            .create_token("testuser", 30, None, Role::Write)
            .unwrap();

        // First verification
        store.verify_token(&token).unwrap();

        // last_used is deferred — flush to persist
        store.flush_last_used().await;

        // Check last_used is set
        let tokens = store.list_tokens("testuser");
        assert!(tokens[0].last_used.is_some());
    }

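Note on the test change above: flush_last_used() is async, so the updated test needs #[tokio::test] to supply a runtime for the .await. A minimal standalone shape, assuming tokio's macros and rt features plus the tempfile dev-dependency this module already uses; the test name is hypothetical.

    #[tokio::test]
    async fn flush_runs_on_empty_queue() {
        let temp_dir = tempfile::TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());
        // With no pending entries, flush_last_used() returns immediately.
        store.flush_last_used().await;
    }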
    #[test]
    fn test_verify_cache_hit() {
        let temp_dir = TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());

        let token = store
            .create_token("testuser", 30, None, Role::Write)
            .unwrap();

        // First call: cold (disk + Argon2)
        let (user1, role1) = store.verify_token(&token).unwrap();
        // Second call: should hit cache (no Argon2)
        let (user2, role2) = store.verify_token(&token).unwrap();

        assert_eq!(user1, user2);
        assert_eq!(role1, role2);
        assert_eq!(user1, "testuser");
        assert_eq!(role1, Role::Write);
    }

    #[test]
    fn test_revoke_invalidates_cache() {
        let temp_dir = TempDir::new().unwrap();
        let store = TokenStore::new(temp_dir.path());

        let token = store
            .create_token("testuser", 30, None, Role::Write)
            .unwrap();
        let file_id = sha256_hex(&token);
        let hash_prefix = &file_id[..16];

        // Populate cache
        assert!(store.verify_token(&token).is_ok());

        // Revoke
        store.revoke_token(hash_prefix).unwrap();

        // Cache should be invalidated
        let result = store.verify_token(&token);
        assert!(matches!(result, Err(TokenError::NotFound)));
    }

    #[test]
    fn test_token_with_description() {
        let temp_dir = TempDir::new().unwrap();

@@ -23,8 +23,6 @@ pub struct RegistryStats {
    pub npm: usize,
    pub cargo: usize,
    pub pypi: usize,
    pub go: usize,
    pub raw: usize,
}

#[derive(Serialize)]
@@ -116,18 +114,14 @@ pub async fn api_stats(State(state): State<Arc<AppState>>) -> Json<RegistryStats
    let _ = state.repo_index.get("npm", &state.storage).await;
    let _ = state.repo_index.get("cargo", &state.storage).await;
    let _ = state.repo_index.get("pypi", &state.storage).await;
    let _ = state.repo_index.get("go", &state.storage).await;
    let _ = state.repo_index.get("raw", &state.storage).await;

    let (docker, maven, npm, cargo, pypi, go, raw) = state.repo_index.counts();
    let (docker, maven, npm, cargo, pypi) = state.repo_index.counts();
    Json(RegistryStats {
        docker,
        maven,
        npm,
        cargo,
        pypi,
        go,
        raw,
    })
}

@@ -138,8 +132,6 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
    let npm_repos = state.repo_index.get("npm", &state.storage).await;
    let cargo_repos = state.repo_index.get("cargo", &state.storage).await;
    let pypi_repos = state.repo_index.get("pypi", &state.storage).await;
    let go_repos = state.repo_index.get("go", &state.storage).await;
    let raw_repos = state.repo_index.get("raw", &state.storage).await;

    // Calculate sizes from cached index
    let docker_size: u64 = docker_repos.iter().map(|r| r.size).sum();
@@ -147,10 +139,7 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
    let npm_size: u64 = npm_repos.iter().map(|r| r.size).sum();
    let cargo_size: u64 = cargo_repos.iter().map(|r| r.size).sum();
    let pypi_size: u64 = pypi_repos.iter().map(|r| r.size).sum();
    let go_size: u64 = go_repos.iter().map(|r| r.size).sum();
    let raw_size: u64 = raw_repos.iter().map(|r| r.size).sum();
    let total_storage =
        docker_size + maven_size + npm_size + cargo_size + pypi_size + go_size + raw_size;
    let total_storage = docker_size + maven_size + npm_size + cargo_size + pypi_size;

    // Count total versions/tags, not just repositories
    let docker_versions: usize = docker_repos.iter().map(|r| r.versions).sum();
@@ -158,15 +147,8 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
    let npm_versions: usize = npm_repos.iter().map(|r| r.versions).sum();
    let cargo_versions: usize = cargo_repos.iter().map(|r| r.versions).sum();
    let pypi_versions: usize = pypi_repos.iter().map(|r| r.versions).sum();
    let go_versions: usize = go_repos.iter().map(|r| r.versions).sum();
    let raw_versions: usize = raw_repos.iter().map(|r| r.versions).sum();
    let total_artifacts = docker_versions
        + maven_versions
        + npm_versions
        + cargo_versions
        + pypi_versions
        + go_versions
        + raw_versions;
    let total_artifacts =
        docker_versions + maven_versions + npm_versions + cargo_versions + pypi_versions;

    let global_stats = GlobalStats {
        downloads: state.metrics.downloads.load(Ordering::Relaxed),
@@ -212,20 +194,6 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
            uploads: 0,
            size_bytes: pypi_size,
        },
        RegistryCardStats {
            name: "go".to_string(),
            artifact_count: go_versions,
            downloads: state.metrics.get_registry_downloads("go"),
            uploads: 0,
            size_bytes: go_size,
        },
        RegistryCardStats {
            name: "raw".to_string(),
            artifact_count: raw_versions,
            downloads: state.metrics.get_registry_downloads("raw"),
            uploads: state.metrics.get_registry_uploads("raw"),
            size_bytes: raw_size,
        },
    ];

    let mount_points = vec![
@@ -259,16 +227,6 @@ pub async fn api_dashboard(State(state): State<Arc<AppState>>) -> Json<Dashboard
            mount_path: "/simple/".to_string(),
            proxy_upstream: state.config.pypi.proxy.clone(),
        },
        MountPoint {
            registry: "Go".to_string(),
            mount_path: "/go/".to_string(),
            proxy_upstream: state.config.go.proxy.clone(),
        },
        MountPoint {
            registry: "Raw".to_string(),
            mount_path: "/raw/".to_string(),
            proxy_upstream: None,
        },
    ];

    let activity = state.activity.recent(20);
@@ -415,32 +373,12 @@ pub async fn get_registry_stats(storage: &Storage) -> RegistryStats {
        .collect::<HashSet<_>>()
        .len();

    let go = all_keys
        .iter()
        .filter(|k| k.starts_with("go/") && k.ends_with(".zip"))
        .filter_map(|k| {
            let rest = k.strip_prefix("go/")?;
            let pos = rest.rfind("/@v/")?;
            Some(rest[..pos].to_string())
        })
        .collect::<HashSet<_>>()
        .len();

    let raw = all_keys
        .iter()
        .filter(|k| k.starts_with("raw/"))
        .filter_map(|k| k.strip_prefix("raw/")?.split('/').next())
        .collect::<HashSet<_>>()
        .len();

    RegistryStats {
        docker,
        maven,
        npm,
        cargo,
        pypi,
        go,
        raw,
    }
}

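The removed counters above all follow one idiom: project each flat storage key down to a repository name and let a HashSet deduplicate. The go counter, extracted verbatim into a free function for clarity (the key layout go/<module>/@v/<version>.zip is as in the handler code above):

    use std::collections::HashSet;

    fn count_go_modules(all_keys: &[String]) -> usize {
        all_keys
            .iter()
            .filter(|k| k.starts_with("go/") && k.ends_with(".zip"))
            .filter_map(|k| {
                // "go/example.com/mod/@v/v1.0.0.zip" -> "example.com/mod"
                let rest = k.strip_prefix("go/")?;
                let pos = rest.rfind("/@v/")?;
                Some(rest[..pos].to_string())
            })
            .collect::<HashSet<_>>()
            .len()
    }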
@@ -936,32 +874,6 @@ pub async fn get_pypi_detail(storage: &Storage, name: &str) -> PackageDetail {
    PackageDetail { versions }
}

pub async fn get_go_detail(storage: &Storage, module: &str) -> PackageDetail {
    let prefix = format!("go/{}/@v/", module);
    let keys = storage.list(&prefix).await;

    let mut versions = Vec::new();
    for key in keys.iter().filter(|k| k.ends_with(".zip")) {
        if let Some(rest) = key.strip_prefix(&prefix) {
            if let Some(version) = rest.strip_suffix(".zip") {
                let (size, published) = if let Some(meta) = storage.stat(key).await {
                    (meta.size, format_timestamp(meta.modified))
                } else {
                    (0, "N/A".to_string())
                };
                versions.push(VersionInfo {
                    version: version.to_string(),
                    size,
                    published,
                });
            }
        }
    }

    versions.sort_by(|a, b| b.version.cmp(&a.version));
    PackageDetail { versions }
}

fn extract_pypi_version(name: &str, filename: &str) -> Option<String> {
    // Handle both .tar.gz and .whl files
    let clean_name = name.replace('-', "_");
@@ -985,26 +897,3 @@ fn extract_pypi_version(name: &str, filename: &str) -> Option<String> {
    None
}
}

pub async fn get_raw_detail(storage: &Storage, group: &str) -> PackageDetail {
    let prefix = format!("raw/{}/", group);
    let keys = storage.list(&prefix).await;

    let mut versions = Vec::new();
    for key in &keys {
        if let Some(filename) = key.strip_prefix(&prefix) {
            let (size, published) = if let Some(meta) = storage.stat(key).await {
                (meta.size, format_timestamp(meta.modified))
            } else {
                (0, "N/A".to_string())
            };
            versions.push(VersionInfo {
                version: filename.to_string(),
                size,
                published,
            });
        }
    }

    PackageDetail { versions }
}

@@ -90,7 +90,7 @@ fn sidebar_dark(active_page: Option<&str>, t: &Translations) -> String {
let docker_icon = r#"<path fill="currentColor" d="M13.983 11.078h2.119a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.119a.185.185 0 00-.185.185v1.888c0 .102.083.185.185.185m-2.954-5.43h2.118a.186.186 0 00.186-.186V3.574a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.186m0 2.716h2.118a.187.187 0 00.186-.186V6.29a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.887c0 .102.082.185.185.186m-2.93 0h2.12a.186.186 0 00.184-.186V6.29a.185.185 0 00-.185-.185H8.1a.185.185 0 00-.185.185v1.887c0 .102.083.185.185.186m-2.964 0h2.119a.186.186 0 00.185-.186V6.29a.185.185 0 00-.185-.185H5.136a.186.186 0 00-.186.185v1.887c0 .102.084.185.186.186m5.893 2.715h2.118a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.185m-2.93 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.083.185.185.185m-2.964 0h2.119a.185.185 0 00.185-.185V9.006a.185.185 0 00-.185-.186h-2.12a.186.186 0 00-.185.186v1.887c0 .102.084.185.186.185m-2.92 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.082.185.185.185M23.763 9.89c-.065-.051-.672-.51-1.954-.51-.338.001-.676.03-1.01.087-.248-1.7-1.653-2.53-1.716-2.566l-.344-.199-.226.327c-.284.438-.49.922-.612 1.43-.23.97-.09 1.882.403 2.661-.595.332-1.55.413-1.744.42H.751a.751.751 0 00-.75.748 11.376 11.376 0 00.692 4.062c.545 1.428 1.355 2.48 2.41 3.124 1.18.723 3.1 1.137 5.275 1.137.983.003 1.963-.086 2.93-.266a12.248 12.248 0 003.823-1.389c.98-.567 1.86-1.288 2.61-2.136 1.252-1.418 1.998-2.997 2.553-4.4h.221c1.372 0 2.215-.549 2.68-1.009.309-.293.55-.65.707-1.046l.098-.288Z"/>"#;
let maven_icon = r#"<path fill="currentColor" d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 17.93c-3.95-.49-7-3.85-7-7.93 0-.62.08-1.21.21-1.79L9 15v1c0 1.1.9 2 2 2v1.93zm6.9-2.54c-.26-.81-1-1.39-1.9-1.39h-1v-3c0-.55-.45-1-1-1H8v-2h2c.55 0 1-.45 1-1V7h2c1.1 0 2-.9 2-2v-.41c2.93 1.19 5 4.06 5 7.41 0 2.08-.8 3.97-2.1 5.39z"/>"#;
let npm_icon = r#"<path fill="currentColor" d="M0 7.334v8h6.666v1.332H12v-1.332h12v-8H0zm6.666 6.664H5.334v-4H3.999v4H1.335V8.667h5.331v5.331zm4 0v1.336H8.001V8.667h5.334v5.332h-2.669v-.001zm12.001 0h-1.33v-4h-1.336v4h-1.335v-4h-1.33v4h-2.671V8.667h8.002v5.331zM10.665 10H12v2.667h-1.335V10z"/>"#;
let cargo_icon = r#"<path fill="currentColor" d="M6 2h12a1 1 0 011 1v8a1 1 0 01-1 1H6a1 1 0 01-1-1V3a1 1 0 011-1zm0 2v2h12V4H6zm0 3v2h12V7H6zM2 14h8a1 1 0 011 1v6a1 1 0 01-1 1H2a1 1 0 01-1-1v-6a1 1 0 011-1zm0 2v1.5h8V16H2zM14 14h8a1 1 0 011 1v6a1 1 0 01-1 1h-8a1 1 0 01-1-1v-6a1 1 0 011-1zm0 2v1.5h8V16h-8z"/>"#;
let cargo_icon = r#"<path fill="currentColor" d="M20 8h-3V4H3c-1.1 0-2 .9-2 2v11h2c0 1.66 1.34 3 3 3s3-1.34 3-3h6c0 1.66 1.34 3 3 3s3-1.34 3-3h2v-5l-3-4zM6 18.5c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5zm13.5-9l1.96 2.5H17V9.5h2.5zm-1.5 9c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"/>"#;
let pypi_icon = r#"<path fill="currentColor" d="M14.25.18l.9.2.73.26.59.3.45.32.34.34.25.34.16.33.1.3.04.26.02.2-.01.13V8.5l-.05.63-.13.55-.21.46-.26.38-.3.31-.33.25-.35.19-.35.14-.33.1-.3.07-.26.04-.21.02H8.83l-.69.05-.59.14-.5.22-.41.27-.33.32-.27.35-.2.36-.15.37-.1.35-.07.32-.04.27-.02.21v3.06H3.23l-.21-.03-.28-.07-.32-.12-.35-.18-.36-.26-.36-.36-.35-.46-.32-.59-.28-.73-.21-.88-.14-1.05L0 11.97l.06-1.22.16-1.04.24-.87.32-.71.36-.57.4-.44.42-.33.42-.24.4-.16.36-.1.32-.05.24-.01h.16l.06.01h8.16v-.83H6.24l-.01-2.75-.02-.37.05-.34.11-.31.17-.28.25-.26.31-.23.38-.2.44-.18.51-.15.58-.12.64-.1.71-.06.77-.04.84-.02 1.27.05 1.07.13zm-6.3 1.98l-.23.33-.08.41.08.41.23.34.33.22.41.09.41-.09.33-.22.23-.34.08-.41-.08-.41-.23-.33-.33-.22-.41-.09-.41.09-.33.22zM21.1 6.11l.28.06.32.12.35.18.36.27.36.35.35.47.32.59.28.73.21.88.14 1.04.05 1.23-.06 1.23-.16 1.04-.24.86-.32.71-.36.57-.4.45-.42.33-.42.24-.4.16-.36.09-.32.05-.24.02-.16-.01h-8.22v.82h5.84l.01 2.76.02.36-.05.34-.11.31-.17.29-.25.25-.31.24-.38.2-.44.17-.51.15-.58.13-.64.09-.71.07-.77.04-.84.01-1.27-.04-1.07-.14-.9-.2-.73-.25-.59-.3-.45-.33-.34-.34-.25-.34-.16-.33-.1-.3-.04-.25-.02-.2.01-.13v-5.34l.05-.64.13-.54.21-.46.26-.38.3-.32.33-.24.35-.2.35-.14.33-.1.3-.06.26-.04.21-.02.13-.01h5.84l.69-.05.59-.14.5-.21.41-.28.33-.32.27-.35.2-.36.15-.36.1-.35.07-.32.04-.28.02-.21V6.07h2.09l.14.01.21.03zm-6.47 14.25l-.23.33-.08.41.08.41.23.33.33.23.41.08.41-.08.33-.23.23-.33.08-.41-.08-.41-.23-.33-.33-.23-.41-.08-.41.08-.33.23z"/>"#;

    // Dashboard label is translated, registry names stay as-is
@@ -109,20 +109,6 @@ fn sidebar_dark(active_page: Option<&str>, t: &Translations) -> String {
("npm", "/ui/npm", "npm", npm_icon, false),
|
||||
("cargo", "/ui/cargo", "Cargo", cargo_icon, false),
|
||||
("pypi", "/ui/pypi", "PyPI", pypi_icon, false),
|
||||
(
|
||||
"raw",
|
||||
"/ui/raw",
|
||||
"Raw",
|
||||
r#"<path fill="currentColor" d="M14 2H6a2 2 0 00-2 2v16a2 2 0 002 2h12a2 2 0 002-2V8l-6-6zm4 18H6V4h7v5h5v11z"/>"#,
|
||||
false,
|
||||
),
|
||||
(
|
||||
"go",
|
||||
"/ui/go",
|
||||
"Go",
|
||||
r#"<path fill="currentColor" d="M2.64 9.56s.24-.14.65-.38c.41-.24.97-.5 1.63-.7A7.85 7.85 0 017.53 8c.86 0 1.67.17 2.37.52.7.35 1.26.87 1.63 1.51.37.64.54 1.41.54 2.27v.2h-2.7v-.16c0-.47-.09-.86-.28-1.15a1.7 1.7 0 00-.77-.67 2.7 2.7 0 00-1.14-.22c-.56 0-1.06.13-1.46.4-.41.27-.72.66-.93 1.16-.21.5-.31 1.1-.31 1.8 0 .69.1 1.28.32 1.78.21.5.53.88.94 1.15.41.27.9.4 1.47.4.38 0 .73-.06 1.04-.17.31-.12.56-.29.74-.52.19-.23.29-.51.29-.84v-.14H7.15v-1.76h5.07v1.3c0 .8-.17 1.48-.52 2.04a3.46 3.46 0 01-1.5 1.3c-.66.3-1.44.45-2.35.45-.99 0-1.87-.18-2.63-.55a4.2 4.2 0 01-1.77-1.59C3.15 14.82 3 13.94 3 12.89v-.28c0-1.04.16-1.93.48-2.65a3.08 3.08 0 01-.84-.4zm12.1-1.34c.92 0 1.74.18 2.44.55a3.96 3.96 0 011.66 1.59c.4.7.6 1.54.6 2.53v.28c0 .99-.2 1.83-.6 2.53a3.96 3.96 0 01-1.66 1.59c-.7.37-1.52.55-2.44.55s-1.74-.18-2.44-.55a3.96 3.96 0 01-1.66-1.59c-.4-.7-.6-1.54-.6-2.53v-.28c0-.99.2-1.83.6-2.53a3.96 3.96 0 011.66-1.59c.7-.37 1.52-.55 2.44-.55zm0 2.12c-.44 0-.82.12-1.14.37-.32.24-.56.6-.73 1.06-.17.46-.26 1.01-.26 1.65v.28c0 .64.09 1.19.26 1.65.17.46.41.82.73 1.06.32.25.7.37 1.14.37.44 0 .82-.12 1.14-.37.32-.24.56-.6.73-1.06.17-.46.26-1.01.26-1.65v-.28c0-.64-.09-1.19-.26-1.65a2.17 2.17 0 00-.73-1.06 1.78 1.78 0 00-1.14-.37z"/>"#,
            false,
        ),
    ];

    let nav_html: String = nav_items.iter().map(|(id, href, label, icon_path, is_stroke)| {
@@ -291,15 +277,15 @@ pub fn render_registry_card(
) -> String {
    format!(
        r##"
    <a href="{}" id="registry-{}" class="block bg-[#1e293b] rounded-lg border border-slate-700 p-3 hover:border-blue-400 transition-all">
        <div class="flex items-center justify-between mb-2">
            <svg class="w-6 h-6 text-slate-400" fill="currentColor" viewBox="0 0 24 24">
    <a href="{}" id="registry-{}" class="block bg-[#1e293b] rounded-lg border border-slate-700 p-4 md:p-6 hover:border-blue-400 transition-all">
        <div class="flex items-center justify-between mb-3">
            <svg class="w-8 h-8 text-slate-400" fill="currentColor" viewBox="0 0 24 24">
                {}
            </svg>
            <span class="text-[10px] font-medium text-green-400 bg-green-400/10 px-1.5 py-0.5 rounded-full">{}</span>
            <span class="text-xs font-medium text-green-400 bg-green-400/10 px-2 py-1 rounded-full">{}</span>
        </div>
        <div class="text-sm font-semibold text-slate-200 mb-2">{}</div>
        <div class="grid grid-cols-2 gap-1 text-xs">
        <div class="text-lg font-semibold text-slate-200 mb-2">{}</div>
        <div class="grid grid-cols-2 gap-2 text-sm">
            <div>
                <span class="text-slate-500">{}</span>
                <div class="text-slate-300 font-medium">{}</div>
@@ -347,9 +333,9 @@ pub fn render_mount_points_table(
    format!(
        r##"
    <tr class="border-b border-slate-700">
        <td class="px-4 py-3 text-slate-300">{}</td>
        <td class="px-4 py-3 font-mono text-blue-400">{}</td>
        <td class="px-4 py-3 text-slate-400">{}</td>
        <td class="py-3 text-slate-300">{}</td>
        <td class="py-3 font-mono text-blue-400">{}</td>
        <td class="py-3 text-slate-400">{}</td>
    </tr>
    "##,
        registry, mount_path, proxy_display
@@ -372,7 +358,7 @@ pub fn render_mount_points_table(
                <th class="px-4 py-2">{}</th>
            </tr>
        </thead>
        <tbody>
        <tbody class="px-4">
            {}
        </tbody>
    </table>
@@ -402,11 +388,11 @@ pub fn render_activity_row(
    format!(
        r##"
    <tr class="border-b border-slate-700/50 text-sm">
        <td class="px-4 py-2 text-slate-500">{}</td>
        <td class="px-4 py-2 font-medium {}"><span class="px-2 py-0.5 bg-slate-700 rounded">{}</span></td>
        <td class="px-4 py-2 text-slate-300 font-mono text-xs">{}</td>
        <td class="px-4 py-2 text-slate-400">{}</td>
        <td class="px-4 py-2 text-slate-500">{}</td>
        <td class="py-2 text-slate-500">{}</td>
        <td class="py-2 font-medium {}"><span class="px-2 py-0.5 bg-slate-700 rounded">{}</span></td>
        <td class="py-2 text-slate-300 font-mono text-xs">{}</td>
        <td class="py-2 text-slate-400">{}</td>
        <td class="py-2 text-slate-500">{}</td>
    </tr>
    "##,
        timestamp,
@@ -438,7 +424,7 @@ pub fn render_activity_log(rows: &str, t: &Translations) -> String {
                <th class="px-4 py-2">{}</th>
            </tr>
        </thead>
        <tbody>
        <tbody class="px-4">
            {}
        </tbody>
    </table>
@@ -504,7 +490,7 @@ fn sidebar(active_page: Option<&str>) -> String {
let docker_icon = r#"<path fill="currentColor" d="M13.983 11.078h2.119a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.119a.185.185 0 00-.185.185v1.888c0 .102.083.185.185.185m-2.954-5.43h2.118a.186.186 0 00.186-.186V3.574a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.186m0 2.716h2.118a.187.187 0 00.186-.186V6.29a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.887c0 .102.082.185.185.186m-2.93 0h2.12a.186.186 0 00.184-.186V6.29a.185.185 0 00-.185-.185H8.1a.185.185 0 00-.185.185v1.887c0 .102.083.185.185.186m-2.964 0h2.119a.186.186 0 00.185-.186V6.29a.185.185 0 00-.185-.185H5.136a.186.186 0 00-.186.185v1.887c0 .102.084.185.186.186m5.893 2.715h2.118a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.185m-2.93 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.083.185.185.185m-2.964 0h2.119a.185.185 0 00.185-.185V9.006a.185.185 0 00-.185-.186h-2.12a.186.186 0 00-.185.186v1.887c0 .102.084.185.186.185m-2.92 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.082.185.185.185M23.763 9.89c-.065-.051-.672-.51-1.954-.51-.338.001-.676.03-1.01.087-.248-1.7-1.653-2.53-1.716-2.566l-.344-.199-.226.327c-.284.438-.49.922-.612 1.43-.23.97-.09 1.882.403 2.661-.595.332-1.55.413-1.744.42H.751a.751.751 0 00-.75.748 11.376 11.376 0 00.692 4.062c.545 1.428 1.355 2.48 2.41 3.124 1.18.723 3.1 1.137 5.275 1.137.983.003 1.963-.086 2.93-.266a12.248 12.248 0 003.823-1.389c.98-.567 1.86-1.288 2.61-2.136 1.252-1.418 1.998-2.997 2.553-4.4h.221c1.372 0 2.215-.549 2.68-1.009.309-.293.55-.65.707-1.046l.098-.288Z"/>"#;
let maven_icon = r#"<path fill="currentColor" d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 17.93c-3.95-.49-7-3.85-7-7.93 0-.62.08-1.21.21-1.79L9 15v1c0 1.1.9 2 2 2v1.93zm6.9-2.54c-.26-.81-1-1.39-1.9-1.39h-1v-3c0-.55-.45-1-1-1H8v-2h2c.55 0 1-.45 1-1V7h2c1.1 0 2-.9 2-2v-.41c2.93 1.19 5 4.06 5 7.41 0 2.08-.8 3.97-2.1 5.39z"/>"#;
let npm_icon = r#"<path fill="currentColor" d="M0 7.334v8h6.666v1.332H12v-1.332h12v-8H0zm6.666 6.664H5.334v-4H3.999v4H1.335V8.667h5.331v5.331zm4 0v1.336H8.001V8.667h5.334v5.332h-2.669v-.001zm12.001 0h-1.33v-4h-1.336v4h-1.335v-4h-1.33v4h-2.671V8.667h8.002v5.331zM10.665 10H12v2.667h-1.335V10z"/>"#;
let cargo_icon = r#"<path fill="currentColor" d="M6 2h12a1 1 0 011 1v8a1 1 0 01-1 1H6a1 1 0 01-1-1V3a1 1 0 011-1zm0 2v2h12V4H6zm0 3v2h12V7H6zM2 14h8a1 1 0 011 1v6a1 1 0 01-1 1H2a1 1 0 01-1-1v-6a1 1 0 011-1zm0 2v1.5h8V16H2zM14 14h8a1 1 0 011 1v6a1 1 0 01-1 1h-8a1 1 0 01-1-1v-6a1 1 0 011-1zm0 2v1.5h8V16h-8z"/>"#;
let cargo_icon = r#"<path fill="currentColor" d="M20 8h-3V4H3c-1.1 0-2 .9-2 2v11h2c0 1.66 1.34 3 3 3s3-1.34 3-3h6c0 1.66 1.34 3 3 3s3-1.34 3-3h2v-5l-3-4zM6 18.5c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5zm13.5-9l1.96 2.5H17V9.5h2.5zm-1.5 9c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"/>"#;
let pypi_icon = r#"<path fill="currentColor" d="M14.25.18l.9.2.73.26.59.3.45.32.34.34.25.34.16.33.1.3.04.26.02.2-.01.13V8.5l-.05.63-.13.55-.21.46-.26.38-.3.31-.33.25-.35.19-.35.14-.33.1-.3.07-.26.04-.21.02H8.83l-.69.05-.59.14-.5.22-.41.27-.33.32-.27.35-.2.36-.15.37-.1.35-.07.32-.04.27-.02.21v3.06H3.23l-.21-.03-.28-.07-.32-.12-.35-.18-.36-.26-.36-.36-.35-.46-.32-.59-.28-.73-.21-.88-.14-1.05L0 11.97l.06-1.22.16-1.04.24-.87.32-.71.36-.57.4-.44.42-.33.42-.24.4-.16.36-.1.32-.05.24-.01h.16l.06.01h8.16v-.83H6.24l-.01-2.75-.02-.37.05-.34.11-.31.17-.28.25-.26.31-.23.38-.2.44-.18.51-.15.58-.12.64-.1.71-.06.77-.04.84-.02 1.27.05 1.07.13zm-6.3 1.98l-.23.33-.08.41.08.41.23.34.33.22.41.09.41-.09.33-.22.23-.34.08-.41-.08-.41-.23-.33-.33-.22-.41-.09-.41.09-.33.22zM21.1 6.11l.28.06.32.12.35.18.36.27.36.35.35.47.32.59.28.73.21.88.14 1.04.05 1.23-.06 1.23-.16 1.04-.24.86-.32.71-.36.57-.4.45-.42.33-.42.24-.4.16-.36.09-.32.05-.24.02-.16-.01h-8.22v.82h5.84l.01 2.76.02.36-.05.34-.11.31-.17.29-.25.25-.31.24-.38.2-.44.17-.51.15-.58.13-.64.09-.71.07-.77.04-.84.01-1.27-.04-1.07-.14-.9-.2-.73-.25-.59-.3-.45-.33-.34-.34-.25-.34-.16-.33-.1-.3-.04-.25-.02-.2.01-.13v-5.34l.05-.64.13-.54.21-.46.26-.38.3-.32.33-.24.35-.2.35-.14.33-.1.3-.06.26-.04.21-.02.13-.01h5.84l.69-.05.59-.14.5-.21.41-.28.33-.32.27-.35.2-.36.15-.36.1-.35.07-.32.04-.28.02-.21V6.07h2.09l.14.01.21.03zm-6.47 14.25l-.23.33-.08.41.08.41.23.33.33.23.41.08.41-.08.33-.23.23-.33.08-.41-.08-.41-.23-.33-.33-.23-.41-.08-.41.08-.33.23z"/>"#;

    let nav_items = [
@@ -520,20 +506,6 @@ fn sidebar(active_page: Option<&str>) -> String {
        ("npm", "/ui/npm", "npm", npm_icon, false),
        ("cargo", "/ui/cargo", "Cargo", cargo_icon, false),
        ("pypi", "/ui/pypi", "PyPI", pypi_icon, false),
        (
            "raw",
            "/ui/raw",
            "Raw",
            r#"<path fill="currentColor" d="M14 2H6a2 2 0 00-2 2v16a2 2 0 002 2h12a2 2 0 002-2V8l-6-6zm4 18H6V4h7v5h5v11z"/>"#,
            false,
        ),
        (
            "go",
            "/ui/go",
            "Go",
r#"<path fill="currentColor" d="M2.64 9.56s.24-.14.65-.38c.41-.24.97-.5 1.63-.7A7.85 7.85 0 017.53 8c.86 0 1.67.17 2.37.52.7.35 1.26.87 1.63 1.51.37.64.54 1.41.54 2.27v.2h-2.7v-.16c0-.47-.09-.86-.28-1.15a1.7 1.7 0 00-.77-.67 2.7 2.7 0 00-1.14-.22c-.56 0-1.06.13-1.46.4-.41.27-.72.66-.93 1.16-.21.5-.31 1.1-.31 1.8 0 .69.1 1.28.32 1.78.21.5.53.88.94 1.15.41.27.9.4 1.47.4.38 0 .73-.06 1.04-.17.31-.12.56-.29.74-.52.19-.23.29-.51.29-.84v-.14H7.15v-1.76h5.07v1.3c0 .8-.17 1.48-.52 2.04a3.46 3.46 0 01-1.5 1.3c-.66.3-1.44.45-2.35.45-.99 0-1.87-.18-2.63-.55a4.2 4.2 0 01-1.77-1.59C3.15 14.82 3 13.94 3 12.89v-.28c0-1.04.16-1.93.48-2.65a3.08 3.08 0 01-.84-.4zm12.1-1.34c.92 0 1.74.18 2.44.55a3.96 3.96 0 011.66 1.59c.4.7.6 1.54.6 2.53v.28c0 .99-.2 1.83-.6 2.53a3.96 3.96 0 01-1.66 1.59c-.7.37-1.52.55-2.44.55s-1.74-.18-2.44-.55a3.96 3.96 0 01-1.66-1.59c-.4-.7-.6-1.54-.6-2.53v-.28c0-.99.2-1.83.6-2.53a3.96 3.96 0 011.66-1.59c.7-.37 1.52-.55 2.44-.55zm0 2.12c-.44 0-.82.12-1.14.37-.32.24-.56.6-.73 1.06-.17.46-.26 1.01-.26 1.65v.28c0 .64.09 1.19.26 1.65.17.46.41.82.73 1.06.32.25.7.37 1.14.37.44 0 .82-.12 1.14-.37.32-.24.56-.6.73-1.06.17-.46.26-1.01.26-1.65v-.28c0-.64-.09-1.19-.26-1.65a2.17 2.17 0 00-.73-1.06 1.78 1.78 0 00-1.14-.37z"/>"#,
            false,
        ),
    ];

    let nav_html: String = nav_items.iter().map(|(id, href, label, icon_path, is_stroke)| {
@@ -641,9 +613,7 @@ pub mod icons {
pub const DOCKER: &str = r#"<path fill="currentColor" d="M13.983 11.078h2.119a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.119a.185.185 0 00-.185.185v1.888c0 .102.083.185.185.185m-2.954-5.43h2.118a.186.186 0 00.186-.186V3.574a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.186m0 2.716h2.118a.187.187 0 00.186-.186V6.29a.186.186 0 00-.186-.185h-2.118a.185.185 0 00-.185.185v1.887c0 .102.082.185.185.186m-2.93 0h2.12a.186.186 0 00.184-.186V6.29a.185.185 0 00-.185-.185H8.1a.185.185 0 00-.185.185v1.887c0 .102.083.185.185.186m-2.964 0h2.119a.186.186 0 00.185-.186V6.29a.185.185 0 00-.185-.185H5.136a.186.186 0 00-.186.185v1.887c0 .102.084.185.186.186m5.893 2.715h2.118a.186.186 0 00.186-.185V9.006a.186.186 0 00-.186-.186h-2.118a.185.185 0 00-.185.185v1.888c0 .102.082.185.185.185m-2.93 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.083.185.185.185m-2.964 0h2.119a.185.185 0 00.185-.185V9.006a.185.185 0 00-.185-.186h-2.12a.186.186 0 00-.185.186v1.887c0 .102.084.185.186.185m-2.92 0h2.12a.185.185 0 00.184-.185V9.006a.185.185 0 00-.184-.186h-2.12a.185.185 0 00-.184.185v1.888c0 .102.082.185.185.185M23.763 9.89c-.065-.051-.672-.51-1.954-.51-.338.001-.676.03-1.01.087-.248-1.7-1.653-2.53-1.716-2.566l-.344-.199-.226.327c-.284.438-.49.922-.612 1.43-.23.97-.09 1.882.403 2.661-.595.332-1.55.413-1.744.42H.751a.751.751 0 00-.75.748 11.376 11.376 0 00.692 4.062c.545 1.428 1.355 2.48 2.41 3.124 1.18.723 3.1 1.137 5.275 1.137.983.003 1.963-.086 2.93-.266a12.248 12.248 0 003.823-1.389c.98-.567 1.86-1.288 2.61-2.136 1.252-1.418 1.998-2.997 2.553-4.4h.221c1.372 0 2.215-.549 2.68-1.009.309-.293.55-.65.707-1.046l.098-.288Z"/>"#;
pub const MAVEN: &str = r#"<path fill="currentColor" d="M12 2C6.48 2 2 6.48 2 12s4.48 10 10 10 10-4.48 10-10S17.52 2 12 2zm-1 17.93c-3.95-.49-7-3.85-7-7.93 0-.62.08-1.21.21-1.79L9 15v1c0 1.1.9 2 2 2v1.93zm6.9-2.54c-.26-.81-1-1.39-1.9-1.39h-1v-3c0-.55-.45-1-1-1H8v-2h2c.55 0 1-.45 1-1V7h2c1.1 0 2-.9 2-2v-.41c2.93 1.19 5 4.06 5 7.41 0 2.08-.8 3.97-2.1 5.39z"/>"#;
pub const NPM: &str = r#"<path fill="currentColor" d="M0 7.334v8h6.666v1.332H12v-1.332h12v-8H0zm6.666 6.664H5.334v-4H3.999v4H1.335V8.667h5.331v5.331zm4 0v1.336H8.001V8.667h5.334v5.332h-2.669v-.001zm12.001 0h-1.33v-4h-1.336v4h-1.335v-4h-1.33v4h-2.671V8.667h8.002v5.331zM10.665 10H12v2.667h-1.335V10z"/>"#;
pub const CARGO: &str = r#"<path fill="currentColor" d="M6 2h12a1 1 0 011 1v8a1 1 0 01-1 1H6a1 1 0 01-1-1V3a1 1 0 011-1zm0 2v2h12V4H6zm0 3v2h12V7H6zM2 14h8a1 1 0 011 1v6a1 1 0 01-1 1H2a1 1 0 01-1-1v-6a1 1 0 011-1zm0 2v1.5h8V16H2zM14 14h8a1 1 0 011 1v6a1 1 0 01-1 1h-8a1 1 0 01-1-1v-6a1 1 0 011-1zm0 2v1.5h8V16h-8z"/>"#;
pub const GO: &str = r#"<path fill="currentColor" d="M2.64 9.56s.24-.14.65-.38c.41-.24.97-.5 1.63-.7A7.85 7.85 0 017.53 8c.86 0 1.67.17 2.37.52.7.35 1.26.87 1.63 1.51.37.64.54 1.41.54 2.27v.2h-2.7v-.16c0-.47-.09-.86-.28-1.15a1.7 1.7 0 00-.77-.67 2.7 2.7 0 00-1.14-.22c-.56 0-1.06.13-1.46.4-.41.27-.72.66-.93 1.16-.21.5-.31 1.1-.31 1.8 0 .69.1 1.28.32 1.78.21.5.53.88.94 1.15.41.27.9.4 1.47.4.38 0 .73-.06 1.04-.17.31-.12.56-.29.74-.52.19-.23.29-.51.29-.84v-.14H7.15v-1.76h5.07v1.3c0 .8-.17 1.48-.52 2.04a3.46 3.46 0 01-1.5 1.3c-.66.3-1.44.45-2.35.45-.99 0-1.87-.18-2.63-.55a4.2 4.2 0 01-1.77-1.59C3.15 14.82 3 13.94 3 12.89v-.28c0-1.04.16-1.93.48-2.65a3.08 3.08 0 01-.84-.4zm12.1-1.34c.92 0 1.74.18 2.44.55a3.96 3.96 0 011.66 1.59c.4.7.6 1.54.6 2.53v.28c0 .99-.2 1.83-.6 2.53a3.96 3.96 0 01-1.66 1.59c-.7.37-1.52.55-2.44.55s-1.74-.18-2.44-.55a3.96 3.96 0 01-1.66-1.59c-.4-.7-.6-1.54-.6-2.53v-.28c0-.99.2-1.83.6-2.53a3.96 3.96 0 011.66-1.59c.7-.37 1.52-.55 2.44-.55zm0 2.12c-.44 0-.82.12-1.14.37-.32.24-.56.6-.73 1.06-.17.46-.26 1.01-.26 1.65v.28c0 .64.09 1.19.26 1.65.17.46.41.82.73 1.06.32.25.7.37 1.14.37.44 0 .82-.12 1.14-.37.32-.24.56-.6.73-1.06.17-.46.26-1.01.26-1.65v-.28c0-.64-.09-1.19-.26-1.65a2.17 2.17 0 00-.73-1.06 1.78 1.78 0 00-1.14-.37z"/>"#;
pub const RAW: &str = r#"<path fill="currentColor" d="M14 2H6a2 2 0 00-2 2v16a2 2 0 002 2h12a2 2 0 002-2V8l-6-6zm4 18H6V4h7v5h5v11z"/>"#;
pub const CARGO: &str = r#"<path fill="currentColor" d="M20 8h-3V4H3c-1.1 0-2 .9-2 2v11h2c0 1.66 1.34 3 3 3s3-1.34 3-3h6c0 1.66 1.34 3 3 3s3-1.34 3-3h2v-5l-3-4zM6 18.5c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5zm13.5-9l1.96 2.5H17V9.5h2.5zm-1.5 9c-.83 0-1.5-.67-1.5-1.5s.67-1.5 1.5-1.5 1.5.67 1.5 1.5-.67 1.5-1.5 1.5z"/>"#;
pub const PYPI: &str = r#"<path fill="currentColor" d="M14.25.18l.9.2.73.26.59.3.45.32.34.34.25.34.16.33.1.3.04.26.02.2-.01.13V8.5l-.05.63-.13.55-.21.46-.26.38-.3.31-.33.25-.35.19-.35.14-.33.1-.3.07-.26.04-.21.02H8.83l-.69.05-.59.14-.5.22-.41.27-.33.32-.27.35-.2.36-.15.37-.1.35-.07.32-.04.27-.02.21v3.06H3.23l-.21-.03-.28-.07-.32-.12-.35-.18-.36-.26-.36-.36-.35-.46-.32-.59-.28-.73-.21-.88-.14-1.05L0 11.97l.06-1.22.16-1.04.24-.87.32-.71.36-.57.4-.44.42-.33.42-.24.4-.16.36-.1.32-.05.24-.01h.16l.06.01h8.16v-.83H6.24l-.01-2.75-.02-.37.05-.34.11-.31.17-.28.25-.26.31-.23.38-.2.44-.18.51-.15.58-.12.64-.1.71-.06.77-.04.84-.02 1.27.05 1.07.13zm-6.3 1.98l-.23.33-.08.41.08.41.23.34.33.22.41.09.41-.09.33-.22.23-.34.08-.41-.08-.41-.23-.33-.33-.22-.41-.09-.41.09-.33.22zM21.1 6.11l.28.06.32.12.35.18.36.27.36.35.35.47.32.59.28.73.21.88.14 1.04.05 1.23-.06 1.23-.16 1.04-.24.86-.32.71-.36.57-.4.45-.42.33-.42.24-.4.16-.36.09-.32.05-.24.02-.16-.01h-8.22v.82h5.84l.01 2.76.02.36-.05.34-.11.31-.17.29-.25.25-.31.24-.38.2-.44.17-.51.15-.58.13-.64.09-.71.07-.77.04-.84.01-1.27-.04-1.07-.14-.9-.2-.73-.25-.59-.3-.45-.33-.34-.34-.25-.34-.16-.33-.1-.3-.04-.25-.02-.2.01-.13v-5.34l.05-.64.13-.54.21-.46.26-.38.3-.32.33-.24.35-.2.35-.14.33-.1.3-.06.26-.04.21-.02.13-.01h5.84l.69-.05.59-.14.5-.21.41-.28.33-.32.27-.35.2-.36.15-.36.1-.35.07-.32.04-.28.02-.21V6.07h2.09l.14.01.21.03zm-6.47 14.25l-.23.33-.08.41.08.41.23.33.33.23.41.08.41-.08.33-.23.23-.33.08-.41-.08-.41-.23-.33-.33-.23-.41-.08-.41.08-.33.23z"/>"#;
}

@@ -705,7 +675,7 @@ pub fn render_bragging_footer(lang: Lang) -> String {
        </div>
        <div class="grid grid-cols-2 md:grid-cols-3 lg:grid-cols-6 gap-4 text-center">
            <div class="p-3">
                <div class="text-2xl font-bold text-blue-400">32 MB</div>
                <div class="text-2xl font-bold text-blue-400">34 MB</div>
                <div class="text-xs text-slate-500 mt-1">{}</div>
            </div>
            <div class="p-3">
@@ -717,7 +687,7 @@ pub fn render_bragging_footer(lang: Lang) -> String {
                <div class="text-xs text-slate-500 mt-1">{}</div>
            </div>
            <div class="p-3">
                <div class="text-2xl font-bold text-yellow-400">7</div>
                <div class="text-2xl font-bold text-yellow-400">5</div>
                <div class="text-xs text-slate-500 mt-1">{}</div>
            </div>
            <div class="p-3">

@@ -87,10 +87,6 @@ pub fn routes() -> Router<Arc<AppState>> {
        .route("/ui/cargo/{name}", get(cargo_detail))
        .route("/ui/pypi", get(pypi_list))
        .route("/ui/pypi/{name}", get(pypi_detail))
        .route("/ui/go", get(go_list))
        .route("/ui/go/{*name}", get(go_detail))
        .route("/ui/raw", get(raw_list))
        .route("/ui/raw/{*name}", get(raw_detail))
        // API endpoints for HTMX
        .route("/api/ui/stats", get(api_stats))
        .route("/api/ui/dashboard", get(api_dashboard))
@@ -302,79 +298,3 @@ async fn pypi_detail(
    let detail = get_pypi_detail(&state.storage, &name).await;
    Html(render_package_detail("pypi", &name, &detail, lang))
}

// Go pages
async fn go_list(
    State(state): State<Arc<AppState>>,
    Query(query): Query<ListQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
    let page = query.page.unwrap_or(1).max(1);
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);

    let all_modules = state.repo_index.get("go", &state.storage).await;
    let (modules, total) = paginate(&all_modules, page, limit);

    Html(render_registry_list_paginated(
        "go",
        "Go Modules",
        &modules,
        page,
        limit,
        total,
        lang,
    ))
}

async fn go_detail(
    State(state): State<Arc<AppState>>,
    Path(name): Path<String>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let detail = get_go_detail(&state.storage, &name).await;
    Html(render_package_detail("go", &name, &detail, lang))
}

// Raw pages
async fn raw_list(
    State(state): State<Arc<AppState>>,
    Query(query): Query<ListQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang_from_list(&query, headers.get("cookie").and_then(|v| v.to_str().ok()));
    let page = query.page.unwrap_or(1).max(1);
    let limit = query.limit.unwrap_or(DEFAULT_PAGE_SIZE).min(100);

    let all_files = state.repo_index.get("raw", &state.storage).await;
    let (files, total) = paginate(&all_files, page, limit);

    Html(render_registry_list_paginated(
        "raw",
        "Raw Storage",
        &files,
        page,
        limit,
        total,
        lang,
    ))
}

async fn raw_detail(
    State(state): State<Arc<AppState>>,
    Path(name): Path<String>,
    Query(query): Query<LangQuery>,
    headers: axum::http::HeaderMap,
) -> impl IntoResponse {
    let lang = extract_lang(
        &Query(query),
        headers.get("cookie").and_then(|v| v.to_str().ok()),
    );
    let detail = get_raw_detail(&state.storage, &name).await;
    Html(render_package_detail("raw", &name, &detail, lang))
}

@@ -24,8 +24,22 @@ pub fn render_dashboard(data: &DashboardResponse, lang: Lang) -> String {
        .registry_stats
        .iter()
        .map(|r| {
            let icon = get_registry_icon(&r.name);
            let display_name = get_registry_title(&r.name);
            let icon = match r.name.as_str() {
                "docker" => icons::DOCKER,
                "maven" => icons::MAVEN,
                "npm" => icons::NPM,
                "cargo" => icons::CARGO,
                "pypi" => icons::PYPI,
                _ => icons::DOCKER,
            };
            let display_name = match r.name.as_str() {
                "docker" => "Docker",
                "maven" => "Maven",
                "npm" => "npm",
                "cargo" => "Cargo",
                "pypi" => "PyPI",
                _ => &r.name,
            };
            render_registry_card(
                display_name,
                icon,
@@ -60,56 +74,16 @@ pub fn render_dashboard(data: &DashboardResponse, lang: Lang) -> String {
            t.no_activity
        )
    } else {
        // Group consecutive identical entries (same action+artifact+registry+source)
        struct GroupedActivity {
            time: String,
            action: String,
            artifact: String,
            registry: String,
            source: String,
            count: usize,
        }

        let mut grouped: Vec<GroupedActivity> = Vec::new();
        for entry in &data.activity {
            let action = entry.action.to_string();
            let is_repeat = grouped.last().is_some_and(|last| {
                last.action == action
                    && last.artifact == entry.artifact
                    && last.registry == entry.registry
                    && last.source == entry.source
            });

            if is_repeat {
                if let Some(last) = grouped.last_mut() {
                    last.count += 1;
                }
            } else {
                grouped.push(GroupedActivity {
                    time: format_relative_time(&entry.timestamp),
                    action,
                    artifact: entry.artifact.clone(),
                    registry: entry.registry.clone(),
                    source: entry.source.clone(),
                    count: 1,
                });
            }
        }

        grouped
        data.activity
            .iter()
            .map(|g| {
                let display_artifact = if g.count > 1 {
                    format!("{} (x{})", g.artifact, g.count)
                } else {
                    g.artifact.clone()
                };
            .map(|entry| {
                let time_ago = format_relative_time(&entry.timestamp);
                render_activity_row(
                    &g.time,
                    &g.action,
                    &display_artifact,
                    &g.registry,
                    &g.source,
                    &time_ago,
                    &entry.action.to_string(),
                    &entry.artifact,
                    &entry.registry,
                    &entry.source,
                )
            })
            .collect()
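The removed branch above implemented run-length grouping of consecutive identical activity entries. The core of that pattern, reduced to plain strings as a standalone sketch (not the module's code):

    fn group_consecutive(items: &[&str]) -> Vec<(String, usize)> {
        let mut grouped: Vec<(String, usize)> = Vec::new();
        for item in items {
            // Same as the previous run: bump its count; otherwise start a new one.
            let is_repeat = grouped.last().is_some_and(|(last, _)| last.as_str() == *item);
            if is_repeat {
                if let Some((_, count)) = grouped.last_mut() {
                    *count += 1;
                }
            } else {
                grouped.push((item.to_string(), 1));
            }
        }
        grouped
    }

For input ["push", "push", "pull"] this yields [("push", 2), ("pull", 1)], which is what the (x{}) suffix rendered.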
@@ -141,7 +115,7 @@ pub fn render_dashboard(data: &DashboardResponse, lang: Lang) -> String {

    {}

    <div class="grid grid-cols-2 md:grid-cols-4 lg:grid-cols-7 gap-3 mb-6">
    <div class="grid grid-cols-1 md:grid-cols-2 lg:grid-cols-3 xl:grid-cols-5 gap-4 mb-6">
        {}
    </div>

@@ -655,8 +629,6 @@ pub fn render_package_detail(
            "pip install {} --index-url http://127.0.0.1:4000/simple",
            name
        ),
        "go" => format!("GOPROXY=http://127.0.0.1:4000/go go get {}", name),
        "raw" => format!("curl -O http://127.0.0.1:4000/raw/{}/<file>", name),
        _ => String::new(),
    };

@@ -823,8 +795,6 @@ fn get_registry_icon(registry_type: &str) -> &'static str {
        "npm" => icons::NPM,
        "cargo" => icons::CARGO,
        "pypi" => icons::PYPI,
        "go" => icons::GO,
        "raw" => icons::RAW,
        _ => {
            r#"<path fill="currentColor" d="M10 4H4c-1.1 0-1.99.9-1.99 2L2 18c0 1.1.9 2 2 2h16c1.1 0 2-.9 2-2V8c0-1.1-.9-2-2-2h-8l-2-2z"/>"#
        }
@@ -838,8 +808,6 @@ fn get_registry_title(registry_type: &str) -> &'static str {
        "npm" => "npm Registry",
        "cargo" => "Cargo Registry",
        "pypi" => "PyPI Repository",
        "go" => "Go Modules",
        "raw" => "Raw Storage",
        _ => "Registry",
    }
}

@@ -178,12 +178,7 @@ pub fn validate_docker_name(name: &str) -> Result<(), ValidationError> {
                "empty path segment".to_string(),
            ));
        }
        // Safety: segment.is_empty() checked above, but use match for defense-in-depth
        let Some(first) = segment.chars().next() else {
            return Err(ValidationError::InvalidDockerName(
                "empty path segment".to_string(),
            ));
        };
        let first = segment.chars().next().unwrap();
        if !first.is_ascii_alphanumeric() {
            return Err(ValidationError::InvalidDockerName(
                "segment must start with alphanumeric".to_string(),
@@ -297,10 +292,7 @@ pub fn validate_docker_reference(reference: &str) -> Result<(), ValidationError>
    }

    // Validate as tag
    // Safety: empty check at function start, but use let-else for defense-in-depth
    let Some(first) = reference.chars().next() else {
        return Err(ValidationError::EmptyInput);
    };
    let first = reference.chars().next().unwrap();
    if !first.is_ascii_alphanumeric() {
        return Err(ValidationError::InvalidReference(
            "tag must start with alphanumeric".to_string(),
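Both hunks above replace the same construction: a let-else that re-checks emptiness instead of calling unwrap() on the first character. The idiom in isolation, as a sketch with a hypothetical error type:

    fn first_char_checked(segment: &str) -> Result<char, String> {
        // Even if the caller has already rejected empty input, avoid unwrap():
        // a later refactor cannot silently reintroduce a panic path.
        let Some(first) = segment.chars().next() else {
            return Err("empty path segment".to_string());
        };
        Ok(first)
    }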
@@ -504,150 +496,3 @@ mod tests {
        assert!(validate_docker_reference("-dash").is_err());
    }
}

#[cfg(test)]
mod proptests {
    use super::*;
    use proptest::prelude::*;

    /// Valid lowercase Docker name component
    fn docker_component() -> impl Strategy<Value = String> {
        "[a-z0-9][a-z0-9._-]{0,30}".prop_filter("no consecutive separators", |s| {
            !s.contains("..") && !s.contains("//") && !s.contains("--") && !s.contains("__")
        })
    }

    /// Valid sha256 hex string
    fn sha256_hex() -> impl Strategy<Value = String> {
        "[0-9a-f]{64}"
    }

    /// Valid Docker tag (no `..` or `/` which trigger path traversal rejection)
    fn docker_tag() -> impl Strategy<Value = String> {
        "[a-zA-Z0-9][a-zA-Z0-9._-]{0,50}".prop_filter("no path traversal", |s| {
            !s.contains("..") && !s.contains('/')
        })
    }

    // === validate_storage_key ===

    proptest! {
        #[test]
        fn storage_key_never_panics(s in "\\PC{0,2000}") {
            let _ = validate_storage_key(&s);
        }

        #[test]
        fn storage_key_rejects_path_traversal(
            prefix in "[a-z]{0,10}",
            suffix in "[a-z]{0,10}"
        ) {
            let key = format!("{}/../{}", prefix, suffix);
            prop_assert!(validate_storage_key(&key).is_err());
        }

        #[test]
        fn storage_key_rejects_absolute(path in "/[a-z/]{1,50}") {
            prop_assert!(validate_storage_key(&path).is_err());
        }

        #[test]
        fn storage_key_accepts_valid(
            segments in prop::collection::vec("[a-z0-9]{1,20}", 1..5)
        ) {
            let key = segments.join("/");
            prop_assert!(validate_storage_key(&key).is_ok());
        }
    }

    // === validate_docker_name ===

    proptest! {
        #[test]
        fn docker_name_never_panics(s in "\\PC{0,500}") {
            let _ = validate_docker_name(&s);
        }

        #[test]
        fn docker_name_accepts_valid_single(name in docker_component()) {
            prop_assert!(validate_docker_name(&name).is_ok());
        }

        #[test]
        fn docker_name_accepts_valid_path(
            components in prop::collection::vec(docker_component(), 1..4)
        ) {
            let name = components.join("/");
            prop_assert!(validate_docker_name(&name).is_ok());
        }

        #[test]
        fn docker_name_rejects_uppercase(
            lower in "[a-z]{1,10}",
            upper in "[A-Z]{1,10}"
        ) {
            let name = format!("{}{}", lower, upper);
            prop_assert!(validate_docker_name(&name).is_err());
        }
    }

    // === validate_digest ===

    proptest! {
        #[test]
        fn digest_never_panics(s in "\\PC{0,200}") {
            let _ = validate_digest(&s);
        }

        #[test]
        fn digest_sha256_roundtrip(hash in sha256_hex()) {
            let digest = format!("sha256:{}", hash);
            prop_assert!(validate_digest(&digest).is_ok());
        }

        #[test]
        fn digest_sha512_roundtrip(hash in "[0-9a-f]{128}") {
            let digest = format!("sha512:{}", hash);
            prop_assert!(validate_digest(&digest).is_ok());
        }

        #[test]
        fn digest_wrong_algo_rejected(
            algo in "[a-z]{2,8}",
            hash in "[0-9a-f]{64}"
        ) {
            prop_assume!(algo != "sha256" && algo != "sha512");
            let digest = format!("{}:{}", algo, hash);
            prop_assert!(validate_digest(&digest).is_err());
        }
    }

    // === validate_docker_reference ===

    proptest! {
        #[test]
        fn reference_never_panics(s in "\\PC{0,200}") {
            let _ = validate_docker_reference(&s);
        }

        #[test]
        fn reference_accepts_valid_tag(tag in docker_tag()) {
            prop_assert!(validate_docker_reference(&tag).is_ok());
        }

        #[test]
        fn reference_accepts_valid_digest(hash in sha256_hex()) {
            let reference = format!("sha256:{}", hash);
            prop_assert!(validate_docker_reference(&reference).is_ok());
        }

        #[test]
        fn reference_rejects_traversal(
            prefix in "[a-z]{0,5}",
            suffix in "[a-z]{0,5}"
        ) {
            let reference = format!("{}../{}", prefix, suffix);
            prop_assert!(validate_docker_reference(&reference).is_err());
        }
    }
}

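A natural extension of the strategies above, not present in the source: compose docker_component() and docker_tag() to exercise both validators on generated name/tag pairs in one property. A sketch only.

    proptest! {
        #[test]
        fn name_and_tag_parts_validate(
            name in docker_component(),
            tag in docker_tag()
        ) {
            prop_assert!(validate_docker_name(&name).is_ok());
            prop_assert!(validate_docker_reference(&tag).is_ok());
        }
    }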
@@ -1,3 +0,0 @@
[toolchain]
channel = "stable"
components = ["clippy", "rustfmt"]
@@ -1,9 +0,0 @@
[nora]
packages = ["nora-registry"]
engine = "Llvm"
fail-under = 38
out = ["Json", "Html"]
output-dir = "coverage"
test-timeout = "5m"
exclude-files = ["nora-registry/src/ui/*", "nora-registry/src/main.rs", "nora-registry/src/openapi.rs"]
workspace = false
276 tests/smoke.sh
@@ -178,284 +178,10 @@ NPM_COUNT=$(echo "$STATS" | python3 -c "import sys,json; print(json.load(sys.std
|
||||
if [ "$NPM_COUNT" -gt 0 ] 2>/dev/null; then
|
||||
pass "Dashboard npm count > 0 (got $NPM_COUNT)"
|
||||
else
|
||||
# Known issue: repo_index rebuild for npm proxy-cached packages
|
||||
# is not triggered by the npm handler (missing invalidate call).
|
||||
# Tracked separately — do not block smoke suite on this.
|
||||
echo " WARN: Dashboard npm count is $NPM_COUNT (known issue, skipping)"
|
||||
fail "Dashboard npm count is $NPM_COUNT, expected > 0"
|
||||
fi
echo ""
echo "--- Docker Push/Pull + Digest Verify ---"

# Create a minimal Docker image, push, pull, verify digest
DOCKER_AVAILABLE=true
if ! docker info >/dev/null 2>&1; then
  echo "  SKIP: Docker daemon not available"
  DOCKER_AVAILABLE=false
fi

if [ "$DOCKER_AVAILABLE" = true ]; then
  DOCKER_IMG="localhost:${PORT}/smoke-test/hello:v1"

  # Create tiny image from scratch
  DOCKER_BUILD_DIR=$(mktemp -d)
  echo "FROM scratch" > "$DOCKER_BUILD_DIR/Dockerfile"
  echo "smoke-test" > "$DOCKER_BUILD_DIR/data.txt"
  echo "COPY data.txt /data.txt" >> "$DOCKER_BUILD_DIR/Dockerfile"

  if docker build -t "$DOCKER_IMG" "$DOCKER_BUILD_DIR" >/dev/null 2>&1; then
    pass "docker build smoke image"
  else
    fail "docker build smoke image"
  fi
  rm -rf "$DOCKER_BUILD_DIR"

  # Push
  if docker push "$DOCKER_IMG" >/dev/null 2>&1; then
    pass "docker push to NORA"
  else
    fail "docker push to NORA"
  fi

  # Get manifest from registry and confirm it parses as JSON
  MANIFEST=$(curl -sf -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \
    "$BASE/v2/smoke-test/hello/manifests/v1" 2>/dev/null || echo "")
  if [ -n "$MANIFEST" ] && echo "$MANIFEST" | python3 -c "import sys,json; json.load(sys.stdin)" >/dev/null 2>&1; then
    pass "docker manifest retrievable from NORA"
  else
    fail "docker manifest not retrievable"
  fi

  # Remove local image and pull back
  docker rmi "$DOCKER_IMG" >/dev/null 2>&1 || true
  if docker pull "$DOCKER_IMG" >/dev/null 2>&1; then
    pass "docker pull from NORA"
  else
    fail "docker pull from NORA"
  fi

  # Verify the pulled image carries a well-formed sha256 repo digest
  # (format check only; a byte-level comparison follows as a sketch below)
  PUSH_DIGEST=$(docker inspect "$DOCKER_IMG" --format='{{index .RepoDigests 0}}' 2>/dev/null | cut -d@ -f2)
  if [ -n "$PUSH_DIGEST" ] && echo "$PUSH_DIGEST" | grep -q "^sha256:"; then
    pass "docker digest verified (${PUSH_DIGEST:0:20}...)"
  else
    fail "docker digest verification failed"
  fi
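  # Hedged extra check (editor's sketch, not part of the original suite): hash
  # the manifest bytes and compare with the digest Docker reports. Caveat: the
  # $(...) capture above strips trailing newlines, so if NORA serves a trailing
  # newline the local hash will differ; treat a mismatch as informational.
  if [ -n "$MANIFEST" ] && [ -n "$PUSH_DIGEST" ] && command -v sha256sum >/dev/null 2>&1; then
    LOCAL_DIGEST="sha256:$(printf '%s' "$MANIFEST" | sha256sum | cut -d' ' -f1)"
    if [ "$LOCAL_DIGEST" = "$PUSH_DIGEST" ]; then
      pass "manifest bytes hash to the reported digest"
    else
      echo "  WARN: local manifest hash differs from RepoDigest (informational sketch)"
    fi
  fi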
  # Cleanup
  docker rmi "$DOCKER_IMG" >/dev/null 2>&1 || true
fi

echo ""
echo "--- npm Install + Integrity Verify ---"

# Test real npm client against NORA (not just curl)
NPM_TEST_DIR=$(mktemp -d)
cd "$NPM_TEST_DIR"

# Create minimal package.json
cat > package.json << 'PKGJSON'
{
  "name": "nora-smoke-test",
  "version": "1.0.0",
  "dependencies": {
    "chalk": "5.4.1"
  }
}
PKGJSON

# npm install using NORA as registry
if npm install --registry "$BASE/npm/" --prefer-online --no-audit --no-fund >/dev/null 2>&1; then
  pass "npm install chalk via NORA registry"
else
  fail "npm install chalk via NORA registry"
fi

# Verify package was installed
if [ -f "node_modules/chalk/package.json" ]; then
  INSTALLED_VER=$(python3 -c "import json; print(json.load(open('node_modules/chalk/package.json'))['version'])" 2>/dev/null || echo "")
  if [ "$INSTALLED_VER" = "5.4.1" ]; then
    pass "npm installed correct version (5.4.1)"
  else
    fail "npm installed wrong version: $INSTALLED_VER"
  fi
else
  fail "npm node_modules/chalk not found"
fi

# Verify integrity: check that package-lock.json has sha512 integrity
if [ -f "package-lock.json" ]; then
  INTEGRITY=$(python3 -c "
import json
lock = json.load(open('package-lock.json'))
pkgs = lock.get('packages', {})
chalk = pkgs.get('node_modules/chalk', pkgs.get('chalk', {}))
print(chalk.get('integrity', ''))
" 2>/dev/null || echo "")
  if echo "$INTEGRITY" | grep -q "^sha512-"; then
    pass "npm integrity hash present (sha512)"
  else
    fail "npm integrity hash missing: $INTEGRITY"
  fi
else
  fail "npm package-lock.json not created"
fi
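# Hedged extra check (editor's sketch): recompute the sha512 integrity of the
# tarball as served by NORA and compare it with the lockfile value. The tarball
# URL layout ($BASE/npm/chalk/-/chalk-5.4.1.tgz) follows the standard npm
# registry convention and is an assumption about NORA's routing.
if [ -n "$INTEGRITY" ] && command -v openssl >/dev/null 2>&1; then
  TARBALL_SHA=$(curl -sf "$BASE/npm/chalk/-/chalk-5.4.1.tgz" \
    | openssl dgst -sha512 -binary | openssl base64 -A)
  if [ "sha512-$TARBALL_SHA" = "$INTEGRITY" ]; then
    pass "npm tarball bytes match lockfile integrity"
  else
    echo "  WARN: tarball sha512 does not match lockfile (informational sketch)"
  fi
fi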
cd /tmp
rm -rf "$NPM_TEST_DIR"

echo ""
echo "--- Upstream Timeout Handling ---"

# Verify that requesting a non-existent package from upstream returns 404
# quickly; it must not hang
TIMEOUT_START=$(date +%s)
TIMEOUT_RESULT=$(curl -s -o /dev/null -w "%{http_code}" --max-time 15 \
  "$BASE/npm/@nora-smoke-test/nonexistent-package-xyz-12345")
TIMEOUT_END=$(date +%s)
TIMEOUT_DURATION=$((TIMEOUT_END - TIMEOUT_START))

if [ "$TIMEOUT_RESULT" = "404" ]; then
  pass "upstream 404 returned correctly"
else
  fail "upstream returned $TIMEOUT_RESULT, expected 404"
fi

if [ "$TIMEOUT_DURATION" -lt 10 ]; then
  pass "upstream 404 returned in ${TIMEOUT_DURATION}s (< 10s)"
else
  fail "upstream 404 took ${TIMEOUT_DURATION}s (too slow, retry may hang)"
fi
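# Editor's sketch: a tighter timing read using curl's own timer instead of
# whole-second date arithmetic; %{time_total} has sub-second resolution.
# Informational only, so it does not pass/fail.
CURL_TIME=$(curl -s -o /dev/null -w "%{time_total}" --max-time 15 \
  "$BASE/npm/@nora-smoke-test/nonexistent-package-xyz-12345")
echo "  INFO: upstream 404 round-trip took ${CURL_TIME}s (curl %{time_total})"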
echo ""
# ============================================
# Go Proxy Tests
# ============================================
echo ""
echo "=== Go Proxy ==="

# Pre-seed a Go module for testing
GO_MODULE="example.com/testmod"
GO_VERSION="v1.0.0"
GO_STORAGE="$STORAGE_DIR/go"
mkdir -p "$GO_STORAGE/example.com/testmod/@v"

# Create .info file
echo '{"Version":"v1.0.0","Time":"2026-01-01T00:00:00Z"}' > "$GO_STORAGE/example.com/testmod/@v/v1.0.0.info"

# Create .mod file
echo 'module example.com/testmod

go 1.21' > "$GO_STORAGE/example.com/testmod/@v/v1.0.0.mod"

# Create list file
echo "v1.0.0" > "$GO_STORAGE/example.com/testmod/@v/list"
# Test: Go module list
check "Go list versions" \
  curl -sf "$BASE/go/example.com/testmod/@v/list" -o /dev/null

# Test: Go module .info
INFO_RESULT=$(curl -sf "$BASE/go/example.com/testmod/@v/v1.0.0.info" 2>/dev/null)
if echo "$INFO_RESULT" | grep -q "v1.0.0"; then
  pass "Go .info returns version"
else
  fail "Go .info: $INFO_RESULT"
fi

# Test: Go module .mod
MOD_RESULT=$(curl -sf "$BASE/go/example.com/testmod/@v/v1.0.0.mod" 2>/dev/null)
if echo "$MOD_RESULT" | grep -q "module example.com/testmod"; then
  pass "Go .mod returns module content"
else
  fail "Go .mod: $MOD_RESULT"
fi

# Test: Go @latest (200 with upstream, 404 without; both valid)
LATEST_CODE=$(curl -s -o /dev/null -w "%{http_code}" "$BASE/go/example.com/testmod/@latest")
if [ "$LATEST_CODE" = "200" ] || [ "$LATEST_CODE" = "404" ]; then
  pass "Go @latest handled ($LATEST_CODE)"
else
  fail "Go @latest returned $LATEST_CODE"
fi

# Test: Go path traversal rejection (--path-as-is stops curl from squashing
# the dot segments itself, so they actually reach the server)
TRAVERSAL_RESULT=$(curl -s --path-as-is -o /dev/null -w "%{http_code}" "$BASE/go/../../etc/passwd/@v/list")
if [ "$TRAVERSAL_RESULT" = "400" ] || [ "$TRAVERSAL_RESULT" = "404" ]; then
  pass "Go path traversal rejected ($TRAVERSAL_RESULT)"
else
  fail "Go path traversal returned $TRAVERSAL_RESULT"
fi
# Test: Go nonexistent module
NOTFOUND=$(curl -s -o /dev/null -w "%{http_code}" "$BASE/go/nonexistent.com/pkg/@v/list")
if [ "$NOTFOUND" = "404" ]; then
  pass "Go 404 on nonexistent module"
else
  fail "Go nonexistent returned $NOTFOUND"
fi

# ============================================
# Raw Registry Extended Tests
# ============================================
echo ""
echo "=== Raw Registry (extended) ==="

# Test: Raw upload and download
echo "integration-test-data-$(date +%s)" | curl -sf -X PUT --data-binary @- "$BASE/raw/integration/test.txt" >/dev/null 2>&1
check "Raw upload + download" \
  curl -sf "$BASE/raw/integration/test.txt" -o /dev/null

# Test: Raw HEAD (check exists)
HEAD_RESULT=$(curl -sf -o /dev/null -w "%{http_code}" --head "$BASE/raw/integration/test.txt")
if [ "$HEAD_RESULT" = "200" ]; then
  pass "Raw HEAD returns 200"
else
  fail "Raw HEAD returned $HEAD_RESULT"
fi

# Test: Raw 404 on nonexistent
NOTFOUND=$(curl -s -o /dev/null -w "%{http_code}" "$BASE/raw/nonexistent/file.bin")
if [ "$NOTFOUND" = "404" ]; then
  pass "Raw 404 on nonexistent file"
else
  fail "Raw nonexistent returned $NOTFOUND"
fi

# Test: Raw path traversal (--path-as-is keeps curl from normalizing the path
# before it is sent)
TRAVERSAL=$(curl -s --path-as-is -o /dev/null -w "%{http_code}" "$BASE/raw/../../../etc/passwd")
if [ "$TRAVERSAL" = "400" ] || [ "$TRAVERSAL" = "404" ]; then
  pass "Raw path traversal rejected ($TRAVERSAL)"
else
  fail "Raw path traversal returned $TRAVERSAL"
fi
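# Editor's sketch: percent-encoded traversal is a common bypass of naive "../"
# filters, so also probe %2e%2e segments. Whether NORA decodes before
# validating is exactly the behavior under test; 400 or 404 both count as a
# rejection, anything else is flagged as informational.
ENC_TRAVERSAL=$(curl -s --path-as-is -o /dev/null -w "%{http_code}" "$BASE/raw/%2e%2e/%2e%2e/%2e%2e/etc/passwd")
if [ "$ENC_TRAVERSAL" = "400" ] || [ "$ENC_TRAVERSAL" = "404" ]; then
  pass "Raw encoded traversal rejected ($ENC_TRAVERSAL)"
else
  echo "  WARN: Raw encoded traversal returned $ENC_TRAVERSAL (informational sketch)"
fi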
# Test: Raw overwrite
echo "version-1" | curl -sf -X PUT --data-binary @- "$BASE/raw/integration/overwrite.txt" >/dev/null 2>&1
echo "version-2" | curl -sf -X PUT --data-binary @- "$BASE/raw/integration/overwrite.txt" >/dev/null 2>&1
CONTENT=$(curl -sf "$BASE/raw/integration/overwrite.txt" 2>/dev/null)
if [ "$CONTENT" = "version-2" ]; then
  pass "Raw overwrite works"
else
  fail "Raw overwrite: got '$CONTENT'"
fi

# Test: Raw delete
curl -sf -X DELETE "$BASE/raw/integration/overwrite.txt" >/dev/null 2>&1
DELETE_CHECK=$(curl -s -o /dev/null -w "%{http_code}" "$BASE/raw/integration/overwrite.txt")
if [ "$DELETE_CHECK" = "404" ]; then
  pass "Raw delete works"
else
  fail "Raw delete: file still returns $DELETE_CHECK"
fi

# Test: Raw binary data (not just text)
dd if=/dev/urandom bs=1024 count=10 2>/dev/null | curl -sf -X PUT --data-binary @- "$BASE/raw/integration/binary.bin" >/dev/null 2>&1
BIN_SIZE=$(curl -sf "$BASE/raw/integration/binary.bin" 2>/dev/null | wc -c)
if [ "$BIN_SIZE" -ge 10000 ]; then
  pass "Raw binary upload/download (${BIN_SIZE} bytes)"
else
  fail "Raw binary: expected ~10240, got $BIN_SIZE"
fi
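# Editor's sketch: a size check alone cannot catch corrupted bytes, so hash the
# payload locally before upload and again after download. Assumes sha256sum is
# available; the binary2.bin path is introduced here for the sketch only.
if command -v sha256sum >/dev/null 2>&1; then
  BIN_FILE=$(mktemp)
  dd if=/dev/urandom of="$BIN_FILE" bs=1024 count=10 2>/dev/null
  LOCAL_SUM=$(sha256sum "$BIN_FILE" | cut -d' ' -f1)
  curl -sf -X PUT --data-binary "@$BIN_FILE" "$BASE/raw/integration/binary2.bin" >/dev/null 2>&1
  REMOTE_SUM=$(curl -sf "$BASE/raw/integration/binary2.bin" | sha256sum | cut -d' ' -f1)
  if [ "$LOCAL_SUM" = "$REMOTE_SUM" ]; then
    pass "Raw binary round-trip checksum matches"
  else
    fail "Raw binary checksum mismatch: $LOCAL_SUM vs $REMOTE_SUM"
  fi
  rm -f "$BIN_FILE"
fi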
echo "--- Mirror CLI ---"

# Create a minimal lockfile
LOCKFILE=$(mktemp)