chore: remove unused crates and demo traffic scripts

- Remove nora-cli (unimplemented stub)
- Remove nora-storage (standalone S3 server, not used)
- Remove demo traffic generator and systemd service
This commit is contained in:
2026-03-18 12:19:58 +00:00
parent 1cc5c8cc86
commit 59cdd4530b
7 changed files with 0 additions and 562 deletions

View File

@@ -1,83 +0,0 @@
#!/bin/bash
# Demo traffic simulator for NORA registry
# Generates random registry activity for dashboard demo
# Base URL of the local NORA registry all traffic is sent to.
REGISTRY="http://localhost:4000"
# All activity is appended here; the script needs write access to /var/log.
LOG_FILE="/var/log/nora-demo-traffic.log"
# Sample packages to fetch
NPM_PACKAGES=("lodash" "express" "react" "axios" "moment" "underscore" "chalk" "debug")
# Maven artifacts are full repository paths appended to $REGISTRY/maven2/.
MAVEN_ARTIFACTS=(
"org/apache/commons/commons-lang3/3.12.0/commons-lang3-3.12.0.pom"
"com/google/guava/guava/31.1-jre/guava-31.1-jre.pom"
"org/slf4j/slf4j-api/2.0.0/slf4j-api-2.0.0.pom"
)
# Images used by docker_cycle; assumed to already exist locally -- TODO confirm
DOCKER_IMAGES=("alpine" "busybox" "hello-world")
# Append a timestamped message to the demo-traffic log file.
log() {
    local ts
    ts="$(date '+%Y-%m-%d %H:%M:%S')"
    echo "[$ts] $1" >> "$LOG_FILE"
}
# Random sleep between min and max seconds
# Sleep for a uniformly random number of seconds in [min, max].
random_sleep() {
    local lo=$1
    local hi=$2
    sleep $(( lo + RANDOM % (hi - lo + 1) ))
}
# Fetch random npm package
# Request the metadata document of one randomly chosen npm package.
fetch_npm() {
    local idx=$(( RANDOM % ${#NPM_PACKAGES[@]} ))
    local pkg=${NPM_PACKAGES[$idx]}
    log "NPM: fetching $pkg"
    curl -s "$REGISTRY/npm/$pkg" > /dev/null 2>&1
}
# Fetch random maven artifact
# Request one randomly chosen maven artifact POM from the registry proxy.
fetch_maven() {
    local idx=$(( RANDOM % ${#MAVEN_ARTIFACTS[@]} ))
    local artifact=${MAVEN_ARTIFACTS[$idx]}
    log "MAVEN: fetching $artifact"
    curl -s "$REGISTRY/maven2/$artifact" > /dev/null 2>&1
}
# Docker push/pull cycle
# Run one full tag -> push -> pull -> cleanup cycle for a random image,
# generating both upload and download traffic on the registry.
docker_cycle() {
    local idx=$(( RANDOM % ${#DOCKER_IMAGES[@]} ))
    local image=${DOCKER_IMAGES[$idx]}
    local tag
    tag="demo-$(date +%s)"
    local target="localhost:4000/demo/$image:$tag"
    log "DOCKER: push/pull cycle for $image"
    # Tag the local base image under a unique registry name and push it.
    docker tag "$image:latest" "$target" 2>/dev/null
    docker push "$target" > /dev/null 2>&1
    # Drop the local copy so the pull below actually hits the registry.
    docker rmi "$target" > /dev/null 2>&1
    docker pull "$target" > /dev/null 2>&1
    # Remove the pulled copy again so repeated cycles start clean.
    docker rmi "$target" > /dev/null 2>&1
}
# Main loop: forever pick one weighted operation, then wait a random interval.
log "Starting demo traffic simulator"
while :; do
    # Weighting: 40% npm, 30% maven, 30% docker.
    case $(( RANDOM % 10 )) in
        [0-3]) fetch_npm ;;
        [4-6]) fetch_maven ;;
        *)     docker_cycle ;;
    esac
    # Random delay: 30-120 seconds between operations.
    random_sleep 30 120
done

View File

@@ -1,15 +0,0 @@
# systemd unit that keeps the demo traffic simulator running permanently.
[Unit]
Description=NORA Demo Traffic Simulator
# The docker_cycle step in the script needs a running Docker daemon.
After=docker.service
Requires=docker.service
[Service]
Type=simple
ExecStart=/opt/nora/demo-traffic.sh
# Restart unconditionally (the script is an infinite loop and should never
# exit), waiting 10 seconds between attempts.
Restart=always
RestartSec=10
# Script's own stdout/stderr go to the journal; its log() output goes to
# /var/log/nora-demo-traffic.log directly.
StandardOutput=journal
StandardError=journal
[Install]
WantedBy=multi-user.target

View File

@@ -1,23 +0,0 @@
# Crate manifest for the (stub) NORA command-line client.
[package]
name = "nora-cli"
# Package metadata is inherited from the workspace root manifest.
version.workspace = true
edition.workspace = true
license.workspace = true
authors.workspace = true
repository.workspace = true
homepage.workspace = true
description = "CLI tool for NORA registry"
# Single binary target built from src/main.rs.
[[bin]]
name = "nora-cli"
path = "src/main.rs"
[dependencies]
# Runtime, HTTP client and JSON handling pinned at the workspace level.
tokio.workspace = true
reqwest.workspace = true
serde.workspace = true
serde_json.workspace = true
# Argument parsing with derive macros (used by src/main.rs).
clap = { version = "4", features = ["derive"] }
# NOTE(review): indicatif/tar/flate2 are not referenced by the visible
# source -- presumably intended for the unimplemented push/pull; confirm.
indicatif = "0.18"
tar = "0.4"
flate2 = "1.1"

View File

@@ -1,55 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use clap::{Parser, Subcommand};
// Top-level argument parser for the nora-cli binary.
// NOTE: intentionally using // comments here -- clap turns /// doc comments
// into --help text, which would change runtime behavior.
#[derive(Parser)]
#[command(name = "nora-cli")]
#[command(about = "CLI tool for Nora registry")]
struct Cli {
// The subcommand to execute (Login / Push / Pull).
#[command(subcommand)]
command: Commands,
}
// All subcommands supported by the CLI. Each handler in main() is currently
// a stub that only prints its intent (see the TODOs there).
// NOTE: field comments use // on purpose -- clap would surface /// doc
// comments as --help text, changing runtime output.
#[derive(Subcommand)]
enum Commands {
/// Login to a registry
Login {
// Registry base URL to authenticate against.
#[arg(long)]
registry: String,
// Account name; short form -u is also accepted.
#[arg(short, long)]
username: String,
},
/// Push an artifact
Push {
#[arg(long)]
registry: String,
// Local path of the artifact to upload (positional argument).
path: String,
},
/// Pull an artifact
Pull {
#[arg(long)]
registry: String,
// Identifier of the artifact to download (positional argument).
artifact: String,
},
}
#[tokio::main]
async fn main() {
    // Parse CLI arguments; clap exits with a usage message on error.
    let args = Cli::parse();

    // Every subcommand is currently a stub that only announces its intent.
    match args.command {
        Commands::Login { registry, username } => {
            // TODO: implement
            println!("Logging in to {} as {}", registry, username);
        }
        Commands::Push { registry, path } => {
            // TODO: implement
            println!("Pushing {} to {}", path, registry);
        }
        Commands::Pull { registry, artifact } => {
            // TODO: implement
            println!("Pulling {} from {}", artifact, registry);
        }
    }
}

View File

@@ -1,28 +0,0 @@
# Crate manifest for the standalone S3-compatible storage server.
[package]
name = "nora-storage"
# Package metadata is inherited from the workspace root manifest.
version.workspace = true
edition.workspace = true
license.workspace = true
authors.workspace = true
repository.workspace = true
homepage.workspace = true
description = "S3-compatible storage server for NORA"
# Single binary target built from src/main.rs.
[[bin]]
name = "nora-storage"
path = "src/main.rs"
[dependencies]
# Core stack (async runtime, web framework, serde, logging) pinned by the
# workspace root.
tokio.workspace = true
axum.workspace = true
serde.workspace = true
serde_json.workspace = true
tracing.workspace = true
tracing-subscriber.workspace = true
# Config file parsing (see src/config.rs).
toml = "1.0"
# NOTE(review): uuid/sha2/base64/httpdate are not referenced by the visible
# sources -- confirm they are still needed before keeping them.
uuid = { version = "1", features = ["v4"] }
sha2 = "0.10"
base64 = "0.22"
httpdate = "1"
# Timestamp formatting for the S3 XML responses.
chrono = { version = "0.4", features = ["serde"] }
# XML serialization of the S3 response structs.
quick-xml = { version = "0.39", features = ["serialize"] }

View File

@@ -1,47 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
use serde::{Deserialize, Serialize};
use std::fs;
/// Top-level configuration, deserialized from `config.toml`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Config {
/// HTTP listener settings.
pub server: ServerConfig,
/// On-disk object storage settings.
pub storage: StorageConfig,
}
/// Network address the HTTP server binds to.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ServerConfig {
/// Host/interface to bind (default "127.0.0.1", see `Default`).
pub host: String,
/// TCP port to listen on (default 3000).
pub port: u16,
}
/// Object storage settings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StorageConfig {
/// Directory holding one subdirectory per bucket (default "data").
pub data_dir: String,
/// Maximum accepted request body size in bytes (default 1 GiB).
pub max_body_size: usize,
}
impl Config {
    /// Load configuration from `config.toml` in the working directory.
    ///
    /// A missing file is the expected "no config" case and silently yields
    /// [`Config::default`]. A file that exists but fails to parse also falls
    /// back to defaults, but now emits a warning instead of being silently
    /// ignored (the original `.ok()` chain hid parse errors entirely).
    pub fn load() -> Self {
        match fs::read_to_string("config.toml") {
            Ok(content) => match toml::from_str(&content) {
                Ok(config) => config,
                Err(e) => {
                    eprintln!(
                        "warning: config.toml is invalid ({}); using default configuration",
                        e
                    );
                    Self::default()
                }
            },
            // No config file present: use built-in defaults quietly.
            Err(_) => Self::default(),
        }
    }
}
impl Default for Config {
    /// Local-development defaults: listen on 127.0.0.1:3000 and store
    /// objects under ./data with a 1 GiB request body limit.
    fn default() -> Self {
        let server = ServerConfig {
            host: "127.0.0.1".to_string(),
            port: 3000,
        };
        let storage = StorageConfig {
            data_dir: "data".to_string(),
            max_body_size: 1 << 30, // 1 GiB (1024 * 1024 * 1024)
        };
        Self { server, storage }
    }
}

View File

@@ -1,311 +0,0 @@
// Copyright (c) 2026 Volkov Pavel | DevITWay
// SPDX-License-Identifier: MIT
mod config;
use axum::extract::DefaultBodyLimit;
use axum::{
body::Bytes,
extract::{Path, State},
http::StatusCode,
response::{IntoResponse, Response},
routing::{delete, get, put},
Router,
};
use chrono::Utc;
use config::Config;
use quick_xml::se::to_string as to_xml;
use serde::Serialize;
use std::fs;
use std::sync::Arc;
use tracing::info;
/// Shared application state handed to every axum handler via `State`.
pub struct AppState {
/// Server + storage configuration (see config.rs).
pub config: Config,
}
/// XML body of the S3 ListBuckets response (GET /), serialized as
/// <ListAllMyBucketsResult>.
#[derive(Serialize)]
#[serde(rename = "ListAllMyBucketsResult")]
struct ListBucketsResult {
#[serde(rename = "Buckets")]
buckets: Buckets,
}
/// Wrapper element: S3 nests bucket entries as <Buckets><Bucket>...</Bucket></Buckets>.
#[derive(Serialize)]
struct Buckets {
#[serde(rename = "Bucket")]
bucket: Vec<BucketInfo>,
}
/// One <Bucket> entry: name plus creation timestamp.
#[derive(Serialize)]
struct BucketInfo {
#[serde(rename = "Name")]
name: String,
/// Formatted as "%Y-%m-%dT%H:%M:%SZ" by list_buckets (directory mtime).
#[serde(rename = "CreationDate")]
creation_date: String,
}
/// XML body of the S3 ListObjects response (GET /{bucket}), serialized as
/// <ListBucketResult>.
#[derive(Serialize)]
#[serde(rename = "ListBucketResult")]
struct ListObjectsResult {
/// Bucket name echoed back to the client.
#[serde(rename = "Name")]
name: String,
/// One <Contents> element per object found under the bucket directory.
#[serde(rename = "Contents")]
contents: Vec<ObjectInfo>,
}
/// One <Contents> entry describing a single object.
#[derive(Serialize)]
struct ObjectInfo {
/// Object key relative to the bucket root ("dir/file" for nested paths).
#[serde(rename = "Key")]
key: String,
/// File size in bytes.
#[serde(rename = "Size")]
size: u64,
/// File mtime formatted as "%Y-%m-%dT%H:%M:%SZ" (see collect_files).
#[serde(rename = "LastModified")]
last_modified: String,
}
/// S3-style <Error> body carrying a machine-readable code (e.g. "NoSuchKey")
/// and a human-readable message.
#[derive(Serialize)]
#[serde(rename = "Error")]
struct S3Error {
#[serde(rename = "Code")]
code: String,
#[serde(rename = "Message")]
message: String,
}
/// Serialize `data` to XML and wrap it in a 200 OK response with the
/// standard XML declaration and an application/xml content type.
fn xml_response<T: Serialize>(data: T) -> Response {
    // Serialization failure degrades to an empty document rather than a 500.
    let body = to_xml(&data).unwrap_or_default();
    let xml = format!("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n{body}");
    let headers = [(axum::http::header::CONTENT_TYPE, "application/xml")];
    (StatusCode::OK, headers, xml).into_response()
}
/// Build an S3-style XML <Error> response with the given HTTP status,
/// error code (e.g. "NoSuchKey") and message.
fn error_response(status: StatusCode, code: &str, message: &str) -> Response {
    let payload = S3Error {
        code: code.to_string(),
        message: message.to_string(),
    };
    // As in xml_response: serialization failure yields an empty body.
    let body = to_xml(&payload).unwrap_or_default();
    let xml = format!("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n{body}");
    let headers = [(axum::http::header::CONTENT_TYPE, "application/xml")];
    (status, headers, xml).into_response()
}
// Entry point: configure logging, load config, build the S3-style router,
// and serve it over plain HTTP.
#[tokio::main]
async fn main() {
// Logging: honor RUST_LOG from the environment, with this crate forced to
// at least `info`.
tracing_subscriber::fmt()
.with_env_filter(
tracing_subscriber::EnvFilter::from_default_env()
.add_directive("nora_storage=info".parse().expect("valid directive")),
)
.init();
// Config comes from ./config.toml, falling back to built-in defaults.
let config = Config::load();
fs::create_dir_all(&config.storage.data_dir).expect("Failed to create data directory");
let state = Arc::new(AppState {
config: config.clone(),
});
// Route table mirrors the S3 REST layout: bucket-level operations on
// /{bucket}, object-level operations on /{bucket}/{*key} -- the wildcard
// capture allows object keys containing slashes.
let app = Router::new()
.route("/", get(list_buckets))
.route("/{bucket}", get(list_objects))
.route("/{bucket}", put(create_bucket))
.route("/{bucket}", delete(delete_bucket))
.route("/{bucket}/{*key}", put(put_object))
.route("/{bucket}/{*key}", get(get_object))
.route("/{bucket}/{*key}", delete(delete_object))
// Cap request bodies at the configured limit (1 GiB by default).
.layer(DefaultBodyLimit::max(config.storage.max_body_size))
.with_state(state);
let addr = format!("{}:{}", config.server.host, config.server.port);
let listener = tokio::net::TcpListener::bind(&addr)
.await
.expect("Failed to bind to address");
info!("nora-storage (S3 compatible) running on http://{}", addr);
axum::serve(listener, app).await.expect("Server error");
}
/// GET / -- S3 ListBuckets: every top-level directory under the data dir is
/// reported as a bucket, using its mtime as the creation date. Entries that
/// fail any metadata lookup are skipped silently, as before.
async fn list_buckets(State(state): State<Arc<AppState>>) -> Response {
    let data_dir = &state.config.storage.data_dir;
    let entries = match fs::read_dir(data_dir) {
        Ok(entries) => entries,
        Err(_) => {
            return error_response(
                StatusCode::INTERNAL_SERVER_ERROR,
                "InternalError",
                "Failed to read data",
            )
        }
    };
    let mut buckets = Vec::new();
    for entry in entries.flatten() {
        if !entry.path().is_dir() {
            continue;
        }
        let name = match entry.file_name().into_string() {
            Ok(name) => name,
            Err(_) => continue,
        };
        let modified = match entry.metadata().and_then(|m| m.modified()) {
            Ok(modified) => modified,
            Err(_) => continue,
        };
        let datetime: chrono::DateTime<Utc> = modified.into();
        buckets.push(BucketInfo {
            name,
            creation_date: datetime.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
        });
    }
    xml_response(ListBucketsResult {
        buckets: Buckets { bucket: buckets },
    })
}
/// GET /{bucket} -- S3-style ListObjects (no pagination): recursively lists
/// every file under the bucket directory. Returns NoSuchBucket (404) when the
/// directory is missing.
async fn list_objects(State(state): State<Arc<AppState>>, Path(bucket): Path<String>) -> Response {
    // Security: the bucket name comes straight from the request path; a value
    // such as ".." would otherwise let callers list directories outside
    // data_dir (path traversal).
    if bucket.contains("..") || bucket.contains('/') || bucket.contains('\\') {
        return error_response(
            StatusCode::BAD_REQUEST,
            "InvalidBucketName",
            "The specified bucket is not valid",
        );
    }
    let bucket_path = format!("{}/{}", state.config.storage.data_dir, bucket);
    if !std::path::Path::new(&bucket_path).is_dir() {
        return error_response(
            StatusCode::NOT_FOUND,
            "NoSuchBucket",
            "The specified bucket does not exist",
        );
    }
    let objects = collect_files(std::path::Path::new(&bucket_path), "");
    xml_response(ListObjectsResult {
        name: bucket,
        contents: objects,
    })
}
/// Recursively walk `dir`, emitting one ObjectInfo per regular file.
/// `prefix` accumulates the key path relative to the bucket root; files whose
/// mtime cannot be read are skipped, matching the original behavior.
fn collect_files(dir: &std::path::Path, prefix: &str) -> Vec<ObjectInfo> {
    let entries = match fs::read_dir(dir) {
        Ok(entries) => entries,
        Err(_) => return Vec::new(),
    };
    let mut result = Vec::new();
    for entry in entries.flatten() {
        let file_name = entry.file_name().into_string().unwrap_or_default();
        let key = if prefix.is_empty() {
            file_name
        } else {
            format!("{}/{}", prefix, file_name)
        };
        let path = entry.path();
        if path.is_dir() {
            // Descend into subdirectories, extending the key prefix.
            result.extend(collect_files(&path, &key));
            continue;
        }
        if let Ok(metadata) = entry.metadata() {
            if let Ok(modified) = metadata.modified() {
                let timestamp: chrono::DateTime<Utc> = modified.into();
                result.push(ObjectInfo {
                    key,
                    size: metadata.len(),
                    last_modified: timestamp.format("%Y-%m-%dT%H:%M:%SZ").to_string(),
                });
            }
        }
    }
    result
}
/// PUT /{bucket} -- create a bucket directory under the data dir.
async fn create_bucket(State(state): State<Arc<AppState>>, Path(bucket): Path<String>) -> Response {
    // Security: reject names that could point outside the data directory.
    if bucket.contains("..") || bucket.contains('/') || bucket.contains('\\') {
        return error_response(
            StatusCode::BAD_REQUEST,
            "InvalidBucketName",
            "The specified bucket is not valid",
        );
    }
    let bucket_path = format!("{}/{}", state.config.storage.data_dir, bucket);
    match fs::create_dir(&bucket_path) {
        Ok(_) => (StatusCode::OK, "").into_response(),
        // Only an actual "already exists" failure is a 409; the original
        // reported every error (permissions, missing data dir, ...) as
        // BucketAlreadyExists.
        Err(e) if e.kind() == std::io::ErrorKind::AlreadyExists => error_response(
            StatusCode::CONFLICT,
            "BucketAlreadyExists",
            "Bucket already exists",
        ),
        Err(_) => error_response(
            StatusCode::INTERNAL_SERVER_ERROR,
            "InternalError",
            "Failed to create bucket",
        ),
    }
}
async fn put_object(
State(state): State<Arc<AppState>>,
Path((bucket, key)): Path<(String, String)>,
body: Bytes,
) -> Response {
let file_path = format!("{}/{}/{}", state.config.storage.data_dir, bucket, key);
if let Some(parent) = std::path::Path::new(&file_path).parent() {
let _ = fs::create_dir_all(parent);
}
match fs::write(&file_path, &body) {
Ok(_) => {
println!("PUT {}/{} ({} bytes)", bucket, key, body.len());
(StatusCode::OK, "").into_response()
}
Err(e) => {
println!("ERROR writing {}/{}: {}", bucket, key, e);
error_response(
StatusCode::INTERNAL_SERVER_ERROR,
"InternalError",
"Failed to write object",
)
}
}
}
/// GET /{bucket}/{key} -- return the raw object bytes, or NoSuchKey (404)
/// when the file does not exist.
async fn get_object(
    State(state): State<Arc<AppState>>,
    Path((bucket, key)): Path<(String, String)>,
) -> Response {
    // Security: a ".." component in the attacker-controlled bucket or key
    // would allow reading arbitrary files outside the data directory
    // (e.g. GET /b/..%2F..%2Fetc%2Fpasswd). Reject such requests.
    if bucket.contains("..")
        || bucket.contains('/')
        || bucket.contains('\\')
        || key.split(|c| c == '/' || c == '\\').any(|seg| seg == "..")
    {
        return error_response(
            StatusCode::BAD_REQUEST,
            "InvalidArgument",
            "Invalid bucket or key",
        );
    }
    let file_path = format!("{}/{}/{}", state.config.storage.data_dir, bucket, key);
    // Any read failure is reported as NoSuchKey, matching the original.
    match fs::read(&file_path) {
        Ok(data) => (StatusCode::OK, data).into_response(),
        Err(_) => error_response(
            StatusCode::NOT_FOUND,
            "NoSuchKey",
            "The specified key does not exist",
        ),
    }
}
async fn delete_object(
State(state): State<Arc<AppState>>,
Path((bucket, key)): Path<(String, String)>,
) -> Response {
let file_path = format!("{}/{}/{}", state.config.storage.data_dir, bucket, key);
match fs::remove_file(&file_path) {
Ok(_) => {
println!("DELETE {}/{}", bucket, key);
(StatusCode::NO_CONTENT, "").into_response()
}
Err(_) => error_response(
StatusCode::NOT_FOUND,
"NoSuchKey",
"The specified key does not exist",
),
}
}
async fn delete_bucket(State(state): State<Arc<AppState>>, Path(bucket): Path<String>) -> Response {
let bucket_path = format!("{}/{}", state.config.storage.data_dir, bucket);
match fs::remove_dir(&bucket_path) {
Ok(_) => {
println!("DELETE bucket {}", bucket);
(StatusCode::NO_CONTENT, "").into_response()
}
Err(_) => error_response(
StatusCode::CONFLICT,
"BucketNotEmpty",
"The bucket is not empty",
),
}
}