Monorepo consolidation: workspace, shared types, transport plans, docker/swarm assets
Some checks failed
ci / rust (push) Failing after 2m34s
ci / ui (push) Failing after 30s

This commit is contained in:
2026-03-30 11:40:42 +03:00
parent 7e7041cf8b
commit 1298d9a3df
246 changed files with 55434 additions and 0 deletions

View File

@@ -0,0 +1,16 @@
/// The deploy annotation builder must emit the Grafana payload we expect:
/// the given epoch timestamp, structured tags, and human-readable text.
#[test]
fn annotation_writer_produces_expected_grafana_payload() {
    let annotation = api::build_grafana_deploy_annotation(api::DeployAnnotationArgs {
        service: "gateway",
        version: Some("1.2.3"),
        git_sha: Some("abc123"),
        time_ms: 1234567890,
    });
    assert_eq!(annotation.time, 1234567890);
    // Each expected tag must be present (order-independent).
    let has_tag = |want: &str| annotation.tags.iter().any(|t| t == want);
    assert!(has_tag("deploy"));
    assert!(has_tag("service:gateway"));
    assert!(has_tag("version:1.2.3"));
    assert!(has_tag("git_sha:abc123"));
    assert!(annotation.text.contains("deploy gateway"));
}

View File

@@ -0,0 +1,39 @@
/// `extract_build_info` pulls service/version/git_sha labels out of
/// `*_build_info` metric lines and ignores comments and unrelated metrics.
#[test]
fn build_info_parser_extracts_expected_labels() {
    let metrics = r#"
# HELP gateway_build_info build info
# TYPE gateway_build_info gauge
gateway_build_info{service="gateway",version="1.2.3",git_sha="abc"} 1
runner_build_info{service="runner",version="2.0.0",git_sha="def"} 1
unrelated_metric 5
"#;
    let parsed = api::extract_build_info(metrics);
    assert_eq!(parsed.len(), 2);
    // Helper keeps the three-label match readable for both services.
    let has = |svc: &str, ver: &str, sha: &str| {
        parsed
            .iter()
            .any(|i| i.service == svc && i.version == ver && i.git_sha == sha)
    };
    assert!(has("gateway", "1.2.3", "abc"));
    assert!(has("runner", "2.0.0", "def"));
}
/// Every core service must publish a `*_build_info` metric that the
/// parser surfaces under its service label.
#[test]
fn build_info_snapshot_has_required_services() {
    let metrics = r#"
gateway_build_info{service="gateway",version="1.2.3",git_sha="abc"} 1
aggregate_build_info{service="aggregate",version="1.0.0",git_sha="aaa"} 1
projection_build_info{service="projection",version="1.0.0",git_sha="bbb"} 1
runner_build_info{service="runner",version="2.0.0",git_sha="ccc"} 1
"#;
    let parsed = api::extract_build_info(metrics);
    for required in ["gateway", "aggregate", "projection", "runner"] {
        let present = parsed.iter().any(|i| i.service == required);
        assert!(present, "missing build_info for service={required}");
    }
}

View File

@@ -0,0 +1,55 @@
use std::{fs, path::PathBuf, time::Duration};
/// Resolve the repository root: two directory levels above this crate's
/// manifest directory (repo_root/crates/api -> repo_root).
fn repo_root() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    // ancestors() yields self, parent, grandparent, ...; nth(2) is the
    // grandparent, matching parent().and_then(parent).
    manifest_dir
        .ancestors()
        .nth(2)
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// The observability compose file must parse as YAML and declare the full
/// metrics/logs/traces stack.
#[test]
fn docker_compose_files_parse_and_include_required_services() {
    let compose_path = repo_root().join("observability/docker-compose.yml");
    let compose = fs::read_to_string(compose_path).unwrap();
    let doc: serde_yaml::Value = serde_yaml::from_str(&compose).unwrap();
    let services = doc
        .get("services")
        .and_then(|x| x.as_mapping())
        .expect("missing services");
    for required in ["grafana", "victoria-metrics", "vmagent", "loki", "tempo"] {
        let key = serde_yaml::Value::String(required.to_string());
        assert!(services.contains_key(key), "missing service {required}");
    }
}
/// Gated smoke check: `docker compose config` must validate the compose
/// file within 10 seconds. Runs only with CONTROL_TEST_DOCKER=1 and
/// `--ignored`.
#[tokio::test]
#[ignore]
async fn docker_compose_config_validation_is_gated_and_fast() {
    // Double gate: #[ignore] plus an explicit env opt-in.
    let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
    assert_eq!(enabled.as_deref(), Some("1"));
    let compose = repo_root().join("observability/docker-compose.yml");
    let config_future = tokio::process::Command::new("docker")
        .arg("compose")
        .arg("-f")
        .arg(compose)
        .arg("config")
        .output();
    // Hard timeout so a wedged docker daemon cannot hang the suite.
    let out = tokio::time::timeout(Duration::from_secs(10), config_future)
        .await
        .expect("docker compose config timed out")
        .expect("failed to run docker compose config");
    assert!(
        out.status.success(),
        "docker compose config failed: {}",
        String::from_utf8_lossy(&out.stderr)
    );
}

View File

@@ -0,0 +1,6 @@
/// Documents the gating convention: docker-backed tests are `#[ignore]`d
/// and additionally require CONTROL_TEST_DOCKER=1 to be exported.
#[test]
#[ignore]
fn docker_integration_tests_are_gated() {
    let gate = std::env::var("CONTROL_TEST_DOCKER").ok();
    assert_eq!(gate.as_deref(), Some("1"));
}

View File

@@ -0,0 +1,183 @@
use jsonwebtoken::{EncodingKey, Header, encode};
use serde::Serialize;
use std::{fs, net::TcpListener, time::Duration};
/// JWT claim set serialized into test tokens (see `token` in this file).
/// Field names are the wire-format claim names — do not rename.
#[derive(Serialize)]
struct Claims {
    sub: String, // subject; tests use "op_1"
    session_id: String, // session correlation id; tests use "sess_1"
    permissions: Vec<String>, // permission strings, e.g. "control:read"
    exp: usize, // expiry as seconds since the Unix epoch (now + 60 in tests)
}
/// Ask the OS for an ephemeral TCP port by binding to port 0 on loopback;
/// the listener is dropped immediately so the port can be reused.
fn free_port() -> u16 {
    let listener = TcpListener::bind("127.0.0.1:0").unwrap();
    let addr = listener.local_addr().unwrap();
    addr.port()
}
/// Mint a short-lived (60 second) HS256 JWT for operator "op_1" /
/// session "sess_1" carrying the given permission strings.
fn token(secret: &[u8], perms: &[&str]) -> String {
    let now_secs = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let claims = Claims {
        sub: "op_1".to_string(),
        session_id: "sess_1".to_string(),
        permissions: perms.iter().map(|p| (*p).to_string()).collect(),
        exp: (now_secs + 60) as usize,
    };
    encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).unwrap()
}
/// Poll `{url}/ready` every 100ms until it returns a success status,
/// panicking if the control-api is not ready within 10 seconds.
async fn wait_ready(url: &str) {
    let client = reqwest::Client::new();
    let deadline = tokio::time::Instant::now() + Duration::from_secs(10);
    loop {
        // Connection errors count as "not ready yet", not as failures.
        let ready = match client.get(format!("{url}/ready")).send().await {
            Ok(resp) => resp.status().is_success(),
            Err(_) => false,
        };
        if ready {
            return;
        }
        if tokio::time::Instant::now() > deadline {
            panic!("control-api did not become ready");
        }
        tokio::time::sleep(Duration::from_millis(100)).await;
    }
}
/// End-to-end (docker-gated): stand up four nginx containers that stub the
/// fleet's /health, /ready, and /metrics endpoints, boot the real
/// control-api binary pointed at them, and verify the authenticated
/// fleet-snapshot and tenants endpoints respond.
#[tokio::test]
#[ignore]
async fn control_plane_can_see_the_fleet_via_docker_stubs() {
    // Double gate: #[ignore] plus an explicit CONTROL_TEST_DOCKER=1 opt-in.
    let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
    assert_eq!(enabled.as_deref(), Some("1"));
    // nginx config shared by every stub; /metrics serves one build_info
    // sample so the control-api has something to scrape.
    let nginx_conf = r#"
server {
listen 80;
server_name _;
location = /health { return 200 "ok\n"; }
location = /ready { return 200 "ready\n"; }
location = /metrics { return 200 "stub_build_info{service=\"stub\",version=\"dev\",git_sha=\"000\"} 1\n"; }
}
"#;
    let mut conf_path = std::env::temp_dir();
    conf_path.push(format!(
        "cloudlysis-control-nginx-{}.conf",
        uuid::Uuid::new_v4()
    ));
    fs::write(&conf_path, nginx_conf).unwrap();
    // One stub per fleet service, each on its own ephemeral host port.
    let gateway_port = free_port();
    let runner_port = free_port();
    let aggregate_port = free_port();
    let projection_port = free_port();
    // Launch a detached (-d --rm) nginx container mapping `port` -> 80 with
    // the stub config mounted read-only; returns the container id printed
    // by `docker run -d` on stdout.
    async fn run_stub(name: &str, port: u16, conf: &std::path::Path) -> String {
        let out = tokio::process::Command::new("docker")
            .args(["run", "-d", "--rm"])
            .args(["-p", &format!("{port}:80")])
            .args([
                "-v",
                &format!("{}:/etc/nginx/conf.d/default.conf:ro", conf.display()),
            ])
            .arg("nginx:1.29-alpine")
            .output()
            .await
            .expect("failed to run docker");
        assert!(
            out.status.success(),
            "{name} stub failed: {}",
            String::from_utf8_lossy(&out.stderr)
        );
        String::from_utf8_lossy(&out.stdout).trim().to_string()
    }
    let gateway_id = run_stub("gateway", gateway_port, &conf_path).await;
    let runner_id = run_stub("runner", runner_port, &conf_path).await;
    let aggregate_id = run_stub("aggregate", aggregate_port, &conf_path).await;
    let projection_id = run_stub("projection", projection_port, &conf_path).await;
    // NOTE(review): must stay in sync with the literal passed to
    // CONTROL_GATEWAY_JWT_HS256_SECRET below, or auth will fail.
    let secret = b"e2e_secret";
    let api_port = free_port();
    let api_url = format!("http://127.0.0.1:{api_port}");
    // Minimal empty placement file so the control-api can boot.
    let mut placement_path = std::env::temp_dir();
    placement_path.push(format!(
        "cloudlysis-control-placement-{}.json",
        uuid::Uuid::new_v4()
    ));
    fs::write(
        &placement_path,
        r#"{"revision":"e2e","aggregate_placement":{"placements":[]},"projection_placement":{"placements":[]},"runner_placement":{"placements":[]}}"#,
    )
    .unwrap();
    // Boot the real control-api binary with the four stubs as its fleet.
    let mut child = tokio::process::Command::new(env!("CARGO_BIN_EXE_api"))
        .env("CONTROL_API_ADDR", format!("127.0.0.1:{api_port}"))
        .env("CONTROL_GATEWAY_JWT_HS256_SECRET", "e2e_secret")
        .env("CONTROL_PLACEMENT_PATH", placement_path.to_string_lossy().to_string())
        .env(
            "CONTROL_FLEET_SERVICES",
            format!(
                "gateway=http://127.0.0.1:{gateway_port},aggregate=http://127.0.0.1:{aggregate_port},projection=http://127.0.0.1:{projection_port},runner=http://127.0.0.1:{runner_port}"
            ),
        )
        .spawn()
        .expect("failed to spawn control-api");
    wait_ready(&api_url).await;
    let client = reqwest::Client::new();
    // A read-only operator token suffices for snapshot + tenants.
    let t = token(secret, &["control:read"]);
    let res = client
        .get(format!("{api_url}/admin/v1/fleet/snapshot"))
        .header(reqwest::header::AUTHORIZATION, format!("Bearer {t}"))
        .send()
        .await
        .unwrap();
    assert!(res.status().is_success());
    let v: serde_json::Value = res.json().await.unwrap();
    let services = v.get("services").and_then(|x| x.as_array()).unwrap();
    // Four stubs plus the control-api reporting itself.
    assert!(
        services.len() >= 5,
        "expected at least 5 services (including control-api), got {}",
        services.len()
    );
    let res = client
        .get(format!("{api_url}/admin/v1/tenants"))
        .header(reqwest::header::AUTHORIZATION, format!("Bearer {t}"))
        .send()
        .await
        .unwrap();
    assert!(res.status().is_success());
    // Teardown is best-effort: kill the api process, stop the stubs
    // (--rm removes the containers), delete the temp files.
    let _ = child.kill().await;
    for id in [gateway_id, runner_id, aggregate_id, projection_id] {
        let _ = tokio::process::Command::new("docker")
            .args(["stop", &id])
            .output()
            .await;
    }
    let _ = fs::remove_file(&conf_path);
    let _ = fs::remove_file(&placement_path);
}

View File

@@ -0,0 +1,30 @@
/// The CONTROL_FLEET_SERVICES spec is a comma-separated list of
/// `name=url` pairs; whitespace is trimmed and malformed entries
/// (empty, missing `=`, empty name or url) are silently dropped.
///
/// NOTE(review): this test parses the spec with a LOCAL copy of the
/// parser defined inline below — it never calls the parsing code in the
/// `api` crate, so it cannot catch regressions there. It only proves the
/// `api::FleetService` shape compiles and documents the intended format.
/// Consider exposing the crate's real parser and calling it here instead.
#[test]
fn fleet_services_env_parser_is_lenient() {
    let services = {
        // Reference implementation of the lenient pair parser.
        fn parse(spec: &str) -> Vec<api::FleetService> {
            spec.split(',')
                .filter_map(|pair| {
                    let pair = pair.trim();
                    if pair.is_empty() {
                        return None;
                    }
                    let (name, url) = pair.split_once('=')?;
                    let name = name.trim();
                    let url = url.trim();
                    if name.is_empty() || url.is_empty() {
                        return None;
                    }
                    Some(api::FleetService {
                        name: name.to_string(),
                        base_url: url.to_string(),
                    })
                })
                .collect()
        }
        // "broken" has no '=', " =http://z" has an empty name: both dropped.
        parse(" gateway=http://x , ,runner=http://y,broken, =http://z ")
    };
    assert_eq!(services.len(), 2);
    assert_eq!(services[0].name, "gateway");
    assert_eq!(services[1].name, "runner");
}

View File

@@ -0,0 +1,23 @@
use std::time::Duration;
/// Gated NATS reachability probe: parse host:port out of
/// CONTROL_TEST_NATS_URL and require a raw TCP connect within 2 seconds.
#[tokio::test]
#[ignore]
async fn nats_integration_tests_are_gated_and_fast_fail() {
    let url = std::env::var("CONTROL_TEST_NATS_URL").expect("CONTROL_TEST_NATS_URL is required");
    // Strip the optional scheme and any path suffix, leaving host[:port].
    let without_scheme = url.strip_prefix("nats://").unwrap_or(url.as_str());
    let hostport = without_scheme.split('/').next().unwrap_or(without_scheme);
    let (host, port_str) = match hostport.split_once(':') {
        Some((h, p)) => (h, p),
        None => (hostport, "4222"), // default NATS client port
    };
    let port: u16 = port_str
        .parse()
        .expect("invalid port in CONTROL_TEST_NATS_URL");
    let connect = tokio::net::TcpStream::connect((host, port));
    tokio::time::timeout(Duration::from_secs(2), connect)
        .await
        .expect("tcp connect to NATS timed out")
        .expect("failed to connect to NATS");
}

View File

@@ -0,0 +1,75 @@
use std::{collections::BTreeSet, fs, path::PathBuf};
/// Resolve the repository root: two directory levels above this crate's
/// manifest directory.
fn repo_root() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    // nth(2) of ancestors() is the grandparent, same as two parent() hops.
    manifest_dir
        .ancestors()
        .nth(2)
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// Both Grafana provisioning files must exist and parse as YAML.
#[test]
fn grafana_provisioning_files_are_syntactically_valid() {
    let root = repo_root();
    // (relative path, missing-file message, invalid-yaml message)
    let checks = [
        (
            "observability/grafana/provisioning/datasources/datasources.yml",
            "missing grafana datasources provisioning file",
            "invalid grafana datasources yaml",
        ),
        (
            "observability/grafana/provisioning/dashboards/dashboards.yml",
            "missing grafana dashboards provisioning file",
            "invalid grafana dashboards yaml",
        ),
    ];
    for (rel, missing_msg, invalid_msg) in checks {
        let raw = fs::read_to_string(root.join(rel)).expect(missing_msg);
        let _: serde_yaml::Value = serde_yaml::from_str(&raw).expect(invalid_msg);
    }
}
/// Every *.json file under the dashboards directory must parse as JSON,
/// and at least one dashboard must exist.
#[test]
fn grafana_dashboards_are_syntactically_valid_json() {
    let dashboards_dir = repo_root().join("observability/grafana/dashboards");
    let entries = fs::read_dir(&dashboards_dir).expect("missing dashboards dir");
    let mut checked = 0usize;
    for entry in entries {
        let path = entry.expect("failed to read dashboards dir entry").path();
        if path.extension().and_then(|e| e.to_str()) != Some("json") {
            continue;
        }
        let raw = fs::read_to_string(&path).expect("failed to read dashboard json");
        // Include the offending path in the panic message on parse failure.
        serde_json::from_str::<serde_json::Value>(&raw)
            .unwrap_or_else(|e| panic!("{path:?}: {e}"));
        checked += 1;
    }
    assert!(checked > 0, "expected at least one dashboard json file");
}
/// The vmagent scrape config must parse and define scrape jobs for
/// VictoriaMetrics, vmagent itself, and the control-api.
#[test]
fn vmagent_config_parses_and_includes_required_jobs() {
    let raw = fs::read_to_string(repo_root().join("observability/vmagent/scrape.yml"))
        .expect("missing vmagent scrape config");
    let value: serde_yaml::Value =
        serde_yaml::from_str(&raw).expect("invalid vmagent scrape yaml");
    // Collect every job_name under scrape_configs; a missing or non-list
    // scrape_configs yields an empty set (and the asserts below fail).
    let job_names: BTreeSet<String> = value
        .get("scrape_configs")
        .and_then(|v| v.as_sequence())
        .into_iter()
        .flatten()
        .filter_map(|cfg| cfg.get("job_name").and_then(|v| v.as_str()))
        .map(str::to_string)
        .collect();
    for required in ["victoria-metrics", "vmagent", "control-api"] {
        assert!(
            job_names.contains(required),
            "vmagent scrape config missing required job_name={required}"
        );
    }
}

View File

@@ -0,0 +1,61 @@
use std::{
net::TcpStream,
path::PathBuf,
process::Command,
time::{Duration, Instant},
};
/// Resolve the repository root: two directory levels above this crate's
/// manifest directory.
fn repo_root() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    // nth(2) of ancestors() is the grandparent, same as two parent() hops.
    manifest_dir
        .ancestors()
        .nth(2)
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// Repeatedly attempt a 1-second TCP connect to `addr`, sleeping 250ms
/// between attempts, until one succeeds or `timeout` elapses. Returns
/// whether a connection was established. Panics on an unparseable addr.
fn wait_for_tcp(addr: &str, timeout: Duration) -> bool {
    let started = Instant::now();
    loop {
        if started.elapsed() >= timeout {
            return false;
        }
        let target = addr.parse().expect("invalid socket addr");
        if TcpStream::connect_timeout(&target, Duration::from_secs(1)).is_ok() {
            return true;
        }
        std::thread::sleep(Duration::from_millis(250));
    }
}
/// Gated end-to-end bring-up: `docker compose up -d`, wait for Grafana,
/// VictoriaMetrics, Loki, and Tempo ports, then always tear down.
#[test]
#[ignore]
fn observability_stack_reaches_healthy_state_fast() {
    let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
    assert_eq!(enabled.as_deref(), Some("1"));
    let compose = repo_root().join("observability/docker-compose.yml");
    let up = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&compose)
        .args(["up", "-d"])
        .status()
        .expect("failed to run docker compose up");
    assert!(up.success(), "docker compose up failed");
    // Probe each published port in turn (short-circuits on first failure),
    // but defer the verdict until after teardown so containers never leak.
    let wait = Duration::from_secs(30);
    let mut all_ok = true;
    for port in [3000u16, 8428, 3100, 3200] {
        all_ok = all_ok && wait_for_tcp(&format!("127.0.0.1:{port}"), wait);
    }
    let _ = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&compose)
        .args(["down", "-v"])
        .status();
    assert!(all_ok, "observability stack did not become reachable in time");
}

View File

@@ -0,0 +1,43 @@
use std::{fs, path::PathBuf, thread, time::Duration};
use api::PlacementStore;
/// Build a collision-resistant temp-file path for `name`, embedding the
/// process id and current nanosecond timestamp in the filename.
fn tmp_file(name: &str) -> PathBuf {
    let nanos = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let pid = std::process::id();
    std::env::temp_dir().join(format!("cloudlysis-control-{name}-{pid}-{nanos}.json"))
}
/// Rewriting the placement file on disk must be picked up by the next
/// read: the store swaps from revision r1 to r2 without being recreated.
#[test]
fn placement_store_hot_reload_swaps_atomically() {
    let path = tmp_file("placement");
    let r1 = r#"{"revision":"r1","aggregate_placement":{"placements":[]},"projection_placement":{"placements":[]},"runner_placement":{"placements":[]}}"#;
    let r2 = r#"{"revision":"r2","aggregate_placement":{"placements":[]},"projection_placement":{"placements":[]},"runner_placement":{"placements":[]}}"#;
    fs::write(&path, r1).unwrap();
    let store = PlacementStore::new(path.clone());
    assert_eq!(store.get_for_kind(api::ServiceKind::Aggregate).revision, "r1");
    // Brief pause before rewriting — presumably so the store's change
    // detection (file mtime?) sees a distinct timestamp; confirm against
    // PlacementStore's reload strategy.
    thread::sleep(Duration::from_millis(5));
    fs::write(&path, r2).unwrap();
    assert_eq!(store.get_for_kind(api::ServiceKind::Aggregate).revision, "r2");
    let _ = fs::remove_file(&path);
}

View File

@@ -0,0 +1,31 @@
use std::{fs, path::PathBuf};
/// SwarmStore reads a snapshot file deterministically: services and their
/// tasks come back exactly as written, and unknown services yield no tasks.
#[test]
fn swarm_store_is_deterministic_from_file() {
    let snapshot_path = std::env::temp_dir().join(format!(
        "cloudlysis-control-swarm-{}-{}.json",
        std::process::id(),
        uuid::Uuid::new_v4()
    ));
    let snapshot = r#"{"services":[{"name":"gateway","image":"x","mode":"replicated","replicas":"1/1","updated_at":null}],"tasks":[{"id":"t1","service":"gateway","node":"n1","desired_state":"running","current_state":"running","error":null}]}"#;
    fs::write(&snapshot_path, snapshot).unwrap();
    let store = api::SwarmStore::new(PathBuf::from(&snapshot_path));
    let services = store.list_services();
    assert_eq!(services.len(), 1);
    assert_eq!(services[0].name, "gateway");
    let gateway_tasks = store.list_tasks("gateway");
    assert_eq!(gateway_tasks.len(), 1);
    assert_eq!(gateway_tasks[0].id, "t1");
    // A service absent from the snapshot has no tasks.
    assert_eq!(store.list_tasks("missing").len(), 0);
    let _ = fs::remove_file(&snapshot_path);
}

View File

@@ -0,0 +1,42 @@
use std::time::Duration;
/// Gated swarm smoke test: deploy the control-plane stack, list services,
/// then remove the stack — with a hard timeout on every docker call.
#[tokio::test]
#[ignore]
async fn docker_swarm_smoke_test_is_gated_and_times_out() {
    let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
    assert_eq!(enabled.as_deref(), Some("1"));
    let stack = "cloudlysis_control_test";
    // Repo root is two levels above this crate's manifest dir.
    let compose = std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .ancestors()
        .nth(2)
        .unwrap()
        .join("swarm/stacks/control-plane.yml");
    let deploy = tokio::process::Command::new("docker")
        .args(["stack", "deploy", "-c"])
        .arg(&compose)
        .arg(stack)
        .output();
    let deploy_out = tokio::time::timeout(Duration::from_secs(30), deploy)
        .await
        .expect("docker stack deploy timed out")
        .expect("failed to run docker stack deploy");
    assert!(
        deploy_out.status.success(),
        "docker stack deploy failed: {}",
        String::from_utf8_lossy(&deploy_out.stderr)
    );
    // Listing services is informational; stack removal is always attempted
    // afterwards regardless of the listing's outcome.
    let ls = tokio::process::Command::new("docker")
        .args(["service", "ls"])
        .output();
    let _ = tokio::time::timeout(Duration::from_secs(10), ls).await;
    let rm = tokio::process::Command::new("docker")
        .args(["stack", "rm"])
        .arg(stack)
        .output();
    let _ = tokio::time::timeout(Duration::from_secs(10), rm).await;
}

View File

@@ -0,0 +1,40 @@
use std::{fs, path::PathBuf};
/// Resolve the repository root: two directory levels above this crate's
/// manifest directory.
fn repo_root() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    // nth(2) of ancestors() is the grandparent, same as two parent() hops.
    manifest_dir
        .ancestors()
        .nth(2)
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// Both swarm stack files must be readable and parse as YAML.
#[test]
fn stack_files_parse_as_yaml() {
    let root = repo_root();
    let stacks = [
        "swarm/stacks/control-plane.yml",
        "swarm/stacks/observability.yml",
    ];
    for rel in stacks {
        let raw = fs::read_to_string(root.join(rel)).unwrap();
        serde_yaml::from_str::<serde_yaml::Value>(&raw).unwrap();
    }
}
/// The control-plane stack must declare both the API and UI services.
#[test]
fn control_plane_stack_has_required_services() {
    let raw = fs::read_to_string(repo_root().join("swarm/stacks/control-plane.yml")).unwrap();
    let doc: serde_yaml::Value = serde_yaml::from_str(&raw).unwrap();
    let services = doc
        .get("services")
        .and_then(|x| x.as_mapping())
        .expect("missing services");
    for required in ["control-api", "control-ui"] {
        let key = serde_yaml::Value::String(required.to_string());
        assert!(services.contains_key(key), "missing service {required}");
    }
}