feat(billing): implement tenant subscription entitlements system (milestones 0-6)
Some checks failed
ci / ui (push) Failing after 28s
ci / rust (push) Failing after 2m40s
images / build-and-push (push) Failing after 19s

This commit is contained in:
2026-03-30 18:41:23 +03:00
parent 5992044b7e
commit 2595e7f1c5
63 changed files with 8448 additions and 321 deletions

View File

@@ -0,0 +1,174 @@
use api::{
AppState, AuditStore, AuthConfig, ConfigLocks, JobStore, PlacementStore, SwarmStore,
TenantLocks, billing::BillingStore, config_registry::ConfigRegistry,
};
use axum::{
Router,
body::Body,
http::{Request, StatusCode, header},
};
use jsonwebtoken::{EncodingKey, Header, encode};
use metrics_exporter_prometheus::PrometheusBuilder;
use serde::Serialize;
use std::{
path::PathBuf,
sync::{Arc, OnceLock},
};
use tower::ServiceExt;
use uuid::Uuid;
/// True only when `CONTROL_TEST_BILLING_PROD=1`, gating production smoke tests.
fn prod_enabled() -> bool {
    matches!(std::env::var("CONTROL_TEST_BILLING_PROD"), Ok(v) if v == "1")
}
static HANDLE: OnceLock<metrics_exporter_prometheus::PrometheusHandle> = OnceLock::new();
/// Resolve the repository root: two directories above this crate's manifest.
fn repo_root() -> PathBuf {
    let manifest = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let root = manifest
        .parent()
        .and_then(|p| p.parent())
        .expect("api crate should live under repo root");
    root.to_path_buf()
}
/// Minimal JWT claims payload used to mint test bearer tokens.
#[derive(Serialize)]
struct TestClaims {
    // Subject (caller's user id) as seen by the control API.
    sub: String,
    // Opaque session identifier carried alongside the subject.
    session_id: String,
    // Permission strings granted to the token (e.g. "control:read").
    permissions: Vec<String>,
    // Expiry as seconds since the Unix epoch.
    exp: usize,
}
/// Mint a short-lived (60s) HS256 JWT carrying the given permission strings.
fn make_token(secret: &[u8], perms: &[&str]) -> String {
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let claims = TestClaims {
        sub: "user_1".to_string(),
        session_id: "sess_1".to_string(),
        permissions: perms.iter().map(|p| p.to_string()).collect(),
        exp: (now + 60) as usize,
    };
    encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).unwrap()
}
/// Build an in-process control-API `Router` for the billing smoke test.
///
/// Provider selection comes from `CONTROL_BILLING_PROVIDER`: "stripe" wires
/// the real Stripe provider from env credentials, anything else (including
/// unset) uses the mock provider. Billing enforcement is enabled.
fn test_app() -> Router {
    // The Prometheus recorder is process-global; install it at most once.
    let handle = HANDLE
        .get_or_init(|| {
            PrometheusBuilder::new()
                .install_recorder()
                .expect("failed to install prometheus recorder")
        })
        .clone();
    let provider_type =
        std::env::var("CONTROL_BILLING_PROVIDER").unwrap_or_else(|_| "mock".to_string());
    let billing_provider: Arc<dyn api::billing::BillingProvider> = match provider_type.as_str() {
        // Missing env vars become empty strings here; any failure will then
        // surface at request time from the provider, not at construction.
        "stripe" => Arc::new(api::billing::StripeProvider {
            secret_key: std::env::var("CONTROL_STRIPE_SECRET_KEY").unwrap_or_default(),
            price_pro: std::env::var("CONTROL_STRIPE_PRICE_ID_PRO").unwrap_or_default(),
            price_enterprise: std::env::var("CONTROL_STRIPE_PRICE_ID_ENTERPRISE")
                .unwrap_or_default(),
        }),
        _ => Arc::new(api::billing::MockProvider),
    };
    api::build_app(AppState {
        prometheus: handle,
        // Must match the secret `make_token` signs with.
        auth: AuthConfig {
            hs256_secret: Some(b"test_secret".to_vec()),
        },
        jobs: JobStore::default(),
        audit: AuditStore::default(),
        tenant_locks: TenantLocks::default(),
        config_locks: ConfigLocks::default(),
        http: reqwest::Client::new(),
        placement: PlacementStore::new(repo_root().join("config/placement/dev.json")),
        // Billing state persists to a temp file reused across runs.
        billing: BillingStore::new(std::env::temp_dir().join("billing-prod-smoke.json")),
        billing_provider,
        billing_enforcement_enabled: true,
        config: ConfigRegistry::new(None, None),
        fleet_services: vec![],
        swarm: SwarmStore::new(repo_root().join("swarm/dev.json")),
        docs: None,
    })
}
/// Opt-in smoke test that drives the billing endpoints end to end against
/// whichever provider `test_app` selected (possibly real Stripe).
/// Gated behind `CONTROL_TEST_BILLING_PROD=1`.
#[tokio::test]
async fn billing_production_smoke_test() {
    if !prod_enabled() {
        eprintln!("skipping: set CONTROL_TEST_BILLING_PROD=1 to enable production smoke tests");
        return;
    }
    let app = test_app();
    let token = make_token(b"test_secret", &["control:read", "control:write"]);
    // Fresh tenant per run so state in the shared billing file can't collide.
    let tenant_id = Uuid::new_v4();
    // 1. Verify GET billing works (empty initially)
    let res = app
        .clone()
        .oneshot(
            Request::builder()
                .uri(format!("/admin/v1/tenants/{tenant_id}/billing"))
                .header(header::AUTHORIZATION, format!("Bearer {token}"))
                .header("x-tenant-id", tenant_id.to_string())
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    // 2. Verify Checkout session generation
    let res = app
        .clone()
        .oneshot(
            Request::builder()
                .uri(format!("/admin/v1/tenants/{tenant_id}/billing/checkout"))
                .method("POST")
                .header(header::AUTHORIZATION, format!("Bearer {token}"))
                .header("x-tenant-id", tenant_id.to_string())
                .header(header::CONTENT_TYPE, "application/json")
                .body(Body::from(
                    serde_json::json!({
                        "plan": "pro",
                        "return_path": "/billing"
                    })
                    .to_string(),
                ))
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let body = axum::body::to_bytes(res.into_body(), 1024 * 1024)
        .await
        .unwrap();
    let v: serde_json::Value = serde_json::from_slice(&body).unwrap();
    // Checkout responses must carry a redirect URL.
    assert!(v.get("url").and_then(|u| u.as_str()).is_some());
    // 3. Verify Portal session generation (may fail if tenant has no stripe customer id yet, which is expected for fresh tenant)
    let res = app
        .clone()
        .oneshot(
            Request::builder()
                .uri(format!("/admin/v1/tenants/{tenant_id}/billing/portal"))
                .method("POST")
                .header(header::AUTHORIZATION, format!("Bearer {token}"))
                .header("x-tenant-id", tenant_id.to_string())
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    // For smoke test, we just want to see it reached the provider and didn't crash
    assert!(res.status() == StatusCode::OK || res.status() == StatusCode::INTERNAL_SERVER_ERROR);
}

View File

@@ -0,0 +1,250 @@
use api::{
AppState, AuditStore, AuthConfig, ConfigLocks, ConfigRegistry, JobStore, PlacementStore,
SwarmStore, TenantLocks, config_registry::NatsKvSource,
};
use axum::{
Router,
body::Body,
http::{Request, StatusCode, header},
};
use jsonwebtoken::{EncodingKey, Header, encode};
use metrics_exporter_prometheus::PrometheusBuilder;
use serde::Serialize;
use std::{path::PathBuf, sync::OnceLock, time::Duration};
use tower::ServiceExt;
use uuid::Uuid;
/// NATS-backed config tests need both `CONTROL_TEST_NATS=1` and a
/// `CONTROL_TEST_NATS_URL` to be present.
fn enabled() -> bool {
    let gate_on = matches!(std::env::var("CONTROL_TEST_NATS"), Ok(v) if v == "1");
    gate_on && std::env::var("CONTROL_TEST_NATS_URL").is_ok()
}
/// JWT claims serialized into test tokens for the config-jobs tests.
#[derive(Serialize)]
struct TestClaims {
    // Caller's user id (subject).
    sub: String,
    // Opaque session identifier.
    session_id: String,
    // Permission strings the token grants (e.g. "control:write").
    permissions: Vec<String>,
    // Expiry, seconds since the Unix epoch.
    exp: usize,
}
/// Create a short-lived (60s) HS256 bearer token with the given permissions.
fn make_token(secret: &[u8], perms: &[&str]) -> String {
    let expires_at = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs()
        + 60;
    let claims = TestClaims {
        sub: String::from("user_1"),
        session_id: String::from("sess_1"),
        permissions: perms.iter().copied().map(String::from).collect(),
        exp: expires_at as usize,
    };
    encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).unwrap()
}
static HANDLE: OnceLock<metrics_exporter_prometheus::PrometheusHandle> = OnceLock::new();
/// Repository root, derived by walking two levels up from the crate manifest.
fn repo_root() -> PathBuf {
    let crate_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    crate_dir
        .parent()
        .and_then(std::path::Path::parent)
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// Poll the jobs endpoint until the job leaves `pending`/`running`, returning
/// the last job snapshot. Gives up (returning whatever state was last seen)
/// after roughly two seconds of polling at 25ms intervals.
async fn wait_done(app: Router, job_id: Uuid, token: &str) -> serde_json::Value {
    let started = tokio::time::Instant::now();
    loop {
        let req = Request::builder()
            .uri(format!("/admin/v1/jobs/{job_id}"))
            .header(header::AUTHORIZATION, format!("Bearer {token}"))
            .body(Body::empty())
            .unwrap();
        let res = app.clone().oneshot(req).await.unwrap();
        assert_eq!(res.status(), StatusCode::OK);
        let bytes = axum::body::to_bytes(res.into_body(), 1024 * 1024)
            .await
            .unwrap();
        let job: serde_json::Value = serde_json::from_slice(&bytes).unwrap();
        let status = job
            .get("status")
            .and_then(|v| v.as_str())
            .unwrap_or("unknown");
        let finished = !matches!(status, "pending" | "running");
        if finished || started.elapsed() > Duration::from_secs(2) {
            return job;
        }
        tokio::time::sleep(Duration::from_millis(25)).await;
    }
}
/// End-to-end config apply + read-back + rollback through the jobs API,
/// backed by NATS KV sources. Gated: requires `CONTROL_TEST_NATS=1` and
/// `CONTROL_TEST_NATS_URL`.
#[tokio::test]
async fn config_jobs_with_nats_kv_are_env_gated() {
    if !enabled() {
        eprintln!(
            "skipping: set CONTROL_TEST_NATS=1 and CONTROL_TEST_NATS_URL=nats://... to enable nats config tests"
        );
        return;
    }
    let nats_url = std::env::var("CONTROL_TEST_NATS_URL").unwrap();
    // Mutating process env is unsynchronized across threads; tolerable here.
    // NOTE(review): presumably something in the app reads
    // CONTROL_CONFIG_NATS_URL — confirm against build_app / job workers.
    unsafe {
        std::env::set_var("CONTROL_CONFIG_NATS_URL", &nats_url);
    }
    // Unique bucket and keys per run so concurrent/repeated runs don't clash.
    let bucket = format!("cloudlysis-test-config-{}", Uuid::new_v4());
    let routing_key = format!("routing/{}", Uuid::new_v4());
    let placement_key = format!("placement/{}", Uuid::new_v4());
    let routing_src = NatsKvSource::connect(nats_url.clone(), bucket.clone(), routing_key)
        .await
        .expect("connect routing kv");
    let placement_src = NatsKvSource::connect(nats_url.clone(), bucket.clone(), placement_key)
        .await
        .expect("connect placement kv");
    let config = ConfigRegistry::new(
        Some(std::sync::Arc::new(routing_src)),
        Some(std::sync::Arc::new(placement_src)),
    );
    let secret = b"test_secret".to_vec();
    let token = make_token(&secret, &["control:write", "control:read"]);
    // Process-global Prometheus recorder; install at most once.
    let handle = HANDLE
        .get_or_init(|| {
            PrometheusBuilder::new()
                .install_recorder()
                .expect("failed to install prometheus recorder")
        })
        .clone();
    let app = api::build_app(AppState {
        prometheus: handle,
        auth: AuthConfig {
            hs256_secret: Some(secret),
        },
        jobs: JobStore::default(),
        audit: AuditStore::default(),
        tenant_locks: TenantLocks::default(),
        config_locks: ConfigLocks::default(),
        http: reqwest::Client::new(),
        placement: PlacementStore::new(repo_root().join("config/placement/dev.json")),
        billing: api::billing::BillingStore::new(std::env::temp_dir().join("billing-test.json")),
        billing_provider: std::sync::Arc::new(api::billing::MockProvider),
        billing_enforcement_enabled: false,
        config,
        fleet_services: vec![],
        swarm: SwarmStore::new(repo_root().join("swarm/dev.json")),
        docs: None,
    });
    // Minimal single-shard routing document to apply.
    let routing_value = serde_json::json!({
        "revision": 1,
        "aggregate_placement": { "t1": "local" },
        "projection_placement": { "t1": "local" },
        "runner_placement": { "t1": "local" },
        "aggregate_shards": { "local": ["http://aggregate:50051"] },
        "projection_shards": { "local": ["http://projection:8080"] },
        "runner_shards": { "local": ["http://runner:8080"] }
    });
    // Submit an apply job for the routing domain.
    let apply = app
        .clone()
        .oneshot(
            Request::builder()
                .uri("/admin/v1/jobs/config/apply")
                .method("POST")
                .header(header::AUTHORIZATION, format!("Bearer {token}"))
                .header("idempotency-key", format!("k-{}", Uuid::new_v4()))
                .header(header::CONTENT_TYPE, "application/json")
                .body(Body::from(
                    serde_json::json!({
                        "domain": "routing",
                        "expected_revision": null,
                        "reason": "test apply",
                        "value": routing_value
                    })
                    .to_string(),
                ))
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(apply.status(), StatusCode::OK);
    let body = axum::body::to_bytes(apply.into_body(), 1024 * 1024)
        .await
        .unwrap();
    let v: serde_json::Value = serde_json::from_slice(&body).unwrap();
    let job_id = Uuid::parse_str(v.get("job_id").unwrap().as_str().unwrap()).unwrap();
    // The apply runs asynchronously; poll until it finishes.
    let job = wait_done(app.clone(), job_id, &token).await;
    assert_eq!(
        job.get("status").and_then(|v| v.as_str()),
        Some("succeeded")
    );
    // Read the applied config back and sanity-check domain + revision bump.
    let get = app
        .clone()
        .oneshot(
            Request::builder()
                .uri("/admin/v1/config/routing")
                .header(header::AUTHORIZATION, format!("Bearer {token}"))
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(get.status(), StatusCode::OK);
    let body = axum::body::to_bytes(get.into_body(), 1024 * 1024)
        .await
        .unwrap();
    let got: serde_json::Value = serde_json::from_slice(&body).unwrap();
    assert_eq!(got.get("domain").unwrap().as_str().unwrap(), "routing");
    assert!(got.get("revision").unwrap().as_u64().unwrap_or(0) > 0);
    // Roll the routing config back and wait for that job to succeed too.
    let rollback = app
        .clone()
        .oneshot(
            Request::builder()
                .uri("/admin/v1/jobs/config/rollback")
                .method("POST")
                .header(header::AUTHORIZATION, format!("Bearer {token}"))
                .header("idempotency-key", format!("k-{}", Uuid::new_v4()))
                .header(header::CONTENT_TYPE, "application/json")
                .body(Body::from(
                    serde_json::json!({
                        "domain": "routing",
                        "reason": "test rollback"
                    })
                    .to_string(),
                ))
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(rollback.status(), StatusCode::OK);
    let body = axum::body::to_bytes(rollback.into_body(), 1024 * 1024)
        .await
        .unwrap();
    let v: serde_json::Value = serde_json::from_slice(&body).unwrap();
    let rb_id = Uuid::parse_str(v.get("job_id").unwrap().as_str().unwrap()).unwrap();
    let rb_job = wait_done(app.clone(), rb_id, &token).await;
    assert_eq!(
        rb_job.get("status").and_then(|v| v.as_str()),
        Some("succeeded")
    );
}

View File

@@ -0,0 +1,157 @@
use jsonwebtoken::{EncodingKey, Header, encode};
use reqwest::StatusCode;
use serde::Serialize;
use serde_json::json;
use std::time::Duration;
use uuid::Uuid;
/// JWT claims payload for tokens minted by the env-gated smoke test.
#[derive(Serialize)]
struct TestClaims {
    // Subject (caller id) embedded in the token.
    sub: String,
    // Opaque session identifier.
    session_id: String,
    // Granted permission strings (e.g. "control:read").
    permissions: Vec<String>,
    // Expiry, seconds since the Unix epoch.
    exp: usize,
}
/// Mint an HS256 JWT for the "smoke" user, valid for five minutes.
fn make_token(secret: &[u8], perms: &[&str]) -> String {
    let unix_now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let claims = TestClaims {
        sub: "smoke".to_string(),
        session_id: "smoke".to_string(),
        permissions: perms.iter().map(|p| String::from(*p)).collect(),
        exp: (unix_now + 300) as usize,
    };
    encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).unwrap()
}
/// Smoke test against a *deployed* control API (`CONTROL_TEST_BASE_URL`):
/// presigned upload -> direct S3 PUT -> list -> presigned download -> GET,
/// asserting the bytes round-trip. Gated behind `CONTROL_TEST_SMOKE=1`.
#[tokio::test]
async fn control_api_docs_smoke_is_env_gated() {
    let enabled = std::env::var("CONTROL_TEST_SMOKE").ok();
    if enabled.as_deref() != Some("1") {
        eprintln!("skipping: set CONTROL_TEST_SMOKE=1 to enable env smoke tests");
        return;
    }
    let base_url =
        std::env::var("CONTROL_TEST_BASE_URL").expect("CONTROL_TEST_BASE_URL is required");
    // Normalize so path joins below never produce `//`.
    let base_url = base_url.trim_end_matches('/').to_string();
    // Either provide a token directly, or provide secret+perms to mint one.
    let token = if let Ok(t) = std::env::var("CONTROL_TEST_TOKEN") {
        t
    } else {
        let secret = std::env::var("CONTROL_TEST_JWT_SECRET")
            .expect("CONTROL_TEST_TOKEN or CONTROL_TEST_JWT_SECRET is required");
        make_token(secret.as_bytes(), &["control:read", "control:write"])
    };
    // Random tenant unless the target environment pins one.
    let tenant_id = std::env::var("CONTROL_TEST_TENANT_ID")
        .ok()
        .unwrap_or_else(|| Uuid::new_v4().to_string());
    let http = reqwest::Client::builder()
        .timeout(Duration::from_secs(15))
        .build()
        .unwrap();
    // Health.
    let health = http
        .get(format!("{base_url}/health"))
        .send()
        .await
        .expect("health request failed");
    assert!(health.status().is_success(), "health not ok");
    // Presign upload.
    let doc_id = Uuid::new_v4().to_string();
    let filename = "smoke.txt";
    let presign_up = http
        .post(format!(
            "{base_url}/admin/v1/tenants/{tenant_id}/docs/presign/upload"
        ))
        .header("authorization", format!("Bearer {token}"))
        .header("x-tenant-id", &tenant_id)
        .json(&json!({
            "doc_type": "deployments",
            "doc_id": doc_id,
            "filename": filename,
            "content_type": "text/plain",
        }))
        .send()
        .await
        .expect("presign upload failed");
    assert!(
        presign_up.status().is_success(),
        "presign upload not ok: {}",
        presign_up.status()
    );
    // The presign response carries the S3 PUT url and the object key.
    let up_json: serde_json::Value = presign_up.json().await.unwrap();
    let put_url = up_json.get("url").and_then(|v| v.as_str()).unwrap();
    let key = up_json
        .get("key")
        .and_then(|v| v.as_str())
        .unwrap()
        .to_string();
    // PUT bytes to S3 directly.
    let payload = b"hello-smoke".to_vec();
    let put = http
        .put(put_url)
        .header("content-type", "text/plain")
        .body(payload.clone())
        .send()
        .await
        .expect("s3 put failed");
    assert!(put.status().is_success(), "s3 put not ok: {}", put.status());
    // List should include key.
    let list = http
        .get(format!(
            "{base_url}/admin/v1/tenants/{tenant_id}/docs?prefix=deployments/"
        ))
        .header("authorization", format!("Bearer {token}"))
        .header("x-tenant-id", &tenant_id)
        .send()
        .await
        .expect("list failed");
    assert!(list.status().is_success(), "list not ok");
    let list_json: serde_json::Value = list.json().await.unwrap();
    let objects = list_json.get("objects").and_then(|v| v.as_array()).unwrap();
    assert!(
        objects
            .iter()
            .any(|o| o.get("key").and_then(|k| k.as_str()) == Some(key.as_str())),
        "expected list to include presigned upload key"
    );
    // Presign download and fetch bytes.
    let presign_down = http
        .post(format!(
            "{base_url}/admin/v1/tenants/{tenant_id}/docs/presign/download"
        ))
        .header("authorization", format!("Bearer {token}"))
        .header("x-tenant-id", &tenant_id)
        .json(&json!({ "key": key }))
        .send()
        .await
        .expect("presign download failed");
    assert!(
        presign_down.status().is_success(),
        "presign download not ok"
    );
    let down_json: serde_json::Value = presign_down.json().await.unwrap();
    let get_url = down_json.get("url").and_then(|v| v.as_str()).unwrap();
    let got = http.get(get_url).send().await.expect("s3 get failed");
    assert_eq!(got.status(), StatusCode::OK);
    // The downloaded bytes must match exactly what was uploaded.
    let got_bytes = got.bytes().await.unwrap().to_vec();
    assert_eq!(got_bytes, payload);
}

View File

@@ -11,7 +11,7 @@ fn repo_root() -> PathBuf {
#[test]
fn docker_compose_files_parse_and_include_required_services() {
let root = repo_root();
let compose = fs::read_to_string(root.join("observability/docker-compose.yml")).unwrap();
let compose = fs::read_to_string(root.join("docker-compose.yml")).unwrap();
let v: serde_yaml::Value = serde_yaml::from_str(&compose).unwrap();
let services = v
@@ -19,7 +19,15 @@ fn docker_compose_files_parse_and_include_required_services() {
.and_then(|x| x.as_mapping())
.expect("missing services");
for required in ["grafana", "victoria-metrics", "vmagent", "loki", "tempo"] {
// Core + optional observability services are all declared in one compose file.
for required in [
"grafana",
"victoria-metrics",
"vmagent",
"loki",
"tempo",
"mailhog",
] {
assert!(
services.contains_key(serde_yaml::Value::String(required.to_string())),
"missing service {required}"
@@ -28,17 +36,19 @@ fn docker_compose_files_parse_and_include_required_services() {
}
#[tokio::test]
#[ignore]
async fn docker_compose_config_validation_is_gated_and_fast() {
let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
assert_eq!(enabled.as_deref(), Some("1"));
if enabled.as_deref() != Some("1") {
eprintln!("skipping: set CONTROL_TEST_DOCKER=1 to enable docker compose validation");
return;
}
let root = repo_root();
let compose = root.join("observability/docker-compose.yml");
let compose = root.join("docker-compose.yml");
let cmd = tokio::process::Command::new("docker")
.args(["compose", "-f"])
.arg(compose)
.arg(&compose)
.args(["config"])
.output();
@@ -52,4 +62,22 @@ async fn docker_compose_config_validation_is_gated_and_fast() {
"docker compose config failed: {}",
String::from_utf8_lossy(&out.stderr)
);
// Validate full-stack profile wiring too.
let cmd = tokio::process::Command::new("docker")
.args(["compose", "-f"])
.arg(&compose)
.args(["--profile", "observability", "config"])
.output();
let out = tokio::time::timeout(Duration::from_secs(10), cmd)
.await
.expect("docker compose config (observability profile) timed out")
.expect("failed to run docker compose config (observability profile)");
assert!(
out.status.success(),
"docker compose config (observability profile) failed: {}",
String::from_utf8_lossy(&out.stderr)
);
}

View File

@@ -1,6 +1,9 @@
#[test]
#[ignore]
fn docker_integration_tests_are_gated() {
let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
if enabled.as_deref() != Some("1") {
eprintln!("skipping: set CONTROL_TEST_DOCKER=1 to enable docker integration tests");
return;
}
assert_eq!(enabled.as_deref(), Some("1"));
}

View File

@@ -0,0 +1,169 @@
use jsonwebtoken::{EncodingKey, Header, encode};
use reqwest::header::{HeaderMap, HeaderValue};
use serde::Serialize;
use std::{path::PathBuf, process::Command, time::Duration};
use uuid::Uuid;
/// Repository root, i.e. the second ancestor of this crate's manifest dir.
fn repo_root() -> PathBuf {
    PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .ancestors()
        .nth(2)
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// Docker-backed tests run only when `CONTROL_TEST_DOCKER=1` (whitespace
/// around the value is tolerated).
fn docker_enabled() -> bool {
    match std::env::var("CONTROL_TEST_DOCKER") {
        Ok(v) => v.trim() == "1",
        Err(_) => false,
    }
}
/// Path to the repository's top-level `docker-compose.yml`.
fn compose_file() -> PathBuf {
    let mut path = repo_root();
    path.push("docker-compose.yml");
    path
}
/// JWT claims payload for tokens used against the compose-managed API.
#[derive(Serialize)]
struct TestClaims {
    // Subject (caller id).
    sub: String,
    // Opaque session identifier.
    session_id: String,
    // Granted permission strings.
    permissions: Vec<String>,
    // Expiry, seconds since the Unix epoch.
    exp: usize,
}
/// Mint a five-minute HS256 JWT for `user_1` with the given permissions.
fn make_token(secret: &[u8], perms: &[&str]) -> String {
    let epoch_secs = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let claims = TestClaims {
        sub: String::from("user_1"),
        session_id: String::from("sess_1"),
        permissions: perms.iter().copied().map(str::to_string).collect(),
        exp: (epoch_secs + 300) as usize,
    };
    encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).unwrap()
}
/// Full docs roundtrip (proxy upload -> list -> proxy download) against a
/// compose-managed `control-api`. Gated behind `CONTROL_TEST_DOCKER=1`.
#[tokio::test]
async fn documents_upload_list_download_roundtrip_via_control_api_compose() {
    if !docker_enabled() {
        eprintln!("skipping: set CONTROL_TEST_DOCKER=1 to enable docker compose tests");
        return;
    }
    // Must match docker-compose.yml CONTROL_GATEWAY_JWT_HS256_SECRET.
    let jwt_secret = b"dev_secret";
    let token = make_token(jwt_secret, &["control:read", "control:write"]);
    let compose = compose_file();
    let up = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&compose)
        .args(["up", "-d", "control-api"])
        .status()
        .expect("failed to run docker compose up control-api");
    assert!(up.success(), "docker compose up control-api failed");
    // Wait for control-api to be reachable (port publish is in compose).
    let http = reqwest::Client::builder()
        .timeout(Duration::from_secs(10))
        .build()
        .unwrap();
    let base = "http://127.0.0.1:38080";
    let health_deadline = tokio::time::Instant::now() + Duration::from_secs(30);
    loop {
        if tokio::time::Instant::now() > health_deadline {
            panic!("control-api did not become healthy in time");
        }
        match http.get(format!("{base}/health")).send().await {
            Ok(res) if res.status().is_success() => break,
            _ => tokio::time::sleep(Duration::from_millis(250)).await,
        }
    }
    let tenant_id = Uuid::new_v4().to_string();
    let doc_type = "deployments";
    let doc_id = Uuid::new_v4().to_string();
    let filename = "hello.txt";
    let bytes = b"hello-docs".to_vec();
    let mut headers = HeaderMap::new();
    headers.insert(
        "authorization",
        HeaderValue::from_str(&format!("Bearer {token}")).unwrap(),
    );
    headers.insert("x-tenant-id", HeaderValue::from_str(&tenant_id).unwrap());
    // Upload (proxy endpoint).
    // BUGFIX: interpolate `filename` into the URL — the previous code had a
    // literal placeholder in its place, leaving `filename` unused.
    let put_url =
        format!("{base}/admin/v1/tenants/{tenant_id}/docs/{doc_type}/{doc_id}/{filename}");
    let put = http
        .put(&put_url)
        .headers(headers.clone())
        .header("content-type", "text/plain")
        .body(bytes.clone())
        .send()
        .await
        .expect("upload request failed");
    assert!(
        put.status().is_success(),
        "upload failed: {}",
        put.text().await.unwrap_or_default()
    );
    // The upload response reports the object key used for storage.
    let put_json: serde_json::Value = put.json().await.expect("invalid upload json");
    let key = put_json
        .get("key")
        .and_then(|v| v.as_str())
        .expect("missing key")
        .to_string();
    // List should include the key.
    let list_url = format!("{base}/admin/v1/tenants/{tenant_id}/docs?prefix={doc_type}/");
    let list = http
        .get(&list_url)
        .headers(headers.clone())
        .send()
        .await
        .expect("list request failed");
    assert!(list.status().is_success(), "list failed");
    let list_json: serde_json::Value = list.json().await.expect("invalid list json");
    let objects = list_json
        .get("objects")
        .and_then(|v| v.as_array())
        .expect("missing objects");
    assert!(
        objects
            .iter()
            .any(|o| o.get("key").and_then(|k| k.as_str()) == Some(key.as_str())),
        "expected list to include uploaded key"
    );
    // Download (proxy endpoint) returns same bytes.
    let get_url = format!(
        "{base}/admin/v1/tenants/{tenant_id}/docs/object/{}",
        urlencoding::encode(&key)
    );
    let got = http
        .get(&get_url)
        .headers(headers.clone())
        .send()
        .await
        .expect("download request failed");
    assert!(got.status().is_success(), "download failed");
    let got_bytes = got.bytes().await.expect("download bytes failed").to_vec();
    assert_eq!(got_bytes, bytes);
    // Best-effort cleanup.
    let _ = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&compose)
        .args(["down", "-v"])
        .status();
}

View File

@@ -0,0 +1,123 @@
use api::{
AppState, AuditStore, AuthConfig, ConfigLocks, ConfigRegistry, JobStore, PlacementStore,
SwarmStore, TenantLocks,
};
use axum::{
Router,
body::Body,
http::{Request, StatusCode, header},
};
use jsonwebtoken::{EncodingKey, Header, encode};
use metrics_exporter_prometheus::PrometheusBuilder;
use serde::Serialize;
use std::{fs, path::PathBuf, sync::OnceLock};
use tower::ServiceExt;
static HANDLE: OnceLock<metrics_exporter_prometheus::PrometheusHandle> = OnceLock::new();
/// Resolve the repository root (grandparent of the crate manifest dir).
fn repo_root() -> PathBuf {
    let manifest_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"));
    match manifest_dir.parent().and_then(|p| p.parent()) {
        Some(root) => root.to_path_buf(),
        None => panic!("api crate should live under repo root"),
    }
}
/// JWT claims payload for the drift tests' fixed-secret tokens.
#[derive(Serialize)]
struct TestClaims {
    // Subject (caller id).
    sub: String,
    // Opaque session identifier.
    session_id: String,
    // Granted permission strings (e.g. "control:read").
    permissions: Vec<String>,
    // Expiry, seconds since the Unix epoch.
    exp: usize,
}
/// Mint a one-minute HS256 token signed with the fixed test secret.
fn make_token(perms: &[&str]) -> String {
    let exp = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs() as usize
        + 60;
    let claims = TestClaims {
        sub: "user_1".to_string(),
        session_id: "sess_1".to_string(),
        permissions: perms.iter().map(|p| p.to_string()).collect(),
        exp,
    };
    encode(
        &Header::default(),
        &claims,
        &EncodingKey::from_secret(b"test_secret"),
    )
    .unwrap()
}
/// Write `raw` to a unique temp file and return its path. The file name
/// embeds the pid and a UUID so parallel test runs never collide.
fn temp_swarm_file(raw: &str) -> PathBuf {
    let name = format!(
        "cloudlysis-control-swarm-{}-{}.json",
        std::process::id(),
        uuid::Uuid::new_v4()
    );
    let dst = std::env::temp_dir().join(name);
    fs::write(&dst, raw).expect("failed to write temp swarm file");
    dst
}
/// Build an in-process control-API `Router` whose swarm state is loaded from
/// `swarm_path`. Billing uses the mock provider with enforcement disabled.
fn test_app_with_swarm(swarm_path: PathBuf) -> Router {
    // Process-global Prometheus recorder; install at most once.
    let handle = HANDLE
        .get_or_init(|| {
            PrometheusBuilder::new()
                .install_recorder()
                .expect("failed to install prometheus recorder")
        })
        .clone();
    api::build_app(AppState {
        prometheus: handle,
        // Must match the secret `make_token` signs with.
        auth: AuthConfig {
            hs256_secret: Some(b"test_secret".to_vec()),
        },
        jobs: JobStore::default(),
        audit: AuditStore::default(),
        tenant_locks: TenantLocks::default(),
        config_locks: ConfigLocks::default(),
        http: reqwest::Client::new(),
        placement: PlacementStore::new(repo_root().join("config/placement/dev.json")),
        billing: api::billing::BillingStore::new(
            std::env::temp_dir().join("billing-drift-test.json"),
        ),
        billing_provider: std::sync::Arc::new(api::billing::MockProvider),
        billing_enforcement_enabled: false,
        config: ConfigRegistry::new(None, None),
        fleet_services: vec![],
        // Swarm observations come from the caller-supplied fixture file.
        swarm: SwarmStore::new(swarm_path),
        docs: None,
    })
}
/// A service present in the observed swarm state but not in the desired set
/// must be reported by `/admin/v1/platform/drift` with kind "extra".
#[tokio::test]
async fn drift_marks_extra_services_vs_desired_observation_set() {
    // Fixture: one observed service ("extra-1") and no tasks.
    let swarm = temp_swarm_file(
        r#"{ "services": [{"name":"extra-1","image":null,"mode":null,"replicas":null,"updated_at":null}], "tasks": [] }"#,
    );
    let app = test_app_with_swarm(swarm);
    let token = make_token(&["control:read"]);
    let res = app
        .oneshot(
            Request::builder()
                .uri("/admin/v1/platform/drift")
                .header(header::AUTHORIZATION, format!("Bearer {token}"))
                .body(Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(res.status(), StatusCode::OK);
    let body = axum::body::to_bytes(res.into_body(), 1024 * 1024)
        .await
        .unwrap();
    let v: serde_json::Value = serde_json::from_slice(&body).unwrap();
    let items = v.get("items").and_then(|x| x.as_array()).unwrap();
    // The fixture service must show up as an "extra" drift item.
    assert!(items.iter().any(|i| {
        i.get("kind").and_then(|k| k.as_str()) == Some("extra")
            && i.get("service").and_then(|s| s.as_str()) == Some("extra-1")
    }));
}

View File

@@ -0,0 +1,137 @@
/// Drift detection against a *real* local Docker Swarm: create a throwaway
/// service, then assert the drift endpoint reports it as "extra".
/// Gated behind `CONTROL_TEST_DOCKER=1`; additionally self-skips when docker
/// is missing or Swarm mode is not active.
#[tokio::test]
async fn platform_drift_docker_test_is_gated() {
    use tower::ServiceExt;
    let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
    if enabled.as_deref() != Some("1") {
        eprintln!("skipping: set CONTROL_TEST_DOCKER=1 to enable docker drift tests");
        return;
    }
    // We only run the "real" drift check when Swarm is available locally.
    // If Swarm isn't active, we skip to keep CI/dev machines happy.
    let info = std::process::Command::new("docker")
        .args(["info", "--format", "{{.Swarm.LocalNodeState}}"])
        .output();
    let Ok(info) = info else {
        eprintln!("skipping: docker not available");
        return;
    };
    if !info.status.success() {
        eprintln!("skipping: docker info failed");
        return;
    }
    let state = String::from_utf8_lossy(&info.stdout).trim().to_string();
    if state != "active" {
        eprintln!("skipping: docker swarm not active (LocalNodeState={state})");
        return;
    }
    // Create a short-lived service so drift can see an "extra" observed service.
    let name = format!("cloudlysis-drift-extra-{}", uuid::Uuid::new_v4());
    let create = std::process::Command::new("docker")
        .args([
            "service",
            "create",
            "--name",
            &name,
            "--restart-condition",
            "none",
            "busybox:1.36",
            "sh",
            "-c",
            "sleep 60",
        ])
        .output()
        .expect("docker service create");
    if !create.status.success() {
        eprintln!("skipping: failed to create swarm service (maybe permissions?)");
        return;
    }
    // Ensure cleanup even if assertion fails.
    // RAII guard: removes the swarm service when dropped (incl. on panic).
    struct Cleanup(String);
    impl Drop for Cleanup {
        fn drop(&mut self) {
            let _ = std::process::Command::new("docker")
                .args(["service", "rm", &self.0])
                .output();
        }
    }
    let _cleanup = Cleanup(name.clone());
    // Now call drift via a minimal in-process app configured for docker-cli swarm observation.
    // NOTE(review): unlike the other tests, this installs a recorder without
    // the OnceLock guard — presumably fine because this test runs alone under
    // the env gate; confirm it can't race another recorder install.
    let handle = metrics_exporter_prometheus::PrometheusBuilder::new()
        .install_recorder()
        .expect("failed to install prometheus recorder");
    let app = api::build_app(api::AppState {
        prometheus: handle,
        auth: api::AuthConfig {
            hs256_secret: Some(b"test_secret".to_vec()),
        },
        jobs: api::JobStore::default(),
        audit: api::AuditStore::default(),
        tenant_locks: api::TenantLocks::default(),
        config_locks: api::ConfigLocks::default(),
        http: reqwest::Client::new(),
        placement: api::PlacementStore::new(
            std::path::PathBuf::from(env!("CARGO_MANIFEST_DIR"))
                .parent()
                .and_then(|p| p.parent())
                .unwrap()
                .join("config/placement/dev.json"),
        ),
        billing: api::billing::BillingStore::new(
            std::env::temp_dir().join("billing-drift-test.json"),
        ),
        billing_provider: std::sync::Arc::new(api::billing::MockProvider),
        billing_enforcement_enabled: false,
        config: api::ConfigRegistry::new(None, None),
        fleet_services: vec![],
        // Observe swarm state via the docker CLI rather than a fixture file.
        swarm: api::SwarmStore::new_docker_cli(),
        docs: None,
    });
    // Auth token (control:read).
    let exp = (std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_secs()
        + 60) as usize;
    let token = jsonwebtoken::encode(
        &jsonwebtoken::Header::default(),
        &serde_json::json!({
            "sub": "user_1",
            "session_id": "sess_1",
            "permissions": ["control:read"],
            "exp": exp
        }),
        &jsonwebtoken::EncodingKey::from_secret(b"test_secret"),
    )
    .unwrap();
    let res = app
        .oneshot(
            axum::http::Request::builder()
                .uri("/admin/v1/platform/drift")
                .header(axum::http::header::AUTHORIZATION, format!("Bearer {token}"))
                .body(axum::body::Body::empty())
                .unwrap(),
        )
        .await
        .unwrap();
    assert_eq!(res.status(), axum::http::StatusCode::OK);
    let body = axum::body::to_bytes(res.into_body(), 1024 * 1024)
        .await
        .unwrap();
    let v: serde_json::Value = serde_json::from_slice(&body).unwrap();
    let items = v.get("items").and_then(|x| x.as_array()).unwrap();
    // The throwaway service must be flagged as "extra" drift.
    assert!(
        items.iter().any(|i| {
            i.get("kind").and_then(|k| k.as_str()) == Some("extra")
                && i.get("service").and_then(|s| s.as_str()) == Some(name.as_str())
        }),
        "expected drift to include extra service {name}, got: {v}"
    );
}

View File

@@ -0,0 +1,77 @@
use std::{path::PathBuf, process::Command, time::Duration};
/// Repository root: two directories above this crate's manifest.
fn repo_root() -> PathBuf {
    let api_crate = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    let crates_dir = api_crate
        .parent()
        .expect("api crate should live under repo root");
    let root = crates_dir
        .parent()
        .expect("api crate should live under repo root");
    root.to_path_buf()
}
/// Gate for docker-backed tests: `CONTROL_TEST_DOCKER` must be "1"
/// (surrounding whitespace tolerated).
fn docker_enabled() -> bool {
    std::env::var("CONTROL_TEST_DOCKER").map_or(false, |v| v.trim() == "1")
}
/// Location of the repo's top-level `docker-compose.yml`.
fn compose_file() -> PathBuf {
    let mut f = repo_root();
    f.push("docker-compose.yml");
    f
}
/// Bring up MinIO via compose and verify the three docs buckets exist and the
/// default credentials work, using the `minio-init` helper container so `mc`
/// runs inside the compose network. Gated behind `CONTROL_TEST_DOCKER=1`.
#[test]
fn minio_docs_bucket_exists_and_credentials_work_in_compose_network() {
    if !docker_enabled() {
        eprintln!("skipping: set CONTROL_TEST_DOCKER=1 to enable docker compose tests");
        return;
    }
    let compose = compose_file();
    let up = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&compose)
        .args(["up", "-d", "minio"])
        .status()
        .expect("failed to run docker compose up minio");
    assert!(up.success(), "docker compose up minio failed");
    // The `minio-init` service runs `mc` inside the compose network.
    let out = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&compose)
        .args([
            "run",
            "--rm",
            "minio-init",
            "/bin/sh",
            "-lc",
            "mc alias set local http://minio:9000 minioadmin minioadmin && mc ls local/cloudlysis-docs-0 && mc ls local/cloudlysis-docs-1 && mc ls local/cloudlysis-docs-2",
        ])
        .output()
        .expect("failed to run docker compose run minio-init");
    // Best-effort cleanup (keep it short; other docker tests may reuse this env).
    // Tear down *before* asserting so a failed assertion doesn't leak containers.
    let _ = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&compose)
        .args(["down", "-v"])
        .status();
    assert!(
        out.status.success(),
        "minio-init bucket check failed: {}",
        String::from_utf8_lossy(&out.stderr)
    );
    // `mc ls` prints at least one line when the bucket exists (even if empty it prints the bucket line).
    let stdout = String::from_utf8_lossy(&out.stdout);
    assert!(
        stdout.contains("cloudlysis-docs-0")
            && stdout.contains("cloudlysis-docs-1")
            && stdout.contains("cloudlysis-docs-2"),
        "expected mc ls output to mention bucket: {stdout}"
    );
    // Avoid tests hanging due to docker flakiness.
    // NOTE(review): a 10ms sleep cannot prevent a hang — looks vestigial;
    // confirm intent before removing.
    std::thread::sleep(Duration::from_millis(10));
}

View File

@@ -8,6 +8,20 @@ fn repo_root() -> PathBuf {
.to_path_buf()
}
/// Both S3-mode observability config variants must at least parse as YAML.
#[test]
fn loki_and_tempo_s3_config_variants_are_syntactically_valid() {
    let root = repo_root();
    let candidates = [
        root.join("observability/loki/config.s3.yml"),
        root.join("observability/tempo/config.s3.yml"),
    ];
    for file in candidates {
        let raw = fs::read_to_string(&file).unwrap_or_else(|e| panic!("{file:?}: {e}"));
        if let Err(e) = serde_yaml::from_str::<serde_yaml::Value>(&raw) {
            panic!("{file:?}: {e}");
        }
    }
}
#[test]
fn grafana_provisioning_files_are_syntactically_valid() {
let root = repo_root();

View File

@@ -0,0 +1,218 @@
use reqwest::StatusCode;
use serde_json::json;
use std::{
net::TcpStream,
path::PathBuf,
process::Command,
time::{Duration, Instant},
};
/// Walks two levels up from this crate's manifest directory to reach the
/// repository root (layout: `<repo>/crates/api`).
fn repo_root() -> PathBuf {
    let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
    manifest_dir
        .parent()
        .and_then(|crates_dir| crates_dir.parent())
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// True when `CONTROL_TEST_DOCKER` is set to `1` (surrounding whitespace
/// ignored); gates the docker-dependent tests in this file.
fn docker_enabled() -> bool {
    match std::env::var("CONTROL_TEST_DOCKER") {
        Ok(value) => value.trim() == "1",
        Err(_) => false,
    }
}
/// Polls `addr` with 1s connect attempts until it accepts a TCP connection
/// or `timeout` elapses; returns whether a connection succeeded.
///
/// Panics if `addr` is not a valid socket address — that is a programming
/// error, so we fail fast instead of re-parsing (and potentially
/// re-panicking) on every retry as the previous version did.
fn wait_for_tcp(addr: &str, timeout: Duration) -> bool {
    // Parse once, outside the retry loop; the address never changes.
    let sock_addr = addr.parse().expect("invalid socket addr");
    let start = Instant::now();
    while start.elapsed() < timeout {
        if TcpStream::connect_timeout(&sock_addr, Duration::from_secs(1)).is_ok() {
            return true;
        }
        // Brief backoff between attempts to avoid hammering the port.
        std::thread::sleep(Duration::from_millis(250));
    }
    false
}
/// Recursively lists the objects in `bucket` by shelling into the compose
/// `minio-init` service, so `mc` runs inside the compose network and can
/// reach `minio:9000`. Panics if docker itself cannot be launched.
///
/// Takes `&Path` instead of `&PathBuf` (the idiomatic borrow); existing
/// `&PathBuf` call sites keep compiling via deref coercion.
fn mc_ls_bucket(compose: &std::path::Path, bucket: &str) -> std::process::Output {
    // Run inside compose network so it can reach `minio:9000`.
    Command::new("docker")
        .args(["compose", "-f"])
        .arg(compose)
        .args([
            "run",
            "--rm",
            "minio-init",
            "/bin/sh",
            "-lc",
            &format!(
                "mc alias set local http://minio:9000 minioadmin minioadmin >/dev/null && mc ls --recursive local/{bucket}"
            ),
        ])
        .output()
        .expect("failed to run mc ls")
}
// End-to-end check of the S3 overlay: bring up the base + observability +
// S3 compose files, push a log line into Loki and a span into Tempo (via
// Zipkin), read both back over HTTP, and poll the MinIO buckets until both
// services have persisted objects. Gated behind CONTROL_TEST_DOCKER=1.
#[tokio::test]
async fn loki_and_tempo_write_objects_to_minio_in_s3_mode() {
    if !docker_enabled() {
        eprintln!("skipping: set CONTROL_TEST_DOCKER=1 to enable docker tests");
        return;
    }
    let root = repo_root();
    let base = root.join("docker-compose.yml");
    let obs = root.join("observability/docker-compose.yml");
    let obs_s3 = root.join("observability/docker-compose.s3.yml");
    // Layer the S3 overlay on top of the base and observability stacks.
    let up = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&base)
        .args(["-f"])
        .arg(&obs)
        .args(["-f"])
        .arg(&obs_s3)
        .args(["up", "-d"])
        .status()
        .expect("failed to run docker compose up");
    assert!(up.success(), "docker compose up failed");
    // Wait for Loki (3100), Tempo HTTP (3200), Zipkin ingest (9411), MinIO (9000).
    let reachable = wait_for_tcp("127.0.0.1:3100", Duration::from_secs(45))
        && wait_for_tcp("127.0.0.1:3200", Duration::from_secs(45))
        && wait_for_tcp("127.0.0.1:9411", Duration::from_secs(45))
        && wait_for_tcp("127.0.0.1:9000", Duration::from_secs(45));
    assert!(reachable, "loki/tempo/minio ports not reachable in time");
    let http = reqwest::Client::builder()
        .timeout(Duration::from_secs(10))
        .build()
        .unwrap();
    // Push one log line into Loki.
    let ts_ns = (std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap()
        .as_nanos())
    .to_string();
    let push = http
        .post("http://127.0.0.1:3100/loki/api/v1/push")
        .json(&json!({
            "streams": [{
                "stream": { "app": "cloudlysis-test" },
                "values": [[ts_ns, "hello from test"]]
            }]
        }))
        .send()
        .await
        .expect("loki push request failed");
    // Loki answers 204 No Content on a successful push.
    assert!(
        push.status() == StatusCode::NO_CONTENT,
        "unexpected loki push status: {}",
        push.status()
    );
    // Emit one trace span via Zipkin v2.
    let zipkin = http
        .post("http://127.0.0.1:9411/api/v2/spans")
        .json(&json!([{
            "traceId": "463ac35c9f6413ad48485a3953bb6124",
            "id": "a2fb4a1d1a96d312",
            "name": "test-span",
            "timestamp": 1700000000000000u64,
            "duration": 1000u64,
            "localEndpoint": { "serviceName": "cloudlysis-test" }
        }]))
        .send()
        .await
        .expect("zipkin post failed");
    assert!(
        zipkin.status().is_success(),
        "zipkin ingest failed: {}",
        zipkin.status()
    );
    // Query Loki back to ensure the line is retrievable (not just accepted).
    // Loki may need a short delay to index.
    let loki_deadline = Instant::now() + Duration::from_secs(30);
    let mut loki_ok = false;
    while Instant::now() < loki_deadline && !loki_ok {
        let q = http
            .get("http://127.0.0.1:3100/loki/api/v1/query")
            .query(&[("query", r#"{app="cloudlysis-test"}"#)])
            .send()
            .await
            .expect("loki query failed");
        if q.status().is_success() {
            let v: serde_json::Value = q.json().await.expect("invalid loki query json");
            // We only need to see any non-empty result.
            let has = v
                .get("data")
                .and_then(|d| d.get("result"))
                .and_then(|r| r.as_array())
                .is_some_and(|a| !a.is_empty());
            if has {
                loki_ok = true;
                break;
            }
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    // Query Tempo back by trace id (Zipkin traceId used above).
    let tempo_deadline = Instant::now() + Duration::from_secs(30);
    let mut tempo_ok = false;
    while Instant::now() < tempo_deadline && !tempo_ok {
        let res = http
            .get("http://127.0.0.1:3200/api/traces/463ac35c9f6413ad48485a3953bb6124")
            .send()
            .await
            .expect("tempo get trace failed");
        if res.status().is_success() {
            tempo_ok = true;
            break;
        }
        tokio::time::sleep(Duration::from_millis(500)).await;
    }
    // Poll buckets until at least one object appears.
    let deadline = Instant::now() + Duration::from_secs(45);
    let mut loki_has_objects = false;
    let mut tempo_has_objects = false;
    while Instant::now() < deadline && (!loki_has_objects || !tempo_has_objects) {
        let loki_out = mc_ls_bucket(&base, "cloudlysis-loki");
        if loki_out.status.success() && !loki_out.stdout.is_empty() {
            loki_has_objects = true;
        }
        let tempo_out = mc_ls_bucket(&base, "cloudlysis-tempo");
        if tempo_out.status.success() && !tempo_out.stdout.is_empty() {
            tempo_has_objects = true;
        }
        if !loki_has_objects || !tempo_has_objects {
            tokio::time::sleep(Duration::from_millis(500)).await;
        }
    }
    // Tear down before asserting so a failure does not leave the stack up.
    let _ = Command::new("docker")
        .args(["compose", "-f"])
        .arg(&base)
        .args(["-f"])
        .arg(&obs)
        .args(["-f"])
        .arg(&obs_s3)
        .args(["down", "-v"])
        .status();
    assert!(loki_has_objects, "expected Loki to write objects to MinIO");
    assert!(
        tempo_has_objects,
        "expected Tempo to write objects to MinIO"
    );
    assert!(loki_ok, "expected Loki query to return a result");
    assert!(tempo_ok, "expected Tempo to return the ingested trace");
}

View File

@@ -30,10 +30,12 @@ fn wait_for_tcp(addr: &str, timeout: Duration) -> bool {
}
#[test]
#[ignore]
fn observability_stack_reaches_healthy_state_fast() {
let enabled = std::env::var("CONTROL_TEST_DOCKER").ok();
assert_eq!(enabled.as_deref(), Some("1"));
if enabled.as_deref() != Some("1") {
eprintln!("skipping: set CONTROL_TEST_DOCKER=1 to enable docker observability smoke test");
return;
}
let root = repo_root();
let compose = root.join("observability/docker-compose.yml");

View File

@@ -0,0 +1,116 @@
use api::s3_docs::{DocsConfig, DocsStore};
use uuid::Uuid;
/// True when every S3 connection env var required by the docs-store
/// integration tests is present and non-blank.
fn s3_env_ready() -> bool {
    // Gate integration tests without requiring `-- --ignored`.
    // If CI/local wants these tests to run, it must provide S3 env vars.
    const REQUIRED: [&str; 4] = [
        "CONTROL_S3_ENDPOINT",
        "CONTROL_S3_ACCESS_KEY_ID",
        "CONTROL_S3_SECRET_ACCESS_KEY",
        "CONTROL_S3_BUCKET_DOCS",
    ];
    for key in REQUIRED {
        let present = match std::env::var(key) {
            Ok(value) => !value.trim().is_empty(),
            Err(_) => false,
        };
        if !present {
            return false;
        }
    }
    true
}
// Full round-trip against the docs S3 store: put an object under a fresh
// tenant, read it back, list it under the tenant prefix, then delete it.
// Skipped unless the CONTROL_S3_* env vars are set (see S3_PLAN.md).
#[tokio::test]
async fn s3_docs_roundtrip_put_get_list_delete() {
    if !s3_env_ready() {
        eprintln!("skipping: missing S3 env (see S3_PLAN.md)");
        return;
    }
    let cfg = DocsConfig::from_env().expect("missing S3 env (see S3_PLAN.md)");
    let store = DocsStore::new(cfg)
        .await
        .expect("failed to init docs store");
    // Random tenant/doc ids keep concurrent or repeated runs isolated.
    let tenant_id = Uuid::new_v4().to_string();
    let doc_type = "test";
    let doc_id = Uuid::new_v4().to_string();
    let filename = "hello.txt";
    let key = store
        .key_for(&tenant_id, doc_type, &doc_id, filename)
        .expect("invalid key");
    store
        .put_for_tenant(
            &tenant_id,
            &key,
            b"hello".to_vec(),
            Some("text/plain".to_string()),
        )
        .await
        .expect("put failed");
    // Read back and verify the bytes survived the round trip.
    let (bytes, _ct) = store
        .get_bytes_for_tenant(&tenant_id, &key)
        .await
        .expect("get failed");
    assert_eq!(bytes, b"hello");
    // Listing under the tenant's prefix must include the new key.
    let prefix = format!("{}{}", store.prefix(), tenant_id);
    let objects = store
        .list_for_tenant(&tenant_id, &format!("{prefix}/"))
        .await
        .expect("list failed");
    assert!(objects.iter().any(|o| o.key == key));
    // Clean up the object this test created.
    store
        .delete_for_tenant(&tenant_id, &key)
        .await
        .expect("delete failed");
}
// An object written under tenant A's prefix must show up when listing
// tenant A and must NOT show up when listing tenant B. Skipped unless the
// CONTROL_S3_* env vars are set (see S3_PLAN.md).
#[tokio::test]
async fn s3_docs_tenant_prefix_isolation() {
    if !s3_env_ready() {
        eprintln!("skipping: missing S3 env (see S3_PLAN.md)");
        return;
    }
    let cfg = DocsConfig::from_env().expect("missing S3 env (see S3_PLAN.md)");
    let store = DocsStore::new(cfg)
        .await
        .expect("failed to init docs store");
    // Two fresh tenants so this run cannot collide with earlier data.
    let tenant_a = Uuid::new_v4().to_string();
    let tenant_b = Uuid::new_v4().to_string();
    let doc_type = "test";
    let doc_id = Uuid::new_v4().to_string();
    let filename = "hello.txt";
    let key_a = store
        .key_for(&tenant_a, doc_type, &doc_id, filename)
        .expect("invalid key");
    store
        .put_for_tenant(
            &tenant_a,
            &key_a,
            b"hello-a".to_vec(),
            Some("text/plain".to_string()),
        )
        .await
        .expect("put failed");
    let prefix_a = format!("{}{tenant_a}/", store.prefix());
    let prefix_b = format!("{}{tenant_b}/", store.prefix());
    let objects_a = store
        .list_for_tenant(&tenant_a, &prefix_a)
        .await
        .expect("list a failed");
    let objects_b = store
        .list_for_tenant(&tenant_b, &prefix_b)
        .await
        .expect("list b failed");
    // Tenant A sees its own object; tenant B must not.
    assert!(objects_a.iter().any(|o| o.key == key_a));
    assert!(!objects_b.iter().any(|o| o.key == key_a));
    // Clean up the object this test created.
    store
        .delete_for_tenant(&tenant_a, &key_a)
        .await
        .expect("delete failed");
}

View File

@@ -0,0 +1,36 @@
use std::{path::PathBuf, process::Command};
/// Resolves the repository root: two directories above this crate's
/// manifest directory.
fn repo_root() -> PathBuf {
    PathBuf::from(env!("CARGO_MANIFEST_DIR"))
        .ancestors()
        .nth(2)
        .expect("api crate should live under repo root")
        .to_path_buf()
}
/// True when `CONTROL_TEST_AWSCLI` is set to `1` (surrounding whitespace
/// ignored); gates the aws-cli permission check below.
fn is_enabled() -> bool {
    matches!(
        std::env::var("CONTROL_TEST_AWSCLI").as_deref().map(str::trim),
        Ok("1")
    )
}
/// Runs `docker/scripts/s3_verify_docs.sh` (which drives the aws CLI) and
/// asserts a zero exit status. Gated behind CONTROL_TEST_AWSCLI=1 because
/// it needs both the aws CLI and S3_* credentials in the environment.
#[test]
fn s3_docs_permissions_can_be_verified_with_aws_cli() {
    if !is_enabled() {
        eprintln!("skipping: set CONTROL_TEST_AWSCLI=1 to enable aws-cli S3 permission checks");
        return;
    }
    let script_path = repo_root().join("docker/scripts/s3_verify_docs.sh");
    let output = Command::new("sh")
        .arg(script_path)
        .output()
        .expect("failed to run s3_verify_docs.sh (requires aws cli and S3_* env)");
    // Surface both streams on failure to make CI debugging possible.
    assert!(
        output.status.success(),
        "s3 verify script failed: {}\n{}",
        String::from_utf8_lossy(&output.stdout),
        String::from_utf8_lossy(&output.stderr)
    );
}

View File

@@ -13,6 +13,7 @@ fn stack_files_parse_as_yaml() {
let root = repo_root();
for file in [
root.join("swarm/stacks/control-plane.yml"),
root.join("swarm/stacks/control-plane-prod.yml"),
root.join("swarm/stacks/observability.yml"),
] {
let raw = fs::read_to_string(&file).unwrap();
@@ -38,3 +39,36 @@ fn control_plane_stack_has_required_services() {
);
}
}
/// The production swarm stack must ship `control-api` and `control-ui`,
/// must NOT bundle MinIO, and must declare both S3 credential secrets as
/// `external: true` (provisioned outside the stack).
#[test]
fn control_plane_prod_stack_has_control_api_and_external_s3_secrets() {
    let contents =
        fs::read_to_string(repo_root().join("swarm/stacks/control-plane-prod.yml")).unwrap();
    let doc: serde_yaml::Value = serde_yaml::from_str(&contents).unwrap();
    let services = doc
        .get("services")
        .and_then(|x| x.as_mapping())
        .expect("missing services");
    assert!(services.contains_key(serde_yaml::Value::String("control-api".to_string())));
    assert!(services.contains_key(serde_yaml::Value::String("control-ui".to_string())));
    assert!(
        !services.contains_key(serde_yaml::Value::String("minio".to_string())),
        "prod stack must not bundle MinIO"
    );
    let secrets = doc
        .get("secrets")
        .and_then(|x| x.as_mapping())
        .expect("missing secrets");
    for name in ["control_s3_access_key_id", "control_s3_secret_access_key"] {
        let entry = secrets
            .get(serde_yaml::Value::String(name.to_string()))
            .unwrap_or_else(|| panic!("missing secret {name}"));
        // Default to false: an absent or non-bool `external` field fails.
        let is_external = entry
            .get(serde_yaml::Value::String("external".to_string()))
            .and_then(|x| x.as_bool())
            .unwrap_or(false);
        assert!(is_external, "secret {name} must be external: true");
    }
}