transport: complete M0–M7
Some checks failed
ci / rust (push) Failing after 2m21s
ci / ui (push) Failing after 28s
images / build-and-push (push) Failing after 18s

shared: add stream+consumer policy helpers; NATS context header builder

aggregate/runner/projection: centralize stream validation and header usage; set bounded consumer params

projection: add QueryService gRPC and wire into main; settings include PROJECTION_GRPC_ADDR

gateway: gRPC routing to Projection/Runner with deadlines; bounded read-only retries; pooled gRPC channels (bounded LRU+TTL); admin proxy forwards to gRPC; probes use concurrency limiter + TTL cache

runner: add RunnerAdmin gRPC server (drain, status, reload) and wire into main; settings include RUNNER_GRPC_ADDR

tests: add gateway authz for runner admin, projection tenant isolation, runner admin drain semantics

docs: update TRANSPORT_DEVELOPMENT_PLAN to reflect completed milestones and details
This commit is contained in:
2026-03-30 14:24:14 +03:00
parent 1ab112438b
commit 90c307016d
41 changed files with 2391 additions and 505 deletions

View File

@@ -1,4 +1,4 @@
pub const TENANT_ID_METADATA_KEY: &str = "x-tenant-id";
pub const TENANT_ID_METADATA_KEY: &str = shared::HEADER_X_TENANT_ID;
pub mod proto {
tonic::include_proto!("aggregate.gateway.v1");

View File

@@ -48,14 +48,14 @@ impl CommandService for GrpcCommandServer {
) -> Result<Response<SubmitCommandResponse>, Status> {
let correlation_id = request
.metadata()
.get("x-correlation-id")
.get(shared::HEADER_X_CORRELATION_ID)
.and_then(|v| v.to_str().ok())
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.to_string());
let traceparent = request
.metadata()
.get("traceparent")
.get(shared::HEADER_TRACEPARENT)
.and_then(|v| v.to_str().ok())
.map(|s| s.trim())
.filter(|s| !s.is_empty())
@@ -172,12 +172,16 @@ impl CommandService for GrpcCommandServer {
});
if let Some(correlation_id) = correlation_id.as_deref() {
if let Ok(v) = tonic::metadata::MetadataValue::try_from(correlation_id) {
response.metadata_mut().insert("x-correlation-id", v);
response
.metadata_mut()
.insert(shared::HEADER_X_CORRELATION_ID, v);
}
}
if let Some(traceparent) = traceparent.as_deref() {
if let Ok(v) = tonic::metadata::MetadataValue::try_from(traceparent) {
response.metadata_mut().insert("traceparent", v);
response
.metadata_mut()
.insert(shared::HEADER_TRACEPARENT, v);
}
}
Ok(response)

View File

@@ -54,7 +54,7 @@ impl CommandRequest {
);
if let Some(correlation_id) = self
.headers
.get("x-correlation-id")
.get(shared::HEADER_X_CORRELATION_ID)
.map(|s| s.trim())
.filter(|s| !s.is_empty())
{
@@ -65,7 +65,7 @@ impl CommandRequest {
}
if let Some(traceparent) = self
.headers
.get("traceparent")
.get(shared::HEADER_TRACEPARENT)
.map(|s| s.trim())
.filter(|s| !s.is_empty())
{
@@ -124,7 +124,7 @@ impl CommandServer {
pub fn extract_tenant_id(&self, headers: &HashMap<String, String>) -> TenantId {
headers
.get("x-tenant-id")
.get(shared::HEADER_X_TENANT_ID)
.map(TenantId::new)
.unwrap_or_default()
}
@@ -163,13 +163,13 @@ impl CommandServer {
let correlation_id = request
.headers
.get("x-correlation-id")
.get(shared::HEADER_X_CORRELATION_ID)
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|s| s.to_string());
let trace_id = request
.headers
.get("traceparent")
.get(shared::HEADER_TRACEPARENT)
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.and_then(trace_id_from_traceparent);

View File

@@ -18,6 +18,12 @@ use tokio::sync::RwLock;
use tokio::time::Instant;
// Single JetStream stream that carries every tenant's aggregate events.
const AGGREGATE_STREAM_NAME: &str = "AGGREGATE_EVENTS";
// Tuning for short-lived "fetch" (catch-up read) consumers: allow up to 256
// unacked messages in flight, require an ack within 3s, and never redeliver
// (max_deliver = 1) — a fetch is a one-shot read, so retries are pointless.
const FETCH_CONSUMER_MAX_ACK_PENDING: i64 = 256;
const FETCH_CONSUMER_MAX_DELIVER: i64 = 1;
const FETCH_CONSUMER_ACK_WAIT: Duration = Duration::from_secs(3);
// Tuning for long-lived "subscribe" consumers: same in-flight bound, but a
// much longer 30s ack window and up to 10 delivery attempts so transient
// subscriber failures get retried instead of silently dropping events.
const SUBSCRIBE_CONSUMER_MAX_ACK_PENDING: i64 = 256;
const SUBSCRIBE_CONSUMER_MAX_DELIVER: i64 = 10;
const SUBSCRIBE_CONSUMER_ACK_WAIT: Duration = Duration::from_secs(30);
#[derive(Debug)]
pub struct StreamConfigSettings {
@@ -107,21 +113,18 @@ impl StreamClient {
}
};
let config = StreamConfig {
name: AGGREGATE_STREAM_NAME.to_string(),
subjects: vec!["tenant.*.aggregate.*.*".to_string()],
max_messages: settings.max_messages,
max_bytes: settings.max_bytes,
max_age: settings.max_age,
duplicate_window: settings.duplicate_window,
..Default::default()
};
let expected = stream_policy_config(settings);
let stream = jetstream
.get_or_create_stream(config)
let mut stream = jetstream
.get_or_create_stream(expected.clone())
.await
.map_err(|e| AggregateError::StreamError(format!("Failed to create stream: {}", e)))?;
let info = stream.info().await.map_err(|e| {
AggregateError::StreamError(format!("Failed to load stream info: {}", e))
})?;
validate_stream_config(&expected, &info.config)?;
Ok(stream)
}
@@ -139,28 +142,16 @@ impl StreamClient {
match &self.backend {
StreamBackend::JetStream(jetstream) => {
for event in &events {
let subject =
build_subject(&event.tenant_id, &event.aggregate_type, &event.aggregate_id);
let subject = shared::nats_subject_aggregate_event(
event.tenant_id.as_str(),
event.aggregate_type.as_str(),
&event.aggregate_id.to_string(),
);
let payload = serde_json::to_vec(event).map_err(|e| {
AggregateError::StreamError(format!("Serialization error: {}", e))
})?;
let mut headers = async_nats::HeaderMap::new();
headers.insert("Nats-Msg-Id", event.event_id.to_string().as_str());
headers.insert("aggregate-version", event.version.to_string().as_str());
headers.insert("tenant-id", event.tenant_id.as_str());
headers.insert("aggregate-type", event.aggregate_type.as_str());
headers.insert("event-type", event.event_type.as_str());
if let Some(correlation_id) = event.correlation_id.as_deref() {
headers.insert("x-correlation-id", correlation_id);
headers.insert("correlation-id", correlation_id);
}
if let Some(traceparent) = event.traceparent.as_deref() {
headers.insert("traceparent", traceparent);
if let Some(trace_id) = shared::trace_id_from_traceparent(traceparent) {
headers.insert("trace-id", trace_id);
}
}
let headers = build_event_headers(event);
let result = jetstream
.publish_with_headers(subject.clone(), headers.clone(), payload.into())
@@ -248,6 +239,9 @@ impl StreamClient {
filter_subject: subject.clone(),
deliver_policy: DeliverPolicy::All,
ack_policy: AckPolicy::Explicit,
ack_wait: FETCH_CONSUMER_ACK_WAIT,
max_ack_pending: FETCH_CONSUMER_MAX_ACK_PENDING,
max_deliver: FETCH_CONSUMER_MAX_DELIVER,
replay_policy: ReplayPolicy::Instant,
..Default::default()
};
@@ -348,8 +342,14 @@ impl StreamClient {
let consumer_name = format!("sub_{}_{}", tenant_id.as_str(), aggregate_id);
let consumer_config = PullConfig {
durable_name: Some(consumer_name.clone()),
filter_subject: subject,
deliver_policy: DeliverPolicy::New,
ack_policy: AckPolicy::Explicit,
ack_wait: SUBSCRIBE_CONSUMER_ACK_WAIT,
replay_policy: ReplayPolicy::Instant,
max_ack_pending: SUBSCRIBE_CONSUMER_MAX_ACK_PENDING,
max_deliver: SUBSCRIBE_CONSUMER_MAX_DELIVER,
..Default::default()
};
@@ -487,16 +487,80 @@ impl StreamClient {
}
}
/// Builds the expected JetStream stream configuration for the aggregate
/// event stream.
///
/// The stream name and subject set come from the shared policy defaults so
/// every service agrees on them; the retention knobs (message/byte caps, age,
/// dedup window) come from this service's own `settings`.
fn stream_policy_config(settings: StreamConfigSettings) -> StreamConfig {
    let policy = shared::stream_policy_defaults(
        AGGREGATE_STREAM_NAME.to_string(),
        vec![shared::NATS_SUBJECT_AGGREGATE_EVENTS_ALL.to_string()],
    );
    let mut config = StreamConfig {
        name: policy.name,
        subjects: policy.subjects,
        ..Default::default()
    };
    config.max_messages = settings.max_messages;
    config.max_bytes = settings.max_bytes;
    config.max_age = settings.max_age;
    config.duplicate_window = settings.duplicate_window;
    config
}
/// Checks that the stream configuration found on the server (`actual`) still
/// satisfies the configuration this service expects (`expected`).
///
/// Both configs are projected onto the shared stream-policy shape and compared
/// via `shared::validate_stream_policy`; any mismatch is surfaced as an
/// `AggregateError::StreamError`.
fn validate_stream_config(
    expected: &StreamConfig,
    actual: &StreamConfig,
) -> Result<(), AggregateError> {
    // Same projection for both sides — keep it in one place.
    let as_policy = |cfg: &StreamConfig| {
        shared::stream_policy_from_parts(
            cfg.name.as_str(),
            cfg.subjects.clone(),
            cfg.max_messages,
            cfg.max_bytes,
            cfg.max_age,
            cfg.duplicate_window,
        )
    };
    shared::validate_stream_policy(&as_policy(expected), &as_policy(actual))
        .map_err(|e| AggregateError::StreamError(e.to_string()))
}
/// Builds the NATS header map published alongside an aggregate event.
///
/// Event-specific metadata (version, aggregate type, event type) is inserted
/// directly; the shared context headers (tenant id, Nats-Msg-Id for dedup,
/// correlation/trace propagation) are produced by the shared helper so all
/// publishers emit an identical header set.
fn build_event_headers(event: &Event) -> async_nats::HeaderMap {
    let mut headers = async_nats::HeaderMap::new();
    headers.insert("aggregate-version", event.version.to_string());
    headers.insert("aggregate-type", event.aggregate_type.as_str().to_string());
    headers.insert("event-type", event.event_type.to_string());
    // Required cross-service context headers, built centrally.
    let event_id = event.event_id.to_string();
    let ctx = shared::nats_context_headers_required(
        event.tenant_id.as_str(),
        Some(event_id.as_str()),
        event.correlation_id.as_deref(),
        event.traceparent.as_deref(),
        None,
    );
    ctx.into_iter().for_each(|(name, value)| {
        headers.insert(name, value);
    });
    headers
}
pub fn build_subject(
tenant_id: &TenantId,
aggregate_type: &AggregateType,
aggregate_id: &AggregateId,
) -> String {
format!(
"tenant.{}.aggregate.{}.{}",
shared::nats_subject_aggregate_event(
tenant_id.as_str(),
aggregate_type.as_str(),
aggregate_id
&aggregate_id.to_string(),
)
}
@@ -521,6 +585,49 @@ mod tests {
assert!(subject.starts_with("tenant.acme-corp.aggregate."));
}
#[test]
fn event_headers_include_required_context() {
    // A representative event carrying correlation + trace context.
    let event = Event::new(
        TenantId::new("tenant-a"),
        AggregateId::new_v7(),
        AggregateType::from("Account"),
        Version::from(1),
        "created",
        json!({"ok": true}),
        uuid::Uuid::now_v7(),
    );
    let headers = build_event_headers(&event);
    // Every one of these must be present on a published event.
    let required = [
        shared::NATS_HEADER_TENANT_ID,
        shared::NATS_HEADER_NATS_MSG_ID,
        shared::HEADER_X_CORRELATION_ID,
        shared::NATS_HEADER_CORRELATION_ID,
        shared::HEADER_TRACEPARENT,
        shared::HEADER_TRACE_ID,
    ];
    for name in required {
        assert!(headers.get(name).is_some(), "missing header: {name}");
    }
}
#[test]
fn stream_config_validation_allows_subject_superset() {
    // A server-side config with EXTRA subjects still satisfies the policy.
    let expected = stream_policy_config(StreamConfigSettings::default());
    let mut actual = expected.clone();
    let extra_subject = "tenant.*.aggregate.extra.*".to_string();
    actual.subjects.push(extra_subject);
    validate_stream_config(&expected, &actual)
        .expect("superset of subjects must validate");
}
#[test]
fn stream_config_validation_rejects_missing_subject() {
    // Stripping the required subjects must fail validation.
    let expected = stream_policy_config(StreamConfigSettings::default());
    let mut actual = expected.clone();
    actual.subjects = Vec::new();
    let result = validate_stream_config(&expected, &actual);
    assert!(result.is_err());
}
#[test]
fn stream_config_settings_defaults() {
let settings = StreamConfigSettings::default();