Monorepo consolidation: workspace, shared types, transport plans, docker/swarm assets
Some checks failed
ci / ui (push) Failing after 30s
ci / rust (push) Failing after 2m34s

This commit is contained in:
2026-03-30 11:40:42 +03:00
parent 7e7041cf8b
commit 1298d9a3df
246 changed files with 55434 additions and 0 deletions

View File

@@ -0,0 +1,594 @@
use super::{AggregateProjection, QueryRequest, QueryResponse};
use crate::types::TenantId;
use futures::stream::Stream;
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use std::pin::Pin;
use std::sync::Arc;
use thiserror::Error;
use tokio::sync::broadcast;
use tokio::sync::RwLock;
/// Errors surfaced by the query subsystem.
#[derive(Debug, Error)]
pub enum QueryError {
    /// A filter/query expression could not be parsed.
    #[error("Query syntax error: {0}")]
    SyntaxError(String),
    /// Transport-level failure talking to a remote query endpoint.
    #[error("Connection error: {0}")]
    ConnectionError(String),
    /// The requested tenant is unknown to the query store.
    #[error("Tenant not found: {0}")]
    TenantNotFound(String),
    /// Catch-all for unexpected internal failures.
    #[error("Internal error: {0}")]
    InternalError(String),
}

/// Convenience alias used by every fallible query-side operation.
pub type QueryResult<T> = Result<T, QueryError>;
/// Configuration for a `QueryClient`.
#[derive(Debug, Clone)]
pub struct QueryConfig {
    /// Remote query endpoint; `None` when the client runs embedded.
    pub endpoint: Option<String>,
    /// When true, queries are served from in-process storage.
    pub embedded: bool,
    /// Maximum number of entries kept in the LRU projection cache.
    pub cache_size: usize,
    /// Time-to-live for cached projections, in seconds.
    pub cache_ttl_seconds: u64,
}
impl Default for QueryConfig {
fn default() -> Self {
Self {
endpoint: None,
embedded: true,
cache_size: 1000,
cache_ttl_seconds: 60,
}
}
}
impl QueryConfig {
    /// Configuration for a fully in-process (embedded) client.
    pub fn embedded() -> Self {
        Self {
            embedded: true,
            ..Self::default()
        }
    }

    /// Configuration for a client that queries a remote endpoint.
    pub fn remote(endpoint: impl Into<String>) -> Self {
        Self {
            embedded: false,
            endpoint: Some(endpoint.into()),
            ..Self::default()
        }
    }
}
/// A cached projection together with its insertion time, used for TTL expiry.
#[derive(Debug, Clone)]
struct CacheEntry {
    /// The cached aggregate projection.
    projection: AggregateProjection,
    /// When the entry was cached; compared against `cache_ttl_seconds` on read.
    inserted_at: std::time::Instant,
}
/// In-memory, tenant-partitioned query/read-model client.
///
/// Cloning is cheap: storage, cache and the update channel are shared behind
/// `Arc`s, so clones observe the same data.
#[derive(Debug, Clone)]
pub struct QueryClient {
    /// Client configuration (cache sizing, TTL, embedded/remote mode).
    config: QueryConfig,
    /// Projections grouped by tenant id.
    storage: Arc<RwLock<HashMap<String, Vec<AggregateProjection>>>>,
    /// LRU cache keyed by "tenant:aggregate".
    cache: Arc<RwLock<lru::LruCache<String, CacheEntry>>>,
    /// Broadcast channel fanning out every indexed projection to subscribers.
    updates: broadcast::Sender<AggregateProjection>,
}
impl QueryClient {
pub fn new(config: QueryConfig) -> Self {
let cache = lru::LruCache::new(
std::num::NonZeroUsize::new(config.cache_size)
.unwrap_or_else(|| std::num::NonZeroUsize::new(1000).unwrap()),
);
let (updates, _) = broadcast::channel(1024);
Self {
config,
storage: Arc::new(RwLock::new(HashMap::new())),
cache: Arc::new(RwLock::new(cache)),
updates,
}
}
pub fn embedded() -> Self {
Self::new(QueryConfig::embedded())
}
fn make_key(tenant_id: &str, aggregate_id: &str) -> String {
format!("{}:{}", tenant_id, aggregate_id)
}
pub async fn index(&self, projection: AggregateProjection) -> QueryResult<()> {
let key = Self::make_key(&projection.tenant_id, &projection.aggregate_id);
let _ = self.updates.send(projection.clone());
{
let mut cache = self.cache.write().await;
cache.put(
key.clone(),
CacheEntry {
projection: projection.clone(),
inserted_at: std::time::Instant::now(),
},
);
}
let mut storage = self.storage.write().await;
let tenant_projections = storage.entry(projection.tenant_id.clone()).or_default();
if let Some(existing) = tenant_projections
.iter_mut()
.find(|p| p.aggregate_id == projection.aggregate_id)
{
*existing = projection;
} else {
tenant_projections.push(projection);
}
Ok(())
}
pub fn subscribe(
&self,
tenant_id: TenantId,
) -> Pin<Box<dyn Stream<Item = AggregateProjection> + Send>> {
let tenant_id = tenant_id.as_str().to_string();
let receiver = self.updates.subscribe();
Box::pin(futures::stream::unfold(
(receiver, tenant_id),
|(mut receiver, tenant_id)| async move {
loop {
match receiver.recv().await {
Ok(proj) => {
if proj.tenant_id == tenant_id {
return Some((proj, (receiver, tenant_id)));
}
}
Err(broadcast::error::RecvError::Lagged(_)) => continue,
Err(broadcast::error::RecvError::Closed) => return None,
}
}
},
))
}
pub async fn query(&self, request: QueryRequest) -> QueryResult<QueryResponse> {
let storage = self.storage.read().await;
let tenant_projections = storage.get(&request.tenant_id);
let projections: Vec<AggregateProjection> = match tenant_projections {
Some(projs) => {
let mut filtered: Vec<_> = projs
.iter()
.filter(|p| {
if let Some(ref at) = request.aggregate_type {
&p.aggregate_type == at
} else {
true
}
})
.filter(|p| {
if let Some(ref filter) = request.filter {
self.evaluate_filter(&p.state, filter).unwrap_or(false)
} else {
true
}
})
.cloned()
.collect();
filtered.sort_by(|a, b| b.updated_at.cmp(&a.updated_at));
filtered
}
None => Vec::new(),
};
let total = projections.len();
let offset = request.offset.unwrap_or(0);
let limit = request.limit.unwrap_or(100);
let results: Vec<AggregateProjection> =
projections.into_iter().skip(offset).take(limit).collect();
Ok(QueryResponse::from_results(results, total, Some(limit)))
}
pub async fn get(
&self,
tenant_id: &TenantId,
aggregate_id: &str,
) -> QueryResult<Option<AggregateProjection>> {
let key = Self::make_key(tenant_id.as_str(), aggregate_id);
{
let mut cache = self.cache.write().await;
if let Some(entry) = cache.get(&key) {
let elapsed = entry.inserted_at.elapsed().as_secs();
if elapsed < self.config.cache_ttl_seconds {
return Ok(Some(entry.projection.clone()));
}
cache.pop(&key);
}
}
let storage = self.storage.read().await;
let tenant_projections = storage.get(tenant_id.as_str());
Ok(tenant_projections.and_then(|projs| {
projs
.iter()
.find(|p| p.aggregate_id == aggregate_id)
.cloned()
}))
}
pub async fn delete(&self, tenant_id: &TenantId, aggregate_id: &str) -> QueryResult<bool> {
let key = Self::make_key(tenant_id.as_str(), aggregate_id);
{
let mut cache = self.cache.write().await;
cache.pop(&key);
}
let mut storage = self.storage.write().await;
if let Some(tenant_projections) = storage.get_mut(tenant_id.as_str()) {
let len_before = tenant_projections.len();
tenant_projections.retain(|p| p.aggregate_id != aggregate_id);
return Ok(tenant_projections.len() < len_before);
}
Ok(false)
}
pub async fn clear_tenant(&self, tenant_id: &TenantId) -> QueryResult<usize> {
let mut storage = self.storage.write().await;
let count = storage
.remove(tenant_id.as_str())
.map(|v| v.len())
.unwrap_or(0);
let mut cache = self.cache.write().await;
let prefix = format!("{}:", tenant_id.as_str());
let keys_to_remove: Vec<_> = cache
.iter()
.filter(|(k, _)| k.starts_with(&prefix))
.map(|(k, _)| k.clone())
.collect();
for key in keys_to_remove {
cache.pop(&key);
}
Ok(count)
}
fn evaluate_filter(&self, state: &JsonValue, filter: &str) -> QueryResult<bool> {
let filter = filter.trim();
if filter.is_empty() || filter == "*" {
return Ok(true);
}
if let Some((field, op_value)) = filter.split_once('>') {
let field = field.trim();
let value = op_value.trim();
return self.compare_field(state, field, value, |a, b| a > b);
}
if let Some((field, op_value)) = filter.split_once('<') {
let field = field.trim();
let value = op_value.trim();
return self.compare_field(state, field, value, |a, b| a < b);
}
if let Some((field, op_value)) = filter.split_once("==") {
let field = field.trim();
let value = op_value.trim();
return self.compare_field(state, field, value, |a, b| a == b);
}
if let Some((field, op_value)) = filter.split_once("!=") {
let field = field.trim();
let value = op_value.trim();
return self.compare_field(state, field, value, |a, b| a != b);
}
if let Some((field, op_value)) = filter.split_once(">=") {
let field = field.trim();
let value = op_value.trim();
return self.compare_field(state, field, value, |a, b| a >= b);
}
if let Some((field, op_value)) = filter.split_once("<=") {
let field = field.trim();
let value = op_value.trim();
return self.compare_field(state, field, value, |a, b| a <= b);
}
Ok(false)
}
fn compare_field<F>(
&self,
state: &JsonValue,
field: &str,
value_str: &str,
compare: F,
) -> QueryResult<bool>
where
F: Fn(f64, f64) -> bool,
{
let field_value = state.get(field);
let field_num = match field_value {
Some(JsonValue::Number(n)) => n.as_f64().unwrap_or(f64::NAN),
Some(JsonValue::String(s)) => s.parse::<f64>().unwrap_or(f64::NAN),
_ => return Ok(false),
};
let compare_num = value_str.parse::<f64>().unwrap_or(f64::NAN);
if field_num.is_nan() || compare_num.is_nan() {
return Ok(false);
}
Ok(compare(field_num, compare_num))
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use futures::StreamExt;
    use serde_json::json;

    // Fresh embedded client for each test.
    fn create_test_client() -> QueryClient {
        QueryClient::embedded()
    }

    // Minimal "Account" projection with the given tenant, id and balance.
    fn create_test_projection(tenant: &str, id: &str, balance: i64) -> AggregateProjection {
        AggregateProjection::new(tenant, id, "Account", 1, json!({"balance": balance}))
    }

    // Indexing a projection makes it retrievable via a filtered query.
    #[tokio::test]
    async fn query_client_index_and_query() {
        let client = create_test_client();
        let proj = create_test_projection("tenant-a", "acc-1", 100);
        client.index(proj).await.unwrap();
        let request = QueryRequest::new("tenant-a").with_filter("balance > 50");
        let response = client.query(request).await.unwrap();
        assert_eq!(response.results.len(), 1);
        assert_eq!(response.results[0].aggregate_id, "acc-1");
    }

    // Queries for one tenant never see another tenant's projections.
    #[tokio::test]
    async fn query_client_tenant_isolation() {
        let client = create_test_client();
        client
            .index(create_test_projection("tenant-a", "acc-1", 100))
            .await
            .unwrap();
        client
            .index(create_test_projection("tenant-b", "acc-2", 200))
            .await
            .unwrap();
        let response_a = client.query(QueryRequest::new("tenant-a")).await.unwrap();
        let response_b = client.query(QueryRequest::new("tenant-b")).await.unwrap();
        assert_eq!(response_a.results.len(), 1);
        assert_eq!(response_b.results.len(), 1);
        assert_eq!(response_a.results[0].state["balance"], 100);
        assert_eq!(response_b.results[0].state["balance"], 200);
    }

    // Each comparison operator selects the expected subset.
    #[tokio::test]
    async fn query_client_filter_operations() {
        let client = create_test_client();
        client
            .index(create_test_projection("tenant-a", "acc-1", 100))
            .await
            .unwrap();
        client
            .index(create_test_projection("tenant-a", "acc-2", 50))
            .await
            .unwrap();
        client
            .index(create_test_projection("tenant-a", "acc-3", 150))
            .await
            .unwrap();
        let gt_response = client
            .query(QueryRequest::new("tenant-a").with_filter("balance > 75"))
            .await
            .unwrap();
        assert_eq!(gt_response.results.len(), 2);
        let lt_response = client
            .query(QueryRequest::new("tenant-a").with_filter("balance < 75"))
            .await
            .unwrap();
        assert_eq!(lt_response.results.len(), 1);
        let eq_response = client
            .query(QueryRequest::new("tenant-a").with_filter("balance == 100"))
            .await
            .unwrap();
        assert_eq!(eq_response.results.len(), 1);
    }

    // 25 items, page size 10: pages of 10/10/5 with correct has_more flags.
    #[tokio::test]
    async fn query_client_pagination() {
        let client = create_test_client();
        for i in 0..25 {
            client
                .index(create_test_projection(
                    "tenant-a",
                    &format!("acc-{}", i),
                    i * 10,
                ))
                .await
                .unwrap();
        }
        let page1 = client
            .query(QueryRequest::new("tenant-a").with_limit(10))
            .await
            .unwrap();
        assert_eq!(page1.results.len(), 10);
        assert!(page1.has_more);
        let page2 = client
            .query(QueryRequest::new("tenant-a").with_limit(10).with_offset(10))
            .await
            .unwrap();
        assert_eq!(page2.results.len(), 10);
        let page3 = client
            .query(QueryRequest::new("tenant-a").with_limit(10).with_offset(20))
            .await
            .unwrap();
        assert_eq!(page3.results.len(), 5);
        assert!(!page3.has_more);
    }

    // Point lookup by tenant + aggregate id.
    #[tokio::test]
    async fn query_client_get_by_id() {
        let client = create_test_client();
        client
            .index(create_test_projection("tenant-a", "acc-1", 100))
            .await
            .unwrap();
        let tenant = TenantId::new("tenant-a");
        let result = client.get(&tenant, "acc-1").await.unwrap();
        assert!(result.is_some());
        let proj = result.unwrap();
        assert_eq!(proj.aggregate_id, "acc-1");
        assert_eq!(proj.state["balance"], 100);
    }

    // Deleting removes from storage AND the cache (get must not resurrect it).
    #[tokio::test]
    async fn query_client_delete() {
        let client = create_test_client();
        client
            .index(create_test_projection("tenant-a", "acc-1", 100))
            .await
            .unwrap();
        let tenant = TenantId::new("tenant-a");
        let deleted = client.delete(&tenant, "acc-1").await.unwrap();
        assert!(deleted);
        let result = client.get(&tenant, "acc-1").await.unwrap();
        assert!(result.is_none());
    }

    // clear_tenant drops only the target tenant's projections and reports the count.
    #[tokio::test]
    async fn query_client_clear_tenant() {
        let client = create_test_client();
        client
            .index(create_test_projection("tenant-a", "acc-1", 100))
            .await
            .unwrap();
        client
            .index(create_test_projection("tenant-a", "acc-2", 200))
            .await
            .unwrap();
        client
            .index(create_test_projection("tenant-b", "acc-3", 300))
            .await
            .unwrap();
        let tenant = TenantId::new("tenant-a");
        let count = client.clear_tenant(&tenant).await.unwrap();
        assert_eq!(count, 2);
        let response_a = client.query(QueryRequest::new("tenant-a")).await.unwrap();
        assert_eq!(response_a.results.len(), 0);
        let response_b = client.query(QueryRequest::new("tenant-b")).await.unwrap();
        assert_eq!(response_b.results.len(), 1);
    }

    // Re-indexing the same aggregate replaces the projection instead of duplicating it.
    #[tokio::test]
    async fn query_client_update_existing() {
        let client = create_test_client();
        client
            .index(create_test_projection("tenant-a", "acc-1", 100))
            .await
            .unwrap();
        client
            .index(AggregateProjection::new(
                "tenant-a",
                "acc-1",
                "Account",
                2,
                json!({"balance": 250}),
            ))
            .await
            .unwrap();
        let response = client.query(QueryRequest::new("tenant-a")).await.unwrap();
        assert_eq!(response.results.len(), 1);
        assert_eq!(response.results[0].version, 2);
        assert_eq!(response.results[0].state["balance"], 250);
    }

    // A subscriber created before indexing receives the projection update.
    #[tokio::test]
    async fn query_client_subscribe_receives_updates() {
        let client = create_test_client();
        let mut updates = client.subscribe(TenantId::new("tenant-a"));
        client
            .index(create_test_projection("tenant-a", "acc-1", 100))
            .await
            .unwrap();
        let next = updates.next().await.unwrap();
        assert_eq!(next.tenant_id, "tenant-a");
        assert_eq!(next.aggregate_id, "acc-1");
        assert_eq!(next.state["balance"], 100);
    }

    // Default config: embedded, no endpoint, 1000-entry cache, 60s TTL.
    #[test]
    fn query_config_defaults() {
        let config = QueryConfig::default();
        assert!(config.embedded);
        assert!(config.endpoint.is_none());
        assert_eq!(config.cache_size, 1000);
        assert_eq!(config.cache_ttl_seconds, 60);
    }

    // Builder methods populate every optional request field.
    #[test]
    fn query_request_builder() {
        let request = QueryRequest::new("tenant-a")
            .with_aggregate_type("Account")
            .with_filter("balance > 100")
            .with_limit(50)
            .with_offset(10);
        assert_eq!(request.tenant_id, "tenant-a");
        assert_eq!(request.aggregate_type, Some("Account".to_string()));
        assert_eq!(request.filter, Some("balance > 100".to_string()));
        assert_eq!(request.limit, Some(50));
        assert_eq!(request.offset, Some(10));
    }
}

193
aggregate/src/query/mod.rs Normal file
View File

@@ -0,0 +1,193 @@
mod client;
mod projection;
pub use client::{QueryClient, QueryConfig, QueryError, QueryResult};
pub use projection::{ProjectionConfig, StateProjection};
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
/// Read-model snapshot of a single aggregate, as served by the query side.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AggregateProjection {
    /// Owning tenant.
    pub tenant_id: String,
    /// Aggregate instance identifier.
    pub aggregate_id: String,
    /// Aggregate type name (e.g. "Account").
    pub aggregate_type: String,
    /// Aggregate version this state corresponds to.
    pub version: u64,
    /// Materialized aggregate state as free-form JSON.
    pub state: JsonValue,
    /// When this projection was (re)built.
    pub updated_at: chrono::DateTime<chrono::Utc>,
}
impl AggregateProjection {
pub fn new(
tenant_id: impl Into<String>,
aggregate_id: impl Into<String>,
aggregate_type: impl Into<String>,
version: u64,
state: JsonValue,
) -> Self {
Self {
tenant_id: tenant_id.into(),
aggregate_id: aggregate_id.into(),
aggregate_type: aggregate_type.into(),
version,
state,
updated_at: chrono::Utc::now(),
}
}
}
/// Parameters for a projection query against one tenant.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryRequest {
    /// Tenant whose projections are queried (required).
    pub tenant_id: String,
    /// Restrict results to this aggregate type, when set.
    pub aggregate_type: Option<String>,
    /// Optional `field OP value` filter expression evaluated against state.
    pub filter: Option<String>,
    /// Maximum number of results to return.
    pub limit: Option<usize>,
    /// Number of results to skip before collecting the page.
    pub offset: Option<usize>,
}
impl QueryRequest {
pub fn new(tenant_id: impl Into<String>) -> Self {
Self {
tenant_id: tenant_id.into(),
aggregate_type: None,
filter: None,
limit: None,
offset: None,
}
}
pub fn with_aggregate_type(mut self, aggregate_type: impl Into<String>) -> Self {
self.aggregate_type = Some(aggregate_type.into());
self
}
pub fn with_filter(mut self, filter: impl Into<String>) -> Self {
self.filter = Some(filter.into());
self
}
pub fn with_limit(mut self, limit: usize) -> Self {
self.limit = Some(limit);
self
}
pub fn with_offset(mut self, offset: usize) -> Self {
self.offset = Some(offset);
self
}
}
/// One page of query results plus pagination metadata.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct QueryResponse {
    /// The projections on this page.
    pub results: Vec<AggregateProjection>,
    /// Total number of projections matching the query (across all pages).
    pub total: usize,
    /// Whether further pages are available.
    pub has_more: bool,
}
impl QueryResponse {
pub fn empty() -> Self {
Self {
results: Vec::new(),
total: 0,
has_more: false,
}
}
pub fn from_results(
results: Vec<AggregateProjection>,
total: usize,
limit: Option<usize>,
) -> Self {
let has_more = limit.is_some_and(|l| results.len() == l && total > results.len());
Self {
results,
total,
has_more,
}
}
}
/// Thin server-side wrapper that dispatches query requests to a `QueryClient`.
#[derive(Debug, Clone)]
pub struct QueryServer {
    /// The underlying client that actually executes queries.
    query: QueryClient,
}
impl QueryServer {
pub fn new(query: QueryClient) -> Self {
Self { query }
}
pub fn query_client(&self) -> &QueryClient {
&self.query
}
pub async fn handle(&self, request: QueryRequest) -> QueryResult<QueryResponse> {
self.query.query(request).await
}
pub async fn handle_raw(
&self,
tenant_id: impl Into<String>,
aggregate_type: Option<String>,
filter: Option<String>,
limit: Option<usize>,
offset: Option<usize>,
) -> QueryResult<QueryResponse> {
let mut request = QueryRequest::new(tenant_id);
request.aggregate_type = aggregate_type;
request.filter = filter;
request.limit = limit;
request.offset = offset;
self.handle(request).await
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use serde_json::json;

    // handle_raw must apply tenant scoping: with two tenants indexed, a
    // request for tenant-a returns only tenant-a's projection.
    #[tokio::test]
    async fn query_server_filters_by_tenant() {
        let query = QueryClient::embedded();
        // The server shares storage with `query` (QueryClient clones share state).
        let server = QueryServer::new(query.clone());
        query
            .index(AggregateProjection::new(
                "tenant-a",
                "agg-1",
                "Account",
                1,
                json!({ "balance": 100 }),
            ))
            .await
            .unwrap();
        query
            .index(AggregateProjection::new(
                "tenant-b",
                "agg-2",
                "Account",
                1,
                json!({ "balance": 200 }),
            ))
            .await
            .unwrap();
        let resp = server
            .handle_raw(
                "tenant-a",
                Some("Account".to_string()),
                Some("balance > 50".to_string()),
                Some(100),
                Some(0),
            )
            .await
            .unwrap();
        assert_eq!(resp.total, 1);
        assert_eq!(resp.results[0].tenant_id, "tenant-a");
        assert_eq!(resp.results[0].state["balance"], 100);
    }
}

View File

@@ -0,0 +1,217 @@
use super::AggregateProjection;
use crate::types::{AggregateId, AggregateType, Event, TenantId, Version};
use serde_json::Value as JsonValue;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Tuning knobs for the state-projection engine.
#[derive(Debug, Clone)]
pub struct ProjectionConfig {
    /// Maximum number of events processed per `project_events` call.
    pub batch_size: usize,
    /// Projection timeout in milliseconds.
    /// NOTE(review): not referenced anywhere in this module — confirm it is
    /// consumed by a caller or remove it.
    pub projection_timeout_ms: u64,
}
impl Default for ProjectionConfig {
fn default() -> Self {
Self {
batch_size: 100,
projection_timeout_ms: 5000,
}
}
}
/// Maps raw events to `AggregateProjection`s via per-aggregate-type handlers.
pub struct StateProjection {
    /// Batching configuration.
    config: ProjectionConfig,
    /// Handler registry keyed by aggregate type name.
    handlers: Arc<RwLock<HashMap<String, ProjectionHandler>>>,
}

/// Callback turning an event into a projection, or `None` to skip the event.
type ProjectionHandler = Box<dyn Fn(&Event) -> Option<AggregateProjection> + Send + Sync>;
impl StateProjection {
pub fn new(config: ProjectionConfig) -> Self {
Self {
config,
handlers: Arc::new(RwLock::new(HashMap::new())),
}
}
pub fn new_default() -> Self {
Self::new(ProjectionConfig::default())
}
pub async fn register_handler<F>(&self, aggregate_type: &str, handler: F)
where
F: Fn(&Event) -> Option<AggregateProjection> + Send + Sync + 'static,
{
let mut handlers = self.handlers.write().await;
handlers.insert(aggregate_type.to_string(), Box::new(handler));
}
pub async fn project_event(&self, event: &Event) -> Option<AggregateProjection> {
let handlers = self.handlers.read().await;
let aggregate_type = event.aggregate_type.as_str();
handlers.get(aggregate_type).and_then(|h| h(event))
}
pub async fn project_events(&self, events: &[Event]) -> Vec<AggregateProjection> {
let mut projections = Vec::with_capacity(events.len().min(self.config.batch_size));
for event in events.iter().take(self.config.batch_size) {
if let Some(proj) = self.project_event(event).await {
projections.push(proj);
}
}
projections
}
pub fn default_projection_from_event(event: &Event) -> AggregateProjection {
AggregateProjection::new(
event.tenant_id.as_str(),
event.aggregate_id.to_string(),
event.aggregate_type.as_str(),
event.version.as_u64(),
event.payload.clone(),
)
}
pub fn default_projection_from_state(
tenant_id: &TenantId,
aggregate_id: &AggregateId,
aggregate_type: &AggregateType,
version: &Version,
state: &JsonValue,
) -> AggregateProjection {
AggregateProjection::new(
tenant_id.as_str(),
aggregate_id.to_string(),
aggregate_type.as_str(),
version.as_u64(),
state.clone(),
)
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use serde_json::json;

    // Minimal "Account" event with a fixed payload of {"amount": 100}.
    fn create_test_event(tenant: &str, version: u64, event_type: &str) -> Event {
        Event {
            event_id: uuid::Uuid::now_v7(),
            tenant_id: TenantId::new(tenant),
            aggregate_id: AggregateId::new_v7(),
            aggregate_type: AggregateType::from("Account"),
            version: Version::from(version),
            event_type: event_type.to_string(),
            payload: json!({"amount": 100}),
            timestamp: Utc::now(),
            command_id: uuid::Uuid::nil(),
            correlation_id: None,
            traceparent: None,
        }
    }

    // A registered handler is invoked for matching aggregate types.
    #[tokio::test]
    async fn state_projection_registers_handler() {
        let projection = StateProjection::new_default();
        projection
            .register_handler("Account", |event| {
                Some(AggregateProjection::new(
                    event.tenant_id.as_str(),
                    event.aggregate_id.to_string(),
                    "Account",
                    event.version.as_u64(),
                    event.payload.clone(),
                ))
            })
            .await;
        let event = create_test_event("tenant-a", 1, "deposited");
        let result = projection.project_event(&event).await;
        assert!(result.is_some());
        let proj = result.unwrap();
        assert_eq!(proj.aggregate_type, "Account");
    }

    // project_events handles a small batch and returns one projection per event.
    #[tokio::test]
    async fn state_projection_project_events_batch() {
        let projection = StateProjection::new_default();
        projection
            .register_handler("Account", |event| {
                Some(AggregateProjection::new(
                    event.tenant_id.as_str(),
                    event.aggregate_id.to_string(),
                    "Account",
                    event.version.as_u64(),
                    event.payload.clone(),
                ))
            })
            .await;
        let events = vec![
            create_test_event("tenant-a", 1, "deposited"),
            create_test_event("tenant-a", 1, "deposited"),
            create_test_event("tenant-a", 1, "deposited"),
        ];
        let projections = projection.project_events(&events).await;
        assert_eq!(projections.len(), 3);
    }

    // With no handler registered, projection yields None rather than panicking.
    #[tokio::test]
    async fn state_projection_no_handler_returns_none() {
        let projection = StateProjection::new_default();
        let event = create_test_event("tenant-a", 1, "deposited");
        let result = projection.project_event(&event).await;
        assert!(result.is_none());
    }

    // The default event projection copies tenant, version and payload verbatim.
    #[test]
    fn default_projection_from_event() {
        let event = create_test_event("tenant-a", 5, "deposited");
        let proj = StateProjection::default_projection_from_event(&event);
        assert_eq!(proj.tenant_id, "tenant-a");
        assert_eq!(proj.version, 5);
        assert_eq!(proj.state["amount"], 100);
    }

    // The default state projection copies all identifying fields and the state.
    #[test]
    fn default_projection_from_state() {
        let tenant_id = TenantId::new("tenant-a");
        let aggregate_id = AggregateId::new_v7();
        let aggregate_type = AggregateType::from("Account");
        let version = Version::from(10);
        let state = json!({"balance": 1000});
        let proj = StateProjection::default_projection_from_state(
            &tenant_id,
            &aggregate_id,
            &aggregate_type,
            &version,
            &state,
        );
        assert_eq!(proj.tenant_id, "tenant-a");
        assert_eq!(proj.aggregate_type, "Account");
        assert_eq!(proj.version, 10);
        assert_eq!(proj.state["balance"], 1000);
    }

    // Default config: batches of 100, 5000 ms timeout.
    #[test]
    fn projection_config_defaults() {
        let config = ProjectionConfig::default();
        assert_eq!(config.batch_size, 100);
        assert_eq!(config.projection_timeout_ms, 5000);
    }
}