chore: full stack stability and migration fixes, plus react UI progress
Some checks failed
CI / podman-build (push) Has been cancelled
CI / rust (push) Has been cancelled

This commit is contained in:
2026-03-18 09:01:38 +02:00
parent 38cab8c246
commit a66d908eff
142 changed files with 12210 additions and 3402 deletions

View File

@@ -1,5 +1,5 @@
mod middleware;
mod state;
use gateway::middleware;
use gateway::state::AppState;
use axum::{
extract::{Request, Query},
@@ -8,10 +8,10 @@ use axum::{
routing::get,
Router,
};
use tower_http::services::{ServeDir, ServeFile};
use axum::http::StatusCode;
use axum_prometheus::PrometheusMetricLayer;
use common::{init_pool, Config};
use state::AppState;
use common::{init_pool, Config, JwtConfig};
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
@@ -63,9 +63,7 @@ async fn log_headers(req: Request, next: Next) -> Response {
next.run(req).await
}
/// Serves the embedded admin dashboard HTML page.
///
/// The page is baked into the binary at compile time via `include_str!`,
/// so no filesystem access happens per request.
async fn dashboard_handler() -> axum::response::Html<&'static str> {
    // Path is resolved relative to this source file at compile time.
    const ADMIN_PAGE: &str = include_str!("../../web/admin.html");
    axum::response::Html(ADMIN_PAGE)
}
// Dashboard handler removed in favor of direct SPA serving from /web
async fn wait_for_db(db_url: &str) -> sqlx::PgPool {
loop {
@@ -79,6 +77,73 @@ async fn wait_for_db(db_url: &str) -> sqlx::PgPool {
}
}
/// Validates the gateway's runtime configuration against the control-plane database.
///
/// Checks performed, in order:
/// 1. `JWT_SECRET` from the environment is at least 32 characters long.
/// 2. Logs the configured JWT issuer (`JWT_ISSUER` env var, default `"madbase"`).
/// 3. The `projects` row named `default` exists, its stored `jwt_secret`
///    matches the environment value, and its `anon_key` / `service_role_key`
///    columns are non-empty.
///
/// # Errors
/// Returns an `anyhow` error describing the first failed check (or any
/// database error from the lookup); the caller is expected to abort startup.
async fn validate_configuration(config: &Config, pool: &sqlx::PgPool) -> anyhow::Result<()> {
    tracing::info!("Validating configuration...");

    // Safe secret preview: first 8 *characters* (not bytes). Byte-slicing
    // with `[..8]` would panic if a database-sourced secret is shorter than
    // 8 bytes or if byte 8 falls inside a multi-byte UTF-8 character —
    // exactly the malformed-data case this validator should report, not
    // crash on.
    fn preview(secret: &str) -> String {
        secret.chars().take(8).collect()
    }

    // 1. Validate JWT secret format (length only; content is opaque).
    if config.jwt_secret.len() < 32 {
        anyhow::bail!(
            "JWT_SECRET too short ({} chars, minimum 32 required)",
            config.jwt_secret.len()
        );
    }

    // 1.1 Validate JWT issuer (env override, falls back to "madbase").
    let jwt_issuer = std::env::var("JWT_ISSUER").unwrap_or_else(|_| "madbase".to_string());
    tracing::info!(
        jwt_issuer = %jwt_issuer,
        "JWT issuer configured"
    );

    // 2. Validate JWT secret consistency with the database.
    let row = sqlx::query_as::<_, (String, String, String, String)>(
        r#"
        SELECT name, jwt_secret, anon_key, service_role_key
        FROM projects
        WHERE name = 'default'
        LIMIT 1
        "#
    )
    .fetch_optional(pool)
    .await?;

    if let Some((name, jwt_secret, anon_key, service_role_key)) = row {
        if jwt_secret != config.jwt_secret {
            anyhow::bail!(
                "JWT_SECRET mismatch between environment and database (project 'default')\n\
                 Environment: {}...\n\
                 Database: {}...\n\
                 Run 'scripts/setup_default_project.sh' to fix this.",
                preview(&config.jwt_secret),
                preview(&jwt_secret)
            );
        }
        // Both API keys must be provisioned for the default project.
        if anon_key.is_empty() {
            anyhow::bail!("Project 'default' has empty anon_key");
        }
        if service_role_key.is_empty() {
            anyhow::bail!("Project 'default' has empty service_role_key");
        }
        tracing::info!(
            project_name = name,
            jwt_secret_preview = preview(&jwt_secret),
            anon_key_present = !anon_key.is_empty(),
            service_role_key_present = !service_role_key.is_empty(),
            "Project configuration validated"
        );
    } else {
        anyhow::bail!(
            "Default project not found in database. Run 'scripts/setup_default_project.sh' to create it."
        );
    }

    tracing::info!("Configuration validation successful.");
    Ok(())
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// Load configuration
@@ -87,17 +152,42 @@ async fn main() -> anyhow::Result<()> {
// Initialize tracing
let rust_log = std::env::var("RUST_LOG").unwrap_or_else(|_| "debug".into());
let is_json = std::env::var("LOG_FORMAT").ok().as_deref() == Some("json");
if std::env::var("LOG_FORMAT").ok().as_deref() == Some("json") {
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(&rust_log))
.with(tracing_subscriber::fmt::layer().json())
.init();
use tracing_subscriber::Layer;
let filter = tracing_subscriber::EnvFilter::new(&rust_log).boxed();
let fmt_layer = if is_json {
tracing_subscriber::fmt::layer().json().boxed()
} else {
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(&rust_log))
.with(tracing_subscriber::fmt::layer())
.init();
tracing_subscriber::fmt::layer().boxed()
};
let otel_layer = if let Ok(otlp_endpoint) = std::env::var("OTEL_EXPORTER_OTLP_ENDPOINT") {
use opentelemetry_otlp::WithExportConfig;
let tracer = opentelemetry_otlp::new_pipeline()
.tracing()
.with_exporter(opentelemetry_otlp::new_exporter().tonic().with_endpoint(otlp_endpoint))
.with_trace_config(opentelemetry_sdk::trace::config().with_resource(
opentelemetry_sdk::Resource::new(vec![opentelemetry::KeyValue::new("service.name", "madbase-gateway")])
))
.install_batch(opentelemetry_sdk::runtime::Tokio)
.expect("Failed to initialize OTLP tracer");
Some(tracing_opentelemetry::layer().with_tracer(tracer).boxed())
} else {
None
};
let registry = tracing_subscriber::registry()
.with(filter)
.with(fmt_layer);
if let Some(otel) = otel_layer {
registry.with(otel).init();
} else {
registry.init();
}
tracing::info!("Starting MadBase Gateway v4.1 (Admin UI)...");
@@ -107,13 +197,16 @@ async fn main() -> anyhow::Result<()> {
let pool = wait_for_db(&config.database_url).await;
tracing::info!("Database connected successfully.");
// Run Migrations
tracing::info!("Running database migrations...");
// Run Migrations (Tenant only for Gateway)
tracing::info!("Running tenant database migrations...");
sqlx::migrate!("../migrations")
.run(&pool)
.await
.expect("Failed to run migrations");
tracing::info!("Migrations applied successfully.");
.expect("Failed to run tenant migrations");
tracing::info!("Tenant migrations applied successfully.");
// Validate Configuration
validate_configuration(&config, &pool).await?;
let app_state = AppState {
control_db: pool.clone(),
@@ -131,11 +224,42 @@ async fn main() -> anyhow::Result<()> {
session_manager,
};
let replica_pool = if let Ok(url) = std::env::var("READ_REPLICA_URL") {
tracing::info!("Connecting to read replica at {}...", url);
Some(wait_for_db(&url).await)
} else {
None
};
let data_state = data_api::handlers::DataState {
db: pool.clone(),
replica_pool,
config: config.clone(),
cache: Arc::new(data_api::schema_cache::SchemaCache::new()),
};
// Register DDL invalidation listener
let ddl_cache = data_state.cache.clone();
let ddl_pool = pool.clone();
tokio::spawn(async move {
let mut listener = match sqlx::postgres::PgListener::connect_with(&ddl_pool).await {
Ok(l) => l,
Err(e) => {
tracing::error!("Failed to connect PgListener: {}", e);
return;
}
};
if let Err(e) = listener.listen("madbase_schema_change").await {
tracing::error!("Failed to listen on madbase_schema_change: {}", e);
return;
}
tracing::info!("DDL invalidation listener started.");
while let Ok(notification) = listener.recv().await {
tracing::info!("Received DDL change notification: {}. Invalidating SchemaCache.", notification.payload());
ddl_cache.invalidate_all().await;
}
});
// Initialize Tenant Database (for Realtime)
let default_tenant_db_url = std::env::var("DEFAULT_TENANT_DB_URL")
.expect("DEFAULT_TENANT_DB_URL must be set");
@@ -146,19 +270,20 @@ async fn main() -> anyhow::Result<()> {
let control_state = control_plane::ControlPlaneState {
db: pool.clone(),
tenant_db: tenant_pool.clone(),
server_manager: None,
};
let mut tenant_config = config.clone();
tenant_config.database_url = default_tenant_db_url;
tenant_config.database_url = default_tenant_db_url.clone();
// Realtime Init
let (realtime_router, realtime_state) = realtime::init(tenant_pool.clone(), tenant_config.clone());
// Start Replication Listener
let repl_config = tenant_config.clone();
let repl_tx = realtime_state.broadcast_tx.clone();
// Start Replication Listener (for default tenant)
let repl_state = realtime_state.clone();
let default_db_url = default_tenant_db_url.clone();
tokio::spawn(async move {
if let Err(e) = realtime::replication::start_replication_listener(repl_config, repl_tx).await {
if let Err(e) = realtime::replication::start_replication_listener("default".to_string(), default_db_url, repl_state).await {
tracing::error!("Replication listener failed: {}", e);
}
});
@@ -169,23 +294,35 @@ async fn main() -> anyhow::Result<()> {
// Functions Init
let functions_runtime = Arc::new(functions::runtime::WasmRuntime::new().expect("Failed to initialize WASM runtime"));
let deno_runtime = Arc::new(functions::deno_runtime::DenoRuntime::new());
let pool_size = std::env::var("DENO_POOL_SIZE").unwrap_or_else(|_| "4".to_string()).parse::<usize>().unwrap_or(4);
let deno_pool = Arc::new(functions::worker_pool::DenoPool::new(pool_size));
let functions_state = functions::FunctionsState {
db: pool.clone(),
config: config.clone(),
runtime: functions_runtime,
deno_runtime,
deno_pool,
};
// Auth Middleware State
let jwt_config = JwtConfig::from_env()?;
let auth_middleware_state = auth::AuthMiddlewareState {
config: config.clone(),
jwt_config,
};
// Project Middleware State
let project_middleware_state = middleware::ProjectMiddlewareState {
control_db: app_state.control_db.clone(),
tenant_pools: app_state.tenant_pools.clone(),
project_cache: Cache::new(100),
tenant_pools: moka::future::Cache::builder()
.max_capacity(100)
.time_to_idle(Duration::from_secs(300))
.build(),
project_cache: moka::future::Cache::builder()
.max_capacity(100)
.time_to_live(Duration::from_secs(60))
.build(),
realtime: realtime_state,
};
// Construct App
@@ -247,11 +384,9 @@ async fn main() -> anyhow::Result<()> {
.finish()
.unwrap(),
);
let app = Router::new()
.route("/", get(|| async { "Hello, MadBase!" }))
.route("/metrics", get(|| async move { metric_handle.render() }))
.route("/dashboard", get(dashboard_handler))
.nest("/", tenant_routes) // Apply project resolution to these
.nest(
"/platform/v1", // Admin/Control Plane API (No project resolution needed)
@@ -286,14 +421,24 @@ async fn main() -> anyhow::Result<()> {
})
.layer(TraceLayer::new_for_http())
.layer(from_fn(log_headers))
.layer(prometheus_layer);
.layer(prometheus_layer)
.fallback_service(ServeDir::new("web").fallback(ServeFile::new("web/index.html")));
// Run it
let addr = SocketAddr::from(([0, 0, 0, 0], config.port));
tracing::info!("Listening on {}", addr);
let listener = tokio::net::TcpListener::bind(addr).await?;
axum::serve(listener, app.into_make_service_with_connect_info::<SocketAddr>()).await?;
let shutdown = async {
tokio::signal::ctrl_c().await.ok();
tracing::info!("Shutdown signal received, draining connections...");
};
axum::serve(listener, app.into_make_service_with_connect_info::<SocketAddr>())
.with_graceful_shutdown(shutdown)
.await?;
tracing::info!("Gateway Server shut down cleanly.");
Ok(())
}