added initial roadmap and implementation

This commit is contained in:
2026-03-11 22:23:16 +02:00
parent 39b97a6db5
commit c0792f2e1d
62 changed files with 12410 additions and 1 deletions

27
gateway/Cargo.toml Normal file
View File

@@ -0,0 +1,27 @@
# Manifest for the gateway crate: the single public-facing binary that fronts
# auth, data, realtime, storage and control-plane services.
[package]
name = "gateway"
version = "0.1.0"
edition = "2021"
# Workspace-member crates and shared dependencies; versions for
# `workspace = true` entries are pinned in the root Cargo.toml.
[dependencies]
common = { workspace = true }
auth = { workspace = true }
data_api = { workspace = true }
control_plane = { workspace = true }
realtime = { workspace = true }
storage = { workspace = true }
tokio = { workspace = true }
axum = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sqlx = { workspace = true }
dotenvy = { workspace = true }
anyhow = { workspace = true }
# Crate-local dependencies (not managed by the workspace):
axum-prometheus = "0.6"          # /metrics endpoint + per-route HTTP metrics
tower_governor = "0.4.2"         # IP-keyed rate limiting
tower-http = { version = "0.6.8", features = ["cors", "trace"] }
moka = { version = "0.12.14", features = ["future"] }  # async project-lookup cache

220
gateway/src/main.rs Normal file
View File

@@ -0,0 +1,220 @@
mod middleware;
mod state;

use anyhow::Context;
use axum::{
    extract::Request,
    middleware::{from_fn, from_fn_with_state, Next},
    response::Response,
    routing::get,
    Router,
};
use axum_prometheus::PrometheusMetricLayer;
use common::{init_pool, Config};
use moka::future::Cache;
use state::AppState;
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::RwLock;
use tower_governor::{governor::GovernorConfigBuilder, key_extractor::SmartIpKeyExtractor, GovernorLayer};
use tower_http::cors::{Any, CorsLayer};
use tower_http::trace::TraceLayer;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
/// Middleware that dumps every incoming request's header map at debug
/// level, then forwards the request unchanged.
async fn log_headers(req: Request, next: Next) -> Response {
    let headers = req.headers();
    tracing::debug!("Request Headers: {:?}", headers);
    next.run(req).await
}
/// Serves the dashboard page, baked into the binary at compile time from
/// `../../web/index.html` (path relative to this source file).
async fn dashboard_handler() -> axum::response::Html<&'static str> {
    use axum::response::Html;
    Html(include_str!("../../web/index.html"))
}
/// Polls `init_pool` until the database at `db_url` accepts connections,
/// sleeping two seconds between attempts. Retries indefinitely: the caller
/// only gets a pool, never an error.
async fn wait_for_db(db_url: &str) -> sqlx::PgPool {
    loop {
        let err = match init_pool(db_url).await {
            Ok(pool) => break pool,
            Err(err) => err,
        };
        tracing::warn!("Database not ready yet, retrying in 2s: {}", err);
        tokio::time::sleep(Duration::from_secs(2)).await;
    }
}
#[tokio::main]
async fn main() -> anyhow::Result<()> {
// Load configuration
dotenvy::dotenv().ok();
let config = Config::new().expect("Failed to load configuration");
// Initialize tracing
let rust_log = std::env::var("RUST_LOG").unwrap_or_else(|_| "debug".into());
if std::env::var("LOG_FORMAT").ok().as_deref() == Some("json") {
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(&rust_log))
.with(tracing_subscriber::fmt::layer().json())
.init();
} else {
tracing_subscriber::registry()
.with(tracing_subscriber::EnvFilter::new(&rust_log))
.with(tracing_subscriber::fmt::layer())
.init();
}
tracing::info!("Starting MadBase Gateway...");
// Initialize Database (Control Plane / Main DB)
tracing::info!("Connecting to database at {}...", config.database_url);
let pool = wait_for_db(&config.database_url).await;
tracing::info!("Database connected successfully.");
// Run Migrations
tracing::info!("Running database migrations...");
sqlx::migrate!("../migrations")
.run(&pool)
.await
.expect("Failed to run migrations");
tracing::info!("Migrations applied successfully.");
let app_state = AppState {
control_db: pool.clone(),
tenant_pools: Arc::new(RwLock::new(HashMap::new())),
};
// Auth State (Legacy/Fallback)
let auth_state = auth::AuthState {
db: pool.clone(),
config: config.clone(),
};
let data_state = data_api::handlers::DataState {
db: pool.clone(),
config: config.clone(),
};
let control_state = control_plane::ControlPlaneState { db: pool.clone() };
// Initialize Tenant Database (for Realtime)
let default_tenant_db_url = std::env::var("DEFAULT_TENANT_DB_URL")
.expect("DEFAULT_TENANT_DB_URL must be set");
tracing::info!("Connecting to default tenant database at {}...", default_tenant_db_url);
let tenant_pool = wait_for_db(&default_tenant_db_url).await;
tracing::info!("Tenant Database connected successfully.");
let mut tenant_config = config.clone();
tenant_config.database_url = default_tenant_db_url;
// Realtime Init
let (realtime_router, realtime_state) = realtime::init(tenant_pool.clone(), tenant_config.clone());
// Start Replication Listener
let repl_config = tenant_config.clone();
let repl_tx = realtime_state.broadcast_tx.clone();
tokio::spawn(async move {
if let Err(e) = realtime::replication::start_replication_listener(repl_config, repl_tx).await {
tracing::error!("Replication listener failed: {}", e);
}
});
// Storage Init
let storage_router = storage::init(pool.clone(), config.clone()).await;
// Auth Middleware State
let auth_middleware_state = auth::AuthMiddlewareState {
config: config.clone(),
};
// Project Middleware State
let project_middleware_state = middleware::ProjectMiddlewareState {
control_db: app_state.control_db.clone(),
tenant_pools: app_state.tenant_pools.clone(),
project_cache: Cache::new(100),
};
// Construct App
// We apply `resolve_project` middleware to /auth, /rest, /storage, /realtime
// But NOT /platform (admin)
let tenant_routes = Router::new()
.nest(
"/auth/v1",
auth::router()
.layer(from_fn_with_state(
auth_middleware_state.clone(),
auth::auth_middleware,
))
.with_state(auth_state),
)
.nest(
"/rest/v1",
data_api::router()
.layer(from_fn_with_state(
auth_middleware_state.clone(),
auth::auth_middleware,
))
.with_state(data_state),
)
.nest("/realtime/v1", realtime_router)
.nest(
"/storage/v1",
storage_router.layer(from_fn_with_state(
auth_middleware_state.clone(),
auth::auth_middleware,
)),
)
.layer(from_fn_with_state(
project_middleware_state.clone(),
middleware::inject_tenant_pool,
))
.layer(from_fn_with_state(
project_middleware_state,
middleware::resolve_project,
));
// Metrics
let (prometheus_layer, metric_handle) = PrometheusMetricLayer::pair();
// Rate Limiting Configuration
let governor_conf = Arc::new(
GovernorConfigBuilder::default()
.per_second(config.rate_limit_per_second)
.burst_size(config.rate_limit_per_second as u32 * 2)
.key_extractor(SmartIpKeyExtractor)
.finish()
.unwrap(),
);
let app = Router::new()
.route("/", get(|| async { "Hello, MadBase!" }))
.route("/metrics", get(|| async move { metric_handle.render() }))
.route("/dashboard", get(dashboard_handler))
.nest("/", tenant_routes) // Apply project resolution to these
.nest(
"/platform/v1", // Admin/Control Plane API (No project resolution needed)
control_plane::router(control_state),
)
.layer(GovernorLayer {
config: governor_conf,
})
.layer(
CorsLayer::new()
.allow_origin(Any)
.allow_methods(Any)
.allow_headers(Any),
)
.layer(TraceLayer::new_for_http())
.layer(from_fn(log_headers))
.layer(prometheus_layer);
// Run it
let addr = SocketAddr::from(([0, 0, 0, 0], config.port));
tracing::info!("Listening on {}", addr);
let listener = tokio::net::TcpListener::bind(addr).await?;
axum::serve(listener, app.into_make_service_with_connect_info::<SocketAddr>()).await?;
Ok(())
}

133
gateway/src/middleware.rs Normal file
View File

@@ -0,0 +1,133 @@
use axum::{
extract::{Request, State},
http::StatusCode,
middleware::Next,
response::Response,
};
use common::init_pool;
use common::ProjectContext;
use moka::future::Cache;
use sqlx::PgPool;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::warn;
/// Shared state for the project-resolution middleware layer.
/// Cloned per-request by axum; all fields are cheap handles.
#[derive(Clone)]
pub struct ProjectMiddlewareState {
    /// Control-plane database holding the `projects` table.
    pub control_db: PgPool,
    /// Lazily-created per-tenant connection pools, keyed by database URL.
    pub tenant_pools: Arc<RwLock<HashMap<String, PgPool>>>,
    /// Memoized project lookups, keyed by project ref (capacity set by the caller).
    pub project_cache: Cache<String, ProjectContext>,
}
/// Resolves the target project for an incoming request and injects its
/// `ProjectContext` into the request extensions for downstream handlers.
///
/// The project ref comes from the `x-project-ref` header, falling back to
/// `"default"` when absent. Lookups hit `project_cache` first; on a miss the
/// control-plane `projects` table is queried and the result cached.
///
/// Errors: `400 Bad Request` on a non-UTF-8 header value, `404 Not Found`
/// when no matching project row exists, `500` on a database failure.
pub async fn resolve_project(
    State(state): State<ProjectMiddlewareState>,
    mut req: Request,
    next: Next,
) -> Result<Response, StatusCode> {
    // 1. Extract Project Ref from Header or Subdomain
    let project_ref = if let Some(val) = req.headers().get("x-project-ref") {
        val.to_str()
            .map_err(|_| StatusCode::BAD_REQUEST)?
            .to_string()
    } else {
        "default".to_string()
    };
    // 2. Check Cache
    if let Some(ctx) = state.project_cache.get(&project_ref).await {
        req.extensions_mut().insert(ctx);
        return Ok(next.run(req).await);
    }
    // 3. Fetch Project Config from DB
    // Use a common Record struct or map manually to avoid anonymous struct type mismatch in if/else
    #[derive(sqlx::FromRow)]
    struct ProjectRecord {
        db_url: String,
        jwt_secret: String,
        anon_key: Option<String>,
        service_role_key: Option<String>,
    }
    let record = if project_ref == "default" {
        // "default" maps to the first (and typically only) project row.
        sqlx::query_as::<_, ProjectRecord>(
            "SELECT db_url, jwt_secret, anon_key, service_role_key FROM projects LIMIT 1",
        )
        .fetch_optional(&state.control_db)
        .await
        .map_err(|e| {
            warn!("DB Error: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?
    } else {
        sqlx::query_as::<_, ProjectRecord>(
            "SELECT db_url, jwt_secret, anon_key, service_role_key FROM projects WHERE name = $1",
        )
        .bind(&project_ref)
        .fetch_optional(&state.control_db)
        .await
        .map_err(|e| {
            warn!("DB Error: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?
    };
    // let-else replaces the original is_none()/unwrap() pair: one check, no
    // panic path.
    let Some(project) = record else {
        warn!("Project not found: {}", project_ref);
        return Err(StatusCode::NOT_FOUND);
    };
    // 4. Construct ProjectContext
    let ctx = ProjectContext {
        project_ref: project_ref.clone(),
        db_url: project.db_url,
        jwt_secret: project.jwt_secret,
        anon_key: project.anon_key,
        service_role_key: project.service_role_key,
    };
    // 5. Update Cache (last use of project_ref, so it can be moved here).
    state.project_cache.insert(project_ref, ctx.clone()).await;
    // 6. Inject into Request
    req.extensions_mut().insert(ctx);
    Ok(next.run(req).await)
}
/// Looks up (or lazily creates) the sqlx pool for the project resolved by
/// `resolve_project` and attaches it to the request extensions.
///
/// Requires `resolve_project` to have run first; responds `500` when the
/// `ProjectContext` extension is missing or the pool cannot be created.
pub async fn inject_tenant_pool(
    State(state): State<ProjectMiddlewareState>,
    mut req: Request,
    next: Next,
) -> Result<Response, StatusCode> {
    let project_ctx = req
        .extensions()
        .get::<ProjectContext>()
        .cloned()
        .ok_or(StatusCode::INTERNAL_SERVER_ERROR)?;
    let url = project_ctx.db_url.clone();
    // Fast path: the read guard is a temporary, so it is released at the end
    // of this statement and never overlaps the write lock below.
    let cached = state.tenant_pools.read().await.get(&url).cloned();
    let pool = match cached {
        Some(existing) => existing,
        None => {
            let fresh = init_pool(&url).await.map_err(|e| {
                warn!("Failed to init tenant pool for {}: {}", url, e);
                StatusCode::INTERNAL_SERVER_ERROR
            })?;
            // If another task raced us to the map, keep whichever pool
            // landed first and let the spare one drop.
            let mut pools = state.tenant_pools.write().await;
            pools.entry(url).or_insert_with(|| fresh.clone()).clone()
        }
    };
    req.extensions_mut().insert(pool);
    Ok(next.run(req).await)
}

10
gateway/src/state.rs Normal file
View File

@@ -0,0 +1,10 @@
use sqlx::PgPool;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;
/// Top-level gateway state shared across requests.
#[derive(Clone)]
pub struct AppState {
    /// Pool for the control-plane (main) database.
    pub control_db: PgPool,
    /// Per-tenant connection pools, created on demand and keyed by database URL.
    pub tenant_pools: Arc<RwLock<HashMap<String, PgPool>>>,
}