chore: full stack stability and migration fixes, plus react UI progress
Some checks failed
CI / podman-build (push) Has been cancelled
CI / rust (push) Has been cancelled

This commit is contained in:
2026-03-18 09:01:38 +02:00
parent 38cab8c246
commit a66d908eff
142 changed files with 12210 additions and 3402 deletions

View File

@@ -1,15 +1,23 @@
use axum::{
body::Body,
extract::{Request, State},
http::StatusCode,
extract::{Request, State, ws::WebSocketUpgrade},
http::{StatusCode, HeaderMap},
response::Response,
routing::get,
Router,
};
use axum::extract::ws::{Message, WebSocket};
use tokio_tungstenite::{
tungstenite::protocol::Message as TungsteniteMessage,
};
use futures::{SinkExt, StreamExt};
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{error, info, debug};
use moka::future::Cache;
use common::{init_pool, ProjectContext};
use sqlx::PgPool;
#[derive(Clone, Debug)]
struct Upstream {
@@ -34,10 +42,12 @@ struct ProxyState {
worker_upstreams: Arc<RwLock<Vec<Upstream>>>,
current_worker_index: Arc<RwLock<usize>>,
http_client: reqwest::Client,
control_db: PgPool,
project_cache: Cache<String, ProjectContext>,
}
impl ProxyState {
fn new(control_url: String, worker_urls: Vec<String>) -> Self {
fn new(control_url: String, worker_urls: Vec<String>, control_db: PgPool) -> Self {
let worker_upstreams = worker_urls
.into_iter()
.map(|url| Upstream::new(format!("worker-{}", url), url))
@@ -55,6 +65,8 @@ impl ProxyState {
worker_upstreams: Arc::new(RwLock::new(worker_upstreams)),
current_worker_index: Arc::new(RwLock::new(0)),
http_client,
control_db,
project_cache: Cache::new(100),
}
}
@@ -88,6 +100,24 @@ impl ProxyState {
loop {
interval.tick().await;
// Optional Dynamic Worker Discovery via Control Plane Polling
// You can replace this whole background loop seamlessly to query the control plane
let control_scan_url = format!("{}/workers", self.control_upstream.url);
if let Ok(res) = self.http_client.get(&control_scan_url).send().await {
if let Ok(workers) = res.json::<Vec<String>>().await {
let mut current = self.worker_upstreams.write().await;
// Retain healthy upstreams or register new ones
let updated: Vec<Upstream> = workers.into_iter().map(|url| {
if let Some(existing) = current.iter().find(|w| w.url == url) {
existing.clone()
} else {
Upstream::new(format!("worker-{}", url), url)
}
}).collect();
*current = updated;
}
}
// Check workers
let worker_upstreams = self.worker_upstreams.read().await;
for worker in worker_upstreams.iter() {
@@ -130,6 +160,197 @@ impl ProxyState {
}
}
/// Resolve the `ProjectContext` for an incoming request.
///
/// The project is chosen by the `x-project-ref` header, falling back to
/// `"default"` when the header is absent. `"default"` selects an arbitrary
/// project row (`LIMIT 1` with no `ORDER BY` — order is unspecified).
/// Resolved contexts are memoised in `state.project_cache`.
///
/// # Errors
/// * `BAD_REQUEST` — the header value is not a valid visible-ASCII string.
/// * `NOT_FOUND` — no matching project row exists.
/// * `INTERNAL_SERVER_ERROR` — the control-plane DB query failed.
async fn resolve_project_from_headers(
    state: &ProxyState,
    headers: &HeaderMap,
) -> Result<ProjectContext, StatusCode> {
    let project_ref = match headers.get("x-project-ref") {
        Some(val) => val
            .to_str()
            .map_err(|_| StatusCode::BAD_REQUEST)?
            .to_string(),
        None => "default".to_string(),
    };

    // Fast path: already resolved recently.
    if let Some(ctx) = state.project_cache.get(&project_ref).await {
        return Ok(ctx);
    }

    #[derive(sqlx::FromRow)]
    struct ProjectRecord {
        db_url: String,
        jwt_secret: String,
        anon_key: Option<String>,
        service_role_key: Option<String>,
    }

    // Build the query once; only the WHERE clause differs between the
    // "default" (any project) and named-project cases. Both branches yield
    // the same `QueryAs` type, so the fetch/error-mapping pipeline is shared.
    let query = if project_ref == "default" {
        sqlx::query_as::<_, ProjectRecord>(
            "SELECT db_url, jwt_secret, anon_key, service_role_key FROM projects LIMIT 1",
        )
    } else {
        sqlx::query_as::<_, ProjectRecord>(
            "SELECT db_url, jwt_secret, anon_key, service_role_key FROM projects WHERE name = $1",
        )
        .bind(&project_ref)
    };

    let record = query
        .fetch_optional(&state.control_db)
        .await
        .map_err(|e| {
            error!("DB Error: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?;

    // `let else` instead of is_none()/unwrap(): no panic path.
    let Some(project) = record else {
        error!("Project not found: {}", project_ref);
        return Err(StatusCode::NOT_FOUND);
    };

    let ctx = ProjectContext {
        project_ref: project_ref.clone(),
        db_url: project.db_url,
        redis_url: None,
        jwt_secret: project.jwt_secret,
        anon_key: project.anon_key,
        service_role_key: project.service_role_key,
    };
    // `project_ref` is moved into the cache key; only the context is cloned.
    state.project_cache.insert(project_ref, ctx.clone()).await;
    Ok(ctx)
}
async fn proxy_websocket(
State(state): State<ProxyState>,
ws: WebSocket,
headers: HeaderMap,
) {
let result = async {
let request_id = headers.get("x-request-id")
.and_then(|v| v.to_str().ok())
.map(|s| s.to_string())
.unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
let span = tracing::info_span!("proxy_websocket", request_id = %request_id);
let _enter = span.enter();
let _project_ctx = resolve_project_from_headers(&state, &headers).await?;
let upstream = state.get_next_healthy_worker().await.ok_or_else(|| {
error!("No healthy workers available");
StatusCode::SERVICE_UNAVAILABLE
})?;
let target_url_str = format!("{}/realtime/v1/websocket", upstream.url.replace("http://", "ws://"));
debug!("Proxying WebSocket -> {}", target_url_str);
use tokio_tungstenite::tungstenite::client::IntoClientRequest;
let mut req = target_url_str.clone().into_client_request().map_err(|e| {
error!("Failed to create WebSocket request: {}", e);
StatusCode::BAD_GATEWAY
})?;
for (name, value) in headers.iter() {
let name_str = name.as_str();
if name_str == "apikey" || name_str == "authorization" || name_str == "x-project-ref" {
info!("Forwarding header: {}", name_str);
req.headers_mut().insert(name, value.clone());
}
}
info!("Connecting to worker WebSocket at: {}", target_url_str);
let (server_ws, response) = tokio_tungstenite::connect_async(req)
.await
.map_err(|e| {
error!("Failed to connect to WebSocket upstream {}: {}", upstream.name, e);
StatusCode::BAD_GATEWAY
})?;
info!("Worker WebSocket connection established. Response status: {:?}", response.status());
let (ws_sender, ws_receiver) = ws.split();
let (server_sink, server_stream) = server_ws.split();
let tx_to_client = async move {
let mut ws_receiver = ws_receiver;
let mut server_sink = server_sink;
debug!("Starting tx_to_client loop");
while let Some(msg_result) = ws_receiver.next().await {
match msg_result {
Ok(msg) => {
debug!("Received message from client: {:?}", msg);
let tungstenite_msg = match msg {
Message::Text(text) => TungsteniteMessage::Text(text),
Message::Binary(data) => TungsteniteMessage::Binary(data),
Message::Close(_) => TungsteniteMessage::Close(None),
Message::Ping(data) => TungsteniteMessage::Ping(data),
Message::Pong(data) => TungsteniteMessage::Pong(data),
};
if server_sink.send(tungstenite_msg).await.is_err() {
debug!("Failed to send to upstream, closing tx_to_client");
break;
}
}
Err(e) => {
error!("Error receiving from client WebSocket: {}", e);
break;
}
}
}
debug!("tx_to_client loop ended");
};
let tx_to_upstream = async move {
let mut ws_sender = ws_sender;
let mut server_stream = server_stream;
debug!("Starting tx_to_upstream loop");
while let Some(msg_result) = server_stream.next().await {
match msg_result {
Ok(msg) => {
debug!("Received message from upstream: {:?}", msg);
let axum_msg = match msg {
TungsteniteMessage::Text(text) => Message::Text(text),
TungsteniteMessage::Binary(data) => Message::Binary(data),
TungsteniteMessage::Close(_) => Message::Close(None),
TungsteniteMessage::Ping(data) => Message::Ping(data),
TungsteniteMessage::Pong(data) => Message::Pong(data),
_ => continue,
};
if ws_sender.send(axum_msg).await.is_err() {
debug!("Failed to send to client, closing tx_to_upstream");
break;
}
}
Err(e) => {
error!("Error receiving from upstream WebSocket: {}", e);
break;
}
}
}
debug!("tx_to_upstream loop ended");
};
tokio::select! {
_ = tx_to_client => {},
_ = tx_to_upstream => {},
}
Ok::<(), StatusCode>(())
};
if let Err(e) = result.await {
error!("WebSocket proxy error: {:?}", e);
}
}
async fn proxy_request(
State(state): State<ProxyState>,
req: Request,
@@ -166,8 +387,15 @@ async fn forward_request(
req: Request,
upstream: Upstream,
) -> Result<Response, StatusCode> {
// Extract body before consuming the request (1.1.1)
let (parts, body) = req.into_parts();
let request_id = parts.headers.get("x-request-id")
.and_then(|v| v.to_str().ok())
.map(|s| s.to_string())
.unwrap_or_else(|| uuid::Uuid::new_v4().to_string());
let span = tracing::info_span!("forward_request", request_id = %request_id, path = %parts.uri.path());
let _enter = span.enter();
let body_bytes = axum::body::to_bytes(body, 1024 * 1024 * 100) // 100MB limit
.await
.map_err(|_| StatusCode::BAD_REQUEST)?;
@@ -196,6 +424,7 @@ async fn forward_request(
request_builder = request_builder.header(name.as_str(), v);
}
}
request_builder = request_builder.header("x-request-id", &request_id);
// Attach body (1.1.1)
let request_builder = request_builder.body(body_bytes);
@@ -226,6 +455,7 @@ async fn forward_request(
}
response_builder
.header("x-request-id", &request_id)
.body(body)
.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)
}
@@ -238,10 +468,13 @@ pub async fn run() -> anyhow::Result<()> {
info!("Starting MadBase Proxy...");
let control_url = std::env::var("CONTROL_UPSTREAM_URL")
.unwrap_or_else(|_| "http://control:8001".to_string());
.unwrap_or_else(|_| "http://system:8001".to_string());
let control_db_url = std::env::var("CONTROL_DB_URL")
.unwrap_or_else(|_| "postgres://admin:admin_password@localhost:5433/madbase_control".to_string());
let worker_urls_str = std::env::var("WORKER_UPSTREAM_URLS")
.unwrap_or_else(|_| "http://worker1:8002".to_string());
.unwrap_or_else(|_| "http://worker:8002".to_string());
let worker_urls: Vec<String> = worker_urls_str
.split(',')
@@ -250,9 +483,12 @@ pub async fn run() -> anyhow::Result<()> {
.collect();
info!("Control upstream: {}", control_url);
info!("Control DB: {}", control_db_url);
info!("Worker upstreams: {:?}", worker_urls);
let state = ProxyState::new(control_url, worker_urls);
let control_db = init_pool(&control_db_url).await?;
let state = ProxyState::new(control_url, worker_urls, control_db);
// Start health check loop in background
let state_clone = state.clone();
@@ -262,6 +498,12 @@ pub async fn run() -> anyhow::Result<()> {
let app = Router::new()
.route("/health", get(health_check))
.route("/realtime/v1/websocket",
get(|ws: WebSocketUpgrade, State(state): State<ProxyState>, req: Request| async move {
let headers = req.headers().clone();
ws.on_upgrade(move |socket| proxy_websocket(State(state.clone()), socket, headers))
})
)
.fallback(proxy_request)
.with_state(state);
@@ -291,9 +533,19 @@ mod tests {
async fn test_proxy_round_robin() {
let _guard = ENV_LOCK.lock().unwrap();
let control_db = PgPool::connect("postgres://postgres:postgres@localhost:5432/test").await.ok();
let dummy_db_url = "postgres://postgres:postgres@localhost:5432/test";
let control_pool = if let Some(pool) = control_db {
pool
} else {
let pool = sqlx::PgPool::connect(dummy_db_url).await.unwrap();
pool
};
let state = ProxyState::new(
"http://control:8001".to_string(),
vec!["http://worker1:8002".to_string(), "http://worker2:8002".to_string()]
vec!["http://worker1:8002".to_string(), "http://worker2:8002".to_string()],
control_pool,
);
// Mark all as healthy
@@ -315,9 +567,11 @@ mod tests {
#[tokio::test]
async fn test_proxy_single_http_client() {
let control_pool = sqlx::PgPool::connect("postgres://postgres:postgres@localhost:5432/test").await.unwrap();
let state = ProxyState::new(
"http://control:8001".to_string(),
vec!["http://worker1:8002".to_string()]
vec!["http://worker1:8002".to_string()],
control_pool,
);
// Verify http_client is created and usable