Some checks failed
CI/CD Pipeline / unit-tests (push) Failing after 1m16s
CI/CD Pipeline / integration-tests (push) Failing after 2m32s
CI/CD Pipeline / lint (push) Successful in 5m22s
CI/CD Pipeline / e2e-tests (push) Has been skipped
CI/CD Pipeline / build (push) Has been skipped
153 lines
5.4 KiB
Rust
153 lines
5.4 KiB
Rust
### /Users/vlad/Developer/madapes/madbase/gateway/src/worker.rs
|
|
```rust
|
|
use std::collections::HashMap;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;

use anyhow::Context as _;
use axum::http::header;
use axum::http::{HeaderValue, Method};
use axum::{middleware::from_fn_with_state, routing::get, Router};
use axum_prometheus::PrometheusMetricLayer;
use common::{init_pool, Config};
use sqlx::PgPool;
use tokio::sync::RwLock;
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
use tower_http::trace::TraceLayer;

use crate::middleware;
use crate::state::AppState;
|
|
18:
|
|
19: async fn wait_for_db(db_url: &str) -> PgPool {
|
|
20: loop {
|
|
21: match init_pool(db_url).await {
|
|
22: Ok(pool) => return pool,
|
|
23: Err(e) => {
|
|
24: tracing::warn!("Database not ready yet, retrying in 2s: {}", e);
|
|
25: tokio::time::sleep(Duration::from_secs(2)).await;
|
|
26: }
|
|
27: }
|
|
28: }
|
|
29: }
|
|
30:
|
|
31: pub async fn run() -> anyhow::Result<()> {
|
|
32: let config = Config::new().expect("Failed to load configuration");
|
|
33:
|
|
34: tracing::info!("Starting MadBase Worker...");
|
|
35:
|
|
36: let pool = wait_for_db(&config.database_url).await;
|
|
37:
|
|
38: let app_state = AppState {
|
|
39: control_db: pool.clone(),
|
|
40: tenant_pools: Arc::new(RwLock::new(HashMap::new())),
|
|
41: };
|
|
42:
|
|
43: let auth_state = auth::AuthState {
|
|
44: db: pool.clone(),
|
|
45: config: config.clone(),
|
|
46: };
|
|
47:
|
|
48: let data_state = data_api::handlers::DataState {
|
|
49: db: pool.clone(),
|
|
50: config: config.clone(),
|
|
51: };
|
|
52:
|
|
53: let default_tenant_db_url = std::env::var("DEFAULT_TENANT_DB_URL")
|
|
54: .expect("DEFAULT_TENANT_DB_URL must be set");
|
|
55: let tenant_pool = wait_for_db(&default_tenant_db_url).await;
|
|
56:
|
|
57: let mut tenant_config = config.clone();
|
|
58: tenant_config.database_url = default_tenant_db_url.clone();
|
|
59:
|
|
60: // Realtime Init
|
|
61: let (realtime_router, realtime_state) = realtime::init(tenant_pool.clone(), tenant_config.clone());
|
|
62:
|
|
63: // Replication Listener
|
|
64: let repl_config = tenant_config.clone();
|
|
65: let repl_tx = realtime_state.broadcast_tx.clone();
|
|
66: tokio::spawn(async move {
|
|
67: if let Err(e) = realtime::replication::start_replication_listener(repl_config, repl_tx).await {
|
|
68: tracing::error!("Replication listener failed: {}", e);
|
|
69: }
|
|
70: });
|
|
71:
|
|
72: // Storage Init
|
|
73: let storage_router = storage::init(pool.clone(), config.clone()).await;
|
|
74:
|
|
75: // Functions Init
|
|
76: let functions_runtime = Arc::new(
|
|
77: functions::runtime::WasmRuntime::new()
|
|
78: .expect("Failed to initialize WASM runtime")
|
|
79: );
|
|
80: let deno_runtime = Arc::new(functions::deno_runtime::DenoRuntime::new());
|
|
81: let functions_state = functions::FunctionsState {
|
|
82: db: pool.clone(),
|
|
83: config: config.clone(),
|
|
84: runtime: functions_runtime,
|
|
85: deno_runtime,
|
|
86: };
|
|
87:
|
|
88: // Auth Middleware State
|
|
89: let auth_middleware_state = auth::AuthMiddlewareState {
|
|
90: config: config.clone(),
|
|
91: };
|
|
92:
|
|
93: // Project Middleware State
|
|
94: let project_middleware_state = middleware::ProjectMiddlewareState {
|
|
95: control_db: app_state.control_db.clone(),
|
|
96: tenant_pools: app_state.tenant_pools.clone(),
|
|
97: project_cache: moka::future::Cache::new(100),
|
|
98: };
|
|
99:
|
|
100: // Construct Worker Routes
|
|
101: let tenant_routes = Router::new()
|
|
102: .nest("/auth/v1", auth::router().with_state(auth_state))
|
|
103: .nest("/rest/v1", data_api::router().with_state(data_state))
|
|
104: .nest("/realtime/v1", realtime_router)
|
|
105: .nest("/storage/v1", storage_router)
|
|
106: .nest("/functions/v1", functions::router(functions_state))
|
|
107: .layer(from_fn_with_state(
|
|
108: auth_middleware_state,
|
|
109: auth::auth_middleware,
|
|
110: ))
|
|
111: .layer(from_fn_with_state(
|
|
112: project_middleware_state.clone(),
|
|
113: middleware::inject_tenant_pool,
|
|
114: ))
|
|
115: .layer(from_fn_with_state(
|
|
116: project_middleware_state,
|
|
117: middleware::resolve_project,
|
|
118: ));
|
|
119:
|
|
120: let (prometheus_layer, metric_handle) = PrometheusMetricLayer::pair();
|
|
121:
|
|
122: let app = Router::new()
|
|
123: .route("/health", get(|| async { "OK" }))
|
|
124: .route("/metrics", get(|| async move { metric_handle.render() }))
|
|
125: .route("/ready", get(|| async { "Ready" }))
|
|
126: .nest("/", tenant_routes)
|
|
127: .layer(
|
|
128: CorsLayer::new()
|
|
129: .allow_origin(Any)
|
|
130: .allow_methods(Any)
|
|
131: .allow_headers(Any),
|
|
132: )
|
|
133: .layer(TraceLayer::new_for_http())
|
|
134: .layer(prometheus_layer);
|
|
135:
|
|
136: let port = std::env::var("WORKER_PORT")
|
|
137: .unwrap_or_else(|_| "8002".to_string())
|
|
138: .parse::<u16>()?;
|
|
139:
|
|
140: let addr = SocketAddr::from(([0, 0, 0, 0], port));
|
|
141: tracing::info!("Worker listening on {}", addr);
|
|
142:
|
|
143: let listener = tokio::net::TcpListener::bind(addr).await?;
|
|
144: axum::serve(listener, app.into_make_service_with_connect_info::<SocketAddr>()).await?;
|
|
145:
|
|
146: Ok(())
|
|
147: }
|
|
```
|