Files
madbase/docker-compose.yml
Vlad Durnea 0179cc285d
Some checks failed
CI/CD Pipeline / lint (push) Successful in 3m45s
CI/CD Pipeline / integration-tests (push) Failing after 57s
CI/CD Pipeline / unit-tests (push) Failing after 1m1s
CI/CD Pipeline / e2e-tests (push) Has been skipped
CI/CD Pipeline / build (push) Has been skipped
M1 foundation: fix proxy, pool HTTP clients, split services, add ApiError + RLS
- Fix proxy body forwarding, round-robin load balancing, response streaming
- Pool reqwest::Client in proxy, control, and gateway (no per-request alloc)
- Harden CORS in gateway/main.rs (was allow_origin(Any), now uses ALLOWED_ORIGINS)
- Add common/src/error.rs: ApiError type with structured JSON responses
- Add common/src/rls.rs: RlsTransaction extractor for deduplicated RLS setup
- Fix tracing in all standalone binaries (EnvFilter instead of unused var)
- Dockerfile multi-stage: separate worker-runtime, control-runtime, proxy-runtime targets
- docker-compose.yml: split into worker/system/proxy services with health checks
- Fix Grafana port mapping in pillar-system (3030:3000)
- Add config/prometheus.yml and config/vmagent.yml
- Add .env.example with all required variables
- 55 tests pass (49 run + 6 ignored integration tests requiring external services)

Made-with: Cursor
2026-03-15 13:38:49 +02:00

188 lines
5.5 KiB
YAML

services:
  # ── Databases ─────────────────────────────────────────────────
  db:
    image: postgres:17-alpine
    container_name: madbase_db
    restart: unless-stopped
    environment:
      POSTGRES_USER: postgres
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
      POSTGRES_DB: postgres
    # wal_level=logical enables logical replication / CDC consumers.
    command: ["postgres", "-c", "wal_level=logical"]
    ports:
      - "5432:5432"
    volumes:
      - madbase_db_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U postgres"]
      interval: 5s
      timeout: 3s
      retries: 10

  control_db:
    image: postgres:17-alpine
    container_name: madbase_control_db
    restart: unless-stopped
    environment:
      POSTGRES_USER: admin
      POSTGRES_PASSWORD: ${CONTROL_DB_PASSWORD:-admin_password}
      POSTGRES_DB: madbase_control
    ports:
      # Host port 5433 to avoid clashing with the tenant db on 5432.
      - "5433:5432"
    volumes:
      - madbase_control_db_data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U admin"]
      interval: 5s
      timeout: 3s
      retries: 10

  # ── Infrastructure ────────────────────────────────────────────
  redis:
    image: redis:7-alpine
    container_name: madbase_redis
    restart: unless-stopped
    # AOF persistence so queued work survives a container restart.
    command: redis-server --appendonly yes
    ports:
      - "6379:6379"
    volumes:
      - madbase_redis_data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      timeout: 3s
      retries: 5

  minio:
    image: quay.io/minio/minio:RELEASE.2024-06-13T22-53-53Z
    container_name: madbase_minio
    restart: unless-stopped
    # 9000 = S3 API, 9001 = web console.
    command: server /data --console-address ":9001"
    ports:
      - "9000:9000"
      - "9001:9001"
    environment:
      MINIO_ROOT_USER: ${S3_ACCESS_KEY:-minioadmin}
      MINIO_ROOT_PASSWORD: ${S3_SECRET_KEY:-minioadmin}
    volumes:
      - madbase_minio_data:/data
    healthcheck:
      test: ["CMD", "mc", "ready", "local"]
      interval: 5s
      timeout: 3s
      retries: 5

  # ── Application ───────────────────────────────────────────────
  worker:
    build:
      context: .
      target: worker-runtime
    container_name: madbase_worker
    restart: unless-stopped
    ports:
      - "8002:8002"
    environment:
      DATABASE_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres
      DEFAULT_TENANT_DB_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres
      JWT_SECRET: ${JWT_SECRET}
      REDIS_URL: redis://redis:6379
      S3_ENDPOINT: http://minio:9000
      S3_ACCESS_KEY: ${S3_ACCESS_KEY:-minioadmin}
      S3_SECRET_KEY: ${S3_SECRET_KEY:-minioadmin}
      S3_BUCKET: ${S3_BUCKET:-madbase}
      S3_REGION: ${S3_REGION:-us-east-1}
      ALLOWED_ORIGINS: ${ALLOWED_ORIGINS:-http://localhost:3000,http://localhost:8000}
      RUST_LOG: ${RUST_LOG:-info}
    # Wait for healthy backends, not just container start.
    depends_on:
      db:
        condition: service_healthy
      redis:
        condition: service_healthy
      minio:
        condition: service_healthy

  system:
    build:
      context: .
      target: control-runtime
    container_name: madbase_system
    restart: unless-stopped
    ports:
      - "8001:8001"
    environment:
      DATABASE_URL: postgres://admin:${CONTROL_DB_PASSWORD:-admin_password}@control_db:5432/madbase_control
      DEFAULT_TENANT_DB_URL: postgres://postgres:${POSTGRES_PASSWORD:-postgres}@db:5432/postgres
      JWT_SECRET: ${JWT_SECRET}
      ADMIN_PASSWORD: ${ADMIN_PASSWORD}
      LOKI_URL: http://loki:3100
      ALLOWED_ORIGINS: ${ALLOWED_ORIGINS:-http://localhost:3000,http://localhost:8000,http://localhost:8001}
      RUST_LOG: ${RUST_LOG:-info}
    depends_on:
      db:
        condition: service_healthy
      control_db:
        condition: service_healthy

  proxy:
    build:
      context: .
      target: proxy-runtime
    container_name: madbase_proxy
    restart: unless-stopped
    ports:
      - "8000:8000"
    environment:
      CONTROL_UPSTREAM_URL: http://system:8001
      WORKER_UPSTREAM_URLS: http://worker:8002
      RUST_LOG: ${RUST_LOG:-info}
    depends_on:
      - system
      - worker

  # ── Observability ─────────────────────────────────────────────
  victoriametrics:
    image: victoriametrics/victoria-metrics:v1.93.0
    container_name: madbase_vm
    ports:
      - "8428:8428"
    volumes:
      - madbase_vm_data:/victoria-metrics-data
      - ./config/prometheus.yml:/etc/prometheus/prometheus.yml
    command:
      - "--storageDataPath=/victoria-metrics-data"
      - "--httpListenAddr=:8428"
      - "--promscrape.config=/etc/prometheus/prometheus.yml"
    # Lets scrape targets on the host be reached via host.docker.internal.
    extra_hosts:
      - "host.docker.internal:host-gateway"

  loki:
    image: grafana/loki:2.9.2
    container_name: madbase_loki
    ports:
      - "3100:3100"
    command: -config.file=/etc/loki/local-config.yaml
    volumes:
      - madbase_loki_data:/loki

  grafana:
    image: grafana/grafana:10.2.0
    container_name: madbase_grafana
    ports:
      # Host 3030 to avoid clashing with a local dev server on 3000.
      - "3030:3000"
    environment:
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD:-admin}
    volumes:
      - madbase_grafana_data:/var/lib/grafana
    depends_on:
      - victoriametrics
      - loki

# Named volumes — data survives container recreation.
volumes:
  madbase_db_data:
  madbase_control_db_data:
  madbase_minio_data:
  madbase_redis_data:
  madbase_vm_data:
  madbase_loki_data:
  madbase_grafana_data: