wip: milestone 0 fixes
Some checks failed
CI/CD Pipeline / unit-tests (push) Failing after 1m16s
CI/CD Pipeline / integration-tests (push) Failing after 2m32s
CI/CD Pipeline / lint (push) Successful in 5m22s
CI/CD Pipeline / e2e-tests (push) Has been skipped
CI/CD Pipeline / build (push) Has been skipped

This commit is contained in:
2026-03-15 12:35:42 +02:00
parent 6708cf28a7
commit cffdf8af86
61266 changed files with 4511646 additions and 1938 deletions

132
templates/all-in-one.yaml Normal file
View File

@@ -0,0 +1,132 @@
# Template: all-in-one — the complete MadBase stack on a single host,
# intended for development, testing, or a production MVP.
# NOTE(review): indentation reconstructed (the diff capture lost leading
# whitespace) — confirm nesting against the template loader's schema.
id: all-in-one
name: All-in-One Development Node
description: Complete MadBase stack on a single server for development/testing
version: "1.0"  # quoted so the version stays a string (a future 1.10 would otherwise parse as float 1.1)
min_hetzner_plan: CX41
estimated_monthly_cost: 25.60
services:
  # --- State pillar -------------------------------------------------------
  - id: postgresql
    name: PostgreSQL
    image: registry.gitlab.com/postgres-ai/postgresql-autobase/patroni:3.0.2
    ports: ["5432:5432", "8008:8008"]  # 5432 = Postgres, 8008 = Patroni REST API
    resource_profile: balanced
  - id: etcd
    name: etcd
    image: quay.io/coreos/etcd:v3.5.9
    ports: ["2379:2379", "2380:2380"]  # 2379 = client, 2380 = peer
    resource_profile: minimal
  - id: haproxy
    name: HAProxy
    image: haproxy:2.8-alpine
    ports: ["5433:5433"]  # client-facing Postgres endpoint
    resource_profile: minimal
  - id: redis
    name: Redis
    image: redis:7-alpine
    ports: ["6379:6379"]
    resource_profile: minimal
  # --- Object storage (optional, self-hosted) -----------------------------
  - id: minio
    name: MinIO
    image: quay.io/minio/minio:latest
    ports: ["9000:9000", "9001:9001"]  # 9000 = S3 API, 9001 = web console
    command: ["server", "/data", "--console-address", ":9001"]
    environment:
      MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minioadmin}
      MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-minioadmin}
    volumes:
      - minio_data:/data
    resource_profile: balanced
    optional: true
    condition: "USE_SELF_HOSTED_STORAGE == true"  # quoted: keep the expression a single plain string
  # --- Application tier ---------------------------------------------------
  - id: worker
    name: MadBase Worker
    image: madbase/worker:latest
    ports: ["8002:8002"]
    replicas: 2
    resource_profile: cpu_intensive
  - id: proxy
    name: Gateway Proxy
    image: madbase/proxy:latest
    ports: ["8080:8080"]
    resource_profile: balanced
  - id: control
    name: Control Plane API
    image: madbase/control:latest
    ports: ["8001:8001"]
    resource_profile: balanced
  # --- Observability ------------------------------------------------------
  - id: victoria
    name: VictoriaMetrics
    image: victoriametrics/victoria-metrics:latest
    ports: ["8428:8428"]
    volumes:
      - vm_data:/victoria-metrics-data
    resource_profile: balanced
  - id: loki
    name: Loki
    image: grafana/loki:latest
    ports: ["3100:3100"]
    volumes:
      - loki_data:/loki
    resource_profile: balanced
  - id: vmagent
    name: VictoriaMetrics Agent
    image: victoriametrics/vmagent:latest
    ports: ["8429:8429"]
    resource_profile: minimal
  - id: promtail
    name: Promtail
    image: grafana/promtail:latest
    volumes:
      - /var/log:/var/log:ro
      - ./config/promtail.yml:/etc/promtail/config.yml:ro
    resource_profile: minimal
  - id: grafana
    name: Grafana
    image: grafana/grafana:latest
    # NOTE(review): Grafana listens on 3000 by default; "3030:3030" only works
    # if the container is also configured with GF_SERVER_HTTP_PORT=3030 —
    # confirm, otherwise this mapping should be "3030:3000".
    ports: ["3030:3030"]
    environment:
      GF_SECURITY_ADMIN_USER: ${GRAFANA_USER:-admin}
      GF_SECURITY_ADMIN_PASSWORD: ${GRAFANA_PASSWORD}  # no fallback default — must be supplied
      GF_SERVER_ROOT_URL: http://localhost:3030
      GF_INSTALL_PLUGINS: grafana-piechart-panel
    volumes:
      - grafana_data:/var/lib/grafana
    resource_profile: balanced
requirements:
  min_nodes: 1
  max_nodes: 1
  supports_ha: false  # everything shares one host — no HA by definition
  recommended_deployment: "Single server for development, testing, or production MVP"
notes: |
  This template runs ALL services on one machine.
  Not recommended for production with significant traffic.
  Upgrade to dedicated templates when:
  - CPU usage > 70% sustained
  - Memory usage > 80%
  - Need true HA for database
# Logical network segmentation by exposed port.
networks:
  frontend:
    ports: [8080, 3030]
  backend:
    ports: [8001, 8002, 5433, 6379]
  monitoring:
    ports: [8428, 3100, 8429]
  storage:
    ports: [9000, 9001]

View File

@@ -0,0 +1,31 @@
# Template: control-plane-node — management APIs and Studio UI.
# NOTE(review): indentation reconstructed (the diff capture lost leading
# whitespace) — confirm nesting against the template loader's schema.
id: control-plane-node
name: Control Plane Node
description: Management APIs and Studio UI
version: "1.0"  # quoted so the version stays a string, not a float
min_hetzner_plan: CX11
estimated_monthly_cost: 3.69
services:
  - id: proxy
    name: Gateway Proxy
    image: madbase/proxy:latest
    ports: ["8080:8080"]
  - id: control
    name: Control Plane API
    image: madbase/control:latest
    ports: ["8001:8001"]
  - id: grafana
    name: Grafana
    image: grafana/grafana:latest
    ports: ["3030:3030"]
    optional: true
  - id: keepalived
    name: Keepalived
    image: osixia/keepalived:latest
    network_mode: host  # VRRP traffic needs direct host networking
    optional: true
    condition: "HA_MODE == true"  # quoted: keep the expression a single plain string
requirements:
  min_nodes: 1
  max_nodes: 2
  supports_ha: true
  floating_ip_required: true  # presumably the IP keepalived fails over between nodes — verify

28
templates/db-node.yaml Normal file
View File

@@ -0,0 +1,28 @@
# Template: db-node — dedicated HA State Pillar (PostgreSQL + Redis).
# NOTE(review): indentation reconstructed (the diff capture lost leading
# whitespace) — confirm nesting against the template loader's schema.
id: db-node
name: State Node (PostgreSQL + Redis)
description: High-availability State Pillar with PostgreSQL and Redis for shared caching
version: "2.0"  # quoted so the version stays a string, not a float
min_hetzner_plan: CX21
estimated_monthly_cost: 6.94
services:
  - id: postgresql
    name: PostgreSQL
    image: registry.gitlab.com/postgres-ai/postgresql-autobase/patroni:3.0.2
    ports: ["5432:5432", "8008:8008"]  # 5432 = Postgres, 8008 = Patroni REST API
  - id: etcd
    name: etcd
    image: quay.io/coreos/etcd:v3.5.9
    ports: ["2379:2379", "2380:2380"]  # 2379 = client, 2380 = peer
  - id: redis
    name: Redis
    image: redis:7-alpine
    # No host port published here: HAProxy below already binds 6379 on the
    # host, and the original duplicate "6379:6379" mapping on both services
    # would collide at deploy time. Clients reach Redis through HAProxy.
    command: redis-server --appendonly yes --maxmemory 256mb --maxmemory-policy allkeys-lru
  - id: haproxy
    name: HAProxy
    image: haproxy:2.8-alpine
    # 5433 = Postgres frontend, 6379 = Redis frontend,
    # 7000 presumably the HAProxy stats page — verify against haproxy.cfg.
    ports: ["5433:5433", "6379:6379", "7000:7000"]
requirements:
  min_nodes: 3  # 3-node floor matches etcd/Patroni quorum requirements
  max_nodes: 7
  supports_ha: true

View File

@@ -0,0 +1,24 @@
# Template: monitoring-node — dedicated centralized metrics and logging.
# NOTE(review): indentation reconstructed (the diff capture lost leading
# whitespace) — confirm nesting against the template loader's schema.
id: monitoring-node
name: Monitoring Node
description: Centralized metrics and logging
version: "1.0"  # quoted so the version stays a string, not a float
min_hetzner_plan: CX11
estimated_monthly_cost: 3.69
services:
  - id: victoriametrics
    name: VictoriaMetrics
    image: victoriametrics/victoria-metrics:latest
    ports: ["8428:8428"]
  - id: loki
    name: Loki
    image: grafana/loki:latest
    ports: ["3100:3100"]
  - id: alertmanager
    name: Alertmanager
    image: prom/alertmanager:latest
    ports: ["9093:9093"]
    optional: true
requirements:
  min_nodes: 1
  max_nodes: 3
  supports_ha: true

View File

@@ -0,0 +1,101 @@
# Template: worker-db-combo — worker and database co-located on one node
# for small (2-3 server) deployments.
# NOTE(review): indentation reconstructed (the diff capture lost leading
# whitespace) — confirm nesting against the template loader's schema.
id: worker-db-combo
name: Worker + Database Combo
description: Combined worker and database node for smaller deployments (2-3 servers)
version: "1.0"  # quoted so the version stays a string, not a float
min_hetzner_plan: CX31
estimated_monthly_cost: 14.21
services:
  - id: postgresql
    name: PostgreSQL
    image: registry.gitlab.com/postgres-ai/postgresql-autobase/patroni:3.0.2
    ports: ["5432:5432", "8008:8008"]  # 5432 = Postgres, 8008 = Patroni REST API
    environment:
      POSTGRES_USER: ${DB_USER}
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_DB: madbase
      PATRONI_SCOPE: madbase-cluster
      PATRONI_NAME: node1
      PATRONI_RESTAPI_LISTEN: "0.0.0.0:8008"    # quoted: keep host:port values as strings
      PATRONI_POSTGRESQL_LISTEN: "0.0.0.0:5432"
      PATRONI_POSTGRESQL_DATA_DIR: /var/lib/postgresql/data
      PATRONI_ETCD3_HOSTS: http://localhost:2379
    volumes:
      - postgres_data:/var/lib/postgresql/data
    resource_profile: balanced
  - id: etcd
    name: etcd
    image: quay.io/coreos/etcd:v3.5.9
    ports: ["2379:2379", "2380:2380"]  # 2379 = client, 2380 = peer
    environment:
      ETCD_NAME: node1
      ETCD_INITIAL_ADVERTISE_PEER_URLS: http://localhost:2380
      ETCD_LISTEN_PEER_URLS: http://0.0.0.0:2380
      ETCD_LISTEN_CLIENT_URLS: http://0.0.0.0:2379
      ETCD_ADVERTISE_CLIENT_URLS: http://localhost:2379
      ETCD_INITIAL_CLUSTER: node1=http://localhost:2380
      ETCD_INITIAL_CLUSTER_STATE: new
      ETCD_INITIAL_CLUSTER_TOKEN: madbase-etcd
    volumes:
      - etcd_data:/etcd-data
    resource_profile: minimal
  - id: haproxy
    name: HAProxy
    image: haproxy:2.8-alpine
    ports: ["5433:5433"]  # client-facing Postgres endpoint
    volumes:
      - ./config/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
    resource_profile: minimal
    depends_on:
      - postgresql
  - id: worker
    name: MadBase Worker
    image: madbase/worker:latest
    ports: ["8002:8002"]
    environment:
      WORKER_PORT: "8002"  # quoted: env values stay strings, not ints
      # Connects via HAProxy (5433), not directly to Postgres (5432).
      DATABASE_URL: postgresql://${DB_USER}:${DB_PASSWORD}@localhost:5433/madbase
      REDIS_URL: redis://${REDIS_HOST}:6379
      S3_ENDPOINT: ${S3_ENDPOINT}
      S3_ACCESS_KEY: ${S3_ACCESS_KEY}
      S3_SECRET_KEY: ${S3_SECRET_KEY}
      S3_BUCKET: ${S3_BUCKET}
      S3_REGION: ${S3_REGION}
    resource_profile: cpu_intensive
    depends_on:
      - postgresql
  - id: vmagent
    name: VictoriaMetrics Agent
    image: victoriametrics/vmagent:latest
    ports: ["8429:8429"]
    # Ships scraped metrics to a remote VictoriaMetrics (host "victoriametrics");
    # this template does not run one itself.
    command: ["--promscrape.config=/etc/vmagent/prometheus.yml", "--remoteWrite.url=http://victoriametrics:8428/api/v1/write"]
    volumes:
      - ./config/vmagent.yml:/etc/vmagent/prometheus.yml:ro
    resource_profile: minimal
requirements:
  min_nodes: 1
  max_nodes: 2
  supports_ha: true
  recommended_deployment: "2-3 servers: 1x worker-db-combo + 1x control-plane + optional monitoring node"
scaling:
  can_add_workers: true
  can_split_database: true
  upgrade_path: "Migrate to dedicated db-node.yaml + worker-node.yaml when load increases"
# Limits referenced by each service's resource_profile key above.
resource_profiles:
  minimal:
    cpu_limit: "0.5"
    memory_limit: "512Mi"
  balanced:
    cpu_limit: "2"
    memory_limit: "2Gi"
  cpu_intensive:
    cpu_limit: "4"
    memory_limit: "4Gi"

View File

@@ -0,0 +1,69 @@
# Template: worker-monitor-combo — worker with co-located metrics/log storage.
# NOTE(review): indentation reconstructed (the diff capture lost leading
# whitespace) — confirm nesting against the template loader's schema.
id: worker-monitor-combo
name: Worker + Monitoring Combo
description: Worker node with local VictoriaMetrics and Loki for smaller deployments
version: "1.0"  # quoted so the version stays a string, not a float
min_hetzner_plan: CX21
estimated_monthly_cost: 6.94
services:
  - id: worker
    name: MadBase Worker
    image: madbase/worker:latest
    ports: ["8002:8002"]
    environment:
      WORKER_PORT: "8002"  # quoted: env values stay strings, not ints
      DATABASE_URL: ${DATABASE_URL}
      REDIS_URL: ${REDIS_URL}
      S3_ENDPOINT: ${S3_ENDPOINT}
      S3_ACCESS_KEY: ${S3_ACCESS_KEY}
      S3_SECRET_KEY: ${S3_SECRET_KEY}
      S3_BUCKET: ${S3_BUCKET}
      S3_REGION: ${S3_REGION}
    resource_profile: cpu_intensive
  - id: victoria
    name: VictoriaMetrics
    image: victoriametrics/victoria-metrics:latest
    ports: ["8428:8428"]
    volumes:
      - vm_data:/victoria-metrics-data
    resource_profile: balanced
  - id: loki
    name: Loki
    image: grafana/loki:latest
    ports: ["3100:3100"]
    volumes:
      - loki_data:/loki
    resource_profile: balanced
  - id: vmagent
    name: VictoriaMetrics Agent
    image: victoriametrics/vmagent:latest
    ports: ["8429:8429"]
    # Writes to the local VictoriaMetrics service above (localhost:8428).
    command: ["--promscrape.config=/etc/vmagent/prometheus.yml", "--remoteWrite.url=http://localhost:8428/api/v1/write"]
    volumes:
      - ./config/vmagent.yml:/etc/vmagent/prometheus.yml:ro
    resource_profile: minimal
  - id: promtail
    name: Promtail
    image: grafana/promtail:latest
    volumes:
      - /var/log:/var/log:ro
      - ./config/promtail.yml:/etc/promtail/config.yml:ro
    resource_profile: minimal
requirements:
  min_nodes: 1
  max_nodes: 3
  supports_ha: true
  recommended_deployment: "For 2-3 server deployments with monitoring on worker node"
notes: |
  VictoriaMetrics and Loki run on the same node as worker.
  Other workers can send metrics/logs to this node.
  Upgrade to dedicated monitoring node when:
  - Worker CPU > 60% (monitoring competes for resources)
  - Need to scale workers horizontally

View File

@@ -0,0 +1,26 @@
# Template: worker-node — horizontally scalable API workers.
# NOTE(review): indentation reconstructed (the diff capture lost leading
# whitespace) — confirm nesting against the template loader's schema.
id: worker-node
name: Worker Node
description: API worker nodes for horizontal scaling
version: "1.0"  # quoted so the version stays a string, not a float
min_hetzner_plan: CX11
estimated_monthly_cost: 3.69
services:
  - id: worker
    name: MadBase Worker
    image: madbase/worker:latest
    replicas: 1
    # Port range reserves host ports for additional replicas as they scale up.
    ports: ["8002-8020:8002-8020"]
  - id: vmagent
    name: VictoriaMetrics Agent
    image: victoriametrics/vmagent:latest
    ports: ["8429:8429"]
    optional: true
requirements:
  min_nodes: 1
  max_nodes: 20
  supports_ha: true
auto_scale:
  enabled: true
  metric: cpu_usage_percent
  scale_up_threshold: 70    # add a node above 70% sustained CPU
  scale_down_threshold: 20  # remove a node below 20%