added initial roadmap and implementation

migrations/20240101000000_init_auth.sql (new file)
@@ -0,0 +1,23 @@
CREATE EXTENSION IF NOT EXISTS "uuid-ossp";

CREATE TABLE users (
    id UUID PRIMARY KEY DEFAULT uuid_generate_v4(),
    email TEXT UNIQUE NOT NULL,
    encrypted_password TEXT NOT NULL,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    last_sign_in_at TIMESTAMPTZ,
    raw_app_meta_data JSONB DEFAULT '{}'::jsonb,
    raw_user_meta_data JSONB DEFAULT '{}'::jsonb,
    is_super_admin BOOLEAN DEFAULT false,
    confirmed_at TIMESTAMPTZ,
    email_confirmed_at TIMESTAMPTZ,
    phone TEXT,
    phone_confirmed_at TIMESTAMPTZ,
    confirmation_token TEXT,
    recovery_token TEXT,
    email_change_token_new TEXT,
    email_change TEXT
);

CREATE INDEX users_email_idx ON users (email);
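
For reference, a minimal smoke test against this schema (a sketch with hypothetical values; password hashing is assumed to happen in application code before the INSERT):

-- Hypothetical: insert a user and look it up via the email index.
INSERT INTO users (email, encrypted_password)
VALUES ('demo@example.com', '$2b$12$hypothetical-bcrypt-hash');

SELECT id, email, created_at FROM users WHERE email = 'demo@example.com';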

migrations/20240101000001_refresh_tokens.sql (new file)
@@ -0,0 +1,14 @@
CREATE TABLE IF NOT EXISTS refresh_tokens (
    id BIGSERIAL PRIMARY KEY,
    token TEXT NOT NULL UNIQUE,
    user_id UUID NOT NULL REFERENCES users(id) ON DELETE CASCADE,
    revoked BOOLEAN NOT NULL DEFAULT false,
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    parent TEXT,
    session_id UUID
);

CREATE INDEX IF NOT EXISTS refresh_tokens_token_idx ON refresh_tokens(token);
CREATE INDEX IF NOT EXISTS refresh_tokens_user_id_idx ON refresh_tokens(user_id);
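
A sketch of how token rotation might use the parent column (an assumed convention, not enforced by the schema: the replacement token records the token it superseded, and both steps share a transaction):

-- Hypothetical rotation: revoke the old token and issue a replacement
-- that records its predecessor in `parent`.
BEGIN;
UPDATE refresh_tokens SET revoked = true, updated_at = NOW()
WHERE token = 'old-token-value';

INSERT INTO refresh_tokens (token, user_id, parent, session_id)
SELECT 'new-token-value', user_id, token, session_id
FROM refresh_tokens
WHERE token = 'old-token-value';
COMMIT;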

migrations/20240101000002_storage_schema.sql (new file)
@@ -0,0 +1,72 @@
-- Create roles if they don't exist
DO $$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'authenticated') THEN
        CREATE ROLE authenticated NOLOGIN;
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'anon') THEN
        CREATE ROLE anon NOLOGIN;
    END IF;
END
$$;

CREATE SCHEMA IF NOT EXISTS storage;

-- Grant usage
GRANT USAGE ON SCHEMA storage TO authenticated, anon;
GRANT USAGE ON SCHEMA public TO authenticated, anon;

CREATE TABLE IF NOT EXISTS storage.buckets (
    id TEXT PRIMARY KEY,
    name TEXT NOT NULL,
    public BOOLEAN DEFAULT false,
    owner UUID REFERENCES public.users(id),
    created_at TIMESTAMPTZ DEFAULT now(),
    updated_at TIMESTAMPTZ DEFAULT now()
);

CREATE TABLE IF NOT EXISTS storage.objects (
    id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
    bucket_id TEXT REFERENCES storage.buckets(id),
    name TEXT NOT NULL,
    owner UUID REFERENCES public.users(id),
    created_at TIMESTAMPTZ DEFAULT now(),
    updated_at TIMESTAMPTZ DEFAULT now(),
    last_accessed_at TIMESTAMPTZ DEFAULT now(),
    metadata JSONB,
    UNIQUE (bucket_id, name)
);

-- Grant table access (RLS will filter rows)
GRANT ALL ON TABLE storage.buckets TO authenticated, anon;
GRANT ALL ON TABLE storage.objects TO authenticated, anon;

ALTER TABLE storage.buckets ENABLE ROW LEVEL SECURITY;
ALTER TABLE storage.objects ENABLE ROW LEVEL SECURITY;

-- Allow anyone to view public buckets
CREATE POLICY "Public Buckets are viewable by everyone"
    ON storage.buckets FOR SELECT
    USING ( public = true );

-- Allow authenticated users to view their own buckets
CREATE POLICY "Users can view their own buckets"
    ON storage.buckets FOR SELECT
    TO authenticated
    USING ( owner = current_setting('request.jwt.claim.sub', true)::uuid );

-- Object policies depend on the bucket's public flag or the object owner
CREATE POLICY "Public Objects are viewable by everyone"
    ON storage.objects FOR SELECT
    USING ( bucket_id IN (SELECT id FROM storage.buckets WHERE public = true) );

CREATE POLICY "Users can view their own objects"
    ON storage.objects FOR SELECT
    TO authenticated
    USING ( owner = current_setting('request.jwt.claim.sub', true)::uuid );

CREATE POLICY "Users can insert their own objects"
    ON storage.objects FOR INSERT
    TO authenticated
    WITH CHECK ( owner = current_setting('request.jwt.claim.sub', true)::uuid );
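
A quick way to sanity-check these policies from a superuser psql session (a sketch; the UUID is hypothetical, and request.jwt.claim.sub is the setting the policies read):

-- Hypothetical RLS check: impersonate `authenticated` with a claimed user
-- id, then query. Only public rows and rows owned by that id come back.
BEGIN;
SET LOCAL ROLE authenticated;
SELECT set_config('request.jwt.claim.sub', '00000000-0000-0000-0000-000000000001', true);
SELECT id, name FROM storage.objects;
ROLLBACK;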

migrations/20240101000003_control_plane.sql (new file)
@@ -0,0 +1,30 @@
-- This migration conceptually belongs to the CONTROL PLANE database
-- (`control_db` in docker-compose.yml, port 5433), not the tenant DB.
-- Migrating it separately would require its own pipeline, e.g.
-- `sqlx migrate run --database-url ...` pointed at the control DB, or a
-- dedicated folder for control-plane migrations.
--
-- For the MVP we keep `projects` in the `public` schema of the main DB to
-- reduce infrastructure friction; this also matches the
-- "Single Tenant / Self Hosted" mode. In a real SaaS deployment this table
-- would live in a separate database.

CREATE EXTENSION IF NOT EXISTS pgcrypto;

CREATE TABLE IF NOT EXISTS projects (
    id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
    name TEXT NOT NULL,
    owner_id UUID, -- no strict FK: users could live in a tenant DB, though here they share one DB
    status TEXT DEFAULT 'active',
    db_url TEXT NOT NULL,
    jwt_secret TEXT NOT NULL DEFAULT encode(gen_random_bytes(32), 'hex'),
    anon_key TEXT,
    service_role_key TEXT,
    created_at TIMESTAMPTZ DEFAULT now(),
    updated_at TIMESTAMPTZ DEFAULT now()
);

-- API keys (anon_key / service_role_key) are generated in application code
-- rather than by a database trigger.
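
Provisioning a project row is then a single INSERT (a sketch; the db_url value is hypothetical, and jwt_secret is filled in by the column default):

-- Hypothetical provisioning: the DEFAULT mints jwt_secret; anon_key and
-- service_role_key are generated later in application code.
INSERT INTO projects (name, db_url)
VALUES ('demo-project', 'postgres://postgres:postgres@localhost:5432/demo')
RETURNING id, jwt_secret;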

migrations/20240101000004_realtime_triggers.sql (new file)
@@ -0,0 +1,49 @@
-- Realtime schema
CREATE SCHEMA IF NOT EXISTS madbase_realtime;

-- Generic trigger function
CREATE OR REPLACE FUNCTION madbase_realtime.broadcast_changes()
RETURNS trigger AS $$
DECLARE
    payload jsonb;
BEGIN
    -- Construct payload
    payload = jsonb_build_object(
        'schema', TG_TABLE_SCHEMA,
        'table', TG_TABLE_NAME,
        'type', TG_OP,
        'timestamp', now()
    );

    IF (TG_OP = 'INSERT') THEN
        payload = payload || jsonb_build_object('record', row_to_json(NEW)::jsonb);
    ELSIF (TG_OP = 'UPDATE') THEN
        payload = payload || jsonb_build_object(
            'record', row_to_json(NEW)::jsonb,
            'old_record', row_to_json(OLD)::jsonb
        );
    ELSIF (TG_OP = 'DELETE') THEN
        payload = payload || jsonb_build_object('old_record', row_to_json(OLD)::jsonb);
    END IF;

    -- Send notification.
    -- NOTIFY payloads are limited to 8000 bytes; larger payloads will fail
    -- or need truncation. For MVP, we assume they fit.
    PERFORM pg_notify('madbase_realtime', payload::text);

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Example: enable for public.users (if it exists)
-- DO $$
-- BEGIN
--     IF EXISTS (SELECT FROM pg_tables WHERE schemaname = 'public' AND tablename = 'users') THEN
--         CREATE TRIGGER realtime_users_changes
--             AFTER INSERT OR UPDATE OR DELETE ON public.users
--             FOR EACH ROW EXECUTE FUNCTION madbase_realtime.broadcast_changes();
--     END IF;
-- END
-- $$;
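
On the consuming side, any session can subscribe to the channel; a minimal check from psql might look like this (a sketch, assuming the commented-out trigger above has been created on public.users):

LISTEN madbase_realtime;
-- Then, from another session:
--   UPDATE public.users SET updated_at = now() WHERE email = 'demo@example.com';
-- psql reports an asynchronous notification on "madbase_realtime" carrying
-- the JSON payload built by broadcast_changes().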

migrations/20260311000000_realtime_history.sql (new file)
@@ -0,0 +1,71 @@
-- Create history table
CREATE TABLE IF NOT EXISTS madbase_realtime.messages (
    id bigserial PRIMARY KEY,
    topic text NOT NULL, -- schema:table
    payload jsonb NOT NULL,
    created_at timestamptz DEFAULT now()
);

CREATE INDEX IF NOT EXISTS idx_realtime_messages_topic_id ON madbase_realtime.messages (topic, id);

-- Updated trigger function: persist each change to history, then notify
CREATE OR REPLACE FUNCTION madbase_realtime.broadcast_changes()
RETURNS trigger AS $$
DECLARE
    base_payload jsonb;
    final_payload jsonb;
    topic text;
    msg_id bigint;
BEGIN
    -- Construct topic
    topic = TG_TABLE_SCHEMA || ':' || TG_TABLE_NAME;

    -- Construct base payload
    base_payload = jsonb_build_object(
        'schema', TG_TABLE_SCHEMA,
        'table', TG_TABLE_NAME,
        'type', TG_OP,
        'timestamp', now()
    );

    IF (TG_OP = 'INSERT') THEN
        base_payload = base_payload || jsonb_build_object('record', row_to_json(NEW)::jsonb);
    ELSIF (TG_OP = 'UPDATE') THEN
        base_payload = base_payload || jsonb_build_object(
            'record', row_to_json(NEW)::jsonb,
            'old_record', row_to_json(OLD)::jsonb
        );
    ELSIF (TG_OP = 'DELETE') THEN
        base_payload = base_payload || jsonb_build_object('old_record', row_to_json(OLD)::jsonb);
    END IF;

    -- Insert into history
    INSERT INTO madbase_realtime.messages (topic, payload)
    VALUES (topic, base_payload)
    RETURNING id INTO msg_id;

    -- Add ID to payload
    final_payload = base_payload || jsonb_build_object('id', msg_id);

    -- Send notification.
    -- NOTIFY payloads are limited to 8000 bytes. If the full payload is too
    -- large, fall back to a slim "truncated" message carrying only the ID,
    -- which the client can use to fetch the row from history. Either way the
    -- history insert has already succeeded.
    BEGIN
        PERFORM pg_notify('madbase_realtime', final_payload::text);
    EXCEPTION WHEN OTHERS THEN
        PERFORM pg_notify('madbase_realtime', jsonb_build_object(
            'id', msg_id,
            'schema', TG_TABLE_SCHEMA,
            'table', TG_TABLE_NAME,
            'type', TG_OP,
            'truncated', true
        )::text);
    END;

    RETURN NEW;
END;
$$ LANGUAGE plpgsql;
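
Clients that reconnect, or that receive a truncated notice, can replay from the history table; a sketch, assuming the client tracks the last message id it processed (here, 42):

-- Hypothetical catch-up query: fetch everything on a topic newer than the
-- last id the client saw, served by the (topic, id) index.
SELECT id, payload, created_at
FROM madbase_realtime.messages
WHERE topic = 'public:users' AND id > 42
ORDER BY id;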

migrations/20260311000001_fix_storage_permissions.sql (new file)
@@ -0,0 +1,35 @@
DO $$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'service_role') THEN
        CREATE ROLE service_role NOLOGIN;
    END IF;
END
$$;

ALTER ROLE service_role WITH BYPASSRLS;

GRANT USAGE ON SCHEMA storage TO service_role;
GRANT ALL ON ALL TABLES IN SCHEMA storage TO service_role;
GRANT ALL ON ALL SEQUENCES IN SCHEMA storage TO service_role;
GRANT ALL ON ALL FUNCTIONS IN SCHEMA storage TO service_role;

-- Explicit policies for service_role (belt and braces: BYPASSRLS already
-- exempts the role from row filtering)
CREATE POLICY "Service role can do anything on buckets"
    ON storage.buckets
    FOR ALL
    TO service_role
    USING (true)
    WITH CHECK (true);

CREATE POLICY "Service role can do anything on objects"
    ON storage.objects
    FOR ALL
    TO service_role
    USING (true)
    WITH CHECK (true);

-- Also grant access to the public schema, since service_role may need it
GRANT USAGE ON SCHEMA public TO service_role;
GRANT ALL ON ALL TABLES IN SCHEMA public TO service_role;
GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO service_role;
GRANT ALL ON ALL FUNCTIONS IN SCHEMA public TO service_role;
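
To confirm the role actually escapes row filtering, a quick check from a superuser session (sketch):

-- Hypothetical verification: service_role should see every row in spite of
-- the owner-based policies, thanks to BYPASSRLS.
BEGIN;
SET LOCAL ROLE service_role;
SELECT count(*) FROM storage.objects;
ROLLBACK;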