added more support for supabase-js

2026-03-12 10:18:52 +02:00
parent c0792f2e1d
commit 6708cf28a7
62 changed files with 6563 additions and 526 deletions


@@ -2,19 +2,23 @@ use auth::AuthContext;
use aws_sdk_s3::{primitives::ByteStream, Client};
use axum::{
body::{Body, Bytes},
-extract::{FromRequest, Multipart, Path, Request, State},
+extract::{FromRequest, Multipart, Path, Query, Request, State},
http::{header::{self, CONTENT_TYPE}, HeaderMap, StatusCode},
response::{IntoResponse, Json},
Extension,
};
use common::{Config, ProjectContext};
use futures::stream::StreamExt;
use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};
use serde_json::json;
use sqlx::{PgPool, Row};
use std::collections::HashMap;
use std::sync::Arc;
use uuid::Uuid;
-use http_body_util::BodyExt; // For collect()
+use http_body_util::BodyExt;
use image::ImageOutputFormat;
use std::io::Cursor;
#[derive(Clone)]
pub struct StorageState {
@@ -24,6 +28,26 @@ pub struct StorageState {
pub bucket_name: String, // Global S3 Bucket Name
}
#[derive(Serialize, Deserialize)]
pub struct SignedUrlClaims {
pub bucket: String,
pub key: String,
pub exp: usize,
pub project_ref: String,
}
#[derive(Deserialize)]
pub struct SignObjectRequest {
#[serde(alias = "expiresIn")]
pub expires_in: u64, // seconds
}
#[derive(Serialize)]
pub struct SignedUrlResponse {
#[serde(rename = "signedURL")]
pub signed_url: String,
}
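// Illustrative sketch (not part of the commit) of the wire shapes above:
// `expiresIn` is accepted through the serde alias, and the response
// serializes with the exact `signedURL` key supabase-js expects.
#[cfg(test)]
mod signed_url_shape_tests {
    use super::*;

    #[test]
    fn matches_supabase_js_wire_format() {
        let req: SignObjectRequest = serde_json::from_str(r#"{"expiresIn":3600}"#).unwrap();
        assert_eq!(req.expires_in, 3600);

        let body = serde_json::to_string(&SignedUrlResponse {
            signed_url: "/object/sign/avatars/a.png?token=abc".into(),
        })
        .unwrap();
        assert_eq!(body, r#"{"signedURL":"/object/sign/avatars/a.png?token=abc"}"#);
    }
}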
#[derive(Serialize, sqlx::FromRow)]
pub struct FileObject {
pub name: String,
@@ -34,13 +58,22 @@ pub struct FileObject {
pub metadata: Option<serde_json::Value>,
}
#[derive(Serialize, sqlx::FromRow)]
pub struct Bucket {
pub id: String,
pub name: String,
pub owner: Option<Uuid>,
pub created_at: Option<chrono::DateTime<chrono::Utc>>,
pub updated_at: Option<chrono::DateTime<chrono::Utc>>,
pub public: bool,
}
pub async fn list_buckets(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(_project_ctx): Extension<ProjectContext>,
-) -> Result<Json<Vec<String>>, (StatusCode, String)> {
-// Query storage.buckets with RLS
+) -> Result<Json<Vec<Bucket>>, (StatusCode, String)> {
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
.begin()
@@ -72,45 +105,11 @@ pub async fn list_buckets(
})?;
}
-// In a real system, `storage.buckets` table would have a `project_id` column?
-// OR we just use the single DB (which is shared in MVP) but RLS handles ownership?
-// Wait, the DB tables are shared across all tenants in this MVP architecture?
-// Yes, we only have one Postgres instance.
-// So we need to filter by tenant/project if we had a project_id column.
-// But `storage.buckets` schema (from Supabase) usually doesn't have project_id if it's per-tenant DB.
-// Since we share the DB, we must add a way to segregate.
-// BUT, for MVP, let's assume `buckets` are global within the DB?
-// No, that leaks data.
-// Simplification: We prefix bucket IDs with `project_ref` in the DB?
-// Or we just rely on RLS.
-// If we rely on RLS, we need to know WHICH buckets belong to WHICH project.
-// `storage.buckets` has an `owner` column (User UUID).
-// Users are unique per project? No, we share `auth.users` too in MVP?
-// Actually, `auth.users` is global in this MVP implementation (single table).
-// So users from Project A and Project B are all in the same table.
-// If a user creates a bucket, they own it.
-// So `list_buckets` will show buckets owned by the user.
-// This is "User Multitenancy", not "Project Multitenancy".
-// If we want "Project Multitenancy", we need to filter by Project Context.
-// Let's assume for now we just list what RLS allows.
-let buckets: Vec<String> = sqlx::query_scalar("SELECT id FROM storage.buckets")
+let buckets = sqlx::query_as::<_, Bucket>("SELECT * FROM storage.buckets")
.fetch_all(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
-// Filter buckets that start with project_ref?
-// Or just return all visible.
-// Let's filter by prefix to enforce project isolation if we adopt a naming convention.
-// Convention: "{project_ref}_{bucket_name}"
-// But user sends "bucket_name".
-// Let's assume we return "bucket_name" by stripping prefix?
-// Too complex for MVP.
-// Let's just return what RLS gives us.
Ok(Json(buckets))
}
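// With the full row shape, a supabase-js `storage.listBuckets()` call can
// consume the response directly, e.g. (illustrative):
// [{"id":"avatars","name":"avatars","owner":null,"created_at":"...","updated_at":"...","public":false}]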
@@ -157,10 +156,6 @@ pub async fn list_objects(
})?;
}
-// Ensure we are accessing a bucket that belongs to this project?
-// We can check if `bucket_id` matches expected pattern or if we use a project_id column.
-// For MVP, we trust RLS on the `storage.buckets` table.
let bucket_exists: Option<String> =
sqlx::query_scalar("SELECT id FROM storage.buckets WHERE id = $1")
.bind(&bucket_id)
@@ -215,7 +210,6 @@ pub async fn upload_object(
}
file_data.ok_or((StatusCode::BAD_REQUEST, "No file found in multipart".to_string()))?
} else {
-// Raw body
let body = request.into_body();
body.collect().await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
@@ -331,12 +325,50 @@ pub async fn upload_object(
Ok((StatusCode::CREATED, Json(file_object)))
}
// Resize and/or re-encode an image according to the Supabase-style transform
// parameters (width, height, quality, format).
fn transform_image(
    bytes: Bytes,
    width: Option<u32>,
    height: Option<u32>,
    quality: Option<u8>,
    format: Option<String>,
) -> Result<(Bytes, String), String> {
    if width.is_none() && height.is_none() && format.is_none() {
        return Err("No transformation parameters".to_string());
    }
    let mut img = image::load_from_memory(&bytes).map_err(|e| e.to_string())?;
    // Exact resize when both dimensions are given; otherwise scale
    // proportionally, bounding only the dimension that was requested.
    if let (Some(w), Some(h)) = (width, height) {
        img = img.resize_exact(w, h, image::imageops::FilterType::Lanczos3);
    } else if let Some(w) = width {
        img = img.resize(w, u32::MAX, image::imageops::FilterType::Lanczos3);
    } else if let Some(h) = height {
        img = img.resize(u32::MAX, h, image::imageops::FilterType::Lanczos3);
    }
    // Pick the output encoding and its Content-Type together so they cannot
    // drift apart; anything unrecognized falls back to PNG.
    let (fmt, content_type) = match format.as_deref() {
        Some("jpeg") | Some("jpg") => (ImageOutputFormat::Jpeg(quality.unwrap_or(80)), "image/jpeg"),
        Some("webp") => (ImageOutputFormat::WebP, "image/webp"),
        _ => (ImageOutputFormat::Png, "image/png"),
    };
    let mut output = Cursor::new(Vec::new());
    img.write_to(&mut output, fmt).map_err(|e| e.to_string())?;
    Ok((Bytes::from(output.into_inner()), content_type.to_string()))
}
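// Illustrative smoke test (not part of the commit): round-trip a tiny
// generated PNG through transform_image and check the negotiated type.
#[cfg(test)]
mod transform_image_tests {
    use super::*;

    #[test]
    fn resizes_and_reports_content_type() {
        // Encode a blank 2x2 RGB image to PNG bytes in memory.
        let img = image::DynamicImage::new_rgb8(2, 2);
        let mut buf = Cursor::new(Vec::new());
        img.write_to(&mut buf, ImageOutputFormat::Png).unwrap();

        // Width-only resize plus JPEG re-encode.
        let (out, ct) = transform_image(
            Bytes::from(buf.into_inner()),
            Some(1),
            None,
            Some(75),
            Some("jpeg".to_string()),
        )
        .unwrap();
        assert_eq!(ct, "image/jpeg");
        assert!(!out.is_empty());
    }
}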
pub async fn download_object(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(project_ctx): Extension<ProjectContext>,
Path((bucket_id, filename)): Path<(String, String)>,
Query(params): Query<HashMap<String, String>>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
@@ -384,7 +416,6 @@ pub async fn download_object(
));
}
-// S3 Key Namespacing: {project_ref}/{bucket_id}/{filename}
let key = format!("{}/{}/{}", project_ctx.project_ref, bucket_id, filename);
let resp = state
@@ -415,10 +446,157 @@ pub async fn download_object(
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
.into_bytes();
-if let Ok(s) = std::str::from_utf8(&body_bytes) {
-tracing::info!("Downloaded content (utf8): {}", s);
-} else {
-tracing::info!("Downloaded content (binary): {} bytes", body_bytes.len());
-}
// Check for transformations
let width = params.get("width").or(params.get("w")).and_then(|v| v.parse::<u32>().ok());
let height = params.get("height").or(params.get("h")).and_then(|v| v.parse::<u32>().ok());
let quality = params.get("quality").or(params.get("q")).and_then(|v| v.parse::<u8>().ok());
let format = params.get("format").or(params.get("f")).cloned();
if width.is_some() || height.is_some() || format.is_some() {
match transform_image(body_bytes.clone(), width, height, quality, format) {
Ok((new_bytes, new_ct)) => {
headers.insert("Content-Type", new_ct.parse().unwrap());
return Ok((headers, Body::from(new_bytes)));
},
Err(e) => {
tracing::warn!("Image transformation failed: {}", e);
// Fallback to original
}
}
}
let body = Body::from(body_bytes);
Ok((headers, body))
}
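// Transform parameters arrive as plain query strings, so a supabase-js style
// request like (illustrative):
//   GET /object/avatars/photo.png?width=200&quality=75&format=webp
// is resized and re-encoded on the fly, while requests without transform
// params fall through to the original bytes.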
pub async fn sign_object(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(project_ctx): Extension<ProjectContext>,
Path((bucket_id, filename)): Path<(String, String)>,
Json(payload): Json<SignObjectRequest>,
) -> Result<Json<SignedUrlResponse>, (StatusCode, String)> {
tracing::info!("Sign Object Request: bucket={}, file={}, role={}", bucket_id, filename, auth_ctx.role);
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
.begin()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
// SET LOCAL scopes the role to this transaction only, so RLS policies run
// as the caller's role (taken from the verified AuthContext).
let role_query = format!("SET LOCAL role = '{}'", auth_ctx.role);
sqlx::query(&role_query)
.execute(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
if let Some(claims) = &auth_ctx.claims {
// Make the JWT subject visible to RLS policies for this transaction only.
let sub_query = "SELECT set_config('request.jwt.claim.sub', $1, true)";
sqlx::query(sub_query)
.bind(&claims.sub)
.execute(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
}
let object_exists: Option<Uuid> =
sqlx::query_scalar("SELECT id FROM storage.objects WHERE bucket_id = $1 AND name = $2")
.bind(&bucket_id)
.bind(&filename)
.fetch_optional(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
if object_exists.is_none() {
return Err((StatusCode::NOT_FOUND, "File not found or access denied".to_string()));
}
let now = chrono::Utc::now();
let exp = now.timestamp() as usize + payload.expires_in as usize;
let claims = SignedUrlClaims {
bucket: bucket_id.clone(),
key: filename.clone(),
exp,
project_ref: project_ctx.project_ref.clone(),
};
let token = encode(
&Header::default(),
&claims,
&EncodingKey::from_secret(project_ctx.jwt_secret.as_bytes()),
).map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
let signed_url = format!("/object/sign/{}/{}?token={}", bucket_id, filename, token);
Ok(Json(SignedUrlResponse { signed_url }))
}
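// Illustrative round trip (not part of the commit): a token minted the way
// sign_object does must decode under the same secret and carry its claims.
#[cfg(test)]
mod signed_url_token_tests {
    use super::*;

    #[test]
    fn token_round_trips_with_claims() {
        let secret = b"test-secret"; // hypothetical project JWT secret
        let claims = SignedUrlClaims {
            bucket: "avatars".into(),
            key: "photo.png".into(),
            exp: chrono::Utc::now().timestamp() as usize + 60,
            project_ref: "proj_abc".into(),
        };
        let token = encode(&Header::default(), &claims, &EncodingKey::from_secret(secret)).unwrap();
        let decoded = decode::<SignedUrlClaims>(
            &token,
            &DecodingKey::from_secret(secret),
            &Validation::new(Algorithm::HS256),
        )
        .unwrap();
        assert_eq!(decoded.claims.bucket, "avatars");
        assert_eq!(decoded.claims.project_ref, "proj_abc");
    }
}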
pub async fn get_signed_object(
State(state): State<StorageState>,
Extension(project_ctx): Extension<ProjectContext>,
Path((bucket_id, filename)): Path<(String, String)>,
Query(params): Query<HashMap<String, String>>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let token = params.get("token").ok_or((StatusCode::BAD_REQUEST, "Missing token".to_string()))?;
let validation = Validation::new(Algorithm::HS256);
let token_data = decode::<SignedUrlClaims>(
token,
&DecodingKey::from_secret(project_ctx.jwt_secret.as_bytes()),
&validation,
).map_err(|_| (StatusCode::FORBIDDEN, "Invalid or expired token".to_string()))?;
// The token must match the exact object and project it was issued for.
if token_data.claims.bucket != bucket_id || token_data.claims.key != filename || token_data.claims.project_ref != project_ctx.project_ref {
return Err((StatusCode::FORBIDDEN, "Token does not match requested resource".to_string()));
}
// S3 keys are namespaced as {project_ref}/{bucket_id}/{filename}.
let key = format!("{}/{}/{}", project_ctx.project_ref, bucket_id, filename);
let resp = state
.s3_client
.get_object()
.bucket(&state.bucket_name)
.key(&key)
.send()
.await
.map_err(|_e| {
(
StatusCode::NOT_FOUND,
"File content not found in storage".to_string(),
)
})?;
let mut headers = HeaderMap::new();
if let Some(ct) = resp.content_type() {
if let Ok(val) = ct.parse() {
headers.insert("Content-Type", val);
}
}
let body_bytes = resp
.body
.collect()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
.into_bytes();
// Check for transformations
let width = params.get("width").or(params.get("w")).and_then(|v| v.parse::<u32>().ok());
let height = params.get("height").or(params.get("h")).and_then(|v| v.parse::<u32>().ok());
let quality = params.get("quality").or(params.get("q")).and_then(|v| v.parse::<u8>().ok());
let format = params.get("format").or(params.get("f")).cloned();
if width.is_some() || height.is_some() || format.is_some() {
match transform_image(body_bytes.clone(), width, height, quality, format) {
Ok((new_bytes, new_ct)) => {
headers.insert("Content-Type", new_ct.parse().unwrap());
return Ok((headers, Body::from(new_bytes)));
},
Err(e) => {
tracing::warn!("Image transformation failed: {}", e);
}
}
}
let body = Body::from(body_bytes);