madbase/storage/src/handlers.rs
Vlad Durnea 8ade39ae2d
M0 security hardening: fix all vulnerabilities and resolve build errors
- Fix 5 source files corrupted with markdown formatting by previous AI
- Remove secret logging from auth middleware, signup, and recovery handlers
- Add role validation (ALLOWED_ROLES allowlist) to all 10 data_api + storage handlers
- Fix JavaScript injection in Deno runtime via double-serialization
- Add UUID validation to TUS upload paths to prevent path traversal
- Gate token issuance on email confirmation (AUTH_AUTO_CONFIRM env var)
- Reject unconfirmed users on login with 403
- Prevent OAuth account takeover (409 on email conflict with different provider)
- Replace permissive CORS (allow_origin Any) with ALLOWED_ORIGINS env var
- Wire session-based admin auth into control plane, add POST /platform/v1/login
- Hide secrets from list_projects API via ProjectSummary struct
- Add missing deps (redis, uuid, chrono, tower-http fs feature)
- Fix http version mismatch between reqwest 0.11 and axum 0.7 in proxy
- Clean up all unused imports across workspace

Build: zero errors, zero warnings. Tests: 10 passed, 0 failed.
Made-with: Cursor
2026-03-15 12:54:21 +02:00


use auth::AuthContext;
use aws_sdk_s3::{primitives::ByteStream, Client};
use axum::{
body::{Body, Bytes},
extract::{FromRequest, Multipart, Path, Query, Request, State},
http::{header::CONTENT_TYPE, HeaderMap, StatusCode},
response::{IntoResponse, Json},
Extension,
};
use common::{Config, ProjectContext};
use http_body_util::BodyExt;
use image::ImageOutputFormat;
use jsonwebtoken::{decode, encode, Algorithm, DecodingKey, EncodingKey, Header, Validation};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::collections::HashMap;
use std::io::Cursor;
use uuid::Uuid;
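/// Database roles a request is allowed to assume. The role string is later
/// interpolated into a `SET LOCAL role` statement, so anything outside this
/// allowlist must be rejected before it reaches SQL.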
const ALLOWED_ROLES: &[&str] = &["anon", "authenticated", "service_role"];
fn validate_role(role: &str) -> Result<(), (StatusCode, String)> {
if ALLOWED_ROLES.contains(&role) {
Ok(())
} else {
Err((StatusCode::FORBIDDEN, format!("Invalid role: {}", role)))
}
}
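/// Shared state for all storage handlers: the Postgres pool, the S3 client,
/// service configuration, and the single global S3 bucket that objects are
/// stored under.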
#[derive(Clone)]
pub struct StorageState {
pub db: PgPool,
pub s3_client: Client,
pub config: Config,
pub bucket_name: String, // Global S3 Bucket Name
}
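/// Claims embedded in a signed-URL token: the bucket and object key it grants
/// access to, a Unix-timestamp expiry, and the project the token is scoped to.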
#[derive(Serialize, Deserialize)]
pub struct SignedUrlClaims {
pub bucket: String,
pub key: String,
pub exp: usize,
pub project_ref: String,
}
#[derive(Deserialize)]
pub struct SignObjectRequest {
#[serde(alias = "expiresIn")]
pub expires_in: u64, // seconds
}
#[derive(Serialize)]
pub struct SignedUrlResponse {
#[serde(rename = "signedURL")]
pub signed_url: String,
}
#[derive(Serialize, sqlx::FromRow)]
pub struct FileObject {
pub name: String,
pub id: Option<Uuid>,
pub updated_at: Option<chrono::DateTime<chrono::Utc>>,
pub created_at: Option<chrono::DateTime<chrono::Utc>>,
pub last_accessed_at: Option<chrono::DateTime<chrono::Utc>>,
pub metadata: Option<serde_json::Value>,
}
#[derive(Serialize, sqlx::FromRow)]
pub struct Bucket {
pub id: String,
pub name: String,
pub owner: Option<Uuid>,
pub created_at: Option<chrono::DateTime<chrono::Utc>>,
pub updated_at: Option<chrono::DateTime<chrono::Utc>>,
pub public: bool,
}
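/// Lists all buckets visible to the caller. The query runs in a transaction with
/// `SET LOCAL role` and the JWT `sub` claim applied, so row-level security decides
/// what is returned.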
pub async fn list_buckets(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(_project_ctx): Extension<ProjectContext>,
) -> Result<Json<Vec<Bucket>>, (StatusCode, String)> {
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
.begin()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
validate_role(&auth_ctx.role)?;
let role_query = format!("SET LOCAL role = '{}'", auth_ctx.role);
sqlx::query(&role_query)
.execute(&mut *tx)
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set role: {}", e),
)
})?;
if let Some(claims) = &auth_ctx.claims {
let sub_query = "SELECT set_config('request.jwt.claim.sub', $1, true)";
sqlx::query(sub_query)
.bind(&claims.sub)
.execute(&mut *tx)
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set claims: {}", e),
)
})?;
}
let buckets = sqlx::query_as::<_, Bucket>("SELECT * FROM storage.buckets")
.fetch_all(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
Ok(Json(buckets))
}
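/// Lists the objects in a bucket, returning 404 if the bucket does not exist.
/// Row-level security is applied the same way as in `list_buckets`.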
pub async fn list_objects(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(_project_ctx): Extension<ProjectContext>,
Path(bucket_id): Path<String>,
) -> Result<Json<Vec<FileObject>>, (StatusCode, String)> {
tracing::info!("Starting list_objects for bucket: {}", bucket_id);
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
.begin()
.await
.map_err(|e| {
tracing::error!("Failed to begin transaction: {}", e);
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string())
})?;
validate_role(&auth_ctx.role)?;
let role_query = format!("SET LOCAL role = '{}'", auth_ctx.role);
sqlx::query(&role_query)
.execute(&mut *tx)
.await
.map_err(|e| {
tracing::error!("Failed to set role: {}", e);
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set role: {}", e),
)
})?;
if let Some(claims) = &auth_ctx.claims {
let sub_query = "SELECT set_config('request.jwt.claim.sub', $1, true)";
sqlx::query(sub_query)
.bind(&claims.sub)
.execute(&mut *tx)
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set claims: {}", e),
)
})?;
}
let bucket_exists: Option<String> =
sqlx::query_scalar("SELECT id FROM storage.buckets WHERE id = $1")
.bind(&bucket_id)
.fetch_optional(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
if bucket_exists.is_none() {
return Err((StatusCode::NOT_FOUND, "Bucket not found".to_string()));
}
let objects = sqlx::query_as::<_, FileObject>(
r#"
SELECT name, id, updated_at, created_at, last_accessed_at, metadata
FROM storage.objects
WHERE bucket_id = $1
"#,
)
.bind(&bucket_id)
.fetch_all(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
Ok(Json(objects))
}
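/// Uploads a file from either a raw request body or the `file` field of a
/// multipart form. The bytes are written to S3 under
/// `<project_ref>/<bucket_id>/<filename>` and an object metadata row is upserted;
/// the recorded mimetype is currently always `application/octet-stream`.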
pub async fn upload_object(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(project_ctx): Extension<ProjectContext>,
Path((bucket_id, filename)): Path<(String, String)>,
request: Request,
) -> Result<impl IntoResponse, (StatusCode, String)> {
tracing::info!("Starting upload_object for bucket: {}, filename: {}", bucket_id, filename);
let content_type = request.headers().get(CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
.unwrap_or("");
let data = if content_type.starts_with("multipart/form-data") {
let mut multipart = Multipart::from_request(request, &state).await
.map_err(|e| (StatusCode::BAD_REQUEST, e.to_string()))?;
let mut file_data = None;
while let Ok(Some(field)) = multipart.next_field().await {
if field.name() == Some("file") || field.name() == Some("") {
let bytes = field.bytes().await.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
file_data = Some(bytes);
break;
}
}
file_data.ok_or((StatusCode::BAD_REQUEST, "No file found in multipart".to_string()))?
} else {
let body = request.into_body();
body.collect().await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
.to_bytes()
};
let size = data.len();
tracing::info!("File size: {} bytes", size);
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
.begin()
.await
.map_err(|e| {
tracing::error!("Failed to begin transaction: {}", e);
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string())
})?;
validate_role(&auth_ctx.role)?;
let role_query = format!("SET LOCAL role = '{}'", auth_ctx.role);
sqlx::query(&role_query)
.execute(&mut *tx)
.await
.map_err(|e| {
tracing::error!("Failed to set role: {}", e);
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set role: {}", e),
)
})?;
if let Some(claims) = &auth_ctx.claims {
let sub_query = "SELECT set_config('request.jwt.claim.sub', $1, true)";
sqlx::query(sub_query)
.bind(&claims.sub)
.execute(&mut *tx)
.await
.map_err(|e| {
tracing::error!("Failed to set claims: {}", e);
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set claims: {}", e),
)
})?;
}
let bucket_exists: Option<String> =
sqlx::query_scalar("SELECT id FROM storage.buckets WHERE id = $1")
.bind(&bucket_id)
.fetch_optional(&mut *tx)
.await
.map_err(|e| {
tracing::error!("Failed to check bucket existence: {}", e);
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string())
})?;
if bucket_exists.is_none() {
tracing::warn!("Bucket not found: {}", bucket_id);
return Err((StatusCode::NOT_FOUND, "Bucket not found".to_string()));
}
let key = format!("{}/{}/{}", project_ctx.project_ref, bucket_id, filename);
tracing::info!("Uploading to S3 with key: {}", key);
state
.s3_client
.put_object()
.bucket(&state.bucket_name)
.key(&key)
.body(ByteStream::from(data))
.send()
.await
.map_err(|e| {
tracing::error!("S3 PutObject error: {:?}", e);
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string())
})?;
tracing::info!("S3 upload successful");
let user_id = auth_ctx
.claims
.as_ref()
.and_then(|c| Uuid::parse_str(&c.sub).ok());
tracing::info!("Inserting metadata into DB");
let file_object = sqlx::query_as::<_, FileObject>(
r#"
INSERT INTO storage.objects (bucket_id, name, owner, metadata)
VALUES ($1, $2, $3, $4)
ON CONFLICT (bucket_id, name)
DO UPDATE SET updated_at = now(), metadata = $4
RETURNING name, id, updated_at, created_at, last_accessed_at, metadata
"#,
)
.bind(&bucket_id)
.bind(&filename)
.bind(user_id)
.bind(serde_json::json!({ "size": size, "mimetype": "application/octet-stream" }))
.fetch_one(&mut *tx)
.await
.map_err(|e| {
tracing::error!("DB Insert Object error: {:?}", e);
(StatusCode::FORBIDDEN, format!("Permission denied: {}", e))
})?;
tx.commit()
.await
.map_err(|e| {
tracing::error!("Commit error: {}", e);
(StatusCode::INTERNAL_SERVER_ERROR, e.to_string())
})?;
Ok((StatusCode::CREATED, Json(file_object)))
}
// Resizes and/or re-encodes an in-memory image. `width`/`height` resize the image
// (exact when both are given, aspect-preserving otherwise), `quality` applies to
// JPEG output, and `format` selects the output encoding (png/jpeg/webp, default png).
fn transform_image(bytes: Bytes, width: Option<u32>, height: Option<u32>, quality: Option<u8>, format: Option<String>) -> Result<(Bytes, String), String> {
if width.is_none() && height.is_none() && format.is_none() {
return Err("No transformation parameters".to_string());
}
let mut img = image::load_from_memory(&bytes).map_err(|e| e.to_string())?;
if let (Some(w), Some(h)) = (width, height) {
img = img.resize_exact(w, h, image::imageops::FilterType::Lanczos3);
} else if let Some(w) = width {
img = img.resize(w, u32::MAX, image::imageops::FilterType::Lanczos3);
} else if let Some(h) = height {
img = img.resize(u32::MAX, h, image::imageops::FilterType::Lanczos3);
}
let mut output = Cursor::new(Vec::new());
let fmt = match format.as_deref() {
Some("png") => ImageOutputFormat::Png,
Some("jpeg") | Some("jpg") => ImageOutputFormat::Jpeg(quality.unwrap_or(80)),
Some("webp") => ImageOutputFormat::WebP,
_ => ImageOutputFormat::Png,
};
img.write_to(&mut output, fmt).map_err(|e| e.to_string())?;
let content_type = match format.as_deref() {
Some("png") => "image/png",
Some("jpeg") | Some("jpg") => "image/jpeg",
Some("webp") => "image/webp",
_ => "image/png",
};
Ok((Bytes::from(output.into_inner()), content_type.to_string()))
}
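/// Streams an object from S3 after confirming its metadata row is visible to the
/// caller under RLS. Optional `width`/`w`, `height`/`h`, `quality`/`q`, and
/// `format`/`f` query parameters apply an in-memory image transformation; if the
/// transformation fails, the original bytes are returned unchanged.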
pub async fn download_object(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(project_ctx): Extension<ProjectContext>,
Path((bucket_id, filename)): Path<(String, String)>,
Query(params): Query<HashMap<String, String>>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
.begin()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
validate_role(&auth_ctx.role)?;
let role_query = format!("SET LOCAL role = '{}'", auth_ctx.role);
sqlx::query(&role_query)
.execute(&mut *tx)
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set role: {}", e),
)
})?;
if let Some(claims) = &auth_ctx.claims {
let sub_query = "SELECT set_config('request.jwt.claim.sub', $1, true)";
sqlx::query(sub_query)
.bind(&claims.sub)
.execute(&mut *tx)
.await
.map_err(|e| {
(
StatusCode::INTERNAL_SERVER_ERROR,
format!("Failed to set claims: {}", e),
)
})?;
}
let object_exists: Option<Uuid> =
sqlx::query_scalar("SELECT id FROM storage.objects WHERE bucket_id = $1 AND name = $2")
.bind(&bucket_id)
.bind(&filename)
.fetch_optional(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
if object_exists.is_none() {
return Err((
StatusCode::NOT_FOUND,
"File not found or access denied".to_string(),
));
}
let key = format!("{}/{}/{}", project_ctx.project_ref, bucket_id, filename);
let resp = state
.s3_client
.get_object()
.bucket(&state.bucket_name)
.key(&key)
.send()
.await
.map_err(|_e| {
(
StatusCode::NOT_FOUND,
"File content not found in storage".to_string(),
)
})?;
let mut headers = HeaderMap::new();
if let Some(ct) = resp.content_type() {
if let Ok(val) = ct.parse() {
headers.insert("Content-Type", val);
}
}
let body_bytes = resp
.body
.collect()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
.into_bytes();
// Check for transformations
let width = params.get("width").or(params.get("w")).and_then(|v| v.parse::<u32>().ok());
let height = params.get("height").or(params.get("h")).and_then(|v| v.parse::<u32>().ok());
let quality = params.get("quality").or(params.get("q")).and_then(|v| v.parse::<u8>().ok());
let format = params.get("format").or(params.get("f")).cloned();
if width.is_some() || height.is_some() || format.is_some() {
match transform_image(body_bytes.clone(), width, height, quality, format) {
Ok((new_bytes, new_ct)) => {
// new_ct is one of the fixed strings returned by transform_image, so parsing
// cannot realistically fail; fall through without the header if it ever does.
if let Ok(val) = new_ct.parse() {
headers.insert("Content-Type", val);
}
return Ok((headers, Body::from(new_bytes)));
},
Err(e) => {
tracing::warn!("Image transformation failed: {}", e);
// Fallback to original
}
}
}
let body = Body::from(body_bytes);
Ok((headers, body))
}
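/// Issues a signed URL for an object. The URL carries an HS256 JWT
/// (`SignedUrlClaims`) signed with the project's JWT secret and expiring after
/// `expires_in` seconds; it is served by `get_signed_object`.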
pub async fn sign_object(
State(state): State<StorageState>,
db: Option<Extension<PgPool>>,
Extension(auth_ctx): Extension<AuthContext>,
Extension(project_ctx): Extension<ProjectContext>,
Path((bucket_id, filename)): Path<(String, String)>,
Json(payload): Json<SignObjectRequest>,
) -> Result<Json<SignedUrlResponse>, (StatusCode, String)> {
tracing::info!("Sign Object Request: bucket={}, file={}, role={}", bucket_id, filename, auth_ctx.role);
let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
let mut tx = db
.begin()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
validate_role(&auth_ctx.role)?;
let role_query = format!("SET LOCAL role = '{}'", auth_ctx.role);
sqlx::query(&role_query)
.execute(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
if let Some(claims) = &auth_ctx.claims {
let sub_query = "SELECT set_config('request.jwt.claim.sub', $1, true)";
sqlx::query(sub_query)
.bind(&claims.sub)
.execute(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
}
let object_exists: Option<Uuid> =
sqlx::query_scalar("SELECT id FROM storage.objects WHERE bucket_id = $1 AND name = $2")
.bind(&bucket_id)
.bind(&filename)
.fetch_optional(&mut *tx)
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
if object_exists.is_none() {
return Err((StatusCode::NOT_FOUND, "File not found or access denied".to_string()));
}
let now = chrono::Utc::now();
let exp = now.timestamp() as usize + payload.expires_in as usize;
let claims = SignedUrlClaims {
bucket: bucket_id.clone(),
key: filename.clone(),
exp,
project_ref: project_ctx.project_ref.clone(),
};
let token = encode(
&Header::default(),
&claims,
&EncodingKey::from_secret(project_ctx.jwt_secret.as_bytes()),
).map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
let signed_url = format!("/object/sign/{}/{}?token={}", bucket_id, filename, token);
Ok(Json(SignedUrlResponse { signed_url }))
}
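/// Serves an object referenced by a signed URL. No session auth is required: the
/// `token` query parameter is validated against the project's JWT secret and must
/// match the requested bucket, key, and project before the file is fetched from
/// S3. The same optional image transformations as `download_object` apply.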
pub async fn get_signed_object(
State(state): State<StorageState>,
Extension(project_ctx): Extension<ProjectContext>,
Path((bucket_id, filename)): Path<(String, String)>,
Query(params): Query<HashMap<String, String>>,
) -> Result<impl IntoResponse, (StatusCode, String)> {
let token = params.get("token").ok_or((StatusCode::BAD_REQUEST, "Missing token".to_string()))?;
let validation = Validation::new(Algorithm::HS256);
let token_data = decode::<SignedUrlClaims>(
token,
&DecodingKey::from_secret(project_ctx.jwt_secret.as_bytes()),
&validation,
).map_err(|_| (StatusCode::FORBIDDEN, "Invalid or expired token".to_string()))?;
if token_data.claims.bucket != bucket_id || token_data.claims.key != filename || token_data.claims.project_ref != project_ctx.project_ref {
return Err((StatusCode::FORBIDDEN, "Token does not match requested resource".to_string()));
}
let key = format!("{}/{}/{}", project_ctx.project_ref, bucket_id, filename);
let resp = state
.s3_client
.get_object()
.bucket(&state.bucket_name)
.key(&key)
.send()
.await
.map_err(|_e| {
(
StatusCode::NOT_FOUND,
"File content not found in storage".to_string(),
)
})?;
let mut headers = HeaderMap::new();
if let Some(ct) = resp.content_type() {
if let Ok(val) = ct.parse() {
headers.insert("Content-Type", val);
}
}
let body_bytes = resp
.body
.collect()
.await
.map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?
.into_bytes();
// Check for transformations
let width = params.get("width").or(params.get("w")).and_then(|v| v.parse::<u32>().ok());
let height = params.get("height").or(params.get("h")).and_then(|v| v.parse::<u32>().ok());
let quality = params.get("quality").or(params.get("q")).and_then(|v| v.parse::<u8>().ok());
let format = params.get("format").or(params.get("f")).cloned();
if width.is_some() || height.is_some() || format.is_some() {
match transform_image(body_bytes.clone(), width, height, quality, format) {
Ok((new_bytes, new_ct)) => {
// As in download_object, new_ct comes from a fixed set of strings, so a parse
// failure is not expected; fall through without the header if it occurs.
if let Ok(val) = new_ct.parse() {
headers.insert("Content-Type", val);
}
return Ok((headers, Body::from(new_bytes)));
},
Err(e) => {
tracing::warn!("Image transformation failed: {}", e);
}
}
}
let body = Body::from(body_bytes);
Ok((headers, body))
}
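// The sketches below are illustrative additions, not part of the original handler
// code. The router function shows one way these handlers could be mounted on an
// axum 0.7 Router; the route paths are assumptions modelled on a Supabase-style
// storage API and may not match the project's actual router module.
#[allow(dead_code)]
fn example_router(state: StorageState) -> axum::Router {
    use axum::routing::{get, post};
    axum::Router::new()
        .route("/bucket", get(list_buckets))
        .route("/object/list/:bucket_id", get(list_objects))
        .route(
            "/object/:bucket_id/:filename",
            post(upload_object).get(download_object),
        )
        .route(
            "/object/sign/:bucket_id/:filename",
            post(sign_object).get(get_signed_object),
        )
        .with_state(state)
}

#[cfg(test)]
mod tests {
    use super::*;

    // Minimal check of the role allowlist: known roles pass, anything else
    // (including injection attempts) is rejected.
    #[test]
    fn validate_role_enforces_allowlist() {
        assert!(validate_role("anon").is_ok());
        assert!(validate_role("authenticated").is_ok());
        assert!(validate_role("service_role").is_ok());
        assert!(validate_role("postgres").is_err());
        assert!(validate_role("anon'; DROP TABLE storage.objects; --").is_err());
    }
}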