chore: full stack stability and migration fixes, plus React UI progress
Some checks failed
CI / podman-build (push) Has been cancelled
CI / rust (push) Has been cancelled

2026-03-18 09:01:38 +02:00
parent 38cab8c246
commit a66d908eff
142 changed files with 12210 additions and 3402 deletions
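The recurring change in the storage handlers below is the database extractor: each handler used to take an optional `db: Option<Extension<PgPool>>` and fall back to the pool held in `StorageState`, and now requires `Extension(db): Extension<PgPool>` outright, dropping the per-handler fallback line. A minimal before/after sketch of the pattern (handler bodies elided; `StorageState` is stubbed here purely for illustration):

    use axum::{extract::State, Extension};
    use sqlx::PgPool;

    #[derive(Clone)]
    struct StorageState {
        db: PgPool,
    }

    // Before: the pool extension was optional, with a silent fallback to state.db.
    async fn handler_before(
        State(state): State<StorageState>,
        db: Option<Extension<PgPool>>,
    ) {
        let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
        let _ = db; // ... queries ...
    }

    // After: the pool is required; if the Extension layer is missing, axum
    // rejects the request at extraction time instead of falling back silently.
    async fn handler_after(
        State(_state): State<StorageState>,
        Extension(db): Extension<PgPool>,
    ) {
        let _ = db; // ... queries ...
    }

The practical effect is that a misconfigured router now fails loudly (axum returns a 500 for a missing `Extension`) rather than quietly running against a possibly different pool.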

View File

@@ -103,12 +103,11 @@ fn map_api_error(e: common::error::ApiError) -> (StatusCode, String) {
 }
 pub async fn list_buckets(
-    State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    State(_state): State<StorageState>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(_project_ctx): Extension<ProjectContext>,
 ) -> Result<Json<Vec<Bucket>>, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
@@ -126,11 +125,10 @@ pub async fn list_buckets(
 pub async fn create_bucket(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Json(payload): Json<CreateBucketRequest>,
 ) -> Result<Json<Bucket>, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
@@ -163,11 +161,10 @@ pub async fn create_bucket(
 pub async fn delete_bucket(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Path(bucket_id): Path<String>,
 ) -> Result<StatusCode, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
@@ -209,12 +206,11 @@ pub async fn delete_bucket(
 pub async fn list_objects(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(_project_ctx): Extension<ProjectContext>,
     Path(bucket_id): Path<String>,
 ) -> Result<Json<Vec<FileObject>>, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
@@ -250,7 +246,7 @@ pub async fn list_objects(
 pub async fn upload_object(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(project_ctx): Extension<ProjectContext>,
     Path((bucket_id, filename)): Path<(String, String)>,
@@ -289,7 +285,6 @@ pub async fn upload_object(
         "Upload completed"
     );
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(|e| {
@@ -416,13 +411,12 @@ fn transform_image(bytes: bytes::Bytes, width: Option<u32>, height: Option<u32>,
 pub async fn download_object(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(project_ctx): Extension<ProjectContext>,
     Path((bucket_id, filename)): Path<(String, String)>,
     Query(params): Query<HashMap<String, String>>,
 ) -> Result<impl IntoResponse, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
@@ -502,12 +496,11 @@ pub async fn download_object(
 pub async fn delete_object(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(project_ctx): Extension<ProjectContext>,
     Path((bucket_id, filename)): Path<(String, String)>,
 ) -> Result<StatusCode, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
@@ -543,12 +536,11 @@ pub async fn delete_object(
 pub async fn copy_object(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(project_ctx): Extension<ProjectContext>,
     Json(payload): Json<CopyMoveRequest>,
 ) -> Result<Json<FileObject>, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
@@ -610,13 +602,13 @@ pub async fn copy_object(
 pub async fn move_object(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(project_ctx): Extension<ProjectContext>,
     Json(payload): Json<CopyMoveRequest>,
 ) -> Result<Json<FileObject>, (StatusCode, String)> {
     // First copy, then delete source
-    let copied = copy_object(State(state.clone()), db, Extension(auth_ctx.clone()), Extension(project_ctx.clone()), Json(payload.clone())).await?;
+    let copied = copy_object(State(state.clone()), Extension(db.clone()), Extension(auth_ctx.clone()), Extension(project_ctx.clone()), Json(payload.clone())).await?;
     // Now delete source (need to reconstruct filename because payload is moved)
     let src_filename = payload.source_key.strip_prefix(&format!("{}/", payload.bucket_id))
@@ -626,7 +618,7 @@ pub async fn move_object(
     let _ = delete_object(
         State(state),
-        None,
+        Extension(db),
        Extension(auth_ctx),
        Extension(project_ctx),
        Path((payload.bucket_id, src_filename.to_string()))
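The `strip_prefix` call above recovers the bare object filename from the full source key; with hypothetical values:

    // Hypothetical values, for illustration only.
    let source_key = "avatars/user1.png";
    let bucket_id = "avatars";
    let filename = source_key.strip_prefix(&format!("{}/", bucket_id));
    assert_eq!(filename, Some("user1.png"));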
@@ -637,10 +629,9 @@ pub async fn move_object(
 pub async fn get_public_url(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Path((bucket_id, filename)): Path<(String, String)>,
 ) -> Result<impl IntoResponse, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     // Check if bucket is public
     let bucket: Option<Bucket> = sqlx::query_as::<_, Bucket>("SELECT * FROM storage.buckets WHERE id = $1")
@@ -661,13 +652,12 @@ pub async fn get_public_url(
 pub async fn sign_object(
     State(state): State<StorageState>,
-    db: Option<Extension<PgPool>>,
+    Extension(db): Extension<PgPool>,
     Extension(auth_ctx): Extension<AuthContext>,
     Extension(project_ctx): Extension<ProjectContext>,
     Path((bucket_id, filename)): Path<(String, String)>,
     Json(payload): Json<SignObjectRequest>,
 ) -> Result<Json<SignedUrlResponse>, (StatusCode, String)> {
-    let db = db.map(|Extension(p)| p).unwrap_or_else(|| state.db.clone());
     let sub = auth_ctx.claims.as_ref().map(|c| c.sub.as_str());
     let mut rls = RlsTransaction::begin(&db, &auth_ctx.role, sub).await
         .map_err(map_api_error)?;
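All of the handlers in this file share the same row-level-security preamble: pull the subject out of the JWT claims, then start a transaction scoped to the caller's role with `RlsTransaction::begin`. The diff never shows that type's internals, but the shape is the familiar one of setting per-transaction config before running queries; a hypothetical sketch (not the repo's actual code) might look like:

    use sqlx::{PgPool, Postgres, Transaction};

    // Hypothetical: roughly what an RLS-scoped begin could do. The real
    // RlsTransaction is defined elsewhere in this repo and is not shown here.
    async fn begin_rls<'a>(
        pool: &'a PgPool,
        role: &str,
        sub: Option<&str>,
    ) -> sqlx::Result<Transaction<'a, Postgres>> {
        let mut tx = pool.begin().await?;
        // set_config(..., true) scopes the setting to this transaction only,
        // so RLS policies can read the caller's subject.
        sqlx::query("SELECT set_config('request.jwt.claim.sub', $1, true)")
            .bind(sub.unwrap_or(""))
            .execute(&mut *tx)
            .await?;
        // Role names cannot be bound as parameters; a real implementation
        // must validate `role` against an allowlist before interpolating.
        sqlx::query(&format!(r#"SET LOCAL ROLE "{}""#, role))
            .execute(&mut *tx)
            .await?;
        Ok(tx)
    }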

View File

@@ -85,7 +85,7 @@ pub async fn tus_create_upload(
     let mut metadata_map = HashMap::new();
     for pair in metadata_header.split(',') {
-        let parts: Vec<&str> = pair.trim().split_whitespace().collect();
+        let parts: Vec<&str> = pair.split_whitespace().collect();
         if parts.len() == 2 {
             if let Ok(decoded_val) = general_purpose::STANDARD.decode(parts[1]) {
                 if let Ok(val_str) = String::from_utf8(decoded_val) {
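For reference, the TUS `Upload-Metadata` header is a comma-separated list of `key base64(value)` pairs, which is exactly what this loop decodes. The dropped `.trim()` is redundant because `split_whitespace` already ignores leading and trailing whitespace. A standalone sketch of the same parse (the helper name is hypothetical; the `base64` calls match what the handler already uses):

    use base64::{engine::general_purpose, Engine as _};
    use std::collections::HashMap;

    // Hypothetical helper mirroring the loop above.
    fn parse_tus_metadata(header: &str) -> HashMap<String, String> {
        let mut map = HashMap::new();
        for pair in header.split(',') {
            // split_whitespace skips surrounding whitespace, so no trim is needed.
            let parts: Vec<&str> = pair.split_whitespace().collect();
            if parts.len() == 2 {
                if let Ok(decoded) = general_purpose::STANDARD.decode(parts[1]) {
                    if let Ok(value) = String::from_utf8(decoded) {
                        map.insert(parts[0].to_string(), value);
                    }
                }
            }
        }
        map
    }

    // "filename aGVsbG8ucG5n" -> {"filename": "hello.png"}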
@@ -254,7 +254,7 @@ pub async fn tus_patch_upload(
     } else {
         // If we reached S3 chunk size (5MB), upload part and clear local file
         const S3_MIN_PART_SIZE: u64 = 5 * 1024 * 1024;
-        if new_offset - (new_offset % S3_MIN_PART_SIZE) > current_offset - (current_offset % S3_MIN_PART_SIZE) || new_offset % S3_MIN_PART_SIZE == 0 && new_offset > current_offset {
+        if new_offset - (new_offset % S3_MIN_PART_SIZE) > current_offset - (current_offset % S3_MIN_PART_SIZE) || new_offset.is_multiple_of(S3_MIN_PART_SIZE) && new_offset > current_offset {
             // This is a bit simplified, but basically if we crossed a 5MB boundary
             let local_data = fs::read(&upload_path).await
                 .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e.to_string()))?;
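The rewritten condition is dense; factored out, it reads as: the write either crossed into a new 5 MB window, or landed exactly on a window edge while making progress. (`u64::is_multiple_of` is the standard-library method, stable since Rust 1.87, and `&&` binds tighter than `||`, so the parenthesization below matches the original.) A small self-checking sketch with a helper name of my own choosing:

    const S3_MIN_PART_SIZE: u64 = 5 * 1024 * 1024;

    // Same predicate as the `if` above, with the two clauses named.
    fn crossed_part_boundary(current_offset: u64, new_offset: u64) -> bool {
        let current_window = current_offset - (current_offset % S3_MIN_PART_SIZE);
        let new_window = new_offset - (new_offset % S3_MIN_PART_SIZE);
        new_window > current_window
            || (new_offset.is_multiple_of(S3_MIN_PART_SIZE) && new_offset > current_offset)
    }

    fn main() {
        const MB: u64 = 1024 * 1024;
        assert!(crossed_part_boundary(4 * MB, 6 * MB));  // crossed the 5 MB edge
        assert!(crossed_part_boundary(3 * MB, 5 * MB));  // landed exactly on the edge
        assert!(!crossed_part_boundary(1 * MB, 4 * MB)); // still inside the first window
    }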