feat(billing): implement tenant subscription entitlements system (milestones 0-6)
Some checks failed
ci / ui (push) Failing after 28s
ci / rust (push) Failing after 2m40s
images / build-and-push (push) Failing after 19s

This commit is contained in:
2026-03-30 18:41:23 +03:00
parent 5992044b7e
commit 2595e7f1c5
63 changed files with 8448 additions and 321 deletions

View File

@@ -0,0 +1,56 @@
#!/bin/sh
set -eu
# Applies an S3 lifecycle configuration to the docs bucket.
#
# This is an operator tool (it has side effects). It is still automatable and scriptable.
#
# Required env:
# - S3_ENDPOINT
# - S3_REGION
# - S3_BUCKET_DOCS
#
# Optional env:
# - S3_LIFECYCLE_JSON (path to JSON file; default: docs/usage/s3_lifecycle_docs_default.json)
#
# Usage:
# export S3_ENDPOINT=...
# export S3_REGION=...
# export S3_BUCKET_DOCS=...
# sh docker/scripts/s3_apply_lifecycle_docs.sh
# need NAME — abort (exit 2) unless the environment variable NAME is
# exported and non-empty. Uses printenv so only the exported environment
# counts, not unexported shell variables.
need() {
  if [ -z "$(printenv "$1" 2>/dev/null || true)" ]; then
    echo "missing env: $1" >&2
    exit 2
  fi
}
# Validate required environment in a fixed order (each failure exits 2).
for required_var in S3_ENDPOINT S3_REGION S3_BUCKET_DOCS; do
  need "$required_var"
done

command -v aws >/dev/null 2>&1 || {
  echo "missing dependency: aws (AWS CLI v2 recommended)" >&2
  exit 2
}

# Skip IMDS lookups (avoids delays in containers/CI) and pin the region.
export AWS_EC2_METADATA_DISABLED=true
export AWS_DEFAULT_REGION="$S3_REGION"
export AWS_REGION="$S3_REGION"

# Lifecycle policy JSON; overridable, defaults to the repo-tracked policy.
S3_LIFECYCLE_JSON="${S3_LIFECYCLE_JSON:-docs/usage/s3_lifecycle_docs_default.json}"
[ -f "$S3_LIFECYCLE_JSON" ] || {
  echo "missing lifecycle config file: $S3_LIFECYCLE_JSON" >&2
  exit 2
}

# Apply the policy; under `set -eu` any aws failure aborts the script.
aws s3api put-bucket-lifecycle-configuration \
  --endpoint-url "$S3_ENDPOINT" \
  --bucket "$S3_BUCKET_DOCS" \
  --lifecycle-configuration "file://$S3_LIFECYCLE_JSON" >/dev/null
echo "ok: applied lifecycle config to bucket $S3_BUCKET_DOCS"

View File

@@ -0,0 +1,89 @@
#!/bin/sh
set -eu
# Idempotently provisions the S3 docs bucket with sane defaults.
#
# This script is intended for CI/CD (Gitea Actions) or operator usage.
# It is safe to run repeatedly:
# - If the bucket exists, it will NOT recreate it.
# - It will (re)apply public-access-block and optional versioning/lifecycle.
#
# Required env:
# - S3_ENDPOINT
# - S3_REGION
# - S3_BUCKET_DOCS
#
# Optional env:
# - S3_ENABLE_VERSIONING (true/false; default false)
# - S3_LIFECYCLE_JSON (path; default docs/usage/s3_lifecycle_docs_default.json)
#
# Credentials:
# - AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY (or AWS_PROFILE)
#
# Notes:
# - Some S3-compatible providers ignore LocationConstraint; this script tries to be compatible.
# need NAME — abort (exit 2) unless the environment variable NAME is
# exported and non-empty. Uses printenv so only the exported environment
# counts, not unexported shell variables.
need() {
  if [ -z "$(printenv "$1" 2>/dev/null || true)" ]; then
    echo "missing env: $1" >&2
    exit 2
  fi
}
need S3_ENDPOINT
need S3_REGION
need S3_BUCKET_DOCS

if ! command -v aws >/dev/null 2>&1; then
  echo "missing dependency: aws (AWS CLI v2 recommended)" >&2
  exit 2
fi

# Skip IMDS lookups (avoids delays in containers/CI) and pin the region.
export AWS_EC2_METADATA_DISABLED=true
export AWS_DEFAULT_REGION="$S3_REGION"
export AWS_REGION="$S3_REGION"

# Single flag in --endpoint-url=VALUE form; intentionally expanded unquoted
# at call sites (the value contains no whitespace by construction).
endpoint_args="--endpoint-url=$S3_ENDPOINT"
bucket="$S3_BUCKET_DOCS"

echo "== ensure bucket exists =="
if aws s3api head-bucket $endpoint_args --bucket "$bucket" >/dev/null 2>&1; then
  echo "bucket exists: $bucket"
else
  # Try create-bucket without LocationConstraint first (works for many
  # S3-compatible providers, which often ignore or reject it).
  if aws s3api create-bucket $endpoint_args --bucket "$bucket" >/dev/null 2>&1; then
    echo "created bucket: $bucket"
  else
    # Fallback for AWS-style regions; this attempt is NOT silenced, so a
    # genuine failure aborts the script under `set -eu`.
    aws s3api create-bucket $endpoint_args --bucket "$bucket" \
      --create-bucket-configuration "LocationConstraint=$S3_REGION" >/dev/null
    echo "created bucket (with location constraint): $bucket"
  fi
fi

echo "== apply public access block =="
aws s3api put-public-access-block $endpoint_args --bucket "$bucket" --public-access-block-configuration \
  "BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true" >/dev/null

# Versioning is opt-in ("true" or "1"); never explicitly suspended here,
# so re-running with the flag unset leaves an earlier Enabled state alone.
S3_ENABLE_VERSIONING="${S3_ENABLE_VERSIONING:-false}"
if [ "$S3_ENABLE_VERSIONING" = "true" ] || [ "$S3_ENABLE_VERSIONING" = "1" ]; then
  echo "== enable versioning =="
  aws s3api put-bucket-versioning $endpoint_args --bucket "$bucket" --versioning-configuration Status=Enabled >/dev/null
fi

echo "== apply lifecycle (optional) =="
S3_LIFECYCLE_JSON="${S3_LIFECYCLE_JSON:-docs/usage/s3_lifecycle_docs_default.json}"
if [ -f "$S3_LIFECYCLE_JSON" ]; then
  # Use $endpoint_args like every other call (was a bare --endpoint-url,
  # inconsistent with the rest of this script).
  aws s3api put-bucket-lifecycle-configuration $endpoint_args \
    --bucket "$bucket" \
    --lifecycle-configuration "file://$S3_LIFECYCLE_JSON" >/dev/null
else
  echo "lifecycle file missing, skipping: $S3_LIFECYCLE_JSON" >&2
fi

echo "ok: provisioned bucket $bucket"

View File

@@ -0,0 +1,77 @@
#!/bin/sh
set -eu
# Verifies Control API S3 document storage permissions using `aws` CLI.
#
# This script is intentionally parameterized so it can run against Hetzner or any S3-compatible backend.
# It does NOT require Control API to be running; it validates the underlying bucket/prefix permissions.
#
# Required env:
# - S3_ENDPOINT (e.g. https://<hetzner-endpoint>)
# - S3_REGION
# - S3_BUCKET_DOCS
# Optional env:
# - S3_PREFIX_DOCS (default docs/)
# - S3_FORCE_PATH_STYLE (true/false; default false)
# - AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY (or AWS_PROFILE)
#
# Notes:
# - For S3-compatible providers, prefer `aws s3api` with `--endpoint-url`.
# - We set `AWS_EC2_METADATA_DISABLED=true` to avoid IMDS delays in containers/CI.
# need NAME — abort (exit 2) unless the environment variable NAME is
# exported and non-empty. Uses printenv so only the exported environment
# counts, not unexported shell variables.
need() {
  if [ -z "$(printenv "$1" 2>/dev/null || true)" ]; then
    echo "missing env: $1" >&2
    exit 2
  fi
}
need S3_ENDPOINT
need S3_REGION
need S3_BUCKET_DOCS

# Normalize the docs prefix so it always ends with exactly one trailing "/".
S3_PREFIX_DOCS="${S3_PREFIX_DOCS:-docs/}"
case "$S3_PREFIX_DOCS" in
  */) ;;
  *) S3_PREFIX_DOCS="${S3_PREFIX_DOCS}/" ;;
esac

S3_FORCE_PATH_STYLE="${S3_FORCE_PATH_STYLE:-false}"

if ! command -v aws >/dev/null 2>&1; then
  echo "missing dependency: aws (AWS CLI v2 recommended)" >&2
  exit 2
fi

# Skip IMDS lookups (avoids delays in containers/CI) and pin the region.
export AWS_EC2_METADATA_DISABLED=true
export AWS_DEFAULT_REGION="$S3_REGION"
export AWS_REGION="$S3_REGION"

# Single flag in --endpoint-url=VALUE form; intentionally expanded unquoted
# at call sites (the value contains no whitespace by construction).
endpoint_args="--endpoint-url=$S3_ENDPOINT"

if [ "$S3_FORCE_PATH_STYLE" = "true" ] || [ "$S3_FORCE_PATH_STYLE" = "1" ]; then
  # The AWS CLI has no per-command path-style flag; the supported switch is
  # the s3.addressing_style config key. (The previous version assigned
  # unrelated TLS/timeout flags to an unused variable here, so path style
  # was never actually forced.)
  # NOTE: `aws configure set` persists in the active profile's config file —
  # fine for CI runners; interactive operators should be aware.
  aws configure set default.s3.addressing_style path
fi

# Unique smoke-test key: timestamp + PID keeps concurrent runs from colliding.
key="${S3_PREFIX_DOCS}smoke/$(date +%s)-$$.txt"
tmp="$(mktemp)"
trap 'rm -f "$tmp" >/dev/null 2>&1 || true' EXIT
printf "cloudlysis s3 verify\n" >"$tmp"

echo "== docs bucket head/list prefix =="
aws s3api head-bucket $endpoint_args --bucket "$S3_BUCKET_DOCS" >/dev/null
aws s3api list-objects-v2 $endpoint_args --bucket "$S3_BUCKET_DOCS" --prefix "$S3_PREFIX_DOCS" --max-items 1 >/dev/null

echo "== put/get/delete object under prefix =="
aws s3api put-object $endpoint_args --bucket "$S3_BUCKET_DOCS" --key "$key" --body "$tmp" >/dev/null
aws s3api get-object $endpoint_args --bucket "$S3_BUCKET_DOCS" --key "$key" /dev/null >/dev/null
aws s3api delete-object $endpoint_args --bucket "$S3_BUCKET_DOCS" --key "$key" >/dev/null

echo "ok: verified S3 docs permissions for s3://$S3_BUCKET_DOCS/$S3_PREFIX_DOCS"

View File

@@ -11,3 +11,7 @@ ensure_secret() {
}
# Seed default secrets. ensure_secret is defined above (body not shown in
# this hunk); presumably it creates the named Docker secret only if absent —
# TODO confirm against the full script.
# NOTE(review): the fallbacks ("admin", "minioadmin") are dev-only defaults;
# production deployments must export the real values before running this.
ensure_secret grafana_admin_password "${GRAFANA_ADMIN_PASSWORD:-admin}"
# Control plane S3 document storage (dev defaults: MinIO in swarm/stacks/control-plane.yml).
ensure_secret control_s3_access_key_id "${CONTROL_S3_ACCESS_KEY_ID:-minioadmin}"
ensure_secret control_s3_secret_access_key "${CONTROL_S3_SECRET_ACCESS_KEY:-minioadmin}"