wip: milestone 0 fixes
Some checks failed
CI/CD Pipeline / unit-tests (push) Failing after 1m16s
CI/CD Pipeline / integration-tests (push) Failing after 2m32s
CI/CD Pipeline / lint (push) Successful in 5m22s
CI/CD Pipeline / e2e-tests (push) Has been skipped
CI/CD Pipeline / build (push) Has been skipped

This commit is contained in:
2026-03-15 12:35:42 +02:00
parent 6708cf28a7
commit cffdf8af86
61266 changed files with 4511646 additions and 1938 deletions

211
realtime/src/presence.rs Normal file
View File

@@ -0,0 +1,211 @@
//! Realtime presence tracking using Redis
//!
//! This module provides distributed presence tracking across multiple worker nodes.
//! Users can join channels and their presence is tracked across the entire cluster.
use common::{CacheLayer, CacheError, CacheResult};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use uuid::Uuid;
/// Presence information for a user
///
/// Serialized into the cache as the per-user, per-channel presence record.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct PresenceInfo {
// The user this record tracks.
pub user_id: Uuid,
// Channel the user is present in (one record per (channel, user) pair).
pub channel: String,
// Current online/away/offline state.
pub status: PresenceStatus,
// Timestamp of the last observed activity (UTC).
pub last_seen: chrono::DateTime<chrono::Utc>,
// Optional caller-supplied payload (e.g. client info); stored verbatim.
pub metadata: Option<serde_json::Value>,
}
/// A user's visible presence state within a channel.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum PresenceStatus {
// Actively connected and heartbeating.
Online,
// Connected but idle.
Away,
// Not connected.
Offline,
}
/// Presence manager for tracking users across channels
///
/// Cheap to clone; all state lives in the shared cache layer, so clones
/// observe the same presence data.
#[derive(Clone)]
pub struct PresenceManager {
// Backing cache (Redis-based when `cache.redis` is Some).
cache: CacheLayer,
heartbeat_ttl: u64, // Time in seconds before a user is considered offline
}
impl PresenceManager {
/// Create a new presence manager
pub fn new(cache: CacheLayer, heartbeat_ttl: u64) -> Self {
Self {
cache,
heartbeat_ttl,
}
}
/// User joins a channel
pub async fn join_channel(
&self,
user_id: Uuid,
channel: String,
metadata: Option<serde_json::Value>,
) -> CacheResult<()> {
let presence = PresenceInfo {
user_id,
channel: channel.clone(),
status: PresenceStatus::Online,
last_seen: chrono::Utc::now(),
metadata,
};
let key = format!("presence:channel:{}:user:{}", channel, user_id);
self.cache.set(&key, &presence).await?;
// Also add to channel's user set
let channel_users_key = format!("presence:channel:{}:users", channel);
if let Some(redis) = &self.cache.redis {
let mut conn = redis.get_async_connection().await?;
redis::cmd("SADD")
.arg(&channel_users_key)
.arg(user_id.to_string())
.query_async(&mut conn)
.await?;
// Set expiration on the set
redis::cmd("EXPIRE")
.arg(&channel_users_key)
.arg(self.heartbeat_ttl * 2)
.query_async(&mut conn)
.await?;
}
Ok(())
}
/// User leaves a channel
pub async fn leave_channel(&self, user_id: Uuid, channel: String) -> CacheResult<()> {
let key = format!("presence:channel:{}:user:{}", channel, user_id);
self.cache.delete(&key).await?;
// Remove from channel's user set
let channel_users_key = format!("presence:channel:{}:users", channel);
if let Some(redis) = &self.cache.redis {
let mut conn = redis.get_async_connection().await?;
redis::cmd("SREM")
.arg(&channel_users_key)
.arg(user_id.to_string())
.query_async(&mut conn)
.await?;
}
Ok(())
}
/// Update user heartbeat (keep them online)
pub async fn heartbeat(&self, user_id: Uuid, channel: String) -> CacheResult<()> {
let key = format!("presence:channel:{}:user:{}", channel, user_id);
if let Some(redis) = &self.cache.redis {
let mut conn = redis.get_async_connection().await?;
// Update the TTL to keep the user online
redis::cmd("EXPIRE")
.arg(&key)
.arg(self.heartbeat_ttl)
.query_async(&mut conn)
.await?;
}
Ok(())
}
/// Get all users in a channel
pub async fn get_channel_users(&self, channel: String) -> CacheResult<Vec<Uuid>> {
let channel_users_key = format!("presence:channel:{}:users", channel);
if let Some(redis) = &self.cache.redis {
let mut conn = redis.get_async_connection().await?;
let users: Vec<String> = redis::cmd("SMEMBERS")
.arg(&channel_users_key)
.query_async(&mut conn)
.await?;
return users
.into_iter()
.filter_map(|s| Uuid::parse_str(&s).ok())
.collect();
}
Ok(vec![])
}
/// Get presence info for a specific user in a channel
pub async fn get_user_presence(
&self,
user_id: Uuid,
channel: String,
) -> CacheResult<Option<PresenceInfo>> {
let key = format!("presence:channel:{}:user:{}", channel, user_id);
self.cache.get(&key).await
}
/// Get online count for a channel
pub async fn get_channel_online_count(&self, channel: String) -> CacheResult<usize> {
let users = self.get_channel_users(channel).await?;
Ok(users.len())
}
/// Update user status
pub async fn update_status(
&self,
user_id: Uuid,
channel: String,
status: PresenceStatus,
) -> CacheResult<()> {
let key = format!("presence:channel:{}:user:{}", channel, user_id);
if let Some(mut presence) = self.cache.get::<PresenceInfo>(&key).await? {
presence.status = status;
presence.last_seen = chrono::Utc::now();
self.cache.set(&key, &presence).await?;
}
Ok(())
}
/// Get all channels a user is present in
pub async fn get_user_channels(&self, user_id: Uuid) -> CacheResult<Vec<String>> {
// This would require scanning keys, which is not ideal
// In production, you'd maintain a separate index
let user_channels_key = format!("presence:user:{}:channels", user_id);
if let Some(redis) = &self.cache.redis {
let mut conn = redis.get_async_connection().await?;
let channels: Vec<String> = redis::cmd("SMEMBERS")
.arg(&user_channels_key)
.query_async(&mut conn)
.await?;
return Ok(channels);
}
Ok(vec![])
}
/// Cleanup stale presence data (should be run periodically)
pub async fn cleanup_stale(&self) -> CacheResult<usize> {
// This would use SCAN to find expired keys
// For now, we rely on Redis TTL to auto-expire
Ok(0)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Constructing a manager without a Redis backend must still record
    /// the configured heartbeat TTL.
    #[tokio::test]
    async fn test_presence_manager_creation() {
        let backing_cache = CacheLayer::new(None, 60);
        let presence = PresenceManager::new(backing_cache, 30);
        assert_eq!(presence.heartbeat_ttl, 30);
    }
}