//! SQLite-backed caching handle resolver.
//!
//! This module provides a handle resolver that caches resolution results in SQLite
//! with configurable expiration times. SQLite caching provides persistence across
//! service restarts while remaining lightweight for single-instance deployments.

use super::errors::HandleResolverError;
use super::traits::HandleResolver;
use crate::handle_resolution_result::HandleResolutionResult;
use crate::metrics::SharedMetricsPublisher;
use async_trait::async_trait;
use metrohash::MetroHash64;
use sqlx::{Row, SqlitePool};
use std::hash::Hasher as _;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
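
// The queries below assume the cache table created by
// `crate::sqlite_schema::create_schema`. The authoritative DDL lives in that
// module; the shape this resolver relies on is roughly:
//
//     CREATE TABLE IF NOT EXISTS handle_resolution_cache (
//         key     INTEGER PRIMARY KEY,  -- MetroHash64 of the handle, stored as i64
//         result  BLOB    NOT NULL,     -- bincode-serialized HandleResolutionResult
//         created INTEGER NOT NULL,     -- unix seconds at first insert
//         updated INTEGER NOT NULL      -- unix seconds at last refresh; drives TTL checks
//     );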

/// SQLite-backed caching handle resolver.
///
/// This resolver caches handle resolution results in SQLite with a configurable TTL.
/// Results are stored in a compact binary format using bincode serialization
/// to minimize storage overhead.
///
/// # Features
///
/// - Persistent caching across service restarts
/// - Lightweight single-file database
/// - Configurable TTL (default: 90 days)
/// - Compact binary storage format
/// - Automatic schema management
/// - Graceful fallback if SQLite is unavailable
///
/// # Example
///
/// ```no_run
/// use std::sync::Arc;
/// use sqlx::SqlitePool;
/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
/// use quickdid::metrics::NoOpMetricsPublisher;
///
/// # async fn example() {
/// # use atproto_identity::resolve::HickoryDnsResolver;
/// # use reqwest::Client;
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
/// # let http_client = Client::new();
/// # let metrics = Arc::new(NoOpMetricsPublisher);
/// # let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
/// # let sqlite_pool: SqlitePool = todo!();
/// // Create with default 90-day TTL
/// let resolver = create_sqlite_resolver(
///     base_resolver,
///     sqlite_pool,
///     metrics
/// );
/// # }
/// ```
pub(super) struct SqliteHandleResolver {
    /// Base handle resolver to perform actual resolution
    inner: Arc<dyn HandleResolver>,
    /// SQLite connection pool
    pool: SqlitePool,
    /// TTL for cache entries in seconds
    ttl_seconds: u64,
    /// Metrics publisher for telemetry
    metrics: SharedMetricsPublisher,
}

impl SqliteHandleResolver {
    /// Create a new SQLite-backed handle resolver with the default 90-day TTL.
    fn new(
        inner: Arc<dyn HandleResolver>,
        pool: SqlitePool,
        metrics: SharedMetricsPublisher,
    ) -> Self {
        Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
    }

    /// Create a new SQLite-backed handle resolver with a custom TTL.
    fn with_ttl(
        inner: Arc<dyn HandleResolver>,
        pool: SqlitePool,
        ttl_seconds: u64,
        metrics: SharedMetricsPublisher,
    ) -> Self {
        Self {
            inner,
            pool,
            ttl_seconds,
            metrics,
        }
    }

    /// Generate the cache key for a handle.
    ///
    /// Uses MetroHash64 to generate a consistent hash of the handle
    /// for use as the primary key. This provides better key distribution
    /// and avoids issues with special characters in handles.
    fn make_key(&self, handle: &str) -> u64 {
        let mut h = MetroHash64::default();
        h.write(handle.as_bytes());
        h.finish()
    }

    /// Check if a cache entry is expired.
    fn is_expired(&self, updated_timestamp: i64) -> bool {
        let current_timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs() as i64;
        (current_timestamp - updated_timestamp) > (self.ttl_seconds as i64)
    }
}

#[async_trait]
impl HandleResolver for SqliteHandleResolver {
    async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
        let handle = s.to_string();
        let key = self.make_key(&handle) as i64; // SQLite uses signed integers

        // Try to get from SQLite cache first
        let cached_result =
            sqlx::query("SELECT result, updated FROM handle_resolution_cache WHERE key = ?1")
                .bind(key)
                .fetch_optional(&self.pool)
                .await;

        match cached_result {
            Ok(Some(row)) => {
                let cached_bytes: Vec<u8> = row.get("result");
                let updated_timestamp: i64 = row.get("updated");

                // Check if the entry is expired
                if !self.is_expired(updated_timestamp) {
                    // Deserialize the cached result
                    match HandleResolutionResult::from_bytes(&cached_bytes) {
                        Ok(cached_result) => {
                            if let Some(did) = cached_result.to_did() {
                                tracing::debug!("Cache hit for handle {}: {}", handle, did);
                                self.metrics.incr("resolver.sqlite.cache_hit").await;
                                return Ok((did, cached_result.timestamp));
                            } else {
                                tracing::debug!("Cache hit (not resolved) for handle {}", handle);
                                self.metrics
                                    .incr("resolver.sqlite.cache_hit_not_resolved")
                                    .await;
                                return Err(HandleResolverError::HandleNotFound);
                            }
                        }
                        Err(e) => {
                            tracing::warn!(
                                "Failed to deserialize cached result for handle {}: {}",
                                handle,
                                e
                            );
                            self.metrics.incr("resolver.sqlite.deserialize_error").await;
                            // Fall through to re-resolve if deserialization fails
                        }
                    }
                } else {
                    tracing::debug!("Cache entry expired for handle {}", handle);
                    self.metrics.incr("resolver.sqlite.cache_expired").await;
                    // Entry is expired, we'll re-resolve and update it
                }
            }
            Ok(None) => {
                tracing::debug!("Cache miss for handle {}, resolving...", handle);
                self.metrics.incr("resolver.sqlite.cache_miss").await;
            }
            Err(e) => {
                tracing::warn!("Failed to query SQLite cache for handle {}: {}", handle, e);
                self.metrics.incr("resolver.sqlite.query_error").await;
                // Fall through to resolve without caching on database error
            }
        }

        // Not in cache or expired, resolve through inner resolver
        let result = self.inner.resolve(s).await;

        // Create and serialize resolution result
        let resolution_result = match &result {
            Ok((did, _timestamp)) => {
                tracing::debug!(
                    "Caching successful resolution for handle {}: {}",
                    handle,
                    did
                );
                match HandleResolutionResult::success(did) {
                    Ok(res) => res,
                    Err(e) => {
                        tracing::warn!("Failed to create resolution result: {}", e);
                        self.metrics
                            .incr("resolver.sqlite.result_create_error")
                            .await;
                        return result;
                    }
                }
            }
            Err(e) => {
                tracing::debug!("Caching failed resolution for handle {}: {}", handle, e);
                match HandleResolutionResult::not_resolved() {
                    Ok(res) => res,
                    Err(err) => {
                        tracing::warn!("Failed to create not_resolved result: {}", err);
                        self.metrics
                            .incr("resolver.sqlite.result_create_error")
                            .await;
                        return result;
                    }
                }
            }
        };

        // Serialize to bytes
        match resolution_result.to_bytes() {
            Ok(bytes) => {
                let current_timestamp = SystemTime::now()
                    .duration_since(UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs() as i64;

                // Insert or update the cache entry
                let query_result = sqlx::query(
                    r#"
                    INSERT INTO handle_resolution_cache (key, result, created, updated)
                    VALUES (?1, ?2, ?3, ?4)
                    ON CONFLICT(key) DO UPDATE SET
                        result = excluded.result,
                        updated = excluded.updated
                    "#,
                )
                .bind(key)
                .bind(&bytes)
                .bind(current_timestamp)
                .bind(current_timestamp)
                .execute(&self.pool)
                .await;

                if let Err(e) = query_result {
                    tracing::warn!("Failed to cache handle resolution in SQLite: {}", e);
                    self.metrics.incr("resolver.sqlite.cache_set_error").await;
                } else {
                    self.metrics.incr("resolver.sqlite.cache_set").await;
                }
            }
            Err(e) => {
                tracing::warn!(
                    "Failed to serialize resolution result for handle {}: {}",
                    handle,
                    e
                );
                self.metrics.incr("resolver.sqlite.serialize_error").await;
            }
        }

        result
    }

    async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
        // Normalize the handle to lowercase
        let handle = handle.to_lowercase();
        let key = self.make_key(&handle) as i64; // SQLite uses signed integers

        // Update the SQLite cache
        if let Ok(mut conn) = self.pool.acquire().await {
            // Create a resolution result for the successful mapping
            let resolution_result = match HandleResolutionResult::success(did) {
                Ok(res) => res,
                Err(e) => {
                    tracing::warn!(
                        "Failed to create resolution result for set operation: {}",
                        e
                    );
                    self.metrics
                        .incr("resolver.sqlite.set_result_create_error")
                        .await;
                    // Still chain to inner resolver even if we can't cache
                    return self.inner.set(&handle, did).await;
                }
            };

            // Serialize to bytes
            match resolution_result.to_bytes() {
                Ok(bytes) => {
                    // Insert or update the cache entry, using the same schema as `resolve`
                    let timestamp = SystemTime::now()
                        .duration_since(UNIX_EPOCH)
                        .unwrap_or_default()
                        .as_secs() as i64;

                    match sqlx::query(
                        r#"
                        INSERT INTO handle_resolution_cache (key, result, created, updated)
                        VALUES (?1, ?2, ?3, ?4)
                        ON CONFLICT(key) DO UPDATE SET
                            result = excluded.result,
                            updated = excluded.updated
                        "#,
                    )
                    .bind(key)
                    .bind(&bytes)
                    .bind(timestamp)
                    .bind(timestamp)
                    .execute(&mut *conn)
                    .await
                    {
                        Ok(_) => {
                            tracing::debug!("Set handle {} -> DID {} in SQLite cache", handle, did);
                            self.metrics.incr("resolver.sqlite.set_success").await;
                        }
                        Err(e) => {
                            tracing::warn!("Failed to set handle->DID mapping in SQLite: {}", e);
                            self.metrics.incr("resolver.sqlite.set_cache_error").await;
                            // Still chain to inner resolver even if cache update fails
                        }
                    }
                }
                Err(e) => {
                    tracing::warn!(
                        "Failed to serialize resolution result for set operation: {}",
                        e
                    );
                    self.metrics
                        .incr("resolver.sqlite.set_serialize_error")
                        .await;
                    // Still chain to inner resolver even if serialization fails
                }
            }
        } else {
            tracing::warn!("Failed to get SQLite connection for set operation");
            self.metrics
                .incr("resolver.sqlite.set_connection_error")
                .await;
        }

        // Chain to inner resolver
        self.inner.set(&handle, did).await
    }
}

/// Create a new SQLite-backed handle resolver with the default 90-day TTL.
///
/// # Arguments
///
/// * `inner` - The underlying resolver to use for actual resolution
/// * `pool` - SQLite connection pool
/// * `metrics` - Metrics publisher for telemetry
///
/// # Example
///
/// ```no_run
/// use std::sync::Arc;
/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
/// use quickdid::sqlite_schema::create_sqlite_pool;
/// use quickdid::metrics::NoOpMetricsPublisher;
///
/// # async fn example() -> anyhow::Result<()> {
/// # use atproto_identity::resolve::HickoryDnsResolver;
/// # use reqwest::Client;
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
/// # let http_client = Client::new();
/// # let metrics = Arc::new(NoOpMetricsPublisher);
/// let base = create_base_resolver(
///     dns_resolver,
///     http_client,
///     metrics.clone(),
/// );
///
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
/// let resolver = create_sqlite_resolver(base, pool, metrics);
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
/// # Ok(())
/// # }
/// ```
pub fn create_sqlite_resolver(
    inner: Arc<dyn HandleResolver>,
    pool: SqlitePool,
    metrics: SharedMetricsPublisher,
) -> Arc<dyn HandleResolver> {
    Arc::new(SqliteHandleResolver::new(inner, pool, metrics))
}

/// Create a new SQLite-backed handle resolver with a custom TTL.
///
/// # Arguments
///
/// * `inner` - The underlying resolver to use for actual resolution
/// * `pool` - SQLite connection pool
/// * `ttl_seconds` - TTL for cache entries in seconds
/// * `metrics` - Metrics publisher for telemetry
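///
/// # Example
///
/// A minimal sketch mirroring the example on [`create_sqlite_resolver`], with a
/// one-hour TTL instead of the default:
///
/// ```no_run
/// use std::sync::Arc;
/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver_with_ttl};
/// use quickdid::sqlite_schema::create_sqlite_pool;
/// use quickdid::metrics::NoOpMetricsPublisher;
///
/// # async fn example() -> anyhow::Result<()> {
/// # use atproto_identity::resolve::HickoryDnsResolver;
/// # use reqwest::Client;
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
/// # let http_client = Client::new();
/// # let metrics = Arc::new(NoOpMetricsPublisher);
/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
///
/// // Cache entries expire after one hour
/// let resolver = create_sqlite_resolver_with_ttl(base, pool, 3600, metrics);
/// # Ok(())
/// # }
/// ```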
pub fn create_sqlite_resolver_with_ttl(
    inner: Arc<dyn HandleResolver>,
    pool: SqlitePool,
    ttl_seconds: u64,
    metrics: SharedMetricsPublisher,
) -> Arc<dyn HandleResolver> {
    Arc::new(SqliteHandleResolver::with_ttl(
        inner,
        pool,
        ttl_seconds,
        metrics,
    ))
}

#[cfg(test)]
mod tests {
    use super::*;

    // Mock handle resolver for testing
    #[derive(Clone)]
    struct MockHandleResolver {
        should_fail: bool,
        expected_did: String,
    }

    #[async_trait]
    impl HandleResolver for MockHandleResolver {
        async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
            if self.should_fail {
                Err(HandleResolverError::MockResolutionFailure)
            } else {
                let timestamp = std::time::SystemTime::now()
                    .duration_since(std::time::UNIX_EPOCH)
                    .unwrap_or_default()
                    .as_secs();
                Ok((self.expected_did.clone(), timestamp))
            }
        }

        // No-op `set` so the mock satisfies the full trait
        async fn set(&self, _handle: &str, _did: &str) -> Result<(), HandleResolverError> {
            Ok(())
        }
    }
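
    // A small sanity check on the cache-key scheme described on `make_key`:
    // hashing is deterministic for a given handle, and distinct handles get
    // distinct keys (for these inputs; MetroHash64 collisions are possible in
    // principle but not expected here).
    #[tokio::test]
    async fn test_make_key_is_deterministic() {
        let pool = SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        let mock_resolver = Arc::new(MockHandleResolver {
            should_fail: false,
            expected_did: "did:plc:testuser123".to_string(),
        });
        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
        let resolver = SqliteHandleResolver::with_ttl(mock_resolver, pool, 3600, metrics);

        // Same handle always hashes to the same key
        assert_eq!(
            resolver.make_key("alice.bsky.social"),
            resolver.make_key("alice.bsky.social")
        );
        // Different handles hash to different keys
        assert_ne!(
            resolver.make_key("alice.bsky.social"),
            resolver.make_key("bob.bsky.social")
        );
    }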

    #[tokio::test]
    async fn test_sqlite_handle_resolver_cache_hit() {
        // Create in-memory SQLite database for testing
        let pool = SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        // Create mock resolver
        let mock_resolver = Arc::new(MockHandleResolver {
            should_fail: false,
            expected_did: "did:plc:testuser123".to_string(),
        });

        // Create metrics publisher
        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);

        // Create SQLite-backed resolver
        let sqlite_resolver =
            SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);

        let test_handle = "alice.bsky.social";
        let expected_key = sqlite_resolver.make_key(test_handle) as i64;

        // Verify database is empty initially
        let initial_count: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
                .fetch_one(&pool)
                .await
                .expect("Failed to query initial count");
        assert_eq!(initial_count, 0);

        // First resolution - should call inner resolver and cache the result
        let (result1, _timestamp1) = sqlite_resolver.resolve(test_handle).await.unwrap();
        assert_eq!(result1, "did:plc:testuser123");

        // Verify record was inserted
        let count_after_first: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
                .fetch_one(&pool)
                .await
                .expect("Failed to query count after first resolution");
        assert_eq!(count_after_first, 1);

        // Verify the cached record has correct key and non-empty result
        let cached_record = sqlx::query(
            "SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1",
        )
        .bind(expected_key)
        .fetch_one(&pool)
        .await
        .expect("Failed to fetch cached record");

        let cached_key: i64 = cached_record.get("key");
        let cached_result: Vec<u8> = cached_record.get("result");
        let cached_created: i64 = cached_record.get("created");
        let cached_updated: i64 = cached_record.get("updated");

        assert_eq!(cached_key, expected_key);
        assert!(
            !cached_result.is_empty(),
            "Cached result should not be empty"
        );
        assert!(cached_created > 0, "Created timestamp should be positive");
        assert!(cached_updated > 0, "Updated timestamp should be positive");
        assert_eq!(
            cached_created, cached_updated,
            "Created and updated should be equal on first insert"
        );

        // Verify we can deserialize the cached result
        let resolution_result =
            crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
                .expect("Failed to deserialize cached result");
        let cached_did = resolution_result.to_did().expect("Should have a DID");
        assert_eq!(cached_did, "did:plc:testuser123");

        // Second resolution - should hit cache (no additional database insert)
        let (result2, _timestamp2) = sqlite_resolver.resolve(test_handle).await.unwrap();
        assert_eq!(result2, "did:plc:testuser123");

        // Verify count hasn't changed (cache hit, no new insert)
        let count_after_second: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
                .fetch_one(&pool)
                .await
                .expect("Failed to query count after second resolution");
        assert_eq!(count_after_second, 1);
    }
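
    // A sketch of the expiry path, under the same schema assumptions as the
    // tests above: rather than sleeping past the TTL, we backdate the `updated`
    // column directly, then confirm the resolver re-resolves and refreshes the
    // existing row in place.
    #[tokio::test]
    async fn test_sqlite_handle_resolver_cache_expiry() {
        let pool = SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        let mock_resolver = Arc::new(MockHandleResolver {
            should_fail: false,
            expected_did: "did:plc:testuser123".to_string(),
        });
        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
        let sqlite_resolver =
            SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);

        let test_handle = "alice.bsky.social";
        let key = sqlite_resolver.make_key(test_handle) as i64;

        // Prime the cache
        sqlite_resolver.resolve(test_handle).await.unwrap();

        // Backdate the entry so it falls outside the one-hour TTL
        sqlx::query("UPDATE handle_resolution_cache SET updated = updated - 7200 WHERE key = ?1")
            .bind(key)
            .execute(&pool)
            .await
            .expect("Failed to backdate cache entry");
        let stale_updated: i64 =
            sqlx::query_scalar("SELECT updated FROM handle_resolution_cache WHERE key = ?1")
                .bind(key)
                .fetch_one(&pool)
                .await
                .expect("Failed to read backdated timestamp");

        // Resolving again should bypass the stale entry and upsert in place
        let (did, _timestamp) = sqlite_resolver.resolve(test_handle).await.unwrap();
        assert_eq!(did, "did:plc:testuser123");

        let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
            .fetch_one(&pool)
            .await
            .expect("Failed to query count");
        assert_eq!(count, 1, "Upsert should refresh the row, not add one");

        let fresh_updated: i64 =
            sqlx::query_scalar("SELECT updated FROM handle_resolution_cache WHERE key = ?1")
                .bind(key)
                .fetch_one(&pool)
                .await
                .expect("Failed to read refreshed timestamp");
        assert!(
            fresh_updated > stale_updated,
            "Expired entry should be refreshed with a newer timestamp"
        );
    }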

    #[tokio::test]
    async fn test_sqlite_handle_resolver_cache_error() {
        // Create in-memory SQLite database for testing
        let pool = SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");

        // Create the schema
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        // Create mock resolver that fails
        let mock_resolver = Arc::new(MockHandleResolver {
            should_fail: true,
            expected_did: String::new(),
        });

        // Create metrics publisher
        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);

        // Create SQLite-backed resolver
        let sqlite_resolver =
            SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);

        let test_handle = "error.bsky.social";
        let expected_key = sqlite_resolver.make_key(test_handle) as i64;

        // Verify database is empty initially
        let initial_count: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
                .fetch_one(&pool)
                .await
                .expect("Failed to query initial count");
        assert_eq!(initial_count, 0);

        // First resolution - should fail and cache the failure
        let result1 = sqlite_resolver.resolve(test_handle).await;
        assert!(result1.is_err());

        // Match the specific error type we expect
        match result1 {
            Err(HandleResolverError::MockResolutionFailure) => {}
            other => panic!("Expected MockResolutionFailure, got {:?}", other),
        }

        // Verify the failure was cached
        let count_after_first: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
                .fetch_one(&pool)
                .await
                .expect("Failed to query count after first resolution");
        assert_eq!(count_after_first, 1);

        // Verify the cached error record
        let cached_record = sqlx::query(
            "SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1",
        )
        .bind(expected_key)
        .fetch_one(&pool)
        .await
        .expect("Failed to fetch cached error record");

        let cached_key: i64 = cached_record.get("key");
        let cached_result: Vec<u8> = cached_record.get("result");
        let cached_created: i64 = cached_record.get("created");
        let cached_updated: i64 = cached_record.get("updated");

        assert_eq!(cached_key, expected_key);
        assert!(
            !cached_result.is_empty(),
            "Cached error result should not be empty"
        );
        assert!(cached_created > 0, "Created timestamp should be positive");
        assert!(cached_updated > 0, "Updated timestamp should be positive");
        assert_eq!(
            cached_created, cached_updated,
            "Created and updated should be equal on first insert"
        );

        // Verify we can deserialize the cached error result
        let resolution_result =
            crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
                .expect("Failed to deserialize cached error result");
        let cached_did = resolution_result.to_did();
        assert!(cached_did.is_none(), "Error result should have no DID");

        // Second resolution - should hit cache with error (no additional database operations)
        let result2 = sqlite_resolver.resolve(test_handle).await;
        assert!(result2.is_err());

        // Match the specific error type we expect from cache
        match result2 {
            Err(HandleResolverError::HandleNotFound) => {} // Cache returns HandleNotFound for "not resolved"
            other => panic!("Expected HandleNotFound from cache, got {:?}", other),
        }

        // Verify count hasn't changed (cache hit, no new operations)
        let count_after_second: i64 =
            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
                .fetch_one(&pool)
                .await
                .expect("Failed to query count after second resolution");
        assert_eq!(count_after_second, 1);
    }
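
    // A sketch exercising `set` followed by `resolve`: after seeding the cache
    // directly, resolution should be served from SQLite without touching the
    // inner resolver (which is configured to fail here, so a fallthrough would
    // surface as an error).
    #[tokio::test]
    async fn test_sqlite_handle_resolver_set_then_resolve() {
        let pool = SqlitePool::connect("sqlite::memory:")
            .await
            .expect("Failed to connect to in-memory SQLite");
        crate::sqlite_schema::create_schema(&pool)
            .await
            .expect("Failed to create schema");

        // A failing inner resolver proves cache hits never reach it
        let mock_resolver = Arc::new(MockHandleResolver {
            should_fail: true,
            expected_did: String::new(),
        });
        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
        let sqlite_resolver =
            SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);

        // Seed the cache via `set` (handles are normalized to lowercase)
        sqlite_resolver
            .set("Alice.bsky.social", "did:plc:testuser123")
            .await
            .expect("set should succeed");

        // Resolution of the normalized handle should be a pure cache hit
        let (did, _timestamp) = sqlite_resolver
            .resolve("alice.bsky.social")
            .await
            .expect("resolve should be served from cache");
        assert_eq!(did, "did:plc:testuser123");
    }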
}