.dockerignore  (+37)
···
+# Git
+.git
+.gitignore
+
+# Documentation
+*.md
+docs/
+LICENSE
+
+# Development files
+.vscode/
+.env
+.env.local
+*.log
+
+# Build artifacts
+target/
+Dockerfile
+.dockerignore
+
+# Test files
+tests/
+benches/
+
+# Scripts (except the ones we need)
+*.sh
+
+# SQLite databases
+*.db
+*.db-*
+
+# OS files
+.DS_Store
+Thumbs.db
+
+# Keep the www directory for static files
+!www/
.env.example  (+12 -8)
···
 # QuickDID Environment Configuration Template
 # Copy this file to .env and customize for your deployment
-#
-# IMPORTANT: Never commit .env files with real SERVICE_KEY values
 
 # ============================================================================
 # REQUIRED CONFIGURATION
···
 # - quickdid.example.com:8080
 # - localhost:3007
 HTTP_EXTERNAL=quickdid.example.com
-
-# Private key for service identity (REQUIRED)
-# SECURITY: Generate a new key for each environment
-# NEVER commit real keys to version control
-SERVICE_KEY=did:key:YOUR_PRIVATE_KEY_HERE
 
 # ============================================================================
 # NETWORK CONFIGURATION
···
 QUEUE_BUFFER_SIZE=1000
 
 # ============================================================================
+# STATIC FILES CONFIGURATION
+# ============================================================================
+
+# Directory for serving static files (default: www)
+# This should contain:
+# - index.html (landing page)
+# - .well-known/atproto-did (service DID)
+# - .well-known/did.json (DID document)
+# Docker default: /app/www
+STATIC_FILES_DIR=www
+
+# ============================================================================
 # LOGGING
 # ============================================================================
 
···
 # ============================================================================
 
 # HTTP_EXTERNAL=localhost:3007
-# SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
 # RUST_LOG=debug
 # CACHE_TTL_MEMORY=60
 # CACHE_TTL_REDIS=300
.gitignore  (+3)
CHANGELOG.md  (+77)
···
 
 ## [Unreleased]
 
+## [1.0.0-rc.5] - 2025-09-10
+
+### Added
+- Bidirectional caching support for handle-to-DID and DID-to-handle lookups in Redis resolver
+- `purge` method to HandleResolver trait for removing entries by handle or DID
+- `set` method to HandleResolver trait for manual cache updates
+- Jetstream consumer integration for real-time cache updates from AT Protocol firehose
+- QuickDidEventHandler module for processing Account and Identity events
+- Static file serving with www directory support for landing page and well-known files
+- Comprehensive test coverage for new bidirectional cache operations
+
+### Changed
+- Handle normalization to lowercase throughout the system for consistency
+- Updated all resolver implementations to chain `purge` and `set` calls through the stack
+- Enhanced documentation to reflect Jetstream configuration and bidirectional caching
+- Improved production deployment guide with real-time sync recommendations
+
+### Fixed
+- Handle case sensitivity issues - all handles now normalized to lowercase
+- Cache consistency between handle and DID lookups
+- Event processing error handling in Jetstream consumer
+
+## [1.0.0-rc.4] - 2025-09-08
+
+### Added
+- Metrics system with pluggable adapters (StatsD support) for monitoring and observability
+- Proactive refresh resolver for keeping cached entries fresh before expiration
+- Redis queue deduplication to prevent duplicate handle resolution work items
+- Configurable bind address for StatsD UDP socket supporting both IPv4 and IPv6
+- CORS headers support for cross-origin requests
+- OPTIONS method handling for preflight requests
+- Resolution timing measurements for performance monitoring
+- Comprehensive metrics tracking including counters, gauges, and timings
+- Telegraf and TimescaleDB integration guide for metrics aggregation
+- Railway deployment resources for production environments
+
+### Changed
+- Replaced chrono with httpdate for more efficient HTTP date formatting
+- Refactored handle resolver to include resolution time measurements
+- Improved handle resolution view architecture
+- Enhanced documentation with metrics configuration and deployment guides
+
+### Fixed
+- Minor typo in feature commit message ("fesature" corrected to "feature")
+
+## [1.0.0-rc.3] - 2025-09-06
+
+### Added
+- SQLite support for persistent caching and queue processing
+- Rate limiting with semaphore-based concurrency control (`RESOLVER_MAX_CONCURRENT`)
+- Timeout support for rate limit permit acquisition (`RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`)
+- SQLite queue adapter with work shedding capabilities (`QUEUE_SQLITE_MAX_SIZE`)
+- Comprehensive error system with unique identifiers (e.g., `error-quickdid-config-1`)
+- 12-factor app compliance with environment-only configuration
+
+### Changed
+- Configuration now exclusively uses environment variables (removed clap dependency)
+- Command-line arguments limited to `--version` and `--help` only
+- Improved error handling with strongly-typed errors using `thiserror` throughout
+- Enhanced documentation with accurate configuration defaults and examples
+- Updated README with complete architecture overview and deployment strategies
+- Cache priority system: Redis → SQLite → Memory (first available)
+
+### Fixed
+- Error messages now consistently follow `error-quickdid-<domain>-<number>` format
+- Configuration validation for all TTL and timeout values
+- Documentation inconsistencies in CLAUDE.md development guide
+- Queue adapter validation to include 'sqlite' option
+
+### Removed
+- `clap` crate dependency (replaced with simple argument handling)
+- `anyhow!()` macro usage in favor of proper error types
+- Command-line configuration options (following 12-factor methodology)
+
 ## [1.0.0-rc.2] - 2025-09-05
 
 ### Changed
···
 - Unnecessary feature flags (axum macros, deadpool-redis script)
 - 4 unused dependencies reducing compilation time
 
+[1.0.0-rc.5]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.5
+[1.0.0-rc.4]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.4
+[1.0.0-rc.3]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.3
 [1.0.0-rc.2]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.2
 [1.0.0-rc.1]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.1
CLAUDE.md  (+164 -33)
···
 # QuickDID - Development Guide for Claude
 
 ## Overview
-QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides handle-to-DID resolution with Redis-backed caching and queue processing.
+QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides bidirectional handle-to-DID and DID-to-handle resolution with multi-layer caching (Redis, SQLite, in-memory), queue processing, metrics support, proactive cache refreshing, and real-time cache updates via Jetstream consumer.
+
+## Configuration
+
+QuickDID follows the 12-factor app methodology and uses environment variables exclusively for configuration. There are no command-line arguments except for `--version` and `--help`.
+
+Configuration is validated at startup, and the service will exit with specific error codes if validation fails:
+- `error-quickdid-config-1`: Missing required environment variable
+- `error-quickdid-config-2`: Invalid configuration value
+- `error-quickdid-config-3`: Invalid TTL value (must be positive)
+- `error-quickdid-config-4`: Invalid timeout value (must be positive)
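A minimal sketch of this startup pattern, assuming a hypothetical `load_config` helper and plain string errors for brevity (the real modules use `thiserror` enums and more variables):

```rust
use std::env;
use std::time::Duration;

// Illustrative config loader: read required/optional variables and map
// failures onto the documented error-quickdid-config-* identifiers.
struct Config {
    http_external: String,
    cache_ttl_memory: Duration,
}

fn load_config() -> Result<Config, String> {
    // error-quickdid-config-1: missing required environment variable
    let http_external = env::var("HTTP_EXTERNAL")
        .map_err(|_| "error-quickdid-config-1 Missing required environment variable: HTTP_EXTERNAL".to_string())?;

    // error-quickdid-config-2/3: optional TTL must parse and be positive
    let ttl_secs: u64 = env::var("CACHE_TTL_MEMORY")
        .unwrap_or_else(|_| "600".to_string())
        .parse()
        .map_err(|_| "error-quickdid-config-2 Invalid configuration value: CACHE_TTL_MEMORY".to_string())?;
    if ttl_secs == 0 {
        return Err("error-quickdid-config-3 Invalid TTL value (must be positive): CACHE_TTL_MEMORY".to_string());
    }

    Ok(Config {
        http_external,
        cache_ttl_memory: Duration::from_secs(ttl_secs),
    })
}

fn main() {
    match load_config() {
        Ok(cfg) => println!("starting with HTTP_EXTERNAL={}", cfg.http_external),
        Err(e) => {
            // Validation failure: print the identifier and exit non-zero.
            eprintln!("{e}");
            std::process::exit(1);
        }
    }
}
```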
 
 ## Common Commands
 
···
 # Build the project
 cargo build
 
-# Run in debug mode
-cargo run
+# Run in debug mode (requires environment variables)
+HTTP_EXTERNAL=localhost:3007 cargo run
 
 # Run tests
 cargo test
···
 # Type checking
 cargo check
 
-# Run with environment variables
-HTTP_EXTERNAL=localhost:3007 SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK cargo run
+# Linting
+cargo clippy
+
+# Show version
+cargo run -- --version
+
+# Show help
+cargo run -- --help
 ```
 
 ### Development with VS Code
···
 
 ### Core Components
 
-1. **Handle Resolution** (`src/handle_resolver.rs`)
+1. **Handle Resolution** (`src/handle_resolver/`)
    - `BaseHandleResolver`: Core resolution using DNS and HTTP
-   - `CachingHandleResolver`: In-memory caching layer
-   - `RedisHandleResolver`: Redis-backed persistent caching with 90-day TTL
+   - `RateLimitedHandleResolver`: Semaphore-based rate limiting with optional timeout
+   - `CachingHandleResolver`: In-memory caching layer with bidirectional support
+   - `RedisHandleResolver`: Redis-backed persistent caching with bidirectional lookups
+   - `SqliteHandleResolver`: SQLite-backed persistent caching with bidirectional support
+   - `ProactiveRefreshResolver`: Automatically refreshes cache entries before expiration
+   - All resolvers implement the `HandleResolver` trait (see the sketch after this list) with:
+     - `resolve`: Handle-to-DID resolution
+     - `purge`: Remove entries by handle or DID
+     - `set`: Manually update handle-to-DID mappings
    - Uses binary serialization via `HandleResolutionResult` for space efficiency
+   - Resolution stack: Cache → ProactiveRefresh (optional) → RateLimited (optional) → Base → DNS/HTTP
+   - Includes resolution timing measurements for metrics
 
 2. **Binary Serialization** (`src/handle_resolution_result.rs`)
    - Compact storage format using bincode
    - Strips DID prefixes for did:web and did:plc methods
    - Stores: timestamp (u64), method type (i16), payload (String)
 
-3. **Queue System** (`src/queue_adapter.rs`)
-   - Supports MPSC (in-process) and Redis adapters
+3. **Queue System** (`src/queue/`)
+   - Supports MPSC (in-process), Redis, SQLite, and no-op adapters
    - `HandleResolutionWork` items processed asynchronously
    - Redis uses reliable queue pattern (LPUSH/RPOPLPUSH/LREM)
+   - SQLite provides persistent queue with work shedding capabilities
 
 4. **HTTP Server** (`src/http/`)
    - XRPC endpoints for AT Protocol compatibility
    - Health check endpoint
-   - DID document serving via .well-known
+   - Static file serving from configurable directory (default: www)
+   - Serves .well-known files as static content
+   - CORS headers support for cross-origin requests
+   - Cache-Control headers with configurable max-age and stale directives
+   - ETag support with configurable seed for cache invalidation
+
+5. **Metrics System** (`src/metrics.rs`)
+   - Pluggable metrics publishing with StatsD support
+   - Tracks counters, gauges, and timings
+   - Configurable tags for environment/service identification
+   - No-op adapter for development environments
+   - Metrics for Jetstream event processing
+
+6. **Jetstream Consumer** (`src/jetstream_handler.rs`)
+   - Consumes AT Protocol firehose events via WebSocket
+   - Processes Account events (purges deleted/deactivated accounts)
+   - Processes Identity events (updates handle-to-DID mappings)
+   - Automatic reconnection with exponential backoff
+   - Comprehensive metrics for event processing
+   - Spawned as cancellable task using task manager
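To make the trait shape in item 1 concrete, here is a minimal sketch of a `HandleResolver` trait with `resolve`, `purge`, and `set`; the actual signatures, error type, and module layout in `src/handle_resolver/` may differ, so treat every name here as illustrative:

```rust
use async_trait::async_trait;

// Illustrative error type; the real crate defines thiserror enums with
// error-quickdid-* identifiers.
type ResolveError = Box<dyn std::error::Error + Send + Sync>;

// Hypothetical shape of the resolver trait described above.
#[async_trait]
pub trait HandleResolver: Send + Sync {
    /// Handle-to-DID resolution (handles are normalized to lowercase).
    async fn resolve(&self, handle: &str) -> Result<String, ResolveError>;

    /// Remove cached entries by either handle or DID, in both directions.
    async fn purge(&self, subject: &str) -> Result<(), ResolveError>;

    /// Manually update a handle-to-DID mapping (e.g., from a Jetstream
    /// Identity event); implementations update both directions and chain
    /// the call to the next resolver in the stack.
    async fn set(&self, handle: &str, did: &str) -> Result<(), ResolveError>;
}

// Example of how layers compose: a caching resolver wrapping an inner
// resolver and forwarding purge/set down the stack.
pub struct CachingResolver<R: HandleResolver> {
    inner: R,
    // in-memory map omitted for brevity
}

#[async_trait]
impl<R: HandleResolver> HandleResolver for CachingResolver<R> {
    async fn resolve(&self, handle: &str) -> Result<String, ResolveError> {
        let handle = handle.to_lowercase();
        // check the local cache here, then fall through on a miss
        self.inner.resolve(&handle).await
    }

    async fn purge(&self, subject: &str) -> Result<(), ResolveError> {
        // drop local entries for `subject`, then chain downward
        self.inner.purge(subject).await
    }

    async fn set(&self, handle: &str, did: &str) -> Result<(), ResolveError> {
        // update local handle→DID and DID→handle entries, then chain downward
        self.inner.set(&handle.to_lowercase(), did).await
    }
}
```

Composed innermost-out, wrappers like this yield the documented stack order: Cache over ProactiveRefresh over RateLimited over Base.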
 
 ## Key Technical Details
 
···
 - Other DID methods stored with full identifier
 
 ### Redis Integration
-- **Caching**: Uses MetroHash64 for key generation, stores binary data
+- **Bidirectional Caching**:
+  - Stores both handle→DID and DID→handle mappings
+  - Uses MetroHash64 for key generation
+  - Binary data storage for efficiency
+  - Automatic synchronization of both directions
 - **Queuing**: Reliable queue with processing/dead letter queues
 - **Key Prefixes**: Configurable via `QUEUE_REDIS_PREFIX` environment variable
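As a sketch of how the bidirectional entries can be keyed (the `handle:` prefix mirrors the key format mentioned in the debugging section; the exact key layout and Redis calls in the real module are assumptions here):

```rust
use std::hash::Hasher;

use metrohash::MetroHash64;

// Derive a compact cache key from an identifier (handle or DID).
fn cache_key(identifier: &str) -> String {
    let mut hasher = MetroHash64::new();
    hasher.write(identifier.to_lowercase().as_bytes());
    format!("handle:{:x}", hasher.finish())
}

// Both directions are written after a successful resolution, so a later
// lookup by DID hits the cache just like a lookup by handle.
fn keys_for(handle: &str, did: &str) -> (String, String) {
    (cache_key(handle), cache_key(did))
}

fn main() {
    let (handle_key, did_key) = keys_for("Alice.example.com", "did:plc:abc123");
    // Conceptually the resolver then issues two SETEX commands with the
    // bincode-encoded HandleResolutionResult as the value:
    //   SETEX <handle_key> <CACHE_TTL_REDIS> <binary payload>
    //   SETEX <did_key>    <CACHE_TTL_REDIS> <binary payload>
    println!("{handle_key} / {did_key}");
}
```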
 
 ### Handle Resolution Flow
-1. Check Redis cache (if configured)
-2. Fall back to in-memory cache
+1. Check cache (Redis/SQLite/in-memory based on configuration)
+2. If cache miss and rate limiting enabled:
+   - Acquire semaphore permit (with optional timeout)
+   - If timeout configured and exceeded, return error
+3. Perform DNS TXT lookup or HTTP well-known query
+4. Cache result with appropriate TTL in both directions (handle→DID and DID→handle)
 5. Return DID or error
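A compact sketch of the cache-miss path (steps 2-4) using a Tokio semaphore for the rate-limit permit; the resolver types and the `base_lookup` function are placeholders, not the project's actual API:

```rust
use std::sync::Arc;
use std::time::Duration;

use tokio::sync::Semaphore;
use tokio::time::timeout;

// Placeholder for the DNS TXT / HTTP well-known lookup in step 3.
async fn base_lookup(handle: &str) -> Result<String, String> {
    Err(format!("lookup for {handle} not implemented in this sketch"))
}

// Step 2: acquire a permit, optionally bounded by
// RESOLVER_MAX_CONCURRENT_TIMEOUT_MS, before doing the expensive lookup.
async fn resolve_uncached(
    handle: &str,
    permits: Arc<Semaphore>,
    permit_timeout: Option<Duration>,
) -> Result<String, String> {
    let _permit = match permit_timeout {
        Some(t) => timeout(t, permits.acquire_owned())
            .await
            .map_err(|_| "rate limit permit acquisition timed out".to_string())?
            .map_err(|_| "semaphore closed".to_string())?,
        None => permits
            .acquire_owned()
            .await
            .map_err(|_| "semaphore closed".to_string())?,
    };
    // Step 3: DNS TXT lookup or HTTP well-known query.
    base_lookup(handle).await
    // Step 4 (in the caller): store handle→DID and DID→handle with the
    // configured TTL before returning the DID.
}
```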
 
+### Cache Management Operations
+- **Purge**: Removes entries by either handle or DID
+  - Uses `atproto_identity::resolve::parse_input` for identifier detection
+  - Removes both handle→DID and DID→handle mappings
+  - Chains through all resolver layers
+- **Set**: Manually updates handle-to-DID mappings
+  - Updates both directions in cache
+  - Normalizes handles to lowercase
+  - Chains through all resolver layers
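A rough sketch of the purge/set behavior described above; `classify_input` is a stand-in for the identifier detection the real code delegates to `atproto_identity::resolve::parse_input`, and the cache is simplified to a pair of maps:

```rust
use std::collections::HashMap;

// Stand-in for identifier detection: decide whether the caller passed a
// handle or a DID. Purely illustrative.
enum Subject<'a> {
    Handle(&'a str),
    Did(&'a str),
}

fn classify_input(input: &str) -> Subject<'_> {
    if input.starts_with("did:") {
        Subject::Did(input)
    } else {
        Subject::Handle(input)
    }
}

// Simplified bidirectional cache: purge removes both entries regardless of
// which side the caller names; set writes both directions.
struct BidirectionalCache {
    handle_to_did: HashMap<String, String>,
    did_to_handle: HashMap<String, String>,
}

impl BidirectionalCache {
    fn purge(&mut self, subject: &str) {
        let (handle, did) = match classify_input(subject) {
            Subject::Handle(h) => {
                let h = h.to_lowercase();
                let d = self.handle_to_did.get(&h).cloned();
                (Some(h), d)
            }
            Subject::Did(d) => {
                let h = self.did_to_handle.get(d).cloned();
                (h, Some(d.to_string()))
            }
        };
        if let Some(h) = handle {
            self.handle_to_did.remove(&h);
        }
        if let Some(d) = did {
            self.did_to_handle.remove(&d);
        }
    }

    fn set(&mut self, handle: &str, did: &str) {
        let handle = handle.to_lowercase();
        self.handle_to_did.insert(handle.clone(), did.to_string());
        self.did_to_handle.insert(did.to_string(), handle);
    }
}

fn main() {
    let mut cache = BidirectionalCache {
        handle_to_did: HashMap::new(),
        did_to_handle: HashMap::new(),
    };
    cache.set("Alice.example.com", "did:plc:abc123");
    cache.purge("did:plc:abc123"); // removes both directions
}
```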
+
 ## Environment Variables
 
 ### Required
 - `HTTP_EXTERNAL`: External hostname for service endpoints (e.g., `localhost:3007`)
-- `SERVICE_KEY`: Private key for service identity (DID format)
 
-### Optional
+### Optional - Core Configuration
 - `HTTP_PORT`: Server port (default: 8080)
 - `PLC_HOSTNAME`: PLC directory hostname (default: plc.directory)
+- `RUST_LOG`: Logging level (e.g., debug, info)
+- `STATIC_FILES_DIR`: Directory for serving static files (default: www)
+
+### Optional - Caching
 - `REDIS_URL`: Redis connection URL for caching
-- `QUEUE_ADAPTER`: Queue type - 'mpsc' or 'redis' (default: mpsc)
+- `SQLITE_URL`: SQLite database URL for caching (e.g., `sqlite:./quickdid.db`)
+- `CACHE_TTL_MEMORY`: TTL for in-memory cache in seconds (default: 600)
+- `CACHE_TTL_REDIS`: TTL for Redis cache in seconds (default: 7776000)
+- `CACHE_TTL_SQLITE`: TTL for SQLite cache in seconds (default: 7776000)
+
+### Optional - Queue Configuration
+- `QUEUE_ADAPTER`: Queue type - 'mpsc', 'redis', 'sqlite', 'noop', or 'none' (default: mpsc)
 - `QUEUE_REDIS_PREFIX`: Redis key prefix for queues (default: queue:handleresolver:)
-- `QUEUE_WORKER_ID`: Worker ID for Redis queue (auto-generated if not set)
-- `RUST_LOG`: Logging level (e.g., debug, info)
+- `QUEUE_WORKER_ID`: Worker ID for queue operations (default: worker1)
+- `QUEUE_BUFFER_SIZE`: Buffer size for MPSC queue (default: 1000)
+- `QUEUE_SQLITE_MAX_SIZE`: Max queue size for SQLite work shedding (default: 10000)
+- `QUEUE_REDIS_TIMEOUT`: Redis blocking timeout in seconds (default: 5)
+- `QUEUE_REDIS_DEDUP_ENABLED`: Enable queue deduplication to prevent duplicate handles (default: false)
+- `QUEUE_REDIS_DEDUP_TTL`: TTL for deduplication keys in seconds (default: 60)
+
+### Optional - Rate Limiting
+- `RESOLVER_MAX_CONCURRENT`: Maximum concurrent handle resolutions (default: 0 = disabled)
+- `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`: Timeout for acquiring rate limit permit in ms (default: 0 = no timeout)
+
+### Optional - HTTP Cache Control
+- `CACHE_MAX_AGE`: Max-age for Cache-Control header in seconds (default: 86400)
+- `CACHE_STALE_IF_ERROR`: Stale-if-error directive in seconds (default: 172800)
+- `CACHE_STALE_WHILE_REVALIDATE`: Stale-while-revalidate directive in seconds (default: 86400)
+- `CACHE_MAX_STALE`: Max-stale directive in seconds (default: 86400)
+- `ETAG_SEED`: Seed value for ETag generation (default: application version)
+
+### Optional - Metrics
+- `METRICS_ADAPTER`: Metrics adapter type - 'noop' or 'statsd' (default: noop)
+- `METRICS_STATSD_HOST`: StatsD host and port (required when METRICS_ADAPTER=statsd, e.g., localhost:8125)
+- `METRICS_STATSD_BIND`: Bind address for StatsD UDP socket (default: [::]:0 for IPv6, can use 0.0.0.0:0 for IPv4)
+- `METRICS_PREFIX`: Prefix for all metrics (default: quickdid)
+- `METRICS_TAGS`: Comma-separated tags (e.g., env:prod,service:quickdid)
+
+### Optional - Proactive Refresh
+- `PROACTIVE_REFRESH_ENABLED`: Enable proactive cache refreshing (default: false)
+- `PROACTIVE_REFRESH_THRESHOLD`: Refresh when TTL remaining is below this threshold (0.0-1.0, default: 0.8)
+
+### Optional - Jetstream Consumer
+- `JETSTREAM_ENABLED`: Enable Jetstream consumer for real-time cache updates (default: false)
+- `JETSTREAM_HOSTNAME`: Jetstream WebSocket hostname (default: jetstream.atproto.tools)
 
 ## Error Handling
 
···
 
 error-quickdid-<domain>-<number> <message>: <details>
 
-Example errors:
+Current error domains and examples:
 
-* error-quickdid-resolve-1 Multiple DIDs resolved for method
-* error-quickdid-plc-1 HTTP request failed: https://google.com/ Not Found
-* error-quickdid-key-1 Error decoding key: invalid
+* `config`: Configuration errors (e.g., error-quickdid-config-1 Missing required environment variable)
+* `resolve`: Handle resolution errors (e.g., error-quickdid-resolve-1 Failed to resolve subject)
+* `queue`: Queue operation errors (e.g., error-quickdid-queue-1 Failed to push to queue)
+* `cache`: Cache-related errors (e.g., error-quickdid-cache-1 Redis pool creation failed)
+* `result`: Serialization errors (e.g., error-quickdid-result-1 System time error)
+* `task`: Task processing errors (e.g., error-quickdid-task-1 Queue adapter health check failed)
 
 Errors should be represented as enums using the `thiserror` library.
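For example, a `cache`-domain enum following this convention could look like the following; the first variant's message is taken from the example above, while the second variant and its number are made up to show how further errors in a domain would be added:

```rust
use thiserror::Error;

// Illustrative enum for the `cache` error domain.
#[derive(Debug, Error)]
pub enum CacheError {
    #[error("error-quickdid-cache-1 Redis pool creation failed: {0}")]
    RedisPoolCreation(String),

    // Hypothetical second variant, shown only to illustrate the numbering.
    #[error("error-quickdid-cache-2 SQLite connection failed: {0}")]
    SqliteConnection(String),
}

fn main() {
    let err = CacheError::RedisPoolCreation("connection refused".to_string());
    // Prints: error-quickdid-cache-1 Redis pool creation failed: connection refused
    println!("{err}");
}
```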
 
···
 ### Test Coverage Areas
 - Handle resolution with various DID methods
 - Binary serialization/deserialization
-- Redis caching and expiration
+- Redis caching and expiration with bidirectional lookups
 - Queue processing logic
 - HTTP endpoint responses
+- Jetstream event handler processing
+- Purge and set operations across resolver layers
 
 ## Development Patterns
 
 ### Error Handling
-- Uses `anyhow::Result` for error propagation
-- Graceful fallbacks when Redis is unavailable
+- Uses strongly-typed errors with `thiserror` for all modules
+- Each error has a unique identifier following the pattern `error-quickdid-<domain>-<number>`
+- Graceful fallbacks when Redis/SQLite is unavailable
 - Detailed tracing for debugging
+- Avoid using `anyhow!()` or `bail!()` macros - use proper error types instead
 
 ### Performance Optimizations
 - Binary serialization reduces storage by ~40%
 - MetroHash64 for fast key generation
 - Connection pooling for Redis
 - Configurable TTLs for cache entries
+- Rate limiting via semaphore-based concurrency control
+- HTTP caching with ETag and Cache-Control headers
+- Resolution timing metrics for performance monitoring
- Resolution timing metrics for performance monitoring
137
247
138
248
### Code Style
139
249
- Follow existing Rust idioms and patterns
140
250
- Use `tracing` for logging, not `println!`
141
251
- Prefer `Arc` for shared state across async tasks
142
252
- Handle errors explicitly, avoid `.unwrap()` in production code
253
+
- Use `httpdate` crate for HTTP date formatting (not `chrono`)
143
254
144
255
## Common Tasks
145
256
···
149
260
3. Add test cases for the new method type
150
261
151
262
### Modifying Cache TTL
152
-
- For in-memory: Pass TTL to `CachingHandleResolver::new()`
153
-
- For Redis: Modify `RedisHandleResolver::ttl_seconds()`
263
+
- For in-memory: Set `CACHE_TTL_MEMORY` environment variable
264
+
- For Redis: Set `CACHE_TTL_REDIS` environment variable
265
+
- For SQLite: Set `CACHE_TTL_SQLITE` environment variable
266
+
267
+
### Configuring Metrics
268
+
1. Set `METRICS_ADAPTER=statsd` and `METRICS_STATSD_HOST=localhost:8125`
269
+
2. Configure tags with `METRICS_TAGS=env:prod,service:quickdid`
270
+
3. Use Telegraf + TimescaleDB for aggregation (see `docs/telegraf-timescaledb-metrics-guide.md`)
271
+
4. Railway deployment resources available in `railway-resources/telegraf/`
154
272
155
273
### Debugging Resolution Issues
156
274
1. Enable debug logging: `RUST_LOG=debug`
157
-
2. Check Redis cache: `redis-cli GET "handle:<hash>"`
158
-
3. Monitor queue processing in logs
159
-
4. Verify DNS/HTTP connectivity to AT Protocol infrastructure
275
+
2. Check Redis cache:
276
+
- Handle lookup: `redis-cli GET "handle:<hash>"`
277
+
- DID lookup: `redis-cli GET "handle:<hash>"` (same key format)
278
+
3. Check SQLite cache: `sqlite3 quickdid.db "SELECT * FROM handle_resolution_cache;"`
279
+
4. Monitor queue processing in logs
280
+
5. Check rate limiting: Look for "Rate limit permit acquisition timed out" errors
281
+
6. Verify DNS/HTTP connectivity to AT Protocol infrastructure
282
+
7. Monitor metrics for resolution timing and cache hit rates
283
+
8. Check Jetstream consumer status:
284
+
- Look for "Jetstream consumer" log entries
285
+
- Monitor `jetstream.*` metrics
286
+
- Check reconnection attempts in logs
160
287
161
288
## Dependencies
162
289
- `atproto-identity`: Core AT Protocol identity resolution
290
+
- `atproto-jetstream`: AT Protocol Jetstream event consumer
163
291
- `bincode`: Binary serialization
164
292
- `deadpool-redis`: Redis connection pooling
165
293
- `metrohash`: Fast non-cryptographic hashing
166
294
- `tokio`: Async runtime
167
-
- `axum`: Web framework
295
+
- `axum`: Web framework
296
+
- `httpdate`: HTTP date formatting (replacing chrono)
297
+
- `cadence`: StatsD metrics client
298
+
- `thiserror`: Error handling
Cargo.lock  (+1148 -595)
···
3
3
version = 4
4
4
5
5
[[package]]
6
-
name = "addr2line"
7
-
version = "0.24.2"
8
-
source = "registry+https://github.com/rust-lang/crates.io-index"
9
-
checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
10
-
dependencies = [
11
-
"gimli",
12
-
]
13
-
14
-
[[package]]
15
-
name = "adler2"
16
-
version = "2.0.1"
17
-
source = "registry+https://github.com/rust-lang/crates.io-index"
18
-
checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
19
-
20
-
[[package]]
21
6
name = "aho-corasick"
22
-
version = "1.1.3"
7
+
version = "1.1.4"
23
8
source = "registry+https://github.com/rust-lang/crates.io-index"
24
-
checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
9
+
checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
25
10
dependencies = [
26
11
"memchr",
27
12
]
···
33
18
checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"
34
19
35
20
[[package]]
36
-
name = "anstream"
37
-
version = "0.6.20"
21
+
name = "anyhow"
22
+
version = "1.0.100"
38
23
source = "registry+https://github.com/rust-lang/crates.io-index"
39
-
checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192"
40
-
dependencies = [
41
-
"anstyle",
42
-
"anstyle-parse",
43
-
"anstyle-query",
44
-
"anstyle-wincon",
45
-
"colorchoice",
46
-
"is_terminal_polyfill",
47
-
"utf8parse",
48
-
]
24
+
checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"
49
25
50
26
[[package]]
51
-
name = "anstyle"
52
-
version = "1.0.11"
27
+
name = "arc-swap"
28
+
version = "1.7.1"
53
29
source = "registry+https://github.com/rust-lang/crates.io-index"
54
-
checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
30
+
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
55
31
56
32
[[package]]
57
-
name = "anstyle-parse"
58
-
version = "0.2.7"
33
+
name = "async-trait"
34
+
version = "0.1.89"
59
35
source = "registry+https://github.com/rust-lang/crates.io-index"
60
-
checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
36
+
checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
61
37
dependencies = [
62
-
"utf8parse",
38
+
"proc-macro2",
39
+
"quote",
40
+
"syn 2.0.108",
63
41
]
64
42
65
43
[[package]]
66
-
name = "anstyle-query"
67
-
version = "1.1.4"
44
+
name = "atoi"
45
+
version = "2.0.0"
68
46
source = "registry+https://github.com/rust-lang/crates.io-index"
69
-
checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2"
47
+
checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528"
70
48
dependencies = [
71
-
"windows-sys 0.60.2",
49
+
"num-traits",
72
50
]
73
51
74
52
[[package]]
75
-
name = "anstyle-wincon"
76
-
version = "3.0.10"
53
+
name = "atomic-waker"
54
+
version = "1.1.2"
77
55
source = "registry+https://github.com/rust-lang/crates.io-index"
78
-
checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a"
79
-
dependencies = [
80
-
"anstyle",
81
-
"once_cell_polyfill",
82
-
"windows-sys 0.60.2",
83
-
]
56
+
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
84
57
85
58
[[package]]
86
-
name = "anyhow"
87
-
version = "1.0.99"
88
-
source = "registry+https://github.com/rust-lang/crates.io-index"
89
-
checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
59
+
name = "atproto-client"
60
+
version = "0.13.0"
61
+
source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
62
+
dependencies = [
63
+
"anyhow",
64
+
"async-trait",
65
+
"atproto-identity",
66
+
"atproto-oauth",
67
+
"atproto-record",
68
+
"bytes",
69
+
"reqwest",
70
+
"reqwest-chain",
71
+
"reqwest-middleware",
72
+
"serde",
73
+
"serde_json",
74
+
"thiserror 2.0.17",
75
+
"tokio",
76
+
"tracing",
77
+
"urlencoding",
78
+
]
90
79
91
80
[[package]]
92
-
name = "arc-swap"
93
-
version = "1.7.1"
94
-
source = "registry+https://github.com/rust-lang/crates.io-index"
95
-
checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
81
+
name = "atproto-identity"
82
+
version = "0.13.0"
83
+
source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
84
+
dependencies = [
85
+
"anyhow",
86
+
"async-trait",
87
+
"ecdsa",
88
+
"elliptic-curve",
89
+
"hickory-resolver",
90
+
"k256",
91
+
"lru",
92
+
"multibase",
93
+
"p256",
94
+
"p384",
95
+
"rand 0.8.5",
96
+
"reqwest",
97
+
"serde",
98
+
"serde_ipld_dagcbor",
99
+
"serde_json",
100
+
"thiserror 2.0.17",
101
+
"tokio",
102
+
"tracing",
103
+
"url",
104
+
"urlencoding",
105
+
]
96
106
97
107
[[package]]
98
-
name = "async-trait"
99
-
version = "0.1.89"
100
-
source = "registry+https://github.com/rust-lang/crates.io-index"
101
-
checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
108
+
name = "atproto-jetstream"
109
+
version = "0.13.0"
110
+
source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
102
111
dependencies = [
103
-
"proc-macro2",
104
-
"quote",
105
-
"syn",
112
+
"anyhow",
113
+
"async-trait",
114
+
"atproto-identity",
115
+
"futures",
116
+
"http",
117
+
"serde",
118
+
"serde_json",
119
+
"thiserror 2.0.17",
120
+
"tokio",
121
+
"tokio-util",
122
+
"tokio-websockets",
123
+
"tracing",
124
+
"tracing-subscriber",
125
+
"urlencoding",
126
+
"zstd",
106
127
]
107
128
108
129
[[package]]
109
-
name = "atomic-waker"
110
-
version = "1.1.2"
111
-
source = "registry+https://github.com/rust-lang/crates.io-index"
112
-
checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"
130
+
name = "atproto-lexicon"
131
+
version = "0.13.0"
132
+
source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
133
+
dependencies = [
134
+
"anyhow",
135
+
"async-trait",
136
+
"atproto-client",
137
+
"atproto-identity",
138
+
"hickory-resolver",
139
+
"reqwest",
140
+
"serde",
141
+
"serde_json",
142
+
"thiserror 2.0.17",
143
+
"tokio",
144
+
"tracing",
145
+
]
113
146
114
147
[[package]]
115
-
name = "atproto-identity"
116
-
version = "0.11.3"
117
-
source = "registry+https://github.com/rust-lang/crates.io-index"
118
-
checksum = "aaac8751c7e4329a95714c01d9e47d22d94bc8c96e78079098312235128acb9f"
148
+
name = "atproto-oauth"
149
+
version = "0.13.0"
150
+
source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
119
151
dependencies = [
120
152
"anyhow",
121
153
"async-trait",
154
+
"atproto-identity",
155
+
"base64",
156
+
"chrono",
122
157
"ecdsa",
123
158
"elliptic-curve",
124
-
"hickory-resolver",
125
159
"k256",
126
160
"lru",
127
161
"multibase",
···
129
163
"p384",
130
164
"rand 0.8.5",
131
165
"reqwest",
166
+
"reqwest-chain",
167
+
"reqwest-middleware",
132
168
"serde",
133
169
"serde_ipld_dagcbor",
134
170
"serde_json",
135
-
"thiserror 2.0.16",
171
+
"sha2",
172
+
"thiserror 2.0.17",
136
173
"tokio",
137
174
"tracing",
175
+
"ulid",
176
+
]
177
+
178
+
[[package]]
179
+
name = "atproto-record"
180
+
version = "0.13.0"
181
+
source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
182
+
dependencies = [
183
+
"anyhow",
184
+
"atproto-identity",
185
+
"base64",
186
+
"chrono",
187
+
"cid",
188
+
"multihash",
189
+
"rand 0.8.5",
190
+
"serde",
191
+
"serde_ipld_dagcbor",
192
+
"serde_json",
193
+
"sha2",
194
+
"thiserror 2.0.17",
138
195
]
139
196
140
197
[[package]]
···
145
202
146
203
[[package]]
147
204
name = "axum"
148
-
version = "0.8.4"
205
+
version = "0.8.6"
149
206
source = "registry+https://github.com/rust-lang/crates.io-index"
150
-
checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5"
207
+
checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871"
151
208
dependencies = [
152
209
"axum-core",
153
210
"bytes",
···
164
221
"mime",
165
222
"percent-encoding",
166
223
"pin-project-lite",
167
-
"rustversion",
168
-
"serde",
224
+
"serde_core",
169
225
"serde_json",
170
226
"serde_path_to_error",
171
227
"serde_urlencoded",
···
179
235
180
236
[[package]]
181
237
name = "axum-core"
182
-
version = "0.5.2"
238
+
version = "0.5.5"
183
239
source = "registry+https://github.com/rust-lang/crates.io-index"
184
-
checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6"
240
+
checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
185
241
dependencies = [
186
242
"bytes",
187
243
"futures-core",
···
190
246
"http-body-util",
191
247
"mime",
192
248
"pin-project-lite",
193
-
"rustversion",
194
249
"sync_wrapper",
195
250
"tower-layer",
196
251
"tower-service",
···
199
254
200
255
[[package]]
201
256
name = "backon"
202
-
version = "1.5.2"
257
+
version = "1.6.0"
203
258
source = "registry+https://github.com/rust-lang/crates.io-index"
204
-
checksum = "592277618714fbcecda9a02ba7a8781f319d26532a88553bbacc77ba5d2b3a8d"
259
+
checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef"
205
260
dependencies = [
206
261
"fastrand",
207
262
]
208
263
209
264
[[package]]
210
-
name = "backtrace"
211
-
version = "0.3.75"
212
-
source = "registry+https://github.com/rust-lang/crates.io-index"
213
-
checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
214
-
dependencies = [
215
-
"addr2line",
216
-
"cfg-if",
217
-
"libc",
218
-
"miniz_oxide",
219
-
"object",
220
-
"rustc-demangle",
221
-
"windows-targets 0.52.6",
222
-
]
223
-
224
-
[[package]]
225
265
name = "base-x"
226
266
version = "0.2.11"
227
267
source = "registry+https://github.com/rust-lang/crates.io-index"
···
232
272
version = "0.2.0"
233
273
source = "registry+https://github.com/rust-lang/crates.io-index"
234
274
checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
275
+
276
+
[[package]]
277
+
name = "base256emoji"
278
+
version = "1.0.2"
279
+
source = "registry+https://github.com/rust-lang/crates.io-index"
280
+
checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c"
281
+
dependencies = [
282
+
"const-str",
283
+
"match-lookup",
284
+
]
235
285
236
286
[[package]]
237
287
name = "base64"
···
267
317
268
318
[[package]]
269
319
name = "bitflags"
270
-
version = "2.9.4"
320
+
version = "2.10.0"
271
321
source = "registry+https://github.com/rust-lang/crates.io-index"
272
-
checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
322
+
checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"
323
+
dependencies = [
324
+
"serde_core",
325
+
]
273
326
274
327
[[package]]
275
328
name = "block-buffer"
···
287
340
checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43"
288
341
289
342
[[package]]
343
+
name = "byteorder"
344
+
version = "1.5.0"
345
+
source = "registry+https://github.com/rust-lang/crates.io-index"
346
+
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
347
+
348
+
[[package]]
290
349
name = "bytes"
291
350
version = "1.10.1"
292
351
source = "registry+https://github.com/rust-lang/crates.io-index"
293
352
checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"
294
353
295
354
[[package]]
355
+
name = "cadence"
356
+
version = "1.6.0"
357
+
source = "registry+https://github.com/rust-lang/crates.io-index"
358
+
checksum = "3075f133bee430b7644c54fb629b9b4420346ffa275a45c81a6babe8b09b4f51"
359
+
dependencies = [
360
+
"crossbeam-channel",
361
+
]
362
+
363
+
[[package]]
296
364
name = "cbor4ii"
297
365
version = "0.2.14"
298
366
source = "registry+https://github.com/rust-lang/crates.io-index"
···
303
371
304
372
[[package]]
305
373
name = "cc"
306
-
version = "1.2.36"
374
+
version = "1.2.44"
307
375
source = "registry+https://github.com/rust-lang/crates.io-index"
308
-
checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54"
376
+
checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3"
309
377
dependencies = [
310
378
"find-msvc-tools",
379
+
"jobserver",
380
+
"libc",
311
381
"shlex",
312
382
]
313
383
314
384
[[package]]
315
385
name = "cfg-if"
316
-
version = "1.0.3"
386
+
version = "1.0.4"
317
387
source = "registry+https://github.com/rust-lang/crates.io-index"
318
-
checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
388
+
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
319
389
320
390
[[package]]
321
391
name = "cfg_aliases"
···
324
394
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
325
395
326
396
[[package]]
397
+
name = "chrono"
398
+
version = "0.4.42"
399
+
source = "registry+https://github.com/rust-lang/crates.io-index"
400
+
checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2"
401
+
dependencies = [
402
+
"num-traits",
403
+
"serde",
404
+
]
405
+
406
+
[[package]]
327
407
name = "cid"
328
408
version = "0.11.1"
329
409
source = "registry+https://github.com/rust-lang/crates.io-index"
···
338
418
]
339
419
340
420
[[package]]
341
-
name = "clap"
342
-
version = "4.5.47"
343
-
source = "registry+https://github.com/rust-lang/crates.io-index"
344
-
checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931"
345
-
dependencies = [
346
-
"clap_builder",
347
-
"clap_derive",
348
-
]
349
-
350
-
[[package]]
351
-
name = "clap_builder"
352
-
version = "4.5.47"
353
-
source = "registry+https://github.com/rust-lang/crates.io-index"
354
-
checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6"
355
-
dependencies = [
356
-
"anstream",
357
-
"anstyle",
358
-
"clap_lex",
359
-
"strsim",
360
-
]
361
-
362
-
[[package]]
363
-
name = "clap_derive"
364
-
version = "4.5.47"
365
-
source = "registry+https://github.com/rust-lang/crates.io-index"
366
-
checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c"
367
-
dependencies = [
368
-
"heck",
369
-
"proc-macro2",
370
-
"quote",
371
-
"syn",
372
-
]
373
-
374
-
[[package]]
375
-
name = "clap_lex"
376
-
version = "0.7.5"
377
-
source = "registry+https://github.com/rust-lang/crates.io-index"
378
-
checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
379
-
380
-
[[package]]
381
-
name = "colorchoice"
382
-
version = "1.0.4"
383
-
source = "registry+https://github.com/rust-lang/crates.io-index"
384
-
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
385
-
386
-
[[package]]
387
421
name = "combine"
388
422
version = "4.6.7"
389
423
source = "registry+https://github.com/rust-lang/crates.io-index"
···
398
432
]
399
433
400
434
[[package]]
435
+
name = "concurrent-queue"
436
+
version = "2.5.0"
437
+
source = "registry+https://github.com/rust-lang/crates.io-index"
438
+
checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973"
439
+
dependencies = [
440
+
"crossbeam-utils",
441
+
]
442
+
443
+
[[package]]
401
444
name = "const-oid"
402
445
version = "0.9.6"
403
446
source = "registry+https://github.com/rust-lang/crates.io-index"
404
447
checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
448
+
449
+
[[package]]
450
+
name = "const-str"
451
+
version = "0.4.3"
452
+
source = "registry+https://github.com/rust-lang/crates.io-index"
453
+
checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3"
405
454
406
455
[[package]]
407
456
name = "core-foundation"
···
448
497
]
449
498
450
499
[[package]]
500
+
name = "crc"
501
+
version = "3.3.0"
502
+
source = "registry+https://github.com/rust-lang/crates.io-index"
503
+
checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675"
504
+
dependencies = [
505
+
"crc-catalog",
506
+
]
507
+
508
+
[[package]]
509
+
name = "crc-catalog"
510
+
version = "2.4.0"
511
+
source = "registry+https://github.com/rust-lang/crates.io-index"
512
+
checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
513
+
514
+
[[package]]
451
515
name = "critical-section"
452
516
version = "1.2.0"
453
517
source = "registry+https://github.com/rust-lang/crates.io-index"
···
472
536
]
473
537
474
538
[[package]]
539
+
name = "crossbeam-queue"
540
+
version = "0.3.12"
541
+
source = "registry+https://github.com/rust-lang/crates.io-index"
542
+
checksum = "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115"
543
+
dependencies = [
544
+
"crossbeam-utils",
545
+
]
546
+
547
+
[[package]]
475
548
name = "crossbeam-utils"
476
549
version = "0.8.21"
477
550
source = "registry+https://github.com/rust-lang/crates.io-index"
···
522
595
checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
523
596
dependencies = [
524
597
"data-encoding",
525
-
"syn",
598
+
"syn 2.0.108",
526
599
]
527
600
528
601
[[package]]
···
587
660
dependencies = [
588
661
"proc-macro2",
589
662
"quote",
590
-
"syn",
663
+
"syn 2.0.108",
591
664
]
592
665
593
666
[[package]]
667
+
name = "dotenvy"
668
+
version = "0.15.7"
669
+
source = "registry+https://github.com/rust-lang/crates.io-index"
670
+
checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b"
671
+
672
+
[[package]]
594
673
name = "ecdsa"
595
674
version = "0.16.9"
596
675
source = "registry+https://github.com/rust-lang/crates.io-index"
···
600
679
"digest",
601
680
"elliptic-curve",
602
681
"rfc6979",
682
+
"serdect",
603
683
"signature",
604
684
"spki",
605
685
]
606
686
607
687
[[package]]
688
+
name = "either"
689
+
version = "1.15.0"
690
+
source = "registry+https://github.com/rust-lang/crates.io-index"
691
+
checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719"
692
+
dependencies = [
693
+
"serde",
694
+
]
695
+
696
+
[[package]]
608
697
name = "elliptic-curve"
609
698
version = "0.13.8"
610
699
source = "registry+https://github.com/rust-lang/crates.io-index"
···
646
735
"heck",
647
736
"proc-macro2",
648
737
"quote",
649
-
"syn",
738
+
"syn 2.0.108",
650
739
]
651
740
652
741
[[package]]
···
657
746
658
747
[[package]]
659
748
name = "errno"
660
-
version = "0.3.13"
749
+
version = "0.3.14"
661
750
source = "registry+https://github.com/rust-lang/crates.io-index"
662
-
checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
751
+
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
663
752
dependencies = [
664
753
"libc",
665
-
"windows-sys 0.60.2",
754
+
"windows-sys 0.61.2",
755
+
]
756
+
757
+
[[package]]
758
+
name = "etcetera"
759
+
version = "0.8.0"
760
+
source = "registry+https://github.com/rust-lang/crates.io-index"
761
+
checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943"
762
+
dependencies = [
763
+
"cfg-if",
764
+
"home",
765
+
"windows-sys 0.48.0",
766
+
]
767
+
768
+
[[package]]
769
+
name = "event-listener"
770
+
version = "5.4.1"
771
+
source = "registry+https://github.com/rust-lang/crates.io-index"
772
+
checksum = "e13b66accf52311f30a0db42147dadea9850cb48cd070028831ae5f5d4b856ab"
773
+
dependencies = [
774
+
"concurrent-queue",
775
+
"parking",
776
+
"pin-project-lite",
666
777
]
667
778
668
779
[[package]]
···
683
794
684
795
[[package]]
685
796
name = "find-msvc-tools"
686
-
version = "0.1.1"
797
+
version = "0.1.4"
798
+
source = "registry+https://github.com/rust-lang/crates.io-index"
799
+
checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127"
800
+
801
+
[[package]]
802
+
name = "flume"
803
+
version = "0.11.1"
687
804
source = "registry+https://github.com/rust-lang/crates.io-index"
688
-
checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d"
805
+
checksum = "da0e4dd2a88388a1f4ccc7c9ce104604dab68d9f408dc34cd45823d5a9069095"
806
+
dependencies = [
807
+
"futures-core",
808
+
"futures-sink",
809
+
"spin",
810
+
]
689
811
690
812
[[package]]
691
813
name = "fnv"
···
724
846
]
725
847
726
848
[[package]]
849
+
name = "futures"
850
+
version = "0.3.31"
851
+
source = "registry+https://github.com/rust-lang/crates.io-index"
852
+
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
853
+
dependencies = [
854
+
"futures-channel",
855
+
"futures-core",
856
+
"futures-executor",
857
+
"futures-io",
858
+
"futures-sink",
859
+
"futures-task",
860
+
"futures-util",
861
+
]
862
+
863
+
[[package]]
727
864
name = "futures-channel"
728
865
version = "0.3.31"
729
866
source = "registry+https://github.com/rust-lang/crates.io-index"
730
867
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
731
868
dependencies = [
732
869
"futures-core",
870
+
"futures-sink",
733
871
]
734
872
735
873
[[package]]
···
739
877
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
740
878
741
879
[[package]]
880
+
name = "futures-executor"
881
+
version = "0.3.31"
882
+
source = "registry+https://github.com/rust-lang/crates.io-index"
883
+
checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
884
+
dependencies = [
885
+
"futures-core",
886
+
"futures-task",
887
+
"futures-util",
888
+
]
889
+
890
+
[[package]]
891
+
name = "futures-intrusive"
892
+
version = "0.5.0"
893
+
source = "registry+https://github.com/rust-lang/crates.io-index"
894
+
checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f"
895
+
dependencies = [
896
+
"futures-core",
897
+
"lock_api",
898
+
"parking_lot",
899
+
]
900
+
901
+
[[package]]
742
902
name = "futures-io"
743
903
version = "0.3.31"
744
904
source = "registry+https://github.com/rust-lang/crates.io-index"
···
752
912
dependencies = [
753
913
"proc-macro2",
754
914
"quote",
755
-
"syn",
915
+
"syn 2.0.108",
756
916
]
757
917
758
918
[[package]]
···
773
933
source = "registry+https://github.com/rust-lang/crates.io-index"
774
934
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
775
935
dependencies = [
936
+
"futures-channel",
776
937
"futures-core",
938
+
"futures-io",
777
939
"futures-macro",
778
940
"futures-sink",
779
941
"futures-task",
942
+
"memchr",
780
943
"pin-project-lite",
781
944
"pin-utils",
782
945
"slab",
783
946
]
784
947
785
948
[[package]]
786
-
name = "generator"
787
-
version = "0.8.7"
788
-
source = "registry+https://github.com/rust-lang/crates.io-index"
789
-
checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2"
790
-
dependencies = [
791
-
"cc",
792
-
"cfg-if",
793
-
"libc",
794
-
"log",
795
-
"rustversion",
796
-
"windows",
797
-
]
798
-
799
-
[[package]]
800
949
name = "generic-array"
801
-
version = "0.14.7"
950
+
version = "0.14.9"
802
951
source = "registry+https://github.com/rust-lang/crates.io-index"
803
-
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
952
+
checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2"
804
953
dependencies = [
805
954
"typenum",
806
955
"version_check",
···
816
965
"cfg-if",
817
966
"js-sys",
818
967
"libc",
819
-
"wasi 0.11.1+wasi-snapshot-preview1",
968
+
"wasi",
820
969
"wasm-bindgen",
821
970
]
822
971
823
972
[[package]]
824
973
name = "getrandom"
825
-
version = "0.3.3"
974
+
version = "0.3.4"
826
975
source = "registry+https://github.com/rust-lang/crates.io-index"
827
-
checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
976
+
checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
828
977
dependencies = [
829
978
"cfg-if",
830
979
"js-sys",
831
980
"libc",
832
981
"r-efi",
833
-
"wasi 0.14.3+wasi-0.2.4",
982
+
"wasip2",
834
983
"wasm-bindgen",
835
984
]
836
985
837
986
[[package]]
838
-
name = "gimli"
839
-
version = "0.31.1"
840
-
source = "registry+https://github.com/rust-lang/crates.io-index"
841
-
checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
842
-
843
-
[[package]]
844
987
name = "group"
845
988
version = "0.13.0"
846
989
source = "registry+https://github.com/rust-lang/crates.io-index"
···
882
1025
]
883
1026
884
1027
[[package]]
1028
+
name = "hashbrown"
1029
+
version = "0.16.0"
1030
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1031
+
checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
1032
+
1033
+
[[package]]
1034
+
name = "hashlink"
1035
+
version = "0.10.0"
1036
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1037
+
checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
1038
+
dependencies = [
1039
+
"hashbrown 0.15.5",
1040
+
]
1041
+
1042
+
[[package]]
885
1043
name = "heck"
886
1044
version = "0.5.0"
887
1045
source = "registry+https://github.com/rust-lang/crates.io-index"
···
892
1050
version = "0.5.2"
893
1051
source = "registry+https://github.com/rust-lang/crates.io-index"
894
1052
checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
1053
+
1054
+
[[package]]
1055
+
name = "hex"
1056
+
version = "0.4.3"
1057
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1058
+
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
895
1059
896
1060
[[package]]
897
1061
name = "hickory-proto"
···
911
1075
"once_cell",
912
1076
"rand 0.9.2",
913
1077
"ring",
914
-
"thiserror 2.0.16",
1078
+
"thiserror 2.0.17",
915
1079
"tinyvec",
916
1080
"tokio",
917
1081
"tracing",
···
934
1098
"rand 0.9.2",
935
1099
"resolv-conf",
936
1100
"smallvec",
937
-
"thiserror 2.0.16",
1101
+
"thiserror 2.0.17",
938
1102
"tokio",
939
1103
"tracing",
940
1104
]
···
955
1119
checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e"
956
1120
dependencies = [
957
1121
"digest",
1122
+
]
1123
+
1124
+
[[package]]
1125
+
name = "home"
1126
+
version = "0.5.12"
1127
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1128
+
checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d"
1129
+
dependencies = [
1130
+
"windows-sys 0.61.2",
958
1131
]
959
1132
960
1133
[[package]]
···
992
1165
]
993
1166
994
1167
[[package]]
1168
+
name = "http-range-header"
1169
+
version = "0.4.2"
1170
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1171
+
checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c"
1172
+
1173
+
[[package]]
995
1174
name = "httparse"
996
1175
version = "1.10.1"
997
1176
source = "registry+https://github.com/rust-lang/crates.io-index"
···
1061
1240
1062
1241
[[package]]
1063
1242
name = "hyper-util"
1064
-
version = "0.1.16"
1243
+
version = "0.1.17"
1065
1244
source = "registry+https://github.com/rust-lang/crates.io-index"
1066
-
checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
1245
+
checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8"
1067
1246
dependencies = [
1068
1247
"base64",
1069
1248
"bytes",
···
1077
1256
"libc",
1078
1257
"percent-encoding",
1079
1258
"pin-project-lite",
1080
-
"socket2 0.6.0",
1259
+
"socket2 0.6.1",
1081
1260
"system-configuration",
1082
1261
"tokio",
1083
1262
"tower-service",
···
1087
1266
1088
1267
[[package]]
1089
1268
name = "icu_collections"
1090
-
version = "2.0.0"
1269
+
version = "2.1.1"
1091
1270
source = "registry+https://github.com/rust-lang/crates.io-index"
1092
-
checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47"
1271
+
checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
1093
1272
dependencies = [
1094
1273
"displaydoc",
1095
1274
"potential_utf",
···
1100
1279
1101
1280
[[package]]
1102
1281
name = "icu_locale_core"
1103
-
version = "2.0.0"
1282
+
version = "2.1.1"
1104
1283
source = "registry+https://github.com/rust-lang/crates.io-index"
1105
-
checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a"
1284
+
checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
1106
1285
dependencies = [
1107
1286
"displaydoc",
1108
1287
"litemap",
···
1113
1292
1114
1293
[[package]]
1115
1294
name = "icu_normalizer"
1116
-
version = "2.0.0"
1295
+
version = "2.1.1"
1117
1296
source = "registry+https://github.com/rust-lang/crates.io-index"
1118
-
checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979"
1297
+
checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
1119
1298
dependencies = [
1120
-
"displaydoc",
1121
1299
"icu_collections",
1122
1300
"icu_normalizer_data",
1123
1301
"icu_properties",
···
1128
1306
1129
1307
[[package]]
1130
1308
name = "icu_normalizer_data"
1131
-
version = "2.0.0"
1309
+
version = "2.1.1"
1132
1310
source = "registry+https://github.com/rust-lang/crates.io-index"
1133
-
checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3"
1311
+
checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
1134
1312
1135
1313
[[package]]
1136
1314
name = "icu_properties"
1137
-
version = "2.0.1"
1315
+
version = "2.1.1"
1138
1316
source = "registry+https://github.com/rust-lang/crates.io-index"
1139
-
checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b"
1317
+
checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99"
1140
1318
dependencies = [
1141
-
"displaydoc",
1142
1319
"icu_collections",
1143
1320
"icu_locale_core",
1144
1321
"icu_properties_data",
1145
1322
"icu_provider",
1146
-
"potential_utf",
1147
1323
"zerotrie",
1148
1324
"zerovec",
1149
1325
]
1150
1326
1151
1327
[[package]]
1152
1328
name = "icu_properties_data"
1153
-
version = "2.0.1"
1329
+
version = "2.1.1"
1154
1330
source = "registry+https://github.com/rust-lang/crates.io-index"
1155
-
checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632"
1331
+
checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899"
1156
1332
1157
1333
[[package]]
1158
1334
name = "icu_provider"
1159
-
version = "2.0.0"
1335
+
version = "2.1.1"
1160
1336
source = "registry+https://github.com/rust-lang/crates.io-index"
1161
-
checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af"
1337
+
checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
1162
1338
dependencies = [
1163
1339
"displaydoc",
1164
1340
"icu_locale_core",
1165
-
"stable_deref_trait",
1166
-
"tinystr",
1167
1341
"writeable",
1168
1342
"yoke",
1169
1343
"zerofrom",
···
1194
1368
1195
1369
[[package]]
1196
1370
name = "indexmap"
1197
-
version = "2.11.0"
1371
+
version = "2.12.0"
1198
1372
source = "registry+https://github.com/rust-lang/crates.io-index"
1199
-
checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9"
1373
+
checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
1200
1374
dependencies = [
1201
1375
"equivalent",
1202
-
"hashbrown",
1203
-
]
1204
-
1205
-
[[package]]
1206
-
name = "io-uring"
1207
-
version = "0.7.10"
1208
-
source = "registry+https://github.com/rust-lang/crates.io-index"
1209
-
checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b"
1210
-
dependencies = [
1211
-
"bitflags",
1212
-
"cfg-if",
1213
-
"libc",
1376
+
"hashbrown 0.16.0",
1214
1377
]
1215
1378
1216
1379
[[package]]
···
1253
1416
]
1254
1417
1255
1418
[[package]]
1256
-
name = "is_terminal_polyfill"
1257
-
version = "1.70.1"
1258
-
source = "registry+https://github.com/rust-lang/crates.io-index"
1259
-
checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
1260
-
1261
-
[[package]]
1262
1419
name = "itoa"
1263
1420
version = "1.0.15"
1264
1421
source = "registry+https://github.com/rust-lang/crates.io-index"
1265
1422
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
1266
1423
1267
1424
[[package]]
1425
+
name = "jobserver"
1426
+
version = "0.1.34"
1427
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1428
+
checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33"
1429
+
dependencies = [
1430
+
"getrandom 0.3.4",
1431
+
"libc",
1432
+
]
1433
+
1434
+
[[package]]
1268
1435
name = "js-sys"
1269
-
version = "0.3.78"
1436
+
version = "0.3.82"
1270
1437
source = "registry+https://github.com/rust-lang/crates.io-index"
1271
-
checksum = "0c0b063578492ceec17683ef2f8c5e89121fbd0b172cbc280635ab7567db2738"
1438
+
checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65"
1272
1439
dependencies = [
1273
1440
"once_cell",
1274
1441
"wasm-bindgen",
···
1293
1460
version = "1.5.0"
1294
1461
source = "registry+https://github.com/rust-lang/crates.io-index"
1295
1462
checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe"
1463
+
dependencies = [
1464
+
"spin",
1465
+
]
1296
1466
1297
1467
[[package]]
1298
1468
name = "libc"
1299
-
version = "0.2.175"
1469
+
version = "0.2.177"
1470
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1471
+
checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"
1472
+
1473
+
[[package]]
1474
+
name = "libm"
1475
+
version = "0.2.15"
1476
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1477
+
checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
1478
+
1479
+
[[package]]
1480
+
name = "libredox"
1481
+
version = "0.1.10"
1300
1482
source = "registry+https://github.com/rust-lang/crates.io-index"
1301
-
checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
1483
+
checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb"
1484
+
dependencies = [
1485
+
"bitflags",
1486
+
"libc",
1487
+
"redox_syscall",
1488
+
]
1489
+
1490
+
[[package]]
1491
+
name = "libsqlite3-sys"
1492
+
version = "0.30.1"
1493
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1494
+
checksum = "2e99fb7a497b1e3339bc746195567ed8d3e24945ecd636e3619d20b9de9e9149"
1495
+
dependencies = [
1496
+
"cc",
1497
+
"pkg-config",
1498
+
"vcpkg",
1499
+
]
1302
1500
1303
1501
[[package]]
1304
1502
name = "linux-raw-sys"
1305
-
version = "0.9.4"
1503
+
version = "0.11.0"
1306
1504
source = "registry+https://github.com/rust-lang/crates.io-index"
1307
-
checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
1505
+
checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039"
1308
1506
1309
1507
[[package]]
1310
1508
name = "litemap"
1311
-
version = "0.8.0"
1509
+
version = "0.8.1"
1312
1510
source = "registry+https://github.com/rust-lang/crates.io-index"
1313
-
checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
1511
+
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
1314
1512
1315
1513
[[package]]
1316
1514
name = "lock_api"
1317
-
version = "0.4.13"
1515
+
version = "0.4.14"
1318
1516
source = "registry+https://github.com/rust-lang/crates.io-index"
1319
-
checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
1517
+
checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
1320
1518
dependencies = [
1321
-
"autocfg",
1322
1519
"scopeguard",
1323
1520
]
1324
1521
···
1329
1526
checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"
1330
1527
1331
1528
[[package]]
1332
-
name = "loom"
1333
-
version = "0.7.2"
1334
-
source = "registry+https://github.com/rust-lang/crates.io-index"
1335
-
checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca"
1336
-
dependencies = [
1337
-
"cfg-if",
1338
-
"generator",
1339
-
"scoped-tls",
1340
-
"tracing",
1341
-
"tracing-subscriber",
1342
-
]
1343
-
1344
-
[[package]]
1345
1529
name = "lru"
1346
1530
version = "0.12.5"
1347
1531
source = "registry+https://github.com/rust-lang/crates.io-index"
1348
1532
checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
1349
1533
dependencies = [
1350
-
"hashbrown",
1534
+
"hashbrown 0.15.5",
1351
1535
]
1352
1536
1353
1537
[[package]]
···
1357
1541
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
1358
1542
1359
1543
[[package]]
1544
+
name = "match-lookup"
1545
+
version = "0.1.1"
1546
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1547
+
checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e"
1548
+
dependencies = [
1549
+
"proc-macro2",
1550
+
"quote",
1551
+
"syn 1.0.109",
1552
+
]
1553
+
1554
+
[[package]]
1360
1555
name = "matchers"
1361
1556
version = "0.2.0"
1362
1557
source = "registry+https://github.com/rust-lang/crates.io-index"
···
1372
1567
checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
1373
1568
1374
1569
[[package]]
1570
+
name = "md-5"
1571
+
version = "0.10.6"
1572
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1573
+
checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf"
1574
+
dependencies = [
1575
+
"cfg-if",
1576
+
"digest",
1577
+
]
1578
+
1579
+
[[package]]
1375
1580
name = "memchr"
1376
-
version = "2.7.5"
1581
+
version = "2.7.6"
1377
1582
source = "registry+https://github.com/rust-lang/crates.io-index"
1378
-
checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
1583
+
checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"
1379
1584
1380
1585
[[package]]
1381
1586
name = "metrohash"
···
1390
1595
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
1391
1596
1392
1597
[[package]]
1393
-
name = "miniz_oxide"
1394
-
version = "0.8.9"
1598
+
name = "mime_guess"
1599
+
version = "2.0.5"
1395
1600
source = "registry+https://github.com/rust-lang/crates.io-index"
1396
-
checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
1601
+
checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
1397
1602
dependencies = [
1398
-
"adler2",
1603
+
"mime",
1604
+
"unicase",
1399
1605
]
1400
1606
1401
1607
[[package]]
1402
1608
name = "mio"
1403
-
version = "1.0.4"
1609
+
version = "1.1.0"
1404
1610
source = "registry+https://github.com/rust-lang/crates.io-index"
1405
-
checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
1611
+
checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873"
1406
1612
dependencies = [
1407
1613
"libc",
1408
-
"wasi 0.11.1+wasi-snapshot-preview1",
1409
-
"windows-sys 0.59.0",
1614
+
"wasi",
1615
+
"windows-sys 0.61.2",
1410
1616
]
1411
1617
1412
1618
[[package]]
1413
1619
name = "moka"
1414
-
version = "0.12.10"
1620
+
version = "0.12.11"
1415
1621
source = "registry+https://github.com/rust-lang/crates.io-index"
1416
-
checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926"
1622
+
checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077"
1417
1623
dependencies = [
1418
1624
"crossbeam-channel",
1419
1625
"crossbeam-epoch",
1420
1626
"crossbeam-utils",
1421
-
"loom",
1627
+
"equivalent",
1422
1628
"parking_lot",
1423
1629
"portable-atomic",
1424
1630
"rustc_version",
1425
1631
"smallvec",
1426
1632
"tagptr",
1427
-
"thiserror 1.0.69",
1428
1633
"uuid",
1429
1634
]
1430
1635
1431
1636
[[package]]
1432
1637
name = "multibase"
1433
-
version = "0.9.1"
1638
+
version = "0.9.2"
1434
1639
source = "registry+https://github.com/rust-lang/crates.io-index"
1435
-
checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404"
1640
+
checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77"
1436
1641
dependencies = [
1437
1642
"base-x",
1643
+
"base256emoji",
1438
1644
"data-encoding",
1439
1645
"data-encoding-macro",
1440
1646
]
···
1469
1675
1470
1676
[[package]]
1471
1677
name = "nu-ansi-term"
1472
-
version = "0.50.1"
1678
+
version = "0.50.3"
1473
1679
source = "registry+https://github.com/rust-lang/crates.io-index"
1474
-
checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399"
1680
+
checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
1475
1681
dependencies = [
1476
-
"windows-sys 0.52.0",
1682
+
"windows-sys 0.61.2",
1477
1683
]
1478
1684
1479
1685
[[package]]
···
1487
1693
]
1488
1694
1489
1695
[[package]]
1696
+
name = "num-bigint-dig"
1697
+
version = "0.8.5"
1698
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1699
+
checksum = "82c79c15c05d4bf82b6f5ef163104cc81a760d8e874d38ac50ab67c8877b647b"
1700
+
dependencies = [
1701
+
"lazy_static",
1702
+
"libm",
1703
+
"num-integer",
1704
+
"num-iter",
1705
+
"num-traits",
1706
+
"rand 0.8.5",
1707
+
"smallvec",
1708
+
"zeroize",
1709
+
]
1710
+
1711
+
[[package]]
1490
1712
name = "num-integer"
1491
1713
version = "0.1.46"
1492
1714
source = "registry+https://github.com/rust-lang/crates.io-index"
1493
1715
checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
1494
1716
dependencies = [
1717
+
"num-traits",
1718
+
]
1719
+
1720
+
[[package]]
1721
+
name = "num-iter"
1722
+
version = "0.1.45"
1723
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1724
+
checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
1725
+
dependencies = [
1726
+
"autocfg",
1727
+
"num-integer",
1495
1728
"num-traits",
1496
1729
]
1497
1730
···
1502
1735
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
1503
1736
dependencies = [
1504
1737
"autocfg",
1738
+
"libm",
1505
1739
]
1506
1740
1507
1741
[[package]]
···
1515
1749
]
1516
1750
1517
1751
[[package]]
1518
-
name = "object"
1519
-
version = "0.36.7"
1520
-
source = "registry+https://github.com/rust-lang/crates.io-index"
1521
-
checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
1522
-
dependencies = [
1523
-
"memchr",
1524
-
]
1525
-
1526
-
[[package]]
1527
1752
name = "once_cell"
1528
1753
version = "1.21.3"
1529
1754
source = "registry+https://github.com/rust-lang/crates.io-index"
···
1534
1759
]
1535
1760
1536
1761
[[package]]
1537
-
name = "once_cell_polyfill"
1538
-
version = "1.70.1"
1539
-
source = "registry+https://github.com/rust-lang/crates.io-index"
1540
-
checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"
1541
-
1542
-
[[package]]
1543
1762
name = "openssl"
1544
-
version = "0.10.73"
1763
+
version = "0.10.74"
1545
1764
source = "registry+https://github.com/rust-lang/crates.io-index"
1546
-
checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
1765
+
checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654"
1547
1766
dependencies = [
1548
1767
"bitflags",
1549
1768
"cfg-if",
···
1562
1781
dependencies = [
1563
1782
"proc-macro2",
1564
1783
"quote",
1565
-
"syn",
1784
+
"syn 2.0.108",
1566
1785
]
1567
1786
1568
1787
[[package]]
···
1573
1792
1574
1793
[[package]]
1575
1794
name = "openssl-sys"
1576
-
version = "0.9.109"
1795
+
version = "0.9.110"
1577
1796
source = "registry+https://github.com/rust-lang/crates.io-index"
1578
-
checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
1797
+
checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2"
1579
1798
dependencies = [
1580
1799
"cc",
1581
1800
"libc",
···
1592
1811
"ecdsa",
1593
1812
"elliptic-curve",
1594
1813
"primeorder",
1814
+
"serdect",
1595
1815
"sha2",
1596
1816
]
1597
1817
···
1604
1824
"ecdsa",
1605
1825
"elliptic-curve",
1606
1826
"primeorder",
1827
+
"serdect",
1607
1828
"sha2",
1608
1829
]
1609
1830
1610
1831
[[package]]
1832
+
name = "parking"
1833
+
version = "2.2.1"
1834
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1835
+
checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba"
1836
+
1837
+
[[package]]
1611
1838
name = "parking_lot"
1612
-
version = "0.12.4"
1839
+
version = "0.12.5"
1613
1840
source = "registry+https://github.com/rust-lang/crates.io-index"
1614
-
checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13"
1841
+
checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a"
1615
1842
dependencies = [
1616
1843
"lock_api",
1617
1844
"parking_lot_core",
···
1619
1846
1620
1847
[[package]]
1621
1848
name = "parking_lot_core"
1622
-
version = "0.9.11"
1849
+
version = "0.9.12"
1623
1850
source = "registry+https://github.com/rust-lang/crates.io-index"
1624
-
checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5"
1851
+
checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1"
1625
1852
dependencies = [
1626
1853
"cfg-if",
1627
1854
"libc",
1628
1855
"redox_syscall",
1629
1856
"smallvec",
1630
-
"windows-targets 0.52.6",
1857
+
"windows-link 0.2.1",
1631
1858
]
1632
1859
1633
1860
[[package]]
···
1656
1883
version = "0.1.0"
1657
1884
source = "registry+https://github.com/rust-lang/crates.io-index"
1658
1885
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
1886
+
1887
+
[[package]]
1888
+
name = "pkcs1"
1889
+
version = "0.7.5"
1890
+
source = "registry+https://github.com/rust-lang/crates.io-index"
1891
+
checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f"
1892
+
dependencies = [
1893
+
"der",
1894
+
"pkcs8",
1895
+
"spki",
1896
+
]
1659
1897
1660
1898
[[package]]
1661
1899
name = "pkcs8"
···
1681
1919
1682
1920
[[package]]
1683
1921
name = "potential_utf"
1684
-
version = "0.1.3"
1922
+
version = "0.1.4"
1685
1923
source = "registry+https://github.com/rust-lang/crates.io-index"
1686
-
checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a"
1924
+
checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
1687
1925
dependencies = [
1688
1926
"zerovec",
1689
1927
]
···
1704
1942
checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6"
1705
1943
dependencies = [
1706
1944
"elliptic-curve",
1945
+
"serdect",
1707
1946
]
1708
1947
1709
1948
[[package]]
1710
1949
name = "proc-macro2"
1711
-
version = "1.0.101"
1950
+
version = "1.0.103"
1712
1951
source = "registry+https://github.com/rust-lang/crates.io-index"
1713
-
checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
1952
+
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
1714
1953
dependencies = [
1715
1954
"unicode-ident",
1716
1955
]
1717
1956
1718
1957
[[package]]
1719
1958
name = "quickdid"
1720
-
version = "1.0.0-rc.2"
1959
+
version = "1.0.0-rc.5"
1721
1960
dependencies = [
1722
1961
"anyhow",
1723
1962
"async-trait",
1724
1963
"atproto-identity",
1964
+
"atproto-jetstream",
1965
+
"atproto-lexicon",
1725
1966
"axum",
1726
1967
"bincode",
1727
-
"clap",
1968
+
"cadence",
1728
1969
"deadpool-redis",
1729
-
"http",
1970
+
"httpdate",
1730
1971
"metrohash",
1972
+
"once_cell",
1731
1973
"reqwest",
1732
1974
"serde",
1733
1975
"serde_json",
1734
-
"thiserror 2.0.16",
1976
+
"sqlx",
1977
+
"thiserror 2.0.17",
1735
1978
"tokio",
1736
1979
"tokio-util",
1980
+
"tower-http",
1737
1981
"tracing",
1738
1982
"tracing-subscriber",
1739
1983
]
···
1751
1995
"quinn-udp",
1752
1996
"rustc-hash",
1753
1997
"rustls",
1754
-
"socket2 0.6.0",
1755
-
"thiserror 2.0.16",
1998
+
"socket2 0.6.1",
1999
+
"thiserror 2.0.17",
1756
2000
"tokio",
1757
2001
"tracing",
1758
2002
"web-time",
···
1765
2009
checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
1766
2010
dependencies = [
1767
2011
"bytes",
1768
-
"getrandom 0.3.3",
2012
+
"getrandom 0.3.4",
1769
2013
"lru-slab",
1770
2014
"rand 0.9.2",
1771
2015
"ring",
···
1773
2017
"rustls",
1774
2018
"rustls-pki-types",
1775
2019
"slab",
1776
-
"thiserror 2.0.16",
2020
+
"thiserror 2.0.17",
1777
2021
"tinyvec",
1778
2022
"tracing",
1779
2023
"web-time",
···
1788
2032
"cfg_aliases",
1789
2033
"libc",
1790
2034
"once_cell",
1791
-
"socket2 0.6.0",
2035
+
"socket2 0.6.1",
1792
2036
"tracing",
1793
2037
"windows-sys 0.60.2",
1794
2038
]
1795
2039
1796
2040
[[package]]
1797
2041
name = "quote"
1798
-
version = "1.0.40"
2042
+
version = "1.0.41"
1799
2043
source = "registry+https://github.com/rust-lang/crates.io-index"
1800
-
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
2044
+
checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1"
1801
2045
dependencies = [
1802
2046
"proc-macro2",
1803
2047
]
···
1864
2108
source = "registry+https://github.com/rust-lang/crates.io-index"
1865
2109
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
1866
2110
dependencies = [
1867
-
"getrandom 0.3.3",
2111
+
"getrandom 0.3.4",
1868
2112
]
1869
2113
1870
2114
[[package]]
1871
2115
name = "redis"
1872
-
version = "0.32.5"
2116
+
version = "0.32.7"
1873
2117
source = "registry+https://github.com/rust-lang/crates.io-index"
1874
-
checksum = "7cd3650deebc68526b304898b192fa4102a4ef0b9ada24da096559cb60e0eef8"
2118
+
checksum = "014cc767fefab6a3e798ca45112bccad9c6e0e218fbd49720042716c73cfef44"
1875
2119
dependencies = [
1876
2120
"arc-swap",
1877
2121
"backon",
···
1887
2131
"rustls",
1888
2132
"rustls-native-certs",
1889
2133
"ryu",
1890
-
"socket2 0.6.0",
2134
+
"socket2 0.6.1",
1891
2135
"tokio",
1892
2136
"tokio-rustls",
1893
2137
"tokio-util",
···
1896
2140
1897
2141
[[package]]
1898
2142
name = "redox_syscall"
1899
-
version = "0.5.17"
2143
+
version = "0.5.18"
1900
2144
source = "registry+https://github.com/rust-lang/crates.io-index"
1901
-
checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77"
2145
+
checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
1902
2146
dependencies = [
1903
2147
"bitflags",
1904
2148
]
1905
2149
1906
2150
[[package]]
1907
2151
name = "regex-automata"
1908
-
version = "0.4.10"
2152
+
version = "0.4.13"
1909
2153
source = "registry+https://github.com/rust-lang/crates.io-index"
1910
-
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
2154
+
checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"
1911
2155
dependencies = [
1912
2156
"aho-corasick",
1913
2157
"memchr",
···
1916
2160
1917
2161
[[package]]
1918
2162
name = "regex-syntax"
1919
-
version = "0.8.6"
2163
+
version = "0.8.8"
1920
2164
source = "registry+https://github.com/rust-lang/crates.io-index"
1921
-
checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
2165
+
checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
1922
2166
1923
2167
[[package]]
1924
2168
name = "reqwest"
1925
-
version = "0.12.23"
2169
+
version = "0.12.24"
1926
2170
source = "registry+https://github.com/rust-lang/crates.io-index"
1927
-
checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
2171
+
checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
1928
2172
dependencies = [
1929
2173
"base64",
1930
2174
"bytes",
1931
2175
"encoding_rs",
1932
2176
"futures-core",
2177
+
"futures-util",
1933
2178
"h2",
1934
2179
"http",
1935
2180
"http-body",
···
1941
2186
"js-sys",
1942
2187
"log",
1943
2188
"mime",
2189
+
"mime_guess",
1944
2190
"native-tls",
1945
2191
"percent-encoding",
1946
2192
"pin-project-lite",
···
1965
2211
]
1966
2212
1967
2213
[[package]]
2214
+
name = "reqwest-chain"
2215
+
version = "1.0.0"
2216
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2217
+
checksum = "da5c014fb79a8227db44a0433d748107750d2550b7fca55c59a3d7ee7d2ee2b2"
2218
+
dependencies = [
2219
+
"anyhow",
2220
+
"async-trait",
2221
+
"http",
2222
+
"reqwest-middleware",
2223
+
]
2224
+
2225
+
[[package]]
2226
+
name = "reqwest-middleware"
2227
+
version = "0.4.2"
2228
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2229
+
checksum = "57f17d28a6e6acfe1733fe24bcd30774d13bffa4b8a22535b4c8c98423088d4e"
2230
+
dependencies = [
2231
+
"anyhow",
2232
+
"async-trait",
2233
+
"http",
2234
+
"reqwest",
2235
+
"serde",
2236
+
"thiserror 1.0.69",
2237
+
"tower-service",
2238
+
]
2239
+
2240
+
[[package]]
1968
2241
name = "resolv-conf"
1969
-
version = "0.7.4"
2242
+
version = "0.7.5"
1970
2243
source = "registry+https://github.com/rust-lang/crates.io-index"
1971
-
checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3"
2244
+
checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799"
1972
2245
1973
2246
[[package]]
1974
2247
name = "rfc6979"
···
1995
2268
]
1996
2269
1997
2270
[[package]]
1998
-
name = "rustc-demangle"
1999
-
version = "0.1.26"
2271
+
name = "rsa"
2272
+
version = "0.9.8"
2000
2273
source = "registry+https://github.com/rust-lang/crates.io-index"
2001
-
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
2274
+
checksum = "78928ac1ed176a5ca1d17e578a1825f3d81ca54cf41053a592584b020cfd691b"
2275
+
dependencies = [
2276
+
"const-oid",
2277
+
"digest",
2278
+
"num-bigint-dig",
2279
+
"num-integer",
2280
+
"num-traits",
2281
+
"pkcs1",
2282
+
"pkcs8",
2283
+
"rand_core 0.6.4",
2284
+
"signature",
2285
+
"spki",
2286
+
"subtle",
2287
+
"zeroize",
2288
+
]
2002
2289
2003
2290
[[package]]
2004
2291
name = "rustc-hash"
···
2017
2304
2018
2305
[[package]]
2019
2306
name = "rustix"
2020
-
version = "1.0.8"
2307
+
version = "1.1.2"
2021
2308
source = "registry+https://github.com/rust-lang/crates.io-index"
2022
-
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
2309
+
checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e"
2023
2310
dependencies = [
2024
2311
"bitflags",
2025
2312
"errno",
2026
2313
"libc",
2027
2314
"linux-raw-sys",
2028
-
"windows-sys 0.60.2",
2315
+
"windows-sys 0.61.2",
2029
2316
]
2030
2317
2031
2318
[[package]]
2032
2319
name = "rustls"
2033
-
version = "0.23.31"
2320
+
version = "0.23.34"
2034
2321
source = "registry+https://github.com/rust-lang/crates.io-index"
2035
-
checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc"
2322
+
checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7"
2036
2323
dependencies = [
2037
2324
"once_cell",
2038
2325
"ring",
···
2044
2331
2045
2332
[[package]]
2046
2333
name = "rustls-native-certs"
2047
-
version = "0.8.1"
2334
+
version = "0.8.2"
2048
2335
source = "registry+https://github.com/rust-lang/crates.io-index"
2049
-
checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3"
2336
+
checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923"
2050
2337
dependencies = [
2051
2338
"openssl-probe",
2052
2339
"rustls-pki-types",
2053
2340
"schannel",
2054
-
"security-framework 3.3.0",
2341
+
"security-framework 3.5.1",
2055
2342
]
2056
2343
2057
2344
[[package]]
2058
2345
name = "rustls-pki-types"
2059
-
version = "1.12.0"
2346
+
version = "1.13.0"
2060
2347
source = "registry+https://github.com/rust-lang/crates.io-index"
2061
-
checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79"
2348
+
checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a"
2062
2349
dependencies = [
2063
2350
"web-time",
2064
2351
"zeroize",
···
2066
2353
2067
2354
[[package]]
2068
2355
name = "rustls-webpki"
2069
-
version = "0.103.4"
2356
+
version = "0.103.8"
2070
2357
source = "registry+https://github.com/rust-lang/crates.io-index"
2071
-
checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc"
2358
+
checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52"
2072
2359
dependencies = [
2073
2360
"ring",
2074
2361
"rustls-pki-types",
···
2089
2376
2090
2377
[[package]]
2091
2378
name = "schannel"
2092
-
version = "0.1.27"
2379
+
version = "0.1.28"
2093
2380
source = "registry+https://github.com/rust-lang/crates.io-index"
2094
-
checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d"
2381
+
checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1"
2095
2382
dependencies = [
2096
-
"windows-sys 0.59.0",
2383
+
"windows-sys 0.61.2",
2097
2384
]
2098
-
2099
-
[[package]]
2100
-
name = "scoped-tls"
2101
-
version = "1.0.1"
2102
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2103
-
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
2104
2385
2105
2386
[[package]]
2106
2387
name = "scopeguard"
···
2138
2419
2139
2420
[[package]]
2140
2421
name = "security-framework"
2141
-
version = "3.3.0"
2422
+
version = "3.5.1"
2142
2423
source = "registry+https://github.com/rust-lang/crates.io-index"
2143
-
checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c"
2424
+
checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef"
2144
2425
dependencies = [
2145
2426
"bitflags",
2146
2427
"core-foundation 0.10.1",
···
2151
2432
2152
2433
[[package]]
2153
2434
name = "security-framework-sys"
2154
-
version = "2.14.0"
2435
+
version = "2.15.0"
2155
2436
source = "registry+https://github.com/rust-lang/crates.io-index"
2156
-
checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32"
2437
+
checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0"
2157
2438
dependencies = [
2158
2439
"core-foundation-sys",
2159
2440
"libc",
···
2161
2442
2162
2443
[[package]]
2163
2444
name = "semver"
2164
-
version = "1.0.26"
2445
+
version = "1.0.27"
2165
2446
source = "registry+https://github.com/rust-lang/crates.io-index"
2166
-
checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0"
2447
+
checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
2167
2448
2168
2449
[[package]]
2169
2450
name = "serde"
2170
-
version = "1.0.219"
2451
+
version = "1.0.228"
2171
2452
source = "registry+https://github.com/rust-lang/crates.io-index"
2172
-
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
2453
+
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
2173
2454
dependencies = [
2455
+
"serde_core",
2174
2456
"serde_derive",
2175
2457
]
2176
2458
2177
2459
[[package]]
2178
2460
name = "serde_bytes"
2179
-
version = "0.11.17"
2461
+
version = "0.11.19"
2180
2462
source = "registry+https://github.com/rust-lang/crates.io-index"
2181
-
checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96"
2463
+
checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
2182
2464
dependencies = [
2183
2465
"serde",
2466
+
"serde_core",
2467
+
]
2468
+
2469
+
[[package]]
2470
+
name = "serde_core"
2471
+
version = "1.0.228"
2472
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2473
+
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
2474
+
dependencies = [
2475
+
"serde_derive",
2184
2476
]
2185
2477
2186
2478
[[package]]
2187
2479
name = "serde_derive"
2188
-
version = "1.0.219"
2480
+
version = "1.0.228"
2189
2481
source = "registry+https://github.com/rust-lang/crates.io-index"
2190
-
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
2482
+
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
2191
2483
dependencies = [
2192
2484
"proc-macro2",
2193
2485
"quote",
2194
-
"syn",
2486
+
"syn 2.0.108",
2195
2487
]
2196
2488
2197
2489
[[package]]
2198
2490
name = "serde_ipld_dagcbor"
2199
-
version = "0.6.3"
2491
+
version = "0.6.4"
2200
2492
source = "registry+https://github.com/rust-lang/crates.io-index"
2201
-
checksum = "99600723cf53fb000a66175555098db7e75217c415bdd9a16a65d52a19dcc4fc"
2493
+
checksum = "46182f4f08349a02b45c998ba3215d3f9de826246ba02bb9dddfe9a2a2100778"
2202
2494
dependencies = [
2203
2495
"cbor4ii",
2204
2496
"ipld-core",
···
2208
2500
2209
2501
[[package]]
2210
2502
name = "serde_json"
2211
-
version = "1.0.143"
2503
+
version = "1.0.145"
2212
2504
source = "registry+https://github.com/rust-lang/crates.io-index"
2213
-
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
2505
+
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
2214
2506
dependencies = [
2215
2507
"itoa",
2216
2508
"memchr",
2217
2509
"ryu",
2218
2510
"serde",
2511
+
"serde_core",
2219
2512
]
2220
2513
2221
2514
[[package]]
2222
2515
name = "serde_path_to_error"
2223
-
version = "0.1.17"
2516
+
version = "0.1.20"
2224
2517
source = "registry+https://github.com/rust-lang/crates.io-index"
2225
-
checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a"
2518
+
checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457"
2226
2519
dependencies = [
2227
2520
"itoa",
2228
2521
"serde",
2522
+
"serde_core",
2229
2523
]
2230
2524
2231
2525
[[package]]
···
2251
2545
]
2252
2546
2253
2547
[[package]]
2548
+
name = "sha1"
2549
+
version = "0.10.6"
2550
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2551
+
checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
2552
+
dependencies = [
2553
+
"cfg-if",
2554
+
"cpufeatures",
2555
+
"digest",
2556
+
]
2557
+
2558
+
[[package]]
2254
2559
name = "sha2"
2255
2560
version = "0.10.9"
2256
2561
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2296
2601
]
2297
2602
2298
2603
[[package]]
2604
+
name = "simdutf8"
2605
+
version = "0.1.5"
2606
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2607
+
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
2608
+
2609
+
[[package]]
2299
2610
name = "slab"
2300
2611
version = "0.4.11"
2301
2612
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2306
2617
version = "1.15.1"
2307
2618
source = "registry+https://github.com/rust-lang/crates.io-index"
2308
2619
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
2620
+
dependencies = [
2621
+
"serde",
2622
+
]
2309
2623
2310
2624
[[package]]
2311
2625
name = "socket2"
···
2319
2633
2320
2634
[[package]]
2321
2635
name = "socket2"
2322
-
version = "0.6.0"
2636
+
version = "0.6.1"
2323
2637
source = "registry+https://github.com/rust-lang/crates.io-index"
2324
-
checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807"
2638
+
checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881"
2325
2639
dependencies = [
2326
2640
"libc",
2327
-
"windows-sys 0.59.0",
2641
+
"windows-sys 0.60.2",
2642
+
]
2643
+
2644
+
[[package]]
2645
+
name = "spin"
2646
+
version = "0.9.8"
2647
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2648
+
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
2649
+
dependencies = [
2650
+
"lock_api",
2328
2651
]
2329
2652
2330
2653
[[package]]
···
2338
2661
]
2339
2662
2340
2663
[[package]]
2664
+
name = "sqlx"
2665
+
version = "0.8.6"
2666
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2667
+
checksum = "1fefb893899429669dcdd979aff487bd78f4064e5e7907e4269081e0ef7d97dc"
2668
+
dependencies = [
2669
+
"sqlx-core",
2670
+
"sqlx-macros",
2671
+
"sqlx-mysql",
2672
+
"sqlx-postgres",
2673
+
"sqlx-sqlite",
2674
+
]
2675
+
2676
+
[[package]]
2677
+
name = "sqlx-core"
2678
+
version = "0.8.6"
2679
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2680
+
checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6"
2681
+
dependencies = [
2682
+
"base64",
2683
+
"bytes",
2684
+
"crc",
2685
+
"crossbeam-queue",
2686
+
"either",
2687
+
"event-listener",
2688
+
"futures-core",
2689
+
"futures-intrusive",
2690
+
"futures-io",
2691
+
"futures-util",
2692
+
"hashbrown 0.15.5",
2693
+
"hashlink",
2694
+
"indexmap",
2695
+
"log",
2696
+
"memchr",
2697
+
"once_cell",
2698
+
"percent-encoding",
2699
+
"serde",
2700
+
"serde_json",
2701
+
"sha2",
2702
+
"smallvec",
2703
+
"thiserror 2.0.17",
2704
+
"tokio",
2705
+
"tokio-stream",
2706
+
"tracing",
2707
+
"url",
2708
+
]
2709
+
2710
+
[[package]]
2711
+
name = "sqlx-macros"
2712
+
version = "0.8.6"
2713
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2714
+
checksum = "a2d452988ccaacfbf5e0bdbc348fb91d7c8af5bee192173ac3636b5fb6e6715d"
2715
+
dependencies = [
2716
+
"proc-macro2",
2717
+
"quote",
2718
+
"sqlx-core",
2719
+
"sqlx-macros-core",
2720
+
"syn 2.0.108",
2721
+
]
2722
+
2723
+
[[package]]
2724
+
name = "sqlx-macros-core"
2725
+
version = "0.8.6"
2726
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2727
+
checksum = "19a9c1841124ac5a61741f96e1d9e2ec77424bf323962dd894bdb93f37d5219b"
2728
+
dependencies = [
2729
+
"dotenvy",
2730
+
"either",
2731
+
"heck",
2732
+
"hex",
2733
+
"once_cell",
2734
+
"proc-macro2",
2735
+
"quote",
2736
+
"serde",
2737
+
"serde_json",
2738
+
"sha2",
2739
+
"sqlx-core",
2740
+
"sqlx-mysql",
2741
+
"sqlx-postgres",
2742
+
"sqlx-sqlite",
2743
+
"syn 2.0.108",
2744
+
"tokio",
2745
+
"url",
2746
+
]
2747
+
2748
+
[[package]]
2749
+
name = "sqlx-mysql"
2750
+
version = "0.8.6"
2751
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2752
+
checksum = "aa003f0038df784eb8fecbbac13affe3da23b45194bd57dba231c8f48199c526"
2753
+
dependencies = [
2754
+
"atoi",
2755
+
"base64",
2756
+
"bitflags",
2757
+
"byteorder",
2758
+
"bytes",
2759
+
"crc",
2760
+
"digest",
2761
+
"dotenvy",
2762
+
"either",
2763
+
"futures-channel",
2764
+
"futures-core",
2765
+
"futures-io",
2766
+
"futures-util",
2767
+
"generic-array",
2768
+
"hex",
2769
+
"hkdf",
2770
+
"hmac",
2771
+
"itoa",
2772
+
"log",
2773
+
"md-5",
2774
+
"memchr",
2775
+
"once_cell",
2776
+
"percent-encoding",
2777
+
"rand 0.8.5",
2778
+
"rsa",
2779
+
"serde",
2780
+
"sha1",
2781
+
"sha2",
2782
+
"smallvec",
2783
+
"sqlx-core",
2784
+
"stringprep",
2785
+
"thiserror 2.0.17",
2786
+
"tracing",
2787
+
"whoami",
2788
+
]
2789
+
2790
+
[[package]]
2791
+
name = "sqlx-postgres"
2792
+
version = "0.8.6"
2793
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2794
+
checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46"
2795
+
dependencies = [
2796
+
"atoi",
2797
+
"base64",
2798
+
"bitflags",
2799
+
"byteorder",
2800
+
"crc",
2801
+
"dotenvy",
2802
+
"etcetera",
2803
+
"futures-channel",
2804
+
"futures-core",
2805
+
"futures-util",
2806
+
"hex",
2807
+
"hkdf",
2808
+
"hmac",
2809
+
"home",
2810
+
"itoa",
2811
+
"log",
2812
+
"md-5",
2813
+
"memchr",
2814
+
"once_cell",
2815
+
"rand 0.8.5",
2816
+
"serde",
2817
+
"serde_json",
2818
+
"sha2",
2819
+
"smallvec",
2820
+
"sqlx-core",
2821
+
"stringprep",
2822
+
"thiserror 2.0.17",
2823
+
"tracing",
2824
+
"whoami",
2825
+
]
2826
+
2827
+
[[package]]
2828
+
name = "sqlx-sqlite"
2829
+
version = "0.8.6"
2830
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2831
+
checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea"
2832
+
dependencies = [
2833
+
"atoi",
2834
+
"flume",
2835
+
"futures-channel",
2836
+
"futures-core",
2837
+
"futures-executor",
2838
+
"futures-intrusive",
2839
+
"futures-util",
2840
+
"libsqlite3-sys",
2841
+
"log",
2842
+
"percent-encoding",
2843
+
"serde",
2844
+
"serde_urlencoded",
2845
+
"sqlx-core",
2846
+
"thiserror 2.0.17",
2847
+
"tracing",
2848
+
"url",
2849
+
]
2850
+
2851
+
[[package]]
2341
2852
name = "stable_deref_trait"
2342
-
version = "1.2.0"
2853
+
version = "1.2.1"
2343
2854
source = "registry+https://github.com/rust-lang/crates.io-index"
2344
-
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
2855
+
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
2345
2856
2346
2857
[[package]]
2347
-
name = "strsim"
2348
-
version = "0.11.1"
2858
+
name = "stringprep"
2859
+
version = "0.1.5"
2349
2860
source = "registry+https://github.com/rust-lang/crates.io-index"
2350
-
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
2861
+
checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1"
2862
+
dependencies = [
2863
+
"unicode-bidi",
2864
+
"unicode-normalization",
2865
+
"unicode-properties",
2866
+
]
2351
2867
2352
2868
[[package]]
2353
2869
name = "subtle"
···
2357
2873
2358
2874
[[package]]
2359
2875
name = "syn"
2360
-
version = "2.0.106"
2876
+
version = "1.0.109"
2361
2877
source = "registry+https://github.com/rust-lang/crates.io-index"
2362
-
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
2878
+
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
2879
+
dependencies = [
2880
+
"proc-macro2",
2881
+
"quote",
2882
+
"unicode-ident",
2883
+
]
2884
+
2885
+
[[package]]
2886
+
name = "syn"
2887
+
version = "2.0.108"
2888
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2889
+
checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917"
2363
2890
dependencies = [
2364
2891
"proc-macro2",
2365
2892
"quote",
···
2383
2910
dependencies = [
2384
2911
"proc-macro2",
2385
2912
"quote",
2386
-
"syn",
2913
+
"syn 2.0.108",
2387
2914
]
2388
2915
2389
2916
[[package]]
···
2415
2942
2416
2943
[[package]]
2417
2944
name = "tempfile"
2418
-
version = "3.21.0"
2945
+
version = "3.23.0"
2419
2946
source = "registry+https://github.com/rust-lang/crates.io-index"
2420
-
checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e"
2947
+
checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16"
2421
2948
dependencies = [
2422
2949
"fastrand",
2423
-
"getrandom 0.3.3",
2950
+
"getrandom 0.3.4",
2424
2951
"once_cell",
2425
2952
"rustix",
2426
-
"windows-sys 0.60.2",
2953
+
"windows-sys 0.61.2",
2427
2954
]
2428
2955
2429
2956
[[package]]
···
2437
2964
2438
2965
[[package]]
2439
2966
name = "thiserror"
2440
-
version = "2.0.16"
2967
+
version = "2.0.17"
2441
2968
source = "registry+https://github.com/rust-lang/crates.io-index"
2442
-
checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
2969
+
checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8"
2443
2970
dependencies = [
2444
-
"thiserror-impl 2.0.16",
2971
+
"thiserror-impl 2.0.17",
2445
2972
]
2446
2973
2447
2974
[[package]]
···
2452
2979
dependencies = [
2453
2980
"proc-macro2",
2454
2981
"quote",
2455
-
"syn",
2982
+
"syn 2.0.108",
2456
2983
]
2457
2984
2458
2985
[[package]]
2459
2986
name = "thiserror-impl"
2460
-
version = "2.0.16"
2987
+
version = "2.0.17"
2461
2988
source = "registry+https://github.com/rust-lang/crates.io-index"
2462
-
checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
2989
+
checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
2463
2990
dependencies = [
2464
2991
"proc-macro2",
2465
2992
"quote",
2466
-
"syn",
2993
+
"syn 2.0.108",
2467
2994
]
2468
2995
2469
2996
[[package]]
···
2477
3004
2478
3005
[[package]]
2479
3006
name = "tinystr"
2480
-
version = "0.8.1"
3007
+
version = "0.8.2"
2481
3008
source = "registry+https://github.com/rust-lang/crates.io-index"
2482
-
checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b"
3009
+
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
2483
3010
dependencies = [
2484
3011
"displaydoc",
2485
3012
"zerovec",
···
2502
3029
2503
3030
[[package]]
2504
3031
name = "tokio"
2505
-
version = "1.47.1"
3032
+
version = "1.48.0"
2506
3033
source = "registry+https://github.com/rust-lang/crates.io-index"
2507
-
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
3034
+
checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
2508
3035
dependencies = [
2509
-
"backtrace",
2510
3036
"bytes",
2511
-
"io-uring",
2512
3037
"libc",
2513
3038
"mio",
3039
+
"parking_lot",
2514
3040
"pin-project-lite",
2515
3041
"signal-hook-registry",
2516
-
"slab",
2517
-
"socket2 0.6.0",
3042
+
"socket2 0.6.1",
2518
3043
"tokio-macros",
2519
-
"windows-sys 0.59.0",
3044
+
"windows-sys 0.61.2",
2520
3045
]
2521
3046
2522
3047
[[package]]
2523
3048
name = "tokio-macros"
2524
-
version = "2.5.0"
3049
+
version = "2.6.0"
2525
3050
source = "registry+https://github.com/rust-lang/crates.io-index"
2526
-
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
3051
+
checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
2527
3052
dependencies = [
2528
3053
"proc-macro2",
2529
3054
"quote",
2530
-
"syn",
3055
+
"syn 2.0.108",
2531
3056
]
2532
3057
2533
3058
[[package]]
···
2542
3067
2543
3068
[[package]]
2544
3069
name = "tokio-rustls"
2545
-
version = "0.26.2"
3070
+
version = "0.26.4"
2546
3071
source = "registry+https://github.com/rust-lang/crates.io-index"
2547
-
checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b"
3072
+
checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
2548
3073
dependencies = [
2549
3074
"rustls",
2550
3075
"tokio",
2551
3076
]
2552
3077
2553
3078
[[package]]
3079
+
name = "tokio-stream"
3080
+
version = "0.1.17"
3081
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3082
+
checksum = "eca58d7bba4a75707817a2c44174253f9236b2d5fbd055602e9d5c07c139a047"
3083
+
dependencies = [
3084
+
"futures-core",
3085
+
"pin-project-lite",
3086
+
"tokio",
3087
+
]
3088
+
3089
+
[[package]]
2554
3090
name = "tokio-util"
2555
-
version = "0.7.16"
3091
+
version = "0.7.17"
2556
3092
source = "registry+https://github.com/rust-lang/crates.io-index"
2557
-
checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5"
3093
+
checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594"
2558
3094
dependencies = [
2559
3095
"bytes",
2560
3096
"futures-core",
···
2565
3101
]
2566
3102
2567
3103
[[package]]
3104
+
name = "tokio-websockets"
3105
+
version = "0.11.4"
3106
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3107
+
checksum = "9fcaf159b4e7a376b05b5bfd77bfd38f3324f5fce751b4213bfc7eaa47affb4e"
3108
+
dependencies = [
3109
+
"base64",
3110
+
"bytes",
3111
+
"futures-core",
3112
+
"futures-sink",
3113
+
"http",
3114
+
"httparse",
3115
+
"rand 0.9.2",
3116
+
"ring",
3117
+
"rustls-native-certs",
3118
+
"rustls-pki-types",
3119
+
"simdutf8",
3120
+
"tokio",
3121
+
"tokio-rustls",
3122
+
"tokio-util",
3123
+
]
3124
+
3125
+
[[package]]
2568
3126
name = "tower"
2569
3127
version = "0.5.2"
2570
3128
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2588
3146
dependencies = [
2589
3147
"bitflags",
2590
3148
"bytes",
3149
+
"futures-core",
2591
3150
"futures-util",
2592
3151
"http",
2593
3152
"http-body",
3153
+
"http-body-util",
3154
+
"http-range-header",
3155
+
"httpdate",
2594
3156
"iri-string",
3157
+
"mime",
3158
+
"mime_guess",
3159
+
"percent-encoding",
2595
3160
"pin-project-lite",
3161
+
"tokio",
3162
+
"tokio-util",
2596
3163
"tower",
2597
3164
"tower-layer",
2598
3165
"tower-service",
3166
+
"tracing",
2599
3167
]
2600
3168
2601
3169
[[package]]
···
2630
3198
dependencies = [
2631
3199
"proc-macro2",
2632
3200
"quote",
2633
-
"syn",
3201
+
"syn 2.0.108",
2634
3202
]
2635
3203
2636
3204
[[package]]
···
2680
3248
2681
3249
[[package]]
2682
3250
name = "typenum"
2683
-
version = "1.18.0"
3251
+
version = "1.19.0"
2684
3252
source = "registry+https://github.com/rust-lang/crates.io-index"
2685
-
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
3253
+
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
3254
+
3255
+
[[package]]
3256
+
name = "ulid"
3257
+
version = "1.2.1"
3258
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3259
+
checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe"
3260
+
dependencies = [
3261
+
"rand 0.9.2",
3262
+
"web-time",
3263
+
]
3264
+
3265
+
[[package]]
3266
+
name = "unicase"
3267
+
version = "2.8.1"
3268
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3269
+
checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
3270
+
3271
+
[[package]]
3272
+
name = "unicode-bidi"
3273
+
version = "0.3.18"
3274
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3275
+
checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5"
2686
3276
2687
3277
[[package]]
2688
3278
name = "unicode-ident"
2689
-
version = "1.0.18"
3279
+
version = "1.0.22"
2690
3280
source = "registry+https://github.com/rust-lang/crates.io-index"
2691
-
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
3281
+
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
3282
+
3283
+
[[package]]
3284
+
name = "unicode-normalization"
3285
+
version = "0.1.25"
3286
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3287
+
checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8"
3288
+
dependencies = [
3289
+
"tinyvec",
3290
+
]
3291
+
3292
+
[[package]]
3293
+
name = "unicode-properties"
3294
+
version = "0.1.4"
3295
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3296
+
checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d"
2692
3297
2693
3298
[[package]]
2694
3299
name = "unsigned-varint"
···
2721
3326
]
2722
3327
2723
3328
[[package]]
2724
-
name = "utf8_iter"
2725
-
version = "1.0.4"
3329
+
name = "urlencoding"
3330
+
version = "2.1.3"
2726
3331
source = "registry+https://github.com/rust-lang/crates.io-index"
2727
-
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
3332
+
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
2728
3333
2729
3334
[[package]]
2730
-
name = "utf8parse"
2731
-
version = "0.2.2"
3335
+
name = "utf8_iter"
3336
+
version = "1.0.4"
2732
3337
source = "registry+https://github.com/rust-lang/crates.io-index"
2733
-
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
3338
+
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
2734
3339
2735
3340
[[package]]
2736
3341
name = "uuid"
···
2738
3343
source = "registry+https://github.com/rust-lang/crates.io-index"
2739
3344
checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2"
2740
3345
dependencies = [
2741
-
"getrandom 0.3.3",
3346
+
"getrandom 0.3.4",
2742
3347
"js-sys",
2743
3348
"wasm-bindgen",
2744
3349
]
···
2783
3388
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
2784
3389
2785
3390
[[package]]
2786
-
name = "wasi"
2787
-
version = "0.14.3+wasi-0.2.4"
3391
+
name = "wasip2"
3392
+
version = "1.0.1+wasi-0.2.4"
2788
3393
source = "registry+https://github.com/rust-lang/crates.io-index"
2789
-
checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95"
3394
+
checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
2790
3395
dependencies = [
2791
3396
"wit-bindgen",
2792
3397
]
2793
3398
2794
3399
[[package]]
3400
+
name = "wasite"
3401
+
version = "0.1.0"
3402
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3403
+
checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b"
3404
+
3405
+
[[package]]
2795
3406
name = "wasm-bindgen"
2796
-
version = "0.2.101"
3407
+
version = "0.2.105"
2797
3408
source = "registry+https://github.com/rust-lang/crates.io-index"
2798
-
checksum = "7e14915cadd45b529bb8d1f343c4ed0ac1de926144b746e2710f9cd05df6603b"
3409
+
checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60"
2799
3410
dependencies = [
2800
3411
"cfg-if",
2801
3412
"once_cell",
···
2805
3416
]
2806
3417
2807
3418
[[package]]
2808
-
name = "wasm-bindgen-backend"
2809
-
version = "0.2.101"
2810
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2811
-
checksum = "e28d1ba982ca7923fd01448d5c30c6864d0a14109560296a162f80f305fb93bb"
2812
-
dependencies = [
2813
-
"bumpalo",
2814
-
"log",
2815
-
"proc-macro2",
2816
-
"quote",
2817
-
"syn",
2818
-
"wasm-bindgen-shared",
2819
-
]
2820
-
2821
-
[[package]]
2822
3419
name = "wasm-bindgen-futures"
2823
-
version = "0.4.51"
3420
+
version = "0.4.55"
2824
3421
source = "registry+https://github.com/rust-lang/crates.io-index"
2825
-
checksum = "0ca85039a9b469b38336411d6d6ced91f3fc87109a2a27b0c197663f5144dffe"
3422
+
checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0"
2826
3423
dependencies = [
2827
3424
"cfg-if",
2828
3425
"js-sys",
···
2833
3430
2834
3431
[[package]]
2835
3432
name = "wasm-bindgen-macro"
2836
-
version = "0.2.101"
3433
+
version = "0.2.105"
2837
3434
source = "registry+https://github.com/rust-lang/crates.io-index"
2838
-
checksum = "7c3d463ae3eff775b0c45df9da45d68837702ac35af998361e2c84e7c5ec1b0d"
3435
+
checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2"
2839
3436
dependencies = [
2840
3437
"quote",
2841
3438
"wasm-bindgen-macro-support",
···
2843
3440
2844
3441
[[package]]
2845
3442
name = "wasm-bindgen-macro-support"
2846
-
version = "0.2.101"
3443
+
version = "0.2.105"
2847
3444
source = "registry+https://github.com/rust-lang/crates.io-index"
2848
-
checksum = "7bb4ce89b08211f923caf51d527662b75bdc9c9c7aab40f86dcb9fb85ac552aa"
3445
+
checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc"
2849
3446
dependencies = [
3447
+
"bumpalo",
2850
3448
"proc-macro2",
2851
3449
"quote",
2852
-
"syn",
2853
-
"wasm-bindgen-backend",
3450
+
"syn 2.0.108",
2854
3451
"wasm-bindgen-shared",
2855
3452
]
2856
3453
2857
3454
[[package]]
2858
3455
name = "wasm-bindgen-shared"
2859
-
version = "0.2.101"
3456
+
version = "0.2.105"
2860
3457
source = "registry+https://github.com/rust-lang/crates.io-index"
2861
-
checksum = "f143854a3b13752c6950862c906306adb27c7e839f7414cec8fea35beab624c1"
3458
+
checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76"
2862
3459
dependencies = [
2863
3460
"unicode-ident",
2864
3461
]
2865
3462
2866
3463
[[package]]
2867
3464
name = "web-sys"
2868
-
version = "0.3.78"
3465
+
version = "0.3.82"
2869
3466
source = "registry+https://github.com/rust-lang/crates.io-index"
2870
-
checksum = "77e4b637749ff0d92b8fad63aa1f7cff3cbe125fd49c175cd6345e7272638b12"
3467
+
checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1"
2871
3468
dependencies = [
2872
3469
"js-sys",
2873
3470
"wasm-bindgen",
···
2885
3482
2886
3483
[[package]]
2887
3484
name = "webpki-roots"
2888
-
version = "1.0.2"
3485
+
version = "1.0.4"
2889
3486
source = "registry+https://github.com/rust-lang/crates.io-index"
2890
-
checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2"
3487
+
checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e"
2891
3488
dependencies = [
2892
3489
"rustls-pki-types",
2893
3490
]
2894
3491
2895
3492
[[package]]
2896
-
name = "widestring"
2897
-
version = "1.2.0"
2898
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2899
-
checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d"
2900
-
2901
-
[[package]]
2902
-
name = "windows"
2903
-
version = "0.61.3"
3493
+
name = "whoami"
3494
+
version = "1.6.1"
2904
3495
source = "registry+https://github.com/rust-lang/crates.io-index"
2905
-
checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893"
3496
+
checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d"
2906
3497
dependencies = [
2907
-
"windows-collections",
2908
-
"windows-core",
2909
-
"windows-future",
2910
-
"windows-link",
2911
-
"windows-numerics",
3498
+
"libredox",
3499
+
"wasite",
2912
3500
]
2913
3501
2914
3502
[[package]]
2915
-
name = "windows-collections"
2916
-
version = "0.2.0"
3503
+
name = "widestring"
3504
+
version = "1.2.1"
2917
3505
source = "registry+https://github.com/rust-lang/crates.io-index"
2918
-
checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8"
2919
-
dependencies = [
2920
-
"windows-core",
2921
-
]
2922
-
2923
-
[[package]]
2924
-
name = "windows-core"
2925
-
version = "0.61.2"
2926
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2927
-
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
2928
-
dependencies = [
2929
-
"windows-implement",
2930
-
"windows-interface",
2931
-
"windows-link",
2932
-
"windows-result",
2933
-
"windows-strings",
2934
-
]
2935
-
2936
-
[[package]]
2937
-
name = "windows-future"
2938
-
version = "0.2.1"
2939
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2940
-
checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e"
2941
-
dependencies = [
2942
-
"windows-core",
2943
-
"windows-link",
2944
-
"windows-threading",
2945
-
]
2946
-
2947
-
[[package]]
2948
-
name = "windows-implement"
2949
-
version = "0.60.0"
2950
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2951
-
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
2952
-
dependencies = [
2953
-
"proc-macro2",
2954
-
"quote",
2955
-
"syn",
2956
-
]
2957
-
2958
-
[[package]]
2959
-
name = "windows-interface"
2960
-
version = "0.59.1"
2961
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2962
-
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
2963
-
dependencies = [
2964
-
"proc-macro2",
2965
-
"quote",
2966
-
"syn",
2967
-
]
3506
+
checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471"
2968
3507
2969
3508
[[package]]
2970
3509
name = "windows-link"
···
2973
3512
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
2974
3513
2975
3514
[[package]]
2976
-
name = "windows-numerics"
2977
-
version = "0.2.0"
3515
+
name = "windows-link"
3516
+
version = "0.2.1"
2978
3517
source = "registry+https://github.com/rust-lang/crates.io-index"
2979
-
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
2980
-
dependencies = [
2981
-
"windows-core",
2982
-
"windows-link",
2983
-
]
3518
+
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
2984
3519
2985
3520
[[package]]
2986
3521
name = "windows-registry"
···
2988
3523
source = "registry+https://github.com/rust-lang/crates.io-index"
2989
3524
checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e"
2990
3525
dependencies = [
2991
-
"windows-link",
3526
+
"windows-link 0.1.3",
2992
3527
"windows-result",
2993
3528
"windows-strings",
2994
3529
]
···
2999
3534
source = "registry+https://github.com/rust-lang/crates.io-index"
3000
3535
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
3001
3536
dependencies = [
3002
-
"windows-link",
3537
+
"windows-link 0.1.3",
3003
3538
]
3004
3539
3005
3540
[[package]]
···
3008
3543
source = "registry+https://github.com/rust-lang/crates.io-index"
3009
3544
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
3010
3545
dependencies = [
3011
-
"windows-link",
3546
+
"windows-link 0.1.3",
3012
3547
]
3013
3548
3014
3549
[[package]]
···
3031
3566
3032
3567
[[package]]
3033
3568
name = "windows-sys"
3034
-
version = "0.59.0"
3569
+
version = "0.60.2"
3035
3570
source = "registry+https://github.com/rust-lang/crates.io-index"
3036
-
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
3571
+
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
3037
3572
dependencies = [
3038
-
"windows-targets 0.52.6",
3573
+
"windows-targets 0.53.5",
3039
3574
]
3040
3575
3041
3576
[[package]]
3042
3577
name = "windows-sys"
3043
-
version = "0.60.2"
3578
+
version = "0.61.2"
3044
3579
source = "registry+https://github.com/rust-lang/crates.io-index"
3045
-
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
3580
+
checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
3046
3581
dependencies = [
3047
-
"windows-targets 0.53.3",
3582
+
"windows-link 0.2.1",
3048
3583
]
3049
3584
3050
3585
[[package]]
···
3080
3615
3081
3616
[[package]]
3082
3617
name = "windows-targets"
3083
-
version = "0.53.3"
3084
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3085
-
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
3086
-
dependencies = [
3087
-
"windows-link",
3088
-
"windows_aarch64_gnullvm 0.53.0",
3089
-
"windows_aarch64_msvc 0.53.0",
3090
-
"windows_i686_gnu 0.53.0",
3091
-
"windows_i686_gnullvm 0.53.0",
3092
-
"windows_i686_msvc 0.53.0",
3093
-
"windows_x86_64_gnu 0.53.0",
3094
-
"windows_x86_64_gnullvm 0.53.0",
3095
-
"windows_x86_64_msvc 0.53.0",
3096
-
]
3097
-
3098
-
[[package]]
3099
-
name = "windows-threading"
3100
-
version = "0.1.0"
3618
+
version = "0.53.5"
3101
3619
source = "registry+https://github.com/rust-lang/crates.io-index"
3102
-
checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6"
3620
+
checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
3103
3621
dependencies = [
3104
-
"windows-link",
3622
+
"windows-link 0.2.1",
3623
+
"windows_aarch64_gnullvm 0.53.1",
3624
+
"windows_aarch64_msvc 0.53.1",
3625
+
"windows_i686_gnu 0.53.1",
3626
+
"windows_i686_gnullvm 0.53.1",
3627
+
"windows_i686_msvc 0.53.1",
3628
+
"windows_x86_64_gnu 0.53.1",
3629
+
"windows_x86_64_gnullvm 0.53.1",
3630
+
"windows_x86_64_msvc 0.53.1",
3105
3631
]
3106
3632
3107
3633
[[package]]
···
3118
3644
3119
3645
[[package]]
3120
3646
name = "windows_aarch64_gnullvm"
3121
-
version = "0.53.0"
3647
+
version = "0.53.1"
3122
3648
source = "registry+https://github.com/rust-lang/crates.io-index"
3123
-
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
3649
+
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
3124
3650
3125
3651
[[package]]
3126
3652
name = "windows_aarch64_msvc"
···
3136
3662
3137
3663
[[package]]
3138
3664
name = "windows_aarch64_msvc"
3139
-
version = "0.53.0"
3665
+
version = "0.53.1"
3140
3666
source = "registry+https://github.com/rust-lang/crates.io-index"
3141
-
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
3667
+
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
3142
3668
3143
3669
[[package]]
3144
3670
name = "windows_i686_gnu"
···
3154
3680
3155
3681
[[package]]
3156
3682
name = "windows_i686_gnu"
3157
-
version = "0.53.0"
3683
+
version = "0.53.1"
3158
3684
source = "registry+https://github.com/rust-lang/crates.io-index"
3159
-
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
3685
+
checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3"
3160
3686
3161
3687
[[package]]
3162
3688
name = "windows_i686_gnullvm"
···
3166
3692
3167
3693
[[package]]
3168
3694
name = "windows_i686_gnullvm"
3169
-
version = "0.53.0"
3695
+
version = "0.53.1"
3170
3696
source = "registry+https://github.com/rust-lang/crates.io-index"
3171
-
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
3697
+
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
3172
3698
3173
3699
[[package]]
3174
3700
name = "windows_i686_msvc"
···
3184
3710
3185
3711
[[package]]
3186
3712
name = "windows_i686_msvc"
3187
-
version = "0.53.0"
3713
+
version = "0.53.1"
3188
3714
source = "registry+https://github.com/rust-lang/crates.io-index"
3189
-
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
3715
+
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
3190
3716
3191
3717
[[package]]
3192
3718
name = "windows_x86_64_gnu"
···
3202
3728
3203
3729
[[package]]
3204
3730
name = "windows_x86_64_gnu"
3205
-
version = "0.53.0"
3731
+
version = "0.53.1"
3206
3732
source = "registry+https://github.com/rust-lang/crates.io-index"
3207
-
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
3733
+
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
3208
3734
3209
3735
[[package]]
3210
3736
name = "windows_x86_64_gnullvm"
···
3220
3746
3221
3747
[[package]]
3222
3748
name = "windows_x86_64_gnullvm"
3223
-
version = "0.53.0"
3749
+
version = "0.53.1"
3224
3750
source = "registry+https://github.com/rust-lang/crates.io-index"
3225
-
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
3751
+
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
3226
3752
3227
3753
[[package]]
3228
3754
name = "windows_x86_64_msvc"
···
3238
3764
3239
3765
[[package]]
3240
3766
name = "windows_x86_64_msvc"
3241
-
version = "0.53.0"
3767
+
version = "0.53.1"
3242
3768
source = "registry+https://github.com/rust-lang/crates.io-index"
3243
-
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
3769
+
checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
3244
3770
3245
3771
[[package]]
3246
3772
name = "winreg"
···
3254
3780
3255
3781
[[package]]
3256
3782
name = "wit-bindgen"
3257
-
version = "0.45.1"
3783
+
version = "0.46.0"
3258
3784
source = "registry+https://github.com/rust-lang/crates.io-index"
3259
-
checksum = "5c573471f125075647d03df72e026074b7203790d41351cd6edc96f46bcccd36"
3785
+
checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
3260
3786
3261
3787
[[package]]
3262
3788
name = "writeable"
3263
-
version = "0.6.1"
3789
+
version = "0.6.2"
3264
3790
source = "registry+https://github.com/rust-lang/crates.io-index"
3265
-
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
3791
+
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
3266
3792
3267
3793
[[package]]
3268
3794
name = "yoke"
3269
-
version = "0.8.0"
3795
+
version = "0.8.1"
3270
3796
source = "registry+https://github.com/rust-lang/crates.io-index"
3271
-
checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc"
3797
+
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
3272
3798
dependencies = [
3273
-
"serde",
3274
3799
"stable_deref_trait",
3275
3800
"yoke-derive",
3276
3801
"zerofrom",
···
3278
3803
3279
3804
[[package]]
3280
3805
name = "yoke-derive"
3281
-
version = "0.8.0"
3806
+
version = "0.8.1"
3282
3807
source = "registry+https://github.com/rust-lang/crates.io-index"
3283
-
checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6"
3808
+
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
3284
3809
dependencies = [
3285
3810
"proc-macro2",
3286
3811
"quote",
3287
-
"syn",
3812
+
"syn 2.0.108",
3288
3813
"synstructure",
3289
3814
]
3290
3815
3291
3816
[[package]]
3292
3817
name = "zerocopy"
3293
-
version = "0.8.26"
3818
+
version = "0.8.27"
3294
3819
source = "registry+https://github.com/rust-lang/crates.io-index"
3295
-
checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f"
3820
+
checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c"
3296
3821
dependencies = [
3297
3822
"zerocopy-derive",
3298
3823
]
3299
3824
3300
3825
[[package]]
3301
3826
name = "zerocopy-derive"
3302
-
version = "0.8.26"
3827
+
version = "0.8.27"
3303
3828
source = "registry+https://github.com/rust-lang/crates.io-index"
3304
-
checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
3829
+
checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831"
3305
3830
dependencies = [
3306
3831
"proc-macro2",
3307
3832
"quote",
3308
-
"syn",
3833
+
"syn 2.0.108",
3309
3834
]
3310
3835
3311
3836
[[package]]
···
3325
3850
dependencies = [
3326
3851
"proc-macro2",
3327
3852
"quote",
3328
-
"syn",
3853
+
"syn 2.0.108",
3329
3854
"synstructure",
3330
3855
]
3331
3856
3332
3857
[[package]]
3333
3858
name = "zeroize"
3334
-
version = "1.8.1"
3859
+
version = "1.8.2"
3335
3860
source = "registry+https://github.com/rust-lang/crates.io-index"
3336
-
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
3861
+
checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
3337
3862
3338
3863
[[package]]
3339
3864
name = "zerotrie"
3340
-
version = "0.2.2"
3865
+
version = "0.2.3"
3341
3866
source = "registry+https://github.com/rust-lang/crates.io-index"
3342
-
checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595"
3867
+
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
3343
3868
dependencies = [
3344
3869
"displaydoc",
3345
3870
"yoke",
···
3348
3873
3349
3874
[[package]]
3350
3875
name = "zerovec"
3351
-
version = "0.11.4"
3876
+
version = "0.11.5"
3352
3877
source = "registry+https://github.com/rust-lang/crates.io-index"
3353
-
checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b"
3878
+
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
3354
3879
dependencies = [
3355
3880
"yoke",
3356
3881
"zerofrom",
···
3359
3884
3360
3885
[[package]]
3361
3886
name = "zerovec-derive"
3362
-
version = "0.11.1"
3887
+
version = "0.11.2"
3363
3888
source = "registry+https://github.com/rust-lang/crates.io-index"
3364
-
checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f"
3889
+
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
3365
3890
dependencies = [
3366
3891
"proc-macro2",
3367
3892
"quote",
3368
-
"syn",
3893
+
"syn 2.0.108",
3894
+
]
3895
+
3896
+
[[package]]
3897
+
name = "zstd"
3898
+
version = "0.13.3"
3899
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3900
+
checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a"
3901
+
dependencies = [
3902
+
"zstd-safe",
3903
+
]
3904
+
3905
+
[[package]]
3906
+
name = "zstd-safe"
3907
+
version = "7.2.4"
3908
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3909
+
checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d"
3910
+
dependencies = [
3911
+
"zstd-sys",
3912
+
]
3913
+
3914
+
[[package]]
3915
+
name = "zstd-sys"
3916
+
version = "2.0.16+zstd.1.5.7"
3917
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3918
+
checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748"
3919
+
dependencies = [
3920
+
"cc",
3921
+
"pkg-config",
3369
3922
]
+13
-6
Cargo.toml
+13
-6
Cargo.toml
···
1
1
[package]
2
2
name = "quickdid"
3
-
version = "1.0.0-rc.2"
3
+
version = "1.0.0-rc.5"
4
4
edition = "2024"
5
5
authors = ["Nick Gerakines <nick.gerakines@gmail.com>"]
6
6
description = "A fast and scalable com.atproto.identity.resolveHandle service"
···
16
16
[dependencies]
17
17
anyhow = "1.0"
18
18
async-trait = "0.1"
19
-
atproto-identity = { version = "0.11.3" }
19
+
atproto-identity = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
20
+
atproto-jetstream = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
21
+
atproto-lexicon = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
20
22
axum = { version = "0.8" }
21
-
bincode = { version = "2.0.1", features = ["serde"] }
22
-
clap = { version = "4", features = ["derive", "env"] }
23
+
bincode = { version = "2.0", features = ["serde"] }
24
+
cadence = "1.6"
23
25
deadpool-redis = { version = "0.22", features = ["connection-manager", "tokio-comp", "tokio-rustls-comp"] }
24
-
http = "1.0"
25
-
metrohash = "1.0.7"
26
+
httpdate = "1.0"
27
+
metrohash = "1.0"
26
28
reqwest = { version = "0.12", features = ["json"] }
27
29
serde = { version = "1.0", features = ["derive"] }
28
30
serde_json = "1.0"
31
+
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
29
32
thiserror = "2.0"
30
33
tokio = { version = "1.35", features = ["rt-multi-thread", "macros", "signal", "sync", "time", "net", "fs"] }
31
34
tokio-util = { version = "0.7", features = ["rt"] }
35
+
tower-http = { version = "0.6", features = ["fs"] }
32
36
tracing = "0.1"
33
37
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
38
+
39
+
[dev-dependencies]
40
+
once_cell = "1.20"
+6
-5
Dockerfile
+6
-5
Dockerfile
···
1
1
# syntax=docker/dockerfile:1.4
2
-
FROM rust:1.89-slim AS builder
2
+
FROM rust:1.90-slim-bookworm AS builder
3
3
4
4
RUN apt-get update && apt-get install -y \
5
5
pkg-config \
···
9
9
WORKDIR /app
10
10
COPY Cargo.lock Cargo.toml ./
11
11
12
-
ARG GIT_HASH=0
13
-
ENV GIT_HASH=$GIT_HASH
14
-
15
12
COPY src ./src
16
13
RUN cargo build --bin quickdid --release
17
14
···
22
19
LABEL org.opencontainers.image.licenses="MIT"
23
20
LABEL org.opencontainers.image.authors="Nick Gerakines <nick.gerakines@gmail.com>"
24
21
LABEL org.opencontainers.image.source="https://tangled.sh/@smokesignal.events/quickdid"
25
-
LABEL org.opencontainers.image.version="1.0.0-rc.2"
22
+
LABEL org.opencontainers.image.version="1.0.0-rc.5"
26
23
27
24
WORKDIR /app
28
25
COPY --from=builder /app/target/release/quickdid /app/quickdid
29
26
27
+
# Copy static files for serving
28
+
COPY www /app/www
29
+
30
30
ENV HTTP_PORT=8080
31
+
ENV STATIC_FILES_DIR=/app/www
31
32
ENV RUST_LOG=info
32
33
ENV RUST_BACKTRACE=full
33
34
+211
-17
README.md
+211
-17
README.md
···
1
1
# QuickDID
2
2
3
-
QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides blazing-fast handle-to-DID resolution with intelligent caching strategies, supporting both in-memory and Redis-backed persistent caching with binary serialization for optimal storage efficiency.
3
+
QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides blazing-fast handle-to-DID resolution with intelligent caching strategies, supporting in-memory, Redis-backed, and SQLite-backed persistent caching with binary serialization for optimal storage efficiency. The service includes proactive cache refreshing to maintain optimal performance and comprehensive metrics support for production monitoring.
4
4
5
-
Built with minimal dependencies and optimized for production use, QuickDID delivers exceptional performance while maintaining a lean footprint.
5
+
Built following the 12-factor app methodology with minimal dependencies and optimized for production use, QuickDID delivers exceptional performance while maintaining a lean footprint. Configuration is handled exclusively through environment variables, with only `--version` and `--help` command-line arguments supported.
6
6
7
7
## ⚠️ Production Disclaimer
8
8
9
9
**This project is a release candidate and has not been fully vetted for production use.** While it includes comprehensive error handling and has been designed with production features in mind, more thorough testing is necessary before deploying in critical environments. Use at your own risk and conduct appropriate testing for your use case.
10
10
11
+
## Performance
12
+
13
+
QuickDID is designed for high throughput and low latency:
14
+
15
+
- **Binary serialization** reduces cache storage by ~40% compared to JSON
16
+
- **Rate limiting** protects upstream services from being overwhelmed
17
+
- **Work shedding** in SQLite queue adapter prevents unbounded growth
18
+
- **Configurable TTLs** allow fine-tuning cache freshness vs. performance
19
+
- **Connection pooling** for Redis minimizes connection overhead
20
+
11
21
## Features
12
22
13
23
- **Fast Handle Resolution**: Resolves AT Protocol handles to DIDs using DNS TXT records and HTTP well-known endpoints
14
-
- **Multi-Layer Caching**: In-memory caching with configurable TTL and Redis-backed persistent caching (90-day TTL)
24
+
- **Bidirectional Caching**: Supports both handle-to-DID and DID-to-handle lookups with automatic cache synchronization
25
+
- **Multi-Layer Caching**: Flexible caching with three tiers:
26
+
- In-memory caching with configurable TTL (default: 600 seconds)
27
+
- Redis-backed persistent caching (default: 90-day TTL)
28
+
- SQLite-backed persistent caching (default: 90-day TTL)
29
+
- **Jetstream Consumer**: Real-time cache updates from AT Protocol firehose:
30
+
- Processes Account and Identity events
31
+
- Automatically purges deleted/deactivated accounts
32
+
- Updates handle-to-DID mappings in real-time
33
+
- Comprehensive metrics for event processing
34
+
- Automatic reconnection with backoff
35
+
- **HTTP Caching**: Client-side caching support with:
36
+
- ETag generation with configurable seed for cache invalidation
37
+
- Cache-Control headers with max-age, stale-while-revalidate, and stale-if-error directives
38
+
- CORS headers for cross-origin requests
39
+
- **Rate Limiting**: Semaphore-based concurrency control with optional timeout to protect upstream services
15
40
- **Binary Serialization**: Compact storage format reduces cache size by ~40% compared to JSON
16
-
- **Queue Processing**: Asynchronous handle resolution with support for MPSC, Redis, and no-op queue adapters
41
+
- **Queue Processing**: Asynchronous handle resolution with multiple adapters:
42
+
- MPSC (in-memory, default)
43
+
- Redis (distributed)
44
+
- SQLite (persistent with work shedding)
45
+
- No-op (testing)
46
+
- **Metrics & Monitoring**:
47
+
- StatsD metrics support for counters, gauges, and timings
48
+
- Resolution timing measurements
49
+
- Jetstream event processing metrics
50
+
- Configurable tags for environment/service identification
51
+
- Integration guides for Telegraf and TimescaleDB
52
+
- Configurable bind address for StatsD UDP socket (IPv4/IPv6)
53
+
- **Proactive Cache Refresh**:
54
+
- Automatically refreshes cache entries before expiration
55
+
- Configurable refresh threshold
56
+
- Prevents cache misses for frequently accessed handles
57
+
- Metrics tracking for refresh operations
58
+
- **Queue Deduplication**:
59
+
- Redis-based deduplication for queue items
60
+
- Prevents duplicate handle resolution work
61
+
- Configurable TTL for deduplication keys
62
+
- **Cache Management APIs**:
63
+
- `purge` method for removing entries by handle or DID
64
+
- `set` method for manually updating handle-to-DID mappings
65
+
- Chainable operations across resolver layers
17
66
- **AT Protocol Compatible**: Implements XRPC endpoints for seamless integration with AT Protocol infrastructure
18
-
- **Comprehensive Error Handling**: Includes health checks and graceful shutdown support
67
+
- **Comprehensive Error Handling**: Structured errors with unique identifiers (e.g., `error-quickdid-config-1`), health checks, and graceful shutdown
68
+
- **12-Factor App**: Environment-based configuration following cloud-native best practices
19
69
- **Minimal Dependencies**: Optimized dependency tree for faster compilation and reduced attack surface
20
-
- **Predictable Worker IDs**: Simple default worker identification for distributed deployments
21
70
22
71
## Building
23
72
···
25
74
26
75
- Rust 1.70 or later
27
76
- Redis (optional, for persistent caching and distributed queuing)
77
+
- SQLite 3.35+ (optional, for single-instance persistent caching)
28
78
29
79
### Build Commands
30
80
···
45
95
46
96
## Minimum Configuration
47
97
48
-
QuickDID requires the following environment variables to run:
98
+
QuickDID requires minimal configuration to run. Configuration is validated at startup, and the service will exit with specific error codes if validation fails.
49
99
50
100
### Required
51
101
52
102
- `HTTP_EXTERNAL`: External hostname for service endpoints (e.g., `localhost:3007`)
53
-
- `SERVICE_KEY`: Private key for service identity in DID format (e.g., `did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK`)
54
103
55
104
### Example Minimal Setup
56
105
57
106
```bash
58
-
HTTP_EXTERNAL=localhost:3007 \
59
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
60
-
cargo run
107
+
HTTP_EXTERNAL=localhost:3007 cargo run
108
+
```
109
+
110
+
### Static Files
111
+
112
+
QuickDID serves static files from the `www` directory by default. This includes:
113
+
- Landing page (`index.html`)
114
+
- AT Protocol well-known files (`.well-known/atproto-did` and `.well-known/did.json`)
115
+
116
+
Generate the `.well-known` files for your deployment:
117
+
118
+
```bash
119
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
61
120
```
62
121
63
122
This will start QuickDID with:
64
123
- HTTP server on port 8080 (default)
65
-
- In-memory caching only (300-second TTL)
124
+
- In-memory caching only (600-second TTL default)
66
125
- MPSC queue adapter for async processing
67
126
- Default worker ID: "worker1"
68
127
- Connection to plc.directory for DID resolution
128
+
- Rate limiting disabled (default)
69
129
70
130
### Optional Configuration
71
131
72
132
For production deployments, consider these additional environment variables:
73
133
134
+
#### Network & Service
74
135
- `HTTP_PORT`: Server port (default: 8080)
75
-
- `REDIS_URL`: Redis connection URL for persistent caching (e.g., `redis://localhost:6379`)
76
-
- `QUEUE_ADAPTER`: Queue type - 'mpsc', 'redis', or 'noop' (default: mpsc)
77
-
- `QUEUE_WORKER_ID`: Worker identifier for distributed queue processing (default: worker1)
78
136
- `PLC_HOSTNAME`: PLC directory hostname (default: plc.directory)
137
+
- `USER_AGENT`: HTTP User-Agent for outgoing requests
138
+
- `DNS_NAMESERVERS`: Custom DNS servers (comma-separated)
139
+
140
+
#### Caching
141
+
- `REDIS_URL`: Redis connection URL (e.g., `redis://localhost:6379`)
142
+
- `SQLITE_URL`: SQLite database URL (e.g., `sqlite:./quickdid.db`)
143
+
- `CACHE_TTL_MEMORY`: In-memory cache TTL in seconds (default: 600)
144
+
- `CACHE_TTL_REDIS`: Redis cache TTL in seconds (default: 7776000 = 90 days)
145
+
- `CACHE_TTL_SQLITE`: SQLite cache TTL in seconds (default: 7776000 = 90 days)
146
+
147
+
#### Queue Processing
148
+
- `QUEUE_ADAPTER`: Queue type - 'mpsc', 'redis', 'sqlite', 'noop', or 'none' (default: mpsc)
149
+
- `QUEUE_WORKER_ID`: Worker identifier (default: worker1)
150
+
- `QUEUE_BUFFER_SIZE`: MPSC queue buffer size (default: 1000)
151
+
- `QUEUE_REDIS_PREFIX`: Redis key prefix for queues (default: queue:handleresolver:)
152
+
- `QUEUE_REDIS_TIMEOUT`: Redis blocking timeout in seconds (default: 5)
153
+
- `QUEUE_REDIS_DEDUP_ENABLED`: Enable queue deduplication (default: false)
154
+
- `QUEUE_REDIS_DEDUP_TTL`: TTL for deduplication keys in seconds (default: 60)
155
+
- `QUEUE_SQLITE_MAX_SIZE`: Max SQLite queue size for work shedding (default: 10000)
156
+
157
+
#### Rate Limiting
158
+
- `RESOLVER_MAX_CONCURRENT`: Maximum concurrent handle resolutions (default: 0 = disabled)
159
+
- `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`: Timeout for acquiring rate limit permit in ms (default: 0 = no timeout)
160
+
161
+
#### HTTP Cache Control
162
+
- `CACHE_MAX_AGE`: Max-age for Cache-Control header in seconds (default: 86400)
163
+
- `CACHE_STALE_IF_ERROR`: Stale-if-error directive in seconds (default: 172800)
164
+
- `CACHE_STALE_WHILE_REVALIDATE`: Stale-while-revalidate in seconds (default: 86400)
165
+
- `CACHE_MAX_STALE`: Max-stale directive in seconds (default: 86400)
166
+
- `ETAG_SEED`: Seed value for ETag generation (default: application version)
167
+
168
+
#### Metrics
169
+
- `METRICS_ADAPTER`: Metrics adapter type - 'noop' or 'statsd' (default: noop)
170
+
- `METRICS_STATSD_HOST`: StatsD host and port (required when METRICS_ADAPTER=statsd)
171
+
- `METRICS_STATSD_BIND`: Bind address for StatsD UDP socket (default: [::]:0 for IPv6, can use 0.0.0.0:0 for IPv4)
172
+
- `METRICS_PREFIX`: Prefix for all metrics (default: quickdid)
173
+
- `METRICS_TAGS`: Comma-separated tags (e.g., env:prod,service:quickdid)
174
+
175
+
#### Proactive Refresh
176
+
- `PROACTIVE_REFRESH_ENABLED`: Enable proactive cache refreshing (default: false)
177
+
- `PROACTIVE_REFRESH_THRESHOLD`: Refresh when TTL remaining is below this threshold (0.0-1.0, default: 0.8)
178
+
179
+
#### Jetstream Consumer
180
+
- `JETSTREAM_ENABLED`: Enable Jetstream consumer for real-time cache updates (default: false)
181
+
- `JETSTREAM_HOSTNAME`: Jetstream WebSocket hostname (default: jetstream.atproto.tools)
182
+
183
+
#### Static Files
184
+
- `STATIC_FILES_DIR`: Directory for serving static files (default: www)
185
+
186
+
#### Logging
79
187
- `RUST_LOG`: Logging level (e.g., debug, info, warn, error)
80
188
81
-
### Production Example
189
+
### Production Examples
82
190
191
+
#### Redis-based with Metrics and Jetstream (Multi-instance/HA)
83
192
```bash
84
193
HTTP_EXTERNAL=quickdid.example.com \
85
-
SERVICE_KEY=did:key:yourkeyhere \
86
194
HTTP_PORT=3000 \
87
195
REDIS_URL=redis://localhost:6379 \
196
+
CACHE_TTL_REDIS=86400 \
88
197
QUEUE_ADAPTER=redis \
89
198
QUEUE_WORKER_ID=prod-worker-1 \
199
+
RESOLVER_MAX_CONCURRENT=100 \
200
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 \
201
+
METRICS_ADAPTER=statsd \
202
+
METRICS_STATSD_HOST=localhost:8125 \
203
+
METRICS_PREFIX=quickdid \
204
+
METRICS_TAGS=env:prod,service:quickdid \
205
+
CACHE_MAX_AGE=86400 \
206
+
JETSTREAM_ENABLED=true \
207
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools \
90
208
RUST_LOG=info \
91
209
./target/release/quickdid
92
210
```
93
211
212
+
#### SQLite-based (Single-instance)
213
+
```bash
214
+
HTTP_EXTERNAL=quickdid.example.com \
215
+
HTTP_PORT=3000 \
216
+
SQLITE_URL=sqlite:./quickdid.db \
217
+
CACHE_TTL_SQLITE=86400 \
218
+
QUEUE_ADAPTER=sqlite \
219
+
QUEUE_SQLITE_MAX_SIZE=10000 \
220
+
RESOLVER_MAX_CONCURRENT=50 \
221
+
RUST_LOG=info \
222
+
./target/release/quickdid
223
+
```
224
+
225
+
## Architecture
226
+
227
+
QuickDID uses a layered architecture for optimal performance:
228
+
229
+
```
230
+
Request → Cache Layer → Proactive Refresh → Rate Limiter → Base Resolver → DNS/HTTP
231
+
               ↓                ↓                ↓              ↓
232
+
         Memory/Redis/      Background       Semaphore     AT Protocol
233
+
         SQLite             Refresher        (optional)    Infrastructure
234
+
               ↓
235
+
Jetstream Consumer ← Real-time Updates from AT Protocol Firehose
236
+
```
237
+
238
+
### Cache Priority
239
+
QuickDID checks caches in this order (a layering sketch follows the list):
240
+
1. Redis (if configured) - Best for distributed deployments
241
+
2. SQLite (if configured) - Best for single-instance with persistence
242
+
3. In-memory (fallback) - Always available
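
The layering above can be pictured as resolvers wrapping resolvers. The sketch below is illustrative only — the trait name, method signature, and cache type are assumptions of this sketch, not QuickDID's actual API — and shows how a memory layer consults its own cache before falling through to the next layer:

```rust
use async_trait::async_trait;
use std::collections::HashMap;
use tokio::sync::RwLock;

// Hypothetical trait standing in for QuickDID's resolver abstraction.
#[async_trait]
trait Resolve: Send + Sync {
    async fn resolve(&self, handle: &str) -> Option<String>; // handle -> DID
}

// A memory layer that wraps the next layer (Redis, SQLite, or the base resolver).
struct MemoryLayer<R> {
    cache: RwLock<HashMap<String, String>>,
    inner: R,
}

#[async_trait]
impl<R: Resolve> Resolve for MemoryLayer<R> {
    async fn resolve(&self, handle: &str) -> Option<String> {
        let handle = handle.to_lowercase(); // handles are normalized to lowercase
        if let Some(did) = self.cache.read().await.get(&handle).cloned() {
            return Some(did); // hit: no upstream work
        }
        let did = self.inner.resolve(&handle).await?; // fall through to the next layer
        self.cache.write().await.insert(handle, did.clone());
        Some(did)
    }
}
```

Redis and SQLite layers follow the same shape, which is why `purge` and `set` operations can be chained through the whole stack.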
243
+
244
+
### Real-time Cache Updates
245
+
When Jetstream is enabled, QuickDID maintains cache consistency by:
246
+
- Processing Account events to purge deleted/deactivated accounts
247
+
- Processing Identity events to update handle-to-DID mappings
248
+
- Automatically reconnecting with exponential backoff on connection failures
249
+
- Tracking metrics for successful and failed event processing
250
+
251
+
### Deployment Strategies
252
+
253
+
- **Single-instance**: Use SQLite for both caching and queuing
254
+
- **Multi-instance/HA**: Use Redis for distributed caching and queuing
255
+
- **Development**: Use in-memory caching with MPSC queuing
256
+
- **Real-time sync**: Enable Jetstream consumer for live cache updates
257
+
94
258
## API Endpoints
95
259
96
260
- `GET /_health` - Health check endpoint
97
261
- `GET /xrpc/com.atproto.identity.resolveHandle` - Resolve handle to DID
98
262
- `GET /.well-known/atproto-did` - Serve DID document for the service
263
+
- `OPTIONS /*` - CORS preflight support for all endpoints
264
+
265
+
## Docker Deployment
266
+
267
+
QuickDID can be deployed using Docker. See the [production deployment guide](docs/production-deployment.md) for detailed Docker and Docker Compose configurations.
268
+
269
+
### Quick Docker Setup
270
+
271
+
```bash
272
+
# Build the image
273
+
docker build -t quickdid:latest .
274
+
275
+
# Run with environment file
276
+
docker run -d \
277
+
--name quickdid \
278
+
--env-file .env \
279
+
-p 8080:8080 \
280
+
quickdid:latest
281
+
```
282
+
283
+
## Documentation
284
+
285
+
- [Configuration Reference](docs/configuration-reference.md) - Complete list of all configuration options
286
+
- [Production Deployment Guide](docs/production-deployment.md) - Docker, monitoring, and production best practices
287
+
- [Metrics Guide](docs/telegraf-timescaledb-metrics-guide.md) - Setting up metrics with Telegraf and TimescaleDB
288
+
- [Development Guide](CLAUDE.md) - Architecture details and development patterns
289
+
290
+
## Railway Deployment
291
+
292
+
QuickDID includes Railway deployment resources in the `railway-resources/` directory for easy deployment with metrics support via Telegraf. See the deployment configurations for one-click deployment options.
99
293
100
294
## License
101
295
+41
docker-compose.yml
+41
docker-compose.yml
···
1
+
version: '3.8'
2
+
3
+
services:
4
+
quickdid:
5
+
image: quickdid:latest
6
+
build: .
7
+
ports:
8
+
- "3007:8080"
9
+
environment:
10
+
- HTTP_EXTERNAL=localhost:3007
11
+
- HTTP_PORT=8080
12
+
- RUST_LOG=info
13
+
# Optional: Override the static files directory
14
+
# - STATIC_FILES_DIR=/app/custom-www
15
+
volumes:
16
+
# Optional: Mount custom static files from host
17
+
# - ./custom-www:/app/custom-www:ro
18
+
# Optional: Mount custom .well-known files
19
+
# - ./www/.well-known:/app/www/.well-known:ro
20
+
# Optional: Use SQLite for caching
21
+
# - ./data:/app/data
22
+
# environment:
23
+
# SQLite cache configuration
24
+
# - SQLITE_URL=sqlite:/app/data/quickdid.db
25
+
# - CACHE_TTL_SQLITE=86400
26
+
27
+
# Redis cache configuration (if using external Redis)
28
+
# - REDIS_URL=redis://redis:6379
29
+
# - CACHE_TTL_REDIS=86400
30
+
# - QUEUE_ADAPTER=redis
31
+
32
+
# Optional: Redis service for caching
33
+
# redis:
34
+
# image: redis:7-alpine
35
+
# ports:
36
+
# - "6379:6379"
37
+
# volumes:
38
+
# - redis-data:/data
39
+
40
+
volumes:
41
+
redis-data:
+934
-54
docs/configuration-reference.md
+934
-54
docs/configuration-reference.md
···
8
8
- [Network Configuration](#network-configuration)
9
9
- [Caching Configuration](#caching-configuration)
10
10
- [Queue Configuration](#queue-configuration)
11
-
- [Security Configuration](#security-configuration)
12
-
- [Advanced Configuration](#advanced-configuration)
11
+
- [Rate Limiting Configuration](#rate-limiting-configuration)
12
+
- [HTTP Caching Configuration](#http-caching-configuration)
13
+
- [Metrics Configuration](#metrics-configuration)
14
+
- [Proactive Refresh Configuration](#proactive-refresh-configuration)
15
+
- [Jetstream Consumer Configuration](#jetstream-consumer-configuration)
16
+
- [Static Files Configuration](#static-files-configuration)
13
17
- [Configuration Examples](#configuration-examples)
14
18
- [Validation Rules](#validation-rules)
15
19
···
40
44
**Constraints**:
41
45
- Must be a valid hostname or hostname:port combination
42
46
- Port (if specified) must be between 1-65535
43
-
- Used to generate service DID (did:web:{HTTP_EXTERNAL})
44
-
45
-
### `SERVICE_KEY`
46
-
47
-
**Required**: Yes
48
-
**Type**: String
49
-
**Format**: DID private key
50
-
**Security**: SENSITIVE - Never commit to version control
51
-
52
-
The private key for the service's AT Protocol identity. This key is used to sign responses and authenticate the service.
53
-
54
-
**Examples**:
55
-
```bash
56
-
# did:key format (Ed25519)
57
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
58
-
59
-
# did:plc format
60
-
SERVICE_KEY=did:plc:xyz123abc456def789
61
-
```
62
-
63
-
**Constraints**:
64
-
- Must be a valid DID format
65
-
- Must include the private key component
66
-
- Should be stored securely (e.g., secrets manager, encrypted storage)
67
47
68
48
## Network Configuration
69
49
···
162
142
163
143
### `REDIS_URL`
164
144
165
-
**Required**: No (but highly recommended for production)
145
+
**Required**: No (recommended for multi-instance production)
166
146
**Type**: String
167
147
**Format**: Redis connection URL
168
148
···
185
165
# TLS connection
186
166
REDIS_URL=rediss://secure-redis.example.com:6380/0
187
167
```
168
+
169
+
### `SQLITE_URL`
170
+
171
+
**Required**: No (recommended for single-instance production)
172
+
**Type**: String
173
+
**Format**: SQLite database URL
174
+
175
+
SQLite database URL for persistent caching. Provides single-file persistent storage without external dependencies.
176
+
177
+
**Examples**:
178
+
```bash
179
+
# File-based database (recommended)
180
+
SQLITE_URL=sqlite:./quickdid.db
181
+
182
+
# With absolute path
183
+
SQLITE_URL=sqlite:/var/lib/quickdid/cache.db
184
+
185
+
# In-memory database (testing only)
186
+
SQLITE_URL=sqlite::memory:
187
+
188
+
# Alternative file syntax
189
+
SQLITE_URL=sqlite:///path/to/database.db
190
+
```
191
+
192
+
**Cache Priority**: QuickDID uses the first available cache:
193
+
1. Redis (if `REDIS_URL` is configured)
194
+
2. SQLite (if `SQLITE_URL` is configured)
195
+
3. In-memory cache (fallback)
188
196
189
197
### `CACHE_TTL_MEMORY`
190
198
···
229
237
CACHE_TTL_REDIS=7776000 # 90 days (default, maximum stability)
230
238
```
231
239
232
-
**Recommendations**:
240
+
### `CACHE_TTL_SQLITE`
241
+
242
+
**Required**: No
243
+
**Type**: Integer (seconds)
244
+
**Default**: `7776000` (90 days)
245
+
**Range**: 3600-31536000 (1 hour to 1 year)
246
+
**Constraints**: Must be > 0
247
+
248
+
Time-to-live for SQLite cache entries in seconds. Only used when `SQLITE_URL` is configured.
249
+
250
+
**Examples**:
251
+
```bash
252
+
CACHE_TTL_SQLITE=3600 # 1 hour (frequently changing data)
253
+
CACHE_TTL_SQLITE=86400 # 1 day (recommended for active handles)
254
+
CACHE_TTL_SQLITE=604800 # 1 week (balanced)
255
+
CACHE_TTL_SQLITE=2592000 # 30 days (stable handles)
256
+
CACHE_TTL_SQLITE=7776000 # 90 days (default, maximum stability)
257
+
```
258
+
259
+
**TTL Recommendations**:
233
260
- Social media handles: 1-7 days
234
261
- Corporate/stable handles: 30-90 days
235
262
- Test environments: 1 hour
263
+
- Single-instance deployments: Can use longer TTLs (30-90 days)
264
+
- Multi-instance deployments: Use shorter TTLs (1-7 days)
236
265
237
266
## Queue Configuration
238
267
···
241
270
**Required**: No
242
271
**Type**: String
243
272
**Default**: `mpsc`
244
-
**Values**: `mpsc`, `redis`, `noop`, `none`
273
+
**Values**: `mpsc`, `redis`, `sqlite`, `noop`, `none`
245
274
246
275
The type of queue adapter for background handle resolution.
247
276
248
277
**Options**:
249
278
- `mpsc`: In-memory multi-producer single-consumer queue (default)
250
279
- `redis`: Redis-backed distributed queue
280
+
- `sqlite`: SQLite-backed persistent queue
251
281
- `noop`: Disable queue processing (testing only)
252
282
- `none`: Alias for `noop`
253
283
···
258
288
259
289
# Multi-instance or high availability
260
290
QUEUE_ADAPTER=redis
291
+
292
+
# Single instance with persistence
293
+
QUEUE_ADAPTER=sqlite
261
294
262
295
# Testing without background processing
263
296
QUEUE_ADAPTER=noop
···
325
358
QUEUE_REDIS_TIMEOUT=30 # Minimal polling, slow shutdown
326
359
```
327
360
361
+
### `QUEUE_REDIS_DEDUP_ENABLED`
362
+
363
+
**Required**: No
364
+
**Type**: Boolean
365
+
**Default**: `false`
366
+
367
+
Enable deduplication for Redis queue to prevent duplicate handles from being queued multiple times within the TTL window. When enabled, uses Redis SET with TTL to track handles currently being processed.
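
A minimal sketch of this pattern with the `redis` crate — the key naming and return-value handling are assumptions of the sketch, not QuickDID's exact implementation. `SET ... NX EX <ttl>` succeeds only for the first writer, so only one worker enqueues a given handle per TTL window:

```rust
use redis::aio::ConnectionLike;

/// Returns true if `handle` was newly marked in-flight and should be enqueued,
/// false if a dedup key already exists (another worker is resolving it).
async fn try_mark_in_flight<C: ConnectionLike>(
    conn: &mut C,
    prefix: &str,
    handle: &str,
    ttl_secs: u64,
) -> redis::RedisResult<bool> {
    // SET key 1 NX EX ttl -> replies OK only when the key did not exist yet.
    let reply: Option<String> = redis::cmd("SET")
        .arg(format!("{prefix}dedup:{handle}"))
        .arg(1)
        .arg("NX")
        .arg("EX")
        .arg(ttl_secs)
        .query_async(conn)
        .await?;
    Ok(reply.is_some())
}
```

Because the key carries its own TTL, a crashed worker cannot keep a handle locked for longer than `QUEUE_REDIS_DEDUP_TTL` seconds.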
368
+
369
+
**Examples**:
370
+
```bash
371
+
# Enable deduplication (recommended for production)
372
+
QUEUE_REDIS_DEDUP_ENABLED=true
373
+
374
+
# Disable deduplication (default)
375
+
QUEUE_REDIS_DEDUP_ENABLED=false
376
+
```
377
+
378
+
**Use cases**:
379
+
- **Production**: Enable to prevent duplicate work and reduce load
380
+
- **High-traffic**: Essential to avoid processing the same handle multiple times
381
+
- **Development**: Can be disabled for simpler debugging
382
+
383
+
### `QUEUE_REDIS_DEDUP_TTL`
384
+
385
+
**Required**: No
386
+
**Type**: Integer (seconds)
387
+
**Default**: `60`
388
+
**Range**: 10-300 (recommended)
389
+
**Constraints**: Must be > 0 when deduplication is enabled
390
+
391
+
TTL for Redis queue deduplication keys in seconds. Determines how long to prevent duplicate handle resolution requests.
392
+
393
+
**Examples**:
394
+
```bash
395
+
# Quick deduplication window (10 seconds)
396
+
QUEUE_REDIS_DEDUP_TTL=10
397
+
398
+
# Default (1 minute)
399
+
QUEUE_REDIS_DEDUP_TTL=60
400
+
401
+
# Extended deduplication (5 minutes)
402
+
QUEUE_REDIS_DEDUP_TTL=300
403
+
```
404
+
405
+
**Recommendations**:
406
+
- **Fast processing**: 10-30 seconds
407
+
- **Normal processing**: 60 seconds (default)
408
+
- **Slow processing or high load**: 120-300 seconds
409
+
328
410
### `QUEUE_WORKER_ID`
329
411
330
412
**Required**: No
···
366
448
QUEUE_BUFFER_SIZE=10000 # Very high traffic
367
449
```
368
450
451
+
### `QUEUE_SQLITE_MAX_SIZE`
452
+
453
+
**Required**: No
454
+
**Type**: Integer
455
+
**Default**: `10000`
456
+
**Range**: 100-1000000 (recommended)
457
+
**Constraints**: Must be >= 0
458
+
459
+
Maximum queue size for SQLite adapter work shedding. When the queue exceeds this limit, the oldest entries are automatically deleted to maintain the specified size limit, preserving the most recently queued work items.
460
+
461
+
**Work Shedding Behavior**:
462
+
- New work items are always accepted
463
+
- When queue size exceeds `QUEUE_SQLITE_MAX_SIZE`, oldest entries are deleted
464
+
- Deletion happens atomically with insertion in a single transaction
465
+
- Essential for long-running deployments to prevent unbounded disk growth
466
+
- Set to `0` to disable work shedding (unlimited queue size)
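
A sketch of a shedding insert using `sqlx` (already a project dependency); the `queue(id, handle, created_at)` table and column names here are illustrative, not QuickDID's actual schema:

```rust
use sqlx::SqlitePool;

/// Insert a handle and shed the oldest rows beyond `max_size` in the same
/// transaction, so the queue table never grows without bound.
async fn enqueue_with_shedding(
    pool: &SqlitePool,
    handle: &str,
    max_size: i64,
) -> Result<(), sqlx::Error> {
    let mut tx = pool.begin().await?;

    sqlx::query("INSERT INTO queue (handle, created_at) VALUES (?1, strftime('%s','now'))")
        .bind(handle)
        .execute(&mut *tx)
        .await?;

    if max_size > 0 {
        // Keep only the newest `max_size` rows; everything older is shed.
        sqlx::query(
            "DELETE FROM queue
             WHERE id NOT IN (SELECT id FROM queue ORDER BY id DESC LIMIT ?1)",
        )
        .bind(max_size)
        .execute(&mut *tx)
        .await?;
    }

    tx.commit().await
}
```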
467
+
468
+
**Examples**:
469
+
```bash
470
+
QUEUE_SQLITE_MAX_SIZE=0 # Unlimited (disable work shedding)
471
+
QUEUE_SQLITE_MAX_SIZE=1000 # Small deployment, frequent processing
472
+
QUEUE_SQLITE_MAX_SIZE=10000 # Default, balanced for most deployments
473
+
QUEUE_SQLITE_MAX_SIZE=100000 # High-traffic deployment with slower processing
474
+
QUEUE_SQLITE_MAX_SIZE=1000000 # Very high-traffic, maximum recommended
475
+
```
476
+
477
+
**Recommendations**:
478
+
- **Small deployments**: 1000-5000 entries
479
+
- **Production deployments**: 10000-50000 entries
480
+
- **High-traffic deployments**: 50000-1000000 entries
481
+
- **Development/testing**: 100-1000 entries
482
+
- **Disk space concerns**: Lower values (1000-5000)
483
+
- **High ingestion rate**: Higher values (50000-1000000)
484
+
485
+
## Rate Limiting Configuration
486
+
487
+
### `RESOLVER_MAX_CONCURRENT`
488
+
489
+
**Required**: No
490
+
**Type**: Integer
491
+
**Default**: `0` (disabled)
492
+
**Range**: 0-10000
493
+
**Constraints**: Must be between 0 and 10000
494
+
495
+
Maximum concurrent handle resolutions allowed. When set to a value greater than 0, enables semaphore-based rate limiting to protect upstream DNS and HTTP services from being overwhelmed.
496
+
497
+
**How it works**:
498
+
- Uses a semaphore to limit concurrent resolutions
499
+
- Applied between the base resolver and caching layers
500
+
- Requests wait for an available permit before resolution
501
+
- Helps prevent overwhelming upstream services
502
+
503
+
**Examples**:
504
+
```bash
505
+
# Disabled (default)
506
+
RESOLVER_MAX_CONCURRENT=0
507
+
508
+
# Light rate limiting
509
+
RESOLVER_MAX_CONCURRENT=10
510
+
511
+
# Moderate rate limiting
512
+
RESOLVER_MAX_CONCURRENT=50
513
+
514
+
# Heavy traffic with rate limiting
515
+
RESOLVER_MAX_CONCURRENT=100
516
+
517
+
# Maximum allowed
518
+
RESOLVER_MAX_CONCURRENT=10000
519
+
```
520
+
521
+
**Recommendations**:
522
+
- **Development**: 0 (disabled) or 10-50 for testing
523
+
- **Production (low traffic)**: 50-100
524
+
- **Production (high traffic)**: 100-500
525
+
- **Production (very high traffic)**: 500-1000
526
+
- **Testing rate limiting**: 1-5 to observe behavior
527
+
528
+
**Placement in resolver stack**:
529
+
```
530
+
Request → Cache → RateLimited → Base → DNS/HTTP
531
+
```
532
+
533
+
### `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`
534
+
535
+
**Required**: No
536
+
**Type**: Integer (milliseconds)
537
+
**Default**: `0` (no timeout)
538
+
**Range**: 0-60000
539
+
**Constraints**: Must be between 0 and 60000 (60 seconds max)
540
+
541
+
Timeout for acquiring a rate limit permit in milliseconds. When set to a value greater than 0, requests will timeout if they cannot acquire a permit within the specified time, preventing them from waiting indefinitely when the rate limiter is at capacity.
542
+
543
+
**How it works**:
544
+
- Applied when `RESOLVER_MAX_CONCURRENT` is enabled (> 0)
545
+
- Uses `tokio::time::timeout` to limit permit acquisition time
546
+
- Returns an error if timeout expires before permit is acquired
547
+
- Prevents request queue buildup during high load
548
+
549
+
**Examples**:
550
+
```bash
551
+
# No timeout (default)
552
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=0
553
+
554
+
# Quick timeout for responsive failures (100ms)
555
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=100
556
+
557
+
# Moderate timeout (1 second)
558
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=1000
559
+
560
+
# Longer timeout for production (5 seconds)
561
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000
562
+
563
+
# Maximum allowed (60 seconds)
564
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=60000
565
+
```
566
+
567
+
**Recommendations**:
568
+
- **Development**: 100-1000ms for quick feedback
569
+
- **Production (low latency)**: 1000-5000ms
570
+
- **Production (high latency tolerance)**: 5000-30000ms
571
+
- **Testing**: 100ms to quickly identify bottlenecks
572
+
- **0**: Use when you want requests to wait indefinitely
573
+
574
+
**Error behavior**:
575
+
When a timeout occurs, the request fails with:
576
+
```
577
+
Rate limit permit acquisition timed out after {timeout}ms
578
+
```
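
Under the hood this combination amounts to a `tokio::sync::Semaphore` guarded by an optional `tokio::time::timeout`. A simplified sketch (the error handling and function name are illustrative):

```rust
use std::{sync::Arc, time::Duration};
use tokio::sync::Semaphore;

/// Acquire a resolution permit, optionally bounded by a timeout.
/// Mirrors RESOLVER_MAX_CONCURRENT / RESOLVER_MAX_CONCURRENT_TIMEOUT_MS.
async fn acquire_permit(
    semaphore: &Arc<Semaphore>,
    timeout_ms: u64,
) -> Result<tokio::sync::OwnedSemaphorePermit, String> {
    let acquire = semaphore.clone().acquire_owned();
    if timeout_ms == 0 {
        // No timeout: wait as long as it takes.
        acquire.await.map_err(|e| e.to_string())
    } else {
        match tokio::time::timeout(Duration::from_millis(timeout_ms), acquire).await {
            Ok(permit) => permit.map_err(|e| e.to_string()),
            Err(_) => Err(format!(
                "Rate limit permit acquisition timed out after {timeout_ms}ms"
            )),
        }
    }
}
```

With `RESOLVER_MAX_CONCURRENT=100`, the semaphore would be created once as `Arc::new(Semaphore::new(100))`; the permit is released automatically when it is dropped after resolution.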
579
+
580
+
## Metrics Configuration
581
+
582
+
### `METRICS_ADAPTER`
583
+
584
+
**Required**: No
585
+
**Type**: String
586
+
**Default**: `noop`
587
+
**Values**: `noop`, `statsd`
588
+
589
+
Metrics adapter type for collecting and publishing metrics.
590
+
591
+
**Options**:
592
+
- `noop`: No metrics collection (default)
593
+
- `statsd`: Send metrics to StatsD server
594
+
595
+
**Examples**:
596
+
```bash
597
+
# No metrics (default)
598
+
METRICS_ADAPTER=noop
599
+
600
+
# Enable StatsD metrics
601
+
METRICS_ADAPTER=statsd
602
+
```
603
+
604
+
### `METRICS_STATSD_HOST`
605
+
606
+
**Required**: Yes (when METRICS_ADAPTER=statsd)
607
+
**Type**: String
608
+
**Format**: hostname:port
609
+
610
+
StatsD server host and port for metrics collection.
611
+
612
+
**Examples**:
613
+
```bash
614
+
# Local StatsD
615
+
METRICS_STATSD_HOST=localhost:8125
616
+
617
+
# Remote StatsD
618
+
METRICS_STATSD_HOST=statsd.example.com:8125
619
+
620
+
# Docker network
621
+
METRICS_STATSD_HOST=statsd:8125
622
+
```
623
+
624
+
### `METRICS_STATSD_BIND`
625
+
626
+
**Required**: No
627
+
**Type**: String
628
+
**Default**: `[::]:0`
629
+
630
+
Bind address for StatsD UDP socket. Controls which local address to bind for sending UDP packets.
631
+
632
+
**Examples**:
633
+
```bash
634
+
# IPv6 any address, random port (default)
635
+
METRICS_STATSD_BIND=[::]:0
636
+
637
+
# IPv4 any address, random port
638
+
METRICS_STATSD_BIND=0.0.0.0:0
639
+
640
+
# Specific interface
641
+
METRICS_STATSD_BIND=192.168.1.100:0
642
+
643
+
# Specific port
644
+
METRICS_STATSD_BIND=[::]:8126
645
+
```
646
+
647
+
### `METRICS_PREFIX`
648
+
649
+
**Required**: No
650
+
**Type**: String
651
+
**Default**: `quickdid`
652
+
653
+
Prefix for all metrics. Used to namespace metrics in your monitoring system.
654
+
655
+
**Examples**:
656
+
```bash
657
+
# Default
658
+
METRICS_PREFIX=quickdid
659
+
660
+
# Environment-specific
661
+
METRICS_PREFIX=prod.quickdid
662
+
METRICS_PREFIX=staging.quickdid
663
+
664
+
# Region-specific
665
+
METRICS_PREFIX=us-east-1.quickdid
666
+
METRICS_PREFIX=eu-west-1.quickdid
667
+
668
+
# Service-specific
669
+
METRICS_PREFIX=api.quickdid
670
+
```
671
+
672
+
### `METRICS_TAGS`
673
+
674
+
**Required**: No
675
+
**Type**: String (comma-separated key:value pairs)
676
+
**Default**: None
677
+
678
+
Default tags for all metrics. Added to all metrics for filtering and grouping.
679
+
680
+
**Examples**:
681
+
```bash
682
+
# Basic tags
683
+
METRICS_TAGS=env:production,service:quickdid
684
+
685
+
# Detailed tags
686
+
METRICS_TAGS=env:production,service:quickdid,region:us-east-1,version:1.0.0
687
+
688
+
# Deployment-specific
689
+
METRICS_TAGS=env:staging,cluster:k8s-staging,namespace:quickdid
690
+
```
691
+
692
+
**Common tag patterns**:
693
+
- `env`: Environment (production, staging, development)
694
+
- `service`: Service name
695
+
- `region`: Geographic region
696
+
- `version`: Application version
697
+
- `cluster`: Kubernetes cluster name
698
+
- `instance`: Instance identifier
699
+
700
+
## Proactive Refresh Configuration
701
+
702
+
### `PROACTIVE_REFRESH_ENABLED`
703
+
704
+
**Required**: No
705
+
**Type**: Boolean
706
+
**Default**: `false`
707
+
708
+
Enable proactive cache refresh for frequently accessed handles. When enabled, cache entries that have reached the refresh threshold will be queued for background refresh to keep the cache warm.
709
+
710
+
**Examples**:
711
+
```bash
712
+
# Enable proactive refresh (recommended for production)
713
+
PROACTIVE_REFRESH_ENABLED=true
714
+
715
+
# Disable proactive refresh (default)
716
+
PROACTIVE_REFRESH_ENABLED=false
717
+
```
718
+
719
+
**Benefits**:
720
+
- Prevents cache misses for popular handles
721
+
- Maintains consistent response times
722
+
- Reduces latency spikes during cache expiration
723
+
724
+
**Considerations**:
725
+
- Increases background processing load
726
+
- More DNS/HTTP requests to upstream services
727
+
- Best for high-traffic services with predictable access patterns
728
+
729
+
### `PROACTIVE_REFRESH_THRESHOLD`
730
+
731
+
**Required**: No
732
+
**Type**: Float
733
+
**Default**: `0.8`
734
+
**Range**: 0.0-1.0
735
+
**Constraints**: Must be between 0.0 and 1.0
736
+
737
+
Threshold, expressed as a fraction of the cache TTL (0.0-1.0), at which to trigger a proactive refresh. For example, 0.8 means an entry is refreshed once it has lived for 80% of its TTL.
738
+
739
+
**Examples**:
740
+
```bash
741
+
# Very aggressive (refresh at 50% of TTL)
742
+
PROACTIVE_REFRESH_THRESHOLD=0.5
743
+
744
+
# Moderate (refresh at 70% of TTL)
745
+
PROACTIVE_REFRESH_THRESHOLD=0.7
746
+
747
+
# Default (refresh at 80% of TTL)
748
+
PROACTIVE_REFRESH_THRESHOLD=0.8
749
+
750
+
# Conservative (refresh at 90% of TTL)
751
+
PROACTIVE_REFRESH_THRESHOLD=0.9
752
+
753
+
# Very conservative (refresh at 95% of TTL)
754
+
PROACTIVE_REFRESH_THRESHOLD=0.95
755
+
```
756
+
757
+
**Recommendations**:
758
+
- **High-traffic services**: 0.5-0.7 (aggressive refresh)
759
+
- **Normal traffic**: 0.8 (default, balanced)
760
+
- **Low traffic**: 0.9-0.95 (conservative)
761
+
- **Development**: 0.5 (test refresh behavior)
762
+
763
+
**Impact on different cache TTLs**:
764
+
- TTL=600s (10 min), threshold=0.8: Refresh after 8 minutes
765
+
- TTL=3600s (1 hour), threshold=0.8: Refresh after 48 minutes
766
+
- TTL=86400s (1 day), threshold=0.8: Refresh after 19.2 hours
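
The decision itself is a single comparison; a sketch of the check (the function name is illustrative):

```rust
use std::time::Duration;

/// Refresh once an entry has lived for `threshold` of its TTL.
/// With ttl = 600s and threshold = 0.8 this returns true after 480 seconds.
fn should_refresh(age: Duration, ttl: Duration, threshold: f64) -> bool {
    age.as_secs_f64() >= ttl.as_secs_f64() * threshold
}
```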
767
+
768
+
## Jetstream Consumer Configuration
769
+
770
+
### `JETSTREAM_ENABLED`
771
+
772
+
**Required**: No
773
+
**Type**: Boolean
774
+
**Default**: `false`
775
+
776
+
Enable Jetstream consumer for real-time cache updates from the AT Protocol firehose. When enabled, QuickDID connects to the Jetstream WebSocket service to receive live updates about account and identity changes.
777
+
778
+
**How it works**:
779
+
- Subscribes to Account and Identity events from the firehose
780
+
- Processes Account events to purge deleted/deactivated accounts
781
+
- Processes Identity events to update handle-to-DID mappings
782
+
- Automatically reconnects with exponential backoff on connection failures
783
+
- Tracks metrics for successful and failed event processing
784
+
785
+
**Examples**:
786
+
```bash
787
+
# Enable Jetstream consumer (recommended for production)
788
+
JETSTREAM_ENABLED=true
789
+
790
+
# Disable Jetstream consumer (default)
791
+
JETSTREAM_ENABLED=false
792
+
```
793
+
794
+
**Benefits**:
795
+
- Real-time cache synchronization with AT Protocol network
796
+
- Automatic removal of deleted/deactivated accounts
797
+
- Immediate handle change updates
798
+
- Reduces stale data in cache
799
+
800
+
**Considerations**:
801
+
- Requires stable WebSocket connection
802
+
- Increases network traffic (incoming events)
803
+
- Best for services requiring up-to-date handle mappings
804
+
- Automatically handles reconnection on failures
805
+
806
+
### `JETSTREAM_HOSTNAME`
807
+
808
+
**Required**: No
809
+
**Type**: String
810
+
**Default**: `jetstream.atproto.tools`
811
+
812
+
The hostname of the Jetstream WebSocket service to connect to for real-time AT Protocol events. Only used when `JETSTREAM_ENABLED=true`.
813
+
814
+
**Examples**:
815
+
```bash
816
+
# Production firehose (default)
817
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
818
+
819
+
# Staging environment
820
+
JETSTREAM_HOSTNAME=jetstream-staging.atproto.tools
821
+
822
+
# Local development firehose
823
+
JETSTREAM_HOSTNAME=localhost:6008
824
+
825
+
# Custom deployment
826
+
JETSTREAM_HOSTNAME=jetstream.example.com
827
+
```
828
+
829
+
**Event Processing** (a handler sketch follows this list):
830
+
- **Account events**:
831
+
- `status: deleted` → Purges handle and DID from all caches
832
+
- `status: deactivated` → Purges handle and DID from all caches
833
+
- Other statuses → Ignored
834
+
835
+
- **Identity events**:
836
+
- Updates handle-to-DID mapping in cache
837
+
- Removes old handle mapping if changed
838
+
- Maintains bidirectional cache consistency
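
The event handling described above could look roughly like the following. The event shapes and the `purge`/`set` signatures are simplified assumptions of this sketch; the real types come from the `atproto-jetstream` crate and QuickDID's resolver trait:

```rust
use async_trait::async_trait;

/// Minimal stand-ins for the resolver's purge/set operations; the real trait
/// signatures may differ.
#[async_trait]
trait CacheOps: Send + Sync {
    async fn purge(&self, handle_or_did: &str) -> anyhow::Result<()>;
    async fn set(&self, handle: &str, did: &str) -> anyhow::Result<()>;
}

/// Hypothetical, simplified shapes for the two event kinds QuickDID consumes.
enum JetstreamEvent {
    Account { did: String, status: Option<String> },
    Identity { did: String, handle: String },
}

async fn handle_event(cache: &dyn CacheOps, event: JetstreamEvent) -> anyhow::Result<()> {
    match event {
        // Account events: deleted/deactivated accounts are purged from the caches.
        JetstreamEvent::Account { did, status } => match status.as_deref() {
            Some("deleted") | Some("deactivated") => cache.purge(&did).await,
            _ => Ok(()), // other statuses are ignored
        },
        // Identity events: refresh the (lowercased) handle-to-DID mapping.
        JetstreamEvent::Identity { did, handle } => {
            cache.set(&handle.to_lowercase(), &did).await
        }
    }
}
```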
839
+
840
+
**Metrics Tracked** (when metrics are enabled):
841
+
- `jetstream.events.received`: Total events received
842
+
- `jetstream.events.processed`: Successfully processed events
843
+
- `jetstream.events.failed`: Failed event processing
844
+
- `jetstream.connections.established`: Successful connections
845
+
- `jetstream.connections.failed`: Failed connection attempts
846
+
847
+
**Reconnection Behavior**:
848
+
- Initial retry delay: 1 second
849
+
- Maximum retry delay: 60 seconds
850
+
- Exponential backoff with jitter
851
+
- Automatic recovery on transient failures
852
+
853
+
**Recommendations**:
854
+
- **Production**: Use default `jetstream.atproto.tools`
855
+
- **Development**: Consider local firehose for testing
856
+
- **High availability**: Monitor connection metrics
857
+
- **Network issues**: Check WebSocket connectivity
858
+
859
+
## Static Files Configuration
860
+
861
+
### `STATIC_FILES_DIR`
862
+
863
+
**Required**: No
864
+
**Type**: String (directory path)
865
+
**Default**: `www`
866
+
867
+
Directory path for serving static files. This directory should contain the landing page and AT Protocol well-known files.
868
+
869
+
**Directory Structure**:
870
+
```
871
+
www/
872
+
├── index.html                # Landing page
873
+
├── .well-known/
874
+
│   ├── atproto-did           # Service DID identifier
875
+
│   └── did.json              # DID document
876
+
└── (other static assets)
877
+
```
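
Serving this directory is a matter of adding a filesystem fallback to the router. A sketch with `axum` and `tower-http` (both project dependencies); the exact router wiring is an assumption:

```rust
use axum::Router;
use tower_http::services::ServeDir;

/// Attach static file serving for STATIC_FILES_DIR to an existing router.
fn with_static_files(router: Router, static_dir: &str) -> Router {
    // Any path not matched by an API route is answered from files under
    // `static_dir`, which is how index.html and /.well-known/* get served.
    router.fallback_service(ServeDir::new(static_dir))
}
```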
878
+
879
+
**Examples**:
880
+
```bash
881
+
# Default (relative to working directory)
882
+
STATIC_FILES_DIR=www
883
+
884
+
# Absolute path
885
+
STATIC_FILES_DIR=/var/www/quickdid
886
+
887
+
# Docker container path
888
+
STATIC_FILES_DIR=/app/www
889
+
890
+
# Custom directory
891
+
STATIC_FILES_DIR=./public
892
+
```
893
+
894
+
**Docker Volume Mounting**:
895
+
```yaml
896
+
volumes:
897
+
# Mount entire custom directory
898
+
- ./custom-www:/app/www:ro
899
+
900
+
# Mount specific files
901
+
- ./custom-index.html:/app/www/index.html:ro
902
+
- ./well-known:/app/www/.well-known:ro
903
+
```
904
+
905
+
**Generating Well-Known Files**:
906
+
```bash
907
+
# Generate .well-known files for your domain
908
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
909
+
```
910
+
911
+
## HTTP Caching Configuration
912
+
913
+
### `CACHE_MAX_AGE`
914
+
915
+
**Required**: No
916
+
**Type**: Integer (seconds)
917
+
**Default**: `86400` (24 hours)
918
+
**Range**: 0-31536000 (0 to 1 year)
919
+
920
+
Maximum age for HTTP Cache-Control header in seconds. When set to 0, the Cache-Control header is disabled and will not be added to responses. This controls how long clients and intermediate caches can cache responses.
921
+
922
+
**Examples**:
923
+
```bash
924
+
# Default (24 hours)
925
+
CACHE_MAX_AGE=86400
926
+
927
+
# Aggressive caching (7 days)
928
+
CACHE_MAX_AGE=604800
929
+
930
+
# Conservative caching (1 hour)
931
+
CACHE_MAX_AGE=3600
932
+
933
+
# Disable Cache-Control header
934
+
CACHE_MAX_AGE=0
935
+
```
936
+
937
+
### `CACHE_STALE_IF_ERROR`
938
+
939
+
**Required**: No
940
+
**Type**: Integer (seconds)
941
+
**Default**: `172800` (48 hours)
942
+
943
+
Allows stale content to be served if the backend encounters an error. This provides resilience during service outages.
944
+
945
+
**Examples**:
946
+
```bash
947
+
# Default (48 hours)
948
+
CACHE_STALE_IF_ERROR=172800
949
+
950
+
# Extended error tolerance (7 days)
951
+
CACHE_STALE_IF_ERROR=604800
952
+
953
+
# Minimal error tolerance (1 hour)
954
+
CACHE_STALE_IF_ERROR=3600
955
+
```
956
+
957
+
### `CACHE_STALE_WHILE_REVALIDATE`
958
+
959
+
**Required**: No
960
+
**Type**: Integer (seconds)
961
+
**Default**: `86400` (24 hours)
962
+
963
+
Allows stale content to be served while fresh content is being fetched in the background. This improves perceived performance.
964
+
965
+
**Examples**:
966
+
```bash
967
+
# Default (24 hours)
968
+
CACHE_STALE_WHILE_REVALIDATE=86400
969
+
970
+
# Quick revalidation (1 hour)
971
+
CACHE_STALE_WHILE_REVALIDATE=3600
972
+
973
+
# Extended revalidation (7 days)
974
+
CACHE_STALE_WHILE_REVALIDATE=604800
975
+
```
976
+
977
+
### `CACHE_MAX_STALE`
978
+
979
+
**Required**: No
980
+
**Type**: Integer (seconds)
981
+
**Default**: `172800` (48 hours)
982
+
983
+
Maximum time a client will accept stale responses. This provides an upper bound on how old cached content can be.
984
+
985
+
**Examples**:
986
+
```bash
987
+
# Default (48 hours)
988
+
CACHE_MAX_STALE=172800
989
+
990
+
# Extended staleness (7 days)
991
+
CACHE_MAX_STALE=604800
992
+
993
+
# Strict freshness (1 hour)
994
+
CACHE_MAX_STALE=3600
995
+
```
996
+
997
+
### `CACHE_MIN_FRESH`
998
+
999
+
**Required**: No
1000
+
**Type**: Integer (seconds)
1001
+
**Default**: `3600` (1 hour)
1002
+
1003
+
Minimum time a response must remain fresh. Clients will not accept responses that will expire within this time.
1004
+
1005
+
**Examples**:
1006
+
```bash
1007
+
# Default (1 hour)
1008
+
CACHE_MIN_FRESH=3600
1009
+
1010
+
# Strict freshness (24 hours)
1011
+
CACHE_MIN_FRESH=86400
1012
+
1013
+
# Relaxed freshness (5 minutes)
1014
+
CACHE_MIN_FRESH=300
1015
+
```
1016
+
1017
+
**Cache-Control Header Format**:
1018
+
1019
+
When `CACHE_MAX_AGE` is greater than 0, the following Cache-Control header is added to responses:
1020
+
```
1021
+
Cache-Control: public, max-age=86400, stale-while-revalidate=86400, stale-if-error=172800, max-stale=172800, min-fresh=3600
1022
+
```
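
Assembling that header from the configured values is straightforward; a sketch (the function name is illustrative):

```rust
/// Build the Cache-Control value from the configured directives.
/// max_age == 0 means the header is omitted entirely (CACHE_MAX_AGE=0).
fn cache_control_value(
    max_age: u64,
    stale_while_revalidate: u64,
    stale_if_error: u64,
    max_stale: u64,
    min_fresh: u64,
) -> Option<String> {
    if max_age == 0 {
        return None; // header disabled
    }
    Some(format!(
        "public, max-age={max_age}, stale-while-revalidate={stale_while_revalidate}, \
         stale-if-error={stale_if_error}, max-stale={max_stale}, min-fresh={min_fresh}"
    ))
}
```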
1023
+
1024
+
**Recommendations**:
1025
+
- **High-traffic services**: Use longer max-age (86400-604800) to reduce load
1026
+
- **Frequently changing data**: Use shorter max-age (3600-14400)
1027
+
- **Critical services**: Set higher stale-if-error for resilience
1028
+
- **Performance-sensitive**: Enable stale-while-revalidate for better UX
1029
+
- **Disable caching**: Set CACHE_MAX_AGE=0 for real-time data
1030
+
1031
+
### `ETAG_SEED`
1032
+
1033
+
**Required**: No
1034
+
**Type**: String
1035
+
**Default**: Application version (from `CARGO_PKG_VERSION`)
1036
+
1037
+
Seed value for ETAG generation to allow cache invalidation. This value is incorporated into ETAG checksums, allowing server administrators to invalidate client-cached responses after major changes or deployments.
1038
+
1039
+
**How it works** (sketched after this list):
1040
+
- Combined with response content to generate ETAG checksums
1041
+
- Uses MetroHash64 for fast, non-cryptographic hashing
1042
+
- Generates weak ETags (W/"hash") for HTTP caching
1043
+
- Changing the seed invalidates all client caches
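
A sketch of that scheme with the `metrohash` crate; the exact bytes QuickDID feeds into the hash are an assumption of this sketch:

```rust
use metrohash::MetroHash64;
use std::hash::Hasher;

/// Weak ETag generation: hash the seed plus the response body with MetroHash64.
fn weak_etag(seed: &str, body: &[u8]) -> String {
    let mut hasher = MetroHash64::default();
    hasher.write(seed.as_bytes());
    hasher.write(body);
    format!("W/\"{:016x}\"", hasher.finish())
}
```

Changing `ETAG_SEED` changes every ETag, so conditional requests from clients stop matching and fresh responses are returned.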
1044
+
1045
+
**Examples**:
1046
+
```bash
1047
+
# Default (uses application version)
1048
+
# ETAG_SEED is automatically set to the version
1049
+
1050
+
# Deployment-specific seed
1051
+
ETAG_SEED=prod-2024-01-15
1052
+
1053
+
# Version with timestamp
1054
+
ETAG_SEED=v1.0.0-1705344000
1055
+
1056
+
# Environment-specific
1057
+
ETAG_SEED=staging-v2
1058
+
1059
+
# Force cache invalidation after config change
1060
+
ETAG_SEED=config-update-2024-01-15
1061
+
```
1062
+
1063
+
**Use cases**:
1064
+
- **Major configuration changes**: Update seed to invalidate all cached responses
1065
+
- **Data migration**: Force clients to refetch after backend changes
1066
+
- **Security updates**: Ensure clients get fresh data after security fixes
1067
+
- **A/B testing**: Different seeds for different deployment groups
1068
+
- **Rollback scenarios**: Revert to previous seed to restore cache behavior
1069
+
1070
+
**Recommendations**:
1071
+
- **Default**: Use the application version (automatic)
1072
+
- **Production**: Include deployment date or config version
1073
+
- **Staging**: Use environment-specific seeds
1074
+
- **After incidents**: Update seed to force fresh data
1075
+
- **Routine deployments**: Keep the same seed if no data changes
1076
+
369
1077
## Configuration Examples
370
1078
371
1079
### Minimal Development Configuration
···
373
1081
```bash
374
1082
# .env.development
375
1083
HTTP_EXTERNAL=localhost:3007
376
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
377
1084
RUST_LOG=debug
378
1085
```
379
1086
380
-
### Standard Production Configuration
1087
+
### Standard Production Configuration (Redis)
381
1088
382
1089
```bash
383
-
# .env.production
1090
+
# .env.production.redis
384
1091
# Required
385
1092
HTTP_EXTERNAL=quickdid.example.com
386
-
SERVICE_KEY=${SECRET_SERVICE_KEY} # From secrets manager
387
1093
388
1094
# Network
389
1095
HTTP_PORT=8080
390
1096
USER_AGENT=quickdid/1.0.0 (+https://quickdid.example.com)
391
1097
392
-
# Caching
1098
+
# Caching (Redis-based)
393
1099
REDIS_URL=redis://redis:6379/0
394
1100
CACHE_TTL_MEMORY=600
395
1101
CACHE_TTL_REDIS=86400 # 1 day
···
398
1104
QUEUE_ADAPTER=redis
399
1105
QUEUE_REDIS_TIMEOUT=5
400
1106
QUEUE_BUFFER_SIZE=5000
1107
+
QUEUE_REDIS_DEDUP_ENABLED=true # Prevent duplicate work
1108
+
QUEUE_REDIS_DEDUP_TTL=60
1109
+
1110
+
# Rate Limiting (optional, recommended for production)
1111
+
RESOLVER_MAX_CONCURRENT=100
1112
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 # 5 second timeout
1113
+
1114
+
# Metrics (optional, recommended for production)
1115
+
METRICS_ADAPTER=statsd
1116
+
METRICS_STATSD_HOST=localhost:8125
1117
+
METRICS_PREFIX=quickdid
1118
+
METRICS_TAGS=env:prod,service:quickdid
1119
+
1120
+
# Proactive Refresh (optional, recommended for high-traffic)
1121
+
PROACTIVE_REFRESH_ENABLED=true
1122
+
PROACTIVE_REFRESH_THRESHOLD=0.8
1123
+
1124
+
# Jetstream Consumer (optional, recommended for real-time sync)
1125
+
JETSTREAM_ENABLED=true
1126
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1127
+
1128
+
# HTTP Caching (Cache-Control headers)
1129
+
CACHE_MAX_AGE=86400 # 24 hours
1130
+
CACHE_STALE_IF_ERROR=172800 # 48 hours
1131
+
CACHE_STALE_WHILE_REVALIDATE=86400 # 24 hours
401
1132
402
1133
# Logging
403
1134
RUST_LOG=info
404
1135
```
405
1136
406
-
### High-Availability Configuration
1137
+
### Standard Production Configuration (SQLite)
407
1138
408
1139
```bash
409
-
# .env.ha
1140
+
# .env.production.sqlite
410
1141
# Required
411
1142
HTTP_EXTERNAL=quickdid.example.com
412
-
SERVICE_KEY=${SECRET_SERVICE_KEY}
1143
+
1144
+
# Network
1145
+
HTTP_PORT=8080
1146
+
USER_AGENT=quickdid/1.0.0 (+https://quickdid.example.com)
1147
+
1148
+
# Caching (SQLite-based for single instance)
1149
+
SQLITE_URL=sqlite:/data/quickdid.db
1150
+
CACHE_TTL_MEMORY=600
1151
+
CACHE_TTL_SQLITE=86400 # 1 day
1152
+
1153
+
# Queue (SQLite for single instance with persistence)
1154
+
QUEUE_ADAPTER=sqlite
1155
+
QUEUE_BUFFER_SIZE=5000
1156
+
QUEUE_SQLITE_MAX_SIZE=10000
1157
+
1158
+
# Rate Limiting (optional, recommended for production)
1159
+
RESOLVER_MAX_CONCURRENT=100
1160
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 # 5 second timeout
1161
+
1162
+
# Jetstream Consumer (optional, recommended for real-time sync)
1163
+
JETSTREAM_ENABLED=true
1164
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1165
+
1166
+
# HTTP Caching (Cache-Control headers)
1167
+
CACHE_MAX_AGE=86400 # 24 hours
1168
+
CACHE_STALE_IF_ERROR=172800 # 48 hours
1169
+
CACHE_STALE_WHILE_REVALIDATE=86400 # 24 hours
1170
+
1171
+
# Logging
1172
+
RUST_LOG=info
1173
+
```
1174
+
1175
+
### High-Availability Configuration (Redis)
1176
+
1177
+
```bash
1178
+
# .env.ha.redis
1179
+
# Required
1180
+
HTTP_EXTERNAL=quickdid.example.com
413
1181
414
1182
# Network
415
1183
HTTP_PORT=8080
···
426
1194
QUEUE_REDIS_PREFIX=prod:queue:
427
1195
QUEUE_WORKER_ID=${HOSTNAME:-worker1}
428
1196
QUEUE_REDIS_TIMEOUT=10
1197
+
QUEUE_REDIS_DEDUP_ENABLED=true # Essential for multi-instance
1198
+
QUEUE_REDIS_DEDUP_TTL=120 # Longer TTL for HA
429
1199
430
1200
# Performance
431
1201
QUEUE_BUFFER_SIZE=10000
432
1202
1203
+
# Rate Limiting (important for HA deployments)
1204
+
RESOLVER_MAX_CONCURRENT=500
1205
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=10000 # 10 second timeout for HA
1206
+
1207
+
# Metrics (recommended for HA monitoring)
1208
+
METRICS_ADAPTER=statsd
1209
+
METRICS_STATSD_HOST=statsd:8125
1210
+
METRICS_PREFIX=quickdid.prod
1211
+
METRICS_TAGS=env:prod,service:quickdid,cluster:ha
1212
+
1213
+
# Proactive Refresh (recommended for HA)
1214
+
PROACTIVE_REFRESH_ENABLED=true
1215
+
PROACTIVE_REFRESH_THRESHOLD=0.7 # More aggressive for HA
1216
+
1217
+
# Jetstream Consumer (recommended for real-time sync in HA)
1218
+
JETSTREAM_ENABLED=true
1219
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1220
+
433
1221
# Logging
434
1222
RUST_LOG=warn
435
1223
```
436
1224
437
-
### Docker Compose Configuration
1225
+
### Hybrid Configuration (Redis + SQLite Fallback)
1226
+
1227
+
```bash
1228
+
# .env.hybrid
1229
+
# Required
1230
+
HTTP_EXTERNAL=quickdid.example.com
1231
+
1232
+
# Network
1233
+
HTTP_PORT=8080
1234
+
1235
+
# Caching (Redis primary, SQLite fallback)
1236
+
REDIS_URL=redis://redis:6379/0
1237
+
SQLITE_URL=sqlite:/data/fallback.db
1238
+
CACHE_TTL_MEMORY=600
1239
+
CACHE_TTL_REDIS=86400
1240
+
CACHE_TTL_SQLITE=604800 # 1 week (longer for fallback)
1241
+
1242
+
# Queue
1243
+
QUEUE_ADAPTER=redis
1244
+
QUEUE_REDIS_TIMEOUT=5
1245
+
1246
+
# Logging
1247
+
RUST_LOG=info
1248
+
```
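One way to exercise the SQLite fallback in this hybrid setup is to stop Redis briefly and confirm that resolutions still succeed. A rough sketch, assuming the compose service is named `redis` and the service listens on port 8080:

```bash
# Stop Redis, confirm lookups still work (served from SQLite/memory), then restore it
docker-compose stop redis
curl "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.bsky.social"
docker-compose start redis
```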
1249
+
1250
+
### Docker Compose Configuration (Redis)
438
1251
439
1252
```yaml
1253
+
# docker-compose.redis.yml
440
1254
version: '3.8'
441
1255
442
1256
services:
···
444
1258
image: quickdid:latest
445
1259
environment:
446
1260
HTTP_EXTERNAL: quickdid.example.com
447
-
SERVICE_KEY: ${SERVICE_KEY}
448
1261
HTTP_PORT: 8080
449
1262
REDIS_URL: redis://redis:6379/0
450
1263
CACHE_TTL_MEMORY: 600
451
1264
CACHE_TTL_REDIS: 86400
452
1265
QUEUE_ADAPTER: redis
453
1266
QUEUE_REDIS_TIMEOUT: 5
1267
+
JETSTREAM_ENABLED: true
1268
+
JETSTREAM_HOSTNAME: jetstream.atproto.tools
454
1269
RUST_LOG: info
455
1270
ports:
456
1271
- "8080:8080"
···
462
1277
command: redis-server --maxmemory 256mb --maxmemory-policy allkeys-lru
463
1278
```
464
1279
1280
+
### Docker Compose Configuration (SQLite)
1281
+
1282
+
```yaml
1283
+
# docker-compose.sqlite.yml
1284
+
version: '3.8'
1285
+
1286
+
services:
1287
+
quickdid:
1288
+
image: quickdid:latest
1289
+
environment:
1290
+
HTTP_EXTERNAL: quickdid.example.com
1291
+
HTTP_PORT: 8080
1292
+
SQLITE_URL: sqlite:/data/quickdid.db
1293
+
CACHE_TTL_MEMORY: 600
1294
+
CACHE_TTL_SQLITE: 86400
1295
+
QUEUE_ADAPTER: sqlite
1296
+
QUEUE_BUFFER_SIZE: 5000
1297
+
QUEUE_SQLITE_MAX_SIZE: 10000
1298
+
JETSTREAM_ENABLED: true
1299
+
JETSTREAM_HOSTNAME: jetstream.atproto.tools
1300
+
RUST_LOG: info
1301
+
ports:
1302
+
- "8080:8080"
1303
+
volumes:
1304
+
- quickdid-data:/data
1305
+
1306
+
volumes:
1307
+
quickdid-data:
1308
+
driver: local
1309
+
```
1310
+
465
1311
## Validation Rules
466
1312
467
1313
QuickDID validates configuration at startup. The following rules are enforced:
···
469
1315
### Required Fields
470
1316
471
1317
1. **HTTP_EXTERNAL**: Must be provided
472
-
2. **SERVICE_KEY**: Must be provided
1318
+
2. **HTTP_EXTERNAL**: Must be provided
473
1319
474
1320
### Value Constraints
475
1321
476
-
1. **TTL Values** (`CACHE_TTL_MEMORY`, `CACHE_TTL_REDIS`):
1322
+
1. **TTL Values** (`CACHE_TTL_MEMORY`, `CACHE_TTL_REDIS`, `CACHE_TTL_SQLITE`):
477
1323
- Must be positive integers (> 0)
478
1324
- Recommended minimum: 60 seconds
479
1325
···
482
1328
- Recommended range: 1-60 seconds
483
1329
484
1330
3. **Queue Adapter** (`QUEUE_ADAPTER`):
485
-
- Must be one of: `mpsc`, `redis`, `noop`, `none`
1331
+
- Must be one of: `mpsc`, `redis`, `sqlite`, `noop`, `none`
486
1332
- Case-sensitive
487
1333
488
-
4. **Port** (`HTTP_PORT`):
1334
+
4. **Rate Limiting** (`RESOLVER_MAX_CONCURRENT`):
1335
+
- Must be between 0 and 10000
1336
+
- 0 = disabled (default)
1337
+
- Values > 10000 will fail validation
1338
+
1339
+
5. **Rate Limiting Timeout** (`RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`):
1340
+
- Must be between 0 and 60000 (milliseconds)
1341
+
- 0 = no timeout (default)
1342
+
- Values > 60000 will fail validation
1343
+
1344
+
6. **Port** (`HTTP_PORT`):
489
1345
- Must be valid port number (1-65535)
490
1346
- Ports < 1024 require elevated privileges
491
1347
···
504
1360
505
1361
```bash
506
1362
# Validate configuration
507
-
HTTP_EXTERNAL=test SERVICE_KEY=test quickdid --help
1363
+
HTTP_EXTERNAL=test quickdid --help
508
1364
509
1365
# Test with specific values
510
1366
CACHE_TTL_MEMORY=0 quickdid --help # Will fail validation
511
1367
512
1368
# Check parsed configuration (with debug logging)
513
-
RUST_LOG=debug HTTP_EXTERNAL=test SERVICE_KEY=test quickdid
1369
+
RUST_LOG=debug HTTP_EXTERNAL=test quickdid
514
1370
```
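The same approach applies to the rate-limiting constraints above; out-of-range values are rejected at startup:

```bash
# Exceeds the 0-10000 limit for RESOLVER_MAX_CONCURRENT - will fail validation
RESOLVER_MAX_CONCURRENT=20000 HTTP_EXTERNAL=test quickdid --help

# Exceeds the 60000 ms limit for RESOLVER_MAX_CONCURRENT_TIMEOUT_MS - will fail validation
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=90000 HTTP_EXTERNAL=test quickdid --help
```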
515
1371
516
1372
## Best Practices
517
1373
518
1374
### Security
519
1375
520
-
1. **Never commit SERVICE_KEY** to version control
521
-
2. Use environment-specific key management (Vault, AWS Secrets, etc.)
522
-
3. Rotate SERVICE_KEY regularly
523
-
4. Use TLS for Redis connections in production (`rediss://`)
1376
+
1. Use environment-specific configuration management
1377
+
2. Use TLS for Redis connections in production (`rediss://`)
1378
+
3. Never commit sensitive configuration to version control
524
1379
5. Implement network segmentation for Redis access
525
1380
526
1381
### Performance
527
1382
528
1383
1. **With Redis**: Use lower memory cache TTL (300-600s)
529
-
2. **Without Redis**: Use higher memory cache TTL (1800-3600s)
530
-
3. **High traffic**: Increase QUEUE_BUFFER_SIZE (5000-10000)
531
-
4. **Multi-region**: Use region-specific QUEUE_WORKER_ID
1384
+
2. **With SQLite**: Use moderate memory cache TTL (600-1800s)
1385
+
3. **Without persistent cache**: Use higher memory cache TTL (1800-3600s)
1386
+
4. **High traffic**: Increase QUEUE_BUFFER_SIZE (5000-10000)
1387
+
5. **Multi-region**: Use region-specific QUEUE_WORKER_ID
1388
+
1389
+
### Caching and Queue Strategy
1390
+
1391
+
1. **Multi-instance/HA deployments**: Use Redis for distributed caching and queuing
1392
+
2. **Single-instance deployments**: Use SQLite for persistent caching and queuing
1393
+
3. **Development/testing**: Use memory-only caching with MPSC queuing
1394
+
4. **Hybrid setups**: Configure both Redis and SQLite for redundancy
1395
+
5. **Real-time sync**: Enable Jetstream consumer for live cache updates
1396
+
6. **Queue adapter guidelines**:
1397
+
- Redis: Best for multi-instance deployments with distributed processing
1398
+
- SQLite: Best for single-instance deployments needing persistence
1399
+
- MPSC: Best for single-instance deployments without persistence needs
1400
+
7. **Cache TTL guidelines**:
1401
+
- Redis: Shorter TTLs (1-7 days) for frequently updated handles
1402
+
- SQLite: Longer TTLs (7-90 days) for stable single-instance caching
1403
+
- Memory: Short TTLs (5-30 minutes) as fallback
1404
+
8. **Jetstream guidelines**:
1405
+
- Production: Enable for real-time cache synchronization
1406
+
- High-traffic: Essential for reducing stale data
1407
+
- Development: Can be disabled for simpler testing
1408
+
- Monitor WebSocket connection health in production
532
1409
533
1410
### Monitoring
534
1411
···
540
1417
### Deployment
541
1418
542
1419
1. Use `.env` files for local development
543
-
2. Use secrets management for production SERVICE_KEY
1420
+
2. Use secrets management for production configurations
544
1421
3. Set resource limits in container orchestration
545
1422
4. Use health checks to monitor service availability
546
-
5. Implement gradual rollouts with feature flags
1423
+
5. Implement gradual rollouts with feature flags
1424
+
6. **SQLite deployments**: Ensure persistent volume for database file
1425
+
7. **Redis deployments**: Configure Redis persistence and backup
1426
+
8. **Hybrid deployments**: Test fallback scenarios (Redis unavailable)
+354
-24
docs/production-deployment.md
+354
-24
docs/production-deployment.md
···
1
1
# QuickDID Production Deployment Guide
2
2
3
-
This guide provides comprehensive instructions for deploying QuickDID in a production environment using Docker.
3
+
This guide provides comprehensive instructions for deploying QuickDID in a production environment using Docker. QuickDID supports multiple caching strategies: Redis (distributed), SQLite (single-instance), or in-memory caching.
4
4
5
5
## Table of Contents
6
6
···
17
17
- Docker 20.10.0 or higher
18
18
- Docker Compose 2.0.0 or higher (optional, for multi-container setup)
19
19
- Redis 6.0 or higher (optional, for persistent caching and queue management)
20
+
- SQLite 3.35 or higher (optional, alternative to Redis for single-instance caching)
20
21
- Valid SSL certificates for HTTPS (recommended for production)
21
22
- Domain name configured with appropriate DNS records
22
23
···
41
42
# - localhost:3007 (for testing only)
42
43
HTTP_EXTERNAL=quickdid.example.com
43
44
44
-
# Private key for service identity (DID format)
45
-
# Generate a new key for production using atproto-identity tools
46
-
# SECURITY: Keep this key secure and never commit to version control
47
-
# Example formats:
48
-
# - did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
49
-
# - did:plc:xyz123abc456
50
-
SERVICE_KEY=did:key:YOUR_PRODUCTION_KEY_HERE
51
-
52
45
# ----------------------------------------------------------------------------
53
46
# NETWORK CONFIGURATION
54
47
# ----------------------------------------------------------------------------
···
66
59
# CACHING CONFIGURATION
67
60
# ----------------------------------------------------------------------------
68
61
69
-
# Redis connection URL for caching (highly recommended for production)
62
+
# Redis connection URL for caching (recommended for production)
70
63
# Format: redis://[username:password@]host:port/database
71
64
# Examples:
72
65
# - redis://localhost:6379/0 (local Redis, no auth)
···
76
69
# Benefits: Persistent cache, distributed caching, better performance
77
70
REDIS_URL=redis://redis:6379/0
78
71
72
+
# SQLite database URL for caching (alternative to Redis for single-instance deployments)
73
+
# Format: sqlite:path/to/database.db
74
+
# Examples:
75
+
# - sqlite:./quickdid.db (file-based database)
76
+
# - sqlite::memory: (in-memory database for testing)
77
+
# - sqlite:/var/lib/quickdid/cache.db (absolute path)
78
+
# Benefits: Persistent cache, single-file storage, no external dependencies
79
+
# Note: Cache priority is Redis > SQLite > Memory (first available is used)
80
+
# SQLITE_URL=sqlite:./quickdid.db
81
+
79
82
# TTL for in-memory cache in seconds (default: 600 = 10 minutes)
80
83
# Range: 60-3600 recommended
81
84
# Lower = fresher data, more DNS/HTTP lookups
···
90
93
# - 7776000 (90 days) for stable data
91
94
CACHE_TTL_REDIS=86400
92
95
96
+
# TTL for SQLite cache in seconds (default: 7776000 = 90 days)
97
+
# Range: 3600-31536000 (1 hour to 1 year)
98
+
# Same recommendations as Redis TTL
99
+
# Only used when SQLITE_URL is configured
100
+
CACHE_TTL_SQLITE=86400
101
+
93
102
# ----------------------------------------------------------------------------
94
103
# QUEUE CONFIGURATION
95
104
# ----------------------------------------------------------------------------
96
105
97
-
# Queue adapter type: 'mpsc', 'redis', 'noop', or 'none' (default: mpsc)
106
+
# Queue adapter type: 'mpsc', 'redis', 'sqlite', 'noop', or 'none' (default: mpsc)
98
107
# - 'mpsc': In-memory queue for single-instance deployments
99
108
# - 'redis': Distributed queue for multi-instance or HA deployments
109
+
# - 'sqlite': Persistent queue for single-instance deployments
100
110
# - 'noop': Disable queue processing (testing only)
101
111
# - 'none': Alias for 'noop'
102
112
QUEUE_ADAPTER=redis
···
115
125
# Higher = less polling overhead, slower shutdown
116
126
QUEUE_REDIS_TIMEOUT=5
117
127
128
+
# Enable deduplication for Redis queue to prevent duplicate handles (default: false)
129
+
# When enabled, uses Redis SET with TTL to track handles being processed
130
+
# Prevents the same handle from being queued multiple times within the TTL window
131
+
QUEUE_REDIS_DEDUP_ENABLED=false
132
+
133
+
# TTL for Redis queue deduplication keys in seconds (default: 60)
134
+
# Range: 10-300 recommended
135
+
# Determines how long to prevent duplicate handle resolution requests
136
+
QUEUE_REDIS_DEDUP_TTL=60
137
+
118
138
# Worker ID for Redis queue (defaults to "worker1")
119
139
# Set this for predictable worker identification in multi-instance deployments
120
140
# Examples: worker-001, prod-us-east-1, $(hostname)
···
125
145
# Increase for high-traffic deployments using MPSC adapter
126
146
QUEUE_BUFFER_SIZE=5000
127
147
148
+
# Maximum queue size for SQLite adapter work shedding (default: 10000)
149
+
# Range: 100-1000000 (recommended)
150
+
# When exceeded, oldest entries are deleted to maintain this limit
151
+
# Set to 0 to disable work shedding (unlimited queue size)
152
+
# Benefits: Prevents unbounded disk usage, maintains recent work items
153
+
QUEUE_SQLITE_MAX_SIZE=10000
154
+
128
155
# ----------------------------------------------------------------------------
129
156
# HTTP CLIENT CONFIGURATION
130
157
# ----------------------------------------------------------------------------
···
133
160
# Identifies your service to other AT Protocol services
134
161
# Default: Auto-generated with current version from Cargo.toml
135
162
# Format: quickdid/{version} (+https://github.com/smokesignal.events/quickdid)
136
-
USER_AGENT=quickdid/1.0.0-rc.2 (+https://quickdid.example.com)
163
+
USER_AGENT=quickdid/1.0.0-rc.5 (+https://quickdid.example.com)
137
164
138
165
# Custom DNS nameservers (comma-separated)
139
166
# Use for custom DNS resolution or to bypass local DNS
···
159
186
# RUST_LOG_FORMAT=json
160
187
161
188
# ----------------------------------------------------------------------------
189
+
# RATE LIMITING CONFIGURATION
190
+
# ----------------------------------------------------------------------------
191
+
192
+
# Maximum concurrent handle resolutions (default: 0 = disabled)
193
+
# When > 0, enables semaphore-based rate limiting
194
+
# Range: 0-10000 (0 = disabled)
195
+
# Protects upstream DNS/HTTP services from being overwhelmed
196
+
RESOLVER_MAX_CONCURRENT=0
197
+
198
+
# Timeout for acquiring rate limit permit in milliseconds (default: 0 = no timeout)
199
+
# When > 0, requests will timeout if they can't acquire a permit within this time
200
+
# Range: 0-60000 (max 60 seconds)
201
+
# Prevents requests from waiting indefinitely when rate limiter is at capacity
202
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=0
203
+
204
+
# ----------------------------------------------------------------------------
205
+
# HTTP CACHING CONFIGURATION
206
+
# ----------------------------------------------------------------------------
207
+
208
+
# ETAG seed for cache invalidation (default: application version)
209
+
# Used to generate ETAG checksums for HTTP responses
210
+
# Changing this value invalidates all client-cached responses
211
+
# Examples:
212
+
# - prod-2024-01-15 (deployment-specific)
213
+
# - v1.0.0-1705344000 (version with timestamp)
214
+
# - config-update-2024-01-15 (after configuration changes)
215
+
# Default uses the application version from Cargo.toml
216
+
# ETAG_SEED=prod-2024-01-15
217
+
218
+
# Maximum age for HTTP Cache-Control header in seconds (default: 86400 = 24 hours)
219
+
# Set to 0 to disable Cache-Control header
220
+
# Controls how long clients and intermediate caches can cache responses
221
+
CACHE_MAX_AGE=86400
222
+
223
+
# Stale-if-error directive for Cache-Control in seconds (default: 172800 = 48 hours)
224
+
# Allows stale content to be served if backend errors occur
225
+
# Provides resilience during service outages
226
+
CACHE_STALE_IF_ERROR=172800
227
+
228
+
# Stale-while-revalidate directive for Cache-Control in seconds (default: 86400 = 24 hours)
229
+
# Allows stale content to be served while fetching fresh content in background
230
+
# Improves perceived performance for users
231
+
CACHE_STALE_WHILE_REVALIDATE=86400
232
+
233
+
# Max-stale directive for Cache-Control in seconds (default: 172800 = 48 hours)
234
+
# Maximum time client will accept stale responses
235
+
# Provides upper bound on cached content age
236
+
CACHE_MAX_STALE=172800
237
+
238
+
# Min-fresh directive for Cache-Control in seconds (default: 3600 = 1 hour)
239
+
# Minimum time response must remain fresh
240
+
# Clients won't accept responses expiring within this time
241
+
CACHE_MIN_FRESH=3600
242
+
243
+
# ----------------------------------------------------------------------------
244
+
# METRICS CONFIGURATION
245
+
# ----------------------------------------------------------------------------
246
+
247
+
# Metrics adapter type: 'noop' or 'statsd' (default: noop)
248
+
# - 'noop': No metrics collection (default)
249
+
# - 'statsd': Send metrics to StatsD server
250
+
METRICS_ADAPTER=statsd
251
+
252
+
# StatsD host and port (required when METRICS_ADAPTER=statsd)
253
+
# Format: hostname:port
254
+
# Examples:
255
+
# - localhost:8125 (local StatsD)
256
+
# - statsd.example.com:8125 (remote StatsD)
257
+
METRICS_STATSD_HOST=localhost:8125
258
+
259
+
# Bind address for StatsD UDP socket (default: [::]:0)
260
+
# Controls which local address to bind for sending UDP packets
261
+
# Examples:
262
+
# - [::]:0 (IPv6 any address, random port - default)
263
+
# - 0.0.0.0:0 (IPv4 any address, random port)
264
+
# - 192.168.1.100:0 (specific interface)
265
+
METRICS_STATSD_BIND=[::]:0
266
+
267
+
# Prefix for all metrics (default: quickdid)
268
+
# Used to namespace metrics in your monitoring system
269
+
# Examples:
270
+
# - quickdid (default)
271
+
# - prod.quickdid
272
+
# - us-east-1.quickdid
273
+
METRICS_PREFIX=quickdid
274
+
275
+
# Tags for all metrics (comma-separated key:value pairs)
276
+
# Added to all metrics for filtering and grouping
277
+
# Examples:
278
+
# - env:production,service:quickdid
279
+
# - env:staging,region:us-east-1,version:1.0.0
280
+
METRICS_TAGS=env:production,service:quickdid
281
+
282
+
# ----------------------------------------------------------------------------
283
+
# PROACTIVE REFRESH CONFIGURATION
284
+
# ----------------------------------------------------------------------------
285
+
286
+
# Enable proactive cache refresh (default: false)
287
+
# When enabled, cache entries nearing expiration are automatically refreshed
288
+
# in the background to prevent cache misses for frequently accessed handles
289
+
PROACTIVE_REFRESH_ENABLED=false
290
+
291
+
# Threshold for proactive refresh as percentage of TTL (default: 0.8)
292
+
# Range: 0.0-1.0 (0% to 100% of TTL)
293
+
# Example: 0.8 means refresh when 80% of TTL has elapsed
294
+
# Lower values = more aggressive refreshing, higher load
295
+
# Higher values = less aggressive refreshing, more cache misses
296
+
PROACTIVE_REFRESH_THRESHOLD=0.8
297
+
298
+
# ----------------------------------------------------------------------------
299
+
# JETSTREAM CONSUMER CONFIGURATION
300
+
# ----------------------------------------------------------------------------
301
+
302
+
# Enable Jetstream consumer for real-time cache updates (default: false)
303
+
# When enabled, connects to AT Protocol firehose for live updates
304
+
# Processes Account events (deleted/deactivated) and Identity events (handle changes)
305
+
# Automatically reconnects with exponential backoff on connection failures
306
+
JETSTREAM_ENABLED=false
307
+
308
+
# Jetstream WebSocket hostname (default: jetstream.atproto.tools)
309
+
# The firehose service to connect to for real-time AT Protocol events
310
+
# Examples:
311
+
# - jetstream.atproto.tools (production firehose)
312
+
# - jetstream-staging.atproto.tools (staging environment)
313
+
# - localhost:6008 (local development)
314
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
315
+
316
+
# ----------------------------------------------------------------------------
317
+
# STATIC FILES CONFIGURATION
318
+
# ----------------------------------------------------------------------------
319
+
320
+
# Directory path for serving static files (default: www)
321
+
# This directory should contain:
322
+
# - index.html (landing page)
323
+
# - .well-known/atproto-did (service DID identifier)
324
+
# - .well-known/did.json (DID document)
325
+
# In Docker, this defaults to /app/www
326
+
# You can mount custom files via Docker volumes
327
+
STATIC_FILES_DIR=/app/www
328
+
329
+
# ----------------------------------------------------------------------------
162
330
# PERFORMANCE TUNING
163
331
# ----------------------------------------------------------------------------
164
332
···
263
431
264
432
## Docker Compose Setup
265
433
266
-
Create a `docker-compose.yml` file for a complete production setup:
434
+
### Redis-based Production Setup with Jetstream
435
+
436
+
Create a `docker-compose.yml` file for a complete production setup with Redis and optional Jetstream consumer:
267
437
268
438
```yaml
269
439
version: '3.8'
···
352
522
driver: local
353
523
```
354
524
525
+
### SQLite-based Single-Instance Setup with Jetstream
526
+
527
+
For single-instance deployments without Redis, create a simpler `docker-compose.sqlite.yml` with optional Jetstream consumer:
528
+
529
+
```yaml
530
+
version: '3.8'
531
+
532
+
services:
533
+
quickdid:
534
+
image: quickdid:latest
535
+
container_name: quickdid-sqlite
536
+
environment:
537
+
HTTP_EXTERNAL: quickdid.example.com
538
+
HTTP_PORT: 8080
539
+
SQLITE_URL: sqlite:/data/quickdid.db
540
+
CACHE_TTL_MEMORY: 600
541
+
CACHE_TTL_SQLITE: 86400
542
+
QUEUE_ADAPTER: sqlite
543
+
QUEUE_BUFFER_SIZE: 5000
544
+
QUEUE_SQLITE_MAX_SIZE: 10000
545
+
# Optional: Enable Jetstream for real-time cache updates
546
+
# JETSTREAM_ENABLED: true
547
+
# JETSTREAM_HOSTNAME: jetstream.atproto.tools
548
+
RUST_LOG: info
549
+
ports:
550
+
- "8080:8080"
551
+
volumes:
552
+
- quickdid-data:/data
553
+
networks:
554
+
- quickdid-network
555
+
restart: ${RESTART_POLICY:-unless-stopped}
556
+
deploy:
557
+
resources:
558
+
limits:
559
+
memory: ${MEMORY_LIMIT:-256M}
560
+
cpus: ${CPU_LIMIT:-0.5}
561
+
reservations:
562
+
memory: 128M
563
+
cpus: '0.25'
564
+
healthcheck:
565
+
test: ["CMD", "curl", "-f", "http://localhost:8080/health"]
566
+
interval: 30s
567
+
timeout: 3s
568
+
retries: 3
569
+
start_period: 10s
570
+
logging:
571
+
driver: "json-file"
572
+
options:
573
+
max-size: "10m"
574
+
max-file: "3"
575
+
576
+
# Optional: Nginx reverse proxy with SSL
577
+
nginx:
578
+
image: nginx:alpine
579
+
container_name: quickdid-nginx
580
+
ports:
581
+
- "80:80"
582
+
- "443:443"
583
+
volumes:
584
+
- ./nginx.conf:/etc/nginx/nginx.conf:ro
585
+
- ./certs:/etc/nginx/certs:ro
586
+
- ./acme-challenge:/var/www/acme:ro
587
+
depends_on:
588
+
- quickdid
589
+
networks:
590
+
- quickdid-network
591
+
restart: unless-stopped
592
+
logging:
593
+
driver: "json-file"
594
+
options:
595
+
max-size: "10m"
596
+
max-file: "3"
597
+
598
+
networks:
599
+
quickdid-network:
600
+
driver: bridge
601
+
602
+
volumes:
603
+
quickdid-data:
604
+
driver: local
605
+
```
606
+
355
607
### Nginx Configuration (nginx.conf)
356
608
357
609
```nginx
···
418
670
### Starting the Stack
419
671
420
672
```bash
421
-
# Start all services
673
+
# Start Redis-based stack
422
674
docker-compose up -d
423
675
676
+
# Start SQLite-based stack
677
+
docker-compose -f docker-compose.sqlite.yml up -d
678
+
424
679
# View logs
425
680
docker-compose logs -f
681
+
# or for SQLite setup
682
+
docker-compose -f docker-compose.sqlite.yml logs -f
426
683
427
684
# Check service status
428
685
docker-compose ps
429
686
430
687
# Stop all services
431
688
docker-compose down
689
+
# or for SQLite setup
690
+
docker-compose -f docker-compose.sqlite.yml down
432
691
```
433
692
434
693
## Health Monitoring
···
479
738
480
739
### 1. Service Key Protection
481
740
482
-
- **Never commit** the `SERVICE_KEY` to version control
741
+
- **Never commit** sensitive configuration to version control
483
742
- Store keys in a secure secret management system (e.g., HashiCorp Vault, AWS Secrets Manager)
484
743
- Rotate keys regularly
485
744
- Use different keys for different environments
···
524
783
docker logs quickdid
525
784
526
785
# Verify environment variables
527
-
docker exec quickdid env | grep -E "HTTP_EXTERNAL|SERVICE_KEY"
786
+
docker exec quickdid env | grep -E "HTTP_EXTERNAL|HTTP_PORT"
528
787
529
788
# Test Redis connectivity
530
789
docker exec quickdid redis-cli -h redis ping
···
539
798
# Check DNS resolution
540
799
docker exec quickdid nslookup plc.directory
541
800
542
-
# Verify Redis cache
801
+
# Verify Redis cache (if using Redis)
543
802
docker exec -it quickdid-redis redis-cli
544
803
> KEYS handle:*
545
804
> TTL handle:example_key
805
+
806
+
# Check SQLite cache (if using SQLite)
807
+
docker exec quickdid sqlite3 /data/quickdid.db ".tables"
808
+
docker exec quickdid sqlite3 /data/quickdid.db "SELECT COUNT(*) FROM handle_resolution_cache;"
546
809
```
547
810
548
811
#### 3. Performance Issues
549
812
550
813
```bash
551
-
# Monitor Redis memory usage
814
+
# Monitor Redis memory usage (if using Redis)
552
815
docker exec quickdid-redis redis-cli INFO memory
816
+
817
+
# Check SQLite database size (if using SQLite)
818
+
docker exec quickdid ls -lh /data/quickdid.db
819
+
docker exec quickdid sqlite3 /data/quickdid.db "PRAGMA page_count; PRAGMA page_size;"
553
820
554
821
# Check container resource usage
555
822
docker stats quickdid
···
577
844
# Test handle resolution
578
845
curl "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.bsky.social"
579
846
580
-
# Check Redis keys
847
+
# Check Redis keys (if using Redis)
581
848
docker exec quickdid-redis redis-cli --scan --pattern "handle:*" | head -20
582
849
850
+
# Check SQLite cache entries (if using SQLite)
851
+
docker exec quickdid sqlite3 /data/quickdid.db "SELECT COUNT(*) as total_entries, MIN(updated) as oldest, MAX(updated) as newest FROM handle_resolution_cache;"
852
+
853
+
# Check SQLite queue entries (if using SQLite queue adapter)
854
+
docker exec quickdid sqlite3 /data/quickdid.db "SELECT COUNT(*) as queue_entries, MIN(queued_at) as oldest, MAX(queued_at) as newest FROM handle_resolution_queue;"
855
+
583
856
# Monitor real-time logs
584
857
docker-compose logs -f quickdid | grep -E "ERROR|WARN"
585
858
```
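If you have tuned the HTTP caching settings, it is also worth confirming that the Cache-Control and ETag headers on responses reflect your CACHE_MAX_AGE / CACHE_STALE_* values. A quick check for a cacheable resolution response (adjust the URL to your deployment):

```bash
# Inspect caching-related response headers
curl -sI "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.bsky.social" \
  | grep -iE "cache-control|etag"
```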
···
588
861
589
862
### Backup and Restore
590
863
864
+
#### Redis Backup
591
865
```bash
592
866
# Backup Redis data
593
867
docker exec quickdid-redis redis-cli BGSAVE
···
598
872
docker restart quickdid-redis
599
873
```
600
874
875
+
#### SQLite Backup
876
+
```bash
877
+
# Backup SQLite database
878
+
docker exec quickdid sqlite3 /data/quickdid.db ".backup /tmp/backup.db"
879
+
docker cp quickdid:/tmp/backup.db ./backups/sqlite-$(date +%Y%m%d).db
880
+
881
+
# Alternative: Copy database file directly (service must be stopped)
882
+
docker-compose -f docker-compose.sqlite.yml stop quickdid
883
+
docker cp quickdid:/data/quickdid.db ./backups/sqlite-$(date +%Y%m%d).db
884
+
docker-compose -f docker-compose.sqlite.yml start quickdid
885
+
886
+
# Restore SQLite database
887
+
docker-compose -f docker-compose.sqlite.yml stop quickdid
888
+
docker cp ./backups/sqlite-backup.db quickdid:/data/quickdid.db
889
+
docker-compose -f docker-compose.sqlite.yml start quickdid
890
+
```
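Before relying on a SQLite backup, it is worth confirming the copy is a consistent database; `PRAGMA integrity_check` should print `ok`:

```bash
# Verify the backup file is a readable, consistent SQLite database
sqlite3 ./backups/sqlite-$(date +%Y%m%d).db "PRAGMA integrity_check;"
```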
891
+
601
892
### Updates and Rollbacks
602
893
603
894
```bash
···
628
919
629
920
## Performance Optimization
630
921
922
+
### Caching Strategy Selection
923
+
924
+
**Cache Priority**: QuickDID uses the first available cache in this order:
925
+
1. **Redis** (distributed, best for multi-instance)
926
+
2. **SQLite** (persistent, best for single-instance)
927
+
3. **Memory** (fast, but lost on restart)
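In practice this means a deployment that configures both backends uses Redis, while removing REDIS_URL from the same configuration falls back to SQLite. A minimal sketch (hostname is a placeholder):

```bash
# Both configured -> Redis is used as the persistent cache
HTTP_EXTERNAL=quickdid.example.com REDIS_URL=redis://redis:6379/0 SQLITE_URL=sqlite:/data/fallback.db quickdid

# REDIS_URL unset -> the same deployment uses SQLite instead
HTTP_EXTERNAL=quickdid.example.com SQLITE_URL=sqlite:/data/fallback.db quickdid
```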
928
+
929
+
**Real-time Updates with Jetstream**: When `JETSTREAM_ENABLED=true`, QuickDID:
930
+
- Connects to AT Protocol firehose for live cache updates
931
+
- Processes Account events to purge deleted/deactivated accounts
932
+
- Processes Identity events to update handle-to-DID mappings
933
+
- Automatically reconnects with exponential backoff on failures
934
+
- Tracks metrics for successful and failed event processing
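A simple way to confirm the consumer is connected and processing events is to watch the service logs; the grep pattern below is illustrative, so adjust it to the actual log output:

```bash
# Watch Jetstream consumer activity (connects, reconnects, processed events)
docker-compose logs -f quickdid | grep -i jetstream
```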
935
+
936
+
**Recommendations by Deployment Type**:
937
+
- **Single instance, persistent**: Use SQLite for both caching and queuing (`SQLITE_URL=sqlite:./quickdid.db`, `QUEUE_ADAPTER=sqlite`)
938
+
- **Multi-instance, HA**: Use Redis for both caching and queuing (`REDIS_URL=redis://redis:6379/0`, `QUEUE_ADAPTER=redis`)
939
+
- **Real-time sync**: Enable Jetstream consumer (`JETSTREAM_ENABLED=true`) for live cache updates
940
+
- **Testing/development**: Use memory-only caching with MPSC queuing (`QUEUE_ADAPTER=mpsc`)
941
+
- **Hybrid**: Configure both Redis and SQLite for redundancy
942
+
943
+
### Queue Strategy Selection
944
+
945
+
**Queue Adapter Options**:
946
+
1. **Redis** (`QUEUE_ADAPTER=redis`) - Distributed queuing, best for multi-instance deployments
947
+
2. **SQLite** (`QUEUE_ADAPTER=sqlite`) - Persistent queuing, best for single-instance deployments
948
+
3. **MPSC** (`QUEUE_ADAPTER=mpsc`) - In-memory queuing, lightweight for single-instance without persistence needs
949
+
4. **No-op** (`QUEUE_ADAPTER=noop`) - Disable queuing entirely (testing only)
950
+
631
951
### Redis Optimization
632
952
633
953
```redis
···
656
976
### Required Fields
657
977
658
978
- **HTTP_EXTERNAL**: Must be provided
659
-
- **SERVICE_KEY**: Must be provided
979
+
- **HTTP_EXTERNAL**: Must be provided
660
980
661
981
### Value Constraints
662
982
663
-
1. **TTL Values** (`CACHE_TTL_MEMORY`, `CACHE_TTL_REDIS`):
983
+
1. **TTL Values** (`CACHE_TTL_MEMORY`, `CACHE_TTL_REDIS`, `CACHE_TTL_SQLITE`):
664
984
- Must be positive integers (> 0)
665
985
- Recommended minimum: 60 seconds
666
986
···
669
989
- Recommended range: 1-60 seconds
670
990
671
991
3. **Queue Adapter** (`QUEUE_ADAPTER`):
672
-
- Must be one of: `mpsc`, `redis`, `noop`, `none`
992
+
- Must be one of: `mpsc`, `redis`, `sqlite`, `noop`, `none`
673
993
- Case-sensitive
674
994
995
+
4. **Rate Limiting** (`RESOLVER_MAX_CONCURRENT`):
996
+
- Must be between 0 and 10000
997
+
- 0 = disabled (default)
998
+
- When > 0, limits concurrent handle resolutions
999
+
1000
+
5. **Rate Limiting Timeout** (`RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`):
1001
+
- Must be between 0 and 60000 (milliseconds)
1002
+
- 0 = no timeout (default)
1003
+
- Maximum: 60000ms (60 seconds)
1004
+
675
1005
### Validation Errors
676
1006
677
1007
If validation fails, QuickDID will exit with one of these error codes:
···
685
1015
686
1016
```bash
687
1017
# Validate configuration without starting service
688
-
HTTP_EXTERNAL=test SERVICE_KEY=test quickdid --help
1018
+
HTTP_EXTERNAL=test quickdid --help
689
1019
690
1020
# Test with specific values (will fail validation)
691
1021
CACHE_TTL_MEMORY=0 quickdid --help
692
1022
693
1023
# Debug configuration parsing
694
-
RUST_LOG=debug HTTP_EXTERNAL=test SERVICE_KEY=test quickdid
1024
+
RUST_LOG=debug HTTP_EXTERNAL=test quickdid
695
1025
```
696
1026
697
1027
## Support and Resources
+714
docs/telegraf-timescaledb-metrics-guide.md
+714
docs/telegraf-timescaledb-metrics-guide.md
···
1
+
# Telegraf and TimescaleDB Metrics Collection Guide
2
+
3
+
This guide demonstrates how to set up a metrics collection pipeline using Telegraf to collect StatsD metrics and store them in PostgreSQL with TimescaleDB using Docker Compose.
4
+
5
+
## Overview
6
+
7
+
This setup creates a metrics pipeline that:
8
+
- Collects StatsD metrics via Telegraf on UDP port 8125
9
+
- Creates individual PostgreSQL tables for each metric type
10
+
- Stores metric tags as JSONB for flexible querying
11
+
- Automatically creates hypertables for time-series optimization
12
+
- Provides a complete Docker Compose configuration for easy deployment
13
+
14
+
## Important Note on Table Structure
15
+
16
+
The Telegraf PostgreSQL output plugin with the configuration in this guide creates **individual tables for each metric name**. For example:
17
+
- `quickdid.http.request.count` becomes table `"quickdid.http.request.count"`
18
+
- `quickdid.resolver.rate_limit.available_permits` becomes table `"quickdid.resolver.rate_limit.available_permits"`
19
+
20
+
Each table has the following structure:
21
+
- `time` (timestamptz) - The timestamp of the metric
22
+
- `tags` (jsonb) - All tags stored as a JSON object
23
+
- Metric-specific columns for values (e.g., `value`, `mean`, `99_percentile`, etc.)
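For example, once metrics have flowed in you can peek at a single row of a counter table to see this layout (commands use the containers defined later in this guide):

```bash
# One raw row: timestamp, JSONB tags, and the counter value
docker exec -it timescaledb psql -U postgres -d metrics \
  -c 'SELECT time, tags, value FROM "quickdid.http.request.count" ORDER BY time DESC LIMIT 1;'
```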
24
+
25
+
## Prerequisites
26
+
27
+
- Docker and Docker Compose installed
28
+
- Basic understanding of StatsD metrics format
29
+
- Familiarity with PostgreSQL/TimescaleDB concepts
30
+
31
+
## Project Structure
32
+
33
+
Create the following directory structure:
34
+
35
+
```
36
+
metrics-stack/
37
+
├── docker-compose.yml
38
+
├── telegraf/
39
+
│   └── telegraf.conf
40
+
├── test-scripts/
41
+
│   ├── send-metrics.sh
42
+
│   └── verify-queries.sql
43
+
└── .env
44
+
```
45
+
46
+
## Configuration Files
47
+
48
+
### 1. Environment Variables (.env)
49
+
50
+
Create a `.env` file to store sensitive configuration:
51
+
52
+
```env
53
+
# PostgreSQL/TimescaleDB Configuration
54
+
POSTGRES_DB=metrics
55
+
POSTGRES_USER=postgres
56
+
POSTGRES_PASSWORD=secretpassword
57
+
58
+
# Telegraf Database User
59
+
TELEGRAF_DB_USER=postgres
60
+
TELEGRAF_DB_PASSWORD=secretpassword
61
+
62
+
# TimescaleDB Settings
63
+
TIMESCALE_TELEMETRY=off
64
+
```
65
+
66
+
### 2. Telegraf Configuration (telegraf/telegraf.conf)
67
+
68
+
Create the Telegraf configuration file:
69
+
70
+
```toml
71
+
# Global Telegraf Agent Configuration
72
+
[agent]
73
+
interval = "10s"
74
+
round_interval = true
75
+
metric_batch_size = 1000
76
+
metric_buffer_limit = 10000
77
+
collection_jitter = "0s"
78
+
flush_interval = "10s"
79
+
flush_jitter = "0s"
80
+
precision = ""
81
+
debug = false
82
+
quiet = false
83
+
hostname = "telegraf-agent"
84
+
omit_hostname = false
85
+
86
+
# StatsD Input Plugin
87
+
[[inputs.statsd]]
88
+
service_address = ":8125" # Listen on UDP port 8125 for StatsD metrics
89
+
protocol = "udp"
90
+
delete_gauges = true
91
+
delete_counters = true
92
+
delete_sets = true
93
+
delete_timings = true
94
+
percentiles = [50, 90, 95, 99]
95
+
metric_separator = "."
96
+
allowed_pending_messages = 10000
97
+
datadog_extensions = true
98
+
datadog_distributions = true
99
+
100
+
# PostgreSQL (TimescaleDB) Output Plugin
101
+
[[outputs.postgresql]]
102
+
connection = "host=timescaledb user=${TELEGRAF_DB_USER} password=${TELEGRAF_DB_PASSWORD} dbname=${POSTGRES_DB} sslmode=disable"
103
+
schema = "public"
104
+
105
+
# Create individual tables for each metric with hypertable support
106
+
create_templates = [
107
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
108
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
109
+
]
110
+
111
+
# Store all tags as JSONB for flexible querying
112
+
tags_as_jsonb = true
113
+
114
+
# Keep fields as separate columns for better performance on aggregations
115
+
fields_as_jsonb = false
116
+
```
117
+
118
+
### 3. Docker Compose Configuration (docker-compose.yml)
119
+
120
+
Create the Docker Compose file:
121
+
122
+
```yaml
123
+
version: '3.8'
124
+
125
+
services:
126
+
timescaledb:
127
+
image: timescale/timescaledb:latest-pg17
128
+
container_name: timescaledb
129
+
restart: unless-stopped
130
+
environment:
131
+
POSTGRES_DB: ${POSTGRES_DB}
132
+
POSTGRES_USER: ${POSTGRES_USER}
133
+
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
134
+
TIMESCALE_TELEMETRY: ${TIMESCALE_TELEMETRY}
135
+
ports:
136
+
- "5442:5432"
137
+
volumes:
138
+
- timescale_data:/home/postgres/pgdata/data
139
+
- ./init-scripts:/docker-entrypoint-initdb.d:ro
140
+
command:
141
+
- postgres
142
+
- -c
143
+
- shared_buffers=1GB
144
+
- -c
145
+
- effective_cache_size=3GB
146
+
- -c
147
+
- maintenance_work_mem=512MB
148
+
- -c
149
+
- work_mem=32MB
150
+
- -c
151
+
- timescaledb.max_background_workers=8
152
+
- -c
153
+
- max_parallel_workers_per_gather=2
154
+
- -c
155
+
- max_parallel_workers=8
156
+
healthcheck:
157
+
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
158
+
interval: 10s
159
+
timeout: 5s
160
+
retries: 5
161
+
networks:
162
+
- metrics_network
163
+
164
+
telegraf:
165
+
image: telegraf:1.35
166
+
container_name: telegraf
167
+
restart: unless-stopped
168
+
environment:
169
+
TELEGRAF_DB_USER: ${TELEGRAF_DB_USER}
170
+
TELEGRAF_DB_PASSWORD: ${TELEGRAF_DB_PASSWORD}
171
+
POSTGRES_DB: ${POSTGRES_DB}
172
+
ports:
173
+
- "8125:8125/udp" # StatsD UDP port
174
+
volumes:
175
+
- ./telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro
176
+
depends_on:
177
+
timescaledb:
178
+
condition: service_healthy
179
+
networks:
180
+
- metrics_network
181
+
command: ["telegraf", "--config", "/etc/telegraf/telegraf.conf"]
182
+
183
+
redis:
184
+
image: redis:7-alpine
185
+
container_name: redis
186
+
restart: unless-stopped
187
+
ports:
188
+
- "6379:6379"
189
+
volumes:
190
+
- redis_data:/data
191
+
command: redis-server --appendonly yes --appendfsync everysec
192
+
healthcheck:
193
+
test: ["CMD", "redis-cli", "ping"]
194
+
interval: 10s
195
+
timeout: 5s
196
+
retries: 5
197
+
networks:
198
+
- metrics_network
199
+
200
+
networks:
201
+
metrics_network:
202
+
driver: bridge
203
+
204
+
volumes:
205
+
timescale_data:
206
+
redis_data:
207
+
```
208
+
209
+
### 4. Database Initialization Script (optional)
210
+
211
+
Create `init-scripts/01-init.sql` to set up the TimescaleDB extension:
212
+
213
+
```sql
214
+
-- Enable TimescaleDB extension
215
+
CREATE EXTENSION IF NOT EXISTS timescaledb;
216
+
217
+
-- Enable additional useful extensions
218
+
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
219
+
```
220
+
221
+
## Test Scripts
222
+
223
+
### 1. Send Test Metrics Script (test-scripts/send-metrics.sh)
224
+
225
+
Create a script to send various types of metrics:
226
+
227
+
```bash
228
+
#!/bin/bash
229
+
230
+
# Send test metrics to StatsD/Telegraf
231
+
232
+
echo "Sending test metrics to StatsD on localhost:8125..."
233
+
234
+
# Counter metrics
235
+
for i in {1..10}; do
236
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
237
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
238
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:404" | nc -u -w0 localhost 8125
239
+
done
240
+
241
+
# Gauge metrics
242
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
243
+
echo "quickdid.resolver.rate_limit.available_permits:8|g" | nc -u -w0 localhost 8125
244
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
245
+
246
+
# Timing metrics (in milliseconds)
247
+
for i in {1..20}; do
248
+
duration=$((RANDOM % 100 + 10))
249
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
250
+
done
251
+
252
+
for i in {1..10}; do
253
+
duration=$((RANDOM % 200 + 50))
254
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
255
+
done
256
+
257
+
# Histogram metrics
258
+
for i in {1..15}; do
259
+
resolution_time=$((RANDOM % 500 + 50))
260
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
261
+
echo "quickdid.resolver.resolution_time:$((resolution_time * 2))|h|#resolver:base" | nc -u -w0 localhost 8125
262
+
done
263
+
264
+
# Cache metrics
265
+
echo "quickdid.cache.hit.count:45|c|#cache_type:redis" | nc -u -w0 localhost 8125
266
+
echo "quickdid.cache.miss.count:5|c|#cache_type:redis" | nc -u -w0 localhost 8125
267
+
echo "quickdid.cache.size:1024|g|#cache_type:memory" | nc -u -w0 localhost 8125
268
+
269
+
echo "Metrics sent! Wait 15 seconds for Telegraf to flush..."
270
+
sleep 15
271
+
echo "Done!"
272
+
```
273
+
274
+
### 2. Verify Queries Script (test-scripts/verify-queries.sql)
275
+
276
+
Create a SQL script to verify all queries work correctly:
277
+
278
+
```sql
279
+
-- Test script to verify all metrics queries work correctly
280
+
-- Run this after sending test metrics with send-metrics.sh
281
+
282
+
\echo '===== CHECKING AVAILABLE TABLES ====='
283
+
SELECT table_name
284
+
FROM information_schema.tables
285
+
WHERE table_schema = 'public'
286
+
AND table_name LIKE 'quickdid%'
287
+
ORDER BY table_name;
288
+
289
+
\echo ''
290
+
\echo '===== CHECKING TABLE STRUCTURES ====='
291
+
\echo 'Structure of quickdid.http.request.count table:'
292
+
\d "quickdid.http.request.count"
293
+
294
+
\echo ''
295
+
\echo 'Structure of quickdid.http.request.duration_ms table:'
296
+
\d "quickdid.http.request.duration_ms"
297
+
298
+
\echo ''
299
+
\echo '===== QUERY 1: Recent HTTP Request Counts ====='
300
+
SELECT
301
+
time,
302
+
tags,
303
+
tags->>'method' as method,
304
+
tags->>'path' as path,
305
+
tags->>'status' as status,
306
+
value
307
+
FROM "quickdid.http.request.count"
308
+
WHERE time > NOW() - INTERVAL '1 hour'
309
+
ORDER BY time DESC
310
+
LIMIT 10;
311
+
312
+
\echo ''
313
+
\echo '===== QUERY 2: HTTP Request Duration Statistics by Endpoint ====='
314
+
SELECT
315
+
time_bucket('1 minute', time) AS minute,
316
+
tags->>'method' as method,
317
+
tags->>'path' as path,
318
+
tags->>'status' as status,
319
+
COUNT(*) as request_count,
320
+
AVG(mean) as avg_duration_ms,
321
+
MAX("99_percentile") as p99_duration_ms,
322
+
MIN(mean) as min_duration_ms
323
+
FROM "quickdid.http.request.duration_ms"
324
+
WHERE time > NOW() - INTERVAL '1 hour'
325
+
AND tags IS NOT NULL
326
+
GROUP BY minute, tags->>'method', tags->>'path', tags->>'status'
327
+
ORDER BY minute DESC
328
+
LIMIT 10;
329
+
330
+
\echo ''
331
+
\echo '===== QUERY 3: Rate Limiter Status Over Time ====='
332
+
SELECT
333
+
time,
334
+
value as available_permits
335
+
FROM "quickdid.resolver.rate_limit.available_permits"
336
+
WHERE time > NOW() - INTERVAL '1 hour'
337
+
ORDER BY time DESC
338
+
LIMIT 10;
339
+
340
+
\echo ''
341
+
\echo '===== QUERY 4: Resolver Performance Comparison ====='
342
+
SELECT
343
+
tags->>'resolver' as resolver_type,
344
+
COUNT(*) as sample_count,
345
+
AVG(mean) as avg_resolution_time_ms,
346
+
MAX("99_percentile") as p99_resolution_time_ms,
347
+
MIN(mean) as min_resolution_time_ms
348
+
FROM "quickdid.resolver.resolution_time"
349
+
WHERE time > NOW() - INTERVAL '1 hour'
350
+
AND tags->>'resolver' IS NOT NULL
351
+
GROUP BY tags->>'resolver'
352
+
ORDER BY avg_resolution_time_ms;
353
+
354
+
\echo ''
355
+
\echo '===== QUERY 5: Cache Hit Rate Analysis ====='
356
+
WITH cache_stats AS (
357
+
SELECT
358
+
'hits' as metric_type,
359
+
SUM(value) as total_count
360
+
FROM "quickdid.cache.hit.count"
361
+
WHERE time > NOW() - INTERVAL '1 hour'
362
+
UNION ALL
363
+
SELECT
364
+
'misses' as metric_type,
365
+
SUM(value) as total_count
366
+
FROM "quickdid.cache.miss.count"
367
+
WHERE time > NOW() - INTERVAL '1 hour'
368
+
)
369
+
SELECT
370
+
SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) as total_hits,
371
+
SUM(CASE WHEN metric_type = 'misses' THEN total_count ELSE 0 END) as total_misses,
372
+
CASE
373
+
WHEN SUM(total_count) > 0 THEN
374
+
ROUND(100.0 * SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) / SUM(total_count), 2)
375
+
ELSE 0
376
+
END as hit_rate_percentage
377
+
FROM cache_stats;
378
+
379
+
\echo ''
380
+
\echo '===== QUERY 6: Hypertable Information ====='
381
+
SELECT
382
+
hypertable_schema,
383
+
hypertable_name,
384
+
owner,
385
+
num_dimensions,
386
+
num_chunks,
387
+
compression_enabled
388
+
FROM timescaledb_information.hypertables
389
+
WHERE hypertable_name LIKE 'quickdid%'
390
+
ORDER BY hypertable_name;
391
+
392
+
\echo ''
393
+
\echo '===== QUERY 7: HTTP Error Rate by Endpoint ====='
394
+
WITH status_counts AS (
395
+
SELECT
396
+
time_bucket('5 minutes', time) as period,
397
+
tags->>'path' as path,
398
+
CASE
399
+
WHEN (tags->>'status')::int >= 400 THEN 'error'
400
+
ELSE 'success'
401
+
END as status_category,
402
+
SUM(value) as request_count
403
+
FROM "quickdid.http.request.count"
404
+
WHERE time > NOW() - INTERVAL '1 hour'
405
+
GROUP BY period, path, status_category
406
+
)
407
+
SELECT
408
+
period,
409
+
path,
410
+
SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) as error_count,
411
+
SUM(CASE WHEN status_category = 'success' THEN request_count ELSE 0 END) as success_count,
412
+
CASE
413
+
WHEN SUM(request_count) > 0 THEN
414
+
ROUND(100.0 * SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) / SUM(request_count), 2)
415
+
ELSE 0
416
+
END as error_rate_percentage
417
+
FROM status_counts
418
+
GROUP BY period, path
419
+
HAVING SUM(request_count) > 0
420
+
ORDER BY period DESC, error_rate_percentage DESC;
421
+
422
+
\echo ''
423
+
\echo '===== TEST COMPLETED ====='
424
+
```
425
+
426
+
## Usage
427
+
428
+
### Starting the Stack
429
+
430
+
1. Navigate to your project directory:
431
+
```bash
432
+
cd metrics-stack
433
+
```
434
+
435
+
2. Make the test scripts executable:
436
+
```bash
437
+
chmod +x test-scripts/send-metrics.sh
438
+
```
439
+
440
+
3. Start the services:
441
+
```bash
442
+
docker-compose up -d
443
+
```
444
+
445
+
4. Check the logs to ensure everything is running:
446
+
```bash
447
+
docker-compose logs -f
448
+
```
449
+
450
+
5. Wait for services to be fully ready (about 30 seconds)
451
+
452
+
### Running the Test Suite
453
+
454
+
1. Send test metrics:
455
+
```bash
456
+
./test-scripts/send-metrics.sh
457
+
```
458
+
459
+
2. Verify all queries work:
460
+
```bash
461
+
docker exec -i timescaledb psql -U postgres -d metrics < test-scripts/verify-queries.sql
462
+
```
463
+
464
+
### Manual Querying
465
+
466
+
Connect to TimescaleDB to run queries manually:
467
+
468
+
```bash
469
+
# Connect to the database
470
+
docker exec -it timescaledb psql -U postgres -d metrics
471
+
472
+
# List all metric tables
473
+
\dt "quickdid*"
474
+
475
+
# Describe a specific table structure
476
+
\d "quickdid.http.request.duration_ms"
477
+
478
+
# Query with JSONB tag filtering
479
+
SELECT
480
+
time,
481
+
tags->>'method' as method,
482
+
mean as avg_ms,
483
+
"99_percentile" as p99_ms
484
+
FROM "quickdid.http.request.duration_ms"
485
+
WHERE tags @> '{"method": "GET"}'::jsonb
486
+
AND time > NOW() - INTERVAL '1 hour'
487
+
ORDER BY time DESC
488
+
LIMIT 10;
489
+
```
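If you filter heavily with JSONB containment (`tags @> ...`) as above, a GIN index on the `tags` column speeds those lookups up considerably; the index name here is arbitrary:

```bash
# Optional: GIN index to accelerate tags @> containment filters
docker exec -it timescaledb psql -U postgres -d metrics -c \
  'CREATE INDEX IF NOT EXISTS idx_duration_tags_gin ON "quickdid.http.request.duration_ms" USING GIN (tags);'
```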
490
+
491
+
## Advanced Configuration
492
+
493
+
### Continuous Aggregates for Performance
494
+
495
+
Create continuous aggregates for frequently queried data:
496
+
497
+
```sql
498
+
-- Create hourly aggregates for HTTP metrics
499
+
CREATE MATERIALIZED VIEW http_metrics_hourly
500
+
WITH (timescaledb.continuous) AS
501
+
SELECT
502
+
time_bucket('1 hour', time) AS hour,
503
+
tags->>'method' as method,
504
+
tags->>'path' as path,
505
+
tags->>'status' as status,
506
+
COUNT(*) as request_count,
507
+
AVG(mean) as avg_duration_ms,
508
+
MAX("99_percentile") as p99_duration_ms,
509
+
MIN(mean) as min_duration_ms
510
+
FROM "quickdid.http.request.duration_ms"
511
+
WHERE tags IS NOT NULL
512
+
GROUP BY hour, method, path, status
513
+
WITH NO DATA;
514
+
515
+
-- Add refresh policy
516
+
SELECT add_continuous_aggregate_policy('http_metrics_hourly',
517
+
start_offset => INTERVAL '3 hours',
518
+
end_offset => INTERVAL '1 hour',
519
+
schedule_interval => INTERVAL '1 hour');
520
+
521
+
-- Manually refresh to populate initial data
522
+
CALL refresh_continuous_aggregate('http_metrics_hourly', NULL, NULL);
523
+
524
+
-- Query the aggregate
525
+
SELECT * FROM http_metrics_hourly
526
+
ORDER BY hour DESC, request_count DESC
527
+
LIMIT 20;
528
+
```
529
+
530
+
### Data Retention Policies
531
+
532
+
Set up automatic data retention:
533
+
534
+
```sql
535
+
-- Add retention policy to drop data older than 30 days
536
+
SELECT add_retention_policy('"quickdid.http.request.count"', INTERVAL '30 days');
537
+
SELECT add_retention_policy('"quickdid.http.request.duration_ms"', INTERVAL '30 days');
538
+
539
+
-- View retention policies
540
+
SELECT js.* FROM timescaledb_information.job_stats js
541
+
JOIN timescaledb_information.jobs j ON js.job_id = j.job_id
542
+
WHERE j.proc_name LIKE '%retention%';
543
+
```
544
+
545
+
### Compression for Storage Optimization
546
+
547
+
Enable compression for older data:
548
+
549
+
```sql
550
+
-- Enable compression on a hypertable
551
+
ALTER TABLE "quickdid.http.request.duration_ms" SET (
552
+
timescaledb.compress,
553
+
timescaledb.compress_segmentby = 'tags'
554
+
);
555
+
556
+
-- Add compression policy (compress chunks older than 7 days)
557
+
SELECT add_compression_policy('"quickdid.http.request.duration_ms"', INTERVAL '7 days');
558
+
559
+
-- Manually compress old chunks
560
+
SELECT compress_chunk(format('%I.%I', c.chunk_schema, c.chunk_name)::regclass)
561
+
FROM timescaledb_information.chunks c
562
+
WHERE c.hypertable_name = 'quickdid.http.request.duration_ms'
563
+
AND c.range_end < NOW() - INTERVAL '7 days'
564
+
AND NOT c.is_compressed;
565
+
566
+
-- Check compression status
567
+
SELECT 
568
+
h.hypertable_name,
569
+
pg_size_pretty(c.before_compression_total_bytes) as before_compression,
570
+
pg_size_pretty(c.after_compression_total_bytes) as after_compression
571
+
FROM timescaledb_information.hypertables h,
572
+
LATERAL hypertable_compression_stats(format('%I.%I', h.hypertable_schema, h.hypertable_name)::regclass) c
573
+
WHERE h.hypertable_name LIKE 'quickdid%';
574
+
```
575
+
576
+
## Monitoring and Maintenance
577
+
578
+
### Health Checks
579
+
580
+
```sql
581
+
-- Check chunk distribution
582
+
SELECT
583
+
hypertable_name,
584
+
chunk_name,
585
+
range_start,
586
+
range_end,
587
+
is_compressed,
588
+
pg_size_pretty(total_bytes) as size
589
+
FROM timescaledb_information.chunks
590
+
WHERE hypertable_name LIKE 'quickdid%'
591
+
ORDER BY hypertable_name, range_start DESC
592
+
LIMIT 20;
593
+
594
+
-- Check background jobs
595
+
SELECT 
596
+
j.job_id,
597
+
j.application_name,
598
+
j.schedule_interval,
599
+
js.last_run_started_at,
600
+
js.last_successful_finish,
601
+
js.next_start
602
+
FROM timescaledb_information.jobs j
603
+
JOIN timescaledb_information.job_stats js ON j.job_id = js.job_id
604
+
ORDER BY j.job_id;
605
+
606
+
-- Check table sizes
607
+
SELECT 
608
+
h.hypertable_name,
609
+
pg_size_pretty(s.total_bytes) as total_size,
610
+
pg_size_pretty(s.index_bytes) as index_size
611
+
FROM timescaledb_information.hypertables h,
612
+
LATERAL hypertable_detailed_size(format('%I.%I', h.hypertable_schema, h.hypertable_name)::regclass) s
613
+
WHERE h.hypertable_name LIKE 'quickdid%';
614
+
```
615
+
616
+
### Troubleshooting
617
+
618
+
1. **Tables not being created:**
619
+
- Check Telegraf logs: `docker-compose logs telegraf | grep -i error`
620
+
- Verify PostgreSQL connectivity: `docker exec telegraf telegraf --test`
621
+
- Ensure metrics are being received: `docker-compose logs telegraf | grep statsd`
622
+
623
+
2. **Queries returning no data:**
624
+
- Verify tables exist: `\dt "quickdid*"` in psql
625
+
- Check table contents: `SELECT COUNT(*) FROM "quickdid.http.request.count";`
626
+
- Verify time ranges in WHERE clauses
627
+
628
+
3. **Performance issues:**
629
+
- Check if hypertables are created: Query `timescaledb_information.hypertables`
630
+
- Verify compression is working if enabled
631
+
- Consider creating appropriate indexes on JSONB paths:
632
+
```sql
633
+
CREATE INDEX idx_http_method ON "quickdid.http.request.duration_ms" ((tags->>'method'));
634
+
CREATE INDEX idx_http_path ON "quickdid.http.request.duration_ms" ((tags->>'path'));
635
+
```
636
+
637
+
## Integration with QuickDID
638
+
639
+
To integrate with QuickDID, configure it to send metrics to the Telegraf StatsD endpoint:
640
+
641
+
```bash
642
+
# Set environment variables for QuickDID
643
+
export METRICS_ADAPTER=statsd
644
+
export METRICS_STATSD_HOST=localhost:8125
645
+
export METRICS_PREFIX=quickdid.
646
+
export METRICS_TAGS=env:production,service:quickdid
647
+
648
+
# Start QuickDID
649
+
cargo run
650
+
```
651
+
652
+
QuickDID will automatically send metrics to Telegraf, which will store them in TimescaleDB for analysis.
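To confirm the pipeline end to end, wait at least one flush interval (10 seconds) and check that per-metric tables have appeared:

```bash
# List the per-metric tables created from QuickDID's StatsD traffic
docker exec -it timescaledb psql -U postgres -d metrics -c '\dt "quickdid*"'
```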
653
+
654
+
## Key Differences from Generic Metrics Table Approach
655
+
656
+
This configuration creates **individual tables per metric** instead of a single generic metrics table. Benefits include:
657
+
658
+
1. **Better performance**: Each metric has its own optimized schema
659
+
2. **Clearer data model**: Tables directly represent metrics
660
+
3. **Easier querying**: No need to filter by metric name
661
+
4. **Type safety**: Each metric's fields have appropriate types
662
+
5. **Efficient compression**: Per-metric compression strategies
663
+
664
+
Trade-offs:
665
+
- More tables to manage (mitigated by TimescaleDB automation)
666
+
- Need to know metric names upfront for queries
667
+
- Schema changes require table alterations
668
+
669
+
## Security Considerations
670
+
671
+
1. **Use strong passwords:** Update the default passwords in `.env`
672
+
2. **Enable SSL:** Configure `sslmode=require` in production
673
+
3. **Network isolation:** Use Docker networks to isolate services
674
+
4. **Access control:** Create separate database users with minimal permissions:
675
+
```sql
676
+
CREATE USER metrics_reader WITH PASSWORD 'readonly_password';
677
+
GRANT CONNECT ON DATABASE metrics TO metrics_reader;
678
+
GRANT USAGE ON SCHEMA public TO metrics_reader;
679
+
GRANT SELECT ON ALL TABLES IN SCHEMA public TO metrics_reader;
680
+
```
681
+
5. **Regular updates:** Keep Docker images updated for security patches
682
+
683
+
## Performance Tuning
684
+
685
+
### PostgreSQL/TimescaleDB Settings
686
+
687
+
The docker-compose.yml includes optimized settings. Adjust based on your hardware:
688
+
689
+
- `shared_buffers`: 25% of system RAM
690
+
- `effective_cache_size`: 75% of system RAM
691
+
- `maintenance_work_mem`: 5% of system RAM
692
+
- `work_mem`: RAM / max_connections / 2
693
+
694
+
### Telegraf Buffer Settings
695
+
696
+
For high-volume metrics, adjust in telegraf.conf:
697
+
698
+
```toml
699
+
[agent]
700
+
metric_batch_size = 5000 # Increase for high volume
701
+
metric_buffer_limit = 100000 # Increase buffer size
702
+
flush_interval = "5s" # Decrease for more frequent writes
703
+
```
704
+
705
+
## Conclusion
706
+
707
+
This setup provides a robust metrics collection and storage solution with:
708
+
- **Individual metric tables** for optimal performance and clarity
709
+
- **JSONB tag storage** for flexible querying
710
+
- **TimescaleDB hypertables** for efficient time-series storage
711
+
- **Comprehensive test suite** to verify functionality
712
+
- **Production-ready configuration** with compression and retention policies
713
+
714
+
The system correctly handles StatsD metrics from QuickDID and provides powerful querying capabilities through PostgreSQL's JSONB support and TimescaleDB's time-series functions.
+59
generate-wellknown.sh
+59
generate-wellknown.sh
···
1
+
#!/bin/bash
2
+
3
+
# Script to generate .well-known static files based on QuickDID configuration
4
+
# Usage: HTTP_EXTERNAL=quickdid.smokesignal.tools ./generate-wellknown.sh
5
+
#
6
+
# Note: Since we no longer process SERVICE_KEY, you'll need to manually
7
+
# add the public key to the did.json file if you need DID document support.
8
+
9
+
set -e
10
+
11
+
# Check required environment variables
12
+
if [ -z "$HTTP_EXTERNAL" ]; then
13
+
echo "Error: HTTP_EXTERNAL environment variable is required"
14
+
echo "Usage: HTTP_EXTERNAL=example.com ./generate-wellknown.sh"
15
+
exit 1
16
+
fi
17
+
18
+
# Ensure www/.well-known directory exists
19
+
mkdir -p www/.well-known
20
+
21
+
# Generate service DID from HTTP_EXTERNAL
22
+
if [[ "$HTTP_EXTERNAL" == *":"* ]]; then
23
+
# Contains port - URL encode the colon
24
+
SERVICE_DID="did:web:${HTTP_EXTERNAL//:/%3A}"
25
+
else
26
+
SERVICE_DID="did:web:$HTTP_EXTERNAL"
27
+
fi
28
+
29
+
echo "Generating .well-known files for $SERVICE_DID"
30
+
31
+
# Write atproto-did file
32
+
echo "$SERVICE_DID" > www/.well-known/atproto-did
33
+
echo "Created: www/.well-known/atproto-did"
34
+
35
+
# Create a basic did.json template
36
+
# Note: You'll need to manually add the publicKeyMultibase if you need DID document support
37
+
38
+
cat > www/.well-known/did.json <<EOF
39
+
{
40
+
"@context": [
41
+
"https://www.w3.org/ns/did/v1",
42
+
"https://w3id.org/security/multikey/v1"
43
+
],
44
+
"id": "$SERVICE_DID",
45
+
"verificationMethod": [],
46
+
"service": [
47
+
{
48
+
"id": "${SERVICE_DID}#quickdid",
49
+
"type": "QuickDIDService",
50
+
"serviceEndpoint": "https://${HTTP_EXTERNAL}"
51
+
}
52
+
]
53
+
}
54
+
EOF
55
+
56
+
echo "Created: www/.well-known/did.json"
57
+
echo ""
58
+
echo "Note: The did.json file is a basic template. If you need DID document support,"
59
+
echo "you'll need to manually add the verificationMethod with your public key."
+18
railway-resources/telegraf/Dockerfile
···
1
+
# Telegraf Dockerfile for Railway Deployment
2
+
FROM telegraf:1.33-alpine
3
+
4
+
# Install additional packages for health checks
5
+
RUN apk add --no-cache curl postgresql-client
6
+
7
+
# Create directories for custom configs
8
+
RUN mkdir -p /etc/telegraf/telegraf.d
9
+
10
+
# Copy main configuration
11
+
COPY railway-resources/telegraf/telegraf.conf /etc/telegraf/telegraf.conf
12
+
13
+
# Health check - test configuration validity
14
+
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
15
+
CMD telegraf --config /etc/telegraf/telegraf.conf --test || exit 1
16
+
17
+
# Run telegraf with custom config
18
+
CMD ["telegraf", "--config", "/etc/telegraf/telegraf.conf", "--config-directory", "/etc/telegraf/telegraf.d"]
+48
railway-resources/telegraf/railway.toml
···
1
+
# Railway configuration for Telegraf service
2
+
# This file configures how Railway builds and deploys the Telegraf metrics collector
3
+
4
+
[build]
5
+
# Use Dockerfile for building
6
+
builder = "DOCKERFILE"
7
+
dockerfilePath = "railway-resources/telegraf/Dockerfile"
8
+
9
+
[deploy]
10
+
# Start command (overrides the Dockerfile CMD)
11
+
startCommand = "telegraf --config /etc/telegraf/telegraf.conf"
12
+
13
+
# No health check path for Telegraf (uses container health check)
14
+
# healthcheckPath = ""
15
+
16
+
# Restart policy
17
+
restartPolicyType = "ALWAYS"
18
+
restartPolicyMaxRetries = 10
19
+
20
+
# Resource limits
21
+
memoryLimitMB = 1024
22
+
cpuLimitCores = 1
23
+
24
+
# Scaling (Telegraf should be singleton)
25
+
minReplicas = 1
26
+
maxReplicas = 1
27
+
28
+
# Graceful shutdown
29
+
stopTimeout = 10
30
+
31
+
# Service configuration for StatsD UDP endpoint
32
+
[[services]]
33
+
name = "telegraf-statsd"
34
+
port = 8125
35
+
protocol = "UDP"
36
+
internalPort = 8125
37
+
38
+
# Service configuration for Telegraf HTTP API (optional)
39
+
[[services]]
40
+
name = "telegraf-http"
41
+
port = 8086
42
+
protocol = "HTTP"
43
+
internalPort = 8086
44
+
45
+
# Environment-specific settings
46
+
[environments.production]
47
+
memoryLimitMB = 512
48
+
cpuLimitCores = 1
+77
railway-resources/telegraf/telegraf.conf
···
1
+
# Telegraf Configuration for QuickDID Metrics Collection
2
+
# Optimized for Railway deployment with TimescaleDB
3
+
4
+
# Global tags applied to all metrics
5
+
[global_tags]
6
+
environment = "${ENVIRONMENT:-production}"
7
+
service = "quickdid"
8
+
region = "${RAILWAY_REGION:-us-west1}"
9
+
deployment_id = "${RAILWAY_DEPLOYMENT_ID:-unknown}"
10
+
11
+
# Agent configuration
12
+
[agent]
13
+
## Default data collection interval
14
+
interval = "10s"
15
+
16
+
## Rounds collection interval to interval
17
+
round_interval = true
18
+
19
+
## Telegraf will send metrics to outputs in batches of at most metric_batch_size metrics.
20
+
metric_batch_size = 1000
21
+
22
+
## Maximum number of unwritten metrics per output
23
+
metric_buffer_limit = 10000
24
+
25
+
## Collection jitter is used to jitter the collection by a random amount
26
+
collection_jitter = "0s"
27
+
28
+
## Default flushing interval for all outputs
29
+
flush_interval = "10s"
30
+
31
+
## Jitter the flush interval by a random amount
32
+
flush_jitter = "0s"
33
+
34
+
## Precision of timestamps
35
+
precision = "1ms"
36
+
37
+
## Log level
38
+
debug = ${TELEGRAF_DEBUG:-false}
39
+
quiet = ${TELEGRAF_QUIET:-false}
40
+
41
+
## Override default hostname
42
+
hostname = "${HOSTNAME:-telegraf}"
43
+
44
+
## If true, do not set the "host" tag in the telegraf agent
45
+
omit_hostname = false
46
+
47
+
###############################################################################
48
+
# INPUT PLUGINS #
49
+
###############################################################################
50
+
51
+
# StatsD Server - receives metrics from QuickDID
52
+
[[inputs.statsd]]
53
+
service_address = ":8125" # Listen on UDP port 8125 for StatsD metrics
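  # Point QuickDID's METRICS_STATSD_HOST at this listener (for example, telegraf:8125 on a shared Docker network)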
54
+
protocol = "udp"
55
+
delete_gauges = true
56
+
delete_counters = true
57
+
delete_sets = true
58
+
delete_timings = true
59
+
percentiles = [50, 90, 95, 99]
60
+
metric_separator = "."
61
+
allowed_pending_messages = 100
62
+
datadog_extensions = true
63
+
datadog_distributions = true
64
+
65
+
[[outputs.postgresql]]
66
+
connection = "${DATABASE_URL}"
67
+
68
+
schema = "public"
69
+
70
+
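  # Executed in order the first time a new measurement arrives: create the metric table,
  # then convert it into a TimescaleDB hypertable partitioned on the "time" column.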
create_templates = [
71
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
72
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
73
+
]
74
+
75
+
tags_as_jsonb = true
76
+
77
+
fields_as_jsonb = false
+457
-63
src/bin/quickdid.rs
···
1
1
use anyhow::Result;
2
2
use atproto_identity::{
3
3
config::{CertificateBundles, DnsNameservers},
4
-
key::{identify_key, to_public},
5
4
resolve::HickoryDnsResolver,
6
5
};
7
-
use clap::Parser;
6
+
use atproto_jetstream::{Consumer as JetstreamConsumer, ConsumerTaskConfig};
7
+
use atproto_lexicon::resolve::{DefaultLexiconResolver, LexiconResolver};
8
8
use quickdid::{
9
9
cache::create_redis_pool,
10
-
config::{Args, Config},
10
+
config::Config,
11
11
handle_resolver::{
12
-
create_base_resolver, create_caching_resolver, create_redis_resolver_with_ttl,
12
+
create_base_resolver, create_caching_resolver,
13
+
create_proactive_refresh_resolver_with_metrics, create_rate_limited_resolver_with_timeout,
14
+
create_redis_resolver_with_ttl, create_sqlite_resolver_with_ttl,
13
15
},
14
16
handle_resolver_task::{HandleResolverTaskConfig, create_handle_resolver_task_with_config},
15
17
http::{AppContext, create_router},
16
-
queue_adapter::{
18
+
jetstream_handler::QuickDidEventHandler,
19
+
lexicon_resolver::create_redis_lexicon_resolver_with_ttl,
20
+
metrics::create_metrics_publisher,
21
+
queue::{
17
22
HandleResolutionWork, QueueAdapter, create_mpsc_queue_from_channel, create_noop_queue,
18
-
create_redis_queue,
23
+
create_redis_queue, create_redis_queue_with_dedup, create_sqlite_queue,
24
+
create_sqlite_queue_with_max_size,
19
25
},
26
+
sqlite_schema::create_sqlite_pool,
20
27
task_manager::spawn_cancellable_task,
21
28
};
22
-
use serde_json::json;
23
29
use std::sync::Arc;
24
30
use tokio::signal;
25
31
use tokio_util::{sync::CancellationToken, task::TaskTracker};
···
39
45
}
40
46
}
41
47
48
+
/// Helper function to create a SQLite pool with consistent error handling
49
+
async fn try_create_sqlite_pool(sqlite_url: &str, purpose: &str) -> Option<sqlx::SqlitePool> {
50
+
match create_sqlite_pool(sqlite_url).await {
51
+
Ok(pool) => {
52
+
tracing::info!("SQLite pool created for {}", purpose);
53
+
Some(pool)
54
+
}
55
+
Err(e) => {
56
+
tracing::warn!("Failed to create SQLite pool for {}: {}", purpose, e);
57
+
None
58
+
}
59
+
}
60
+
}
61
+
62
+
/// Simple command-line argument handling for --version and --help
63
+
fn handle_simple_args() -> bool {
64
+
let args: Vec<String> = std::env::args().collect();
65
+
66
+
if args.len() > 1 {
67
+
match args[1].as_str() {
68
+
"--version" | "-V" => {
69
+
println!("quickdid {}", env!("CARGO_PKG_VERSION"));
70
+
return true;
71
+
}
72
+
"--help" | "-h" => {
73
+
println!("QuickDID - AT Protocol Identity Resolver Service");
74
+
println!("Version: {}", env!("CARGO_PKG_VERSION"));
75
+
println!();
76
+
println!("USAGE:");
77
+
println!(" quickdid [OPTIONS]");
78
+
println!();
79
+
println!("OPTIONS:");
80
+
println!(" -h, --help Print help information");
81
+
println!(" -V, --version Print version information");
82
+
println!();
83
+
println!("ENVIRONMENT VARIABLES:");
84
+
println!(
85
+
" HTTP_EXTERNAL External hostname for service endpoints (required)"
86
+
);
87
+
println!(" HTTP_PORT HTTP server port (default: 8080)");
88
+
println!(" PLC_HOSTNAME PLC directory hostname (default: plc.directory)");
89
+
println!(
90
+
" USER_AGENT HTTP User-Agent header (auto-generated with version)"
91
+
);
92
+
println!(" DNS_NAMESERVERS Custom DNS nameservers (comma-separated IPs)");
93
+
println!(
94
+
" CERTIFICATE_BUNDLES Additional CA certificates (comma-separated paths)"
95
+
);
96
+
println!();
97
+
println!(" CACHING:");
98
+
println!(" REDIS_URL Redis URL for handle resolution caching");
99
+
println!(
100
+
" SQLITE_URL SQLite database URL for handle resolution caching"
101
+
);
102
+
println!(
103
+
" CACHE_TTL_MEMORY TTL for in-memory cache in seconds (default: 600)"
104
+
);
105
+
println!(
106
+
" CACHE_TTL_REDIS TTL for Redis cache in seconds (default: 7776000)"
107
+
);
108
+
println!(
109
+
" CACHE_TTL_SQLITE TTL for SQLite cache in seconds (default: 7776000)"
110
+
);
111
+
println!();
112
+
println!(" QUEUE CONFIGURATION:");
113
+
println!(
114
+
" QUEUE_ADAPTER Queue adapter: 'mpsc', 'redis', 'sqlite', 'noop' (default: mpsc)"
115
+
);
116
+
println!(" QUEUE_REDIS_URL Redis URL for queue adapter");
117
+
println!(
118
+
" QUEUE_REDIS_PREFIX Redis key prefix for queues (default: queue:handleresolver:)"
119
+
);
120
+
println!(" QUEUE_REDIS_TIMEOUT Queue blocking timeout in seconds (default: 5)");
121
+
println!(
122
+
" QUEUE_REDIS_DEDUP_ENABLED Enable queue deduplication (default: false)"
123
+
);
124
+
println!(" QUEUE_REDIS_DEDUP_TTL TTL for dedup keys in seconds (default: 60)");
125
+
println!(" QUEUE_WORKER_ID Worker ID for Redis queue (default: worker1)");
126
+
println!(" QUEUE_BUFFER_SIZE Buffer size for MPSC queue (default: 1000)");
127
+
println!(" QUEUE_SQLITE_MAX_SIZE Maximum SQLite queue size (default: 10000)");
128
+
println!();
129
+
println!(" RATE LIMITING:");
130
+
println!(
131
+
" RESOLVER_MAX_CONCURRENT Maximum concurrent resolutions (default: 0 = disabled)"
132
+
);
133
+
println!(
134
+
" RESOLVER_MAX_CONCURRENT_TIMEOUT_MS Timeout for acquiring permits in ms (default: 0 = no timeout)"
135
+
);
136
+
println!();
137
+
println!(" METRICS:");
138
+
println!(
139
+
" METRICS_ADAPTER Metrics adapter: 'noop' or 'statsd' (default: noop)"
140
+
);
141
+
println!(
142
+
" METRICS_STATSD_HOST StatsD host when using statsd adapter (e.g., localhost:8125)"
143
+
);
144
+
println!(
145
+
" METRICS_STATSD_BIND Bind address for StatsD UDP socket (default: [::]:0)"
146
+
);
147
+
println!(" METRICS_PREFIX Prefix for all metrics (default: quickdid)");
148
+
println!(
149
+
" METRICS_TAGS Default tags for metrics (comma-separated key:value pairs)"
150
+
);
151
+
println!();
152
+
println!(" PROACTIVE CACHE REFRESH:");
153
+
println!(
154
+
" PROACTIVE_REFRESH_ENABLED Enable proactive cache refresh (default: false)"
155
+
);
156
+
println!(
157
+
" PROACTIVE_REFRESH_THRESHOLD Threshold as percentage of TTL (0.0-1.0, default: 0.8)"
158
+
);
159
+
println!();
160
+
println!(" JETSTREAM:");
161
+
println!(" JETSTREAM_ENABLED Enable Jetstream consumer (default: false)");
162
+
println!(
163
+
" JETSTREAM_HOSTNAME Jetstream hostname (default: jetstream.atproto.tools)"
164
+
);
165
+
println!();
166
+
println!(
167
+
"For more information, visit: https://github.com/smokesignal.events/quickdid"
168
+
);
169
+
return true;
170
+
}
171
+
_ => {}
172
+
}
173
+
}
174
+
175
+
false
176
+
}
177
+
42
178
#[tokio::main]
43
179
async fn main() -> Result<()> {
180
+
// Handle --version and --help
181
+
if handle_simple_args() {
182
+
return Ok(());
183
+
}
184
+
44
185
// Initialize tracing
45
186
tracing_subscriber::registry()
46
187
.with(
···
51
192
.with(tracing_subscriber::fmt::layer())
52
193
.init();
53
194
54
-
let args = Args::parse();
55
-
let config = Config::from_args(args)?;
195
+
let config = Config::from_env()?;
56
196
57
197
// Validate configuration
58
198
config.validate()?;
59
199
60
200
tracing::info!("Starting QuickDID service on port {}", config.http_port);
61
-
tracing::info!("Service DID: {}", config.service_did);
62
201
tracing::info!(
63
-
"Cache TTL - Memory: {}s, Redis: {}s",
202
+
"Cache TTL - Memory: {}s, Redis: {}s, SQLite: {}s",
64
203
config.cache_ttl_memory,
65
-
config.cache_ttl_redis
204
+
config.cache_ttl_redis,
205
+
config.cache_ttl_sqlite
66
206
);
67
207
68
208
// Parse certificate bundles if provided
···
92
232
// Create DNS resolver
93
233
let dns_resolver = HickoryDnsResolver::create_resolver(dns_nameservers.as_ref());
94
234
95
-
// Process service key
96
-
let private_service_key_data = identify_key(&config.service_key)?;
97
-
let public_service_key_data = to_public(&private_service_key_data)?;
98
-
let public_service_key = public_service_key_data.to_string();
235
+
// Clone DNS resolver for lexicon resolution before wrapping in Arc
236
+
let lexicon_dns_resolver = dns_resolver.clone();
99
237
100
-
// Create service DID document
101
-
let service_document = json!({
102
-
"@context": vec!["https://www.w3.org/ns/did/v1", "https://w3id.org/security/multikey/v1"],
103
-
"id": config.service_did.clone(),
104
-
"verificationMethod": [{
105
-
"id": format!("{}#atproto", config.service_did),
106
-
"type": "Multikey",
107
-
"controller": config.service_did.clone(),
108
-
"publicKeyMultibase": public_service_key
109
-
}],
110
-
"service": []
111
-
});
238
+
// Wrap DNS resolver in Arc for handle resolution
239
+
let dns_resolver_arc = Arc::new(dns_resolver);
112
240
113
-
// Create DNS resolver Arc for sharing
114
-
let dns_resolver_arc = Arc::new(dns_resolver);
241
+
// Create metrics publisher based on configuration
242
+
let metrics_publisher = create_metrics_publisher(&config).map_err(|e| {
243
+
tracing::error!("Failed to create metrics publisher: {}", e);
244
+
anyhow::anyhow!("Failed to create metrics publisher: {}", e)
245
+
})?;
246
+
247
+
tracing::info!(
248
+
"Metrics publisher created with {} adapter",
249
+
config.metrics_adapter
250
+
);
251
+
252
+
metrics_publisher.gauge("server", 1).await;
115
253
116
254
// Create base handle resolver using factory function
117
-
let base_handle_resolver = create_base_resolver(dns_resolver_arc.clone(), http_client.clone());
255
+
let mut base_handle_resolver = create_base_resolver(
256
+
dns_resolver_arc.clone(),
257
+
http_client.clone(),
258
+
metrics_publisher.clone(),
259
+
);
260
+
261
+
// Apply rate limiting if configured
262
+
if config.resolver_max_concurrent > 0 {
263
+
let timeout_info = if config.resolver_max_concurrent_timeout_ms > 0 {
264
+
format!(", {}ms timeout", config.resolver_max_concurrent_timeout_ms)
265
+
} else {
266
+
String::new()
267
+
};
268
+
tracing::info!(
269
+
"Applying rate limiting to handle resolver (max {} concurrent resolutions{})",
270
+
config.resolver_max_concurrent,
271
+
timeout_info
272
+
);
273
+
base_handle_resolver = create_rate_limited_resolver_with_timeout(
274
+
base_handle_resolver,
275
+
config.resolver_max_concurrent,
276
+
config.resolver_max_concurrent_timeout_ms,
277
+
metrics_publisher.clone(),
278
+
);
279
+
}
118
280
119
281
// Create Redis pool if configured
120
282
let redis_pool = config
···
122
284
.as_ref()
123
285
.and_then(|url| try_create_redis_pool(url, "handle resolver cache"));
124
286
125
-
// Create handle resolver with Redis caching if available, otherwise use in-memory caching
126
-
let handle_resolver: Arc<dyn quickdid::handle_resolver::HandleResolver> =
127
-
if let Some(pool) = redis_pool {
128
-
tracing::info!(
129
-
"Using Redis-backed handle resolver with {}-second cache TTL",
130
-
config.cache_ttl_redis
131
-
);
132
-
create_redis_resolver_with_ttl(base_handle_resolver, pool, config.cache_ttl_redis)
133
-
} else {
134
-
tracing::info!(
135
-
"Using in-memory handle resolver with {}-second cache TTL",
136
-
config.cache_ttl_memory
137
-
);
138
-
create_caching_resolver(base_handle_resolver, config.cache_ttl_memory)
139
-
};
287
+
// Create SQLite pool if configured
288
+
let sqlite_pool = if let Some(url) = config.sqlite_url.as_ref() {
289
+
try_create_sqlite_pool(url, "handle resolver cache").await
290
+
} else {
291
+
None
292
+
};
140
293
141
294
// Create task tracker and cancellation token
142
295
let tracker = TaskTracker::new();
143
296
let token = CancellationToken::new();
144
297
145
-
// Setup background handle resolution task and get the queue adapter
298
+
// Create the queue adapter first (needed for proactive refresh)
146
299
let handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>> = {
147
300
// Create queue adapter based on configuration
148
301
let adapter: Arc<dyn QueueAdapter<HandleResolutionWork>> = match config
···
159
312
if let Some(url) = queue_redis_url {
160
313
if let Some(pool) = try_create_redis_pool(url, "queue adapter") {
161
314
tracing::info!(
162
-
"Creating Redis queue adapter with prefix: {}",
163
-
config.queue_redis_prefix
315
+
"Creating Redis queue adapter with prefix: {}, dedup: {}, dedup_ttl: {}s",
316
+
config.queue_redis_prefix,
317
+
config.queue_redis_dedup_enabled,
318
+
config.queue_redis_dedup_ttl
164
319
);
165
-
create_redis_queue::<HandleResolutionWork>(
166
-
pool,
167
-
config.queue_worker_id.clone(),
168
-
config.queue_redis_prefix.clone(),
169
-
config.queue_redis_timeout,
170
-
)
320
+
if config.queue_redis_dedup_enabled {
321
+
create_redis_queue_with_dedup::<HandleResolutionWork>(
322
+
pool,
323
+
config.queue_worker_id.clone(),
324
+
config.queue_redis_prefix.clone(),
325
+
config.queue_redis_timeout,
326
+
true,
327
+
config.queue_redis_dedup_ttl,
328
+
)
329
+
} else {
330
+
create_redis_queue::<HandleResolutionWork>(
331
+
pool,
332
+
config.queue_worker_id.clone(),
333
+
config.queue_redis_prefix.clone(),
334
+
config.queue_redis_timeout,
335
+
)
336
+
}
171
337
} else {
172
338
tracing::warn!("Falling back to MPSC queue adapter");
173
339
// Fall back to MPSC if Redis fails
···
184
350
create_noop_queue::<HandleResolutionWork>()
185
351
}
186
352
}
353
+
"sqlite" => {
354
+
// Use SQLite adapter
355
+
if let Some(url) = config.sqlite_url.as_ref() {
356
+
if let Some(pool) = try_create_sqlite_pool(url, "queue adapter").await {
357
+
if config.queue_sqlite_max_size > 0 {
358
+
tracing::info!(
359
+
"Creating SQLite queue adapter with work shedding (max_size: {})",
360
+
config.queue_sqlite_max_size
361
+
);
362
+
create_sqlite_queue_with_max_size::<HandleResolutionWork>(
363
+
pool,
364
+
config.queue_sqlite_max_size,
365
+
)
366
+
} else {
367
+
tracing::info!("Creating SQLite queue adapter (unlimited size)");
368
+
create_sqlite_queue::<HandleResolutionWork>(pool)
369
+
}
370
+
} else {
371
+
tracing::warn!(
372
+
"Failed to create SQLite pool for queue, falling back to MPSC queue adapter"
373
+
);
374
+
// Fall back to MPSC if SQLite fails
375
+
let (handle_sender, handle_receiver) =
376
+
tokio::sync::mpsc::channel::<HandleResolutionWork>(
377
+
config.queue_buffer_size,
378
+
);
379
+
create_mpsc_queue_from_channel(handle_sender, handle_receiver)
380
+
}
381
+
} else {
382
+
tracing::warn!(
383
+
"SQLite queue adapter requested but no SQLite URL configured, using no-op adapter"
384
+
);
385
+
create_noop_queue::<HandleResolutionWork>()
386
+
}
387
+
}
187
388
"mpsc" => {
188
389
// Use MPSC adapter
189
390
tracing::info!(
···
209
410
}
210
411
};
211
412
212
-
// Keep a reference to the adapter for the AppContext
213
-
let adapter_for_context = adapter.clone();
413
+
adapter
414
+
};
415
+
416
+
// Create handle resolver with cache priority: Redis > SQLite > In-memory
417
+
let (mut handle_resolver, cache_ttl): (
418
+
Arc<dyn quickdid::handle_resolver::HandleResolver>,
419
+
u64,
420
+
) = if let Some(ref pool) = redis_pool {
421
+
tracing::info!(
422
+
"Using Redis-backed handle resolver with {}-second cache TTL",
423
+
config.cache_ttl_redis
424
+
);
425
+
(
426
+
create_redis_resolver_with_ttl(
427
+
base_handle_resolver,
428
+
pool.clone(),
429
+
config.cache_ttl_redis,
430
+
metrics_publisher.clone(),
431
+
),
432
+
config.cache_ttl_redis,
433
+
)
434
+
} else if let Some(pool) = sqlite_pool {
435
+
tracing::info!(
436
+
"Using SQLite-backed handle resolver with {}-second cache TTL",
437
+
config.cache_ttl_sqlite
438
+
);
439
+
(
440
+
create_sqlite_resolver_with_ttl(
441
+
base_handle_resolver,
442
+
pool,
443
+
config.cache_ttl_sqlite,
444
+
metrics_publisher.clone(),
445
+
),
446
+
config.cache_ttl_sqlite,
447
+
)
448
+
} else {
449
+
tracing::info!(
450
+
"Using in-memory handle resolver with {}-second cache TTL",
451
+
config.cache_ttl_memory
452
+
);
453
+
(
454
+
create_caching_resolver(
455
+
base_handle_resolver,
456
+
config.cache_ttl_memory,
457
+
metrics_publisher.clone(),
458
+
),
459
+
config.cache_ttl_memory,
460
+
)
461
+
};
462
+
463
+
// Apply proactive refresh if enabled
464
+
if config.proactive_refresh_enabled && !matches!(config.queue_adapter.as_str(), "noop" | "none")
465
+
{
466
+
tracing::info!(
467
+
"Enabling proactive cache refresh with {}% threshold",
468
+
(config.proactive_refresh_threshold * 100.0) as u32
469
+
);
470
+
handle_resolver = create_proactive_refresh_resolver_with_metrics(
471
+
handle_resolver,
472
+
handle_queue.clone(),
473
+
metrics_publisher.clone(),
474
+
cache_ttl,
475
+
config.proactive_refresh_threshold,
476
+
);
477
+
} else if config.proactive_refresh_enabled {
478
+
tracing::warn!(
479
+
"Proactive refresh enabled but queue adapter is no-op, skipping proactive refresh"
480
+
);
481
+
}
482
+
483
+
// Create lexicon resolver with Redis caching if available
484
+
let lexicon_resolver: Arc<dyn LexiconResolver> = {
485
+
let base_lexicon_resolver: Arc<dyn LexiconResolver> = Arc::new(
486
+
DefaultLexiconResolver::new(http_client.clone(), lexicon_dns_resolver),
487
+
);
488
+
489
+
if let Some(ref pool) = redis_pool {
490
+
tracing::info!(
491
+
"Using Redis-backed lexicon resolver with {}-second cache TTL",
492
+
config.cache_ttl_redis
493
+
);
494
+
create_redis_lexicon_resolver_with_ttl(
495
+
base_lexicon_resolver,
496
+
pool.clone(),
497
+
config.cache_ttl_redis,
498
+
metrics_publisher.clone(),
499
+
)
500
+
} else {
501
+
tracing::info!("Using base lexicon resolver without caching");
502
+
base_lexicon_resolver
503
+
}
504
+
};
505
+
506
+
// Setup background handle resolution task
507
+
{
508
+
let adapter_for_task = handle_queue.clone();
214
509
215
510
// Only spawn handle resolver task if not using noop adapter
216
511
if !matches!(config.queue_adapter.as_str(), "noop" | "none") {
···
221
516
222
517
// Create and start handle resolver task
223
518
let handle_task = create_handle_resolver_task_with_config(
224
-
adapter,
519
+
adapter_for_task,
225
520
handle_resolver.clone(),
226
521
token.clone(),
227
522
handle_task_config,
523
+
metrics_publisher.clone(),
228
524
);
229
525
230
526
// Spawn the handle resolver task
···
257
553
} else {
258
554
tracing::info!("Background handle resolution task disabled (using no-op adapter)");
259
555
}
260
-
261
-
// Return the adapter to be used in AppContext
262
-
adapter_for_context
263
556
};
264
557
265
558
// Create app context with the queue adapter
266
559
let app_context = AppContext::new(
267
-
service_document,
268
-
config.service_did.clone(),
269
560
handle_resolver.clone(),
270
561
handle_queue,
562
+
lexicon_resolver,
563
+
metrics_publisher.clone(),
564
+
config.etag_seed.clone(),
565
+
config.cache_control_header.clone(),
566
+
config.static_files_dir.clone(),
271
567
);
272
568
273
569
// Create router
···
314
610
signal_token.cancel();
315
611
tracing::info!("Signal handler task completed");
316
612
});
613
+
}
614
+
615
+
// Start Jetstream consumer if enabled
616
+
if config.jetstream_enabled {
617
+
let jetstream_resolver = handle_resolver.clone();
618
+
let jetstream_metrics = metrics_publisher.clone();
619
+
let jetstream_hostname = config.jetstream_hostname.clone();
620
+
let jetstream_user_agent = config.user_agent.clone();
621
+
622
+
spawn_cancellable_task(
623
+
&tracker,
624
+
token.clone(),
625
+
"jetstream_consumer",
626
+
move |cancel_token| async move {
627
+
tracing::info!(hostname = %jetstream_hostname, "Starting Jetstream consumer");
628
+
629
+
// Create event handler
630
+
let event_handler = Arc::new(QuickDidEventHandler::new(
631
+
jetstream_resolver,
632
+
jetstream_metrics.clone(),
633
+
));
634
+
635
+
// Reconnection loop
636
+
let mut reconnect_count = 0u32;
637
+
let max_reconnects_per_minute = 5;
638
+
let reconnect_window = std::time::Duration::from_secs(60);
639
+
let mut last_disconnect = std::time::Instant::now() - reconnect_window;
640
+
641
+
while !cancel_token.is_cancelled() {
642
+
let now = std::time::Instant::now();
643
+
if now.duration_since(last_disconnect) < reconnect_window {
644
+
reconnect_count += 1;
645
+
if reconnect_count > max_reconnects_per_minute {
646
+
tracing::warn!(
647
+
count = reconnect_count,
648
+
"Too many Jetstream reconnects, waiting 60 seconds"
649
+
);
650
+
tokio::time::sleep(reconnect_window).await;
651
+
reconnect_count = 0;
652
+
last_disconnect = now;
653
+
continue;
654
+
}
655
+
} else {
656
+
reconnect_count = 0;
657
+
}
658
+
659
+
// Create consumer configuration
660
+
let consumer_config = ConsumerTaskConfig {
661
+
user_agent: jetstream_user_agent.clone(),
662
+
compression: false,
663
+
zstd_dictionary_location: String::new(),
664
+
jetstream_hostname: jetstream_hostname.clone(),
665
+
// Listen to the "community.lexicon.collection.fake" collection
666
+
// so that we keep an active connection open but only for
667
+
// account and identity events.
668
+
collections: vec!["community.lexicon.collection.fake".to_string()],
669
+
dids: vec![],
670
+
max_message_size_bytes: None,
671
+
cursor: None,
672
+
require_hello: true,
673
+
};
674
+
675
+
let consumer = JetstreamConsumer::new(consumer_config);
676
+
677
+
// Register event handler
678
+
if let Err(e) = consumer.register_handler(event_handler.clone()).await {
679
+
tracing::error!(error = ?e, "Failed to register Jetstream event handler");
680
+
continue;
681
+
}
682
+
683
+
// Run consumer with cancellation support
684
+
match consumer.run_background(cancel_token.clone()).await {
685
+
Ok(()) => {
686
+
tracing::info!("Jetstream consumer stopped normally");
687
+
if cancel_token.is_cancelled() {
688
+
break;
689
+
}
690
+
last_disconnect = std::time::Instant::now();
691
+
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
692
+
}
693
+
Err(e) => {
694
+
tracing::error!(error = ?e, "Jetstream consumer connection failed, will reconnect");
695
+
jetstream_metrics.incr("jetstream.connection.error").await;
696
+
last_disconnect = std::time::Instant::now();
697
+
698
+
if !cancel_token.is_cancelled() {
699
+
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
700
+
}
701
+
}
702
+
}
703
+
}
704
+
705
+
tracing::info!("Jetstream consumer task shutting down");
706
+
Ok(())
707
+
},
708
+
);
709
+
} else {
710
+
tracing::info!("Jetstream consumer disabled");
317
711
}
318
712
319
713
// Start HTTP server with cancellation support
+11
-3
src/cache.rs
···
1
1
//! Redis cache utilities for QuickDID
2
2
3
-
use anyhow::Result;
4
3
use deadpool_redis::{Config, Pool, Runtime};
4
+
use thiserror::Error;
5
+
6
+
/// Cache-specific errors
7
+
#[derive(Debug, Error)]
8
+
pub enum CacheError {
9
+
/// Redis pool creation failed
10
+
#[error("error-quickdid-cache-1 Redis pool creation failed: {0}")]
11
+
RedisPoolCreationFailed(String),
12
+
}
5
13
6
14
/// Create a Redis connection pool from a Redis URL.
7
15
///
···
14
22
/// Returns an error if:
15
23
/// - The Redis URL is invalid
16
24
/// - Pool creation fails
17
-
pub fn create_redis_pool(redis_url: &str) -> Result<Pool> {
25
+
pub fn create_redis_pool(redis_url: &str) -> Result<Pool, CacheError> {
18
26
let config = Config::from_url(redis_url);
19
27
let pool = config
20
28
.create_pool(Some(Runtime::Tokio1))
21
-
.map_err(|e| anyhow::anyhow!("error-quickdid-cache-1 Redis pool creation failed: {}", e))?;
29
+
.map_err(|e| CacheError::RedisPoolCreationFailed(e.to_string()))?;
22
30
Ok(pool)
23
31
}
+273
-335
src/config.rs
···
5
5
//!
6
6
//! ## Configuration Sources
7
7
//!
8
-
//! Configuration can be provided through:
9
-
//! - Environment variables (highest priority)
10
-
//! - Command-line arguments
11
-
//! - Default values (lowest priority)
8
+
//! Configuration is provided exclusively through environment variables following
9
+
//! the 12-factor app methodology.
12
10
//!
13
11
//! ## Example
14
12
//!
15
13
//! ```bash
16
14
//! # Minimal configuration
17
15
//! HTTP_EXTERNAL=quickdid.example.com \
18
-
//! SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
19
16
//! quickdid
20
17
//!
21
18
//! # Full configuration with Redis and custom settings
22
19
//! HTTP_EXTERNAL=quickdid.example.com \
23
-
//! SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
24
20
//! HTTP_PORT=3000 \
25
21
//! REDIS_URL=redis://localhost:6379 \
26
22
//! CACHE_TTL_MEMORY=300 \
···
30
26
//! quickdid
31
27
//! ```
32
28
33
-
use atproto_identity::config::optional_env;
34
-
use clap::Parser;
29
+
use std::env;
35
30
use thiserror::Error;
36
31
37
32
/// Configuration-specific errors following the QuickDID error format
···
41
36
pub enum ConfigError {
42
37
/// Missing required environment variable or command-line argument
43
38
///
44
-
/// Example: When SERVICE_KEY or HTTP_EXTERNAL are not provided
39
+
/// Example: When HTTP_EXTERNAL is not provided
45
40
#[error("error-quickdid-config-1 Missing required environment variable: {0}")]
46
41
MissingRequired(String),
47
42
48
43
/// Invalid configuration value that doesn't meet expected format or constraints
49
44
///
50
-
/// Example: Invalid QUEUE_ADAPTER value (must be 'mpsc', 'redis', or 'noop')
45
+
/// Example: Invalid QUEUE_ADAPTER value (must be 'mpsc', 'redis', 'sqlite', 'noop', or 'none')
51
46
#[error("error-quickdid-config-2 Invalid configuration value: {0}")]
52
47
InvalidValue(String),
53
48
···
64
59
InvalidTimeout(String),
65
60
}
66
61
67
-
#[derive(Parser, Clone)]
68
-
#[command(
69
-
name = "quickdid",
70
-
about = "QuickDID - AT Protocol Identity Resolver Service",
71
-
long_about = "
72
-
A fast identity resolution service for the AT Protocol ecosystem.
73
-
This service provides identity resolution endpoints and handle resolution
74
-
capabilities with in-memory caching.
75
-
76
-
FEATURES:
77
-
- AT Protocol identity resolution and DID document management
78
-
- Handle resolution with in-memory caching
79
-
- DID:web identity publishing via .well-known endpoints
80
-
- Health check endpoint
81
-
82
-
ENVIRONMENT VARIABLES:
83
-
SERVICE_KEY Private key for service identity (required)
84
-
HTTP_EXTERNAL External hostname for service endpoints (required)
85
-
HTTP_PORT HTTP server port (default: 8080)
86
-
PLC_HOSTNAME PLC directory hostname (default: plc.directory)
87
-
USER_AGENT HTTP User-Agent header (auto-generated with version)
88
-
DNS_NAMESERVERS Custom DNS nameservers (comma-separated IPs)
89
-
CERTIFICATE_BUNDLES Additional CA certificates (comma-separated paths)
90
-
91
-
CACHING:
92
-
REDIS_URL Redis URL for handle resolution caching (optional)
93
-
CACHE_TTL_MEMORY TTL for in-memory cache in seconds (default: 600)
94
-
CACHE_TTL_REDIS TTL for Redis cache in seconds (default: 7776000 = 90 days)
95
-
96
-
QUEUE CONFIGURATION:
97
-
QUEUE_ADAPTER Queue adapter: 'mpsc', 'redis', 'noop', 'none' (default: mpsc)
98
-
QUEUE_REDIS_URL Redis URL for queue adapter (uses REDIS_URL if not set)
99
-
QUEUE_REDIS_PREFIX Redis key prefix for queues (default: queue:handleresolver:)
100
-
QUEUE_REDIS_TIMEOUT Queue blocking timeout in seconds (default: 5)
101
-
QUEUE_WORKER_ID Worker ID for Redis queue (default: worker1)
102
-
QUEUE_BUFFER_SIZE Buffer size for MPSC queue (default: 1000)
103
-
"
104
-
)]
105
-
/// Command-line arguments and environment variables configuration
106
-
pub struct Args {
107
-
/// HTTP server port to bind to
108
-
///
109
-
/// Examples: "8080", "3000", "80"
110
-
/// Constraints: Must be a valid port number (1-65535)
111
-
#[arg(long, env = "HTTP_PORT", default_value = "8080")]
112
-
pub http_port: String,
113
-
114
-
/// PLC directory hostname for DID resolution
115
-
///
116
-
/// Examples: "plc.directory", "test.plc.directory"
117
-
/// Use "plc.directory" for production
118
-
#[arg(long, env = "PLC_HOSTNAME", default_value = "plc.directory")]
119
-
pub plc_hostname: String,
120
-
121
-
/// External hostname for service endpoints (REQUIRED)
122
-
///
123
-
/// Examples:
124
-
/// - "quickdid.example.com" (standard)
125
-
/// - "quickdid.example.com:8080" (with port)
126
-
/// - "localhost:3007" (development)
127
-
#[arg(long, env = "HTTP_EXTERNAL")]
128
-
pub http_external: Option<String>,
129
-
130
-
/// Private key for service identity in DID format (REQUIRED)
131
-
///
132
-
/// Examples:
133
-
/// - "did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK"
134
-
/// - "did:plc:xyz123abc456"
135
-
///
136
-
/// SECURITY: Keep this key secure and never commit to version control
137
-
#[arg(long, env = "SERVICE_KEY")]
138
-
pub service_key: Option<String>,
139
-
140
-
/// HTTP User-Agent header for outgoing requests
141
-
///
142
-
/// Example: `quickdid/1.0.0 (+https://quickdid.example.com)`
143
-
/// Default: Auto-generated with current version
144
-
#[arg(long, env = "USER_AGENT")]
145
-
pub user_agent: Option<String>,
146
-
147
-
/// Custom DNS nameservers (comma-separated IP addresses)
148
-
///
149
-
/// Examples:
150
-
/// - "8.8.8.8,8.8.4.4" (Google DNS)
151
-
/// - "1.1.1.1,1.0.0.1" (Cloudflare DNS)
152
-
/// - "192.168.1.1" (Local DNS)
153
-
#[arg(long, env = "DNS_NAMESERVERS")]
154
-
pub dns_nameservers: Option<String>,
155
-
156
-
/// Additional CA certificates (comma-separated file paths)
157
-
///
158
-
/// Examples:
159
-
/// - "/etc/ssl/certs/custom-ca.pem"
160
-
/// - "/certs/ca1.pem,/certs/ca2.pem"
161
-
///
162
-
/// Use for custom or internal certificate authorities
163
-
#[arg(long, env = "CERTIFICATE_BUNDLES")]
164
-
pub certificate_bundles: Option<String>,
165
-
166
-
/// Redis connection URL for caching
167
-
///
168
-
/// Examples:
169
-
/// - "redis://localhost:6379/0" (local, no auth)
170
-
/// - "redis://user:pass@redis.example.com:6379/0" (with auth)
171
-
/// - "rediss://secure-redis.example.com:6380/0" (TLS)
172
-
///
173
-
/// Benefits: Persistent cache, distributed caching, better performance
174
-
#[arg(long, env = "REDIS_URL")]
175
-
pub redis_url: Option<String>,
176
-
177
-
/// Queue adapter type for background processing
178
-
///
179
-
/// Values:
180
-
/// - "mpsc": In-memory multi-producer single-consumer queue
181
-
/// - "redis": Redis-backed distributed queue
182
-
/// - "noop": Disable queue processing (for testing)
183
-
/// - "none": Alias for "noop"
184
-
///
185
-
/// Default: "mpsc" for single-instance deployments
186
-
#[arg(long, env = "QUEUE_ADAPTER", default_value = "mpsc")]
187
-
pub queue_adapter: String,
188
-
189
-
/// Redis URL specifically for queue operations
190
-
///
191
-
/// Falls back to REDIS_URL if not specified
192
-
/// Use when separating cache and queue Redis instances
193
-
#[arg(long, env = "QUEUE_REDIS_URL")]
194
-
pub queue_redis_url: Option<String>,
195
-
196
-
/// Redis key prefix for queue operations
197
-
///
198
-
/// Examples:
199
-
/// - "queue:handleresolver:" (default)
200
-
/// - "prod:queue:hr:" (environment-specific)
201
-
/// - "quickdid:v1:queue:" (version-specific)
202
-
///
203
-
/// Use to namespace queues when sharing Redis
204
-
#[arg(
205
-
long,
206
-
env = "QUEUE_REDIS_PREFIX",
207
-
default_value = "queue:handleresolver:"
208
-
)]
209
-
pub queue_redis_prefix: String,
210
-
211
-
/// Worker ID for Redis queue operations
212
-
///
213
-
/// Examples: "worker-001", "prod-us-east-1", "quickdid-1"
214
-
/// Default: "worker1"
215
-
///
216
-
/// Use for identifying specific workers in logs
217
-
#[arg(long, env = "QUEUE_WORKER_ID")]
218
-
pub queue_worker_id: Option<String>,
219
-
220
-
/// Buffer size for MPSC queue
221
-
///
222
-
/// Range: 100-100000 (recommended)
223
-
/// Default: 1000
224
-
///
225
-
/// Increase for high-traffic deployments
226
-
#[arg(long, env = "QUEUE_BUFFER_SIZE", default_value = "1000")]
227
-
pub queue_buffer_size: usize,
228
-
229
-
/// TTL for in-memory cache in seconds
230
-
///
231
-
/// Range: 60-3600 (recommended)
232
-
/// Default: 600 (10 minutes)
233
-
///
234
-
/// Lower values = fresher data, more resolution requests
235
-
/// Higher values = better performance, potentially stale data
236
-
#[arg(long, env = "CACHE_TTL_MEMORY", default_value = "600")]
237
-
pub cache_ttl_memory: u64,
238
-
239
-
/// TTL for Redis cache in seconds
240
-
///
241
-
/// Range: 3600-31536000 (1 hour to 1 year)
242
-
/// Default: 7776000 (90 days)
243
-
///
244
-
/// Recommendation: 86400 (1 day) for frequently changing data
245
-
#[arg(long, env = "CACHE_TTL_REDIS", default_value = "7776000")]
246
-
pub cache_ttl_redis: u64,
62
+
/// Helper function to get an environment variable with an optional default
63
+
fn get_env_or_default(key: &str, default: Option<&str>) -> Option<String> {
64
+
match env::var(key) {
65
+
Ok(val) if !val.is_empty() => Some(val),
66
+
_ => default.map(String::from),
67
+
}
68
+
}
247
69
248
-
/// Redis blocking timeout for queue operations in seconds
249
-
///
250
-
/// Range: 1-60 (recommended)
251
-
/// Default: 5
252
-
///
253
-
/// Lower values = more responsive to shutdown
254
-
/// Higher values = less Redis polling overhead
255
-
#[arg(long, env = "QUEUE_REDIS_TIMEOUT", default_value = "5")]
256
-
pub queue_redis_timeout: u64,
70
+
/// Helper function to parse an environment variable as a specific type
71
+
fn parse_env<T: std::str::FromStr>(key: &str, default: T) -> Result<T, ConfigError>
72
+
where
73
+
T::Err: std::fmt::Display,
74
+
{
75
+
match env::var(key) {
76
+
Ok(val) if !val.is_empty() => val
77
+
.parse::<T>()
78
+
.map_err(|e| ConfigError::InvalidValue(format!("{}: {}", key, e))),
79
+
_ => Ok(default),
80
+
}
257
81
}
258
82
259
83
/// Validated configuration for QuickDID service
260
84
///
261
85
/// This struct contains all configuration after validation and processing.
262
-
/// Use `Config::from_args()` to create from command-line arguments and environment variables.
86
+
/// Use `Config::from_env()` to create from environment variables.
263
87
///
264
88
/// ## Example
265
89
///
266
90
/// ```rust,no_run
267
-
/// use quickdid::config::{Args, Config};
268
-
/// use clap::Parser;
91
+
/// use quickdid::config::Config;
269
92
///
270
93
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
271
-
/// let args = Args::parse();
272
-
/// let config = Config::from_args(args)?;
94
+
/// let config = Config::from_env()?;
273
95
/// config.validate()?;
274
96
///
275
97
/// println!("Service running at: {}", config.http_external);
276
-
/// println!("Service DID: {}", config.service_did);
277
98
/// # Ok(())
278
99
/// # }
279
100
/// ```
···
288
109
/// External hostname for service endpoints (e.g., "quickdid.example.com")
289
110
pub http_external: String,
290
111
291
-
/// Private key for service identity (e.g., "did:key:z42tm...")
292
-
pub service_key: String,
293
-
294
112
/// HTTP User-Agent for outgoing requests (e.g., "quickdid/1.0.0 (+https://...)")
295
113
pub user_agent: String,
296
-
297
-
/// Derived service DID (e.g., "did:web:quickdid.example.com")
298
-
/// Automatically generated from http_external with proper encoding
299
-
pub service_did: String,
300
114
301
115
/// Custom DNS nameservers, comma-separated (e.g., "8.8.8.8,8.8.4.4")
302
116
pub dns_nameservers: Option<String>,
···
307
121
/// Redis URL for caching (e.g., "redis://localhost:6379/0")
308
122
pub redis_url: Option<String>,
309
123
310
-
/// Queue adapter type: "mpsc", "redis", or "noop"
124
+
/// SQLite database URL for caching (e.g., "sqlite:./quickdid.db")
125
+
pub sqlite_url: Option<String>,
126
+
127
+
/// Queue adapter type: "mpsc", "redis", "sqlite", or "noop"
311
128
pub queue_adapter: String,
312
129
313
130
/// Redis URL for queue operations (falls back to redis_url)
···
328
145
/// TTL for Redis cache in seconds (e.g., 7776000 = 90 days)
329
146
pub cache_ttl_redis: u64,
330
147
148
+
/// TTL for SQLite cache in seconds (e.g., 7776000 = 90 days)
149
+
pub cache_ttl_sqlite: u64,
150
+
331
151
/// Redis blocking timeout for queue operations in seconds (e.g., 5)
332
152
pub queue_redis_timeout: u64,
153
+
154
+
/// Enable deduplication for Redis queue to prevent duplicate handles
155
+
/// Default: false
156
+
pub queue_redis_dedup_enabled: bool,
157
+
158
+
/// TTL for Redis queue deduplication keys in seconds
159
+
/// Default: 60 (1 minute)
160
+
pub queue_redis_dedup_ttl: u64,
161
+
162
+
/// Maximum queue size for SQLite adapter work shedding (e.g., 10000)
163
+
/// When exceeded, oldest entries are deleted to maintain this limit.
164
+
/// Set to 0 to disable work shedding (unlimited queue size).
165
+
pub queue_sqlite_max_size: u64,
166
+
167
+
/// Maximum concurrent handle resolutions allowed (rate limiting).
168
+
/// When set to > 0, enables rate limiting using a semaphore.
169
+
/// Default: 0 (disabled)
170
+
pub resolver_max_concurrent: usize,
171
+
172
+
/// Timeout for acquiring rate limit permit in milliseconds.
173
+
/// When set to > 0, requests will timeout if they can't acquire a permit within this time.
174
+
/// Default: 0 (no timeout)
175
+
pub resolver_max_concurrent_timeout_ms: u64,
176
+
177
+
/// Seed value for ETAG generation to allow cache invalidation.
178
+
/// This value is incorporated into ETAG checksums, allowing server admins
179
+
/// to invalidate client-cached responses after major changes.
180
+
/// Default: application version
181
+
pub etag_seed: String,
182
+
183
+
/// Maximum age for HTTP cache control in seconds.
184
+
/// When set to 0, Cache-Control header is disabled.
185
+
/// Default: 86400 (24 hours)
186
+
pub cache_max_age: u64,
187
+
188
+
/// Stale-if-error directive for Cache-Control in seconds.
189
+
/// Allows stale content to be served if backend errors occur.
190
+
/// Default: 172800 (48 hours)
191
+
pub cache_stale_if_error: u64,
192
+
193
+
/// Stale-while-revalidate directive for Cache-Control in seconds.
194
+
/// Allows stale content to be served while fetching fresh content.
195
+
/// Default: 86400 (24 hours)
196
+
pub cache_stale_while_revalidate: u64,
197
+
198
+
/// Max-stale directive for Cache-Control in seconds.
199
+
/// Maximum time client will accept stale responses.
200
+
/// Default: 172800 (48 hours)
201
+
pub cache_max_stale: u64,
202
+
203
+
/// Min-fresh directive for Cache-Control in seconds.
204
+
/// Minimum time response must remain fresh.
205
+
/// Default: 3600 (1 hour)
206
+
pub cache_min_fresh: u64,
207
+
208
+
/// Pre-calculated Cache-Control header value.
209
+
/// Calculated at startup for efficiency.
210
+
/// None if cache_max_age is 0 (disabled).
211
+
pub cache_control_header: Option<String>,
212
+
213
+
/// Metrics adapter type: "noop" or "statsd"
214
+
/// Default: "noop" (no metrics collection)
215
+
pub metrics_adapter: String,
216
+
217
+
/// StatsD host for metrics collection (e.g., "localhost:8125")
218
+
/// Required when metrics_adapter is "statsd"
219
+
pub metrics_statsd_host: Option<String>,
220
+
221
+
/// Bind address for StatsD UDP socket (e.g., "0.0.0.0:0" for IPv4 or "[::]:0" for IPv6)
222
+
/// Default: "[::]:0" (IPv6 any address, random port)
223
+
pub metrics_statsd_bind: String,
224
+
225
+
/// Metrics prefix for all metrics (e.g., "quickdid")
226
+
/// Default: "quickdid"
227
+
pub metrics_prefix: String,
228
+
229
+
/// Default tags for all metrics (comma-separated key:value pairs)
230
+
/// Example: "env:production,service:quickdid"
231
+
pub metrics_tags: Option<String>,
232
+
233
+
/// Enable proactive cache refresh for frequently accessed handles.
234
+
/// When enabled, cache entries that have reached the refresh threshold
235
+
/// will be queued for background refresh to keep the cache warm.
236
+
/// Default: false
237
+
pub proactive_refresh_enabled: bool,
238
+
239
+
/// Threshold as a percentage (0.0-1.0) of cache TTL when to trigger proactive refresh.
240
+
/// For example, 0.8 means refresh when an entry has lived for 80% of its TTL.
241
+
/// Default: 0.8 (80%)
242
+
pub proactive_refresh_threshold: f64,
243
+
244
+
/// Directory path for serving static files.
245
+
/// When set, the root handler will serve files from this directory.
246
+
/// Default: "www" (relative to working directory)
247
+
pub static_files_dir: String,
248
+
249
+
/// Enable Jetstream consumer for AT Protocol events.
250
+
/// When enabled, the service will consume Account and Identity events
251
+
/// to maintain cache consistency.
252
+
/// Default: false
253
+
pub jetstream_enabled: bool,
254
+
255
+
/// Jetstream WebSocket hostname for consuming AT Protocol events.
256
+
/// Example: "jetstream.atproto.tools" or "jetstream1.us-west.bsky.network"
257
+
/// Default: "jetstream.atproto.tools"
258
+
pub jetstream_hostname: String,
333
259
}
334
260
335
261
impl Config {
336
-
/// Create a validated Config from command-line arguments and environment variables
262
+
/// Create a validated Config from environment variables
337
263
///
338
264
/// This method:
339
-
/// 1. Processes command-line arguments with environment variable fallbacks
340
-
/// 2. Validates required fields (HTTP_EXTERNAL and SERVICE_KEY)
341
-
/// 3. Generates derived values (service_did from http_external)
342
-
/// 4. Applies defaults where appropriate
343
-
///
344
-
/// ## Priority Order
345
-
///
346
-
/// 1. Command-line arguments (highest priority)
347
-
/// 2. Environment variables
348
-
/// 3. Default values (lowest priority)
265
+
/// 1. Reads configuration from environment variables
266
+
/// 2. Validates required fields (HTTP_EXTERNAL)
267
+
/// 3. Applies defaults where appropriate
349
268
///
350
269
/// ## Example
351
270
///
352
271
/// ```rust,no_run
353
-
/// use quickdid::config::{Args, Config};
354
-
/// use clap::Parser;
272
+
/// use quickdid::config::Config;
355
273
///
356
274
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
357
-
/// // Parse from environment and command-line
358
-
/// let args = Args::parse();
359
-
/// let config = Config::from_args(args)?;
275
+
/// // Parse from environment variables
276
+
/// let config = Config::from_env()?;
360
277
///
361
-
/// // The service DID is automatically generated from HTTP_EXTERNAL
362
-
/// assert!(config.service_did.starts_with("did:web:"));
363
278
/// # Ok(())
364
279
/// # }
365
280
/// ```
···
368
283
///
369
284
/// Returns `ConfigError::MissingRequired` if:
370
285
/// - HTTP_EXTERNAL is not provided
371
-
/// - SERVICE_KEY is not provided
372
-
pub fn from_args(args: Args) -> Result<Self, ConfigError> {
373
-
let http_external = args
374
-
.http_external
375
-
.or_else(|| {
376
-
let env_val = optional_env("HTTP_EXTERNAL");
377
-
if env_val.is_empty() {
378
-
None
379
-
} else {
380
-
Some(env_val)
381
-
}
382
-
})
286
+
pub fn from_env() -> Result<Self, ConfigError> {
287
+
// Required fields
288
+
let http_external = env::var("HTTP_EXTERNAL")
289
+
.ok()
290
+
.filter(|s| !s.is_empty())
383
291
.ok_or_else(|| ConfigError::MissingRequired("HTTP_EXTERNAL".to_string()))?;
384
292
385
-
let service_key = args
386
-
.service_key
387
-
.or_else(|| {
388
-
let env_val = optional_env("SERVICE_KEY");
389
-
if env_val.is_empty() {
390
-
None
391
-
} else {
392
-
Some(env_val)
393
-
}
394
-
})
395
-
.ok_or_else(|| ConfigError::MissingRequired("SERVICE_KEY".to_string()))?;
396
-
293
+
// Generate default user agent
397
294
let default_user_agent = format!(
398
295
"quickdid/{} (+https://github.com/smokesignal.events/quickdid)",
399
296
env!("CARGO_PKG_VERSION")
400
297
);
401
298
402
-
let user_agent = args
403
-
.user_agent
404
-
.or_else(|| {
405
-
let env_val = optional_env("USER_AGENT");
406
-
if env_val.is_empty() {
407
-
None
408
-
} else {
409
-
Some(env_val)
410
-
}
411
-
})
412
-
.unwrap_or(default_user_agent);
413
-
414
-
let service_did = if http_external.contains(':') {
415
-
let encoded_external = http_external.replace(':', "%3A");
416
-
format!("did:web:{}", encoded_external)
417
-
} else {
418
-
format!("did:web:{}", http_external)
299
+
let mut config = Config {
300
+
http_port: get_env_or_default("HTTP_PORT", Some("8080")).unwrap(),
301
+
plc_hostname: get_env_or_default("PLC_HOSTNAME", Some("plc.directory")).unwrap(),
302
+
http_external,
303
+
user_agent: get_env_or_default("USER_AGENT", None).unwrap_or(default_user_agent),
304
+
dns_nameservers: get_env_or_default("DNS_NAMESERVERS", None),
305
+
certificate_bundles: get_env_or_default("CERTIFICATE_BUNDLES", None),
306
+
redis_url: get_env_or_default("REDIS_URL", None),
307
+
sqlite_url: get_env_or_default("SQLITE_URL", None),
308
+
queue_adapter: get_env_or_default("QUEUE_ADAPTER", Some("mpsc")).unwrap(),
309
+
queue_redis_url: get_env_or_default("QUEUE_REDIS_URL", None),
310
+
queue_redis_prefix: get_env_or_default(
311
+
"QUEUE_REDIS_PREFIX",
312
+
Some("queue:handleresolver:"),
313
+
)
314
+
.unwrap(),
315
+
queue_worker_id: get_env_or_default("QUEUE_WORKER_ID", Some("worker1")).unwrap(),
316
+
queue_buffer_size: parse_env("QUEUE_BUFFER_SIZE", 1000)?,
317
+
cache_ttl_memory: parse_env("CACHE_TTL_MEMORY", 600)?,
318
+
cache_ttl_redis: parse_env("CACHE_TTL_REDIS", 7776000)?,
319
+
cache_ttl_sqlite: parse_env("CACHE_TTL_SQLITE", 7776000)?,
320
+
queue_redis_timeout: parse_env("QUEUE_REDIS_TIMEOUT", 5)?,
321
+
queue_redis_dedup_enabled: parse_env("QUEUE_REDIS_DEDUP_ENABLED", false)?,
322
+
queue_redis_dedup_ttl: parse_env("QUEUE_REDIS_DEDUP_TTL", 60)?,
323
+
queue_sqlite_max_size: parse_env("QUEUE_SQLITE_MAX_SIZE", 10000)?,
324
+
resolver_max_concurrent: parse_env("RESOLVER_MAX_CONCURRENT", 0)?,
325
+
resolver_max_concurrent_timeout_ms: parse_env("RESOLVER_MAX_CONCURRENT_TIMEOUT_MS", 0)?,
326
+
etag_seed: get_env_or_default("ETAG_SEED", Some(env!("CARGO_PKG_VERSION"))).unwrap(),
327
+
cache_max_age: parse_env("CACHE_MAX_AGE", 86400)?, // 24 hours
328
+
cache_stale_if_error: parse_env("CACHE_STALE_IF_ERROR", 172800)?, // 48 hours
329
+
cache_stale_while_revalidate: parse_env("CACHE_STALE_WHILE_REVALIDATE", 86400)?, // 24 hours
330
+
cache_max_stale: parse_env("CACHE_MAX_STALE", 172800)?, // 48 hours
331
+
cache_min_fresh: parse_env("CACHE_MIN_FRESH", 3600)?, // 1 hour
332
+
cache_control_header: None, // Will be calculated below
333
+
metrics_adapter: get_env_or_default("METRICS_ADAPTER", Some("noop")).unwrap(),
334
+
metrics_statsd_host: get_env_or_default("METRICS_STATSD_HOST", None),
335
+
metrics_statsd_bind: get_env_or_default("METRICS_STATSD_BIND", Some("[::]:0")).unwrap(),
336
+
metrics_prefix: get_env_or_default("METRICS_PREFIX", Some("quickdid")).unwrap(),
337
+
metrics_tags: get_env_or_default("METRICS_TAGS", None),
338
+
proactive_refresh_enabled: parse_env("PROACTIVE_REFRESH_ENABLED", false)?,
339
+
proactive_refresh_threshold: parse_env("PROACTIVE_REFRESH_THRESHOLD", 0.8)?,
340
+
static_files_dir: get_env_or_default("STATIC_FILES_DIR", Some("www")).unwrap(),
341
+
jetstream_enabled: parse_env("JETSTREAM_ENABLED", false)?,
342
+
jetstream_hostname: get_env_or_default(
343
+
"JETSTREAM_HOSTNAME",
344
+
Some("jetstream.atproto.tools"),
345
+
)
346
+
.unwrap(),
419
347
};
420
348
421
-
Ok(Config {
422
-
http_port: args.http_port,
423
-
plc_hostname: args.plc_hostname,
424
-
http_external,
425
-
service_key,
426
-
user_agent,
427
-
service_did,
428
-
dns_nameservers: args.dns_nameservers.or_else(|| {
429
-
let env_val = optional_env("DNS_NAMESERVERS");
430
-
if env_val.is_empty() {
431
-
None
432
-
} else {
433
-
Some(env_val)
434
-
}
435
-
}),
436
-
certificate_bundles: args.certificate_bundles.or_else(|| {
437
-
let env_val = optional_env("CERTIFICATE_BUNDLES");
438
-
if env_val.is_empty() {
439
-
None
440
-
} else {
441
-
Some(env_val)
442
-
}
443
-
}),
444
-
redis_url: args.redis_url.or_else(|| {
445
-
let env_val = optional_env("REDIS_URL");
446
-
if env_val.is_empty() {
447
-
None
448
-
} else {
449
-
Some(env_val)
450
-
}
451
-
}),
452
-
queue_adapter: args.queue_adapter,
453
-
queue_redis_url: args.queue_redis_url.or_else(|| {
454
-
let env_val = optional_env("QUEUE_REDIS_URL");
455
-
if env_val.is_empty() {
456
-
None
457
-
} else {
458
-
Some(env_val)
459
-
}
460
-
}),
461
-
queue_redis_prefix: args.queue_redis_prefix,
462
-
queue_worker_id: args.queue_worker_id
463
-
.or_else(|| {
464
-
let env_val = optional_env("QUEUE_WORKER_ID");
465
-
if env_val.is_empty() {
466
-
None
467
-
} else {
468
-
Some(env_val)
469
-
}
470
-
})
471
-
.unwrap_or_else(|| "worker1".to_string()),
472
-
queue_buffer_size: args.queue_buffer_size,
473
-
cache_ttl_memory: args.cache_ttl_memory,
474
-
cache_ttl_redis: args.cache_ttl_redis,
475
-
queue_redis_timeout: args.queue_redis_timeout,
476
-
})
349
+
// Calculate the Cache-Control header value if enabled
350
+
config.cache_control_header = config.calculate_cache_control_header();
351
+
352
+
Ok(config)
353
+
}
354
+
355
+
/// Calculate the Cache-Control header value based on configuration.
356
+
/// Returns None if cache_max_age is 0 (disabled).
357
+
fn calculate_cache_control_header(&self) -> Option<String> {
358
+
if self.cache_max_age == 0 {
359
+
return None;
360
+
}
361
+
362
+
Some(format!(
363
+
"public, max-age={}, stale-while-revalidate={}, stale-if-error={}, max-stale={}, min-fresh={}",
364
+
self.cache_max_age,
365
+
self.cache_stale_while_revalidate,
366
+
self.cache_stale_if_error,
367
+
self.cache_max_stale,
368
+
self.cache_min_fresh
369
+
))
477
370
}
478
371
479
372
/// Validate the configuration for correctness and consistency
···
481
374
/// Checks:
482
375
/// - Cache TTL values are positive (> 0)
483
376
/// - Queue timeout is positive (> 0)
484
-
/// - Queue adapter is a valid value ('mpsc', 'redis', 'noop', 'none')
377
+
/// - Queue adapter is a valid value ('mpsc', 'redis', 'sqlite', 'noop', 'none')
485
378
///
486
379
/// ## Example
487
380
///
488
381
/// ```rust,no_run
489
-
/// # use quickdid::config::{Args, Config};
490
-
/// # use clap::Parser;
382
+
/// # use quickdid::config::Config;
491
383
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
492
-
/// # let args = Args::parse();
493
-
/// let config = Config::from_args(args)?;
384
+
/// let config = Config::from_env()?;
494
385
/// config.validate()?; // Ensures all values are valid
495
386
/// # Ok(())
496
387
/// # }
···
512
403
"CACHE_TTL_REDIS must be > 0".to_string(),
513
404
));
514
405
}
406
+
if self.cache_ttl_sqlite == 0 {
407
+
return Err(ConfigError::InvalidTtl(
408
+
"CACHE_TTL_SQLITE must be > 0".to_string(),
409
+
));
410
+
}
515
411
if self.queue_redis_timeout == 0 {
516
412
return Err(ConfigError::InvalidTimeout(
517
413
"QUEUE_REDIS_TIMEOUT must be > 0".to_string(),
518
414
));
519
415
}
416
+
if self.queue_redis_dedup_enabled && self.queue_redis_dedup_ttl == 0 {
417
+
return Err(ConfigError::InvalidTtl(
418
+
"QUEUE_REDIS_DEDUP_TTL must be > 0 when deduplication is enabled".to_string(),
419
+
));
420
+
}
520
421
match self.queue_adapter.as_str() {
521
-
"mpsc" | "redis" | "noop" | "none" => {}
422
+
"mpsc" | "redis" | "sqlite" | "noop" | "none" => {}
522
423
_ => {
523
424
return Err(ConfigError::InvalidValue(format!(
524
-
"Invalid QUEUE_ADAPTER '{}', must be 'mpsc', 'redis', or 'noop'",
425
+
"Invalid QUEUE_ADAPTER '{}', must be 'mpsc', 'redis', 'sqlite', 'noop', or 'none'",
525
426
self.queue_adapter
526
427
)));
527
428
}
528
429
}
430
+
if self.resolver_max_concurrent > 10000 {
431
+
return Err(ConfigError::InvalidValue(
432
+
"RESOLVER_MAX_CONCURRENT must be between 0 and 10000".to_string(),
433
+
));
434
+
}
435
+
if self.resolver_max_concurrent_timeout_ms > 60000 {
436
+
return Err(ConfigError::InvalidTimeout(
437
+
"RESOLVER_MAX_CONCURRENT_TIMEOUT_MS must be <= 60000 (60 seconds)".to_string(),
438
+
));
439
+
}
440
+
441
+
// Validate metrics configuration
442
+
match self.metrics_adapter.as_str() {
443
+
"noop" | "statsd" => {}
444
+
_ => {
445
+
return Err(ConfigError::InvalidValue(format!(
446
+
"Invalid METRICS_ADAPTER '{}', must be 'noop' or 'statsd'",
447
+
self.metrics_adapter
448
+
)));
449
+
}
450
+
}
451
+
452
+
// If statsd is configured, ensure host is provided
453
+
if self.metrics_adapter == "statsd" && self.metrics_statsd_host.is_none() {
454
+
return Err(ConfigError::MissingRequired(
455
+
"METRICS_STATSD_HOST is required when METRICS_ADAPTER is 'statsd'".to_string(),
456
+
));
457
+
}
458
+
459
+
// Validate proactive refresh threshold
460
+
if self.proactive_refresh_threshold < 0.0 || self.proactive_refresh_threshold > 1.0 {
461
+
return Err(ConfigError::InvalidValue(format!(
462
+
"PROACTIVE_REFRESH_THRESHOLD must be between 0.0 and 1.0, got {}",
463
+
self.proactive_refresh_threshold
464
+
)));
465
+
}
466
+
529
467
Ok(())
530
468
}
531
469
}
+6
-3
src/handle_resolution_result.rs
···
11
11
/// Errors that can occur during handle resolution result operations
12
12
#[derive(Debug, Error)]
13
13
pub enum HandleResolutionError {
14
-
#[error("error-quickdid-resolution-1 System time error: {0}")]
14
+
/// System time error when getting timestamp
15
+
#[error("error-quickdid-result-1 System time error: {0}")]
15
16
SystemTime(String),
16
17
17
-
#[error("error-quickdid-serialization-1 Failed to serialize resolution result: {0}")]
18
+
/// Failed to serialize resolution result to binary format
19
+
#[error("error-quickdid-result-2 Failed to serialize resolution result: {0}")]
18
20
Serialization(String),
19
21
20
-
#[error("error-quickdid-serialization-2 Failed to deserialize resolution result: {0}")]
22
+
/// Failed to deserialize resolution result from binary format
23
+
#[error("error-quickdid-result-3 Failed to deserialize resolution result: {0}")]
21
24
Deserialization(String),
22
25
}
23
26
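The renamed error codes all surface from the binary cache round trip. A small sketch of that round trip, assuming the `success`, `to_bytes`, `from_bytes`, and `to_did` helpers used elsewhere in this diff (the DID below is a well-formed but made-up placeholder):

```rust
use quickdid::handle_resolution_result::HandleResolutionResult;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Building a result captures the current timestamp; a failed clock read
    // surfaces as error-quickdid-result-1.
    let result = HandleResolutionResult::success("did:plc:aaaaaaaaaaaaaaaaaaaaaaaa")?;

    // Serialization into the cache's binary format can fail with
    // error-quickdid-result-2 ...
    let bytes = result.to_bytes()?;

    // ... and reading it back can fail with error-quickdid-result-3.
    let restored = HandleResolutionResult::from_bytes(&bytes)?;
    println!("restored DID: {:?}", restored.to_did());
    Ok(())
}
```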
+54
-4
src/handle_resolver/base.rs
+54
-4
src/handle_resolver/base.rs
···
5
5
6
6
use super::errors::HandleResolverError;
7
7
use super::traits::HandleResolver;
8
+
use crate::metrics::SharedMetricsPublisher;
8
9
use async_trait::async_trait;
9
10
use atproto_identity::resolve::{DnsResolver, resolve_subject};
10
11
use reqwest::Client;
11
12
use std::sync::Arc;
13
+
use std::time::{SystemTime, UNIX_EPOCH};
12
14
13
15
/// Base handle resolver that performs actual resolution via DNS and HTTP.
14
16
///
···
24
26
/// use reqwest::Client;
25
27
/// use atproto_identity::resolve::HickoryDnsResolver;
26
28
/// use quickdid::handle_resolver::{create_base_resolver, HandleResolver};
29
+
/// use quickdid::metrics::NoOpMetricsPublisher;
27
30
///
28
31
/// # async fn example() {
29
32
/// let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
30
33
/// let http_client = Client::new();
34
+
/// let metrics = Arc::new(NoOpMetricsPublisher);
31
35
///
32
36
/// let resolver = create_base_resolver(
33
37
/// dns_resolver,
34
38
/// http_client,
39
+
/// metrics,
35
40
/// );
36
41
///
37
-
/// let did = resolver.resolve("alice.bsky.social").await.unwrap();
42
+
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
43
+
/// println!("Resolved {} at {}", did, timestamp);
38
44
/// # }
39
45
/// ```
40
46
pub(super) struct BaseHandleResolver {
···
43
49
44
50
/// HTTP client for DID document retrieval and well-known endpoint queries.
45
51
http_client: Client,
52
+
53
+
/// Metrics publisher for telemetry.
54
+
metrics: SharedMetricsPublisher,
46
55
}
47
56
48
57
#[async_trait]
49
58
impl HandleResolver for BaseHandleResolver {
50
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
51
-
resolve_subject(&self.http_client, &*self.dns_resolver, s)
59
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
60
+
let start_time = std::time::Instant::now();
61
+
62
+
// Perform DNS/HTTP resolution
63
+
let result = resolve_subject(&self.http_client, &*self.dns_resolver, s)
52
64
.await
53
-
.map_err(|e| HandleResolverError::ResolutionFailed(e.to_string()))
65
+
.map_err(|e| HandleResolverError::ResolutionFailed(e.to_string()));
66
+
67
+
let duration_ms = start_time.elapsed().as_millis() as u64;
68
+
69
+
// Publish metrics
70
+
71
+
match result {
72
+
Ok(did) => {
73
+
self.metrics
74
+
.time_with_tags(
75
+
"resolver.base.duration_ms",
76
+
duration_ms,
77
+
&[("success", "1")],
78
+
)
79
+
.await;
80
+
81
+
let timestamp = SystemTime::now()
82
+
.duration_since(UNIX_EPOCH)
83
+
.map_err(|e| {
84
+
HandleResolverError::ResolutionFailed(format!("System time error: {}", e))
85
+
})?
86
+
.as_secs();
87
+
88
+
Ok((did, timestamp))
89
+
}
90
+
Err(e) => {
91
+
self.metrics
92
+
.time_with_tags(
93
+
"resolver.base.duration_ms",
94
+
duration_ms,
95
+
&[("success", "0")],
96
+
)
97
+
.await;
98
+
Err(e)
99
+
}
100
+
}
54
101
}
55
102
}
56
103
···
63
110
///
64
111
/// * `dns_resolver` - DNS resolver for TXT record lookups
65
112
/// * `http_client` - HTTP client for well-known endpoint queries
113
+
/// * `metrics` - Metrics publisher for telemetry
66
114
pub fn create_base_resolver(
67
115
dns_resolver: Arc<dyn DnsResolver>,
68
116
http_client: Client,
117
+
metrics: SharedMetricsPublisher,
69
118
) -> Arc<dyn HandleResolver> {
70
119
Arc::new(BaseHandleResolver {
71
120
dns_resolver,
72
121
http_client,
122
+
metrics,
73
123
})
74
124
}
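With the trait now returning the DID together with a Unix timestamp, any custom resolver has to adopt the `(String, u64)` shape. A minimal implementation modeled on the mock resolvers in this diff's tests (the fixed DID is a placeholder, and `set`/`purge` are left to their trait defaults, as the test mocks suggest is possible):

```rust
use async_trait::async_trait;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use quickdid::handle_resolver::{HandleResolver, HandleResolverError};

/// Test double that always "resolves" to the same DID, stamped with the
/// current Unix time, matching the new (DID, timestamp) return shape.
struct FixedResolver;

#[async_trait]
impl HandleResolver for FixedResolver {
    async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
        if handle.is_empty() {
            return Err(HandleResolverError::ResolutionFailed(
                "empty handle".to_string(),
            ));
        }
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        Ok(("did:plc:example123".to_string(), timestamp))
    }
}

/// Handy when a caching or rate-limiting wrapper needs an inner resolver in tests.
fn fixed_resolver() -> Arc<dyn HandleResolver> {
    Arc::new(FixedResolver)
}
```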
+3
src/handle_resolver/errors.rs
+3
src/handle_resolver/errors.rs
+70
-13
src/handle_resolver/memory.rs
+70
-13
src/handle_resolver/memory.rs
···
6
6
7
7
use super::errors::HandleResolverError;
8
8
use super::traits::HandleResolver;
9
+
use crate::metrics::SharedMetricsPublisher;
9
10
use async_trait::async_trait;
10
-
use std::time::{SystemTime, UNIX_EPOCH};
11
11
use std::collections::HashMap;
12
12
use std::sync::Arc;
13
+
use std::time::{SystemTime, UNIX_EPOCH};
13
14
use tokio::sync::RwLock;
14
15
15
16
/// Result of a handle resolution cached in memory.
···
32
33
/// ```no_run
33
34
/// use std::sync::Arc;
34
35
/// use quickdid::handle_resolver::{create_caching_resolver, create_base_resolver, HandleResolver};
36
+
/// use quickdid::metrics::NoOpMetricsPublisher;
35
37
///
36
38
/// # async fn example() {
37
39
/// # use atproto_identity::resolve::HickoryDnsResolver;
38
40
/// # use reqwest::Client;
39
41
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
40
42
/// # let http_client = Client::new();
41
-
/// let base_resolver = create_base_resolver(dns_resolver, http_client);
43
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
44
+
/// let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
42
45
/// let caching_resolver = create_caching_resolver(
43
46
/// base_resolver,
44
-
/// 300 // 5 minute TTL
47
+
/// 300, // 5 minute TTL
48
+
/// metrics
45
49
/// );
46
50
///
47
51
/// // First call hits the underlying resolver
48
-
/// let did1 = caching_resolver.resolve("alice.bsky.social").await.unwrap();
52
+
/// let (did1, timestamp1) = caching_resolver.resolve("alice.bsky.social").await.unwrap();
49
53
///
50
54
/// // Second call returns cached result
51
-
/// let did2 = caching_resolver.resolve("alice.bsky.social").await.unwrap();
55
+
/// let (did2, timestamp2) = caching_resolver.resolve("alice.bsky.social").await.unwrap();
52
56
/// # }
53
57
/// ```
54
58
pub(super) struct CachingHandleResolver {
55
59
inner: Arc<dyn HandleResolver>,
56
60
cache: Arc<RwLock<HashMap<String, ResolveHandleResult>>>,
57
61
ttl_seconds: u64,
62
+
metrics: SharedMetricsPublisher,
58
63
}
59
64
60
65
impl CachingHandleResolver {
···
64
69
///
65
70
/// * `inner` - The underlying resolver to use for actual resolution
66
71
/// * `ttl_seconds` - How long to cache results in seconds
67
-
pub fn new(inner: Arc<dyn HandleResolver>, ttl_seconds: u64) -> Self {
72
+
/// * `metrics` - Metrics publisher for telemetry
73
+
pub fn new(
74
+
inner: Arc<dyn HandleResolver>,
75
+
ttl_seconds: u64,
76
+
metrics: SharedMetricsPublisher,
77
+
) -> Self {
68
78
Self {
69
79
inner,
70
80
cache: Arc::new(RwLock::new(HashMap::new())),
71
81
ttl_seconds,
82
+
metrics,
72
83
}
73
84
}
74
85
···
87
98
88
99
#[async_trait]
89
100
impl HandleResolver for CachingHandleResolver {
90
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
101
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
91
102
let handle = s.to_string();
92
103
93
104
// Check cache first
···
98
109
ResolveHandleResult::Found(timestamp, did) => {
99
110
if !self.is_expired(*timestamp) {
100
111
tracing::debug!("Cache hit for handle {}: {}", handle, did);
101
-
return Ok(did.clone());
112
+
self.metrics.incr("resolver.memory.cache_hit").await;
113
+
return Ok((did.clone(), *timestamp));
102
114
}
103
115
tracing::debug!("Cache entry expired for handle {}", handle);
116
+
self.metrics.incr("resolver.memory.cache_expired").await;
104
117
}
105
118
ResolveHandleResult::NotFound(timestamp, error) => {
106
119
if !self.is_expired(*timestamp) {
···
109
122
handle,
110
123
error
111
124
);
125
+
self.metrics
126
+
.incr("resolver.memory.cache_hit_not_resolved")
127
+
.await;
112
128
return Err(HandleResolverError::HandleNotFoundCached(error.clone()));
113
129
}
114
130
tracing::debug!("Cache entry expired for handle {}", handle);
131
+
self.metrics.incr("resolver.memory.cache_expired").await;
115
132
}
116
133
}
117
134
}
···
119
136
120
137
// Not in cache or expired, resolve through inner resolver
121
138
tracing::debug!("Cache miss for handle {}, resolving...", handle);
139
+
self.metrics.incr("resolver.memory.cache_miss").await;
122
140
let result = self.inner.resolve(s).await;
123
-
let timestamp = Self::current_timestamp();
124
141
125
142
// Store in cache
126
143
{
127
144
let mut cache = self.cache.write().await;
128
145
match &result {
129
-
Ok(did) => {
146
+
Ok((did, timestamp)) => {
130
147
cache.insert(
131
148
handle.clone(),
132
-
ResolveHandleResult::Found(timestamp, did.clone()),
149
+
ResolveHandleResult::Found(*timestamp, did.clone()),
133
150
);
151
+
self.metrics.incr("resolver.memory.cache_set").await;
134
152
tracing::debug!(
135
153
"Cached successful resolution for handle {}: {}",
136
154
handle,
···
138
156
);
139
157
}
140
158
Err(e) => {
159
+
let timestamp = Self::current_timestamp();
141
160
cache.insert(
142
161
handle.clone(),
143
162
ResolveHandleResult::NotFound(timestamp, e.to_string()),
144
163
);
164
+
self.metrics.incr("resolver.memory.cache_set_error").await;
145
165
tracing::debug!("Cached failed resolution for handle {}: {}", handle, e);
146
166
}
147
167
}
168
+
169
+
// Track cache size
170
+
let cache_size = cache.len() as u64;
171
+
self.metrics
172
+
.gauge("resolver.memory.cache_entries", cache_size)
173
+
.await;
148
174
}
149
175
150
176
result
151
177
}
178
+
179
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
180
+
// Normalize the handle to lowercase
181
+
let handle = handle.to_lowercase();
182
+
183
+
// Update the in-memory cache
184
+
{
185
+
let mut cache = self.cache.write().await;
186
+
let timestamp = Self::current_timestamp();
187
+
cache.insert(
188
+
handle.clone(),
189
+
ResolveHandleResult::Found(timestamp, did.to_string()),
190
+
);
191
+
self.metrics.incr("resolver.memory.set").await;
192
+
tracing::debug!("Set handle {} -> DID {} in memory cache", handle, did);
193
+
194
+
// Track cache size
195
+
let cache_size = cache.len() as u64;
196
+
self.metrics
197
+
.gauge("resolver.memory.cache_entries", cache_size)
198
+
.await;
199
+
}
200
+
201
+
// Chain to inner resolver
202
+
self.inner.set(&handle, did).await
203
+
}
152
204
}
153
205
154
206
/// Create a new in-memory caching handle resolver.
···
160
212
///
161
213
/// * `inner` - The underlying resolver to use for actual resolution
162
214
/// * `ttl_seconds` - How long to cache results in seconds
215
+
/// * `metrics` - Metrics publisher for telemetry
163
216
///
164
217
/// # Example
165
218
///
166
219
/// ```no_run
167
220
/// use std::sync::Arc;
168
221
/// use quickdid::handle_resolver::{create_base_resolver, create_caching_resolver, HandleResolver};
222
+
/// use quickdid::metrics::NoOpMetricsPublisher;
169
223
///
170
224
/// # async fn example() {
171
225
/// # use atproto_identity::resolve::HickoryDnsResolver;
172
226
/// # use reqwest::Client;
173
227
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
174
228
/// # let http_client = Client::new();
229
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
175
230
/// let base = create_base_resolver(
176
231
/// dns_resolver,
177
232
/// http_client,
233
+
/// metrics.clone(),
178
234
/// );
179
235
///
180
-
/// let resolver = create_caching_resolver(base, 300); // 5 minute TTL
236
+
/// let resolver = create_caching_resolver(base, 300, metrics); // 5 minute TTL
181
237
/// let (did, _timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
182
238
/// # }
183
239
/// ```
184
240
pub fn create_caching_resolver(
185
241
inner: Arc<dyn HandleResolver>,
186
242
ttl_seconds: u64,
243
+
metrics: SharedMetricsPublisher,
187
244
) -> Arc<dyn HandleResolver> {
188
-
Arc::new(CachingHandleResolver::new(inner, ttl_seconds))
245
+
Arc::new(CachingHandleResolver::new(inner, ttl_seconds, metrics))
189
246
}
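The new `set` method lets callers pre-warm the in-memory cache (for example from Jetstream identity events) before any lookup happens, with handles normalized to lowercase and the call chained to the inner resolver. A sketch of seeding and then reading back through the cache, assuming only the factory signatures shown above (the error handling around `set` is illustrative, since the inner resolver's behaviour for manual sets is not shown in this hunk):

```rust
use std::sync::Arc;
use atproto_identity::resolve::HickoryDnsResolver;
use reqwest::Client;
use quickdid::handle_resolver::{create_base_resolver, create_caching_resolver};
use quickdid::metrics::NoOpMetricsPublisher;

async fn example() -> Result<(), Box<dyn std::error::Error>> {
    let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
    let http_client = Client::new();
    let metrics = Arc::new(NoOpMetricsPublisher);

    let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
    let resolver = create_caching_resolver(base, 300, metrics); // 5 minute TTL

    // Manually seed the cache; the mixed-case handle is stored lowercased and
    // the call is chained down to the inner resolver as well.
    if let Err(e) = resolver.set("Alice.Bsky.Social", "did:plc:example123").await {
        eprintln!("cache seed failed: {e}");
    }

    // Served from the in-memory entry until the 300s TTL expires.
    let (did, cached_at) = resolver.resolve("alice.bsky.social").await?;
    println!("{did} (cached at {cached_at})");
    Ok(())
}
```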
+16
-1
src/handle_resolver/mod.rs
+16
-1
src/handle_resolver/mod.rs
···
9
9
//! implementations:
10
10
//!
11
11
//! - [`BaseHandleResolver`]: Core resolver that performs actual DNS/HTTP lookups
12
+
//! - [`RateLimitedHandleResolver`]: Rate limiting wrapper using semaphore-based concurrency control
12
13
//! - [`CachingHandleResolver`]: In-memory caching wrapper with configurable TTL
13
14
//! - [`RedisHandleResolver`]: Redis-backed persistent caching with binary serialization
15
+
//! - [`SqliteHandleResolver`]: SQLite-backed persistent caching for single-instance deployments
14
16
//!
15
17
//! # Example Usage
16
18
//!
17
19
//! ```no_run
18
20
//! use std::sync::Arc;
19
21
//! use quickdid::handle_resolver::{create_base_resolver, create_caching_resolver, HandleResolver};
22
+
//! use quickdid::metrics::NoOpMetricsPublisher;
20
23
//!
21
24
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
22
25
//! # use atproto_identity::resolve::HickoryDnsResolver;
23
26
//! # use reqwest::Client;
24
27
//! # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
25
28
//! # let http_client = Client::new();
29
+
//! # let metrics = Arc::new(NoOpMetricsPublisher);
26
30
//! // Create base resolver using factory function
27
31
//! let base = create_base_resolver(
28
32
//! dns_resolver,
29
33
//! http_client,
34
+
//! metrics.clone(),
30
35
//! );
31
36
//!
32
37
//! // Wrap with in-memory caching
33
-
//! let resolver = create_caching_resolver(base, 300);
38
+
//! let resolver = create_caching_resolver(base, 300, metrics);
34
39
//!
35
40
//! // Resolve a handle
36
41
//! let (did, _timestamp) = resolver.resolve("alice.bsky.social").await?;
···
42
47
mod base;
43
48
mod errors;
44
49
mod memory;
50
+
mod proactive_refresh;
51
+
mod rate_limited;
45
52
mod redis;
53
+
mod sqlite;
46
54
mod traits;
47
55
48
56
// Re-export public API
···
52
60
// Factory functions for creating resolvers
53
61
pub use base::create_base_resolver;
54
62
pub use memory::create_caching_resolver;
63
+
pub use proactive_refresh::{
64
+
ProactiveRefreshResolver, create_proactive_refresh_resolver,
65
+
create_proactive_refresh_resolver_dyn, create_proactive_refresh_resolver_with_metrics,
66
+
create_proactive_refresh_resolver_with_threshold,
67
+
};
68
+
pub use rate_limited::{create_rate_limited_resolver, create_rate_limited_resolver_with_timeout};
55
69
pub use redis::{create_redis_resolver, create_redis_resolver_with_ttl};
70
+
pub use sqlite::{create_sqlite_resolver, create_sqlite_resolver_with_ttl};
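The expanded module list spells out the intended layering: caching on the outside, rate limiting in the middle, DNS/HTTP resolution at the core. A composition sketch using only the factory functions re-exported here (the concurrency limit, timeout, and TTL values are illustrative, and `SharedMetricsPublisher` is assumed to be the shared pointer type the factories accept):

```rust
use std::sync::Arc;
use atproto_identity::resolve::HickoryDnsResolver;
use reqwest::Client;
use quickdid::handle_resolver::{
    HandleResolver, create_base_resolver, create_caching_resolver,
    create_rate_limited_resolver_with_timeout,
};
use quickdid::metrics::SharedMetricsPublisher;

/// Build the resolver stack outermost-to-innermost:
/// in-memory cache -> rate limiter -> base DNS/HTTP resolver.
fn build_stack(metrics: SharedMetricsPublisher) -> Arc<dyn HandleResolver> {
    let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
    let http_client = Client::new();

    let base = create_base_resolver(dns_resolver, http_client, metrics.clone());

    // At most 50 in-flight upstream resolutions; wait up to 5s for a permit.
    let limited = create_rate_limited_resolver_with_timeout(base, 50, 5_000, metrics.clone());

    // Cache successful and failed lookups for 300 seconds.
    create_caching_resolver(limited, 300, metrics)
}
```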
+438
src/handle_resolver/proactive_refresh.rs
+438
src/handle_resolver/proactive_refresh.rs
···
1
+
use crate::handle_resolution_result::HandleResolutionResult;
2
+
use crate::handle_resolver::{HandleResolver, HandleResolverError};
3
+
use crate::metrics::MetricsPublisher;
4
+
use crate::queue::{HandleResolutionWork, QueueAdapter};
5
+
use async_trait::async_trait;
6
+
use std::sync::Arc;
7
+
use std::time::{SystemTime, UNIX_EPOCH};
8
+
use tracing::{debug, trace};
9
+
10
+
/// Create a ProactiveRefreshResolver with default 80% threshold
11
+
///
12
+
/// # Arguments
13
+
/// * `inner` - The inner resolver to wrap
14
+
/// * `queue` - The queue adapter for background refresh tasks
15
+
/// * `cache_ttl` - The TTL in seconds for cache entries
16
+
pub fn create_proactive_refresh_resolver<R, Q>(
17
+
inner: Arc<R>,
18
+
queue: Arc<Q>,
19
+
cache_ttl: u64,
20
+
) -> Arc<ProactiveRefreshResolver<R, Q>>
21
+
where
22
+
R: HandleResolver + Send + Sync + 'static,
23
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync + 'static,
24
+
{
25
+
Arc::new(ProactiveRefreshResolver::new(inner, queue, cache_ttl))
26
+
}
27
+
28
+
/// Create a ProactiveRefreshResolver with custom threshold
29
+
///
30
+
/// # Arguments
31
+
/// * `inner` - The inner resolver to wrap
32
+
/// * `queue` - The queue adapter for background refresh tasks
33
+
/// * `cache_ttl` - The TTL in seconds for cache entries
34
+
/// * `threshold` - The threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
35
+
pub fn create_proactive_refresh_resolver_with_threshold<R, Q>(
36
+
inner: Arc<R>,
37
+
queue: Arc<Q>,
38
+
cache_ttl: u64,
39
+
threshold: f64,
40
+
) -> Arc<ProactiveRefreshResolver<R, Q>>
41
+
where
42
+
R: HandleResolver + Send + Sync + 'static,
43
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync + 'static,
44
+
{
45
+
Arc::new(ProactiveRefreshResolver::with_threshold(
46
+
inner, queue, cache_ttl, threshold,
47
+
))
48
+
}
49
+
50
+
/// Wrapper struct for dynamic dispatch with proactive refresh
51
+
/// This works with trait objects instead of concrete types
52
+
pub struct DynProactiveRefreshResolver {
53
+
inner: Arc<dyn HandleResolver>,
54
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
55
+
metrics: Option<Arc<dyn MetricsPublisher>>,
56
+
#[allow(dead_code)]
57
+
cache_ttl: u64,
58
+
#[allow(dead_code)]
59
+
refresh_threshold: f64,
60
+
}
61
+
62
+
impl DynProactiveRefreshResolver {
63
+
pub fn new(
64
+
inner: Arc<dyn HandleResolver>,
65
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
66
+
cache_ttl: u64,
67
+
refresh_threshold: f64,
68
+
) -> Self {
69
+
Self::with_metrics(inner, queue, None, cache_ttl, refresh_threshold)
70
+
}
71
+
72
+
pub fn with_metrics(
73
+
inner: Arc<dyn HandleResolver>,
74
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
75
+
metrics: Option<Arc<dyn MetricsPublisher>>,
76
+
cache_ttl: u64,
77
+
refresh_threshold: f64,
78
+
) -> Self {
79
+
Self {
80
+
inner,
81
+
queue,
82
+
metrics,
83
+
cache_ttl,
84
+
refresh_threshold: refresh_threshold.clamp(0.0, 1.0),
85
+
}
86
+
}
87
+
88
+
async fn maybe_queue_for_refresh(&self, handle: &str, resolve_time: u64) {
89
+
// If resolution took less than 5ms, it was probably a cache hit
90
+
if resolve_time < 5000 {
91
+
trace!(
92
+
handle = handle,
93
+
resolve_time_us = resolve_time,
94
+
"Fast resolution detected, considering proactive refresh"
95
+
);
96
+
97
+
if let Some(metrics) = &self.metrics {
98
+
metrics.incr("proactive_refresh.cache_hit_detected").await;
99
+
}
100
+
101
+
// Simple heuristic: queue for refresh with some probability
102
+
let now = SystemTime::now()
103
+
.duration_since(UNIX_EPOCH)
104
+
.unwrap_or_default()
105
+
.as_secs();
106
+
107
+
// Queue every N seconds for frequently accessed handles
108
+
if now % 60 == 0 {
109
+
let work = HandleResolutionWork {
110
+
handle: handle.to_string(),
111
+
};
112
+
113
+
if let Err(e) = self.queue.push(work).await {
114
+
debug!(
115
+
handle = handle,
116
+
error = %e,
117
+
"Failed to queue handle for proactive refresh"
118
+
);
119
+
if let Some(metrics) = &self.metrics {
120
+
metrics.incr("proactive_refresh.queue_error").await;
121
+
}
122
+
} else {
123
+
debug!(handle = handle, "Queued handle for proactive refresh");
124
+
if let Some(metrics) = &self.metrics {
125
+
metrics.incr("proactive_refresh.queued").await;
126
+
}
127
+
}
128
+
}
129
+
}
130
+
}
131
+
}
132
+
133
+
#[async_trait]
134
+
impl HandleResolver for DynProactiveRefreshResolver {
135
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
136
+
// Resolve through the inner resolver
137
+
let (did, resolve_time) = self.inner.resolve(handle).await?;
138
+
139
+
// Check if we should queue for refresh based on resolution time
140
+
self.maybe_queue_for_refresh(handle, resolve_time).await;
141
+
142
+
Ok((did, resolve_time))
143
+
}
144
+
145
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
146
+
// Simply chain to inner resolver - no proactive refresh needed for manual sets
147
+
self.inner.set(handle, did).await
148
+
}
149
+
}
150
+
151
+
/// Create a ProactiveRefreshResolver with custom threshold using trait objects
152
+
/// This version works with dyn HandleResolver and dyn QueueAdapter
153
+
///
154
+
/// # Arguments
155
+
/// * `inner` - The inner resolver to wrap
156
+
/// * `queue` - The queue adapter for background refresh tasks
157
+
/// * `cache_ttl` - The TTL in seconds for cache entries
158
+
/// * `threshold` - The threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
159
+
pub fn create_proactive_refresh_resolver_dyn(
160
+
inner: Arc<dyn HandleResolver>,
161
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
162
+
cache_ttl: u64,
163
+
threshold: f64,
164
+
) -> Arc<dyn HandleResolver> {
165
+
Arc::new(DynProactiveRefreshResolver::new(
166
+
inner, queue, cache_ttl, threshold,
167
+
))
168
+
}
169
+
170
+
/// Create a ProactiveRefreshResolver with metrics support
171
+
pub fn create_proactive_refresh_resolver_with_metrics(
172
+
inner: Arc<dyn HandleResolver>,
173
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
174
+
metrics: Arc<dyn MetricsPublisher>,
175
+
cache_ttl: u64,
176
+
threshold: f64,
177
+
) -> Arc<dyn HandleResolver> {
178
+
Arc::new(DynProactiveRefreshResolver::with_metrics(
179
+
inner,
180
+
queue,
181
+
Some(metrics),
182
+
cache_ttl,
183
+
threshold,
184
+
))
185
+
}
186
+
187
+
/// A handle resolver that proactively refreshes cache entries when they reach
188
+
/// a certain staleness threshold (default 80% of TTL).
189
+
///
190
+
/// This resolver wraps another resolver and checks successful resolutions from cache.
191
+
/// When a cached entry has lived for more than the threshold percentage of its TTL,
192
+
/// it queues the handle for background refresh to keep the cache warm.
193
+
///
194
+
/// Note: Due to the current trait design, this implementation uses the resolution time
195
+
/// as a heuristic. When resolve_time is 0 (instant cache hit), it may queue for refresh.
196
+
/// For full functionality, the trait would need to expose cache timestamps.
197
+
pub struct ProactiveRefreshResolver<R: HandleResolver, Q: QueueAdapter<HandleResolutionWork>> {
198
+
inner: Arc<R>,
199
+
queue: Arc<Q>,
200
+
/// TTL in seconds for cache entries
201
+
cache_ttl: u64,
202
+
/// Threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
203
+
/// Default is 0.8 (80%)
204
+
refresh_threshold: f64,
205
+
}
206
+
207
+
impl<R: HandleResolver, Q: QueueAdapter<HandleResolutionWork>> ProactiveRefreshResolver<R, Q> {
208
+
pub fn new(inner: Arc<R>, queue: Arc<Q>, cache_ttl: u64) -> Self {
209
+
Self::with_threshold(inner, queue, cache_ttl, 0.8)
210
+
}
211
+
212
+
pub fn with_threshold(
213
+
inner: Arc<R>,
214
+
queue: Arc<Q>,
215
+
cache_ttl: u64,
216
+
refresh_threshold: f64,
217
+
) -> Self {
218
+
Self {
219
+
inner,
220
+
queue,
221
+
cache_ttl,
222
+
refresh_threshold: refresh_threshold.clamp(0.0, 1.0),
223
+
}
224
+
}
225
+
226
+
/// Check if a cached entry needs proactive refresh based on its age
227
+
#[allow(dead_code)]
228
+
fn needs_refresh(&self, result: &HandleResolutionResult) -> bool {
229
+
let now = SystemTime::now()
230
+
.duration_since(UNIX_EPOCH)
231
+
.unwrap_or_default()
232
+
.as_secs();
233
+
234
+
let age = now.saturating_sub(result.timestamp);
235
+
let threshold = (self.cache_ttl as f64 * self.refresh_threshold) as u64;
236
+
237
+
let needs_refresh = age >= threshold;
238
+
239
+
if needs_refresh {
240
+
debug!(
241
+
handle = ?result.to_did(),
242
+
age_seconds = age,
243
+
threshold_seconds = threshold,
244
+
cache_ttl = self.cache_ttl,
245
+
"Cache entry needs proactive refresh"
246
+
);
247
+
} else {
248
+
trace!(
249
+
handle = ?result.to_did(),
250
+
age_seconds = age,
251
+
threshold_seconds = threshold,
252
+
"Cache entry still fresh"
253
+
);
254
+
}
255
+
256
+
needs_refresh
257
+
}
258
+
259
+
/// Queue a handle for background refresh
260
+
async fn queue_for_refresh(&self, handle: &str) {
261
+
let work = HandleResolutionWork {
262
+
handle: handle.to_string(),
263
+
};
264
+
265
+
match self.queue.push(work).await {
266
+
Ok(_) => {
267
+
debug!(handle = handle, "Queued handle for proactive refresh");
268
+
}
269
+
Err(e) => {
270
+
// Don't fail the request if we can't queue for refresh
271
+
debug!(
272
+
handle = handle,
273
+
error = %e,
274
+
"Failed to queue handle for proactive refresh"
275
+
);
276
+
}
277
+
}
278
+
}
279
+
280
+
/// Check if we should queue for refresh based on resolution time
281
+
///
282
+
/// This is a heuristic approach:
283
+
/// - If resolve_time is very low (< 5ms), it was likely a cache hit
284
+
/// - We probabilistically queue for refresh based on time since service start
285
+
///
286
+
/// For proper implementation, the HandleResolver trait would need to expose
287
+
/// cache metadata or return HandleResolutionResult directly.
288
+
async fn maybe_queue_for_refresh(&self, handle: &str, resolve_time: u64) {
289
+
// If resolution took less than 5ms, it was probably a cache hit
290
+
if resolve_time < 5000 {
291
+
// Use a simple probabilistic approach for demonstration
292
+
// In production, you'd want access to the actual cache timestamp
293
+
trace!(
294
+
handle = handle,
295
+
resolve_time_us = resolve_time,
296
+
"Fast resolution detected, considering proactive refresh"
297
+
);
298
+
299
+
// Queue for refresh with some probability to avoid overwhelming the queue
300
+
// This is a simplified approach - ideally we'd have access to cache metadata
301
+
let now = SystemTime::now()
302
+
.duration_since(UNIX_EPOCH)
303
+
.unwrap_or_default()
304
+
.as_secs();
305
+
306
+
// Simple heuristic: queue every N seconds for frequently accessed handles
307
+
if now % 60 == 0 {
308
+
self.queue_for_refresh(handle).await;
309
+
}
310
+
}
311
+
}
312
+
}
313
+
314
+
#[async_trait]
315
+
impl<R, Q> HandleResolver for ProactiveRefreshResolver<R, Q>
316
+
where
317
+
R: HandleResolver + Send + Sync,
318
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync,
319
+
{
320
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
321
+
// Resolve through the inner resolver
322
+
let (did, resolve_time) = self.inner.resolve(handle).await?;
323
+
324
+
// Check if we should queue for refresh based on resolution time
325
+
self.maybe_queue_for_refresh(handle, resolve_time).await;
326
+
327
+
Ok((did, resolve_time))
328
+
}
329
+
}
330
+
331
+
#[cfg(test)]
332
+
mod tests {
333
+
use super::*;
334
+
use crate::handle_resolution_result::DidMethodType;
335
+
336
+
#[test]
337
+
fn test_needs_refresh_calculation() {
338
+
// Create a resolver with 100 second TTL and 80% threshold
339
+
let inner = Arc::new(MockResolver);
340
+
let queue = Arc::new(MockQueueAdapter);
341
+
let resolver = ProactiveRefreshResolver::new(inner, queue, 100);
342
+
343
+
let now = SystemTime::now()
344
+
.duration_since(UNIX_EPOCH)
345
+
.unwrap()
346
+
.as_secs();
347
+
348
+
// Test entry that's 50% through TTL (should not refresh)
349
+
let fresh_result = HandleResolutionResult {
350
+
timestamp: now - 50,
351
+
method_type: DidMethodType::Plc,
352
+
payload: "alice123".to_string(),
353
+
};
354
+
assert!(!resolver.needs_refresh(&fresh_result));
355
+
356
+
// Test entry that's 80% through TTL (should refresh)
357
+
let stale_result = HandleResolutionResult {
358
+
timestamp: now - 80,
359
+
method_type: DidMethodType::Plc,
360
+
payload: "alice123".to_string(),
361
+
};
362
+
assert!(resolver.needs_refresh(&stale_result));
363
+
364
+
// Test entry that's 90% through TTL (should definitely refresh)
365
+
let very_stale_result = HandleResolutionResult {
366
+
timestamp: now - 90,
367
+
method_type: DidMethodType::Plc,
368
+
payload: "alice123".to_string(),
369
+
};
370
+
assert!(resolver.needs_refresh(&very_stale_result));
371
+
}
372
+
373
+
#[test]
374
+
fn test_custom_threshold() {
375
+
let inner = Arc::new(MockResolver);
376
+
let queue = Arc::new(MockQueueAdapter);
377
+
378
+
// Create resolver with 50% threshold
379
+
let resolver = ProactiveRefreshResolver::with_threshold(inner, queue, 100, 0.5);
380
+
381
+
let now = SystemTime::now()
382
+
.duration_since(UNIX_EPOCH)
383
+
.unwrap()
384
+
.as_secs();
385
+
386
+
// Test entry that's 40% through TTL (should not refresh with 50% threshold)
387
+
let result_40 = HandleResolutionResult {
388
+
timestamp: now - 40,
389
+
method_type: DidMethodType::Plc,
390
+
payload: "alice123".to_string(),
391
+
};
392
+
assert!(!resolver.needs_refresh(&result_40));
393
+
394
+
// Test entry that's 60% through TTL (should refresh with 50% threshold)
395
+
let result_60 = HandleResolutionResult {
396
+
timestamp: now - 60,
397
+
method_type: DidMethodType::Plc,
398
+
payload: "alice123".to_string(),
399
+
};
400
+
assert!(resolver.needs_refresh(&result_60));
401
+
}
402
+
403
+
// Mock resolver for testing
404
+
struct MockResolver;
405
+
406
+
#[async_trait]
407
+
impl HandleResolver for MockResolver {
408
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
409
+
Ok((format!("did:plc:{}", handle), 1000))
410
+
}
411
+
}
412
+
413
+
// Mock queue adapter for testing
414
+
struct MockQueueAdapter;
415
+
416
+
#[async_trait]
417
+
impl QueueAdapter<HandleResolutionWork> for MockQueueAdapter {
418
+
async fn pull(&self) -> Option<HandleResolutionWork> {
419
+
None
420
+
}
421
+
422
+
async fn push(&self, _work: HandleResolutionWork) -> crate::queue::Result<()> {
423
+
Ok(())
424
+
}
425
+
426
+
async fn ack(&self, _item: &HandleResolutionWork) -> crate::queue::Result<()> {
427
+
Ok(())
428
+
}
429
+
430
+
async fn try_push(&self, _work: HandleResolutionWork) -> crate::queue::Result<()> {
431
+
Ok(())
432
+
}
433
+
434
+
async fn is_healthy(&self) -> bool {
435
+
true
436
+
}
437
+
}
438
+
}
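The wrapper only queues work; something still has to drain it. A sketch of wiring the dyn factory plus a minimal refresh worker, assuming a queue adapter has already been constructed elsewhere (which concrete adapter depends on QUEUE_ADAPTER) and that simply re-resolving a handle is enough to repopulate the caches beneath this layer:

```rust
use std::sync::Arc;
use quickdid::handle_resolver::{HandleResolver, create_proactive_refresh_resolver_dyn};
use quickdid::queue::{HandleResolutionWork, QueueAdapter};

/// Wrap an existing resolver stack so fast (likely cached) resolutions can be
/// queued for background refresh; 0.8 mirrors the default 80% threshold.
fn with_proactive_refresh(
    resolver: Arc<dyn HandleResolver>,
    queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
    cache_ttl: u64,
) -> Arc<dyn HandleResolver> {
    create_proactive_refresh_resolver_dyn(resolver, queue, cache_ttl, 0.8)
}

/// Minimal background worker: pull queued handles, re-resolve them, and ack
/// the work item only when the refresh succeeded. A real worker would also
/// back off and retry instead of exiting when the queue is empty.
async fn refresh_worker(
    resolver: Arc<dyn HandleResolver>,
    queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
) {
    while let Some(work) = queue.pull().await {
        if resolver.resolve(&work.handle).await.is_ok() {
            let _ = queue.ack(&work).await;
        }
    }
}
```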
+294
src/handle_resolver/rate_limited.rs
+294
src/handle_resolver/rate_limited.rs
···
1
+
//! Rate-limited handle resolver implementation.
2
+
//!
3
+
//! This module provides a handle resolver wrapper that limits concurrent
4
+
//! resolution requests using a semaphore to implement basic rate limiting.
5
+
6
+
use super::errors::HandleResolverError;
7
+
use super::traits::HandleResolver;
8
+
use crate::metrics::SharedMetricsPublisher;
9
+
use async_trait::async_trait;
10
+
use std::sync::Arc;
11
+
use std::time::Duration;
12
+
use tokio::sync::Semaphore;
13
+
use tokio::time::timeout;
14
+
15
+
/// Rate-limited handle resolver that constrains concurrent resolutions.
16
+
///
17
+
/// This resolver wraps an inner resolver and uses a semaphore to limit
18
+
/// the number of concurrent resolution requests. This provides basic
19
+
/// rate limiting and protects upstream services from being overwhelmed.
20
+
///
21
+
/// # Architecture
22
+
///
23
+
/// The rate limiter should be placed between the base resolver and any
24
+
/// caching layers:
25
+
/// ```text
26
+
/// Request -> Cache -> RateLimited -> Base -> DNS/HTTP
27
+
/// ```
28
+
///
29
+
/// # Example
30
+
///
31
+
/// ```no_run
32
+
/// use std::sync::Arc;
33
+
/// use quickdid::handle_resolver::{
34
+
/// create_base_resolver,
35
+
/// create_rate_limited_resolver,
36
+
/// HandleResolver,
37
+
/// };
38
+
/// use quickdid::metrics::NoOpMetricsPublisher;
39
+
///
40
+
/// # async fn example() {
41
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
42
+
/// # use reqwest::Client;
43
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
44
+
/// # let http_client = Client::new();
45
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
46
+
/// // Create base resolver
47
+
/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
48
+
///
49
+
/// // Wrap with rate limiting (max 10 concurrent resolutions)
50
+
/// let rate_limited = create_rate_limited_resolver(base, 10, metrics);
51
+
///
52
+
/// // Use the rate-limited resolver
53
+
/// let (did, timestamp) = rate_limited.resolve("alice.bsky.social").await.unwrap();
54
+
/// # }
55
+
/// ```
56
+
pub(super) struct RateLimitedHandleResolver {
57
+
/// Inner resolver that performs actual resolution.
58
+
inner: Arc<dyn HandleResolver>,
59
+
60
+
/// Semaphore for limiting concurrent resolutions.
61
+
semaphore: Arc<Semaphore>,
62
+
63
+
/// Optional timeout for acquiring permits (in milliseconds).
64
+
/// When None or 0, no timeout is applied.
65
+
timeout_ms: Option<u64>,
66
+
67
+
/// Metrics publisher for telemetry.
68
+
metrics: SharedMetricsPublisher,
69
+
}
70
+
71
+
impl RateLimitedHandleResolver {
72
+
/// Create a new rate-limited resolver.
73
+
///
74
+
/// # Arguments
75
+
///
76
+
/// * `inner` - The inner resolver to wrap
77
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
78
+
/// * `metrics` - Metrics publisher for telemetry
79
+
pub fn new(
80
+
inner: Arc<dyn HandleResolver>,
81
+
max_concurrent: usize,
82
+
metrics: SharedMetricsPublisher,
83
+
) -> Self {
84
+
Self {
85
+
inner,
86
+
semaphore: Arc::new(Semaphore::new(max_concurrent)),
87
+
timeout_ms: None,
88
+
metrics,
89
+
}
90
+
}
91
+
92
+
/// Create a new rate-limited resolver with timeout.
93
+
///
94
+
/// # Arguments
95
+
///
96
+
/// * `inner` - The inner resolver to wrap
97
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
98
+
/// * `timeout_ms` - Timeout in milliseconds for acquiring permits (0 = no timeout)
99
+
/// * `metrics` - Metrics publisher for telemetry
100
+
pub fn new_with_timeout(
101
+
inner: Arc<dyn HandleResolver>,
102
+
max_concurrent: usize,
103
+
timeout_ms: u64,
104
+
metrics: SharedMetricsPublisher,
105
+
) -> Self {
106
+
Self {
107
+
inner,
108
+
semaphore: Arc::new(Semaphore::new(max_concurrent)),
109
+
timeout_ms: if timeout_ms > 0 {
110
+
Some(timeout_ms)
111
+
} else {
112
+
None
113
+
},
114
+
metrics,
115
+
}
116
+
}
117
+
}
118
+
119
+
#[async_trait]
120
+
impl HandleResolver for RateLimitedHandleResolver {
121
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
122
+
let permit_start = std::time::Instant::now();
123
+
124
+
// Track rate limiter queue depth
125
+
let available_permits = self.semaphore.available_permits();
126
+
self.metrics
127
+
.gauge(
128
+
"resolver.rate_limit.available_permits",
129
+
available_permits as u64,
130
+
)
131
+
.await;
132
+
133
+
// Acquire a permit from the semaphore, with optional timeout
134
+
let _permit = match self.timeout_ms {
135
+
Some(timeout_ms) if timeout_ms > 0 => {
136
+
// Apply timeout when acquiring permit
137
+
let duration = Duration::from_millis(timeout_ms);
138
+
match timeout(duration, self.semaphore.acquire()).await {
139
+
Ok(Ok(permit)) => {
140
+
let wait_ms = permit_start.elapsed().as_millis() as u64;
141
+
self.metrics
142
+
.time("resolver.rate_limit.permit_acquired", wait_ms)
143
+
.await;
144
+
permit
145
+
}
146
+
Ok(Err(e)) => {
147
+
// Semaphore error (e.g., closed)
148
+
self.metrics.incr("resolver.rate_limit.permit_error").await;
149
+
return Err(HandleResolverError::ResolutionFailed(format!(
150
+
"Failed to acquire rate limit permit: {}",
151
+
e
152
+
)));
153
+
}
154
+
Err(_) => {
155
+
// Timeout occurred
156
+
self.metrics
157
+
.incr("resolver.rate_limit.permit_timeout")
158
+
.await;
159
+
return Err(HandleResolverError::ResolutionFailed(format!(
160
+
"Rate limit permit acquisition timed out after {}ms",
161
+
timeout_ms
162
+
)));
163
+
}
164
+
}
165
+
}
166
+
_ => {
167
+
// No timeout configured, wait indefinitely
168
+
match self.semaphore.acquire().await {
169
+
Ok(permit) => {
170
+
let wait_ms = permit_start.elapsed().as_millis() as u64;
171
+
self.metrics
172
+
.time("resolver.rate_limit.permit_acquired", wait_ms)
173
+
.await;
174
+
permit
175
+
}
176
+
Err(e) => {
177
+
self.metrics.incr("resolver.rate_limit.permit_error").await;
178
+
return Err(HandleResolverError::ResolutionFailed(format!(
179
+
"Failed to acquire rate limit permit: {}",
180
+
e
181
+
)));
182
+
}
183
+
}
184
+
}
185
+
};
186
+
187
+
// With permit acquired, forward to inner resolver
188
+
self.inner.resolve(s).await
189
+
}
190
+
191
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
192
+
// Set operations don't need rate limiting since they're typically administrative
193
+
// and don't involve network calls to external services
194
+
self.inner.set(handle, did).await
195
+
}
196
+
}
197
+
198
+
/// Create a rate-limited handle resolver.
199
+
///
200
+
/// This factory function creates a new [`RateLimitedHandleResolver`] that wraps
201
+
/// the provided inner resolver with concurrency limiting.
202
+
///
203
+
/// # Arguments
204
+
///
205
+
/// * `inner` - The resolver to wrap with rate limiting
206
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
207
+
/// * `metrics` - Metrics publisher for telemetry
208
+
///
209
+
/// # Returns
210
+
///
211
+
/// An `Arc<dyn HandleResolver>` that can be used wherever a handle resolver is needed.
212
+
///
213
+
/// # Example
214
+
///
215
+
/// ```no_run
216
+
/// use std::sync::Arc;
217
+
/// use quickdid::handle_resolver::{
218
+
/// create_base_resolver,
219
+
/// create_rate_limited_resolver,
220
+
/// };
221
+
///
222
+
/// # async fn example() {
223
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
224
+
/// # use reqwest::Client;
225
+
/// # use quickdid::metrics::NoOpMetricsPublisher;
226
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
227
+
/// # let http_client = Client::new();
228
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
229
+
/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
230
+
/// let rate_limited = create_rate_limited_resolver(base, 10, metrics);
231
+
/// # }
232
+
/// ```
233
+
pub fn create_rate_limited_resolver(
234
+
inner: Arc<dyn HandleResolver>,
235
+
max_concurrent: usize,
236
+
metrics: SharedMetricsPublisher,
237
+
) -> Arc<dyn HandleResolver> {
238
+
Arc::new(RateLimitedHandleResolver::new(
239
+
inner,
240
+
max_concurrent,
241
+
metrics,
242
+
))
243
+
}
244
+
245
+
/// Create a rate-limited handle resolver with timeout.
246
+
///
247
+
/// This factory function creates a new [`RateLimitedHandleResolver`] that wraps
248
+
/// the provided inner resolver with concurrency limiting and timeout for permit acquisition.
249
+
///
250
+
/// # Arguments
251
+
///
252
+
/// * `inner` - The resolver to wrap with rate limiting
253
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
254
+
/// * `timeout_ms` - Timeout in milliseconds for acquiring permits (0 = no timeout)
255
+
/// * `metrics` - Metrics publisher for telemetry
256
+
///
257
+
/// # Returns
258
+
///
259
+
/// An `Arc<dyn HandleResolver>` that can be used wherever a handle resolver is needed.
260
+
///
261
+
/// # Example
262
+
///
263
+
/// ```no_run
264
+
/// use std::sync::Arc;
265
+
/// use quickdid::handle_resolver::{
266
+
/// create_base_resolver,
267
+
/// create_rate_limited_resolver_with_timeout,
268
+
/// };
269
+
///
270
+
/// # async fn example() {
271
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
272
+
/// # use reqwest::Client;
273
+
/// # use quickdid::metrics::NoOpMetricsPublisher;
274
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
275
+
/// # let http_client = Client::new();
276
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
277
+
/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
278
+
/// // Rate limit with 10 concurrent resolutions and 5 second timeout
279
+
/// let rate_limited = create_rate_limited_resolver_with_timeout(base, 10, 5000, metrics);
280
+
/// # }
281
+
/// ```
282
+
pub fn create_rate_limited_resolver_with_timeout(
283
+
inner: Arc<dyn HandleResolver>,
284
+
max_concurrent: usize,
285
+
timeout_ms: u64,
286
+
metrics: SharedMetricsPublisher,
287
+
) -> Arc<dyn HandleResolver> {
288
+
Arc::new(RateLimitedHandleResolver::new_with_timeout(
289
+
inner,
290
+
max_concurrent,
291
+
timeout_ms,
292
+
metrics,
293
+
))
294
+
}
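Permit timeouts, closed-semaphore errors, and upstream failures all come back as `ResolutionFailed`, so a caller that wants to treat back-pressure as a soft failure has to match on the error. A small sketch against a rate-limited stack built with the factories above (the logging is illustrative):

```rust
use std::sync::Arc;
use quickdid::handle_resolver::{HandleResolver, HandleResolverError};

/// Resolve a handle through a rate-limited stack, treating failures as "no DID".
async fn try_resolve(resolver: Arc<dyn HandleResolver>, handle: &str) -> Option<String> {
    match resolver.resolve(handle).await {
        Ok((did, _resolved_at)) => Some(did),
        // Permit timeouts, semaphore errors, and upstream failures all arrive
        // as ResolutionFailed; the message says which one occurred.
        Err(HandleResolverError::ResolutionFailed(msg)) => {
            eprintln!("resolution failed or rate limited: {msg}");
            None
        }
        Err(e) => {
            eprintln!("resolution error: {e}");
            None
        }
    }
}
```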
+580
-19
src/handle_resolver/redis.rs
+580
-19
src/handle_resolver/redis.rs
···
7
7
use super::errors::HandleResolverError;
8
8
use super::traits::HandleResolver;
9
9
use crate::handle_resolution_result::HandleResolutionResult;
10
+
use crate::metrics::SharedMetricsPublisher;
10
11
use async_trait::async_trait;
12
+
use atproto_identity::resolve::{InputType, parse_input};
11
13
use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
12
14
use metrohash::MetroHash64;
13
15
use std::hash::Hasher as _;
···
33
35
/// use std::sync::Arc;
34
36
/// use deadpool_redis::Pool;
35
37
/// use quickdid::handle_resolver::{create_base_resolver, create_redis_resolver, HandleResolver};
38
+
/// use quickdid::metrics::NoOpMetricsPublisher;
36
39
///
37
40
/// # async fn example() {
38
41
/// # use atproto_identity::resolve::HickoryDnsResolver;
39
42
/// # use reqwest::Client;
40
43
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
41
44
/// # let http_client = Client::new();
42
-
/// # let base_resolver = create_base_resolver(dns_resolver, http_client);
45
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
46
+
/// # let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
43
47
/// # let redis_pool: Pool = todo!();
44
48
/// // Create with default 90-day TTL
45
49
/// let resolver = create_redis_resolver(
46
50
/// base_resolver,
47
-
/// redis_pool
51
+
/// redis_pool,
52
+
/// metrics
48
53
/// );
49
54
/// # }
50
55
/// ```
···
57
62
key_prefix: String,
58
63
/// TTL for cache entries in seconds
59
64
ttl_seconds: u64,
65
+
/// Metrics publisher for telemetry
66
+
metrics: SharedMetricsPublisher,
60
67
}
61
68
62
69
impl RedisHandleResolver {
63
70
/// Create a new Redis-backed handle resolver with default 90-day TTL.
64
-
fn new(inner: Arc<dyn HandleResolver>, pool: RedisPool) -> Self {
65
-
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60) // 90 days default
71
+
fn new(
72
+
inner: Arc<dyn HandleResolver>,
73
+
pool: RedisPool,
74
+
metrics: SharedMetricsPublisher,
75
+
) -> Self {
76
+
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
66
77
}
67
78
68
79
/// Create a new Redis-backed handle resolver with custom TTL.
69
-
fn with_ttl(inner: Arc<dyn HandleResolver>, pool: RedisPool, ttl_seconds: u64) -> Self {
70
-
Self::with_full_config(inner, pool, "handle:".to_string(), ttl_seconds)
80
+
fn with_ttl(
81
+
inner: Arc<dyn HandleResolver>,
82
+
pool: RedisPool,
83
+
ttl_seconds: u64,
84
+
metrics: SharedMetricsPublisher,
85
+
) -> Self {
86
+
Self::with_full_config(inner, pool, "handle:".to_string(), ttl_seconds, metrics)
71
87
}
72
88
73
89
/// Create a new Redis-backed handle resolver with full configuration.
···
76
92
pool: RedisPool,
77
93
key_prefix: String,
78
94
ttl_seconds: u64,
95
+
metrics: SharedMetricsPublisher,
79
96
) -> Self {
80
97
Self {
81
98
inner,
82
99
pool,
83
100
key_prefix,
84
101
ttl_seconds,
102
+
metrics,
85
103
}
86
104
}
87
105
···
100
118
fn ttl_seconds(&self) -> u64 {
101
119
self.ttl_seconds
102
120
}
121
+
122
+
/// Purge a handle and its associated DID from the cache.
123
+
///
124
+
/// This method removes both the handle->DID mapping and the reverse DID->handle mapping.
125
+
async fn purge_handle(&self, handle: &str) -> Result<(), HandleResolverError> {
126
+
let handle_key = self.make_key(handle);
127
+
128
+
match self.pool.get().await {
129
+
Ok(mut conn) => {
130
+
// First, try to get the cached result to find the associated DID
131
+
let cached: Option<Vec<u8>> = match conn.get(&handle_key).await {
132
+
Ok(value) => value,
133
+
Err(e) => {
134
+
tracing::warn!("Failed to get handle from Redis for purging: {}", e);
135
+
self.metrics.incr("resolver.redis.purge_get_error").await;
136
+
None
137
+
}
138
+
};
139
+
140
+
// If we found a cached result, extract the DID and delete both keys
141
+
if let Some(cached_bytes) = cached {
142
+
if let Ok(cached_result) = HandleResolutionResult::from_bytes(&cached_bytes) {
143
+
if let Some(did) = cached_result.to_did() {
144
+
let did_key = self.make_key(&did);
145
+
146
+
// Delete both the handle key and the DID key
147
+
let _: Result<(), _> = conn.del(&[&handle_key, &did_key]).await;
148
+
149
+
tracing::debug!("Purged handle {} and associated DID {}", handle, did);
150
+
self.metrics
151
+
.incr("resolver.redis.purge_handle_success")
152
+
.await;
153
+
} else {
154
+
// Just delete the handle key if no DID was resolved
155
+
let _: Result<(), _> = conn.del(&handle_key).await;
156
+
tracing::debug!("Purged unresolved handle {}", handle);
157
+
self.metrics
158
+
.incr("resolver.redis.purge_handle_unresolved")
159
+
.await;
160
+
}
161
+
} else {
162
+
// If we can't deserialize, just delete the handle key
163
+
let _: Result<(), _> = conn.del(&handle_key).await;
164
+
tracing::warn!("Purged handle {} with undeserializable data", handle);
165
+
self.metrics
166
+
.incr("resolver.redis.purge_handle_corrupt")
167
+
.await;
168
+
}
169
+
} else {
170
+
tracing::debug!("Handle {} not found in cache for purging", handle);
171
+
self.metrics
172
+
.incr("resolver.redis.purge_handle_not_found")
173
+
.await;
174
+
}
175
+
176
+
Ok(())
177
+
}
178
+
Err(e) => {
179
+
tracing::warn!("Failed to get Redis connection for purging: {}", e);
180
+
self.metrics
181
+
.incr("resolver.redis.purge_connection_error")
182
+
.await;
183
+
Err(HandleResolverError::ResolutionFailed(format!(
184
+
"Redis connection error: {}",
185
+
e
186
+
)))
187
+
}
188
+
}
189
+
}
190
+
191
+
/// Purge a DID and its associated handle from the cache.
192
+
///
193
+
/// This method removes both the DID->handle mapping and the handle->DID mapping.
194
+
async fn purge_did(&self, did: &str) -> Result<(), HandleResolverError> {
195
+
let did_key = self.make_key(did);
196
+
197
+
match self.pool.get().await {
198
+
Ok(mut conn) => {
199
+
// First, try to get the associated handle from the reverse mapping
200
+
let handle_bytes: Option<Vec<u8>> = match conn.get(&did_key).await {
201
+
Ok(value) => value,
202
+
Err(e) => {
203
+
tracing::warn!("Failed to get DID from Redis for purging: {}", e);
204
+
self.metrics.incr("resolver.redis.purge_get_error").await;
205
+
None
206
+
}
207
+
};
208
+
209
+
// If we found a handle, delete both keys
210
+
if let Some(handle_bytes) = handle_bytes {
211
+
if let Ok(handle) = String::from_utf8(handle_bytes) {
212
+
let handle_key = self.make_key(&handle);
213
+
214
+
// Delete both the DID key and the handle key
215
+
let _: Result<(), _> = conn.del(&[&did_key, &handle_key]).await;
216
+
217
+
tracing::debug!("Purged DID {} and associated handle {}", did, handle);
218
+
self.metrics.incr("resolver.redis.purge_did_success").await;
219
+
} else {
220
+
// If we can't parse the handle, just delete the DID key
221
+
let _: Result<(), _> = conn.del(&did_key).await;
222
+
tracing::warn!("Purged DID {} with unparseable handle data", did);
223
+
self.metrics.incr("resolver.redis.purge_did_corrupt").await;
224
+
}
225
+
} else {
226
+
tracing::debug!("DID {} not found in cache for purging", did);
227
+
self.metrics
228
+
.incr("resolver.redis.purge_did_not_found")
229
+
.await;
230
+
}
231
+
232
+
Ok(())
233
+
}
234
+
Err(e) => {
235
+
tracing::warn!("Failed to get Redis connection for purging: {}", e);
236
+
self.metrics
237
+
.incr("resolver.redis.purge_connection_error")
238
+
.await;
239
+
Err(HandleResolverError::ResolutionFailed(format!(
240
+
"Redis connection error: {}",
241
+
e
242
+
)))
243
+
}
244
+
}
245
+
}
103
246
}
104
247
105
248
#[async_trait]
106
249
impl HandleResolver for RedisHandleResolver {
107
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
250
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
108
251
let handle = s.to_string();
109
252
let key = self.make_key(&handle);
110
253
···
115
258
let cached: Option<Vec<u8>> = match conn.get(&key).await {
116
259
Ok(value) => value,
117
260
Err(e) => {
261
+
self.metrics.incr("resolver.redis.get_error").await;
118
262
tracing::warn!("Failed to get handle from Redis cache: {}", e);
119
263
None
120
264
}
···
126
270
Ok(cached_result) => {
127
271
if let Some(did) = cached_result.to_did() {
128
272
tracing::debug!("Cache hit for handle {}: {}", handle, did);
129
-
return Ok(did);
273
+
self.metrics.incr("resolver.redis.cache_hit").await;
274
+
return Ok((did, cached_result.timestamp));
130
275
} else {
131
276
tracing::debug!("Cache hit (not resolved) for handle {}", handle);
277
+
self.metrics
278
+
.incr("resolver.redis.cache_hit_not_resolved")
279
+
.await;
132
280
return Err(HandleResolverError::HandleNotFound);
133
281
}
134
282
}
···
138
286
handle,
139
287
e
140
288
);
289
+
self.metrics.incr("resolver.redis.deserialize_error").await;
141
290
// Fall through to re-resolve if deserialization fails
142
291
}
143
292
}
···
145
294
146
295
// Not in cache, resolve through inner resolver
147
296
tracing::debug!("Cache miss for handle {}, resolving...", handle);
297
+
self.metrics.incr("resolver.redis.cache_miss").await;
148
298
let result = self.inner.resolve(s).await;
149
299
150
300
// Create and serialize resolution result
151
301
let resolution_result = match &result {
152
-
Ok(did) => {
302
+
Ok((did, _timestamp)) => {
153
303
tracing::debug!(
154
304
"Caching successful resolution for handle {}: {}",
155
305
handle,
···
159
309
Ok(res) => res,
160
310
Err(e) => {
161
311
tracing::warn!("Failed to create resolution result: {}", e);
312
+
self.metrics
313
+
.incr("resolver.redis.result_create_error")
314
+
.await;
162
315
return result;
163
316
}
164
317
}
···
169
322
Ok(res) => res,
170
323
Err(err) => {
171
324
tracing::warn!("Failed to create not_resolved result: {}", err);
325
+
self.metrics
326
+
.incr("resolver.redis.result_create_error")
327
+
.await;
172
328
return result;
173
329
}
174
330
}
···
184
340
.await
185
341
{
186
342
tracing::warn!("Failed to cache handle resolution in Redis: {}", e);
343
+
self.metrics.incr("resolver.redis.cache_set_error").await;
344
+
} else {
345
+
self.metrics.incr("resolver.redis.cache_set").await;
346
+
347
+
// For successful resolutions, also store reverse DID -> handle mapping
348
+
if let Ok((did, _)) = &result {
349
+
let did_key = self.make_key(did);
350
+
if let Err(e) = conn
351
+
.set_ex::<_, _, ()>(
352
+
&did_key,
353
+
handle.as_bytes(),
354
+
self.ttl_seconds(),
355
+
)
356
+
.await
357
+
{
358
+
tracing::warn!(
359
+
"Failed to cache reverse DID->handle mapping in Redis: {}",
360
+
e
361
+
);
362
+
self.metrics
363
+
.incr("resolver.redis.reverse_cache_set_error")
364
+
.await;
365
+
} else {
366
+
tracing::debug!(
367
+
"Cached reverse mapping for DID {}: {}",
368
+
did,
369
+
handle
370
+
);
371
+
self.metrics.incr("resolver.redis.reverse_cache_set").await;
372
+
}
373
+
}
187
374
}
188
375
}
189
376
Err(e) => {
···
192
379
handle,
193
380
e
194
381
);
382
+
self.metrics.incr("resolver.redis.serialize_error").await;
195
383
}
196
384
}
197
385
···
203
391
"Failed to get Redis connection, falling back to uncached resolution: {}",
204
392
e
205
393
);
394
+
self.metrics.incr("resolver.redis.connection_error").await;
206
395
self.inner.resolve(s).await
207
396
}
208
397
}
209
398
}
399
+
400
+
async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
401
+
// Use atproto_identity's parse_input to properly identify the input type
402
+
let parsed_input = parse_input(subject)
403
+
.map_err(|_| HandleResolverError::InvalidSubject(subject.to_string()))?;
404
+
match parsed_input {
405
+
InputType::Handle(handle) => {
406
+
// It's a handle, purge using the lowercase version
407
+
self.purge_handle(&handle.to_lowercase()).await
408
+
}
409
+
InputType::Plc(did) | InputType::Web(did) => {
410
+
// It's a DID, purge the DID
411
+
self.purge_did(&did).await
412
+
}
413
+
}
414
+
}
415
+
416
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
417
+
// Normalize the handle to lowercase
418
+
let handle = handle.to_lowercase();
419
+
let handle_key = self.make_key(&handle);
420
+
let did_key = self.make_key(did);
421
+
422
+
match self.pool.get().await {
423
+
Ok(mut conn) => {
424
+
// Create a resolution result for the successful mapping
425
+
let resolution_result = match HandleResolutionResult::success(did) {
426
+
Ok(res) => res,
427
+
Err(e) => {
428
+
tracing::warn!(
429
+
"Failed to create resolution result for set operation: {}",
430
+
e
431
+
);
432
+
self.metrics
433
+
.incr("resolver.redis.set_result_create_error")
434
+
.await;
435
+
return Err(HandleResolverError::InvalidSubject(format!(
436
+
"Failed to create resolution result: {}",
437
+
e
438
+
)));
439
+
}
440
+
};
441
+
442
+
// Serialize to bytes
443
+
match resolution_result.to_bytes() {
444
+
Ok(bytes) => {
445
+
// Set the handle -> DID mapping with expiration
446
+
if let Err(e) = conn
447
+
.set_ex::<_, _, ()>(&handle_key, bytes, self.ttl_seconds())
448
+
.await
449
+
{
450
+
tracing::warn!("Failed to set handle->DID mapping in Redis: {}", e);
451
+
self.metrics.incr("resolver.redis.set_cache_error").await;
452
+
return Err(HandleResolverError::ResolutionFailed(format!(
453
+
"Failed to set cache: {}",
454
+
e
455
+
)));
456
+
}
457
+
458
+
// Set the reverse DID -> handle mapping
459
+
if let Err(e) = conn
460
+
.set_ex::<_, _, ()>(&did_key, handle.as_bytes(), self.ttl_seconds())
461
+
.await
462
+
{
463
+
tracing::warn!("Failed to set DID->handle mapping in Redis: {}", e);
464
+
self.metrics
465
+
.incr("resolver.redis.set_reverse_cache_error")
466
+
.await;
467
+
// Don't fail the operation, but log the warning
468
+
}
469
+
470
+
tracing::debug!("Set handle {} -> DID {} mapping in cache", handle, did);
471
+
self.metrics.incr("resolver.redis.set_success").await;
472
+
Ok(())
473
+
}
474
+
Err(e) => {
475
+
tracing::warn!(
476
+
"Failed to serialize resolution result for set operation: {}",
477
+
e
478
+
);
479
+
self.metrics
480
+
.incr("resolver.redis.set_serialize_error")
481
+
.await;
482
+
Err(HandleResolverError::InvalidSubject(format!(
483
+
"Failed to serialize: {}",
484
+
e
485
+
)))
486
+
}
487
+
}
488
+
}
489
+
Err(e) => {
490
+
tracing::warn!("Failed to get Redis connection for set operation: {}", e);
491
+
self.metrics
492
+
.incr("resolver.redis.set_connection_error")
493
+
.await;
494
+
Err(HandleResolverError::ResolutionFailed(format!(
495
+
"Redis connection error: {}",
496
+
e
497
+
)))
498
+
}
499
+
}
500
+
}
210
501
}
211
502
212
503
/// Create a new Redis-backed handle resolver with default 90-day TTL.
···
215
506
///
216
507
/// * `inner` - The underlying resolver to use for actual resolution
217
508
/// * `pool` - Redis connection pool
509
+
/// * `metrics` - Metrics publisher for telemetry
218
510
///
219
511
/// # Example
220
512
///
···
222
514
/// use std::sync::Arc;
223
515
/// use quickdid::handle_resolver::{create_base_resolver, create_redis_resolver, HandleResolver};
224
516
/// use quickdid::cache::create_redis_pool;
517
+
/// use quickdid::metrics::NoOpMetricsPublisher;
225
518
///
226
519
/// # async fn example() -> anyhow::Result<()> {
227
520
/// # use atproto_identity::resolve::HickoryDnsResolver;
228
521
/// # use reqwest::Client;
229
522
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
230
523
/// # let http_client = Client::new();
524
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
231
525
/// let base = create_base_resolver(
232
526
/// dns_resolver,
233
527
/// http_client,
528
+
/// metrics.clone(),
234
529
/// );
235
530
///
236
531
/// let pool = create_redis_pool("redis://localhost:6379")?;
237
-
/// let resolver = create_redis_resolver(base, pool);
238
-
/// let did = resolver.resolve("alice.bsky.social").await.unwrap();
532
+
/// let resolver = create_redis_resolver(base, pool, metrics);
533
+
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
239
534
/// # Ok(())
240
535
/// # }
241
536
/// ```
242
537
pub fn create_redis_resolver(
243
538
inner: Arc<dyn HandleResolver>,
244
539
pool: RedisPool,
540
+
metrics: SharedMetricsPublisher,
245
541
) -> Arc<dyn HandleResolver> {
246
-
Arc::new(RedisHandleResolver::new(inner, pool))
542
+
Arc::new(RedisHandleResolver::new(inner, pool, metrics))
247
543
}
248
544
249
545
/// Create a new Redis-backed handle resolver with custom TTL.
···
253
549
/// * `inner` - The underlying resolver to use for actual resolution
254
550
/// * `pool` - Redis connection pool
255
551
/// * `ttl_seconds` - TTL for cache entries in seconds
552
+
/// * `metrics` - Metrics publisher for telemetry
256
553
pub fn create_redis_resolver_with_ttl(
257
554
inner: Arc<dyn HandleResolver>,
258
555
pool: RedisPool,
259
556
ttl_seconds: u64,
557
+
metrics: SharedMetricsPublisher,
260
558
) -> Arc<dyn HandleResolver> {
261
-
Arc::new(RedisHandleResolver::with_ttl(inner, pool, ttl_seconds))
559
+
Arc::new(RedisHandleResolver::with_ttl(
560
+
inner,
561
+
pool,
562
+
ttl_seconds,
563
+
metrics,
564
+
))
262
565
}
263
566
264
567
#[cfg(test)]
···
274
577
275
578
#[async_trait]
276
579
impl HandleResolver for MockHandleResolver {
277
-
async fn resolve(&self, _handle: &str) -> Result<String, HandleResolverError> {
580
+
async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
278
581
if self.should_fail {
279
582
Err(HandleResolverError::MockResolutionFailure)
280
583
} else {
281
-
Ok(self.expected_did.clone())
584
+
let timestamp = std::time::SystemTime::now()
585
+
.duration_since(std::time::UNIX_EPOCH)
586
+
.unwrap_or_default()
587
+
.as_secs();
588
+
Ok((self.expected_did.clone(), timestamp))
282
589
}
283
590
}
284
591
}
···
296
603
expected_did: "did:plc:testuser123".to_string(),
297
604
});
298
605
606
+
// Create metrics publisher
607
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
608
+
299
609
// Create Redis-backed resolver with a unique key prefix for testing
300
-
let test_prefix = format!("test:handle:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
610
+
let test_prefix = format!(
611
+
"test:handle:{}:",
612
+
std::time::SystemTime::now()
613
+
.duration_since(std::time::UNIX_EPOCH)
614
+
.unwrap()
615
+
.as_nanos()
616
+
);
301
617
let redis_resolver = RedisHandleResolver::with_full_config(
302
618
mock_resolver,
303
619
pool.clone(),
304
620
test_prefix.clone(),
305
621
3600,
622
+
metrics,
306
623
);
307
624
308
625
let test_handle = "alice.bsky.social";
309
626
310
627
// First resolution - should call inner resolver
311
-
let result1 = redis_resolver.resolve(test_handle).await.unwrap();
628
+
let (result1, _timestamp1) = redis_resolver.resolve(test_handle).await.unwrap();
312
629
assert_eq!(result1, "did:plc:testuser123");
313
630
314
631
// Second resolution - should hit cache
315
-
let result2 = redis_resolver.resolve(test_handle).await.unwrap();
632
+
let (result2, _timestamp2) = redis_resolver.resolve(test_handle).await.unwrap();
316
633
assert_eq!(result2, "did:plc:testuser123");
317
634
318
635
// Clean up test data
···
326
643
}
327
644
328
645
#[tokio::test]
646
+
async fn test_redis_handle_resolver_bidirectional_purge() {
647
+
let pool = match crate::test_helpers::get_test_redis_pool() {
648
+
Some(p) => p,
649
+
None => return,
650
+
};
651
+
652
+
// Create mock resolver
653
+
let mock_resolver = Arc::new(MockHandleResolver {
654
+
should_fail: false,
655
+
expected_did: "did:plc:testuser456".to_string(),
656
+
});
657
+
658
+
// Create metrics publisher
659
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
660
+
661
+
// Create Redis-backed resolver with a unique key prefix for testing
662
+
let test_prefix = format!(
663
+
"test:handle:{}:",
664
+
std::time::SystemTime::now()
665
+
.duration_since(std::time::UNIX_EPOCH)
666
+
.unwrap()
667
+
.as_nanos()
668
+
);
669
+
let redis_resolver = RedisHandleResolver::with_full_config(
670
+
mock_resolver,
671
+
pool.clone(),
672
+
test_prefix.clone(),
673
+
3600,
674
+
metrics,
675
+
);
676
+
677
+
let test_handle = "bob.bsky.social";
678
+
let expected_did = "did:plc:testuser456";
679
+
680
+
// First resolution - should call inner resolver and cache both directions
681
+
let (result1, _) = redis_resolver.resolve(test_handle).await.unwrap();
682
+
assert_eq!(result1, expected_did);
683
+
684
+
// Verify both keys exist in Redis
685
+
if let Ok(mut conn) = pool.get().await {
686
+
let mut h = MetroHash64::default();
687
+
h.write(test_handle.as_bytes());
688
+
let handle_key = format!("{}{}", test_prefix, h.finish());
689
+
690
+
let mut h2 = MetroHash64::default();
691
+
h2.write(expected_did.as_bytes());
692
+
let did_key = format!("{}{}", test_prefix, h2.finish());
693
+
694
+
// Check handle -> DID mapping exists
695
+
let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
696
+
assert!(handle_exists, "Handle key should exist in cache");
697
+
698
+
// Check DID -> handle mapping exists
699
+
let did_exists: bool = conn.exists(&did_key).await.unwrap();
700
+
assert!(did_exists, "DID key should exist in cache");
701
+
702
+
// Test purge by handle using the trait method
703
+
redis_resolver.purge(test_handle).await.unwrap();
704
+
705
+
// Verify both keys were deleted
706
+
let handle_exists_after: bool = conn.exists(&handle_key).await.unwrap();
707
+
assert!(
708
+
!handle_exists_after,
709
+
"Handle key should be deleted after purge"
710
+
);
711
+
712
+
let did_exists_after: bool = conn.exists(&did_key).await.unwrap();
713
+
assert!(!did_exists_after, "DID key should be deleted after purge");
714
+
}
715
+
716
+
// Re-resolve to cache again
717
+
let (result2, _) = redis_resolver.resolve(test_handle).await.unwrap();
718
+
assert_eq!(result2, expected_did);
719
+
720
+
// Test purge by DID using the trait method
721
+
redis_resolver.purge(expected_did).await.unwrap();
722
+
723
+
// Verify both keys were deleted again
724
+
if let Ok(mut conn) = pool.get().await {
725
+
let mut h = MetroHash64::default();
726
+
h.write(test_handle.as_bytes());
727
+
let handle_key = format!("{}{}", test_prefix, h.finish());
728
+
729
+
let mut h2 = MetroHash64::default();
730
+
h2.write(expected_did.as_bytes());
731
+
let did_key = format!("{}{}", test_prefix, h2.finish());
732
+
733
+
let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
734
+
assert!(
735
+
!handle_exists,
736
+
"Handle key should be deleted after DID purge"
737
+
);
738
+
739
+
let did_exists: bool = conn.exists(&did_key).await.unwrap();
740
+
assert!(!did_exists, "DID key should be deleted after DID purge");
741
+
}
742
+
}
743
+
744
+
#[tokio::test]
745
+
async fn test_redis_handle_resolver_purge_input_types() {
746
+
let pool = match crate::test_helpers::get_test_redis_pool() {
747
+
Some(p) => p,
748
+
None => return,
749
+
};
750
+
751
+
// Create mock resolver
752
+
let mock_resolver = Arc::new(MockHandleResolver {
753
+
should_fail: false,
754
+
expected_did: "did:plc:testuser789".to_string(),
755
+
});
756
+
757
+
// Create metrics publisher
758
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
759
+
760
+
// Create Redis-backed resolver with a unique key prefix for testing
761
+
let test_prefix = format!(
762
+
"test:handle:{}:",
763
+
std::time::SystemTime::now()
764
+
.duration_since(std::time::UNIX_EPOCH)
765
+
.unwrap()
766
+
.as_nanos()
767
+
);
768
+
let redis_resolver = RedisHandleResolver::with_full_config(
769
+
mock_resolver,
770
+
pool.clone(),
771
+
test_prefix.clone(),
772
+
3600,
773
+
metrics,
774
+
);
775
+
776
+
// Test different input formats
777
+
let test_cases = vec![
778
+
("alice.bsky.social", "alice.bsky.social"), // Handle
779
+
("ALICE.BSKY.SOCIAL", "alice.bsky.social"), // Handle (uppercase)
780
+
("did:plc:abc123", "did:plc:abc123"), // PLC DID
781
+
("did:web:example.com", "did:web:example.com"), // Web DID
782
+
];
783
+
784
+
for (input, expected_key) in test_cases {
785
+
// Resolve first to cache it
786
+
if !input.starts_with("did:") {
787
+
let _ = redis_resolver.resolve(input).await;
788
+
}
789
+
790
+
// Test purging with different input formats
791
+
let result = redis_resolver.purge(input).await;
792
+
assert!(result.is_ok(), "Failed to purge {}: {:?}", input, result);
793
+
794
+
// Verify the key was handled correctly based on type
795
+
if let Ok(mut conn) = pool.get().await {
796
+
let mut h = MetroHash64::default();
797
+
h.write(expected_key.as_bytes());
798
+
let key = format!("{}{}", test_prefix, h.finish());
799
+
800
+
// After purge, key should not exist
801
+
let exists: bool = conn.exists(&key).await.unwrap_or(false);
802
+
assert!(!exists, "Key for {} should not exist after purge", input);
803
+
}
804
+
}
805
+
}
806
+
807
+
#[tokio::test]
808
+
async fn test_redis_handle_resolver_set_method() {
809
+
let pool = match crate::test_helpers::get_test_redis_pool() {
810
+
Some(p) => p,
811
+
None => return,
812
+
};
813
+
814
+
// Create mock resolver
815
+
let mock_resolver = Arc::new(MockHandleResolver {
816
+
should_fail: false,
817
+
expected_did: "did:plc:old".to_string(),
818
+
});
819
+
820
+
// Create metrics publisher
821
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
822
+
823
+
// Create Redis-backed resolver with a unique key prefix for testing
824
+
let test_prefix = format!(
825
+
"test:handle:{}:",
826
+
std::time::SystemTime::now()
827
+
.duration_since(std::time::UNIX_EPOCH)
828
+
.unwrap()
829
+
.as_nanos()
830
+
);
831
+
let redis_resolver = RedisHandleResolver::with_full_config(
832
+
mock_resolver,
833
+
pool.clone(),
834
+
test_prefix.clone(),
835
+
3600,
836
+
metrics,
837
+
);
838
+
839
+
let test_handle = "charlie.bsky.social";
840
+
let test_did = "did:plc:newuser123";
841
+
842
+
// Set the mapping using the trait method
843
+
redis_resolver.set(test_handle, test_did).await.unwrap();
844
+
845
+
// Verify the mapping by resolving the handle
846
+
let (resolved_did, _) = redis_resolver.resolve(test_handle).await.unwrap();
847
+
assert_eq!(resolved_did, test_did);
848
+
849
+
// Test that uppercase handles are normalized
850
+
redis_resolver
851
+
.set("DAVE.BSKY.SOCIAL", "did:plc:dave456")
852
+
.await
853
+
.unwrap();
854
+
let (resolved_did2, _) = redis_resolver.resolve("dave.bsky.social").await.unwrap();
855
+
assert_eq!(resolved_did2, "did:plc:dave456");
856
+
857
+
// Verify both forward and reverse mappings exist
858
+
if let Ok(mut conn) = pool.get().await {
859
+
let mut h = MetroHash64::default();
860
+
h.write(test_handle.as_bytes());
861
+
let handle_key = format!("{}{}", test_prefix, h.finish());
862
+
863
+
let mut h2 = MetroHash64::default();
864
+
h2.write(test_did.as_bytes());
865
+
let did_key = format!("{}{}", test_prefix, h2.finish());
866
+
867
+
// Check both keys exist
868
+
let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
869
+
assert!(handle_exists, "Handle key should exist after set");
870
+
871
+
let did_exists: bool = conn.exists(&did_key).await.unwrap();
872
+
assert!(did_exists, "DID key should exist after set");
873
+
874
+
// Clean up test data
875
+
let _: Result<(), _> = conn.del(&[&handle_key, &did_key]).await;
876
+
}
877
+
}
878
+
879
+
#[tokio::test]
329
880
async fn test_redis_handle_resolver_cache_error() {
330
881
let pool = match crate::test_helpers::get_test_redis_pool() {
331
882
Some(p) => p,
···
337
888
should_fail: true,
338
889
expected_did: String::new(),
339
890
});
891
+
892
+
// Create metrics publisher
893
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
340
894
341
895
// Create Redis-backed resolver with a unique key prefix for testing
342
-
let test_prefix = format!("test:handle:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
896
+
let test_prefix = format!(
897
+
"test:handle:{}:",
898
+
std::time::SystemTime::now()
899
+
.duration_since(std::time::UNIX_EPOCH)
900
+
.unwrap()
901
+
.as_nanos()
902
+
);
343
903
let redis_resolver = RedisHandleResolver::with_full_config(
344
904
mock_resolver,
345
905
pool.clone(),
346
906
test_prefix.clone(),
347
907
3600,
908
+
metrics,
348
909
);
349
910
350
911
let test_handle = "error.bsky.social";
+623
src/handle_resolver/sqlite.rs
+623
src/handle_resolver/sqlite.rs
···
1
+
//! SQLite-backed caching handle resolver.
2
+
//!
3
+
//! This module provides a handle resolver that caches resolution results in SQLite
4
+
//! with configurable expiration times. SQLite caching provides persistence across
5
+
//! service restarts while remaining lightweight for single-instance deployments.
6
+
7
+
use super::errors::HandleResolverError;
8
+
use super::traits::HandleResolver;
9
+
use crate::handle_resolution_result::HandleResolutionResult;
10
+
use crate::metrics::SharedMetricsPublisher;
11
+
use async_trait::async_trait;
12
+
use metrohash::MetroHash64;
13
+
use sqlx::{Row, SqlitePool};
14
+
use std::hash::Hasher as _;
15
+
use std::sync::Arc;
16
+
use std::time::{SystemTime, UNIX_EPOCH};
17
+
18
+
/// SQLite-backed caching handle resolver.
19
+
///
20
+
/// This resolver caches handle resolution results in SQLite with a configurable TTL.
21
+
/// Results are stored in a compact binary format using bincode serialization
22
+
/// to minimize storage overhead.
23
+
///
24
+
/// # Features
25
+
///
26
+
/// - Persistent caching across service restarts
27
+
/// - Lightweight single-file database
28
+
/// - Configurable TTL (default: 90 days)
29
+
/// - Compact binary storage format
30
+
/// - Automatic schema management
31
+
/// - Graceful fallback if SQLite is unavailable
32
+
///
33
+
/// # Example
34
+
///
35
+
/// ```no_run
36
+
/// use std::sync::Arc;
37
+
/// use sqlx::SqlitePool;
38
+
/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
39
+
/// use quickdid::metrics::NoOpMetricsPublisher;
40
+
///
41
+
/// # async fn example() {
42
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
43
+
/// # use reqwest::Client;
44
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
45
+
/// # let http_client = Client::new();
46
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
47
+
/// # let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
48
+
/// # let sqlite_pool: SqlitePool = todo!();
49
+
/// // Create with default 90-day TTL
50
+
/// let resolver = create_sqlite_resolver(
51
+
/// base_resolver,
52
+
/// sqlite_pool,
53
+
/// metrics
54
+
/// );
55
+
/// # }
56
+
/// ```
57
+
pub(super) struct SqliteHandleResolver {
58
+
/// Base handle resolver to perform actual resolution
59
+
inner: Arc<dyn HandleResolver>,
60
+
/// SQLite connection pool
61
+
pool: SqlitePool,
62
+
/// TTL for cache entries in seconds
63
+
ttl_seconds: u64,
64
+
/// Metrics publisher for telemetry
65
+
metrics: SharedMetricsPublisher,
66
+
}
67
+
68
+
impl SqliteHandleResolver {
69
+
/// Create a new SQLite-backed handle resolver with default 90-day TTL.
70
+
fn new(
71
+
inner: Arc<dyn HandleResolver>,
72
+
pool: SqlitePool,
73
+
metrics: SharedMetricsPublisher,
74
+
) -> Self {
75
+
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
76
+
}
77
+
78
+
/// Create a new SQLite-backed handle resolver with custom TTL.
79
+
fn with_ttl(
80
+
inner: Arc<dyn HandleResolver>,
81
+
pool: SqlitePool,
82
+
ttl_seconds: u64,
83
+
metrics: SharedMetricsPublisher,
84
+
) -> Self {
85
+
Self {
86
+
inner,
87
+
pool,
88
+
ttl_seconds,
89
+
metrics,
90
+
}
91
+
}
92
+
93
+
/// Generate the cache key for a handle.
94
+
///
95
+
/// Uses MetroHash64 to generate a consistent hash of the handle
96
+
/// for use as the primary key. This provides better key distribution
97
+
/// and avoids issues with special characters in handles.
98
+
fn make_key(&self, handle: &str) -> u64 {
99
+
let mut h = MetroHash64::default();
100
+
h.write(handle.as_bytes());
101
+
h.finish()
102
+
}
103
+
104
+
/// Check if a cache entry is expired.
105
+
fn is_expired(&self, updated_timestamp: i64) -> bool {
106
+
let current_timestamp = SystemTime::now()
107
+
.duration_since(UNIX_EPOCH)
108
+
.unwrap_or_default()
109
+
.as_secs() as i64;
110
+
111
+
(current_timestamp - updated_timestamp) > (self.ttl_seconds as i64)
112
+
}
113
+
}
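// Illustrative sketch (not part of this diff): the table shape the queries below assume.
// The authoritative DDL lives in `crate::sqlite_schema::create_schema`; the column types
// here are inferred from the bindings in this file and may differ from the real schema.
//
// CREATE TABLE IF NOT EXISTS handle_resolution_cache (
//     key     INTEGER PRIMARY KEY,  -- MetroHash64 of the handle, stored as a signed i64
//     result  BLOB NOT NULL,        -- serialized HandleResolutionResult
//     created INTEGER NOT NULL,     -- UNIX seconds at first insert
//     updated INTEGER NOT NULL      -- UNIX seconds; compared against ttl_seconds on read
// );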
114
+
115
+
#[async_trait]
116
+
impl HandleResolver for SqliteHandleResolver {
117
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
118
+
let handle = s.to_string();
119
+
let key = self.make_key(&handle) as i64; // SQLite uses signed integers
120
+
121
+
// Try to get from SQLite cache first
122
+
let cached_result =
123
+
sqlx::query("SELECT result, updated FROM handle_resolution_cache WHERE key = ?1")
124
+
.bind(key)
125
+
.fetch_optional(&self.pool)
126
+
.await;
127
+
128
+
match cached_result {
129
+
Ok(Some(row)) => {
130
+
let cached_bytes: Vec<u8> = row.get("result");
131
+
let updated_timestamp: i64 = row.get("updated");
132
+
133
+
// Check if the entry is expired
134
+
if !self.is_expired(updated_timestamp) {
135
+
// Deserialize the cached result
136
+
match HandleResolutionResult::from_bytes(&cached_bytes) {
137
+
Ok(cached_result) => {
138
+
if let Some(did) = cached_result.to_did() {
139
+
tracing::debug!("Cache hit for handle {}: {}", handle, did);
140
+
self.metrics.incr("resolver.sqlite.cache_hit").await;
141
+
return Ok((did, cached_result.timestamp));
142
+
} else {
143
+
tracing::debug!("Cache hit (not resolved) for handle {}", handle);
144
+
self.metrics
145
+
.incr("resolver.sqlite.cache_hit_not_resolved")
146
+
.await;
147
+
return Err(HandleResolverError::HandleNotFound);
148
+
}
149
+
}
150
+
Err(e) => {
151
+
tracing::warn!(
152
+
"Failed to deserialize cached result for handle {}: {}",
153
+
handle,
154
+
e
155
+
);
156
+
self.metrics.incr("resolver.sqlite.deserialize_error").await;
157
+
// Fall through to re-resolve if deserialization fails
158
+
}
159
+
}
160
+
} else {
161
+
tracing::debug!("Cache entry expired for handle {}", handle);
162
+
self.metrics.incr("resolver.sqlite.cache_expired").await;
163
+
// Entry is expired, we'll re-resolve and update it
164
+
}
165
+
}
166
+
Ok(None) => {
167
+
tracing::debug!("Cache miss for handle {}, resolving...", handle);
168
+
self.metrics.incr("resolver.sqlite.cache_miss").await;
169
+
}
170
+
Err(e) => {
171
+
tracing::warn!("Failed to query SQLite cache for handle {}: {}", handle, e);
172
+
self.metrics.incr("resolver.sqlite.query_error").await;
173
+
// Fall through to resolve without caching on database error
174
+
}
175
+
}
176
+
177
+
// Not in cache or expired, resolve through inner resolver
178
+
let result = self.inner.resolve(s).await;
179
+
180
+
// Create and serialize resolution result
181
+
let resolution_result = match &result {
182
+
Ok((did, _timestamp)) => {
183
+
tracing::debug!(
184
+
"Caching successful resolution for handle {}: {}",
185
+
handle,
186
+
did
187
+
);
188
+
match HandleResolutionResult::success(did) {
189
+
Ok(res) => res,
190
+
Err(e) => {
191
+
tracing::warn!("Failed to create resolution result: {}", e);
192
+
self.metrics
193
+
.incr("resolver.sqlite.result_create_error")
194
+
.await;
195
+
return result;
196
+
}
197
+
}
198
+
}
199
+
Err(e) => {
200
+
tracing::debug!("Caching failed resolution for handle {}: {}", handle, e);
201
+
match HandleResolutionResult::not_resolved() {
202
+
Ok(res) => res,
203
+
Err(err) => {
204
+
tracing::warn!("Failed to create not_resolved result: {}", err);
205
+
self.metrics
206
+
.incr("resolver.sqlite.result_create_error")
207
+
.await;
208
+
return result;
209
+
}
210
+
}
211
+
}
212
+
};
213
+
214
+
// Serialize to bytes
215
+
match resolution_result.to_bytes() {
216
+
Ok(bytes) => {
217
+
let current_timestamp = SystemTime::now()
218
+
.duration_since(UNIX_EPOCH)
219
+
.unwrap_or_default()
220
+
.as_secs() as i64;
221
+
222
+
// Insert or update the cache entry
223
+
let query_result = sqlx::query(
224
+
r#"
225
+
INSERT INTO handle_resolution_cache (key, result, created, updated)
226
+
VALUES (?1, ?2, ?3, ?4)
227
+
ON CONFLICT(key) DO UPDATE SET
228
+
result = excluded.result,
229
+
updated = excluded.updated
230
+
"#,
231
+
)
232
+
.bind(key)
233
+
.bind(&bytes)
234
+
.bind(current_timestamp)
235
+
.bind(current_timestamp)
236
+
.execute(&self.pool)
237
+
.await;
238
+
239
+
if let Err(e) = query_result {
240
+
tracing::warn!("Failed to cache handle resolution in SQLite: {}", e);
241
+
self.metrics.incr("resolver.sqlite.cache_set_error").await;
242
+
} else {
243
+
self.metrics.incr("resolver.sqlite.cache_set").await;
244
+
}
245
+
}
246
+
Err(e) => {
247
+
tracing::warn!(
248
+
"Failed to serialize resolution result for handle {}: {}",
249
+
handle,
250
+
e
251
+
);
252
+
self.metrics.incr("resolver.sqlite.serialize_error").await;
253
+
}
254
+
}
255
+
256
+
result
257
+
}
258
+
259
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
260
+
// Normalize the handle to lowercase
261
+
let handle = handle.to_lowercase();
262
+
263
+
// Update the SQLite cache
264
+
if let Ok(mut conn) = self.pool.acquire().await {
265
+
// Create a resolution result for the successful mapping
266
+
let resolution_result = match HandleResolutionResult::success(did) {
267
+
Ok(res) => res,
268
+
Err(e) => {
269
+
tracing::warn!(
270
+
"Failed to create resolution result for set operation: {}",
271
+
e
272
+
);
273
+
self.metrics
274
+
.incr("resolver.sqlite.set_result_create_error")
275
+
.await;
276
+
// Still chain to inner resolver even if we can't cache
277
+
return self.inner.set(&handle, did).await;
278
+
}
279
+
};
280
+
281
+
// Serialize to bytes
282
+
match resolution_result.to_bytes() {
283
+
Ok(bytes) => {
284
+
// Insert or update the cache entry
285
+
let timestamp = std::time::SystemTime::now()
286
+
.duration_since(std::time::UNIX_EPOCH)
287
+
.unwrap_or_default()
288
+
.as_secs() as i64;
289
+
290
+
let expires_at = timestamp + self.ttl_seconds as i64;
291
+
292
+
match sqlx::query(
293
+
"INSERT OR REPLACE INTO handle_resolution_cache (handle, resolved_value, created_at, expires_at) VALUES (?, ?, ?, ?)"
294
+
)
295
+
.bind(&handle)
296
+
.bind(&bytes)
297
+
.bind(timestamp)
298
+
.bind(expires_at)
299
+
.execute(&mut *conn)
300
+
.await
301
+
{
302
+
Ok(_) => {
303
+
tracing::debug!("Set handle {} -> DID {} in SQLite cache", handle, did);
304
+
self.metrics.incr("resolver.sqlite.set_success").await;
305
+
}
306
+
Err(e) => {
307
+
tracing::warn!("Failed to set handle->DID mapping in SQLite: {}", e);
308
+
self.metrics.incr("resolver.sqlite.set_cache_error").await;
309
+
// Still chain to inner resolver even if cache update fails
310
+
}
311
+
}
312
+
}
313
+
Err(e) => {
314
+
tracing::warn!(
315
+
"Failed to serialize resolution result for set operation: {}",
316
+
e
317
+
);
318
+
self.metrics
319
+
.incr("resolver.sqlite.set_serialize_error")
320
+
.await;
321
+
// Still chain to inner resolver even if serialization fails
322
+
}
323
+
}
324
+
} else {
325
+
tracing::warn!("Failed to get SQLite connection for set operation");
326
+
self.metrics
327
+
.incr("resolver.sqlite.set_connection_error")
328
+
.await;
329
+
}
330
+
331
+
// Chain to inner resolver
332
+
self.inner.set(&handle, did).await
333
+
}
334
+
}
335
+
336
+
/// Create a new SQLite-backed handle resolver with default 90-day TTL.
337
+
///
338
+
/// # Arguments
339
+
///
340
+
/// * `inner` - The underlying resolver to use for actual resolution
341
+
/// * `pool` - SQLite connection pool
342
+
/// * `metrics` - Metrics publisher for telemetry
343
+
///
344
+
/// # Example
345
+
///
346
+
/// ```no_run
347
+
/// use std::sync::Arc;
348
+
/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
349
+
/// use quickdid::sqlite_schema::create_sqlite_pool;
350
+
/// use quickdid::metrics::NoOpMetricsPublisher;
351
+
///
352
+
/// # async fn example() -> anyhow::Result<()> {
353
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
354
+
/// # use reqwest::Client;
355
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
356
+
/// # let http_client = Client::new();
357
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
358
+
/// let base = create_base_resolver(
359
+
/// dns_resolver,
360
+
/// http_client,
361
+
/// metrics.clone(),
362
+
/// );
363
+
///
364
+
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
365
+
/// let resolver = create_sqlite_resolver(base, pool, metrics);
366
+
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
367
+
/// # Ok(())
368
+
/// # }
369
+
/// ```
370
+
pub fn create_sqlite_resolver(
371
+
inner: Arc<dyn HandleResolver>,
372
+
pool: SqlitePool,
373
+
metrics: SharedMetricsPublisher,
374
+
) -> Arc<dyn HandleResolver> {
375
+
Arc::new(SqliteHandleResolver::new(inner, pool, metrics))
376
+
}
377
+
378
+
/// Create a new SQLite-backed handle resolver with custom TTL.
379
+
///
380
+
/// # Arguments
381
+
///
382
+
/// * `inner` - The underlying resolver to use for actual resolution
383
+
/// * `pool` - SQLite connection pool
384
+
/// * `ttl_seconds` - TTL for cache entries in seconds
385
+
/// * `metrics` - Metrics publisher for telemetry
386
+
pub fn create_sqlite_resolver_with_ttl(
387
+
inner: Arc<dyn HandleResolver>,
388
+
pool: SqlitePool,
389
+
ttl_seconds: u64,
390
+
metrics: SharedMetricsPublisher,
391
+
) -> Arc<dyn HandleResolver> {
392
+
Arc::new(SqliteHandleResolver::with_ttl(
393
+
inner,
394
+
pool,
395
+
ttl_seconds,
396
+
metrics,
397
+
))
398
+
}
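// Illustrative sketch (not part of this diff): one possible layering, with SQLite as the
// persistent layer and Redis in front for shared, low-latency lookups. The pool paths,
// TTL, and helper imports are placeholders following the doc examples in these modules.
//
// use quickdid::cache::create_redis_pool;
// use quickdid::handle_resolver::{create_base_resolver, create_redis_resolver, create_sqlite_resolver_with_ttl};
// use quickdid::sqlite_schema::create_sqlite_pool;
async fn layered_resolver_example() -> anyhow::Result<()> {
    let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
    let http_client = Client::new();
    let metrics = Arc::new(NoOpMetricsPublisher);

    // Base resolver performs the actual DNS / HTTP resolution.
    let base = create_base_resolver(dns_resolver, http_client, metrics.clone());

    // SQLite layer: persistent across restarts, with a 7-day TTL instead of the 90-day default.
    let sqlite_pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
    let sqlite = create_sqlite_resolver_with_ttl(base, sqlite_pool, 7 * 24 * 60 * 60, metrics.clone());

    // Optional Redis layer on top; both factories take and return Arc<dyn HandleResolver>,
    // so the layers compose freely.
    let redis_pool = create_redis_pool("redis://localhost:6379")?;
    let resolver = create_redis_resolver(sqlite, redis_pool, metrics);

    let (did, _resolved_at) = resolver.resolve("alice.bsky.social").await.unwrap();
    println!("{did}");
    Ok(())
}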
399
+
400
+
#[cfg(test)]
401
+
mod tests {
402
+
use super::*;
403
+
404
+
// Mock handle resolver for testing
405
+
#[derive(Clone)]
406
+
struct MockHandleResolver {
407
+
should_fail: bool,
408
+
expected_did: String,
409
+
}
410
+
411
+
#[async_trait]
412
+
impl HandleResolver for MockHandleResolver {
413
+
async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
414
+
if self.should_fail {
415
+
Err(HandleResolverError::MockResolutionFailure)
416
+
} else {
417
+
let timestamp = std::time::SystemTime::now()
418
+
.duration_since(std::time::UNIX_EPOCH)
419
+
.unwrap_or_default()
420
+
.as_secs();
421
+
Ok((self.expected_did.clone(), timestamp))
422
+
}
423
+
}
424
+
}
425
+
426
+
#[tokio::test]
427
+
async fn test_sqlite_handle_resolver_cache_hit() {
428
+
// Create in-memory SQLite database for testing
429
+
let pool = SqlitePool::connect("sqlite::memory:")
430
+
.await
431
+
.expect("Failed to connect to in-memory SQLite");
432
+
433
+
// Create the schema
434
+
crate::sqlite_schema::create_schema(&pool)
435
+
.await
436
+
.expect("Failed to create schema");
437
+
438
+
// Create mock resolver
439
+
let mock_resolver = Arc::new(MockHandleResolver {
440
+
should_fail: false,
441
+
expected_did: "did:plc:testuser123".to_string(),
442
+
});
443
+
444
+
// Create metrics publisher
445
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
446
+
447
+
// Create SQLite-backed resolver
448
+
let sqlite_resolver =
449
+
SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);
450
+
451
+
let test_handle = "alice.bsky.social";
452
+
let expected_key = sqlite_resolver.make_key(test_handle) as i64;
453
+
454
+
// Verify database is empty initially
455
+
let initial_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
456
+
.fetch_one(&pool)
457
+
.await
458
+
.expect("Failed to query initial count");
459
+
assert_eq!(initial_count, 0);
460
+
461
+
// First resolution - should call inner resolver and cache the result
462
+
let (result1, _timestamp1) = sqlite_resolver.resolve(test_handle).await.unwrap();
463
+
assert_eq!(result1, "did:plc:testuser123");
464
+
465
+
// Verify record was inserted
466
+
let count_after_first: i64 =
467
+
sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
468
+
.fetch_one(&pool)
469
+
.await
470
+
.expect("Failed to query count after first resolution");
471
+
assert_eq!(count_after_first, 1);
472
+
473
+
// Verify the cached record has correct key and non-empty result
474
+
let cached_record = sqlx::query(
475
+
"SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1",
476
+
)
477
+
.bind(expected_key)
478
+
.fetch_one(&pool)
479
+
.await
480
+
.expect("Failed to fetch cached record");
481
+
482
+
let cached_key: i64 = cached_record.get("key");
483
+
let cached_result: Vec<u8> = cached_record.get("result");
484
+
let cached_created: i64 = cached_record.get("created");
485
+
let cached_updated: i64 = cached_record.get("updated");
486
+
487
+
assert_eq!(cached_key, expected_key);
488
+
assert!(
489
+
!cached_result.is_empty(),
490
+
"Cached result should not be empty"
491
+
);
492
+
assert!(cached_created > 0, "Created timestamp should be positive");
493
+
assert!(cached_updated > 0, "Updated timestamp should be positive");
494
+
assert_eq!(
495
+
cached_created, cached_updated,
496
+
"Created and updated should be equal on first insert"
497
+
);
498
+
499
+
// Verify we can deserialize the cached result
500
+
let resolution_result =
501
+
crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
502
+
.expect("Failed to deserialize cached result");
503
+
let cached_did = resolution_result.to_did().expect("Should have a DID");
504
+
assert_eq!(cached_did, "did:plc:testuser123");
505
+
506
+
// Second resolution - should hit cache (no additional database insert)
507
+
let (result2, _timestamp2) = sqlite_resolver.resolve(test_handle).await.unwrap();
508
+
assert_eq!(result2, "did:plc:testuser123");
509
+
510
+
// Verify count hasn't changed (cache hit, no new insert)
511
+
let count_after_second: i64 =
512
+
sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
513
+
.fetch_one(&pool)
514
+
.await
515
+
.expect("Failed to query count after second resolution");
516
+
assert_eq!(count_after_second, 1);
517
+
}
518
+
519
+
#[tokio::test]
520
+
async fn test_sqlite_handle_resolver_cache_error() {
521
+
// Create in-memory SQLite database for testing
522
+
let pool = SqlitePool::connect("sqlite::memory:")
523
+
.await
524
+
.expect("Failed to connect to in-memory SQLite");
525
+
526
+
// Create the schema
527
+
crate::sqlite_schema::create_schema(&pool)
528
+
.await
529
+
.expect("Failed to create schema");
530
+
531
+
// Create mock resolver that fails
532
+
let mock_resolver = Arc::new(MockHandleResolver {
533
+
should_fail: true,
534
+
expected_did: String::new(),
535
+
});
536
+
537
+
// Create metrics publisher
538
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
539
+
540
+
// Create SQLite-backed resolver
541
+
let sqlite_resolver =
542
+
SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);
543
+
544
+
let test_handle = "error.bsky.social";
545
+
let expected_key = sqlite_resolver.make_key(test_handle) as i64;
546
+
547
+
// Verify database is empty initially
548
+
let initial_count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
549
+
.fetch_one(&pool)
550
+
.await
551
+
.expect("Failed to query initial count");
552
+
assert_eq!(initial_count, 0);
553
+
554
+
// First resolution - should fail and cache the failure
555
+
let result1 = sqlite_resolver.resolve(test_handle).await;
556
+
assert!(result1.is_err());
557
+
558
+
// Match the specific error type we expect
559
+
match result1 {
560
+
Err(HandleResolverError::MockResolutionFailure) => {}
561
+
other => panic!("Expected MockResolutionFailure, got {:?}", other),
562
+
}
563
+
564
+
// Verify the failure was cached
565
+
let count_after_first: i64 =
566
+
sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
567
+
.fetch_one(&pool)
568
+
.await
569
+
.expect("Failed to query count after first resolution");
570
+
assert_eq!(count_after_first, 1);
571
+
572
+
// Verify the cached error record
573
+
let cached_record = sqlx::query(
574
+
"SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1",
575
+
)
576
+
.bind(expected_key)
577
+
.fetch_one(&pool)
578
+
.await
579
+
.expect("Failed to fetch cached error record");
580
+
581
+
let cached_key: i64 = cached_record.get("key");
582
+
let cached_result: Vec<u8> = cached_record.get("result");
583
+
let cached_created: i64 = cached_record.get("created");
584
+
let cached_updated: i64 = cached_record.get("updated");
585
+
586
+
assert_eq!(cached_key, expected_key);
587
+
assert!(
588
+
!cached_result.is_empty(),
589
+
"Cached error result should not be empty"
590
+
);
591
+
assert!(cached_created > 0, "Created timestamp should be positive");
592
+
assert!(cached_updated > 0, "Updated timestamp should be positive");
593
+
assert_eq!(
594
+
cached_created, cached_updated,
595
+
"Created and updated should be equal on first insert"
596
+
);
597
+
598
+
// Verify we can deserialize the cached error result
599
+
let resolution_result =
600
+
crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
601
+
.expect("Failed to deserialize cached error result");
602
+
let cached_did = resolution_result.to_did();
603
+
assert!(cached_did.is_none(), "Error result should have no DID");
604
+
605
+
// Second resolution - should hit cache with error (no additional database operations)
606
+
let result2 = sqlite_resolver.resolve(test_handle).await;
607
+
assert!(result2.is_err());
608
+
609
+
// Match the specific error type we expect from cache
610
+
match result2 {
611
+
Err(HandleResolverError::HandleNotFound) => {} // Cache returns HandleNotFound for "not resolved"
612
+
other => panic!("Expected HandleNotFound from cache, got {:?}", other),
613
+
}
614
+
615
+
// Verify count hasn't changed (cache hit, no new operations)
616
+
let count_after_second: i64 =
617
+
sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
618
+
.fetch_one(&pool)
619
+
.await
620
+
.expect("Failed to query count after second resolution");
621
+
assert_eq!(count_after_second, 1);
622
+
}
623
+
}
+105
-5
src/handle_resolver/traits.rs
+105
-5
src/handle_resolver/traits.rs
···
17
17
/// ```no_run
18
18
/// use async_trait::async_trait;
19
19
/// use quickdid::handle_resolver::{HandleResolver, HandleResolverError};
20
+
/// use std::time::{SystemTime, UNIX_EPOCH};
20
21
///
21
22
/// struct MyResolver;
22
23
///
23
24
/// #[async_trait]
24
25
/// impl HandleResolver for MyResolver {
25
-
/// async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
26
+
/// async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
26
27
/// // Custom resolution logic
27
-
/// Ok(format!("did:plc:{}", s.replace('.', "")))
28
+
/// let did = format!("did:plc:{}", s.replace('.', ""));
29
+
/// let timestamp = SystemTime::now()
30
+
/// .duration_since(UNIX_EPOCH)
31
+
/// .unwrap()
32
+
/// .as_secs();
33
+
/// Ok((did, timestamp))
28
34
/// }
29
35
/// }
30
36
/// ```
31
37
#[async_trait]
32
38
pub trait HandleResolver: Send + Sync {
33
-
/// Resolve a handle to its DID.
39
+
/// Resolve a handle to its DID with timestamp.
34
40
///
35
41
/// # Arguments
36
42
///
···
38
44
///
39
45
/// # Returns
40
46
///
41
-
/// The resolved DID on success, or an error if resolution fails.
47
+
/// A tuple containing:
48
+
/// - The resolved DID string
49
+
/// - The resolution timestamp as UNIX epoch seconds
42
50
///
43
51
/// # Errors
44
52
///
···
46
54
/// - The handle cannot be resolved
47
55
/// - Network errors occur during resolution
48
56
/// - The handle is invalid or doesn't exist
49
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError>;
57
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError>;
58
+
59
+
/// Purge a handle or DID from the cache.
60
+
///
61
+
/// This method removes cached entries for the given identifier, which can be
62
+
/// either a handle (e.g., "alice.bsky.social") or a DID (e.g., "did:plc:xyz123").
63
+
/// Implementations should handle bidirectional purging where applicable.
64
+
///
65
+
/// # Arguments
66
+
///
67
+
/// * `subject` - Either a handle or DID to purge from the cache
68
+
///
69
+
/// # Returns
70
+
///
71
+
/// Ok(()) if the purge was successful or if the identifier wasn't cached.
72
+
/// Caching implementations remove the entry (in both directions where applicable); non-caching implementations fall back to the default no-op.
73
+
///
74
+
/// # Default Implementation
75
+
///
76
+
/// The default implementation is a no-op that always returns Ok(()).
77
+
async fn purge(&self, _subject: &str) -> Result<(), HandleResolverError> {
78
+
Ok(())
79
+
}
80
+
81
+
/// Set a handle-to-DID mapping in the cache.
82
+
///
83
+
/// This method allows manually setting or updating a cached mapping between
84
+
/// a handle and its corresponding DID. This is useful for pre-populating
85
+
/// caches or updating stale entries.
86
+
///
87
+
/// # Arguments
88
+
///
89
+
/// * `handle` - The handle to cache (e.g., "alice.bsky.social")
90
+
/// * `did` - The DID to associate with the handle (e.g., "did:plc:xyz123")
91
+
///
92
+
/// # Returns
93
+
///
94
+
/// Ok(()) if the mapping was successfully set. Implementations that do not
95
+
/// support manual cache updates fall back to the default no-op and simply
96
+
/// return Ok(()).
97
+
///
98
+
/// # Default Implementation
99
+
///
100
+
/// The default implementation is a no-op that always returns Ok(()).
101
+
async fn set(&self, _handle: &str, _did: &str) -> Result<(), HandleResolverError> {
102
+
Ok(())
103
+
}
104
+
}
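// Illustrative sketch (not part of this diff): a minimal in-memory caching resolver that
// overrides `purge` and `set` as well as `resolve`. The HashMap cache, the RwLock, and the
// wrapped `inner` resolver are assumptions made for the example only.
//
// use std::collections::HashMap;
// use std::sync::Arc;
// use tokio::sync::RwLock;
struct MemoryCachingResolver {
    inner: Arc<dyn HandleResolver>,
    cache: RwLock<HashMap<String, (String, u64)>>, // handle -> (did, resolved_at)
}

#[async_trait]
impl HandleResolver for MemoryCachingResolver {
    async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
        let handle = s.to_lowercase();
        if let Some(hit) = self.cache.read().await.get(&handle) {
            return Ok(hit.clone());
        }
        let resolved = self.inner.resolve(&handle).await?;
        self.cache.write().await.insert(handle, resolved.clone());
        Ok(resolved)
    }

    async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
        let mut cache = self.cache.write().await;
        if subject.starts_with("did:") {
            // Purge by DID: drop every handle that maps to it.
            cache.retain(|_, cached| cached.0 != subject);
        } else {
            cache.remove(&subject.to_lowercase());
        }
        drop(cache);
        self.inner.purge(subject).await
    }

    async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
        let now = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        self.cache
            .write()
            .await
            .insert(handle.to_lowercase(), (did.to_string(), now));
        self.inner.set(handle, did).await
    }
}
// Chaining `purge`/`set` through `inner` mirrors how the Redis and SQLite layers keep the
// whole resolver stack consistent.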
105
+
106
+
#[cfg(test)]
107
+
mod tests {
108
+
use super::*;
109
+
110
+
// Simple test resolver that doesn't cache anything
111
+
struct NoOpTestResolver;
112
+
113
+
#[async_trait]
114
+
impl HandleResolver for NoOpTestResolver {
115
+
async fn resolve(&self, _s: &str) -> Result<(String, u64), HandleResolverError> {
116
+
Ok(("did:test:123".to_string(), 1234567890))
117
+
}
118
+
// Uses the default purge and set implementations
119
+
}
120
+
121
+
#[tokio::test]
122
+
async fn test_default_purge_implementation() {
123
+
let resolver = NoOpTestResolver;
124
+
125
+
// Default implementation should always return Ok(())
126
+
assert!(resolver.purge("alice.bsky.social").await.is_ok());
127
+
assert!(resolver.purge("did:plc:xyz123").await.is_ok());
128
+
assert!(resolver.purge("").await.is_ok());
129
+
}
130
+
131
+
#[tokio::test]
132
+
async fn test_default_set_implementation() {
133
+
let resolver = NoOpTestResolver;
134
+
135
+
// Default implementation should always return Ok(())
136
+
assert!(
137
+
resolver
138
+
.set("alice.bsky.social", "did:plc:xyz123")
139
+
.await
140
+
.is_ok()
141
+
);
142
+
assert!(
143
+
resolver
144
+
.set("bob.example.com", "did:web:example.com")
145
+
.await
146
+
.is_ok()
147
+
);
148
+
assert!(resolver.set("", "").await.is_ok());
149
+
}
50
150
}
+146
-95
src/handle_resolver_task.rs
+146
-95
src/handle_resolver_task.rs
···
5
5
//! and ensures resolved handles are cached for efficient subsequent lookups.
6
6
7
7
use crate::handle_resolver::HandleResolver;
8
-
use crate::queue_adapter::{HandleResolutionWork, QueueAdapter};
8
+
use crate::metrics::SharedMetricsPublisher;
9
+
use crate::queue::{HandleResolutionWork, QueueAdapter};
9
10
use anyhow::Result;
10
11
use std::sync::Arc;
11
12
use std::time::Duration;
···
16
17
/// Handle resolver task errors
17
18
#[derive(Error, Debug)]
18
19
pub(crate) enum HandleResolverError {
19
-
#[error("Queue adapter health check failed: adapter is not healthy")]
20
+
/// Queue adapter health check failed
21
+
#[error("error-quickdid-task-1 Queue adapter health check failed: adapter is not healthy")]
20
22
QueueAdapterUnhealthy,
21
23
}
22
24
···
35
37
}
36
38
}
37
39
38
-
/// Metrics for handle resolution processing
39
-
#[derive(Debug, Default)]
40
-
pub(crate) struct HandleResolverMetrics {
41
-
pub total_processed: std::sync::atomic::AtomicU64,
42
-
pub total_succeeded: std::sync::atomic::AtomicU64,
43
-
pub total_failed: std::sync::atomic::AtomicU64,
44
-
pub total_cached: std::sync::atomic::AtomicU64,
45
-
}
46
-
47
40
/// Handle resolver task processor
48
41
pub(crate) struct HandleResolverTask {
49
42
adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
50
43
handle_resolver: Arc<dyn HandleResolver>,
51
44
cancel_token: CancellationToken,
52
45
config: HandleResolverTaskConfig,
53
-
metrics: Arc<HandleResolverMetrics>,
46
+
metrics_publisher: SharedMetricsPublisher,
54
47
}
55
48
56
49
impl HandleResolverTask {
···
59
52
adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
60
53
handle_resolver: Arc<dyn HandleResolver>,
61
54
cancel_token: CancellationToken,
55
+
metrics_publisher: SharedMetricsPublisher,
62
56
) -> Self {
63
57
let config = HandleResolverTaskConfig::default();
64
58
Self {
···
66
60
handle_resolver,
67
61
cancel_token,
68
62
config,
69
-
metrics: Arc::new(HandleResolverMetrics::default()),
63
+
metrics_publisher,
70
64
}
71
65
}
72
66
···
76
70
handle_resolver: Arc<dyn HandleResolver>,
77
71
cancel_token: CancellationToken,
78
72
config: HandleResolverTaskConfig,
73
+
metrics_publisher: SharedMetricsPublisher,
79
74
) -> Self {
80
75
Self {
81
76
adapter,
82
77
handle_resolver,
83
78
cancel_token,
84
79
config,
85
-
metrics: Arc::new(HandleResolverMetrics::default()),
80
+
metrics_publisher,
86
81
}
87
82
}
88
83
···
115
110
116
111
// All work has been processed
117
112
info!("All handle resolutions completed");
113
+
info!("Handle resolver task processor stopped");
118
114
119
-
info!(
120
-
total_processed = self
121
-
.metrics
122
-
.total_processed
123
-
.load(std::sync::atomic::Ordering::Relaxed),
124
-
total_succeeded = self
125
-
.metrics
126
-
.total_succeeded
127
-
.load(std::sync::atomic::Ordering::Relaxed),
128
-
total_failed = self
129
-
.metrics
130
-
.total_failed
131
-
.load(std::sync::atomic::Ordering::Relaxed),
132
-
total_cached = self
133
-
.metrics
134
-
.total_cached
135
-
.load(std::sync::atomic::Ordering::Relaxed),
136
-
"Handle resolver task processor stopped"
137
-
);
115
+
Ok(())
116
+
}
138
117
139
-
Ok(())
118
+
/// Check if an error represents a soft failure (handle not found)
119
+
/// rather than a real error condition.
120
+
///
121
+
/// These atproto_identity library errors indicate the handle doesn't support
122
+
/// the specific resolution method, which is normal and expected:
123
+
/// - error-atproto-identity-resolve-4: DNS resolution failed (no records)
124
+
/// - error-atproto-identity-resolve-5: HTTP resolution failed (hostname not found)
/// - error-atproto-identity-resolve-6: HTTP resolution returned a response that is not in DID format
125
+
fn is_soft_failure(error_str: &str) -> bool {
126
+
// Check for specific atproto_identity error codes that indicate "not found"
127
+
// rather than actual failures
128
+
if error_str.starts_with("error-atproto-identity-resolve-4") {
129
+
// DNS resolution - check if it's a "no records" scenario
130
+
error_str.contains("NoRecordsFound")
131
+
} else if error_str.starts_with("error-atproto-identity-resolve-6") {
132
+
// HTTP resolution - check if it's a DID format issue
133
+
error_str.contains("expected DID format")
134
+
} else if error_str.starts_with("error-atproto-identity-resolve-5") {
135
+
// HTTP resolution - check if it's a hostname lookup failure
136
+
error_str.contains("No address associated with hostname")
137
+
|| error_str.contains("failed to lookup address information")
138
+
} else {
139
+
false
140
+
}
140
141
}
141
142
142
143
/// Process a single handle resolution work item
···
156
157
157
158
let duration_ms = start_time.elapsed().as_millis() as u64;
158
159
159
-
// Update metrics
160
-
self.metrics
161
-
.total_processed
162
-
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
160
+
// Publish metrics
161
+
self.metrics_publisher
162
+
.incr("task.handle_resolution.processed")
163
+
.await;
164
+
self.metrics_publisher
165
+
.time("task.handle_resolution.duration_ms", duration_ms)
166
+
.await;
163
167
164
168
match result {
165
-
Ok(Ok(did)) => {
166
-
self.metrics
167
-
.total_succeeded
168
-
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
169
-
self.metrics
170
-
.total_cached
171
-
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
169
+
Ok(Ok((did, _timestamp))) => {
170
+
// Publish success metrics
171
+
self.metrics_publisher
172
+
.incr("task.handle_resolution.success")
173
+
.await;
174
+
self.metrics_publisher
175
+
.incr("task.handle_resolution.cached")
176
+
.await;
172
177
173
-
info!(
178
+
debug!(
174
179
handle = %work.handle,
175
180
did = %did,
176
181
duration_ms = duration_ms,
···
178
183
);
179
184
}
180
185
Ok(Err(e)) => {
181
-
self.metrics
182
-
.total_failed
183
-
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
186
+
let error_str = e.to_string();
187
+
188
+
if Self::is_soft_failure(&error_str) {
189
+
// This is a soft failure - handle simply doesn't support this resolution method
190
+
// Publish not-found metrics
191
+
self.metrics_publisher
192
+
.incr("task.handle_resolution.not_found")
193
+
.await;
194
+
195
+
debug!(
196
+
handle = %work.handle,
197
+
error = %error_str,
198
+
duration_ms = duration_ms,
199
+
"Handle not found (soft failure)"
200
+
);
201
+
} else {
202
+
// This is a real error
203
+
// Publish failure metrics
204
+
self.metrics_publisher
205
+
.incr("task.handle_resolution.failed")
206
+
.await;
184
207
185
-
error!(
186
-
handle = %work.handle,
187
-
error = %e,
188
-
duration_ms = duration_ms,
189
-
"Handle resolution failed"
190
-
);
208
+
error!(
209
+
handle = %work.handle,
210
+
error = %error_str,
211
+
duration_ms = duration_ms,
212
+
"Handle resolution failed"
213
+
);
214
+
}
191
215
}
192
216
Err(_) => {
193
-
self.metrics
194
-
.total_failed
195
-
.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
217
+
// Publish timeout metrics
218
+
self.metrics_publisher
219
+
.incr("task.handle_resolution.timeout")
220
+
.await;
196
221
197
222
error!(
198
223
handle = %work.handle,
···
227
252
/// * `adapter` - Queue adapter for work items
228
253
/// * `handle_resolver` - Handle resolver implementation
229
254
/// * `cancel_token` - Token for graceful shutdown
255
+
/// * `metrics_publisher` - Metrics publisher for telemetry
230
256
pub fn create_handle_resolver_task(
231
257
adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
232
258
handle_resolver: Arc<dyn HandleResolver>,
233
259
cancel_token: CancellationToken,
260
+
metrics_publisher: SharedMetricsPublisher,
234
261
) -> HandleResolverTaskHandle {
235
262
HandleResolverTaskHandle {
236
-
task: HandleResolverTask::new(adapter, handle_resolver, cancel_token),
263
+
task: HandleResolverTask::new(adapter, handle_resolver, cancel_token, metrics_publisher),
237
264
}
238
265
}
239
266
···
245
272
/// * `handle_resolver` - Handle resolver implementation
246
273
/// * `cancel_token` - Token for graceful shutdown
247
274
/// * `config` - Task configuration
275
+
/// * `metrics_publisher` - Metrics publisher for telemetry
248
276
pub fn create_handle_resolver_task_with_config(
249
277
adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
250
278
handle_resolver: Arc<dyn HandleResolver>,
251
279
cancel_token: CancellationToken,
252
280
config: HandleResolverTaskConfig,
281
+
metrics_publisher: SharedMetricsPublisher,
253
282
) -> HandleResolverTaskHandle {
254
283
HandleResolverTaskHandle {
255
-
task: HandleResolverTask::with_config(adapter, handle_resolver, cancel_token, config),
284
+
task: HandleResolverTask::with_config(
285
+
adapter,
286
+
handle_resolver,
287
+
cancel_token,
288
+
config,
289
+
metrics_publisher,
290
+
),
256
291
}
257
292
}
258
293
259
294
#[cfg(test)]
260
295
mod tests {
261
296
use super::*;
262
-
use crate::queue_adapter::MpscQueueAdapter;
297
+
use crate::queue::MpscQueueAdapter;
263
298
use async_trait::async_trait;
264
299
use std::sync::Arc;
265
300
use tokio::sync::mpsc;
···
275
310
async fn resolve(
276
311
&self,
277
312
handle: &str,
278
-
) -> Result<String, crate::handle_resolver::HandleResolverError> {
313
+
) -> Result<(String, u64), crate::handle_resolver::HandleResolverError> {
279
314
if self.should_fail {
280
315
Err(crate::handle_resolver::HandleResolverError::MockResolutionFailure)
281
316
} else {
282
-
Ok(format!("did:plc:{}", handle.replace('.', "")))
317
+
let timestamp = std::time::SystemTime::now()
318
+
.duration_since(std::time::UNIX_EPOCH)
319
+
.unwrap_or_default()
320
+
.as_secs();
321
+
Ok((format!("did:plc:{}", handle.replace('.', "")), timestamp))
283
322
}
284
323
}
285
324
}
···
296
335
// Create cancellation token
297
336
let cancel_token = CancellationToken::new();
298
337
338
+
// Create metrics publisher
339
+
let metrics_publisher = Arc::new(crate::metrics::NoOpMetricsPublisher);
340
+
299
341
// Create task with custom config
300
342
let config = HandleResolverTaskConfig {
301
343
default_timeout_ms: 5000,
···
306
348
handle_resolver,
307
349
cancel_token.clone(),
308
350
config,
351
+
metrics_publisher,
309
352
);
310
353
311
354
// Create handle resolution work
···
314
357
// Send work to queue
315
358
sender.send(work).await.unwrap();
316
359
317
-
// Get metrics reference before moving task
318
-
let metrics = task.metrics.clone();
319
-
320
360
// Run task for a short time
321
361
let task_handle = tokio::spawn(async move { task.run().await });
322
362
···
329
369
// Wait for task to complete
330
370
let _ = task_handle.await;
331
371
332
-
// Verify metrics
333
-
assert_eq!(
334
-
metrics
335
-
.total_processed
336
-
.load(std::sync::atomic::Ordering::Relaxed),
337
-
1
338
-
);
339
-
assert_eq!(
340
-
metrics
341
-
.total_succeeded
342
-
.load(std::sync::atomic::Ordering::Relaxed),
343
-
1
344
-
);
372
+
// Test passes if task runs without panic
345
373
}
346
374
347
-
#[tokio::test]
348
-
async fn test_handle_resolver_metrics() {
349
-
use std::sync::atomic::Ordering;
375
+
#[test]
376
+
fn test_is_soft_failure() {
377
+
// Test DNS NoRecordsFound pattern (error-atproto-identity-resolve-4)
378
+
let dns_no_records = "error-atproto-identity-resolve-4 DNS resolution failed: ResolveError { kind: Proto(ProtoError { kind: NoRecordsFound { query: Query { name: Name(\"_atproto.noahshachtman.bsky.social.railway.internal.\"), query_type: TXT, query_class: IN }, soa: None, ns: None, negative_ttl: None, response_code: NotImp, trusted: true, authorities: None } }) }";
379
+
assert!(HandleResolverTask::is_soft_failure(dns_no_records));
350
380
351
-
let metrics = HandleResolverMetrics::default();
381
+
// Test HTTP hostname not found pattern (error-atproto-identity-resolve-5)
382
+
let http_no_hostname = "error-atproto-identity-resolve-5 HTTP resolution failed: reqwest::Error { kind: Request, url: \"https://mattie.thegem.city/.well-known/atproto-did\", source: hyper_util::client::legacy::Error(Connect, ConnectError(\"dns error\", Custom { kind: Uncategorized, error: \"failed to lookup address information: No address associated with hostname\" })) }";
383
+
assert!(HandleResolverTask::is_soft_failure(http_no_hostname));
352
384
353
-
// Test initial values
354
-
assert_eq!(metrics.total_processed.load(Ordering::Relaxed), 0);
355
-
assert_eq!(metrics.total_succeeded.load(Ordering::Relaxed), 0);
356
-
assert_eq!(metrics.total_failed.load(Ordering::Relaxed), 0);
357
-
assert_eq!(metrics.total_cached.load(Ordering::Relaxed), 0);
385
+
// Test alternate HTTP hostname failure message
386
+
let http_lookup_failed = "error-atproto-identity-resolve-5 HTTP resolution failed: reqwest::Error { kind: Request, url: \"https://example.com/.well-known/atproto-did\", source: hyper_util::client::legacy::Error(Connect, ConnectError(\"dns error\", Custom { kind: Uncategorized, error: \"failed to lookup address information\" })) }";
387
+
assert!(HandleResolverTask::is_soft_failure(http_lookup_failed));
358
388
359
-
// Test incrementing
360
-
metrics.total_processed.fetch_add(1, Ordering::Relaxed);
361
-
metrics.total_succeeded.fetch_add(1, Ordering::Relaxed);
362
-
metrics.total_cached.fetch_add(1, Ordering::Relaxed);
389
+
// Test HTTP invalid DID format (error-atproto-identity-resolve-6) - like reuters.com
390
+
let http_invalid_did = "error-atproto-identity-resolve-6 Invalid HTTP resolution response: expected DID format";
391
+
assert!(HandleResolverTask::is_soft_failure(http_invalid_did));
363
392
364
-
assert_eq!(metrics.total_processed.load(Ordering::Relaxed), 1);
365
-
assert_eq!(metrics.total_succeeded.load(Ordering::Relaxed), 1);
366
-
assert_eq!(metrics.total_cached.load(Ordering::Relaxed), 1);
393
+
// Test weratedogs.com case
394
+
let weratedogs_error = "error-atproto-identity-resolve-6 Invalid HTTP resolution response: expected DID format";
395
+
assert!(HandleResolverTask::is_soft_failure(weratedogs_error));
396
+
397
+
// Test DNS error that is NOT a soft failure (different DNS error)
398
+
let dns_real_error = "error-atproto-identity-resolve-4 DNS resolution failed: timeout";
399
+
assert!(!HandleResolverTask::is_soft_failure(dns_real_error));
400
+
401
+
// Test HTTP error that is NOT a soft failure (connection timeout)
402
+
let http_timeout =
403
+
"error-atproto-identity-resolve-5 HTTP resolution failed: connection timeout";
404
+
assert!(!HandleResolverTask::is_soft_failure(http_timeout));
405
+
406
+
// Test HTTP error that is NOT a soft failure (500 error)
407
+
let http_500 = "error-atproto-identity-resolve-5 HTTP resolution failed: status code 500";
408
+
assert!(!HandleResolverTask::is_soft_failure(http_500));
409
+
410
+
// Test QuickDID errors should never be soft failures
411
+
let quickdid_error =
412
+
"error-quickdid-resolve-1 Failed to resolve subject: internal server error";
413
+
assert!(!HandleResolverTask::is_soft_failure(quickdid_error));
414
+
415
+
// Test other atproto_identity error codes should not be soft failures
416
+
let other_atproto_error = "error-atproto-identity-resolve-1 Some other error";
417
+
assert!(!HandleResolverTask::is_soft_failure(other_atproto_error));
367
418
}
368
419
}
+290
-29
src/http/handle_xrpc_resolve_handle.rs
+290
-29
src/http/handle_xrpc_resolve_handle.rs
···
1
1
use std::sync::Arc;
2
+
use std::time::{Duration, SystemTime, UNIX_EPOCH};
2
3
3
4
use crate::{
4
5
handle_resolver::HandleResolver,
5
-
queue_adapter::{HandleResolutionWork, QueueAdapter},
6
+
http::AppContext,
7
+
metrics::SharedMetricsPublisher,
8
+
queue::{HandleResolutionWork, QueueAdapter},
6
9
};
7
10
8
11
use atproto_identity::resolve::{InputType, parse_input};
9
12
use axum::{
10
13
extract::{Query, State},
11
-
http::StatusCode,
12
-
response::{IntoResponse, Json},
14
+
http::{HeaderMap, HeaderValue, StatusCode, header},
15
+
response::{IntoResponse, Json, Response},
13
16
};
17
+
use metrohash::MetroHash64;
14
18
use serde::{Deserialize, Serialize};
19
+
use std::hash::Hasher;
15
20
16
21
#[derive(Deserialize)]
17
22
pub(super) struct ResolveHandleParams {
···
31
36
message: String,
32
37
}
33
38
39
+
/// Represents the result of a handle resolution
40
+
enum ResolutionResult {
41
+
Success {
42
+
did: String,
43
+
timestamp: u64,
44
+
etag: String,
45
+
},
46
+
Error {
47
+
error: String,
48
+
message: String,
49
+
timestamp: u64,
50
+
etag: String,
51
+
},
52
+
}
53
+
54
+
struct ResolutionResultView {
55
+
result: ResolutionResult,
56
+
cache_control: Option<String>,
57
+
if_none_match: Option<HeaderValue>,
58
+
if_modified_since: Option<HeaderValue>,
59
+
}
60
+
61
+
impl IntoResponse for ResolutionResultView {
62
+
fn into_response(self) -> Response {
63
+
let (last_modified, etag) = match &self.result {
64
+
ResolutionResult::Success {
65
+
timestamp, etag, ..
66
+
} => (*timestamp, etag),
67
+
ResolutionResult::Error {
68
+
timestamp, etag, ..
69
+
} => (*timestamp, etag),
70
+
};
71
+
72
+
let mut headers = HeaderMap::new();
73
+
74
+
// WARNING: this swallows errors
75
+
if let Ok(etag_value) = HeaderValue::from_str(etag) {
76
+
headers.insert(header::ETAG, etag_value);
77
+
}
78
+
79
+
// Add Last-Modified header
80
+
let last_modified_date = format_http_date(last_modified);
81
+
// WARNING: this swallows errors
82
+
if let Ok(last_modified_value) = HeaderValue::from_str(&last_modified_date) {
83
+
headers.insert(header::LAST_MODIFIED, last_modified_value);
84
+
}
85
+
86
+
// Add Cache-Control header if configured
87
+
if let Some(cache_control) = &self.cache_control {
88
+
// WARNING: this swallows errors
89
+
if let Ok(cache_control_value) = HeaderValue::from_str(cache_control) {
90
+
headers.insert(header::CACHE_CONTROL, cache_control_value);
91
+
}
92
+
}
93
+
94
+
headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
95
+
headers.insert(
96
+
header::ACCESS_CONTROL_ALLOW_HEADERS,
97
+
HeaderValue::from_static("*"),
98
+
);
99
+
headers.insert(
100
+
header::ACCESS_CONTROL_ALLOW_METHODS,
101
+
HeaderValue::from_static("GET, HEAD, OPTIONS"),
102
+
);
103
+
headers.insert(
104
+
header::ACCESS_CONTROL_ALLOW_ORIGIN,
105
+
HeaderValue::from_static("*"),
106
+
);
107
+
headers.insert(
108
+
header::ACCESS_CONTROL_EXPOSE_HEADERS,
109
+
HeaderValue::from_static("*"),
110
+
);
111
+
headers.insert(
112
+
header::ACCESS_CONTROL_MAX_AGE,
113
+
HeaderValue::from_static("86400"),
114
+
);
115
+
headers.insert(
116
+
"Access-Control-Request-Headers",
117
+
HeaderValue::from_static("*"),
118
+
);
119
+
headers.insert(
120
+
"Access-Control-Request-Method",
121
+
HeaderValue::from_static("GET"),
122
+
);
123
+
124
+
if let ResolutionResult::Success { .. } = self.result {
125
+
let fresh = self
126
+
.if_modified_since
127
+
.and_then(|inner_header_value| match inner_header_value.to_str() {
128
+
Ok(value) => Some(value.to_string()),
129
+
Err(_) => None,
130
+
})
131
+
.and_then(|inner_str_value| parse_http_date(&inner_str_value))
132
+
.is_some_and(|inner_if_modified_since| last_modified <= inner_if_modified_since);
133
+
134
+
if fresh {
135
+
return (StatusCode::NOT_MODIFIED, headers).into_response();
136
+
}
137
+
}
138
+
139
+
let fresh = self
140
+
.if_none_match
141
+
.is_some_and(|if_none_match_value| if_none_match_value == etag);
142
+
if fresh {
143
+
return (StatusCode::NOT_MODIFIED, headers).into_response();
144
+
}
145
+
146
+
match &self.result {
147
+
ResolutionResult::Success { did, .. } => (
148
+
StatusCode::OK,
149
+
headers,
150
+
Json(ResolveHandleResponse { did: did.clone() }),
151
+
)
152
+
.into_response(),
153
+
ResolutionResult::Error { error, message, .. } => (
154
+
StatusCode::BAD_REQUEST,
155
+
headers,
156
+
Json(ErrorResponse {
157
+
error: error.clone(),
158
+
message: message.clone(),
159
+
}),
160
+
)
161
+
.into_response(),
162
+
}
163
+
164
+
165
+
}
166
+
}
167
+
168
+
/// Calculate a weak ETag for the given content using MetroHash64 with a seed
169
+
fn calculate_etag(content: &str, seed: &str) -> String {
170
+
let mut hasher = MetroHash64::new();
171
+
hasher.write(seed.as_bytes());
172
+
hasher.write(content.as_bytes());
173
+
let hash = hasher.finish();
174
+
format!("W/\"{:x}\"", hash)
175
+
}
176
+
177
+
/// Format a UNIX timestamp as an HTTP date string (RFC 7231)
178
+
fn format_http_date(timestamp: u64) -> String {
179
+
let system_time = UNIX_EPOCH + Duration::from_secs(timestamp);
180
+
httpdate::fmt_http_date(system_time)
181
+
}
182
+
183
+
/// Parse an HTTP date string (RFC 7231) into a UNIX timestamp
184
+
fn parse_http_date(date_str: &str) -> Option<u64> {
185
+
httpdate::parse_http_date(date_str)
186
+
.ok()
187
+
.and_then(|system_time| system_time.duration_since(UNIX_EPOCH).ok())
188
+
.map(|duration| duration.as_secs())
189
+
}
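The three helpers above do the heavy lifting for conditional requests: a seeded MetroHash64 weak ETag plus an RFC 7231 date round-trip. A minimal, self-contained sketch of how they behave (standalone names, not part of this module):

```rust
use metrohash::MetroHash64;
use std::hash::Hasher;
use std::time::{Duration, UNIX_EPOCH};

// Mirrors calculate_etag: the seed keeps ETags from being comparable across deployments.
fn weak_etag(content: &str, seed: &str) -> String {
    let mut hasher = MetroHash64::new();
    hasher.write(seed.as_bytes());
    hasher.write(content.as_bytes());
    format!("W/\"{:x}\"", hasher.finish())
}

fn main() {
    // Same DID plus same seed always yields the same weak ETag.
    assert_eq!(
        weak_etag("did:plc:example", "seed"),
        weak_etag("did:plc:example", "seed")
    );

    // Mirrors format_http_date / parse_http_date: a timestamp survives the
    // HTTP-date round trip, which is what the If-Modified-Since check relies on.
    let stored = 1_700_000_000u64;
    let header = httpdate::fmt_http_date(UNIX_EPOCH + Duration::from_secs(stored));
    let parsed = httpdate::parse_http_date(&header)
        .ok()
        .and_then(|t| t.duration_since(UNIX_EPOCH).ok())
        .map(|d| d.as_secs());
    assert_eq!(parsed, Some(stored));
}
```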
190
+
34
191
pub(super) async fn handle_xrpc_resolve_handle(
192
+
headers: HeaderMap,
35
193
Query(params): Query<ResolveHandleParams>,
194
+
State(app_context): State<AppContext>,
36
195
State(handle_resolver): State<Arc<dyn HandleResolver>>,
37
196
State(queue): State<Arc<dyn QueueAdapter<HandleResolutionWork>>>,
38
-
) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
197
+
State(metrics): State<SharedMetricsPublisher>,
198
+
) -> impl IntoResponse {
199
+
let validating = params.validate.is_some();
200
+
let queueing = params.queue.is_some();
201
+
39
202
// Validate that handle is provided
40
203
let handle = match params.handle {
41
204
Some(h) => h,
42
205
None => {
43
-
return Err((
206
+
metrics
207
+
.incr_with_tags(
208
+
"xrpc.com.atproto.identity.resolveHandle.invalid_handle",
209
+
&[("reason", "missing")],
210
+
)
211
+
.await;
212
+
return (
44
213
StatusCode::BAD_REQUEST,
45
214
Json(ErrorResponse {
46
215
error: "InvalidRequest".to_string(),
47
216
message: "Error: Params must have the property \"handle\"".to_string(),
48
217
}),
49
-
));
218
+
)
219
+
.into_response();
50
220
}
51
221
};
52
222
53
223
// Validate that the input is a handle and not a DID
54
224
let handle = match parse_input(&handle) {
55
-
Ok(InputType::Handle(value)) => value,
225
+
Ok(InputType::Handle(value)) => value.to_lowercase(),
56
226
Ok(InputType::Plc(_)) | Ok(InputType::Web(_)) => {
57
227
// It's a DID, not a handle
58
-
return Err((
228
+
metrics
229
+
.incr_with_tags(
230
+
"xrpc.com.atproto.identity.resolveHandle.invalid_handle",
231
+
&[("reason", "did")],
232
+
)
233
+
.await;
234
+
return (
59
235
StatusCode::BAD_REQUEST,
60
236
Json(ErrorResponse {
61
237
error: "InvalidRequest".to_string(),
62
238
message: "Error: handle must be a valid handle".to_string(),
63
239
}),
64
-
));
240
+
)
241
+
.into_response();
65
242
}
66
243
Err(_) => {
67
-
return Err((
244
+
metrics
245
+
.incr_with_tags(
246
+
"xrpc.com.atproto.identity.resolveHandle.invalid_handle",
247
+
&[("reason", "error")],
248
+
)
249
+
.await;
250
+
return (
68
251
StatusCode::BAD_REQUEST,
69
252
Json(ErrorResponse {
70
253
error: "InvalidRequest".to_string(),
71
254
message: "Error: handle must be a valid handle".to_string(),
72
255
}),
73
-
));
256
+
)
257
+
.into_response();
74
258
}
75
259
};
76
260
77
-
if params.validate.is_some() {
78
-
return Ok(StatusCode::NO_CONTENT.into_response());
261
+
if validating {
262
+
metrics
263
+
.incr("xrpc.com.atproto.identity.resolveHandle")
264
+
.await;
265
+
return StatusCode::NO_CONTENT.into_response();
79
266
}
80
267
81
-
if params.queue.is_some() {
268
+
if queueing {
82
269
// Create work item
83
270
let work = HandleResolutionWork::new(handle.clone());
84
271
85
272
// Queue the work
86
273
match queue.push(work).await {
87
274
Ok(()) => {
275
+
metrics
276
+
.incr("xrpc.com.atproto.identity.resolveHandle")
277
+
.await;
88
278
tracing::debug!("Queued handle resolution for {}", handle);
89
279
}
90
280
Err(e) => {
281
+
metrics
282
+
.incr("xrpc.com.atproto.identity.resolveHandle.queue_failure")
283
+
.await;
91
284
tracing::error!("Failed to queue handle resolution: {}", e);
92
285
}
93
286
}
94
287
95
-
return Ok(StatusCode::NO_CONTENT.into_response());
288
+
return StatusCode::NO_CONTENT.into_response();
96
289
}
97
290
98
-
tracing::debug!("Resolving handle: {}", handle);
291
+
tracing::debug!(handle, "Resolving handle");
99
292
100
-
match handle_resolver.resolve(&handle).await {
101
-
Ok(did) => {
102
-
tracing::debug!("Found cached DID for handle {}: {}", handle, did);
103
-
Ok(Json(ResolveHandleResponse { did }).into_response())
293
+
// Get conditional request headers
294
+
let if_none_match = headers.get(header::IF_NONE_MATCH).cloned();
295
+
let if_modified_since = headers.get(header::IF_MODIFIED_SINCE).cloned();
296
+
297
+
// Perform the resolution and build the response
298
+
let result = match handle_resolver.resolve(&handle).await {
299
+
Ok((did, timestamp)) => {
300
+
tracing::debug!(handle, did, "Found cached DID for handle");
301
+
302
+
metrics
303
+
.incr_with_tags("handle.resolution.request", &[("success", "1")])
304
+
.await;
305
+
306
+
let etag = calculate_etag(&did, app_context.etag_seed());
307
+
ResolutionResult::Success {
308
+
did,
309
+
timestamp,
310
+
etag,
311
+
}
104
312
}
105
-
Err(_) => {
106
-
// {"error":"InvalidRequest","message":"Unable to resolve handle"}
107
-
Err((
108
-
StatusCode::BAD_REQUEST,
109
-
Json(ErrorResponse {
110
-
error: "InvalidRequest".to_string(),
111
-
message: "Unable to resolve handle".to_string(),
112
-
}),
113
-
))
313
+
Err(err) => {
314
+
tracing::debug!(error = ?err, handle, "Error resolving handle");
315
+
metrics
316
+
.incr_with_tags("handle.resolution.request", &[("success", "0")])
317
+
.await;
318
+
let error_content = format!("error:{}:{}", handle, err);
319
+
let etag = calculate_etag(&error_content, app_context.etag_seed());
320
+
let timestamp = SystemTime::now()
321
+
.duration_since(UNIX_EPOCH)
322
+
.unwrap_or_default()
323
+
.as_secs();
324
+
ResolutionResult::Error {
325
+
error: "InvalidRequest".to_string(),
326
+
message: "Unable to resolve handle".to_string(),
327
+
timestamp,
328
+
etag,
329
+
}
114
330
}
331
+
};
332
+
333
+
ResolutionResultView {
334
+
result,
335
+
cache_control: app_context.cache_control_header().map(|s| s.to_string()),
336
+
if_none_match,
337
+
if_modified_since,
115
338
}
339
+
.into_response()
340
+
}
341
+
342
+
pub(super) async fn handle_xrpc_resolve_handle_options() -> impl IntoResponse {
343
+
let mut headers = HeaderMap::new();
344
+
345
+
// Add CORS and Allow headers for OPTIONS request
346
+
headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
347
+
headers.insert(
348
+
header::ACCESS_CONTROL_ALLOW_HEADERS,
349
+
HeaderValue::from_static("*"),
350
+
);
351
+
headers.insert(
352
+
header::ACCESS_CONTROL_ALLOW_METHODS,
353
+
HeaderValue::from_static("GET, HEAD, OPTIONS"),
354
+
);
355
+
headers.insert(
356
+
header::ACCESS_CONTROL_ALLOW_ORIGIN,
357
+
HeaderValue::from_static("*"),
358
+
);
359
+
headers.insert(
360
+
header::ACCESS_CONTROL_EXPOSE_HEADERS,
361
+
HeaderValue::from_static("*"),
362
+
);
363
+
headers.insert(
364
+
header::ACCESS_CONTROL_MAX_AGE,
365
+
HeaderValue::from_static("86400"),
366
+
);
367
+
headers.insert(
368
+
"Access-Control-Request-Headers",
369
+
HeaderValue::from_static("*"),
370
+
);
371
+
headers.insert(
372
+
"Access-Control-Request-Method",
373
+
HeaderValue::from_static("GET"),
374
+
);
375
+
376
+
(StatusCode::NO_CONTENT, headers)
116
377
}
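From a client's perspective, the handler above makes repeated lookups cheap: replaying the returned ETag in If-None-Match (or the Last-Modified value in If-Modified-Since) yields 304 Not Modified. A sketch of that flow, assuming the reqwest crate and a hypothetical deployment URL:

```rust
use reqwest::{header, Client, StatusCode};

async fn conditional_lookup() -> anyhow::Result<()> {
    let client = Client::new();
    // Hypothetical deployment; the handle is normalized to lowercase server-side.
    let url = "https://quickdid.example.com/xrpc/com.atproto.identity.resolveHandle?handle=alice.bsky.social";

    let first = client.get(url).send().await?;
    let etag = first
        .headers()
        .get(header::ETAG)
        .and_then(|v| v.to_str().ok())
        .map(str::to_owned);

    if let Some(etag) = etag {
        let second = client
            .get(url)
            .header(header::IF_NONE_MATCH, etag)
            .send()
            .await?;
        // While the cached DID is unchanged, the handler short-circuits
        // before serializing a response body.
        assert_eq!(second.status(), StatusCode::NOT_MODIFIED);
    }
    Ok(())
}
```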
+126
src/http/handle_xrpc_resolve_lexicon.rs
···
1
+
use std::sync::Arc;
2
+
3
+
use atproto_lexicon::resolve::LexiconResolver;
4
+
use axum::{
5
+
extract::{Query, State},
6
+
http::{HeaderMap, HeaderValue, StatusCode, header},
7
+
response::{IntoResponse, Json},
8
+
};
9
+
use serde::{Deserialize, Serialize};
10
+
11
+
use crate::metrics::SharedMetricsPublisher;
12
+
13
+
#[derive(Deserialize)]
14
+
pub(super) struct ResolveLexiconParams {
15
+
nsid: Option<String>,
16
+
}
17
+
18
+
#[derive(Serialize)]
19
+
pub(super) struct ErrorResponse {
20
+
error: String,
21
+
message: String,
22
+
}
23
+
24
+
pub(super) async fn handle_xrpc_resolve_lexicon(
25
+
Query(params): Query<ResolveLexiconParams>,
26
+
State(lexicon_resolver): State<Arc<dyn LexiconResolver>>,
27
+
State(metrics): State<SharedMetricsPublisher>,
28
+
) -> impl IntoResponse {
29
+
// Validate that nsid is provided
30
+
let nsid = match params.nsid {
31
+
Some(n) => n,
32
+
None => {
33
+
metrics
34
+
.incr_with_tags(
35
+
"xrpc.com.atproto.lexicon.resolveLexicon.invalid_nsid",
36
+
&[("reason", "missing")],
37
+
)
38
+
.await;
39
+
return (
40
+
StatusCode::BAD_REQUEST,
41
+
Json(ErrorResponse {
42
+
error: "InvalidRequest".to_string(),
43
+
message: "Error: Params must have the property \"nsid\"".to_string(),
44
+
}),
45
+
)
46
+
.into_response();
47
+
}
48
+
};
49
+
50
+
tracing::debug!(nsid, "Resolving lexicon");
51
+
52
+
// Perform the lexicon resolution
53
+
match lexicon_resolver.resolve(&nsid).await {
54
+
Ok(resolved) => {
55
+
tracing::debug!(nsid, "Successfully resolved lexicon");
56
+
57
+
metrics
58
+
.incr_with_tags("lexicon.resolution.request", &[("success", "1")])
59
+
.await;
60
+
61
+
let mut headers = HeaderMap::new();
62
+
add_cors_headers(&mut headers);
63
+
64
+
// The resolved value is already a serde_json::Value, so just return it as JSON
65
+
(StatusCode::OK, headers, Json(resolved)).into_response()
66
+
}
67
+
Err(err) => {
68
+
tracing::debug!(error = ?err, nsid, "Error resolving lexicon");
69
+
70
+
metrics
71
+
.incr_with_tags("lexicon.resolution.request", &[("success", "0")])
72
+
.await;
73
+
74
+
let mut headers = HeaderMap::new();
75
+
add_cors_headers(&mut headers);
76
+
77
+
(
78
+
StatusCode::BAD_REQUEST,
79
+
headers,
80
+
Json(ErrorResponse {
81
+
error: "LexiconNotFound".to_string(),
82
+
message: "No lexicon was resolved for the NSID".to_string(),
83
+
}),
84
+
)
85
+
.into_response()
86
+
}
87
+
}
88
+
}
89
+
90
+
pub(super) async fn handle_xrpc_resolve_lexicon_options() -> impl IntoResponse {
91
+
let mut headers = HeaderMap::new();
92
+
add_cors_headers(&mut headers);
93
+
(StatusCode::NO_CONTENT, headers)
94
+
}
95
+
96
+
fn add_cors_headers(headers: &mut HeaderMap) {
97
+
headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
98
+
headers.insert(
99
+
header::ACCESS_CONTROL_ALLOW_HEADERS,
100
+
HeaderValue::from_static("*"),
101
+
);
102
+
headers.insert(
103
+
header::ACCESS_CONTROL_ALLOW_METHODS,
104
+
HeaderValue::from_static("GET, HEAD, OPTIONS"),
105
+
);
106
+
headers.insert(
107
+
header::ACCESS_CONTROL_ALLOW_ORIGIN,
108
+
HeaderValue::from_static("*"),
109
+
);
110
+
headers.insert(
111
+
header::ACCESS_CONTROL_EXPOSE_HEADERS,
112
+
HeaderValue::from_static("*"),
113
+
);
114
+
headers.insert(
115
+
header::ACCESS_CONTROL_MAX_AGE,
116
+
HeaderValue::from_static("86400"),
117
+
);
118
+
headers.insert(
119
+
"Access-Control-Request-Headers",
120
+
HeaderValue::from_static("*"),
121
+
);
122
+
headers.insert(
123
+
"Access-Control-Request-Method",
124
+
HeaderValue::from_static("GET"),
125
+
);
126
+
}
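The lexicon endpoint is a thin pass-through over the `LexiconResolver` trait; the resolved schema document is returned verbatim. A minimal sketch of the same lookup performed directly against the trait (resolver construction is covered in `src/lexicon_resolver`):

```rust
use atproto_lexicon::resolve::LexiconResolver;
use std::sync::Arc;

// Equivalent to GET /xrpc/com.atproto.lexicon.resolveLexicon?nsid=app.bsky.feed.post
async fn lookup(resolver: Arc<dyn LexiconResolver>) -> anyhow::Result<()> {
    let schema = resolver.resolve("app.bsky.feed.post").await?;
    // Lexicon documents carry their own NSID; the handler returns this JSON as-is.
    assert_eq!(schema["id"], "app.bsky.feed.post");
    Ok(())
}
```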
+1
src/http/mod.rs
+85
-46
src/http/server.rs
···
1
1
use crate::handle_resolver::HandleResolver;
2
-
use crate::queue_adapter::{HandleResolutionWork, QueueAdapter};
2
+
use crate::metrics::SharedMetricsPublisher;
3
+
use crate::queue::{HandleResolutionWork, QueueAdapter};
4
+
use atproto_lexicon::resolve::LexiconResolver;
3
5
use axum::{
4
6
Router,
5
-
extract::State,
6
-
response::{Html, IntoResponse, Json, Response},
7
+
extract::{MatchedPath, State},
8
+
http::Request,
9
+
middleware::{self, Next},
10
+
response::{Json, Response},
7
11
routing::get,
8
12
};
9
-
use http::StatusCode;
10
13
use serde_json::json;
11
14
use std::sync::Arc;
15
+
use std::time::Instant;
16
+
use tower_http::services::ServeDir;
12
17
13
18
pub(crate) struct InnerAppContext {
14
-
pub(crate) service_document: serde_json::Value,
15
-
pub(crate) service_did: String,
16
19
pub(crate) handle_resolver: Arc<dyn HandleResolver>,
17
20
pub(crate) handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
21
+
pub(crate) lexicon_resolver: Arc<dyn LexiconResolver>,
22
+
pub(crate) metrics: SharedMetricsPublisher,
23
+
pub(crate) etag_seed: String,
24
+
pub(crate) cache_control_header: Option<String>,
25
+
pub(crate) static_files_dir: String,
18
26
}
19
27
20
28
#[derive(Clone)]
···
23
31
impl AppContext {
24
32
/// Create a new AppContext with the provided configuration.
25
33
pub fn new(
26
-
service_document: serde_json::Value,
27
-
service_did: String,
28
34
handle_resolver: Arc<dyn HandleResolver>,
29
35
handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
36
+
lexicon_resolver: Arc<dyn LexiconResolver>,
37
+
metrics: SharedMetricsPublisher,
38
+
etag_seed: String,
39
+
cache_control_header: Option<String>,
40
+
static_files_dir: String,
30
41
) -> Self {
31
42
Self(Arc::new(InnerAppContext {
32
-
service_document,
33
-
service_did,
34
43
handle_resolver,
35
44
handle_queue,
45
+
lexicon_resolver,
46
+
metrics,
47
+
etag_seed,
48
+
cache_control_header,
49
+
static_files_dir,
36
50
}))
37
51
}
38
52
39
53
// Internal accessor methods for handlers
40
-
pub(super) fn service_document(&self) -> &serde_json::Value {
41
-
&self.0.service_document
54
+
pub(super) fn etag_seed(&self) -> &str {
55
+
&self.0.etag_seed
56
+
}
57
+
58
+
pub(super) fn cache_control_header(&self) -> Option<&str> {
59
+
self.0.cache_control_header.as_deref()
42
60
}
43
61
44
-
pub(super) fn service_did(&self) -> &str {
45
-
&self.0.service_did
62
+
pub(super) fn static_files_dir(&self) -> &str {
63
+
&self.0.static_files_dir
46
64
}
47
65
}
48
66
···
64
82
handle_queue,
65
83
Arc<dyn QueueAdapter<HandleResolutionWork>>
66
84
);
85
+
impl_from_ref!(AppContext, lexicon_resolver, Arc<dyn LexiconResolver>);
86
+
impl_from_ref!(AppContext, metrics, SharedMetricsPublisher);
87
+
88
+
/// Middleware to track HTTP request metrics
89
+
async fn metrics_middleware(
90
+
State(metrics): State<SharedMetricsPublisher>,
91
+
matched_path: Option<MatchedPath>,
92
+
request: Request<axum::body::Body>,
93
+
next: Next,
94
+
) -> Response {
95
+
let start = Instant::now();
96
+
let method = request.method().to_string();
97
+
let path = matched_path
98
+
.as_ref()
99
+
.map(|p| p.as_str().to_string())
100
+
.unwrap_or_else(|| "unknown".to_string());
101
+
102
+
// Process the request
103
+
let response = next.run(request).await;
104
+
105
+
// Calculate duration
106
+
let duration_ms = start.elapsed().as_millis() as u64;
107
+
let status_code = response.status().as_u16().to_string();
108
+
109
+
// Publish metrics with tags
110
+
metrics
111
+
.time_with_tags(
112
+
"http.request.duration_ms",
113
+
duration_ms,
114
+
&[
115
+
("method", &method),
116
+
("path", &path),
117
+
("status", &status_code),
118
+
],
119
+
)
120
+
.await;
121
+
122
+
response
123
+
}
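Per request, the middleware above boils down to a single timing publication keyed by matched route, method, and status. Illustrative only (the values are made up), using the no-op publisher introduced in `src/metrics.rs`:

```rust
use quickdid::metrics::{NoOpMetricsPublisher, SharedMetricsPublisher};
use std::sync::Arc;

async fn emit_example() {
    let metrics: SharedMetricsPublisher = Arc::new(NoOpMetricsPublisher::new());
    // A GET to the resolveHandle route answered with 200 in 12 ms becomes:
    metrics
        .time_with_tags(
            "http.request.duration_ms",
            12,
            &[
                ("method", "GET"),
                ("path", "/xrpc/com.atproto.identity.resolveHandle"),
                ("status", "200"),
            ],
        )
        .await;
}
```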
67
124
68
125
pub fn create_router(app_context: AppContext) -> Router {
126
+
let static_dir = app_context.static_files_dir().to_string();
127
+
69
128
Router::new()
70
-
.route("/", get(handle_index))
71
-
.route("/.well-known/did.json", get(handle_wellknown_did_json))
129
+
.route("/xrpc/_health", get(handle_xrpc_health))
72
130
.route(
73
-
"/.well-known/atproto-did",
74
-
get(handle_wellknown_atproto_did),
131
+
"/xrpc/com.atproto.identity.resolveHandle",
132
+
get(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle)
133
+
.options(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle_options),
75
134
)
76
-
.route("/xrpc/_health", get(handle_xrpc_health))
77
135
.route(
78
-
"/xrpc/com.atproto.identity.resolveHandle",
79
-
get(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle),
136
+
"/xrpc/com.atproto.lexicon.resolveLexicon",
137
+
get(super::handle_xrpc_resolve_lexicon::handle_xrpc_resolve_lexicon)
138
+
.options(super::handle_xrpc_resolve_lexicon::handle_xrpc_resolve_lexicon_options),
80
139
)
140
+
.fallback_service(ServeDir::new(static_dir))
141
+
.layer(middleware::from_fn_with_state(
142
+
app_context.0.metrics.clone(),
143
+
metrics_middleware,
144
+
))
81
145
.with_state(app_context)
82
-
}
83
-
84
-
pub(super) async fn handle_index() -> Html<&'static str> {
85
-
Html(
86
-
r#"<!DOCTYPE html>
87
-
<html>
88
-
<head>
89
-
<title>QuickDID</title>
90
-
</head>
91
-
<body>
92
-
<h1>QuickDID</h1>
93
-
<p>AT Protocol Identity Resolution Service</p>
94
-
</body>
95
-
</html>"#,
96
-
)
97
-
}
98
-
99
-
pub(super) async fn handle_wellknown_did_json(
100
-
State(context): State<AppContext>,
101
-
) -> Json<serde_json::Value> {
102
-
Json(context.service_document().clone())
103
-
}
104
-
105
-
pub(super) async fn handle_wellknown_atproto_did(State(context): State<AppContext>) -> Response {
106
-
(StatusCode::OK, context.service_did().to_string()).into_response()
107
146
}
108
147
109
148
pub(super) async fn handle_xrpc_health() -> Json<serde_json::Value> {
+360
src/jetstream_handler.rs
···
1
+
//! Jetstream event handler for QuickDID
2
+
//!
3
+
//! This module provides the event handler for processing AT Protocol Jetstream events,
4
+
//! specifically handling Account and Identity events to maintain cache consistency.
5
+
6
+
use crate::handle_resolver::HandleResolver;
7
+
use crate::metrics::MetricsPublisher;
8
+
use anyhow::Result;
9
+
use atproto_jetstream::{EventHandler, JetstreamEvent};
10
+
use std::sync::Arc;
11
+
use tracing::{debug, info, warn};
12
+
13
+
/// Jetstream event handler for QuickDID
14
+
///
15
+
/// This handler processes AT Protocol events from the Jetstream firehose to keep
16
+
/// the handle resolver cache in sync with the network state.
17
+
///
18
+
/// # Event Processing
19
+
///
20
+
/// ## Account Events
21
+
/// - When an account is marked as "deleted" or "deactivated", the DID is purged from the cache
22
+
/// - Metrics are tracked for successful and failed purge operations
23
+
///
24
+
/// ## Identity Events
25
+
/// - When an identity event contains a handle, the handle-to-DID mapping is updated
26
+
/// - When an identity event lacks a handle (indicating removal), the DID is purged
27
+
/// - Metrics are tracked for successful and failed update/purge operations
28
+
///
29
+
/// # Example
30
+
///
31
+
/// ```no_run
32
+
/// use quickdid::jetstream_handler::QuickDidEventHandler;
33
+
/// use quickdid::handle_resolver::HandleResolver;
34
+
/// use quickdid::metrics::MetricsPublisher;
35
+
/// use std::sync::Arc;
36
+
///
37
+
/// # async fn example(resolver: Arc<dyn HandleResolver>, metrics: Arc<dyn MetricsPublisher>) {
38
+
/// let handler = QuickDidEventHandler::new(resolver, metrics);
39
+
/// // Register with a JetstreamConsumer
40
+
/// # }
41
+
/// ```
42
+
pub struct QuickDidEventHandler {
43
+
resolver: Arc<dyn HandleResolver>,
44
+
metrics: Arc<dyn MetricsPublisher>,
45
+
}
46
+
47
+
impl QuickDidEventHandler {
48
+
/// Create a new Jetstream event handler
49
+
///
50
+
/// # Arguments
51
+
///
52
+
/// * `resolver` - The handle resolver to use for cache operations
53
+
/// * `metrics` - The metrics publisher for tracking event processing
54
+
pub fn new(resolver: Arc<dyn HandleResolver>, metrics: Arc<dyn MetricsPublisher>) -> Self {
55
+
Self { resolver, metrics }
56
+
}
57
+
}
58
+
59
+
#[async_trait::async_trait]
60
+
impl EventHandler for QuickDidEventHandler {
61
+
fn handler_id(&self) -> String {
62
+
"quickdid_handler".to_string()
63
+
}
64
+
65
+
async fn handle_event(&self, event: JetstreamEvent) -> Result<()> {
66
+
match event {
67
+
JetstreamEvent::Account { did, kind, .. } => {
68
+
// If account kind is "deleted" or "deactivated", purge the DID
69
+
if kind == "deleted" || kind == "deactivated" {
70
+
info!(did = %did, kind = %kind, "Purging account");
71
+
match self.resolver.purge(&did).await {
72
+
Ok(()) => {
73
+
self.metrics.incr("jetstream.account.purged").await;
74
+
}
75
+
Err(e) => {
76
+
warn!(did = %did, error = ?e, "Failed to purge DID");
77
+
self.metrics.incr("jetstream.account.purge_error").await;
78
+
}
79
+
}
80
+
}
81
+
self.metrics.incr("jetstream.account.processed").await;
82
+
}
83
+
JetstreamEvent::Identity { did, identity, .. } => {
84
+
// Extract handle from identity JSON if available
85
+
if !identity.is_null() {
86
+
if let Some(handle_value) = identity.get("handle") {
87
+
if let Some(handle) = handle_value.as_str() {
88
+
info!(handle = %handle, did = %did, "Updating identity mapping");
89
+
match self.resolver.set(handle, &did).await {
90
+
Ok(()) => {
91
+
self.metrics.incr("jetstream.identity.updated").await;
92
+
}
93
+
Err(e) => {
94
+
warn!(handle = %handle, did = %did, error = ?e, "Failed to update mapping");
95
+
self.metrics.incr("jetstream.identity.update_error").await;
96
+
}
97
+
}
98
+
} else {
99
+
// No handle or invalid handle, purge the DID
100
+
info!(did = %did, "Purging identity without valid handle");
101
+
match self.resolver.purge(&did).await {
102
+
Ok(()) => {
103
+
self.metrics.incr("jetstream.identity.purged").await;
104
+
}
105
+
Err(e) => {
106
+
warn!(did = %did, error = ?e, "Failed to purge DID");
107
+
self.metrics.incr("jetstream.identity.purge_error").await;
108
+
}
109
+
}
110
+
}
111
+
} else {
112
+
// No handle field, purge the DID
113
+
info!(did = %did, "Purging identity without handle field");
114
+
match self.resolver.purge(&did).await {
115
+
Ok(()) => {
116
+
self.metrics.incr("jetstream.identity.purged").await;
117
+
}
118
+
Err(e) => {
119
+
warn!(did = %did, error = ?e, "Failed to purge DID");
120
+
self.metrics.incr("jetstream.identity.purge_error").await;
121
+
}
122
+
}
123
+
}
124
+
} else {
125
+
// Null identity means removed, purge the DID
126
+
info!(did = %did, "Purging identity with null info");
127
+
match self.resolver.purge(&did).await {
128
+
Ok(()) => {
129
+
self.metrics.incr("jetstream.identity.purged").await;
130
+
}
131
+
Err(e) => {
132
+
warn!(did = %did, error = ?e, "Failed to purge DID");
133
+
self.metrics.incr("jetstream.identity.purge_error").await;
134
+
}
135
+
}
136
+
}
137
+
self.metrics.incr("jetstream.identity.processed").await;
138
+
}
139
+
_ => {
140
+
// Other event types we don't care about
141
+
debug!("Ignoring unhandled Jetstream event type");
142
+
}
143
+
}
144
+
Ok(())
145
+
}
146
+
}
147
+
148
+
#[cfg(test)]
149
+
mod tests {
150
+
use super::*;
151
+
use crate::handle_resolver::HandleResolverError;
152
+
use crate::metrics::NoOpMetricsPublisher;
153
+
use async_trait::async_trait;
154
+
use serde_json::json;
155
+
156
+
/// Mock resolver for testing
157
+
struct MockResolver {
158
+
purge_called: std::sync::Arc<std::sync::Mutex<Vec<String>>>,
159
+
set_called: std::sync::Arc<std::sync::Mutex<Vec<(String, String)>>>,
160
+
}
161
+
162
+
impl MockResolver {
163
+
fn new() -> Self {
164
+
Self {
165
+
purge_called: std::sync::Arc::new(std::sync::Mutex::new(Vec::new())),
166
+
set_called: std::sync::Arc::new(std::sync::Mutex::new(Vec::new())),
167
+
}
168
+
}
169
+
170
+
fn get_purge_calls(&self) -> Vec<String> {
171
+
self.purge_called.lock().unwrap().clone()
172
+
}
173
+
174
+
fn get_set_calls(&self) -> Vec<(String, String)> {
175
+
self.set_called.lock().unwrap().clone()
176
+
}
177
+
}
178
+
179
+
#[async_trait]
180
+
impl HandleResolver for MockResolver {
181
+
async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
182
+
unimplemented!("Not needed for tests")
183
+
}
184
+
185
+
async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
186
+
self.purge_called.lock().unwrap().push(subject.to_string());
187
+
Ok(())
188
+
}
189
+
190
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
191
+
self.set_called
192
+
.lock()
193
+
.unwrap()
194
+
.push((handle.to_string(), did.to_string()));
195
+
Ok(())
196
+
}
197
+
}
198
+
199
+
#[tokio::test]
200
+
async fn test_account_deleted_event() {
201
+
let resolver = Arc::new(MockResolver::new());
202
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
203
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
204
+
205
+
// Create a deleted account event
206
+
let event = JetstreamEvent::Account {
207
+
did: "did:plc:test123".to_string(),
208
+
kind: "deleted".to_string(),
209
+
time_us: 0,
210
+
account: json!(null),
211
+
};
212
+
213
+
handler.handle_event(event).await.unwrap();
214
+
215
+
// Verify the DID was purged
216
+
let purge_calls = resolver.get_purge_calls();
217
+
assert_eq!(purge_calls.len(), 1);
218
+
assert_eq!(purge_calls[0], "did:plc:test123");
219
+
}
220
+
221
+
#[tokio::test]
222
+
async fn test_account_deactivated_event() {
223
+
let resolver = Arc::new(MockResolver::new());
224
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
225
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
226
+
227
+
// Create a deactivated account event
228
+
let event = JetstreamEvent::Account {
229
+
did: "did:plc:test456".to_string(),
230
+
kind: "deactivated".to_string(),
231
+
time_us: 0,
232
+
account: json!(null),
233
+
};
234
+
235
+
handler.handle_event(event).await.unwrap();
236
+
237
+
// Verify the DID was purged
238
+
let purge_calls = resolver.get_purge_calls();
239
+
assert_eq!(purge_calls.len(), 1);
240
+
assert_eq!(purge_calls[0], "did:plc:test456");
241
+
}
242
+
243
+
#[tokio::test]
244
+
async fn test_account_active_event() {
245
+
let resolver = Arc::new(MockResolver::new());
246
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
247
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
248
+
249
+
// Create an active account event (should not purge)
250
+
let event = JetstreamEvent::Account {
251
+
did: "did:plc:test789".to_string(),
252
+
kind: "active".to_string(),
253
+
time_us: 0,
254
+
account: json!(null),
255
+
};
256
+
257
+
handler.handle_event(event).await.unwrap();
258
+
259
+
// Verify the DID was NOT purged
260
+
let purge_calls = resolver.get_purge_calls();
261
+
assert_eq!(purge_calls.len(), 0);
262
+
}
263
+
264
+
#[tokio::test]
265
+
async fn test_identity_with_handle_event() {
266
+
let resolver = Arc::new(MockResolver::new());
267
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
268
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
269
+
270
+
// Create an identity event with a handle
271
+
let event = JetstreamEvent::Identity {
272
+
did: "did:plc:testuser".to_string(),
273
+
kind: "update".to_string(),
274
+
time_us: 0,
275
+
identity: json!({
276
+
"handle": "alice.bsky.social"
277
+
}),
278
+
};
279
+
280
+
handler.handle_event(event).await.unwrap();
281
+
282
+
// Verify the set method was called
283
+
let set_calls = resolver.get_set_calls();
284
+
assert_eq!(set_calls.len(), 1);
285
+
assert_eq!(
286
+
set_calls[0],
287
+
(
288
+
"alice.bsky.social".to_string(),
289
+
"did:plc:testuser".to_string()
290
+
)
291
+
);
292
+
293
+
// Verify no purge was called
294
+
let purge_calls = resolver.get_purge_calls();
295
+
assert_eq!(purge_calls.len(), 0);
296
+
}
297
+
298
+
#[tokio::test]
299
+
async fn test_identity_without_handle_event() {
300
+
let resolver = Arc::new(MockResolver::new());
301
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
302
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
303
+
304
+
// Create an identity event without a handle field
305
+
let event = JetstreamEvent::Identity {
306
+
did: "did:plc:nohandle".to_string(),
307
+
kind: "update".to_string(),
308
+
time_us: 0,
309
+
identity: json!({
310
+
"other_field": "value"
311
+
}),
312
+
};
313
+
314
+
handler.handle_event(event).await.unwrap();
315
+
316
+
// Verify the DID was purged
317
+
let purge_calls = resolver.get_purge_calls();
318
+
assert_eq!(purge_calls.len(), 1);
319
+
assert_eq!(purge_calls[0], "did:plc:nohandle");
320
+
321
+
// Verify set was not called
322
+
let set_calls = resolver.get_set_calls();
323
+
assert_eq!(set_calls.len(), 0);
324
+
}
325
+
326
+
#[tokio::test]
327
+
async fn test_identity_with_null_identity() {
328
+
let resolver = Arc::new(MockResolver::new());
329
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
330
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
331
+
332
+
// Create an identity event with null identity
333
+
let event = JetstreamEvent::Identity {
334
+
did: "did:plc:nullidentity".to_string(),
335
+
kind: "delete".to_string(),
336
+
time_us: 0,
337
+
identity: json!(null),
338
+
};
339
+
340
+
handler.handle_event(event).await.unwrap();
341
+
342
+
// Verify the DID was purged
343
+
let purge_calls = resolver.get_purge_calls();
344
+
assert_eq!(purge_calls.len(), 1);
345
+
assert_eq!(purge_calls[0], "did:plc:nullidentity");
346
+
347
+
// Verify set was not called
348
+
let set_calls = resolver.get_set_calls();
349
+
assert_eq!(set_calls.len(), 0);
350
+
}
351
+
352
+
#[tokio::test]
353
+
async fn test_handler_id() {
354
+
let resolver = Arc::new(MockResolver::new());
355
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
356
+
let handler = QuickDidEventHandler::new(resolver, metrics);
357
+
358
+
assert_eq!(handler.handler_id(), "quickdid_handler");
359
+
}
360
+
}
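Outside the unit tests, the handler is registered with the Jetstream consumer and receives live events; the same logic can be exercised directly by constructing an event, exactly as the tests above do. A condensed sketch:

```rust
use atproto_jetstream::{EventHandler, JetstreamEvent};
use quickdid::handle_resolver::HandleResolver;
use quickdid::jetstream_handler::QuickDidEventHandler;
use quickdid::metrics::NoOpMetricsPublisher;
use serde_json::json;
use std::sync::Arc;

async fn demo(resolver: Arc<dyn HandleResolver>) -> anyhow::Result<()> {
    let handler = QuickDidEventHandler::new(resolver, Arc::new(NoOpMetricsPublisher::new()));

    // An Identity event carrying a handle updates the handle -> DID mapping;
    // a "deleted" or "deactivated" Account event would purge it instead.
    handler
        .handle_event(JetstreamEvent::Identity {
            did: "did:plc:example".to_string(),
            kind: "update".to_string(),
            time_us: 0,
            identity: json!({ "handle": "alice.example.com" }),
        })
        .await?;
    Ok(())
}
```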
+8
src/lexicon_resolver/mod.rs
···
1
+
//! Lexicon resolution with caching support.
2
+
//!
3
+
//! This module provides implementations for resolving AT Protocol lexicons (NSIDs)
4
+
//! to their schemas with various caching strategies.
5
+
6
+
mod redis;
7
+
8
+
pub use redis::{create_redis_lexicon_resolver, create_redis_lexicon_resolver_with_ttl};
+458
src/lexicon_resolver/redis.rs
···
1
+
//! Redis-backed caching lexicon resolver.
2
+
//!
3
+
//! This module provides a lexicon resolver that caches resolution results in Redis
4
+
//! with configurable expiration times. Redis caching provides persistence across
5
+
//! service restarts and allows sharing of cached results across multiple instances.
6
+
7
+
use crate::metrics::SharedMetricsPublisher;
8
+
use async_trait::async_trait;
9
+
use atproto_lexicon::resolve::LexiconResolver;
10
+
use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
11
+
use metrohash::MetroHash64;
12
+
use std::hash::Hasher as _;
13
+
use std::sync::Arc;
14
+
15
+
/// Redis-backed caching lexicon resolver.
16
+
///
17
+
/// This resolver caches lexicon resolution results in Redis with a configurable TTL.
18
+
/// Results are stored as JSON bytes to minimize storage overhead while maintaining
19
+
/// the schema structure.
20
+
///
21
+
/// # Features
22
+
///
23
+
/// - Persistent caching across service restarts
24
+
/// - Shared cache across multiple service instances
25
+
/// - Configurable TTL (default: 90 days)
26
+
/// - JSON storage format for lexicon schemas
27
+
/// - Graceful fallback if Redis is unavailable
28
+
///
29
+
/// # Example
30
+
///
31
+
/// ```no_run
32
+
/// use std::sync::Arc;
33
+
/// use deadpool_redis::Pool;
34
+
/// use atproto_lexicon::resolve::LexiconResolver;
35
+
/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
36
+
/// use quickdid::metrics::NoOpMetricsPublisher;
37
+
///
38
+
/// # async fn example() {
39
+
/// # let inner_resolver: Arc<dyn LexiconResolver> = todo!();
40
+
/// # let redis_pool: Pool = todo!();
41
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
42
+
/// // Create with default 90-day TTL
43
+
/// let resolver = create_redis_lexicon_resolver(
44
+
/// inner_resolver,
45
+
/// redis_pool,
46
+
/// metrics
47
+
/// );
48
+
/// # }
49
+
/// ```
50
+
pub(super) struct RedisLexiconResolver {
51
+
/// Base lexicon resolver to perform actual resolution
52
+
inner: Arc<dyn LexiconResolver>,
53
+
/// Redis connection pool
54
+
pool: RedisPool,
55
+
/// Redis key prefix for lexicon resolution cache
56
+
key_prefix: String,
57
+
/// TTL for cache entries in seconds
58
+
ttl_seconds: u64,
59
+
/// Metrics publisher for telemetry
60
+
metrics: SharedMetricsPublisher,
61
+
}
62
+
63
+
impl RedisLexiconResolver {
64
+
/// Create a new Redis-backed lexicon resolver with default 90-day TTL.
65
+
fn new(
66
+
inner: Arc<dyn LexiconResolver>,
67
+
pool: RedisPool,
68
+
metrics: SharedMetricsPublisher,
69
+
) -> Self {
70
+
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
71
+
}
72
+
73
+
/// Create a new Redis-backed lexicon resolver with custom TTL.
74
+
fn with_ttl(
75
+
inner: Arc<dyn LexiconResolver>,
76
+
pool: RedisPool,
77
+
ttl_seconds: u64,
78
+
metrics: SharedMetricsPublisher,
79
+
) -> Self {
80
+
Self::with_full_config(inner, pool, "lexicon:".to_string(), ttl_seconds, metrics)
81
+
}
82
+
83
+
/// Create a new Redis-backed lexicon resolver with full configuration.
84
+
fn with_full_config(
85
+
inner: Arc<dyn LexiconResolver>,
86
+
pool: RedisPool,
87
+
key_prefix: String,
88
+
ttl_seconds: u64,
89
+
metrics: SharedMetricsPublisher,
90
+
) -> Self {
91
+
Self {
92
+
inner,
93
+
pool,
94
+
key_prefix,
95
+
ttl_seconds,
96
+
metrics,
97
+
}
98
+
}
99
+
100
+
/// Generate the Redis key for an NSID.
101
+
///
102
+
/// Uses MetroHash64 to generate a consistent hash of the NSID
103
+
/// for use as the Redis key. This provides better key distribution
104
+
/// and avoids issues with special characters in NSIDs.
105
+
fn make_key(&self, nsid: &str) -> String {
106
+
let mut h = MetroHash64::default();
107
+
h.write(nsid.as_bytes());
108
+
format!("{}{}", self.key_prefix, h.finish())
109
+
}
110
+
111
+
/// Get the TTL in seconds.
112
+
fn ttl_seconds(&self) -> u64 {
113
+
self.ttl_seconds
114
+
}
115
+
}
116
+
117
+
#[async_trait]
118
+
impl LexiconResolver for RedisLexiconResolver {
119
+
async fn resolve(&self, nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
120
+
let key = self.make_key(nsid);
121
+
122
+
// Try to get from Redis cache first
123
+
match self.pool.get().await {
124
+
Ok(mut conn) => {
125
+
// Check if the key exists in Redis (stored as JSON bytes)
126
+
let cached: Option<Vec<u8>> = match conn.get(&key).await {
127
+
Ok(value) => value,
128
+
Err(e) => {
129
+
self.metrics.incr("lexicon_resolver.redis.get_error").await;
130
+
tracing::warn!("Failed to get NSID from Redis cache: {}", e);
131
+
None
132
+
}
133
+
};
134
+
135
+
if let Some(cached_bytes) = cached {
136
+
// Deserialize the cached JSON
137
+
match serde_json::from_slice::<serde_json::Value>(&cached_bytes) {
138
+
Ok(cached_value) => {
139
+
tracing::debug!("Cache hit for NSID {}", nsid);
140
+
self.metrics.incr("lexicon_resolver.redis.cache_hit").await;
141
+
return Ok(cached_value);
142
+
}
143
+
Err(e) => {
144
+
tracing::warn!(
145
+
"Failed to deserialize cached lexicon for NSID {}: {}",
146
+
nsid,
147
+
e
148
+
);
149
+
self.metrics
150
+
.incr("lexicon_resolver.redis.deserialize_error")
151
+
.await;
152
+
// Fall through to re-resolve if deserialization fails
153
+
}
154
+
}
155
+
}
156
+
157
+
// Not in cache, resolve through inner resolver
158
+
tracing::debug!("Cache miss for NSID {}, resolving...", nsid);
159
+
self.metrics.incr("lexicon_resolver.redis.cache_miss").await;
160
+
let result = self.inner.resolve(nsid).await;
161
+
162
+
// Cache successful result
163
+
if let Ok(ref schema) = result {
164
+
// Serialize to JSON bytes
165
+
match serde_json::to_vec(schema) {
166
+
Ok(bytes) => {
167
+
// Set with expiration (ignore errors to not fail the resolution)
168
+
if let Err(e) = conn
169
+
.set_ex::<_, _, ()>(&key, bytes, self.ttl_seconds())
170
+
.await
171
+
{
172
+
tracing::warn!(
173
+
"Failed to cache lexicon resolution in Redis: {}",
174
+
e
175
+
);
176
+
self.metrics
177
+
.incr("lexicon_resolver.redis.cache_set_error")
178
+
.await;
179
+
} else {
180
+
tracing::debug!("Cached lexicon for NSID {}", nsid);
181
+
self.metrics.incr("lexicon_resolver.redis.cache_set").await;
182
+
}
183
+
}
184
+
Err(e) => {
185
+
tracing::warn!(
186
+
"Failed to serialize lexicon result for NSID {}: {}",
187
+
nsid,
188
+
e
189
+
);
190
+
self.metrics
191
+
.incr("lexicon_resolver.redis.serialize_error")
192
+
.await;
193
+
}
194
+
}
195
+
}
196
+
197
+
result
198
+
}
199
+
Err(e) => {
200
+
// Redis connection failed, fall back to inner resolver
201
+
tracing::warn!(
202
+
"Failed to get Redis connection, falling back to uncached resolution: {}",
203
+
e
204
+
);
205
+
self.metrics
206
+
.incr("lexicon_resolver.redis.connection_error")
207
+
.await;
208
+
self.inner.resolve(nsid).await
209
+
}
210
+
}
211
+
}
212
+
}
213
+
214
+
/// Create a new Redis-backed lexicon resolver with default 90-day TTL.
215
+
///
216
+
/// # Arguments
217
+
///
218
+
/// * `inner` - The underlying resolver to use for actual resolution
219
+
/// * `pool` - Redis connection pool
220
+
/// * `metrics` - Metrics publisher for telemetry
221
+
///
222
+
/// # Example
223
+
///
224
+
/// ```no_run
225
+
/// use std::sync::Arc;
226
+
/// use atproto_lexicon::resolve::{DefaultLexiconResolver, LexiconResolver};
227
+
/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
228
+
/// use quickdid::cache::create_redis_pool;
229
+
/// use quickdid::metrics::NoOpMetricsPublisher;
230
+
///
231
+
/// # async fn example() -> anyhow::Result<()> {
232
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
233
+
/// # use reqwest::Client;
234
+
/// # let dns_resolver = HickoryDnsResolver::create_resolver(&[]);
235
+
/// # let http_client = Client::new();
236
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
237
+
/// let base: Arc<dyn LexiconResolver> = Arc::new(
238
+
/// DefaultLexiconResolver::new(http_client, dns_resolver)
239
+
/// );
240
+
///
241
+
/// let pool = create_redis_pool("redis://localhost:6379")?;
242
+
/// let resolver = create_redis_lexicon_resolver(base, pool, metrics);
243
+
/// let schema = resolver.resolve("app.bsky.feed.post").await.unwrap();
244
+
/// # Ok(())
245
+
/// # }
246
+
/// ```
247
+
pub fn create_redis_lexicon_resolver(
248
+
inner: Arc<dyn LexiconResolver>,
249
+
pool: RedisPool,
250
+
metrics: SharedMetricsPublisher,
251
+
) -> Arc<dyn LexiconResolver> {
252
+
Arc::new(RedisLexiconResolver::new(inner, pool, metrics))
253
+
}
254
+
255
+
/// Create a new Redis-backed lexicon resolver with custom TTL.
256
+
///
257
+
/// # Arguments
258
+
///
259
+
/// * `inner` - The underlying resolver to use for actual resolution
260
+
/// * `pool` - Redis connection pool
261
+
/// * `ttl_seconds` - TTL for cache entries in seconds
262
+
/// * `metrics` - Metrics publisher for telemetry
263
+
pub fn create_redis_lexicon_resolver_with_ttl(
264
+
inner: Arc<dyn LexiconResolver>,
265
+
pool: RedisPool,
266
+
ttl_seconds: u64,
267
+
metrics: SharedMetricsPublisher,
268
+
) -> Arc<dyn LexiconResolver> {
269
+
Arc::new(RedisLexiconResolver::with_ttl(
270
+
inner,
271
+
pool,
272
+
ttl_seconds,
273
+
metrics,
274
+
))
275
+
}
276
+
277
+
#[cfg(test)]
278
+
mod tests {
279
+
use super::*;
280
+
281
+
// Mock lexicon resolver for testing
282
+
#[derive(Clone)]
283
+
struct MockLexiconResolver {
284
+
should_fail: bool,
285
+
expected_schema: serde_json::Value,
286
+
}
287
+
288
+
#[async_trait]
289
+
impl LexiconResolver for MockLexiconResolver {
290
+
async fn resolve(&self, _nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
291
+
if self.should_fail {
292
+
Err(anyhow::anyhow!("Mock resolution failure"))
293
+
} else {
294
+
Ok(self.expected_schema.clone())
295
+
}
296
+
}
297
+
}
298
+
299
+
#[tokio::test]
300
+
async fn test_redis_lexicon_resolver_cache_hit() {
301
+
let pool = match crate::test_helpers::get_test_redis_pool() {
302
+
Some(p) => p,
303
+
None => return,
304
+
};
305
+
306
+
// Create mock resolver with sample schema
307
+
let schema = serde_json::json!({
308
+
"lexicon": 1,
309
+
"id": "app.bsky.feed.post",
310
+
"defs": {
311
+
"main": {
312
+
"type": "record",
313
+
"description": "A post record"
314
+
}
315
+
}
316
+
});
317
+
318
+
let mock_resolver = Arc::new(MockLexiconResolver {
319
+
should_fail: false,
320
+
expected_schema: schema.clone(),
321
+
});
322
+
323
+
// Create metrics publisher
324
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
325
+
326
+
// Create Redis-backed resolver with a unique key prefix for testing
327
+
let test_prefix = format!(
328
+
"test:lexicon:{}:",
329
+
std::time::SystemTime::now()
330
+
.duration_since(std::time::UNIX_EPOCH)
331
+
.unwrap()
332
+
.as_nanos()
333
+
);
334
+
let redis_resolver = RedisLexiconResolver::with_full_config(
335
+
mock_resolver,
336
+
pool.clone(),
337
+
test_prefix.clone(),
338
+
3600,
339
+
metrics,
340
+
);
341
+
342
+
let test_nsid = "app.bsky.feed.post";
343
+
344
+
// First resolution - should call inner resolver
345
+
let result1 = redis_resolver.resolve(test_nsid).await.unwrap();
346
+
assert_eq!(result1, schema);
347
+
348
+
// Second resolution - should hit cache
349
+
let result2 = redis_resolver.resolve(test_nsid).await.unwrap();
350
+
assert_eq!(result2, schema);
351
+
352
+
// Clean up test data
353
+
if let Ok(mut conn) = pool.get().await {
354
+
let mut h = MetroHash64::default();
355
+
h.write(test_nsid.as_bytes());
356
+
let key = format!("{}{}", test_prefix, h.finish());
357
+
let _: Result<(), _> = conn.del(key).await;
358
+
}
359
+
}
360
+
361
+
#[tokio::test]
362
+
async fn test_redis_lexicon_resolver_cache_miss() {
363
+
let pool = match crate::test_helpers::get_test_redis_pool() {
364
+
Some(p) => p,
365
+
None => return,
366
+
};
367
+
368
+
let schema = serde_json::json!({
369
+
"lexicon": 1,
370
+
"id": "com.example.test",
371
+
});
372
+
373
+
let mock_resolver = Arc::new(MockLexiconResolver {
374
+
should_fail: false,
375
+
expected_schema: schema.clone(),
376
+
});
377
+
378
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
379
+
380
+
let test_prefix = format!(
381
+
"test:lexicon:{}:",
382
+
std::time::SystemTime::now()
383
+
.duration_since(std::time::UNIX_EPOCH)
384
+
.unwrap()
385
+
.as_nanos()
386
+
);
387
+
let redis_resolver = RedisLexiconResolver::with_full_config(
388
+
mock_resolver,
389
+
pool.clone(),
390
+
test_prefix.clone(),
391
+
3600,
392
+
metrics,
393
+
);
394
+
395
+
let test_nsid = "com.example.test";
396
+
397
+
// Ensure key doesn't exist
398
+
if let Ok(mut conn) = pool.get().await {
399
+
let mut h = MetroHash64::default();
400
+
h.write(test_nsid.as_bytes());
401
+
let key = format!("{}{}", test_prefix, h.finish());
402
+
let _: Result<(), _> = conn.del(&key).await;
403
+
}
404
+
405
+
// Resolution should succeed and cache the result
406
+
let result = redis_resolver.resolve(test_nsid).await.unwrap();
407
+
assert_eq!(result, schema);
408
+
409
+
// Verify the result was cached
410
+
if let Ok(mut conn) = pool.get().await {
411
+
let mut h = MetroHash64::default();
412
+
h.write(test_nsid.as_bytes());
413
+
let key = format!("{}{}", test_prefix, h.finish());
414
+
let exists: bool = conn.exists(&key).await.unwrap();
415
+
assert!(exists, "Result should be cached");
416
+
417
+
// Clean up
418
+
let _: Result<(), _> = conn.del(key).await;
419
+
}
420
+
}
421
+
422
+
#[tokio::test]
423
+
async fn test_redis_lexicon_resolver_error_handling() {
424
+
let pool = match crate::test_helpers::get_test_redis_pool() {
425
+
Some(p) => p,
426
+
None => return,
427
+
};
428
+
429
+
// Create mock resolver that fails
430
+
let mock_resolver = Arc::new(MockLexiconResolver {
431
+
should_fail: true,
432
+
expected_schema: serde_json::Value::Null,
433
+
});
434
+
435
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
436
+
437
+
let test_prefix = format!(
438
+
"test:lexicon:{}:",
439
+
std::time::SystemTime::now()
440
+
.duration_since(std::time::UNIX_EPOCH)
441
+
.unwrap()
442
+
.as_nanos()
443
+
);
444
+
let redis_resolver = RedisLexiconResolver::with_full_config(
445
+
mock_resolver,
446
+
pool.clone(),
447
+
test_prefix,
448
+
3600,
449
+
metrics,
450
+
);
451
+
452
+
let test_nsid = "com.example.nonexistent";
453
+
454
+
// Resolution should fail
455
+
let result = redis_resolver.resolve(test_nsid).await;
456
+
assert!(result.is_err());
457
+
}
458
+
}
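The cache key derivation is worth seeing in isolation: every instance hashes the NSID the same way, so a warm entry written by one replica is a hit for the others. A standalone sketch using the default `lexicon:` prefix:

```rust
use metrohash::MetroHash64;
use std::hash::Hasher as _;

// Mirrors RedisLexiconResolver::make_key with the default prefix.
fn lexicon_cache_key(nsid: &str) -> String {
    let mut h = MetroHash64::default();
    h.write(nsid.as_bytes());
    format!("lexicon:{}", h.finish())
}

fn main() {
    // Deterministic: the same NSID always maps to the same Redis key.
    assert_eq!(
        lexicon_cache_key("app.bsky.feed.post"),
        lexicon_cache_key("app.bsky.feed.post")
    );
    println!("{}", lexicon_cache_key("app.bsky.feed.post"));
}
```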
+5
-1
src/lib.rs
···
2
2
pub mod config; // Config and Args needed by binary
3
3
pub mod handle_resolver; // Only traits and factory functions exposed
4
4
pub mod http; // Only create_router exposed
5
+
pub mod jetstream_handler; // Jetstream event handler for AT Protocol events
6
+
pub mod lexicon_resolver; // Lexicon resolution with caching support
5
7
6
8
// Semi-public modules - needed by binary but with limited exposure
7
9
pub mod cache; // Only create_redis_pool exposed
8
10
pub mod handle_resolver_task; // Factory functions and TaskConfig exposed
9
-
pub mod queue_adapter; // Trait and factory functions exposed
11
+
pub mod metrics; // Metrics publishing trait and implementations
12
+
pub mod queue; // Queue adapter system with trait and factory functions
13
+
pub mod sqlite_schema; // SQLite schema management functions exposed
10
14
pub mod task_manager; // Only spawn_cancellable_task exposed
11
15
12
16
// Internal modules - crate visibility only
+547
src/metrics.rs
···
1
+
use crate::config::Config;
2
+
use async_trait::async_trait;
3
+
use cadence::{
4
+
BufferedUdpMetricSink, Counted, CountedExt, Gauged, Metric, QueuingMetricSink, StatsdClient,
5
+
Timed,
6
+
};
7
+
use std::net::UdpSocket;
8
+
use std::sync::Arc;
9
+
use thiserror::Error;
10
+
use tracing::{debug, error};
11
+
12
+
/// Trait for publishing metrics with counter, gauge, and timing support
13
+
/// Designed for minimal compatibility with cadence-style metrics
14
+
#[async_trait]
15
+
pub trait MetricsPublisher: Send + Sync {
16
+
/// Increment a counter by 1
17
+
async fn incr(&self, key: &str);
18
+
19
+
/// Increment a counter by a specific value
20
+
async fn count(&self, key: &str, value: u64);
21
+
22
+
/// Increment a counter with tags
23
+
async fn incr_with_tags(&self, key: &str, tags: &[(&str, &str)]);
24
+
25
+
/// Increment a counter by a specific value with tags
26
+
async fn count_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]);
27
+
28
+
/// Record a gauge value
29
+
async fn gauge(&self, key: &str, value: u64);
30
+
31
+
/// Record a gauge value with tags
32
+
async fn gauge_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]);
33
+
34
+
/// Record a timing in milliseconds
35
+
async fn time(&self, key: &str, millis: u64);
36
+
37
+
/// Record a timing with tags
38
+
async fn time_with_tags(&self, key: &str, millis: u64, tags: &[(&str, &str)]);
39
+
}
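Any backend can plug in behind this trait. As a sketch of the extension point, a hypothetical publisher (not part of this change) that just forwards every metric to `tracing`:

```rust
use async_trait::async_trait;

struct TracingMetricsPublisher;

#[async_trait]
impl MetricsPublisher for TracingMetricsPublisher {
    async fn incr(&self, key: &str) {
        tracing::debug!(metric = key, "incr");
    }
    async fn count(&self, key: &str, value: u64) {
        tracing::debug!(metric = key, value, "count");
    }
    async fn incr_with_tags(&self, key: &str, tags: &[(&str, &str)]) {
        tracing::debug!(metric = key, ?tags, "incr");
    }
    async fn count_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
        tracing::debug!(metric = key, value, ?tags, "count");
    }
    async fn gauge(&self, key: &str, value: u64) {
        tracing::debug!(metric = key, value, "gauge");
    }
    async fn gauge_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
        tracing::debug!(metric = key, value, ?tags, "gauge");
    }
    async fn time(&self, key: &str, millis: u64) {
        tracing::debug!(metric = key, millis, "time");
    }
    async fn time_with_tags(&self, key: &str, millis: u64, tags: &[(&str, &str)]) {
        tracing::debug!(metric = key, millis, ?tags, "time");
    }
}
```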
40
+
41
+
/// No-op implementation for development and testing
42
+
#[derive(Debug, Clone, Default)]
43
+
pub struct NoOpMetricsPublisher;
44
+
45
+
impl NoOpMetricsPublisher {
46
+
pub fn new() -> Self {
47
+
Self
48
+
}
49
+
}
50
+
51
+
#[async_trait]
52
+
impl MetricsPublisher for NoOpMetricsPublisher {
53
+
async fn incr(&self, _key: &str) {
54
+
// No-op
55
+
}
56
+
57
+
async fn count(&self, _key: &str, _value: u64) {
58
+
// No-op
59
+
}
60
+
61
+
async fn incr_with_tags(&self, _key: &str, _tags: &[(&str, &str)]) {
62
+
// No-op
63
+
}
64
+
65
+
async fn count_with_tags(&self, _key: &str, _value: u64, _tags: &[(&str, &str)]) {
66
+
// No-op
67
+
}
68
+
69
+
async fn gauge(&self, _key: &str, _value: u64) {
70
+
// No-op
71
+
}
72
+
73
+
async fn gauge_with_tags(&self, _key: &str, _value: u64, _tags: &[(&str, &str)]) {
74
+
// No-op
75
+
}
76
+
77
+
async fn time(&self, _key: &str, _millis: u64) {
78
+
// No-op
79
+
}
80
+
81
+
async fn time_with_tags(&self, _key: &str, _millis: u64, _tags: &[(&str, &str)]) {
82
+
// No-op
83
+
}
84
+
}
85
+
86
+
/// Statsd-backed metrics publisher using cadence
87
+
pub struct StatsdMetricsPublisher {
88
+
client: StatsdClient,
89
+
default_tags: Vec<(String, String)>,
90
+
}
91
+
92
+
impl StatsdMetricsPublisher {
93
+
/// Create a new StatsdMetricsPublisher with default configuration
94
+
pub fn new(host: &str, prefix: &str) -> Result<Self, Box<dyn std::error::Error>> {
95
+
Self::new_with_bind(host, prefix, "[::]:0")
96
+
}
97
+
98
+
/// Create a new StatsdMetricsPublisher with custom bind address
99
+
pub fn new_with_bind(
100
+
host: &str,
101
+
prefix: &str,
102
+
bind_addr: &str,
103
+
) -> Result<Self, Box<dyn std::error::Error>> {
104
+
Self::new_with_bind_and_tags(host, prefix, bind_addr, vec![])
105
+
}
106
+
107
+
/// Create a new StatsdMetricsPublisher with default tags
108
+
pub fn new_with_tags(
109
+
host: &str,
110
+
prefix: &str,
111
+
default_tags: Vec<(String, String)>,
112
+
) -> Result<Self, Box<dyn std::error::Error>> {
113
+
Self::new_with_bind_and_tags(host, prefix, "[::]:0", default_tags)
114
+
}
115
+
116
+
/// Create a new StatsdMetricsPublisher with custom bind address and tags
117
+
pub fn new_with_bind_and_tags(
118
+
host: &str,
119
+
prefix: &str,
120
+
bind_addr: &str,
121
+
default_tags: Vec<(String, String)>,
122
+
) -> Result<Self, Box<dyn std::error::Error>> {
123
+
tracing::info!(
124
+
"Creating StatsdMetricsPublisher: host={}, prefix={}, bind={}, tags={:?}",
125
+
host,
126
+
prefix,
127
+
bind_addr,
128
+
default_tags
129
+
);
130
+
131
+
let socket = UdpSocket::bind(bind_addr)?;
132
+
socket.set_nonblocking(true)?;
133
+
134
+
let buffered_sink = BufferedUdpMetricSink::from(host, socket)?;
135
+
let queuing_sink = QueuingMetricSink::builder()
136
+
.with_error_handler(move |error| {
137
+
error!("Failed to send metric via sink: {}", error);
138
+
})
139
+
.build(buffered_sink);
140
+
let client = StatsdClient::from_sink(prefix, queuing_sink);
141
+
142
+
tracing::info!(
143
+
"StatsdMetricsPublisher created successfully with bind address: {}",
144
+
bind_addr
145
+
);
146
+
Ok(Self {
147
+
client,
148
+
default_tags,
149
+
})
150
+
}
151
+
152
+
/// Create from an existing StatsdClient
153
+
pub fn from_client(client: StatsdClient) -> Self {
154
+
Self::from_client_with_tags(client, vec![])
155
+
}
156
+
157
+
/// Create from an existing StatsdClient with default tags
158
+
pub fn from_client_with_tags(
159
+
client: StatsdClient,
160
+
default_tags: Vec<(String, String)>,
161
+
) -> Self {
162
+
Self {
163
+
client,
164
+
default_tags,
165
+
}
166
+
}
167
+
168
+
/// Apply default tags to a builder
169
+
fn apply_default_tags<'a, M>(
170
+
&'a self,
171
+
mut builder: cadence::MetricBuilder<'a, 'a, M>,
172
+
) -> cadence::MetricBuilder<'a, 'a, M>
173
+
where
174
+
M: Metric + From<String>,
175
+
{
176
+
for (k, v) in &self.default_tags {
177
+
builder = builder.with_tag(k.as_str(), v.as_str());
178
+
}
179
+
builder
180
+
}
181
+
}
182
+
183
+
#[async_trait]
184
+
impl MetricsPublisher for StatsdMetricsPublisher {
185
+
async fn incr(&self, key: &str) {
186
+
debug!("Sending metric incr: {}", key);
187
+
if self.default_tags.is_empty() {
188
+
match self.client.incr(key) {
189
+
Ok(_) => debug!("Successfully sent metric: {}", key),
190
+
Err(e) => error!("Failed to send metric {}: {}", key, e),
191
+
}
192
+
} else {
193
+
let builder = self.client.incr_with_tags(key);
194
+
let builder = self.apply_default_tags(builder);
195
+
let _ = builder.send();
196
+
debug!("Sent metric with tags: {}", key);
197
+
}
198
+
}
199
+
200
+
async fn count(&self, key: &str, value: u64) {
201
+
if self.default_tags.is_empty() {
202
+
let _ = self.client.count(key, value);
203
+
} else {
204
+
let builder = self.client.count_with_tags(key, value);
205
+
let builder = self.apply_default_tags(builder);
206
+
let _ = builder.send();
207
+
}
208
+
}
209
+
210
+
async fn incr_with_tags(&self, key: &str, tags: &[(&str, &str)]) {
211
+
let mut builder = self.client.incr_with_tags(key);
212
+
builder = self.apply_default_tags(builder);
213
+
for (k, v) in tags {
214
+
builder = builder.with_tag(k, v);
215
+
}
216
+
let _ = builder.send();
217
+
}
218
+
219
+
async fn count_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
220
+
let mut builder = self.client.count_with_tags(key, value);
221
+
builder = self.apply_default_tags(builder);
222
+
for (k, v) in tags {
223
+
builder = builder.with_tag(k, v);
224
+
}
225
+
let _ = builder.send();
226
+
}
227
+
228
+
async fn gauge(&self, key: &str, value: u64) {
229
+
debug!("Sending metric gauge: {} = {}", key, value);
230
+
if self.default_tags.is_empty() {
231
+
match self.client.gauge(key, value) {
232
+
Ok(_) => debug!("Successfully sent gauge: {} = {}", key, value),
233
+
Err(e) => error!("Failed to send gauge {} = {}: {}", key, value, e),
234
+
}
235
+
} else {
236
+
let builder = self.client.gauge_with_tags(key, value);
237
+
let builder = self.apply_default_tags(builder);
238
+
builder.send();
239
+
debug!("Sent gauge with tags: {} = {}", key, value);
240
+
}
241
+
}
242
+
243
+
async fn gauge_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
244
+
let mut builder = self.client.gauge_with_tags(key, value);
245
+
builder = self.apply_default_tags(builder);
246
+
for (k, v) in tags {
247
+
builder = builder.with_tag(k, v);
248
+
}
249
+
let _ = builder.send();
250
+
}
251
+
252
+
async fn time(&self, key: &str, millis: u64) {
253
+
if self.default_tags.is_empty() {
254
+
let _ = self.client.time(key, millis);
255
+
} else {
256
+
let builder = self.client.time_with_tags(key, millis);
257
+
let builder = self.apply_default_tags(builder);
258
+
let _ = builder.send();
259
+
}
260
+
}
261
+
262
+
async fn time_with_tags(&self, key: &str, millis: u64, tags: &[(&str, &str)]) {
263
+
let mut builder = self.client.time_with_tags(key, millis);
264
+
builder = self.apply_default_tags(builder);
265
+
for (k, v) in tags {
266
+
builder = builder.with_tag(k, v);
267
+
}
268
+
let _ = builder.send();
269
+
}
270
+
}
271
+
272
+
/// Type alias for shared metrics publisher
273
+
pub type SharedMetricsPublisher = Arc<dyn MetricsPublisher>;
274
+
275
+
/// Metrics-specific errors
276
+
#[derive(Debug, Error)]
277
+
pub enum MetricsError {
278
+
/// Failed to create metrics publisher
279
+
#[error("error-quickdid-metrics-1 Failed to create metrics publisher: {0}")]
280
+
CreationFailed(String),
281
+
282
+
/// Invalid configuration for metrics
283
+
#[error("error-quickdid-metrics-2 Invalid metrics configuration: {0}")]
284
+
InvalidConfig(String),
285
+
}
286
+
287
+
/// Create a metrics publisher based on configuration
288
+
///
289
+
/// Returns either a no-op publisher or a StatsD publisher based on the
290
+
/// `metrics_adapter` configuration value.
291
+
///
292
+
/// ## Example
293
+
///
294
+
/// ```rust,no_run
295
+
/// use quickdid::config::Config;
296
+
/// use quickdid::metrics::create_metrics_publisher;
297
+
///
298
+
/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
299
+
/// let config = Config::from_env()?;
300
+
/// let metrics = create_metrics_publisher(&config)?;
301
+
///
302
+
/// // Use the metrics publisher
303
+
/// metrics.incr("request.count").await;
304
+
/// # Ok(())
305
+
/// # }
306
+
/// ```
307
+
pub fn create_metrics_publisher(config: &Config) -> Result<SharedMetricsPublisher, MetricsError> {
308
+
match config.metrics_adapter.as_str() {
309
+
"noop" => Ok(Arc::new(NoOpMetricsPublisher::new())),
310
+
"statsd" => {
311
+
let host = config.metrics_statsd_host.as_ref().ok_or_else(|| {
312
+
MetricsError::InvalidConfig(
313
+
"METRICS_STATSD_HOST is required when using statsd adapter".to_string(),
314
+
)
315
+
})?;
316
+
317
+
// Parse tags from comma-separated key:value pairs
318
+
let default_tags = if let Some(tags_str) = &config.metrics_tags {
319
+
tags_str
320
+
.split(',')
321
+
.filter_map(|tag| {
322
+
let parts: Vec<&str> = tag.trim().split(':').collect();
323
+
if parts.len() == 2 {
324
+
Some((parts[0].to_string(), parts[1].to_string()))
325
+
} else {
326
+
error!("Invalid tag format: {}", tag);
327
+
None
328
+
}
329
+
})
330
+
.collect()
331
+
} else {
332
+
vec![]
333
+
};
334
+
335
+
let publisher = StatsdMetricsPublisher::new_with_bind_and_tags(
336
+
host,
337
+
&config.metrics_prefix,
338
+
&config.metrics_statsd_bind,
339
+
default_tags,
340
+
)
341
+
.map_err(|e| MetricsError::CreationFailed(e.to_string()))?;
342
+
343
+
Ok(Arc::new(publisher))
344
+
}
345
+
_ => Err(MetricsError::InvalidConfig(format!(
346
+
"Unknown metrics adapter: {}",
347
+
config.metrics_adapter
348
+
))),
349
+
}
350
+
}
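The statsd branch expects `METRICS_TAGS` as a comma-separated list of `key:value` pairs; malformed entries are logged and dropped. The parsing rule in isolation (standalone sketch, not the function above):

```rust
fn parse_tags(tags_str: &str) -> Vec<(String, String)> {
    tags_str
        .split(',')
        .filter_map(|tag| {
            let parts: Vec<&str> = tag.trim().split(':').collect();
            // Anything that is not exactly key:value is skipped.
            (parts.len() == 2).then(|| (parts[0].to_string(), parts[1].to_string()))
        })
        .collect()
}

fn main() {
    // e.g. METRICS_TAGS=env:production,service:quickdid
    assert_eq!(
        parse_tags("env:production, service:quickdid, broken"),
        vec![
            ("env".to_string(), "production".to_string()),
            ("service".to_string(), "quickdid".to_string()),
        ]
    );
}
```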
351
+
352
+
#[cfg(test)]
353
+
mod tests {
354
+
use super::*;
355
+
use once_cell::sync::Lazy;
356
+
use std::sync::Mutex;
357
+
358
+
// Use a mutex to serialize tests that modify environment variables
359
+
static ENV_MUTEX: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
360
+
361
+
#[tokio::test]
362
+
async fn test_noop_metrics() {
363
+
let metrics = NoOpMetricsPublisher::new();
364
+
365
+
// These should all be no-ops and not panic
366
+
metrics.incr("test.counter").await;
367
+
metrics.count("test.counter", 5).await;
368
+
metrics
369
+
.incr_with_tags("test.counter", &[("env", "test")])
370
+
.await;
371
+
metrics
372
+
.count_with_tags(
373
+
"test.counter",
374
+
10,
375
+
&[("env", "test"), ("service", "quickdid")],
376
+
)
377
+
.await;
378
+
metrics.gauge("test.gauge", 100).await;
379
+
metrics
380
+
.gauge_with_tags("test.gauge", 200, &[("host", "localhost")])
381
+
.await;
382
+
metrics.time("test.timing", 42).await;
383
+
metrics
384
+
.time_with_tags("test.timing", 84, &[("endpoint", "/resolve")])
385
+
.await;
386
+
}
387
+
388
+
#[tokio::test]
389
+
async fn test_shared_metrics() {
390
+
let metrics: SharedMetricsPublisher = Arc::new(NoOpMetricsPublisher::new());
391
+
392
+
// Verify it can be used as a shared reference
393
+
metrics.incr("shared.counter").await;
394
+
metrics.gauge("shared.gauge", 50).await;
395
+
396
+
// Verify it can be cloned
397
+
let metrics2 = Arc::clone(&metrics);
398
+
metrics2.count("cloned.counter", 3).await;
399
+
}
400
+
401
+
#[test]
402
+
fn test_create_noop_publisher() {
403
+
use std::env;
404
+
405
+
// Lock mutex to prevent concurrent environment variable modification
406
+
let _guard = ENV_MUTEX.lock().unwrap();
407
+
408
+
// Clean up any existing environment variables first
409
+
unsafe {
410
+
env::remove_var("METRICS_ADAPTER");
411
+
env::remove_var("METRICS_STATSD_HOST");
412
+
env::remove_var("METRICS_PREFIX");
413
+
env::remove_var("METRICS_TAGS");
414
+
}
415
+
416
+
// Set up environment for noop adapter
417
+
unsafe {
418
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
419
+
env::set_var("METRICS_ADAPTER", "noop");
420
+
}
421
+
422
+
let config = Config::from_env().unwrap();
423
+
let metrics = create_metrics_publisher(&config).unwrap();
424
+
425
+
// Should create successfully - actual type checking happens at compile time
426
+
assert!(Arc::strong_count(&metrics) == 1);
427
+
428
+
// Clean up
429
+
unsafe {
430
+
env::remove_var("METRICS_ADAPTER");
431
+
env::remove_var("HTTP_EXTERNAL");
432
+
}
433
+
}
434
+
435
+
#[test]
436
+
fn test_create_statsd_publisher() {
437
+
use std::env;
438
+
439
+
// Lock mutex to prevent concurrent environment variable modification
440
+
let _guard = ENV_MUTEX.lock().unwrap();
441
+
442
+
// Clean up any existing environment variables first
443
+
unsafe {
444
+
env::remove_var("METRICS_ADAPTER");
445
+
env::remove_var("METRICS_STATSD_HOST");
446
+
env::remove_var("METRICS_PREFIX");
447
+
env::remove_var("METRICS_TAGS");
448
+
}
449
+
450
+
// Set up environment for statsd adapter
451
+
unsafe {
452
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
453
+
env::set_var("METRICS_ADAPTER", "statsd");
454
+
env::set_var("METRICS_STATSD_HOST", "localhost:8125");
455
+
env::set_var("METRICS_PREFIX", "test");
456
+
env::set_var("METRICS_TAGS", "env:test,service:quickdid");
457
+
}
458
+
459
+
let config = Config::from_env().unwrap();
460
+
let metrics = create_metrics_publisher(&config).unwrap();
461
+
462
+
// Should create successfully
463
+
assert!(Arc::strong_count(&metrics) == 1);
464
+
465
+
// Clean up
466
+
unsafe {
467
+
env::remove_var("METRICS_ADAPTER");
468
+
env::remove_var("METRICS_STATSD_HOST");
469
+
env::remove_var("METRICS_PREFIX");
470
+
env::remove_var("METRICS_TAGS");
471
+
env::remove_var("HTTP_EXTERNAL");
472
+
}
473
+
}
474
+
475
+
#[test]
476
+
fn test_missing_statsd_host() {
477
+
use std::env;
478
+
479
+
// Lock mutex to prevent concurrent environment variable modification
480
+
let _guard = ENV_MUTEX.lock().unwrap();
481
+
482
+
// Clean up any existing environment variables first
483
+
unsafe {
484
+
env::remove_var("METRICS_ADAPTER");
485
+
env::remove_var("METRICS_STATSD_HOST");
486
+
env::remove_var("METRICS_PREFIX");
487
+
env::remove_var("METRICS_TAGS");
488
+
}
489
+
490
+
// Set up environment for statsd adapter without host
491
+
unsafe {
492
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
493
+
env::set_var("METRICS_ADAPTER", "statsd");
494
+
env::remove_var("METRICS_STATSD_HOST");
495
+
}
496
+
497
+
let config = Config::from_env().unwrap();
498
+
let result = create_metrics_publisher(&config);
499
+
500
+
// Should fail with invalid config error
501
+
assert!(result.is_err());
502
+
if let Err(e) = result {
503
+
assert!(matches!(e, MetricsError::InvalidConfig(_)));
504
+
}
505
+
506
+
// Clean up
507
+
unsafe {
508
+
env::remove_var("METRICS_ADAPTER");
509
+
env::remove_var("HTTP_EXTERNAL");
510
+
}
511
+
}
512
+
513
+
#[test]
514
+
fn test_invalid_adapter() {
515
+
use std::env;
516
+
517
+
// Lock mutex to prevent concurrent environment variable modification
518
+
let _guard = ENV_MUTEX.lock().unwrap();
519
+
520
+
// Clean up any existing environment variables first
521
+
unsafe {
522
+
env::remove_var("METRICS_ADAPTER");
523
+
env::remove_var("METRICS_STATSD_HOST");
524
+
env::remove_var("METRICS_PREFIX");
525
+
env::remove_var("METRICS_TAGS");
526
+
}
527
+
528
+
// Set up environment with invalid adapter
529
+
unsafe {
530
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
531
+
env::set_var("METRICS_ADAPTER", "invalid");
532
+
env::remove_var("METRICS_STATSD_HOST"); // Clean up from other tests
533
+
}
534
+
535
+
let config = Config::from_env().unwrap();
536
+
537
+
// Config validation should catch this
538
+
let validation_result = config.validate();
539
+
assert!(validation_result.is_err());
540
+
541
+
// Clean up
542
+
unsafe {
543
+
env::remove_var("METRICS_ADAPTER");
544
+
env::remove_var("HTTP_EXTERNAL");
545
+
}
546
+
}
547
+
}
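The factory above parses `METRICS_TAGS` as comma-separated `key:value` pairs, logging and skipping malformed entries. A minimal standalone sketch of that parsing behavior; the `parse_tags` helper is hypothetical and written only for illustration, since the crate does this inline inside `create_metrics_publisher`:

```rust
/// Hypothetical helper mirroring the tag parsing in `create_metrics_publisher`.
fn parse_tags(tags_str: &str) -> Vec<(String, String)> {
    tags_str
        .split(',')
        .filter_map(|tag| {
            let parts: Vec<&str> = tag.trim().split(':').collect();
            if parts.len() == 2 {
                // Well-formed "key:value" pair.
                Some((parts[0].to_string(), parts[1].to_string()))
            } else {
                // Malformed entries ("oops" or "a:b:c") are skipped;
                // the real factory also logs an error here.
                None
            }
        })
        .collect()
}

fn main() {
    // Mirrors METRICS_TAGS="env:test,service:quickdid" plus one bad entry.
    let tags = parse_tags("env:test,service:quickdid, bad-entry ");
    assert_eq!(
        tags,
        vec![
            ("env".to_string(), "test".to_string()),
            ("service".to_string(), "quickdid".to_string()),
        ]
    );
    println!("parsed {} default tags", tags.len());
}
```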
+189
src/queue/adapter.rs
···
1
+
//! Queue adapter trait definition.
2
+
//!
3
+
//! This module defines the core `QueueAdapter` trait that provides a common
4
+
//! interface for different queue implementations (MPSC, Redis, SQLite, etc.).
5
+
6
+
use super::error::Result;
7
+
use async_trait::async_trait;
8
+
9
+
/// Generic trait for queue adapters that can work with any work type.
10
+
///
11
+
/// This trait provides a common interface for different queue implementations
12
+
/// (MPSC, Redis, PostgreSQL, SQLite, etc.) allowing them to be used interchangeably.
13
+
///
14
+
/// # Type Parameters
15
+
///
16
+
/// * `T` - The type of work items that this queue processes. Must be `Send + Sync + 'static`.
17
+
///
18
+
/// # Implementation Notes
19
+
///
20
+
/// Implementors should ensure that:
21
+
/// - `pull()` blocks until an item is available or the queue is closed
22
+
/// - `push()` may block if the queue has a bounded capacity
23
+
/// - `ack()` is used for reliable delivery semantics (can be no-op for simple queues)
24
+
/// - `try_push()` never blocks and returns an error if the queue is full
25
+
///
26
+
/// # Examples
27
+
///
28
+
/// ```no_run
29
+
/// use quickdid::queue::{QueueAdapter, MpscQueueAdapter};
30
+
/// use std::sync::Arc;
31
+
///
32
+
/// # async fn example() -> anyhow::Result<()> {
33
+
/// // Create a queue adapter for String work items
34
+
/// let queue: Arc<dyn QueueAdapter<String>> = Arc::new(MpscQueueAdapter::new(100));
35
+
///
36
+
/// // Push work to the queue
37
+
/// queue.push("process-this".to_string()).await?;
38
+
///
39
+
/// // Pull work from the queue
40
+
/// if let Some(work) = queue.pull().await {
41
+
/// println!("Processing: {}", work);
42
+
/// // Acknowledge completion
43
+
/// queue.ack(&work).await?;
44
+
/// }
45
+
/// # Ok(())
46
+
/// # }
47
+
/// ```
48
+
#[async_trait]
49
+
pub trait QueueAdapter<T>: Send + Sync
50
+
where
51
+
T: Send + Sync + 'static,
52
+
{
53
+
/// Pull the next work item from the queue.
54
+
///
55
+
/// This method blocks until an item is available or the queue is closed.
56
+
/// Returns `None` if the queue is closed or empty (depending on implementation).
57
+
///
58
+
/// # Returns
59
+
///
60
+
/// * `Some(T)` - The next work item from the queue
61
+
/// * `None` - The queue is closed or empty
62
+
async fn pull(&self) -> Option<T>;
63
+
64
+
/// Push a work item to the queue.
65
+
///
66
+
/// This method may block if the queue has bounded capacity and is full.
67
+
///
68
+
/// # Arguments
69
+
///
70
+
/// * `work` - The work item to add to the queue
71
+
///
72
+
/// # Errors
73
+
///
74
+
/// Returns an error if:
75
+
/// - The queue is full (for bounded queues)
76
+
/// - The queue is closed
77
+
/// - Serialization fails (for persistent queues)
78
+
/// - Backend connection fails (for Redis/SQLite)
79
+
async fn push(&self, work: T) -> Result<()>;
80
+
81
+
/// Acknowledge that a work item has been successfully processed.
82
+
///
83
+
/// This is used by reliable queue implementations to remove the item
84
+
/// from a temporary processing queue. Implementations that don't require
85
+
/// acknowledgment (like MPSC) can use the default no-op implementation.
86
+
///
87
+
/// # Arguments
88
+
///
89
+
/// * `item` - The work item to acknowledge
90
+
///
91
+
/// # Errors
92
+
///
93
+
/// Returns an error if acknowledgment fails (backend-specific).
94
+
async fn ack(&self, _item: &T) -> Result<()> {
95
+
// Default no-op implementation for queues that don't need acknowledgment
96
+
Ok(())
97
+
}
98
+
99
+
/// Try to push a work item without blocking.
100
+
///
101
+
/// This method returns immediately with an error if the queue is full.
102
+
///
103
+
/// # Arguments
104
+
///
105
+
/// * `work` - The work item to add to the queue
106
+
///
107
+
/// # Errors
108
+
///
109
+
/// Returns an error if:
110
+
/// - The queue is full
111
+
/// - The queue is closed
112
+
/// - Other backend-specific errors occur
113
+
async fn try_push(&self, work: T) -> Result<()> {
114
+
// Default implementation uses regular push
115
+
self.push(work).await
116
+
}
117
+
118
+
/// Get the current queue depth if available.
119
+
///
120
+
/// # Returns
121
+
///
122
+
/// * `Some(usize)` - The number of items currently in the queue
123
+
/// * `None` - Queue depth is not available or cannot be determined
124
+
async fn depth(&self) -> Option<usize> {
125
+
None
126
+
}
127
+
128
+
/// Check if the queue is healthy.
129
+
///
130
+
/// Used for health checks and monitoring. Implementations should verify
131
+
/// backend connectivity and basic functionality.
132
+
///
133
+
/// # Returns
134
+
///
135
+
/// * `true` - The queue is operational
136
+
/// * `false` - The queue has issues or is disconnected
137
+
async fn is_healthy(&self) -> bool {
138
+
true
139
+
}
140
+
}
141
+
142
+
#[cfg(test)]
143
+
mod tests {
144
+
use super::*;
145
+
146
+
// Mock implementation for testing the trait
147
+
struct MockQueue<T> {
148
+
_phantom: std::marker::PhantomData<T>,
149
+
}
150
+
151
+
impl<T> MockQueue<T> {
152
+
fn new() -> Self {
153
+
Self {
154
+
_phantom: std::marker::PhantomData,
155
+
}
156
+
}
157
+
}
158
+
159
+
#[async_trait]
160
+
impl<T> QueueAdapter<T> for MockQueue<T>
161
+
where
162
+
T: Send + Sync + 'static,
163
+
{
164
+
async fn pull(&self) -> Option<T> {
165
+
None
166
+
}
167
+
168
+
async fn push(&self, _work: T) -> Result<()> {
169
+
Ok(())
170
+
}
171
+
}
172
+
173
+
#[tokio::test]
174
+
async fn test_default_trait_methods() {
175
+
let queue = MockQueue::<String>::new();
176
+
177
+
// Test default ack implementation
178
+
assert!(queue.ack(&"test".to_string()).await.is_ok());
179
+
180
+
// Test default try_push implementation
181
+
assert!(queue.try_push("test".to_string()).await.is_ok());
182
+
183
+
// Test default depth implementation
184
+
assert_eq!(queue.depth().await, None);
185
+
186
+
// Test default is_healthy implementation
187
+
assert!(queue.is_healthy().await);
188
+
}
189
+
}
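Only `pull` and `push` are required; `ack`, `try_push`, `depth`, and `is_healthy` fall back to the defaults defined above. A minimal sketch of a custom adapter that relies on those defaults; the `VecQueue` type is hypothetical and not part of the crate:

```rust
use async_trait::async_trait;
use quickdid::queue::{QueueAdapter, Result};
use std::collections::VecDeque;
use tokio::sync::Mutex;

/// Hypothetical in-memory adapter backed by a VecDeque.
struct VecQueue<T> {
    items: Mutex<VecDeque<T>>,
}

impl<T> VecQueue<T> {
    fn new() -> Self {
        Self { items: Mutex::new(VecDeque::new()) }
    }
}

#[async_trait]
impl<T> QueueAdapter<T> for VecQueue<T>
where
    T: Send + Sync + 'static,
{
    async fn pull(&self) -> Option<T> {
        // Non-blocking pop; a production adapter would wait for new work.
        self.items.lock().await.pop_front()
    }

    async fn push(&self, work: T) -> Result<()> {
        self.items.lock().await.push_back(work);
        Ok(())
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let queue = VecQueue::new();
    queue.push("resolve:alice.bsky.social".to_string()).await?;
    if let Some(item) = queue.pull().await {
        // Default ack is a no-op for adapters that do not need it.
        queue.ack(&item).await?;
    }
    // Defaults: depth() reports None, is_healthy() reports true.
    assert!(queue.is_healthy().await);
    Ok(())
}
```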
+76
src/queue/error.rs
···
1
+
//! Queue operation error types.
2
+
//!
3
+
//! This module defines the error types that can occur during queue operations,
4
+
//! including push failures, serialization issues, and backend-specific errors.
5
+
6
+
use thiserror::Error;
7
+
8
+
/// Queue operation errors.
9
+
///
10
+
/// These errors represent various failure modes that can occur when working
11
+
/// with queue adapters, from connection issues to serialization problems.
12
+
#[derive(Error, Debug)]
13
+
pub enum QueueError {
14
+
/// Failed to push an item to the queue.
15
+
#[error("error-quickdid-queue-1 Failed to push to queue: {0}")]
16
+
PushFailed(String),
17
+
18
+
/// The queue is full and cannot accept new items.
19
+
#[error("error-quickdid-queue-2 Queue is full")]
20
+
QueueFull,
21
+
22
+
/// The queue has been closed and is no longer accepting items.
23
+
#[error("error-quickdid-queue-3 Queue is closed")]
24
+
QueueClosed,
25
+
26
+
/// Redis connection failed.
27
+
#[error("error-quickdid-queue-4 Redis connection failed: {0}")]
28
+
RedisConnectionFailed(String),
29
+
30
+
/// Redis operation failed.
31
+
#[error("error-quickdid-queue-5 Redis operation failed: {operation}: {details}")]
32
+
RedisOperationFailed {
33
+
/// The Redis operation that failed
34
+
operation: String,
35
+
/// Details about the failure
36
+
details: String,
37
+
},
38
+
39
+
/// Failed to serialize an item for storage.
40
+
#[error("error-quickdid-queue-6 Serialization failed: {0}")]
41
+
SerializationFailed(String),
42
+
43
+
/// Failed to deserialize an item from storage.
44
+
#[error("error-quickdid-queue-7 Deserialization failed: {0}")]
45
+
DeserializationFailed(String),
46
+
47
+
/// Item not found in worker queue during acknowledgment.
48
+
#[error("error-quickdid-queue-8 Item not found in worker queue during acknowledgment")]
49
+
AckItemNotFound,
50
+
}
51
+
52
+
/// Result type alias for queue operations.
53
+
pub type Result<T> = std::result::Result<T, QueueError>;
54
+
55
+
#[cfg(test)]
56
+
mod tests {
57
+
use super::*;
58
+
59
+
#[test]
60
+
fn test_error_messages() {
61
+
let err = QueueError::PushFailed("test failure".to_string());
62
+
assert!(err.to_string().contains("error-quickdid-queue-1"));
63
+
assert!(err.to_string().contains("test failure"));
64
+
65
+
let err = QueueError::QueueFull;
66
+
assert_eq!(err.to_string(), "error-quickdid-queue-2 Queue is full");
67
+
68
+
let err = QueueError::RedisOperationFailed {
69
+
operation: "LPUSH".to_string(),
70
+
details: "connection timeout".to_string(),
71
+
};
72
+
assert!(err.to_string().contains("error-quickdid-queue-5"));
73
+
assert!(err.to_string().contains("LPUSH"));
74
+
assert!(err.to_string().contains("connection timeout"));
75
+
}
76
+
}
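Callers typically treat some of these variants as transient and others as permanent. A hedged sketch of one way to classify them for retry decisions; the `should_retry` helper and the policy it encodes are hypothetical, not part of the crate:

```rust
use quickdid::queue::QueueError;

/// Hypothetical classification: retry transient failures, drop permanent ones.
fn should_retry(err: &QueueError) -> bool {
    match err {
        // Transient conditions: back off and try again later.
        QueueError::QueueFull
        | QueueError::RedisConnectionFailed(_)
        | QueueError::RedisOperationFailed { .. } => true,
        // Permanent conditions: dropping or dead-lettering is more appropriate.
        QueueError::QueueClosed
        | QueueError::PushFailed(_)
        | QueueError::SerializationFailed(_)
        | QueueError::DeserializationFailed(_)
        | QueueError::AckItemNotFound => false,
    }
}

fn main() {
    assert!(should_retry(&QueueError::QueueFull));
    assert!(!should_retry(&QueueError::QueueClosed));
    // Error codes from the variants above are part of the Display output.
    println!("full queue renders as: {}", QueueError::QueueFull);
}
```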
+381
src/queue/factory.rs
···
1
+
//! Factory functions for creating queue adapters.
2
+
//!
3
+
//! This module provides convenient factory functions for creating different
4
+
//! types of queue adapters with appropriate configurations.
5
+
6
+
use deadpool_redis::Pool as RedisPool;
7
+
use serde::{Deserialize, Serialize};
8
+
use std::sync::Arc;
9
+
use tokio::sync::mpsc;
10
+
11
+
use super::{
12
+
adapter::QueueAdapter, mpsc::MpscQueueAdapter, noop::NoopQueueAdapter,
13
+
redis::RedisQueueAdapter, sqlite::SqliteQueueAdapter, work::DedupKey,
14
+
};
15
+
16
+
// ========= MPSC Queue Factories =========
17
+
18
+
/// Create a new MPSC queue adapter with the specified buffer size.
19
+
///
20
+
/// This creates an in-memory queue suitable for single-instance deployments.
21
+
///
22
+
/// # Arguments
23
+
///
24
+
/// * `buffer` - The buffer size for the channel
25
+
///
26
+
/// # Examples
27
+
///
28
+
/// ```
29
+
/// use quickdid::queue::create_mpsc_queue;
30
+
///
31
+
/// let queue = create_mpsc_queue::<String>(100);
32
+
/// ```
33
+
pub fn create_mpsc_queue<T>(buffer: usize) -> Arc<dyn QueueAdapter<T>>
34
+
where
35
+
T: Send + Sync + 'static,
36
+
{
37
+
Arc::new(MpscQueueAdapter::new(buffer))
38
+
}
39
+
40
+
/// Create an MPSC queue adapter from existing channels.
41
+
///
42
+
/// This allows integration with existing channel-based architectures.
43
+
///
44
+
/// # Arguments
45
+
///
46
+
/// * `sender` - The sender half of the channel
47
+
/// * `receiver` - The receiver half of the channel
48
+
///
49
+
/// # Examples
50
+
///
51
+
/// ```
52
+
/// use tokio::sync::mpsc;
53
+
/// use quickdid::queue::create_mpsc_queue_from_channel;
54
+
///
55
+
/// let (sender, receiver) = mpsc::channel::<String>(50);
56
+
/// let queue = create_mpsc_queue_from_channel(sender, receiver);
57
+
/// ```
58
+
pub fn create_mpsc_queue_from_channel<T>(
59
+
sender: mpsc::Sender<T>,
60
+
receiver: mpsc::Receiver<T>,
61
+
) -> Arc<dyn QueueAdapter<T>>
62
+
where
63
+
T: Send + Sync + 'static,
64
+
{
65
+
Arc::new(MpscQueueAdapter::from_channel(sender, receiver))
66
+
}
67
+
68
+
// ========= Redis Queue Factories =========
69
+
70
+
/// Create a new Redis-backed queue adapter.
71
+
///
72
+
/// This creates a distributed queue suitable for multi-instance deployments.
73
+
///
74
+
/// # Arguments
75
+
///
76
+
/// * `pool` - Redis connection pool
77
+
/// * `worker_id` - Worker identifier for this queue instance
78
+
/// * `key_prefix` - Redis key prefix for queue operations
79
+
/// * `timeout_seconds` - Timeout for blocking operations
80
+
///
81
+
/// # Examples
82
+
///
83
+
/// ```no_run
84
+
/// use quickdid::queue::{create_redis_queue, HandleResolutionWork};
85
+
/// use deadpool_redis::Config;
86
+
///
87
+
/// # async fn example() -> anyhow::Result<()> {
88
+
/// let cfg = Config::from_url("redis://localhost:6379");
89
+
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
90
+
///
91
+
/// let queue = create_redis_queue::<HandleResolutionWork>(
92
+
/// pool,
93
+
/// "worker-1".to_string(),
94
+
/// "queue:myapp:".to_string(),
95
+
/// 5,
96
+
/// );
97
+
/// # Ok(())
98
+
/// # }
99
+
/// ```
100
+
pub fn create_redis_queue<T>(
101
+
pool: RedisPool,
102
+
worker_id: String,
103
+
key_prefix: String,
104
+
timeout_seconds: u64,
105
+
) -> Arc<dyn QueueAdapter<T>>
106
+
where
107
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
108
+
{
109
+
Arc::new(RedisQueueAdapter::new(
110
+
pool,
111
+
worker_id,
112
+
key_prefix,
113
+
timeout_seconds,
114
+
))
115
+
}
116
+
117
+
/// Create a new Redis-backed queue adapter with deduplication.
118
+
///
119
+
/// This creates a distributed queue with deduplication to prevent duplicate items
120
+
/// from being queued within the specified TTL window.
121
+
///
122
+
/// # Arguments
123
+
///
124
+
/// * `pool` - Redis connection pool
125
+
/// * `worker_id` - Worker identifier for this queue instance
126
+
/// * `key_prefix` - Redis key prefix for queue operations
127
+
/// * `timeout_seconds` - Timeout for blocking operations
128
+
/// * `dedup_enabled` - Whether to enable deduplication
129
+
/// * `dedup_ttl` - TTL for deduplication keys in seconds
130
+
///
131
+
/// # Examples
132
+
///
133
+
/// ```no_run
134
+
/// use quickdid::queue::{create_redis_queue_with_dedup, HandleResolutionWork};
135
+
/// use deadpool_redis::Config;
136
+
///
137
+
/// # async fn example() -> anyhow::Result<()> {
138
+
/// let cfg = Config::from_url("redis://localhost:6379");
139
+
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
140
+
///
141
+
/// let queue = create_redis_queue_with_dedup::<HandleResolutionWork>(
142
+
/// pool,
143
+
/// "worker-1".to_string(),
144
+
/// "queue:myapp:".to_string(),
145
+
/// 5,
146
+
/// true, // Enable deduplication
147
+
/// 60, // 60 second dedup window
148
+
/// );
149
+
/// # Ok(())
150
+
/// # }
151
+
/// ```
152
+
pub fn create_redis_queue_with_dedup<T>(
153
+
pool: RedisPool,
154
+
worker_id: String,
155
+
key_prefix: String,
156
+
timeout_seconds: u64,
157
+
dedup_enabled: bool,
158
+
dedup_ttl: u64,
159
+
) -> Arc<dyn QueueAdapter<T>>
160
+
where
161
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
162
+
{
163
+
Arc::new(RedisQueueAdapter::with_dedup(
164
+
pool,
165
+
worker_id,
166
+
key_prefix,
167
+
timeout_seconds,
168
+
dedup_enabled,
169
+
dedup_ttl,
170
+
))
171
+
}
172
+
173
+
// ========= SQLite Queue Factories =========
174
+
175
+
/// Create a new SQLite queue adapter with unlimited queue size.
176
+
///
177
+
/// This creates a persistent queue backed by SQLite database suitable
178
+
/// for single-instance deployments that need persistence across restarts.
179
+
/// The queue has no size limit and may grow unbounded.
180
+
///
181
+
/// # Arguments
182
+
///
183
+
/// * `pool` - SQLite connection pool
184
+
///
185
+
/// # Examples
186
+
///
187
+
/// ```no_run
188
+
/// use quickdid::queue::{create_sqlite_queue, HandleResolutionWork};
189
+
/// use quickdid::sqlite_schema::create_sqlite_pool;
190
+
///
191
+
/// # async fn example() -> anyhow::Result<()> {
192
+
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
193
+
/// let queue = create_sqlite_queue::<HandleResolutionWork>(pool);
194
+
/// # Ok(())
195
+
/// # }
196
+
/// ```
197
+
pub fn create_sqlite_queue<T>(pool: sqlx::SqlitePool) -> Arc<dyn QueueAdapter<T>>
198
+
where
199
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
200
+
{
201
+
Arc::new(SqliteQueueAdapter::new(pool))
202
+
}
203
+
204
+
/// Create a new SQLite queue adapter with work shedding.
205
+
///
206
+
/// This creates a persistent queue with configurable maximum size.
207
+
/// When the queue exceeds `max_size`, the oldest entries are automatically
208
+
/// deleted to maintain the limit, preserving the most recent work items.
209
+
///
210
+
/// # Arguments
211
+
///
212
+
/// * `pool` - SQLite connection pool
213
+
/// * `max_size` - Maximum number of entries (0 = unlimited)
214
+
///
215
+
/// # Work Shedding Behavior
216
+
///
217
+
/// - New work items are always accepted
218
+
/// - When queue size exceeds `max_size`, oldest entries are deleted
219
+
/// - Deletion happens atomically with insertion in a single transaction
220
+
/// - Essential for long-running deployments to prevent disk space issues
221
+
///
222
+
/// # Examples
223
+
///
224
+
/// ```no_run
225
+
/// use quickdid::queue::{create_sqlite_queue_with_max_size, HandleResolutionWork};
226
+
/// use quickdid::sqlite_schema::create_sqlite_pool;
227
+
///
228
+
/// # async fn example() -> anyhow::Result<()> {
229
+
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
230
+
/// // Limit queue to 10,000 entries with automatic work shedding
231
+
/// let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 10000);
232
+
/// # Ok(())
233
+
/// # }
234
+
/// ```
235
+
pub fn create_sqlite_queue_with_max_size<T>(
236
+
pool: sqlx::SqlitePool,
237
+
max_size: u64,
238
+
) -> Arc<dyn QueueAdapter<T>>
239
+
where
240
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
241
+
{
242
+
Arc::new(SqliteQueueAdapter::with_max_size(pool, max_size))
243
+
}
244
+
245
+
// ========= No-op Queue Factory =========
246
+
247
+
/// Create a no-operation queue adapter.
248
+
///
249
+
/// This creates a queue that discards all work items, useful for testing
250
+
/// or when queue processing is disabled.
251
+
///
252
+
/// # Examples
253
+
///
254
+
/// ```
255
+
/// use quickdid::queue::create_noop_queue;
256
+
///
257
+
/// let queue = create_noop_queue::<String>();
258
+
/// ```
259
+
pub fn create_noop_queue<T>() -> Arc<dyn QueueAdapter<T>>
260
+
where
261
+
T: Send + Sync + 'static,
262
+
{
263
+
Arc::new(NoopQueueAdapter::new())
264
+
}
265
+
266
+
#[cfg(test)]
267
+
mod tests {
268
+
use super::*;
269
+
use crate::queue::HandleResolutionWork;
270
+
271
+
#[tokio::test]
272
+
async fn test_create_mpsc_queue() {
273
+
let queue = create_mpsc_queue::<String>(10);
274
+
275
+
queue.push("test".to_string()).await.unwrap();
276
+
let item = queue.pull().await;
277
+
assert_eq!(item, Some("test".to_string()));
278
+
}
279
+
280
+
#[tokio::test]
281
+
async fn test_create_mpsc_queue_from_channel() {
282
+
let (sender, receiver) = mpsc::channel(5);
283
+
let queue = create_mpsc_queue_from_channel(sender.clone(), receiver);
284
+
285
+
// Send via original sender
286
+
sender.send("external".to_string()).await.unwrap();
287
+
288
+
// Receive via queue
289
+
let item = queue.pull().await;
290
+
assert_eq!(item, Some("external".to_string()));
291
+
}
292
+
293
+
#[tokio::test]
294
+
async fn test_create_noop_queue() {
295
+
let queue = create_noop_queue::<String>();
296
+
297
+
// Should accept pushes
298
+
queue.push("ignored".to_string()).await.unwrap();
299
+
300
+
// Should report as healthy
301
+
assert!(queue.is_healthy().await);
302
+
303
+
// Should report depth as 0
304
+
assert_eq!(queue.depth().await, Some(0));
305
+
}
306
+
307
+
#[tokio::test]
308
+
async fn test_create_sqlite_queue() {
309
+
// Create in-memory SQLite database for testing
310
+
let pool = sqlx::SqlitePool::connect("sqlite::memory:")
311
+
.await
312
+
.expect("Failed to connect to in-memory SQLite");
313
+
314
+
// Create the queue schema
315
+
crate::sqlite_schema::create_schema(&pool)
316
+
.await
317
+
.expect("Failed to create schema");
318
+
319
+
let queue = create_sqlite_queue::<HandleResolutionWork>(pool);
320
+
321
+
let work = HandleResolutionWork::new("test.example.com".to_string());
322
+
queue.push(work.clone()).await.unwrap();
323
+
324
+
let pulled = queue.pull().await;
325
+
assert_eq!(pulled, Some(work));
326
+
}
327
+
328
+
#[tokio::test]
329
+
async fn test_create_sqlite_queue_with_max_size() {
330
+
// Create in-memory SQLite database for testing
331
+
let pool = sqlx::SqlitePool::connect("sqlite::memory:")
332
+
.await
333
+
.expect("Failed to connect to in-memory SQLite");
334
+
335
+
// Create the queue schema
336
+
crate::sqlite_schema::create_schema(&pool)
337
+
.await
338
+
.expect("Failed to create schema");
339
+
340
+
// Create queue with small max size
341
+
let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 5);
342
+
343
+
// Push items
344
+
for i in 0..10 {
345
+
let work = HandleResolutionWork::new(format!("test-{}.example.com", i));
346
+
queue.push(work).await.unwrap();
347
+
}
348
+
349
+
// Should have limited items due to work shedding
350
+
let depth = queue.depth().await.unwrap();
351
+
assert!(
352
+
depth <= 5,
353
+
"Queue should have at most 5 items after work shedding"
354
+
);
355
+
}
356
+
357
+
#[tokio::test]
358
+
async fn test_create_redis_queue() {
359
+
let pool = match crate::test_helpers::get_test_redis_pool() {
360
+
Some(p) => p,
361
+
None => {
362
+
eprintln!("Skipping Redis test - no Redis connection available");
363
+
return;
364
+
}
365
+
};
366
+
367
+
let test_prefix = format!(
368
+
"test:factory:{}:",
369
+
std::time::SystemTime::now()
370
+
.duration_since(std::time::UNIX_EPOCH)
371
+
.unwrap()
372
+
.as_nanos()
373
+
);
374
+
375
+
let queue = create_redis_queue::<String>(pool, "test-worker".to_string(), test_prefix, 1);
376
+
377
+
queue.push("test-item".to_string()).await.unwrap();
378
+
let pulled = queue.pull().await;
379
+
assert_eq!(pulled, Some("test-item".to_string()));
380
+
}
381
+
}
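A sketch of the work-shedding behavior described for `create_sqlite_queue_with_max_size`, following the usage pattern shown in the factory docs above. It assumes, as those examples do, that `create_sqlite_pool` leaves the database ready for the queue; the file path and the limit of 100 are placeholders:

```rust
use quickdid::queue::{create_sqlite_queue_with_max_size, HandleResolutionWork, QueueAdapter};
use quickdid::sqlite_schema::create_sqlite_pool;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Placeholder path; any writable location works.
    let pool = create_sqlite_pool("sqlite:./quickdid-demo.db").await?;

    // Keep at most 100 pending work items; older entries are shed first.
    let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 100);

    for i in 0..500 {
        queue
            .push(HandleResolutionWork::new(format!("user-{i}.example.com")))
            .await?;
    }

    // Depth stays bounded even though 500 items were pushed.
    println!("queue depth after shedding: {:?}", queue.depth().await);
    Ok(())
}
```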
+78
src/queue/mod.rs
···
1
+
//! Queue adapter system for work queue abstraction.
2
+
//!
3
+
//! This module provides a generic trait and implementations for queue adapters
4
+
//! that can be used with any work type for handle resolution and other tasks.
5
+
//!
6
+
//! # Architecture
7
+
//!
8
+
//! The queue system is designed with the following components:
9
+
//!
10
+
//! - **Trait**: `QueueAdapter` - Common interface for all queue implementations
11
+
//! - **Implementations**:
12
+
//! - `MpscQueueAdapter` - In-memory MPSC channel-based queue
13
+
//! - `RedisQueueAdapter` - Distributed Redis-backed queue
14
+
//! - `SqliteQueueAdapter` - Persistent SQLite-backed queue
15
+
//! - `NoopQueueAdapter` - No-operation queue for testing
16
+
//! - **Work Types**: `HandleResolutionWork` - Work items for handle resolution
17
+
//! - **Factory Functions**: Convenient functions for creating queue adapters
18
+
//!
19
+
//! # Examples
20
+
//!
21
+
//! ## Simple In-Memory Queue
22
+
//!
23
+
//! ```
24
+
//! use quickdid::queue::{create_mpsc_queue, QueueAdapter};
25
+
//!
26
+
//! # async fn example() -> anyhow::Result<()> {
27
+
//! let queue = create_mpsc_queue::<String>(100);
28
+
//!
29
+
//! queue.push("work-item".to_string()).await?;
30
+
//! if let Some(item) = queue.pull().await {
31
+
//! println!("Processing: {}", item);
32
+
//! }
33
+
//! # Ok(())
34
+
//! # }
35
+
//! ```
36
+
//!
37
+
//! ## Persistent Queue with Work Shedding
38
+
//!
39
+
//! ```no_run
40
+
//! use quickdid::queue::{create_sqlite_queue_with_max_size, HandleResolutionWork};
41
+
//! use quickdid::sqlite_schema::create_sqlite_pool;
42
+
//!
43
+
//! # async fn example() -> anyhow::Result<()> {
44
+
//! let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
45
+
//! let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 10000);
46
+
//!
47
+
//! let work = HandleResolutionWork::new("alice.bsky.social".to_string());
48
+
//! queue.push(work).await?;
49
+
//! # Ok(())
50
+
//! # }
51
+
//! ```
52
+
53
+
// Internal modules
54
+
mod adapter;
55
+
mod error;
56
+
mod factory;
57
+
mod mpsc;
58
+
mod noop;
59
+
mod redis;
60
+
mod sqlite;
61
+
mod work;
62
+
63
+
// Re-export core types
64
+
pub use adapter::QueueAdapter;
65
+
pub use error::{QueueError, Result};
66
+
pub use work::{DedupKey, HandleResolutionWork};
67
+
68
+
// Re-export implementations (with limited visibility)
69
+
pub use mpsc::MpscQueueAdapter;
70
+
pub use noop::NoopQueueAdapter;
71
+
pub use redis::RedisQueueAdapter;
72
+
pub use sqlite::SqliteQueueAdapter;
73
+
74
+
// Re-export factory functions
75
+
pub use factory::{
76
+
create_mpsc_queue, create_mpsc_queue_from_channel, create_noop_queue, create_redis_queue,
77
+
create_redis_queue_with_dedup, create_sqlite_queue, create_sqlite_queue_with_max_size,
78
+
};
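The module doc above describes the pull, process, ack flow that workers follow regardless of backend. A minimal sketch of such a worker loop over the trait object; the `process` function and the shutdown handling are hypothetical simplifications:

```rust
use quickdid::queue::{create_mpsc_queue, QueueAdapter};
use std::sync::Arc;
use std::time::Duration;

/// Hypothetical stand-in for real handling logic.
async fn process(item: &str) {
    println!("processing {item}");
}

/// Generic worker: works the same over MPSC, Redis, or SQLite adapters.
async fn run_worker(queue: Arc<dyn QueueAdapter<String>>) {
    // pull() blocks until work arrives; None means the queue closed.
    while let Some(item) = queue.pull().await {
        process(&item).await;
        // ack() is a no-op for MPSC but matters for reliable backends.
        if let Err(e) = queue.ack(&item).await {
            eprintln!("ack failed: {e}");
        }
    }
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let queue = create_mpsc_queue::<String>(16);
    queue.push("alice.bsky.social".to_string()).await?;

    // Run the worker briefly, then stop it; a real service would keep it
    // running until shutdown.
    let worker = tokio::spawn(run_worker(queue.clone()));
    tokio::time::sleep(Duration::from_millis(100)).await;
    worker.abort();
    Ok(())
}
```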
+286
src/queue/mpsc.rs
···
1
+
//! MPSC channel-based queue adapter implementation.
2
+
//!
3
+
//! This module provides an in-memory queue implementation using Tokio's
4
+
//! multi-producer, single-consumer (MPSC) channels. It's suitable for
5
+
//! single-instance deployments with moderate throughput requirements.
6
+
7
+
use async_trait::async_trait;
8
+
use std::sync::Arc;
9
+
use tokio::sync::{Mutex, mpsc};
10
+
11
+
use super::adapter::QueueAdapter;
12
+
use super::error::{QueueError, Result};
13
+
14
+
/// MPSC channel-based queue adapter implementation.
15
+
///
16
+
/// This adapter uses tokio's multi-producer, single-consumer channel
17
+
/// for in-memory queuing of work items. It provides fast, lock-free
18
+
/// operation for single-instance deployments.
19
+
///
20
+
/// # Features
21
+
///
22
+
/// - In-memory operation (no persistence)
23
+
/// - Bounded capacity with backpressure
24
+
/// - Fast push/pull operations
25
+
/// - No acknowledgment needed (fire-and-forget)
26
+
///
27
+
/// # Limitations
28
+
///
29
+
/// - No persistence across restarts
30
+
/// - Single consumer only
31
+
/// - No distributed operation
32
+
///
33
+
/// # Examples
34
+
///
35
+
/// ```
36
+
/// use quickdid::queue::MpscQueueAdapter;
37
+
/// use quickdid::queue::QueueAdapter;
38
+
///
39
+
/// # async fn example() -> anyhow::Result<()> {
40
+
/// // Create a queue with buffer size of 100
41
+
/// let queue = MpscQueueAdapter::<String>::new(100);
42
+
///
43
+
/// // Push items
44
+
/// queue.push("item1".to_string()).await?;
45
+
/// queue.push("item2".to_string()).await?;
46
+
///
47
+
/// // Pull items
48
+
/// while let Some(item) = queue.pull().await {
49
+
/// println!("Processing: {}", item);
50
+
/// }
51
+
/// # Ok(())
52
+
/// # }
53
+
/// ```
54
+
pub struct MpscQueueAdapter<T>
55
+
where
56
+
T: Send + Sync + 'static,
57
+
{
58
+
receiver: Arc<Mutex<mpsc::Receiver<T>>>,
59
+
sender: mpsc::Sender<T>,
60
+
}
61
+
62
+
impl<T> MpscQueueAdapter<T>
63
+
where
64
+
T: Send + Sync + 'static,
65
+
{
66
+
/// Create a new MPSC queue adapter with the specified buffer size.
67
+
///
68
+
/// # Arguments
69
+
///
70
+
/// * `buffer` - The maximum number of items that can be buffered
71
+
///
72
+
/// # Examples
73
+
///
74
+
/// ```
75
+
/// use quickdid::queue::MpscQueueAdapter;
76
+
///
77
+
/// let queue = MpscQueueAdapter::<String>::new(100);
78
+
/// ```
79
+
pub fn new(buffer: usize) -> Self {
80
+
let (sender, receiver) = mpsc::channel(buffer);
81
+
Self {
82
+
receiver: Arc::new(Mutex::new(receiver)),
83
+
sender,
84
+
}
85
+
}
86
+
87
+
/// Create an adapter from existing MPSC channels.
88
+
///
89
+
/// This constructor is useful for integrating with existing channel-based
90
+
/// architectures or when you need custom channel configuration.
91
+
///
92
+
/// # Arguments
93
+
///
94
+
/// * `sender` - The sender half of the channel
95
+
/// * `receiver` - The receiver half of the channel
96
+
///
97
+
/// # Examples
98
+
///
99
+
/// ```
100
+
/// use tokio::sync::mpsc;
101
+
/// use quickdid::queue::MpscQueueAdapter;
102
+
///
103
+
/// let (sender, receiver) = mpsc::channel::<String>(50);
104
+
/// let queue = MpscQueueAdapter::from_channel(sender, receiver);
105
+
/// ```
106
+
pub fn from_channel(sender: mpsc::Sender<T>, receiver: mpsc::Receiver<T>) -> Self {
107
+
Self {
108
+
receiver: Arc::new(Mutex::new(receiver)),
109
+
sender,
110
+
}
111
+
}
112
+
}
113
+
114
+
#[async_trait]
115
+
impl<T> QueueAdapter<T> for MpscQueueAdapter<T>
116
+
where
117
+
T: Send + Sync + 'static,
118
+
{
119
+
async fn pull(&self) -> Option<T> {
120
+
let mut receiver = self.receiver.lock().await;
121
+
receiver.recv().await
122
+
}
123
+
124
+
async fn push(&self, work: T) -> Result<()> {
125
+
self.sender
126
+
.send(work)
127
+
.await
128
+
.map_err(|e| QueueError::PushFailed(e.to_string()))
129
+
}
130
+
131
+
async fn try_push(&self, work: T) -> Result<()> {
132
+
self.sender.try_send(work).map_err(|e| match e {
133
+
mpsc::error::TrySendError::Full(_) => QueueError::QueueFull,
134
+
mpsc::error::TrySendError::Closed(_) => QueueError::QueueClosed,
135
+
})
136
+
}
137
+
138
+
async fn depth(&self) -> Option<usize> {
139
+
// Note: This is an approximation as mpsc doesn't provide exact depth
140
+
Some(self.sender.max_capacity() - self.sender.capacity())
141
+
}
142
+
143
+
async fn is_healthy(&self) -> bool {
144
+
!self.sender.is_closed()
145
+
}
146
+
}
147
+
148
+
#[cfg(test)]
149
+
mod tests {
150
+
use super::*;
151
+
152
+
#[tokio::test]
153
+
async fn test_mpsc_queue_push_pull() {
154
+
let queue = MpscQueueAdapter::<String>::new(10);
155
+
156
+
// Test push
157
+
queue.push("test1".to_string()).await.unwrap();
158
+
queue.push("test2".to_string()).await.unwrap();
159
+
160
+
// Test pull in FIFO order
161
+
let item1 = queue.pull().await;
162
+
assert_eq!(item1, Some("test1".to_string()));
163
+
164
+
let item2 = queue.pull().await;
165
+
assert_eq!(item2, Some("test2".to_string()));
166
+
}
167
+
168
+
#[tokio::test]
169
+
async fn test_mpsc_queue_try_push() {
170
+
// Create a small queue to test full condition
171
+
let queue = MpscQueueAdapter::<i32>::new(2);
172
+
173
+
// Fill the queue
174
+
queue.push(1).await.unwrap();
175
+
queue.push(2).await.unwrap();
176
+
177
+
// Try to push when full should fail
178
+
let result = queue.try_push(3).await;
179
+
assert!(matches!(result, Err(QueueError::QueueFull)));
180
+
181
+
// Pull one item to make space
182
+
let _ = queue.pull().await;
183
+
184
+
// Now try_push should succeed
185
+
queue.try_push(3).await.unwrap();
186
+
}
187
+
188
+
#[tokio::test]
189
+
async fn test_mpsc_queue_from_channel() {
190
+
let (sender, receiver) = mpsc::channel(5);
191
+
let queue = MpscQueueAdapter::from_channel(sender.clone(), receiver);
192
+
193
+
// Send via original sender
194
+
sender.send("external".to_string()).await.unwrap();
195
+
196
+
// Send via queue
197
+
queue.push("internal".to_string()).await.unwrap();
198
+
199
+
// Pull both items
200
+
assert_eq!(queue.pull().await, Some("external".to_string()));
201
+
assert_eq!(queue.pull().await, Some("internal".to_string()));
202
+
}
203
+
204
+
#[tokio::test]
205
+
async fn test_mpsc_queue_health() {
206
+
let queue = MpscQueueAdapter::<String>::new(10);
207
+
208
+
// Queue should be healthy initially
209
+
assert!(queue.is_healthy().await);
210
+
211
+
// Create a queue and drop the receiver to close it
212
+
let (sender, receiver) = mpsc::channel::<String>(10);
213
+
drop(receiver);
214
+
let closed_queue = MpscQueueAdapter::from_channel(sender, mpsc::channel(1).1);
215
+
216
+
// Push should fail on closed queue
217
+
let result = closed_queue.push("test".to_string()).await;
218
+
assert!(result.is_err());
219
+
}
220
+
221
+
#[tokio::test]
222
+
async fn test_mpsc_queue_depth() {
223
+
let queue = MpscQueueAdapter::<i32>::new(10);
224
+
225
+
// Initially empty
226
+
let depth = queue.depth().await;
227
+
assert_eq!(depth, Some(0));
228
+
229
+
// Add items and check depth
230
+
queue.push(1).await.unwrap();
231
+
queue.push(2).await.unwrap();
232
+
queue.push(3).await.unwrap();
233
+
234
+
let depth = queue.depth().await;
235
+
assert_eq!(depth, Some(3));
236
+
237
+
// Pull an item and check depth
238
+
let _ = queue.pull().await;
239
+
let depth = queue.depth().await;
240
+
assert_eq!(depth, Some(2));
241
+
}
242
+
243
+
#[tokio::test]
244
+
async fn test_mpsc_queue_concurrent_operations() {
245
+
use std::sync::Arc;
246
+
247
+
let queue = Arc::new(MpscQueueAdapter::<i32>::new(100));
248
+
249
+
// Spawn multiple producers
250
+
let mut handles = vec![];
251
+
for i in 0..10 {
252
+
let q = queue.clone();
253
+
handles.push(tokio::spawn(async move {
254
+
for j in 0..10 {
255
+
q.push(i * 10 + j).await.unwrap();
256
+
}
257
+
}));
258
+
}
259
+
260
+
// Wait for all producers
261
+
for handle in handles {
262
+
handle.await.unwrap();
263
+
}
264
+
265
+
// Verify we can pull all 100 items
266
+
let mut count = 0;
267
+
while queue.pull().await.is_some() {
268
+
count += 1;
269
+
if count >= 100 {
270
+
break;
271
+
}
272
+
}
273
+
assert_eq!(count, 100);
274
+
}
275
+
276
+
#[tokio::test]
277
+
async fn test_mpsc_queue_no_ack_needed() {
278
+
let queue = MpscQueueAdapter::<String>::new(10);
279
+
280
+
queue.push("test".to_string()).await.unwrap();
281
+
let item = queue.pull().await.unwrap();
282
+
283
+
// Ack should always succeed (no-op)
284
+
queue.ack(&item).await.unwrap();
285
+
}
286
+
}
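A short sketch of the backpressure behavior documented above: `try_push` fails fast with `QueueFull` on a full buffer, while `push` would await until space frees up. The buffer size of 2 is arbitrary and chosen only to trigger the full case:

```rust
use quickdid::queue::{MpscQueueAdapter, QueueAdapter, QueueError};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let queue = MpscQueueAdapter::<u32>::new(2);
    queue.push(1).await?;
    queue.push(2).await?;

    // Buffer is full, so the non-blocking path fails immediately.
    match queue.try_push(3).await {
        Err(QueueError::QueueFull) => println!("full: shed the item or retry later"),
        other => println!("unexpected result: {other:?}"),
    }

    // Draining one item makes room again.
    let _ = queue.pull().await;
    queue.try_push(3).await?;
    println!("approximate depth: {:?}", queue.depth().await);
    Ok(())
}
```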
+222
src/queue/noop.rs
···
1
+
//! No-operation queue adapter implementation.
2
+
//!
3
+
//! This module provides a queue adapter that discards all work items,
4
+
//! useful for testing or when queue processing is disabled.
5
+
6
+
use async_trait::async_trait;
7
+
use std::time::Duration;
8
+
use tokio::time::sleep;
9
+
10
+
use super::adapter::QueueAdapter;
11
+
use super::error::Result;
12
+
13
+
/// No-operation queue adapter that discards all work items.
14
+
///
15
+
/// This adapter is useful for configurations where queuing is disabled
16
+
/// or as a fallback when other queue adapters fail to initialize.
17
+
/// All work items pushed to this queue are silently discarded.
18
+
///
19
+
/// # Features
20
+
///
21
+
/// - Zero resource usage
22
+
/// - Always healthy
23
+
/// - Discards all work items
24
+
/// - Never returns items from pull
25
+
///
26
+
/// # Use Cases
27
+
///
28
+
/// - Testing environments where queue processing isn't needed
29
+
/// - Graceful degradation when queue backends are unavailable
30
+
/// - Configurations where queue processing is explicitly disabled
31
+
///
32
+
/// # Examples
33
+
///
34
+
/// ```
35
+
/// use quickdid::queue::NoopQueueAdapter;
36
+
/// use quickdid::queue::QueueAdapter;
37
+
///
38
+
/// # async fn example() -> anyhow::Result<()> {
39
+
/// let queue = NoopQueueAdapter::<String>::new();
40
+
///
41
+
/// // Push is silently discarded
42
+
/// queue.push("ignored".to_string()).await?;
43
+
///
44
+
/// // Pull never yields work; it sleeps and then returns None
45
+
/// // let item = queue.pull().await; // Waits, then returns None
46
+
///
47
+
/// // Always reports healthy
48
+
/// assert!(queue.is_healthy().await);
49
+
///
50
+
/// // Always reports empty
51
+
/// assert_eq!(queue.depth().await, Some(0));
52
+
/// # Ok(())
53
+
/// # }
54
+
/// ```
55
+
pub struct NoopQueueAdapter<T>
56
+
where
57
+
T: Send + Sync + 'static,
58
+
{
59
+
_phantom: std::marker::PhantomData<T>,
60
+
}
61
+
62
+
impl<T> NoopQueueAdapter<T>
63
+
where
64
+
T: Send + Sync + 'static,
65
+
{
66
+
/// Create a new no-op queue adapter.
67
+
///
68
+
/// # Examples
69
+
///
70
+
/// ```
71
+
/// use quickdid::queue::NoopQueueAdapter;
72
+
///
73
+
/// let queue = NoopQueueAdapter::<String>::new();
74
+
/// ```
75
+
pub fn new() -> Self {
76
+
Self {
77
+
_phantom: std::marker::PhantomData,
78
+
}
79
+
}
80
+
}
81
+
82
+
impl<T> Default for NoopQueueAdapter<T>
83
+
where
84
+
T: Send + Sync + 'static,
85
+
{
86
+
fn default() -> Self {
87
+
Self::new()
88
+
}
89
+
}
90
+
91
+
#[async_trait]
92
+
impl<T> QueueAdapter<T> for NoopQueueAdapter<T>
93
+
where
94
+
T: Send + Sync + 'static,
95
+
{
96
+
async fn pull(&self) -> Option<T> {
97
+
// Never returns any work - sleeps to avoid busy-waiting
98
+
sleep(Duration::from_secs(60)).await;
99
+
None
100
+
}
101
+
102
+
async fn push(&self, _work: T) -> Result<()> {
103
+
// Silently discard the work
104
+
Ok(())
105
+
}
106
+
107
+
async fn ack(&self, _item: &T) -> Result<()> {
108
+
// No-op
109
+
Ok(())
110
+
}
111
+
112
+
async fn try_push(&self, _work: T) -> Result<()> {
113
+
// Silently discard the work
114
+
Ok(())
115
+
}
116
+
117
+
async fn depth(&self) -> Option<usize> {
118
+
// Always empty
119
+
Some(0)
120
+
}
121
+
122
+
async fn is_healthy(&self) -> bool {
123
+
// Always healthy
124
+
true
125
+
}
126
+
}
127
+
128
+
#[cfg(test)]
129
+
mod tests {
130
+
use super::*;
131
+
132
+
#[tokio::test]
133
+
async fn test_noop_queue_push() {
134
+
let queue = NoopQueueAdapter::<String>::new();
135
+
136
+
// Push should always succeed
137
+
queue.push("test1".to_string()).await.unwrap();
138
+
queue.push("test2".to_string()).await.unwrap();
139
+
queue.push("test3".to_string()).await.unwrap();
140
+
}
141
+
142
+
#[tokio::test]
143
+
async fn test_noop_queue_try_push() {
144
+
let queue = NoopQueueAdapter::<i32>::new();
145
+
146
+
// Try push should always succeed
147
+
queue.try_push(1).await.unwrap();
148
+
queue.try_push(2).await.unwrap();
149
+
queue.try_push(3).await.unwrap();
150
+
}
151
+
152
+
#[tokio::test]
153
+
async fn test_noop_queue_ack() {
154
+
let queue = NoopQueueAdapter::<String>::new();
155
+
156
+
// Ack should always succeed
157
+
queue.ack(&"any".to_string()).await.unwrap();
158
+
}
159
+
160
+
#[tokio::test]
161
+
async fn test_noop_queue_depth() {
162
+
let queue = NoopQueueAdapter::<String>::new();
163
+
164
+
// Should always report empty
165
+
assert_eq!(queue.depth().await, Some(0));
166
+
167
+
// Even after pushing items
168
+
queue.push("item".to_string()).await.unwrap();
169
+
assert_eq!(queue.depth().await, Some(0));
170
+
}
171
+
172
+
#[tokio::test]
173
+
async fn test_noop_queue_health() {
174
+
let queue = NoopQueueAdapter::<String>::new();
175
+
176
+
// Should always be healthy
177
+
assert!(queue.is_healthy().await);
178
+
}
179
+
180
+
#[tokio::test]
181
+
async fn test_noop_queue_default() {
182
+
let queue: NoopQueueAdapter<String> = Default::default();
183
+
184
+
// Default instance should work normally
185
+
queue.push("test".to_string()).await.unwrap();
186
+
assert!(queue.is_healthy().await);
187
+
}
188
+
189
+
#[tokio::test(flavor = "multi_thread")]
190
+
async fn test_noop_queue_pull_blocks() {
191
+
use tokio::time::timeout;
192
+
193
+
let queue = NoopQueueAdapter::<String>::new();
194
+
195
+
// Pull should block and not return immediately
196
+
let result = timeout(Duration::from_millis(100), queue.pull()).await;
197
+
assert!(result.is_err(), "Pull should have timed out");
198
+
}
199
+
200
+
#[tokio::test]
201
+
async fn test_noop_queue_with_custom_type() {
202
+
use serde::{Deserialize, Serialize};
203
+
204
+
#[derive(Debug, Clone, Serialize, Deserialize)]
205
+
struct CustomWork {
206
+
id: u64,
207
+
data: Vec<String>,
208
+
}
209
+
210
+
let queue = NoopQueueAdapter::<CustomWork>::new();
211
+
212
+
let work = CustomWork {
213
+
id: 123,
214
+
data: vec!["test".to_string()],
215
+
};
216
+
217
+
// Should handle custom types without issue
218
+
queue.push(work.clone()).await.unwrap();
219
+
queue.ack(&work).await.unwrap();
220
+
assert_eq!(queue.depth().await, Some(0));
221
+
}
222
+
}
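One of the use cases listed above is graceful degradation when a backend fails to initialize. A hedged sketch of that pattern, falling back from Redis to the no-op queue; the URL handling, worker id, and key prefix are placeholders, and the real service wires these from its configuration:

```rust
use deadpool_redis::Config;
use quickdid::queue::{create_noop_queue, create_redis_queue, HandleResolutionWork, QueueAdapter};
use std::sync::Arc;

/// Pick a backend at startup; fall back to the no-op queue if Redis is
/// unavailable so the service keeps serving requests.
fn build_queue(redis_url: Option<&str>) -> Arc<dyn QueueAdapter<HandleResolutionWork>> {
    if let Some(url) = redis_url {
        let cfg = Config::from_url(url);
        match cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1)) {
            Ok(pool) => {
                return create_redis_queue::<HandleResolutionWork>(
                    pool,
                    "worker-1".to_string(),
                    "queue:quickdid:".to_string(),
                    5,
                );
            }
            Err(e) => eprintln!("Redis pool creation failed ({e}), using no-op queue"),
        }
    }
    // All work is discarded, but nothing panics and health checks stay green.
    create_noop_queue::<HandleResolutionWork>()
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let queue = build_queue(None);
    queue
        .push(HandleResolutionWork::new("alice.bsky.social".to_string()))
        .await?;
    assert!(queue.is_healthy().await);
    Ok(())
}
```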
+702
src/queue/redis.rs
···
1
+
//! Redis-backed queue adapter implementation.
2
+
//!
3
+
//! This module provides a distributed queue implementation using Redis lists
4
+
//! with a reliable queue pattern for at-least-once delivery semantics.
5
+
6
+
use async_trait::async_trait;
7
+
use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
8
+
use serde::{Deserialize, Serialize};
9
+
use tracing::{debug, error, warn};
10
+
11
+
use super::adapter::QueueAdapter;
12
+
use super::error::{QueueError, Result};
13
+
use super::work::DedupKey;
14
+
15
+
/// Redis-backed queue adapter implementation.
16
+
///
17
+
/// This adapter uses Redis lists with a reliable queue pattern:
18
+
/// - LPUSH to push items to the primary queue
19
+
/// - BRPOPLPUSH to atomically move items from primary to worker queue
20
+
/// - LREM to acknowledge processed items from worker queue
21
+
///
22
+
/// This ensures at-least-once delivery semantics and allows for recovery
23
+
/// of in-flight items if a worker crashes.
24
+
///
25
+
/// # Features
26
+
///
27
+
/// - Distributed operation across multiple instances
28
+
/// - Persistent storage with Redis
29
+
/// - At-least-once delivery guarantees
30
+
/// - Automatic recovery of failed items
31
+
/// - Configurable timeouts
32
+
///
33
+
/// # Architecture
34
+
///
35
+
/// ```text
36
+
/// Producer -> [Primary Queue] -> BRPOPLPUSH -> [Worker Queue] -> Consumer
37
+
/// |
38
+
/// LREM (on ack)
39
+
/// ```
40
+
///
41
+
/// # Examples
42
+
///
43
+
/// ```no_run
44
+
/// use quickdid::queue::{RedisQueueAdapter, QueueAdapter, HandleResolutionWork};
45
+
/// use deadpool_redis::Config;
46
+
///
47
+
/// # async fn example() -> anyhow::Result<()> {
48
+
/// // Create Redis pool
49
+
/// let cfg = Config::from_url("redis://localhost:6379");
50
+
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
51
+
///
52
+
/// // Create queue adapter
53
+
/// let queue = RedisQueueAdapter::<HandleResolutionWork>::new(
54
+
/// pool,
55
+
/// "worker-1".to_string(),
56
+
/// "queue:myapp:".to_string(),
57
+
/// 5, // 5 second timeout
58
+
/// );
59
+
///
60
+
/// // Use the queue
61
+
/// let work = HandleResolutionWork::new("alice.bsky.social".to_string());
62
+
/// queue.push(work.clone()).await?;
63
+
/// if let Some(item) = queue.pull().await {
64
+
/// // Process item
65
+
/// queue.ack(&item).await?;
66
+
/// }
67
+
/// # Ok(())
68
+
/// # }
69
+
/// ```
70
+
pub struct RedisQueueAdapter<T>
71
+
where
72
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
73
+
{
74
+
/// Redis connection pool
75
+
pool: RedisPool,
76
+
/// Unique worker ID for this adapter instance
77
+
worker_id: String,
78
+
/// Key prefix for all queues (default: "queue:handleresolver:")
79
+
key_prefix: String,
80
+
/// Timeout for blocking RPOPLPUSH operations (in seconds)
81
+
timeout_seconds: u64,
82
+
/// Enable deduplication to prevent duplicate items in queue
83
+
dedup_enabled: bool,
84
+
/// TTL for deduplication keys in seconds
85
+
dedup_ttl: u64,
86
+
/// Type marker for generic parameter
87
+
_phantom: std::marker::PhantomData<T>,
88
+
}
89
+
90
+
impl<T> RedisQueueAdapter<T>
91
+
where
92
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
93
+
{
94
+
/// Create a new Redis queue adapter.
95
+
///
96
+
/// # Arguments
97
+
///
98
+
/// * `pool` - Redis connection pool
99
+
/// * `worker_id` - Unique identifier for this worker instance
100
+
/// * `key_prefix` - Redis key prefix for queue operations
101
+
/// * `timeout_seconds` - Timeout for blocking pull operations
102
+
///
103
+
/// # Examples
104
+
///
105
+
/// ```no_run
106
+
/// use quickdid::queue::RedisQueueAdapter;
107
+
/// use deadpool_redis::Config;
108
+
///
109
+
/// # async fn example() -> anyhow::Result<()> {
110
+
/// let cfg = Config::from_url("redis://localhost:6379");
111
+
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
112
+
///
113
+
/// let queue = RedisQueueAdapter::<String>::new(
114
+
/// pool,
115
+
/// "worker-1".to_string(),
116
+
/// "queue:myapp:".to_string(),
117
+
/// 5,
118
+
/// );
119
+
/// # Ok(())
120
+
/// # }
121
+
/// ```
122
+
pub fn new(
123
+
pool: RedisPool,
124
+
worker_id: String,
125
+
key_prefix: String,
126
+
timeout_seconds: u64,
127
+
) -> Self {
128
+
Self::with_dedup(
129
+
pool,
130
+
worker_id,
131
+
key_prefix,
132
+
timeout_seconds,
133
+
false,
134
+
60, // Default TTL of 60 seconds
135
+
)
136
+
}
137
+
138
+
/// Create a new Redis queue adapter with deduplication settings.
139
+
///
140
+
/// # Arguments
141
+
///
142
+
/// * `pool` - Redis connection pool
143
+
/// * `worker_id` - Unique identifier for this worker instance
144
+
/// * `key_prefix` - Redis key prefix for queue operations
145
+
/// * `timeout_seconds` - Timeout for blocking pull operations
146
+
/// * `dedup_enabled` - Whether to enable deduplication
147
+
/// * `dedup_ttl` - TTL for deduplication keys in seconds
148
+
pub fn with_dedup(
149
+
pool: RedisPool,
150
+
worker_id: String,
151
+
key_prefix: String,
152
+
timeout_seconds: u64,
153
+
dedup_enabled: bool,
154
+
dedup_ttl: u64,
155
+
) -> Self {
156
+
Self {
157
+
pool,
158
+
worker_id,
159
+
key_prefix,
160
+
timeout_seconds,
161
+
dedup_enabled,
162
+
dedup_ttl,
163
+
_phantom: std::marker::PhantomData,
164
+
}
165
+
}
166
+
167
+
/// Get the primary queue key.
168
+
fn primary_queue_key(&self) -> String {
169
+
format!("{}primary", self.key_prefix)
170
+
}
171
+
172
+
/// Get the worker-specific temporary queue key.
173
+
fn worker_queue_key(&self) -> String {
174
+
format!("{}{}", self.key_prefix, self.worker_id)
175
+
}
176
+
177
+
/// Get the deduplication key for an item.
178
+
/// This key is used to track if an item is already queued.
179
+
fn dedup_key(&self, item_id: &str) -> String {
180
+
format!("{}dedup:{}", self.key_prefix, item_id)
181
+
}
182
+
183
+
/// Check and mark an item for deduplication.
184
+
/// Returns true if the item was successfully marked (not duplicate),
185
+
/// false if it was already in the deduplication set (duplicate).
186
+
async fn check_and_mark_dedup(
187
+
&self,
188
+
conn: &mut deadpool_redis::Connection,
189
+
item_id: &str,
190
+
) -> Result<bool> {
191
+
if !self.dedup_enabled {
192
+
return Ok(true); // Always allow if dedup is disabled
193
+
}
194
+
195
+
let dedup_key = self.dedup_key(item_id);
196
+
197
+
// Use SET NX EX to atomically set if not exists with expiry
198
+
// Returns OK if the key was set, Nil if it already existed
199
+
let result: Option<String> = deadpool_redis::redis::cmd("SET")
200
+
.arg(&dedup_key)
201
+
.arg("1")
202
+
.arg("NX") // Only set if not exists
203
+
.arg("EX") // Set expiry
204
+
.arg(self.dedup_ttl)
205
+
.query_async(conn)
206
+
.await
207
+
.map_err(|e| QueueError::RedisOperationFailed {
208
+
operation: "SET NX EX".to_string(),
209
+
details: e.to_string(),
210
+
})?;
211
+
212
+
// If result is Some("OK"), the key was set (not duplicate)
213
+
// If result is None, the key already existed (duplicate)
214
+
Ok(result.is_some())
215
+
}
216
+
}
217
+
218
+
#[async_trait]
219
+
impl<T> QueueAdapter<T> for RedisQueueAdapter<T>
220
+
where
221
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
222
+
{
223
+
async fn pull(&self) -> Option<T> {
224
+
match self.pool.get().await {
225
+
Ok(mut conn) => {
226
+
let primary_key = self.primary_queue_key();
227
+
let worker_key = self.worker_queue_key();
228
+
229
+
// Use blocking RPOPLPUSH to atomically move item from primary to worker queue
230
+
let data: Option<Vec<u8>> = match conn
231
+
.brpoplpush(&primary_key, &worker_key, self.timeout_seconds as f64)
232
+
.await
233
+
{
234
+
Ok(data) => data,
235
+
Err(e) => {
236
+
error!("Failed to pull from queue: {}", e);
237
+
return None;
238
+
}
239
+
};
240
+
241
+
if let Some(data) = data {
242
+
// Deserialize the item
243
+
match serde_json::from_slice(&data) {
244
+
Ok(item) => {
245
+
debug!(
246
+
worker_id = %self.worker_id,
247
+
"Pulled item from queue"
248
+
);
249
+
Some(item)
250
+
}
251
+
Err(e) => {
252
+
error!("Failed to deserialize item: {}", e);
253
+
// Remove the corrupted item from worker queue
254
+
let _: std::result::Result<(), _> =
255
+
conn.lrem(&worker_key, 1, &data).await;
256
+
None
257
+
}
258
+
}
259
+
} else {
260
+
None
261
+
}
262
+
}
263
+
Err(e) => {
264
+
error!("Failed to get Redis connection: {}", e);
265
+
None
266
+
}
267
+
}
268
+
}
269
+
270
+
async fn push(&self, work: T) -> Result<()> {
271
+
let mut conn = self
272
+
.pool
273
+
.get()
274
+
.await
275
+
.map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
276
+
277
+
// Check for deduplication if enabled
278
+
if self.dedup_enabled {
279
+
let dedup_id = work.dedup_key();
280
+
let is_new = self.check_and_mark_dedup(&mut conn, &dedup_id).await?;
281
+
282
+
if !is_new {
283
+
debug!(
284
+
dedup_key = %dedup_id,
285
+
"Item already queued, skipping duplicate"
286
+
);
287
+
return Ok(()); // Successfully deduplicated
288
+
}
289
+
}
290
+
291
+
let data = serde_json::to_vec(&work)
292
+
.map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
293
+
294
+
let primary_key = self.primary_queue_key();
295
+
296
+
conn.lpush::<_, _, ()>(&primary_key, data)
297
+
.await
298
+
.map_err(|e| QueueError::RedisOperationFailed {
299
+
operation: "LPUSH".to_string(),
300
+
details: e.to_string(),
301
+
})?;
302
+
303
+
debug!("Pushed item to queue");
304
+
Ok(())
305
+
}
306
+
307
+
async fn ack(&self, item: &T) -> Result<()> {
308
+
let mut conn = self
309
+
.pool
310
+
.get()
311
+
.await
312
+
.map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
313
+
314
+
let data =
315
+
serde_json::to_vec(item).map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
316
+
317
+
let worker_key = self.worker_queue_key();
318
+
319
+
// Remove exactly one occurrence of this item from the worker queue
320
+
let removed: i32 = conn.lrem(&worker_key, 1, &data).await.map_err(|e| {
321
+
QueueError::RedisOperationFailed {
322
+
operation: "LREM".to_string(),
323
+
details: e.to_string(),
324
+
}
325
+
})?;
326
+
327
+
if removed == 0 {
328
+
warn!(
329
+
worker_id = %self.worker_id,
330
+
"Item not found in worker queue during acknowledgment"
331
+
);
332
+
} else {
333
+
debug!(
334
+
worker_id = %self.worker_id,
335
+
"Acknowledged item"
336
+
);
337
+
}
338
+
339
+
Ok(())
340
+
}
341
+
342
+
async fn depth(&self) -> Option<usize> {
343
+
match self.pool.get().await {
344
+
Ok(mut conn) => {
345
+
let primary_key = self.primary_queue_key();
346
+
match conn.llen::<_, usize>(&primary_key).await {
347
+
Ok(len) => Some(len),
348
+
Err(e) => {
349
+
error!("Failed to get queue depth: {}", e);
350
+
None
351
+
}
352
+
}
353
+
}
354
+
Err(e) => {
355
+
error!("Failed to get Redis connection: {}", e);
356
+
None
357
+
}
358
+
}
359
+
}
360
+
361
+
async fn is_healthy(&self) -> bool {
362
+
match self.pool.get().await {
363
+
Ok(mut conn) => {
364
+
// Ping Redis to check health
365
+
match deadpool_redis::redis::cmd("PING")
366
+
.query_async::<String>(&mut conn)
367
+
.await
368
+
{
369
+
Ok(response) => response == "PONG",
370
+
Err(_) => false,
371
+
}
372
+
}
373
+
Err(_) => false,
374
+
}
375
+
}
376
+
}
377
+
378
+
#[cfg(test)]
379
+
mod tests {
380
+
use super::*;
381
+
382
+
#[tokio::test]
383
+
async fn test_redis_queue_push_pull() {
384
+
let pool = match crate::test_helpers::get_test_redis_pool() {
385
+
Some(p) => p,
386
+
None => {
387
+
eprintln!("Skipping Redis test - no Redis connection available");
388
+
return;
389
+
}
390
+
};
391
+
392
+
// Create adapter with unique prefix for testing
393
+
let test_prefix = format!(
394
+
"test:queue:{}:",
395
+
std::time::SystemTime::now()
396
+
.duration_since(std::time::UNIX_EPOCH)
397
+
.unwrap()
398
+
.as_nanos()
399
+
);
400
+
let adapter = RedisQueueAdapter::<String>::new(
401
+
pool.clone(),
402
+
"test-worker".to_string(),
403
+
test_prefix.clone(),
404
+
1, // 1 second timeout for tests
405
+
);
406
+
407
+
// Test push
408
+
adapter.push("test-item".to_string()).await.unwrap();
409
+
410
+
// Test pull
411
+
let pulled = adapter.pull().await;
412
+
assert_eq!(pulled, Some("test-item".to_string()));
413
+
414
+
// Test ack
415
+
adapter
416
+
.ack(&"test-item".to_string())
417
+
.await
418
+
.expect("Ack should succeed");
419
+
}
420
+
421
+
#[tokio::test]
422
+
async fn test_redis_queue_reliable_delivery() {
423
+
let pool = match crate::test_helpers::get_test_redis_pool() {
424
+
Some(p) => p,
425
+
None => {
426
+
eprintln!("Skipping Redis test - no Redis connection available");
427
+
return;
428
+
}
429
+
};
430
+
431
+
let test_prefix = format!(
432
+
"test:queue:{}:",
433
+
std::time::SystemTime::now()
434
+
.duration_since(std::time::UNIX_EPOCH)
435
+
.unwrap()
436
+
.as_nanos()
437
+
);
438
+
let worker_id = "test-worker-reliable";
439
+
440
+
// Create adapter
441
+
let adapter1 = RedisQueueAdapter::<String>::new(
442
+
pool.clone(),
443
+
worker_id.to_string(),
444
+
test_prefix.clone(),
445
+
1,
446
+
);
447
+
448
+
// Push multiple items
449
+
adapter1.push("item1".to_string()).await.unwrap();
450
+
adapter1.push("item2".to_string()).await.unwrap();
451
+
adapter1.push("item3".to_string()).await.unwrap();
452
+
453
+
// Pull but don't ack (simulating worker crash)
454
+
let item1 = adapter1.pull().await;
455
+
assert_eq!(item1, Some("item1".to_string()));
456
+
457
+
// Item should be in worker queue
458
+
// In production, a recovery process would handle unacked items
459
+
// For this test we simply keep pulling; item1 remains un-acked in the worker queue
460
+
let item2 = adapter1.pull().await;
461
+
assert_eq!(item2, Some("item2".to_string()));
462
+
463
+
// Ack the second item
464
+
adapter1.ack(&"item2".to_string()).await.unwrap();
465
+
}
466
+
467
+
#[tokio::test]
468
+
async fn test_redis_queue_depth() {
469
+
let pool = match crate::test_helpers::get_test_redis_pool() {
470
+
Some(p) => p,
471
+
None => {
472
+
eprintln!("Skipping Redis test - no Redis connection available");
473
+
return;
474
+
}
475
+
};
476
+
477
+
let test_prefix = format!(
478
+
"test:queue:{}:",
479
+
std::time::SystemTime::now()
480
+
.duration_since(std::time::UNIX_EPOCH)
481
+
.unwrap()
482
+
.as_nanos()
483
+
);
484
+
let adapter =
485
+
RedisQueueAdapter::<String>::new(pool, "test-worker-depth".to_string(), test_prefix, 1);
486
+
487
+
// Initially empty
488
+
assert_eq!(adapter.depth().await, Some(0));
489
+
490
+
// Push items and check depth
491
+
adapter.push("item1".to_string()).await.unwrap();
492
+
assert_eq!(adapter.depth().await, Some(1));
493
+
494
+
adapter.push("item2".to_string()).await.unwrap();
495
+
assert_eq!(adapter.depth().await, Some(2));
496
+
497
+
// Pull and check depth (note: depth checks primary queue)
498
+
let _ = adapter.pull().await;
499
+
assert_eq!(adapter.depth().await, Some(1));
500
+
}
501
+
502
+
#[tokio::test]
503
+
async fn test_redis_queue_health() {
504
+
let pool = match crate::test_helpers::get_test_redis_pool() {
505
+
Some(p) => p,
506
+
None => {
507
+
eprintln!("Skipping Redis test - no Redis connection available");
508
+
return;
509
+
}
510
+
};
511
+
512
+
let adapter = RedisQueueAdapter::<String>::new(
513
+
pool,
514
+
"test-worker-health".to_string(),
515
+
"test:queue:health:".to_string(),
516
+
1,
517
+
);
518
+
519
+
// Should be healthy if Redis is running
520
+
assert!(adapter.is_healthy().await);
521
+
}
522
+
523
+
#[tokio::test]
524
+
async fn test_redis_queue_deduplication() {
525
+
use crate::queue::HandleResolutionWork;
526
+
527
+
let pool = match crate::test_helpers::get_test_redis_pool() {
528
+
Some(p) => p,
529
+
None => {
530
+
eprintln!("Skipping Redis test - no Redis connection available");
531
+
return;
532
+
}
533
+
};
534
+
535
+
let test_prefix = format!(
536
+
"test:queue:dedup:{}:",
537
+
std::time::SystemTime::now()
538
+
.duration_since(std::time::UNIX_EPOCH)
539
+
.unwrap()
540
+
.as_nanos()
541
+
);
542
+
543
+
// Create adapter with deduplication enabled
544
+
let adapter = RedisQueueAdapter::<HandleResolutionWork>::with_dedup(
545
+
pool.clone(),
546
+
"test-worker-dedup".to_string(),
547
+
test_prefix.clone(),
548
+
1,
549
+
true, // Enable deduplication
550
+
2, // 2 second TTL for quick testing
551
+
);
552
+
553
+
let work = HandleResolutionWork::new("alice.example.com".to_string());
554
+
555
+
// First push should succeed
556
+
adapter
557
+
.push(work.clone())
558
+
.await
559
+
.expect("First push should succeed");
560
+
561
+
// Second push of same item should be deduplicated (but still return Ok)
562
+
adapter
563
+
.push(work.clone())
564
+
.await
565
+
.expect("Second push should succeed (deduplicated)");
566
+
567
+
// Queue should only have one item
568
+
let depth = adapter.depth().await;
569
+
assert_eq!(
570
+
depth,
571
+
Some(1),
572
+
"Queue should only have one item after deduplication"
573
+
);
574
+
575
+
// Pull the item
576
+
let pulled = adapter.pull().await;
577
+
assert_eq!(pulled, Some(work.clone()));
578
+
579
+
// Queue should now be empty
580
+
let depth = adapter.depth().await;
581
+
assert_eq!(depth, Some(0), "Queue should be empty after pulling");
582
+
583
+
// Wait for dedup TTL to expire
584
+
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
585
+
586
+
// Should be able to push again after TTL expires
587
+
adapter
588
+
.push(work.clone())
589
+
.await
590
+
.expect("Push after TTL expiry should succeed");
591
+
592
+
let depth = adapter.depth().await;
593
+
assert_eq!(
594
+
depth,
595
+
Some(1),
596
+
"Queue should have one item after TTL expiry"
597
+
);
598
+
}
599
+
600
+
#[tokio::test]
601
+
async fn test_redis_queue_deduplication_disabled() {
602
+
use crate::queue::HandleResolutionWork;
603
+
604
+
let pool = match crate::test_helpers::get_test_redis_pool() {
605
+
Some(p) => p,
606
+
None => {
607
+
eprintln!("Skipping Redis test - no Redis connection available");
608
+
return;
609
+
}
610
+
};
611
+
612
+
let test_prefix = format!(
613
+
"test:queue:nodedup:{}:",
614
+
std::time::SystemTime::now()
615
+
.duration_since(std::time::UNIX_EPOCH)
616
+
.unwrap()
617
+
.as_nanos()
618
+
);
619
+
620
+
// Create adapter with deduplication disabled
621
+
let adapter = RedisQueueAdapter::<HandleResolutionWork>::with_dedup(
622
+
pool.clone(),
623
+
"test-worker-nodedup".to_string(),
624
+
test_prefix.clone(),
625
+
1,
626
+
false, // Disable deduplication
627
+
60,
628
+
);
629
+
630
+
let work = HandleResolutionWork::new("bob.example.com".to_string());
631
+
632
+
// Push same item twice
633
+
adapter
634
+
.push(work.clone())
635
+
.await
636
+
.expect("First push should succeed");
637
+
adapter
638
+
.push(work.clone())
639
+
.await
640
+
.expect("Second push should succeed");
641
+
642
+
// Queue should have two items (no deduplication)
643
+
let depth = adapter.depth().await;
644
+
assert_eq!(
645
+
depth,
646
+
Some(2),
647
+
"Queue should have two items when deduplication is disabled"
648
+
);
649
+
650
+
// Pull both items
651
+
let pulled1 = adapter.pull().await;
652
+
assert_eq!(pulled1, Some(work.clone()));
653
+
654
+
let pulled2 = adapter.pull().await;
655
+
assert_eq!(pulled2, Some(work.clone()));
656
+
657
+
// Queue should now be empty
658
+
let depth = adapter.depth().await;
659
+
assert_eq!(
660
+
depth,
661
+
Some(0),
662
+
"Queue should be empty after pulling all items"
663
+
);
664
+
}
665
+
666
+
#[tokio::test]
667
+
async fn test_redis_queue_serialization() {
668
+
use crate::queue::HandleResolutionWork;
669
+
670
+
let pool = match crate::test_helpers::get_test_redis_pool() {
671
+
Some(p) => p,
672
+
None => {
673
+
eprintln!("Skipping Redis test - no Redis connection available");
674
+
return;
675
+
}
676
+
};
677
+
678
+
let test_prefix = format!(
679
+
"test:queue:{}:",
680
+
std::time::SystemTime::now()
681
+
.duration_since(std::time::UNIX_EPOCH)
682
+
.unwrap()
683
+
.as_nanos()
684
+
);
685
+
let adapter = RedisQueueAdapter::<HandleResolutionWork>::new(
686
+
pool,
687
+
"test-worker-ser".to_string(),
688
+
test_prefix,
689
+
1,
690
+
);
691
+
692
+
let work = HandleResolutionWork::new("alice.example.com".to_string());
693
+
694
+
// Push and pull
695
+
adapter.push(work.clone()).await.unwrap();
696
+
let pulled = adapter.pull().await;
697
+
assert_eq!(pulled, Some(work.clone()));
698
+
699
+
// Ack
700
+
adapter.ack(&work).await.unwrap();
701
+
}
702
+
}
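The tests above exercise the reliable-queue pattern end to end: items are LPUSHed onto the primary queue, BRPOPLPUSH moves them atomically into a per-worker queue on pull, and LREM removes them on ack. Items that are pulled but never acked stay parked in the worker queue, so a deployment needs some recovery step that pushes them back. Below is a minimal sketch of such a helper; it is not part of this change and assumes the `{prefix}primary` / `{prefix}{worker_id}` key layout used by the adapter.

```rust
use deadpool_redis::{Pool, redis::AsyncCommands};

/// Sketch only: drain a (possibly crashed) worker's queue back onto the primary
/// queue so un-acked items are re-delivered. The key layout is assumed to match
/// the adapter above: "{prefix}primary" and "{prefix}{worker_id}".
async fn requeue_orphaned_items(
    pool: &Pool,
    key_prefix: &str,
    worker_id: &str,
) -> anyhow::Result<u64> {
    let mut conn = pool.get().await?;
    let worker_key = format!("{}{}", key_prefix, worker_id);
    let primary_key = format!("{}primary", key_prefix);

    let mut moved = 0u64;
    // RPOPLPUSH moves one item per call; loop until the worker queue is empty.
    loop {
        let item: Option<Vec<u8>> = conn.rpoplpush(&worker_key, &primary_key).await?;
        if item.is_none() {
            break;
        }
        moved += 1;
    }
    Ok(moved)
}
```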
+505
src/queue/sqlite.rs
+505
src/queue/sqlite.rs
···
1
+
//! SQLite-backed queue adapter implementation.
2
+
//!
3
+
//! This module provides a persistent queue implementation using SQLite
4
+
//! with optional work shedding to prevent unbounded growth.
5
+
6
+
use async_trait::async_trait;
7
+
use serde::{Deserialize, Serialize};
8
+
use sqlx::{self, Row};
9
+
use tracing::{debug, error, info, warn};
10
+
11
+
use super::adapter::QueueAdapter;
12
+
use super::error::{QueueError, Result};
13
+
14
+
/// SQLite-backed queue adapter implementation.
15
+
///
16
+
/// This adapter uses SQLite database for persistent queuing of work items.
17
+
/// It's suitable for single-instance deployments that need persistence
18
+
/// across service restarts while remaining lightweight.
19
+
///
20
+
/// # Features
21
+
///
22
+
/// - Persistent queuing across service restarts
23
+
/// - Simple FIFO ordering based on insertion time
24
+
/// - Single consumer design (no complex locking needed)
25
+
/// - Simple pull-and-delete semantics
26
+
/// - Optional work shedding to prevent unbounded queue growth
27
+
///
28
+
/// # Work Shedding
29
+
///
30
+
/// When `max_size` is configured (> 0), the adapter implements work shedding:
31
+
/// - New work items are always accepted
32
+
/// - When the queue exceeds `max_size`, oldest entries are automatically deleted
33
+
/// - This maintains the most recent work items while preventing unbounded growth
34
+
/// - Essential for long-running deployments to avoid disk space issues
35
+
///
36
+
/// # Database Schema
37
+
///
38
+
/// The adapter expects the following table structure:
39
+
/// ```sql
40
+
/// CREATE TABLE handle_resolution_queue (
41
+
/// id INTEGER PRIMARY KEY AUTOINCREMENT,
42
+
/// work TEXT NOT NULL,
43
+
/// queued_at INTEGER NOT NULL
44
+
/// );
45
+
/// CREATE INDEX idx_handle_resolution_queue_queued_at ON handle_resolution_queue(queued_at);
46
+
/// ```
47
+
///
48
+
/// # Examples
49
+
///
50
+
/// ```no_run
51
+
/// use quickdid::queue::SqliteQueueAdapter;
52
+
/// use quickdid::queue::QueueAdapter;
53
+
/// use quickdid::sqlite_schema::create_sqlite_pool;
54
+
///
55
+
/// # async fn example() -> anyhow::Result<()> {
56
+
/// // Create SQLite pool
57
+
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
58
+
///
59
+
/// // Create queue with unlimited size
60
+
/// let queue = SqliteQueueAdapter::<String>::new(pool.clone());
61
+
///
62
+
/// // Or create queue with work shedding (max 10,000 items)
63
+
/// let bounded_queue = SqliteQueueAdapter::<String>::with_max_size(pool, 10000);
64
+
///
65
+
/// // Use the queue
66
+
/// queue.push("work-item".to_string()).await?;
67
+
/// if let Some(item) = queue.pull().await {
68
+
/// // Process item (automatically deleted from queue)
69
+
/// println!("Processing: {}", item);
70
+
/// }
71
+
/// # Ok(())
72
+
/// # }
73
+
/// ```
74
+
pub struct SqliteQueueAdapter<T>
75
+
where
76
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
77
+
{
78
+
/// SQLite connection pool
79
+
pool: sqlx::SqlitePool,
80
+
/// Maximum queue size (0 = unlimited)
81
+
/// When exceeded, oldest entries are deleted to maintain this limit
82
+
max_size: u64,
83
+
/// Type marker for generic parameter
84
+
_phantom: std::marker::PhantomData<T>,
85
+
}
86
+
87
+
impl<T> SqliteQueueAdapter<T>
88
+
where
89
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
90
+
{
91
+
/// Create a new SQLite queue adapter with unlimited queue size.
92
+
///
93
+
/// # Arguments
94
+
///
95
+
/// * `pool` - SQLite connection pool
96
+
///
97
+
/// # Examples
98
+
///
99
+
/// ```no_run
100
+
/// use quickdid::queue::SqliteQueueAdapter;
101
+
/// use quickdid::sqlite_schema::create_sqlite_pool;
102
+
///
103
+
/// # async fn example() -> anyhow::Result<()> {
104
+
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
105
+
/// let queue = SqliteQueueAdapter::<String>::new(pool);
106
+
/// # Ok(())
107
+
/// # }
108
+
/// ```
109
+
pub fn new(pool: sqlx::SqlitePool) -> Self {
110
+
Self::with_max_size(pool, 0)
111
+
}
112
+
113
+
/// Create a new SQLite queue adapter with specified maximum queue size.
114
+
///
115
+
/// # Arguments
116
+
///
117
+
/// * `pool` - SQLite connection pool
118
+
/// * `max_size` - Maximum number of entries in queue (0 = unlimited)
119
+
///
120
+
/// # Work Shedding Behavior
121
+
///
122
+
/// When `max_size` > 0:
123
+
/// - New work items are always accepted
124
+
/// - If queue size exceeds `max_size` after insertion, oldest entries are deleted
125
+
/// - This preserves the most recent work while preventing unbounded growth
126
+
///
127
+
/// # Examples
128
+
///
129
+
/// ```no_run
130
+
/// use quickdid::queue::SqliteQueueAdapter;
131
+
/// use quickdid::sqlite_schema::create_sqlite_pool;
132
+
///
133
+
/// # async fn example() -> anyhow::Result<()> {
134
+
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
135
+
/// // Limit queue to 10,000 entries with automatic work shedding
136
+
/// let queue = SqliteQueueAdapter::<String>::with_max_size(pool, 10000);
137
+
/// # Ok(())
138
+
/// # }
139
+
/// ```
140
+
pub fn with_max_size(pool: sqlx::SqlitePool, max_size: u64) -> Self {
141
+
Self {
142
+
pool,
143
+
max_size,
144
+
_phantom: std::marker::PhantomData,
145
+
}
146
+
}
147
+
}
148
+
149
+
#[async_trait]
150
+
impl<T> QueueAdapter<T> for SqliteQueueAdapter<T>
151
+
where
152
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
153
+
{
154
+
async fn pull(&self) -> Option<T> {
155
+
// Get the oldest queued item and delete it in a transaction
156
+
let mut transaction = match self.pool.begin().await {
157
+
Ok(tx) => tx,
158
+
Err(e) => {
159
+
error!("Failed to start SQLite transaction: {}", e);
160
+
return None;
161
+
}
162
+
};
163
+
164
+
// Select the oldest queued item
165
+
let record = match sqlx::query(
166
+
"SELECT id, work FROM handle_resolution_queue
167
+
ORDER BY queued_at ASC
168
+
LIMIT 1",
169
+
)
170
+
.fetch_optional(&mut *transaction)
171
+
.await
172
+
{
173
+
Ok(Some(row)) => row,
174
+
Ok(None) => {
175
+
// No queued items available
176
+
debug!("No queued items available in SQLite queue");
177
+
return None;
178
+
}
179
+
Err(e) => {
180
+
error!("Failed to query SQLite queue: {}", e);
181
+
return None;
182
+
}
183
+
};
184
+
185
+
let item_id: i64 = record.get("id");
186
+
let work_json: String = record.get("work");
187
+
188
+
// Delete the item from the queue
189
+
if let Err(e) = sqlx::query("DELETE FROM handle_resolution_queue WHERE id = ?1")
190
+
.bind(item_id)
191
+
.execute(&mut *transaction)
192
+
.await
193
+
{
194
+
error!("Failed to delete item from queue: {}", e);
195
+
return None;
196
+
}
197
+
198
+
// Commit the transaction
199
+
if let Err(e) = transaction.commit().await {
200
+
error!("Failed to commit SQLite transaction: {}", e);
201
+
return None;
202
+
}
203
+
204
+
// Deserialize the work item from JSON
205
+
match serde_json::from_str(&work_json) {
206
+
Ok(work) => {
207
+
debug!("Pulled work item from SQLite queue");
208
+
Some(work)
209
+
}
210
+
Err(e) => {
211
+
error!("Failed to deserialize work item: {}", e);
212
+
None
213
+
}
214
+
}
215
+
}
216
+
217
+
async fn push(&self, work: T) -> Result<()> {
218
+
// Serialize the entire work item as JSON
219
+
let work_json = serde_json::to_string(&work)
220
+
.map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
221
+
222
+
let current_timestamp = std::time::SystemTime::now()
223
+
.duration_since(std::time::UNIX_EPOCH)
224
+
.unwrap_or_default()
225
+
.as_secs() as i64;
226
+
227
+
// Optimized approach: Insert first, then check if cleanup needed
228
+
// This avoids counting on every insert
229
+
sqlx::query("INSERT INTO handle_resolution_queue (work, queued_at) VALUES (?1, ?2)")
230
+
.bind(&work_json)
231
+
.bind(current_timestamp)
232
+
.execute(&self.pool)
233
+
.await
234
+
.map_err(|e| QueueError::PushFailed(format!("Failed to insert work item: {}", e)))?;
235
+
236
+
// Implement optimized work shedding if max_size is configured
237
+
if self.max_size > 0 {
238
+
// Optimized approach: Only check and clean periodically or when likely over limit
239
+
// Use a limited count to avoid full table scan
240
+
let check_limit = self.max_size as i64 + (self.max_size as i64 / 10).max(1); // Check 10% over limit
241
+
let approx_count: Option<i64> = sqlx::query_scalar(
242
+
"SELECT COUNT(*) FROM (
243
+
SELECT 1 FROM handle_resolution_queue LIMIT ?1
244
+
) AS limited_count",
245
+
)
246
+
.bind(check_limit)
247
+
.fetch_one(&self.pool)
248
+
.await
249
+
.map_err(|e| QueueError::PushFailed(format!("Failed to check queue size: {}", e)))?;
250
+
251
+
// Only perform cleanup if we're definitely over the limit
252
+
if let Some(count) = approx_count
253
+
&& count >= check_limit
254
+
{
255
+
// Perform batch cleanup: trim well below the limit so shedding runs infrequently
256
+
// (keep ~80% of max_size instead of deleting only the excess over the limit)
257
+
let target_size = (self.max_size as f64 * 0.8) as i64; // Keep 80% of max_size
258
+
let to_delete = count - target_size;
259
+
260
+
if to_delete > 0 {
261
+
// Optimized deletion: First get the cutoff id and timestamp
262
+
// This avoids the expensive subquery in the DELETE statement
263
+
let cutoff: Option<(i64, i64)> = sqlx::query_as(
264
+
"SELECT id, queued_at FROM handle_resolution_queue
265
+
ORDER BY queued_at ASC, id ASC
266
+
LIMIT 1 OFFSET ?1",
267
+
)
268
+
.bind(to_delete - 1)
269
+
.fetch_optional(&self.pool)
270
+
.await
271
+
.map_err(|e| QueueError::PushFailed(format!("Failed to find cutoff: {}", e)))?;
272
+
273
+
if let Some((cutoff_id, cutoff_timestamp)) = cutoff {
274
+
// Delete entries older than cutoff, or equal timestamp with lower id
275
+
// This handles the case where multiple entries have the same timestamp
276
+
let deleted_result = sqlx::query(
277
+
"DELETE FROM handle_resolution_queue
278
+
WHERE queued_at < ?1
279
+
OR (queued_at = ?1 AND id <= ?2)",
280
+
)
281
+
.bind(cutoff_timestamp)
282
+
.bind(cutoff_id)
283
+
.execute(&self.pool)
284
+
.await
285
+
.map_err(|e| {
286
+
QueueError::PushFailed(format!(
287
+
"Failed to delete excess entries: {}",
288
+
e
289
+
))
290
+
})?;
291
+
292
+
let deleted_count = deleted_result.rows_affected();
293
+
if deleted_count > 0 {
294
+
info!(
295
+
"Work shedding: deleted {} oldest entries (target size: {}, max: {})",
296
+
deleted_count, target_size, self.max_size
297
+
);
298
+
}
299
+
}
300
+
}
301
+
}
302
+
}
303
+
304
+
debug!(
305
+
"Pushed work item to SQLite queue (max_size: {})",
306
+
self.max_size
307
+
);
308
+
Ok(())
309
+
}
310
+
311
+
async fn ack(&self, _item: &T) -> Result<()> {
312
+
// With the simplified SQLite queue design, items are deleted when pulled,
313
+
// so acknowledgment is a no-op (item is already processed and removed)
314
+
debug!("Acknowledged work item in SQLite queue (no-op)");
315
+
Ok(())
316
+
}
317
+
318
+
async fn depth(&self) -> Option<usize> {
319
+
match sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM handle_resolution_queue")
320
+
.fetch_one(&self.pool)
321
+
.await
322
+
{
323
+
Ok(count) => Some(count as usize),
324
+
Err(e) => {
325
+
warn!("Failed to get SQLite queue depth: {}", e);
326
+
None
327
+
}
328
+
}
329
+
}
330
+
331
+
async fn is_healthy(&self) -> bool {
332
+
// Test the connection by running a simple query
333
+
sqlx::query_scalar::<_, i64>("SELECT 1")
334
+
.fetch_one(&self.pool)
335
+
.await
336
+
.map(|_| true)
337
+
.unwrap_or(false)
338
+
}
339
+
}
340
+
341
+
#[cfg(test)]
342
+
mod tests {
343
+
use super::*;
344
+
use crate::queue::HandleResolutionWork;
345
+
346
+
async fn create_test_pool() -> sqlx::SqlitePool {
347
+
let pool = sqlx::SqlitePool::connect("sqlite::memory:")
348
+
.await
349
+
.expect("Failed to connect to in-memory SQLite");
350
+
351
+
// Create the queue schema
352
+
crate::sqlite_schema::create_schema(&pool)
353
+
.await
354
+
.expect("Failed to create schema");
355
+
356
+
pool
357
+
}
358
+
359
+
#[tokio::test]
360
+
async fn test_sqlite_queue_push_pull() {
361
+
let pool = create_test_pool().await;
362
+
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool.clone());
363
+
364
+
let work = HandleResolutionWork::new("alice.example.com".to_string());
365
+
366
+
// Test push
367
+
adapter.push(work.clone()).await.unwrap();
368
+
369
+
// Verify depth
370
+
assert_eq!(adapter.depth().await, Some(1));
371
+
372
+
// Test pull
373
+
let pulled = adapter.pull().await;
374
+
assert_eq!(pulled, Some(work));
375
+
376
+
// Verify queue is empty after pull
377
+
assert_eq!(adapter.depth().await, Some(0));
378
+
assert!(adapter.pull().await.is_none());
379
+
}
380
+
381
+
#[tokio::test]
382
+
async fn test_sqlite_queue_fifo_ordering() {
383
+
let pool = create_test_pool().await;
384
+
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool);
385
+
386
+
// Push multiple items
387
+
let handles = vec![
388
+
"alice.example.com",
389
+
"bob.example.com",
390
+
"charlie.example.com",
391
+
];
392
+
for handle in &handles {
393
+
let work = HandleResolutionWork::new(handle.to_string());
394
+
adapter.push(work).await.unwrap();
395
+
}
396
+
397
+
// Pull items in FIFO order
398
+
for expected_handle in handles {
399
+
let pulled = adapter.pull().await;
400
+
assert!(pulled.is_some());
401
+
assert_eq!(pulled.unwrap().handle, expected_handle);
402
+
}
403
+
404
+
// Queue should be empty
405
+
assert!(adapter.pull().await.is_none());
406
+
}
407
+
408
+
#[tokio::test]
409
+
async fn test_sqlite_queue_ack_noop() {
410
+
let pool = create_test_pool().await;
411
+
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool);
412
+
413
+
// Ack should always succeed as it's a no-op
414
+
let work = HandleResolutionWork::new("any.example.com".to_string());
415
+
adapter.ack(&work).await.unwrap();
416
+
}
417
+
418
+
#[tokio::test]
419
+
async fn test_sqlite_queue_health() {
420
+
let pool = create_test_pool().await;
421
+
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool);
422
+
423
+
// Should be healthy if SQLite is working
424
+
assert!(adapter.is_healthy().await);
425
+
}
426
+
427
+
#[tokio::test]
428
+
async fn test_sqlite_queue_work_shedding() {
429
+
let pool = create_test_pool().await;
430
+
431
+
// Create adapter with small max_size for testing
432
+
let max_size = 10;
433
+
let adapter =
434
+
SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(pool.clone(), max_size);
435
+
436
+
// Push items up to the limit (should not trigger shedding)
437
+
for i in 0..max_size {
438
+
let work = HandleResolutionWork::new(format!("test-{:03}", i));
439
+
adapter.push(work).await.expect("Push should succeed");
440
+
}
441
+
442
+
// Verify all items are present
443
+
assert_eq!(adapter.depth().await, Some(max_size as usize));
444
+
445
+
// Push beyond 110% of max_size to trigger batch shedding
446
+
let trigger_point = max_size + (max_size / 10) + 1;
447
+
for i in max_size..trigger_point {
448
+
let work = HandleResolutionWork::new(format!("test-{:03}", i));
449
+
adapter.push(work).await.expect("Push should succeed");
450
+
}
451
+
452
+
// After triggering shedding, queue should be around 80% of max_size
453
+
let depth_after_shedding = adapter.depth().await.unwrap();
454
+
let expected_size = (max_size as f64 * 0.8) as usize;
455
+
456
+
// Allow some variance due to batch deletion
457
+
assert!(
458
+
depth_after_shedding <= expected_size + 1,
459
+
"Queue size {} should be around 80% of max_size ({})",
460
+
depth_after_shedding,
461
+
expected_size
462
+
);
463
+
}
464
+
465
+
#[tokio::test]
466
+
async fn test_sqlite_queue_work_shedding_disabled() {
467
+
let pool = create_test_pool().await;
468
+
469
+
// Create adapter with max_size = 0 (disabled work shedding)
470
+
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(pool, 0);
471
+
472
+
// Push many items (should not trigger any shedding)
473
+
for i in 0..100 {
474
+
let work = HandleResolutionWork::new(format!("test-{:03}", i));
475
+
adapter.push(work).await.expect("Push should succeed");
476
+
}
477
+
478
+
// Verify all items are present (no shedding occurred)
479
+
assert_eq!(adapter.depth().await, Some(100));
480
+
}
481
+
482
+
#[tokio::test]
483
+
async fn test_sqlite_queue_generic_work_type() {
484
+
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
485
+
struct CustomWork {
486
+
id: u64,
487
+
name: String,
488
+
data: Vec<i32>,
489
+
}
490
+
491
+
let pool = create_test_pool().await;
492
+
let adapter = SqliteQueueAdapter::<CustomWork>::new(pool);
493
+
494
+
let work = CustomWork {
495
+
id: 123,
496
+
name: "test_work".to_string(),
497
+
data: vec![1, 2, 3, 4, 5],
498
+
};
499
+
500
+
// Test push and pull
501
+
adapter.push(work.clone()).await.unwrap();
502
+
let pulled = adapter.pull().await;
503
+
assert_eq!(pulled, Some(work));
504
+
}
505
+
}
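Unlike the Redis adapter, which blocks in BRPOPLPUSH until an item arrives, `pull()` here returns `None` immediately when the table is empty, so a consumer has to poll. A minimal worker-loop sketch (not part of this change, with the resolution logic and the sleep interval left as placeholders) might look like this:

```rust
use std::{sync::Arc, time::Duration};

use quickdid::queue::{HandleResolutionWork, QueueAdapter};

/// Sketch of a polling consumer for the SQLite-backed queue.
async fn run_worker(queue: Arc<dyn QueueAdapter<HandleResolutionWork>>) {
    loop {
        match queue.pull().await {
            Some(work) => {
                // Handle resolution itself is out of scope for this sketch.
                tracing::info!("resolving handle {}", work.handle);
                // ack() is a no-op for SQLite (items are deleted on pull), but calling
                // it keeps the loop identical across queue adapters.
                if let Err(e) = queue.ack(&work).await {
                    tracing::warn!("ack failed: {}", e);
                }
            }
            // Back off briefly when the queue is empty.
            None => tokio::time::sleep(Duration::from_millis(500)).await,
        }
    }
}
```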
+133
src/queue/work.rs
+133
src/queue/work.rs
···
1
+
//! Work item types for queue processing.
2
+
//!
3
+
//! This module defines the various work item types that can be processed
4
+
//! through the queue system, such as handle resolution requests.
5
+
6
+
use serde::{Deserialize, Serialize};
7
+
8
+
/// Work item for handle resolution tasks.
9
+
///
10
+
/// This structure represents a request to resolve an AT Protocol handle
11
+
/// to its corresponding DID. It's the primary work type processed by
12
+
/// the QuickDID service's background queue workers.
13
+
///
14
+
/// # Examples
15
+
///
16
+
/// ```
17
+
/// use quickdid::queue::HandleResolutionWork;
18
+
///
19
+
/// let work = HandleResolutionWork::new("alice.bsky.social".to_string());
20
+
/// assert_eq!(work.handle, "alice.bsky.social");
21
+
/// ```
22
+
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
23
+
pub struct HandleResolutionWork {
24
+
/// The AT Protocol handle to resolve (e.g., "alice.bsky.social")
25
+
pub handle: String,
26
+
}
27
+
28
+
impl HandleResolutionWork {
29
+
/// Create a new handle resolution work item.
30
+
///
31
+
/// # Arguments
32
+
///
33
+
/// * `handle` - The AT Protocol handle to resolve
34
+
///
35
+
/// # Examples
36
+
///
37
+
/// ```
38
+
/// use quickdid::queue::HandleResolutionWork;
39
+
///
40
+
/// let work = HandleResolutionWork::new("alice.bsky.social".to_string());
41
+
/// ```
42
+
pub fn new(handle: String) -> Self {
43
+
Self { handle }
44
+
}
45
+
}
46
+
47
+
impl std::fmt::Display for HandleResolutionWork {
48
+
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
49
+
write!(f, "HandleResolution({})", self.handle)
50
+
}
51
+
}
52
+
53
+
/// Trait for getting a unique deduplication key from a work item.
54
+
/// This is used by the Redis queue adapter to prevent duplicate items.
55
+
pub trait DedupKey {
56
+
/// Get a unique key for deduplication purposes.
57
+
/// This should return a consistent identifier for equivalent work items.
58
+
fn dedup_key(&self) -> String;
59
+
}
60
+
61
+
impl DedupKey for HandleResolutionWork {
62
+
fn dedup_key(&self) -> String {
63
+
// Use the handle itself as the dedup key
64
+
self.handle.clone()
65
+
}
66
+
}
67
+
68
+
// For testing purposes, implement DedupKey for String
69
+
#[cfg(test)]
70
+
impl DedupKey for String {
71
+
fn dedup_key(&self) -> String {
72
+
self.clone()
73
+
}
74
+
}
75
+
76
+
#[cfg(test)]
77
+
mod tests {
78
+
use super::*;
79
+
80
+
#[test]
81
+
fn test_handle_resolution_work_creation() {
82
+
let handle = "alice.example.com";
83
+
let work = HandleResolutionWork::new(handle.to_string());
84
+
assert_eq!(work.handle, handle);
85
+
}
86
+
87
+
#[test]
88
+
fn test_handle_resolution_work_serialization() {
89
+
let work = HandleResolutionWork::new("bob.example.com".to_string());
90
+
91
+
// Test JSON serialization (which is what we actually use in the queue adapters)
92
+
let json = serde_json::to_string(&work).expect("Failed to serialize to JSON");
93
+
let deserialized: HandleResolutionWork =
94
+
serde_json::from_str(&json).expect("Failed to deserialize from JSON");
95
+
assert_eq!(work, deserialized);
96
+
97
+
// Verify the JSON structure
98
+
let json_value: serde_json::Value = serde_json::from_str(&json).unwrap();
99
+
assert_eq!(json_value["handle"], "bob.example.com");
100
+
}
101
+
102
+
#[test]
103
+
fn test_handle_resolution_work_display() {
104
+
let work = HandleResolutionWork::new("charlie.example.com".to_string());
105
+
let display = format!("{}", work);
106
+
assert_eq!(display, "HandleResolution(charlie.example.com)");
107
+
}
108
+
109
+
#[test]
110
+
fn test_handle_resolution_work_equality() {
111
+
let work1 = HandleResolutionWork::new("alice.example.com".to_string());
112
+
let work2 = HandleResolutionWork::new("alice.example.com".to_string());
113
+
let work3 = HandleResolutionWork::new("bob.example.com".to_string());
114
+
115
+
assert_eq!(work1, work2);
116
+
assert_ne!(work1, work3);
117
+
}
118
+
119
+
#[test]
120
+
fn test_handle_resolution_work_dedup_key() {
121
+
let work1 = HandleResolutionWork::new("alice.example.com".to_string());
122
+
let work2 = HandleResolutionWork::new("alice.example.com".to_string());
123
+
let work3 = HandleResolutionWork::new("bob.example.com".to_string());
124
+
125
+
// Same handle should have same dedup key
126
+
assert_eq!(work1.dedup_key(), work2.dedup_key());
127
+
assert_eq!(work1.dedup_key(), "alice.example.com");
128
+
129
+
// Different handle should have different dedup key
130
+
assert_ne!(work1.dedup_key(), work3.dedup_key());
131
+
assert_eq!(work3.dedup_key(), "bob.example.com");
132
+
}
133
+
}
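`DedupKey` is what lets `RedisQueueAdapter::with_dedup` collapse duplicate submissions, so any new work type that should be deduplicated needs an impl. A hypothetical example follows; the `DidRefreshWork` type is illustrative only, not part of the codebase, and the import path assumes `DedupKey` is re-exported from the queue module alongside `HandleResolutionWork`.

```rust
use serde::{Deserialize, Serialize};

// Assumes DedupKey is re-exported from the queue module like HandleResolutionWork.
use quickdid::queue::DedupKey;

/// Hypothetical work item: refresh the cached document for a DID.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
struct DidRefreshWork {
    did: String,
    reason: String,
}

impl DedupKey for DidRefreshWork {
    fn dedup_key(&self) -> String {
        // Only the DID identifies the work: two refresh requests for the same DID
        // collapse into a single queue entry, whatever their reason.
        self.did.clone()
    }
}
```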
-673
src/queue_adapter.rs
-673
src/queue_adapter.rs
···
1
-
//! Generic queue adapter system for work queue abstraction.
2
-
//!
3
-
//! This module provides a generic trait and implementations for queue adapters
4
-
//! that can be used with any work type for handle resolution and other tasks.
5
-
6
-
use async_trait::async_trait;
7
-
use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
8
-
use serde::{Deserialize, Serialize};
9
-
use std::sync::Arc;
10
-
use thiserror::Error;
11
-
use tokio::sync::{Mutex, mpsc};
12
-
use tracing::{debug, error, warn};
13
-
14
-
/// Queue operation errors
15
-
#[derive(Error, Debug)]
16
-
pub enum QueueError {
17
-
#[error("error-quickdid-queue-1 Failed to push to queue: {0}")]
18
-
PushFailed(String),
19
-
20
-
#[error("error-quickdid-queue-2 Queue is full")]
21
-
QueueFull,
22
-
23
-
#[error("error-quickdid-queue-3 Queue is closed")]
24
-
QueueClosed,
25
-
26
-
#[error("error-quickdid-queue-4 Redis connection failed: {0}")]
27
-
RedisConnectionFailed(String),
28
-
29
-
#[error("error-quickdid-queue-5 Redis operation failed: {operation}: {details}")]
30
-
RedisOperationFailed { operation: String, details: String },
31
-
32
-
#[error("error-quickdid-queue-6 Serialization failed: {0}")]
33
-
SerializationFailed(String),
34
-
35
-
#[error("error-quickdid-queue-7 Deserialization failed: {0}")]
36
-
DeserializationFailed(String),
37
-
38
-
#[error("error-quickdid-queue-8 Item not found in worker queue during acknowledgment")]
39
-
AckItemNotFound,
40
-
}
41
-
42
-
type Result<T> = std::result::Result<T, QueueError>;
43
-
44
-
/// Generic trait for queue adapters that can work with any work type.
45
-
///
46
-
/// This trait provides a common interface for different queue implementations
47
-
/// (MPSC, Redis, PostgreSQL, etc.) allowing them to be used interchangeably.
48
-
#[async_trait]
49
-
pub trait QueueAdapter<T>: Send + Sync
50
-
where
51
-
T: Send + Sync + 'static,
52
-
{
53
-
/// Pull the next work item from the queue.
54
-
///
55
-
/// Returns None if the queue is closed or empty (depending on implementation).
56
-
async fn pull(&self) -> Option<T>;
57
-
58
-
/// Push a work item to the queue.
59
-
///
60
-
/// Returns an error if the queue is full or closed.
61
-
async fn push(&self, work: T) -> Result<()>;
62
-
63
-
/// Acknowledge that a work item has been successfully processed.
64
-
///
65
-
/// This is used by reliable queue implementations to remove the item
66
-
/// from a temporary processing queue. Implementations that don't require
67
-
/// acknowledgment (like MPSC) can use the default no-op implementation.
68
-
async fn ack(&self, _item: &T) -> Result<()> {
69
-
// Default no-op implementation for queues that don't need acknowledgment
70
-
Ok(())
71
-
}
72
-
73
-
/// Try to push a work item without blocking.
74
-
///
75
-
/// Returns an error if the queue is full or closed.
76
-
async fn try_push(&self, work: T) -> Result<()> {
77
-
// Default implementation uses regular push
78
-
self.push(work).await
79
-
}
80
-
81
-
/// Get the current queue depth if available.
82
-
///
83
-
/// Returns None if the implementation doesn't support queue depth.
84
-
async fn depth(&self) -> Option<usize> {
85
-
None
86
-
}
87
-
88
-
/// Check if the queue is healthy.
89
-
///
90
-
/// Used for health checks and monitoring.
91
-
async fn is_healthy(&self) -> bool {
92
-
true
93
-
}
94
-
}
95
-
96
-
/// MPSC channel-based queue adapter implementation.
97
-
///
98
-
/// This adapter uses tokio's multi-producer, single-consumer channel
99
-
/// for in-memory queuing of work items. It's suitable for single-instance
100
-
/// deployments with moderate throughput requirements.
101
-
pub(crate) struct MpscQueueAdapter<T>
102
-
where
103
-
T: Send + Sync + 'static,
104
-
{
105
-
receiver: Arc<Mutex<mpsc::Receiver<T>>>,
106
-
sender: mpsc::Sender<T>,
107
-
}
108
-
109
-
impl<T> MpscQueueAdapter<T>
110
-
where
111
-
T: Send + Sync + 'static,
112
-
{
113
-
/// Create a new MPSC queue adapter with the specified buffer size.
114
-
pub(crate) fn new(buffer: usize) -> Self {
115
-
let (sender, receiver) = mpsc::channel(buffer);
116
-
Self {
117
-
receiver: Arc::new(Mutex::new(receiver)),
118
-
sender,
119
-
}
120
-
}
121
-
122
-
/// Create an adapter from existing MPSC channels (for backward compatibility).
123
-
pub(crate) fn from_channel(sender: mpsc::Sender<T>, receiver: mpsc::Receiver<T>) -> Self {
124
-
Self {
125
-
receiver: Arc::new(Mutex::new(receiver)),
126
-
sender,
127
-
}
128
-
}
129
-
}
130
-
131
-
#[async_trait]
132
-
impl<T> QueueAdapter<T> for MpscQueueAdapter<T>
133
-
where
134
-
T: Send + Sync + 'static,
135
-
{
136
-
async fn pull(&self) -> Option<T> {
137
-
let mut receiver = self.receiver.lock().await;
138
-
receiver.recv().await
139
-
}
140
-
141
-
async fn push(&self, work: T) -> Result<()> {
142
-
self.sender
143
-
.send(work)
144
-
.await
145
-
.map_err(|e| QueueError::PushFailed(e.to_string()))
146
-
}
147
-
148
-
async fn try_push(&self, work: T) -> Result<()> {
149
-
self.sender.try_send(work).map_err(|e| match e {
150
-
mpsc::error::TrySendError::Full(_) => QueueError::QueueFull,
151
-
mpsc::error::TrySendError::Closed(_) => QueueError::QueueClosed,
152
-
})
153
-
}
154
-
155
-
async fn depth(&self) -> Option<usize> {
156
-
// Note: This is an approximation as mpsc doesn't provide exact depth
157
-
Some(self.sender.max_capacity() - self.sender.capacity())
158
-
}
159
-
160
-
async fn is_healthy(&self) -> bool {
161
-
!self.sender.is_closed()
162
-
}
163
-
}
164
-
165
-
/// Work item for handle resolution tasks
166
-
#[derive(Debug, Clone, Serialize, Deserialize)]
167
-
pub struct HandleResolutionWork {
168
-
/// The handle to resolve
169
-
pub handle: String,
170
-
}
171
-
172
-
impl HandleResolutionWork {
173
-
/// Create a new handle resolution work item
174
-
pub fn new(handle: String) -> Self {
175
-
Self { handle }
176
-
}
177
-
}
178
-
179
-
/// Redis-backed queue adapter implementation.
180
-
///
181
-
/// This adapter uses Redis lists with a reliable queue pattern:
182
-
/// - LPUSH to push items to the primary queue
183
-
/// - RPOPLPUSH to atomically move items from primary to worker queue
184
-
/// - LREM to acknowledge processed items from worker queue
185
-
///
186
-
/// This ensures at-least-once delivery semantics and allows for recovery
187
-
/// of in-flight items if a worker crashes.
188
-
pub(crate) struct RedisQueueAdapter<T>
189
-
where
190
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
191
-
{
192
-
/// Redis connection pool
193
-
pool: RedisPool,
194
-
/// Unique worker ID for this adapter instance
195
-
worker_id: String,
196
-
/// Key prefix for all queues (default: "queue:handleresolver:")
197
-
key_prefix: String,
198
-
/// Timeout for blocking RPOPLPUSH operations
199
-
timeout_seconds: u64,
200
-
/// Type marker for generic parameter
201
-
_phantom: std::marker::PhantomData<T>,
202
-
}
203
-
204
-
impl<T> RedisQueueAdapter<T>
205
-
where
206
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
207
-
{
208
-
/// Create a new Redis queue adapter with custom configuration
209
-
fn with_config(
210
-
pool: RedisPool,
211
-
worker_id: String,
212
-
key_prefix: String,
213
-
timeout_seconds: u64,
214
-
) -> Self {
215
-
Self {
216
-
pool,
217
-
worker_id,
218
-
key_prefix,
219
-
timeout_seconds,
220
-
_phantom: std::marker::PhantomData,
221
-
}
222
-
}
223
-
224
-
/// Get the primary queue key
225
-
fn primary_queue_key(&self) -> String {
226
-
format!("{}primary", self.key_prefix)
227
-
}
228
-
229
-
/// Get the worker-specific temporary queue key
230
-
fn worker_queue_key(&self) -> String {
231
-
format!("{}{}", self.key_prefix, self.worker_id)
232
-
}
233
-
}
234
-
235
-
#[async_trait]
236
-
impl<T> QueueAdapter<T> for RedisQueueAdapter<T>
237
-
where
238
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
239
-
{
240
-
async fn pull(&self) -> Option<T> {
241
-
match self.pool.get().await {
242
-
Ok(mut conn) => {
243
-
let primary_key = self.primary_queue_key();
244
-
let worker_key = self.worker_queue_key();
245
-
246
-
// Use blocking RPOPLPUSH to atomically move item from primary to worker queue
247
-
let data: Option<Vec<u8>> = match conn
248
-
.brpoplpush(&primary_key, &worker_key, self.timeout_seconds as f64)
249
-
.await
250
-
{
251
-
Ok(data) => data,
252
-
Err(e) => {
253
-
error!("Failed to pull from queue: {}", e);
254
-
return None;
255
-
}
256
-
};
257
-
258
-
if let Some(data) = data {
259
-
// Deserialize the item
260
-
match serde_json::from_slice(&data) {
261
-
Ok(item) => {
262
-
debug!(
263
-
worker_id = %self.worker_id,
264
-
"Pulled item from queue"
265
-
);
266
-
Some(item)
267
-
}
268
-
Err(e) => {
269
-
error!("Failed to deserialize item: {}", e);
270
-
// Remove the corrupted item from worker queue
271
-
let _: std::result::Result<(), _> =
272
-
conn.lrem(&worker_key, 1, &data).await;
273
-
None
274
-
}
275
-
}
276
-
} else {
277
-
None
278
-
}
279
-
}
280
-
Err(e) => {
281
-
error!("Failed to get Redis connection: {}", e);
282
-
None
283
-
}
284
-
}
285
-
}
286
-
287
-
async fn push(&self, work: T) -> Result<()> {
288
-
let mut conn = self
289
-
.pool
290
-
.get()
291
-
.await
292
-
.map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
293
-
294
-
let data = serde_json::to_vec(&work)
295
-
.map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
296
-
297
-
let primary_key = self.primary_queue_key();
298
-
299
-
conn.lpush::<_, _, ()>(&primary_key, data)
300
-
.await
301
-
.map_err(|e| QueueError::RedisOperationFailed {
302
-
operation: "LPUSH".to_string(),
303
-
details: e.to_string(),
304
-
})?;
305
-
306
-
debug!("Pushed item to queue");
307
-
Ok(())
308
-
}
309
-
310
-
async fn ack(&self, item: &T) -> Result<()> {
311
-
let mut conn = self
312
-
.pool
313
-
.get()
314
-
.await
315
-
.map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
316
-
317
-
let data =
318
-
serde_json::to_vec(item).map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
319
-
320
-
let worker_key = self.worker_queue_key();
321
-
322
-
// Remove exactly one occurrence of this item from the worker queue
323
-
let removed: i32 = conn.lrem(&worker_key, 1, &data).await.map_err(|e| {
324
-
QueueError::RedisOperationFailed {
325
-
operation: "LREM".to_string(),
326
-
details: e.to_string(),
327
-
}
328
-
})?;
329
-
330
-
if removed == 0 {
331
-
warn!(
332
-
worker_id = %self.worker_id,
333
-
"Item not found in worker queue during acknowledgment"
334
-
);
335
-
} else {
336
-
debug!(
337
-
worker_id = %self.worker_id,
338
-
"Acknowledged item"
339
-
);
340
-
}
341
-
342
-
Ok(())
343
-
}
344
-
345
-
async fn depth(&self) -> Option<usize> {
346
-
match self.pool.get().await {
347
-
Ok(mut conn) => {
348
-
let primary_key = self.primary_queue_key();
349
-
match conn.llen::<_, usize>(&primary_key).await {
350
-
Ok(len) => Some(len),
351
-
Err(e) => {
352
-
error!("Failed to get queue depth: {}", e);
353
-
None
354
-
}
355
-
}
356
-
}
357
-
Err(e) => {
358
-
error!("Failed to get Redis connection: {}", e);
359
-
None
360
-
}
361
-
}
362
-
}
363
-
364
-
async fn is_healthy(&self) -> bool {
365
-
match self.pool.get().await {
366
-
Ok(mut conn) => {
367
-
// Ping Redis to check health
368
-
match deadpool_redis::redis::cmd("PING")
369
-
.query_async::<String>(&mut conn)
370
-
.await
371
-
{
372
-
Ok(response) => response == "PONG",
373
-
Err(_) => false,
374
-
}
375
-
}
376
-
Err(_) => false,
377
-
}
378
-
}
379
-
}
380
-
381
-
/// No-operation queue adapter that discards all work items.
382
-
///
383
-
/// This adapter is useful for configurations where queuing is disabled
384
-
/// or as a fallback when other queue adapters fail to initialize.
385
-
pub(crate) struct NoopQueueAdapter<T>
386
-
where
387
-
T: Send + Sync + 'static,
388
-
{
389
-
_phantom: std::marker::PhantomData<T>,
390
-
}
391
-
392
-
impl<T> NoopQueueAdapter<T>
393
-
where
394
-
T: Send + Sync + 'static,
395
-
{
396
-
/// Create a new no-op queue adapter
397
-
pub(crate) fn new() -> Self {
398
-
Self {
399
-
_phantom: std::marker::PhantomData,
400
-
}
401
-
}
402
-
}
403
-
404
-
impl<T> Default for NoopQueueAdapter<T>
405
-
where
406
-
T: Send + Sync + 'static,
407
-
{
408
-
fn default() -> Self {
409
-
Self::new()
410
-
}
411
-
}
412
-
413
-
#[async_trait]
414
-
impl<T> QueueAdapter<T> for NoopQueueAdapter<T>
415
-
where
416
-
T: Send + Sync + 'static,
417
-
{
418
-
async fn pull(&self) -> Option<T> {
419
-
// Never returns any work
420
-
tokio::time::sleep(std::time::Duration::from_secs(60)).await;
421
-
None
422
-
}
423
-
424
-
async fn push(&self, _work: T) -> Result<()> {
425
-
// Silently discard the work
426
-
Ok(())
427
-
}
428
-
429
-
async fn ack(&self, _item: &T) -> Result<()> {
430
-
// No-op
431
-
Ok(())
432
-
}
433
-
434
-
async fn try_push(&self, _work: T) -> Result<()> {
435
-
// Silently discard the work
436
-
Ok(())
437
-
}
438
-
439
-
async fn depth(&self) -> Option<usize> {
440
-
// Always empty
441
-
Some(0)
442
-
}
443
-
444
-
async fn is_healthy(&self) -> bool {
445
-
// Always healthy
446
-
true
447
-
}
448
-
}
449
-
450
-
// ========= Factory Functions for Queue Adapters =========
451
-
452
-
/// Create a new MPSC queue adapter with the specified buffer size.
453
-
///
454
-
/// This creates an in-memory queue suitable for single-instance deployments.
455
-
///
456
-
/// # Arguments
457
-
///
458
-
/// * `buffer` - The buffer size for the channel
459
-
pub fn create_mpsc_queue<T>(buffer: usize) -> Arc<dyn QueueAdapter<T>>
460
-
where
461
-
T: Send + Sync + 'static,
462
-
{
463
-
Arc::new(MpscQueueAdapter::new(buffer))
464
-
}
465
-
466
-
/// Create an MPSC queue adapter from existing channels.
467
-
///
468
-
/// This allows integration with existing channel-based architectures.
469
-
///
470
-
/// # Arguments
471
-
///
472
-
/// * `sender` - The sender half of the channel
473
-
/// * `receiver` - The receiver half of the channel
474
-
pub fn create_mpsc_queue_from_channel<T>(
475
-
sender: mpsc::Sender<T>,
476
-
receiver: mpsc::Receiver<T>,
477
-
) -> Arc<dyn QueueAdapter<T>>
478
-
where
479
-
T: Send + Sync + 'static,
480
-
{
481
-
Arc::new(MpscQueueAdapter::from_channel(sender, receiver))
482
-
}
483
-
484
-
/// Create a new Redis-backed queue adapter.
485
-
///
486
-
/// This creates a distributed queue suitable for multi-instance deployments.
487
-
///
488
-
/// # Arguments
489
-
///
490
-
/// * `pool` - Redis connection pool
491
-
/// * `worker_id` - Worker identifier for this queue instance
492
-
/// * `key_prefix` - Redis key prefix for queue operations
493
-
/// * `timeout_seconds` - Timeout for blocking operations
494
-
pub fn create_redis_queue<T>(
495
-
pool: RedisPool,
496
-
worker_id: String,
497
-
key_prefix: String,
498
-
timeout_seconds: u64,
499
-
) -> Arc<dyn QueueAdapter<T>>
500
-
where
501
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
502
-
{
503
-
Arc::new(RedisQueueAdapter::with_config(
504
-
pool,
505
-
worker_id,
506
-
key_prefix,
507
-
timeout_seconds,
508
-
))
509
-
}
510
-
511
-
/// Create a no-operation queue adapter.
512
-
///
513
-
/// This creates a queue that discards all work items, useful for testing
514
-
/// or when queue processing is disabled.
515
-
pub fn create_noop_queue<T>() -> Arc<dyn QueueAdapter<T>>
516
-
where
517
-
T: Send + Sync + 'static,
518
-
{
519
-
Arc::new(NoopQueueAdapter::new())
520
-
}
521
-
522
-
#[cfg(test)]
523
-
mod tests {
524
-
use super::*;
525
-
526
-
#[tokio::test]
527
-
async fn test_mpsc_queue_adapter_push_pull() {
528
-
let adapter = Arc::new(MpscQueueAdapter::<String>::new(10));
529
-
530
-
// Test push
531
-
adapter.push("test".to_string()).await.unwrap();
532
-
533
-
// Test pull
534
-
let pulled = adapter.pull().await;
535
-
assert!(pulled.is_some());
536
-
assert_eq!(pulled.unwrap(), "test");
537
-
}
538
-
539
-
#[tokio::test]
540
-
async fn test_handle_resolution_work() {
541
-
let work = HandleResolutionWork::new("alice.example.com".to_string());
542
-
543
-
assert_eq!(work.handle, "alice.example.com");
544
-
}
545
-
546
-
#[tokio::test]
547
-
async fn test_redis_queue_adapter_push_pull() {
548
-
let pool = match crate::test_helpers::get_test_redis_pool() {
549
-
Some(p) => p,
550
-
None => return,
551
-
};
552
-
553
-
// Create adapter with unique prefix for testing
554
-
let test_prefix = format!("test:queue:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
555
-
let adapter = Arc::new(RedisQueueAdapter::<String>::with_config(
556
-
pool.clone(),
557
-
"test-worker".to_string(),
558
-
test_prefix.clone(),
559
-
1, // 1 second timeout for tests
560
-
));
561
-
562
-
// Test push
563
-
adapter.push("test-item".to_string()).await.unwrap();
564
-
565
-
// Test pull
566
-
let pulled = adapter.pull().await;
567
-
assert!(pulled.is_some());
568
-
assert_eq!(pulled.unwrap(), "test-item");
569
-
570
-
// Test ack
571
-
adapter
572
-
.ack(&"test-item".to_string())
573
-
.await
574
-
.expect("Ack should succeed");
575
-
576
-
// Clean up test data - manually clean worker queue since cleanup was removed
577
-
// In production, items would timeout or be processed
578
-
}
579
-
580
-
#[tokio::test]
581
-
async fn test_redis_queue_adapter_reliable_queue() {
582
-
let pool = match crate::test_helpers::get_test_redis_pool() {
583
-
Some(p) => p,
584
-
None => return,
585
-
};
586
-
587
-
let test_prefix = format!("test:queue:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
588
-
let worker_id = "test-worker-reliable";
589
-
590
-
// Create first adapter
591
-
let adapter1 = Arc::new(RedisQueueAdapter::<String>::with_config(
592
-
pool.clone(),
593
-
worker_id.to_string(),
594
-
test_prefix.clone(),
595
-
1,
596
-
));
597
-
598
-
// Push multiple items
599
-
adapter1.push("item1".to_string()).await.unwrap();
600
-
adapter1.push("item2".to_string()).await.unwrap();
601
-
adapter1.push("item3".to_string()).await.unwrap();
602
-
603
-
// Pull but don't ack (simulating worker crash)
604
-
let item1 = adapter1.pull().await;
605
-
assert!(item1.is_some());
606
-
assert_eq!(item1.unwrap(), "item1");
607
-
608
-
// Create second adapter with same worker_id (simulating restart)
609
-
let adapter2 = Arc::new(RedisQueueAdapter::<String>::with_config(
610
-
pool.clone(),
611
-
worker_id.to_string(),
612
-
test_prefix.clone(),
613
-
1,
614
-
));
615
-
616
-
// In a real scenario, unacked items would be handled by timeout or manual recovery
617
-
// For this test, we just verify the item is in the worker queue
618
-
let recovered = adapter2.pull().await;
619
-
assert!(recovered.is_some());
620
-
}
621
-
622
-
#[tokio::test]
623
-
async fn test_redis_queue_adapter_depth() {
624
-
let pool = match crate::test_helpers::get_test_redis_pool() {
625
-
Some(p) => p,
626
-
None => return,
627
-
};
628
-
629
-
let test_prefix = format!("test:queue:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
630
-
let adapter = Arc::new(RedisQueueAdapter::<String>::with_config(
631
-
pool.clone(),
632
-
"test-worker-depth".to_string(),
633
-
test_prefix.clone(),
634
-
1,
635
-
));
636
-
637
-
// Initially empty
638
-
let depth = adapter.depth().await;
639
-
assert_eq!(depth, Some(0));
640
-
641
-
// Push items and check depth
642
-
adapter.push("item1".to_string()).await.unwrap();
643
-
assert_eq!(adapter.depth().await, Some(1));
644
-
645
-
adapter.push("item2".to_string()).await.unwrap();
646
-
assert_eq!(adapter.depth().await, Some(2));
647
-
648
-
// Pull and check depth decreases
649
-
let _ = adapter.pull().await;
650
-
// Note: depth checks primary queue, not worker queue
651
-
assert_eq!(adapter.depth().await, Some(1));
652
-
653
-
// Test cleanup is automatic when adapter is dropped
654
-
}
655
-
656
-
#[tokio::test]
657
-
async fn test_redis_queue_adapter_health() {
658
-
let pool = match crate::test_helpers::get_test_redis_pool() {
659
-
Some(p) => p,
660
-
None => return,
661
-
};
662
-
663
-
let adapter = Arc::new(RedisQueueAdapter::<String>::with_config(
664
-
pool,
665
-
"test-worker-health".to_string(),
666
-
"test:queue:health:".to_string(),
667
-
1,
668
-
));
669
-
670
-
// Should be healthy if Redis is running
671
-
assert!(adapter.is_healthy().await);
672
-
}
673
-
}
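Removing the monolithic `src/queue_adapter.rs` means callers now import from the split `src/queue/` module instead. Judging by the doc tests in the new files, the path change is roughly the following, though the exact set of re-exports is an assumption:

```rust
// Before: everything lived in the single queue_adapter module.
// use quickdid::queue_adapter::{QueueAdapter, HandleResolutionWork};

// After: the queue module re-exports the trait, work types, and adapters.
use quickdid::queue::{HandleResolutionWork, QueueAdapter, SqliteQueueAdapter};
```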
+382
src/sqlite_schema.rs
+382
src/sqlite_schema.rs
···
1
+
//! SQLite schema management for QuickDID.
2
+
//!
3
+
//! This module provides functionality to create and manage the SQLite database
4
+
//! schema used by the SQLite-backed handle resolver cache.
5
+
6
+
use anyhow::Result;
7
+
use sqlx::{Sqlite, SqlitePool, migrate::MigrateDatabase};
8
+
use std::path::Path;
9
+
10
+
/// SQL schema for the handle resolution cache table.
11
+
const CREATE_HANDLE_RESOLUTION_CACHE_TABLE: &str = r#"
12
+
CREATE TABLE IF NOT EXISTS handle_resolution_cache (
13
+
key INTEGER PRIMARY KEY,
14
+
result BLOB NOT NULL,
15
+
created INTEGER NOT NULL,
16
+
updated INTEGER NOT NULL
17
+
);
18
+
19
+
CREATE INDEX IF NOT EXISTS idx_handle_resolution_cache_updated
20
+
ON handle_resolution_cache(updated);
21
+
"#;
22
+
23
+
/// SQL schema for the handle resolution queue table.
24
+
const CREATE_HANDLE_RESOLUTION_QUEUE_TABLE: &str = r#"
25
+
CREATE TABLE IF NOT EXISTS handle_resolution_queue (
26
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
27
+
work TEXT NOT NULL,
28
+
queued_at INTEGER NOT NULL
29
+
);
30
+
31
+
CREATE INDEX IF NOT EXISTS idx_handle_resolution_queue_queued_at
32
+
ON handle_resolution_queue(queued_at);
33
+
"#;
34
+
35
+
/// Create or connect to a SQLite database and ensure schema is initialized.
36
+
///
37
+
/// # Arguments
38
+
///
39
+
/// * `database_url` - SQLite database URL (e.g., "sqlite:./quickdid.db" or "sqlite::memory:")
40
+
///
41
+
/// # Returns
42
+
///
43
+
/// Returns a SqlitePool connected to the database with schema initialized.
44
+
///
45
+
/// # Example
46
+
///
47
+
/// ```no_run
48
+
/// use quickdid::sqlite_schema::create_sqlite_pool;
49
+
///
50
+
/// # async fn example() -> anyhow::Result<()> {
51
+
/// // File-based database
52
+
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
53
+
///
54
+
/// // In-memory database (for testing)
55
+
/// let pool = create_sqlite_pool("sqlite::memory:").await?;
56
+
/// # Ok(())
57
+
/// # }
58
+
/// ```
59
+
pub async fn create_sqlite_pool(database_url: &str) -> Result<SqlitePool> {
60
+
tracing::info!("Initializing SQLite database: {}", database_url);
61
+
62
+
// Extract the database path from the URL for file-based databases
63
+
if let Some(path) = database_url.strip_prefix("sqlite:")
64
+
&& path != ":memory:"
65
+
&& !path.is_empty()
66
+
{
67
+
// Ensure the parent directory exists before creating the database file
68
+
if let Some(parent) = Path::new(path).parent()
69
+
&& !parent.exists()
70
+
{
71
+
tracing::info!("Creating directory: {}", parent.display());
72
+
std::fs::create_dir_all(parent)?;
73
+
}
74
+
75
+
// Create the database file if it doesn't exist
76
+
if !Sqlite::database_exists(database_url).await? {
77
+
tracing::info!("Creating SQLite database file: {}", path);
78
+
Sqlite::create_database(database_url).await?;
79
+
}
80
+
}
81
+
82
+
// Connect to the database
83
+
let pool = SqlitePool::connect(database_url).await?;
84
+
tracing::info!("Connected to SQLite database");
85
+
86
+
// Create the schema
87
+
create_schema(&pool).await?;
88
+
89
+
Ok(pool)
90
+
}
91
+
92
+
/// Create the database schema if it doesn't exist.
93
+
///
94
+
/// # Arguments
95
+
///
96
+
/// * `pool` - SQLite connection pool
97
+
///
98
+
/// # Example
99
+
///
100
+
/// ```no_run
101
+
/// use quickdid::sqlite_schema::create_schema;
102
+
/// use sqlx::SqlitePool;
103
+
///
104
+
/// # async fn example() -> anyhow::Result<()> {
105
+
/// let pool = SqlitePool::connect("sqlite::memory:").await?;
106
+
/// create_schema(&pool).await?;
107
+
/// # Ok(())
108
+
/// # }
109
+
/// ```
110
+
pub async fn create_schema(pool: &SqlitePool) -> Result<()> {
111
+
tracing::debug!("Creating SQLite schema if not exists");
112
+
113
+
// Execute the schema creation SQL
114
+
sqlx::query(CREATE_HANDLE_RESOLUTION_CACHE_TABLE)
115
+
.execute(pool)
116
+
.await?;
117
+
118
+
sqlx::query(CREATE_HANDLE_RESOLUTION_QUEUE_TABLE)
119
+
.execute(pool)
120
+
.await?;
121
+
122
+
tracing::info!("SQLite schema initialized");
123
+
124
+
Ok(())
125
+
}
126
+
127
+
/// Clean up expired entries from the handle resolution cache.
128
+
///
129
+
/// This function removes entries that are older than the specified TTL.
130
+
/// It should be called periodically to prevent the database from growing indefinitely.
131
+
///
132
+
/// # Arguments
133
+
///
134
+
/// * `pool` - SQLite connection pool
135
+
/// * `ttl_seconds` - TTL in seconds for cache entries
136
+
///
137
+
/// # Returns
138
+
///
139
+
/// Returns the number of entries deleted.
140
+
///
141
+
/// # Example
142
+
///
143
+
/// ```no_run
144
+
/// use quickdid::sqlite_schema::cleanup_expired_entries;
145
+
/// use sqlx::SqlitePool;
146
+
///
147
+
/// # async fn example() -> anyhow::Result<()> {
148
+
/// let pool = SqlitePool::connect("sqlite:./quickdid.db").await?;
149
+
/// let deleted_count = cleanup_expired_entries(&pool, 7776000).await?; // 90 days
150
+
/// println!("Deleted {} expired entries", deleted_count);
151
+
/// # Ok(())
152
+
/// # }
153
+
/// ```
154
+
pub async fn cleanup_expired_entries(pool: &SqlitePool, ttl_seconds: u64) -> Result<u64> {
155
+
let current_timestamp = std::time::SystemTime::now()
156
+
.duration_since(std::time::UNIX_EPOCH)
157
+
.unwrap_or_default()
158
+
.as_secs() as i64;
159
+
160
+
let cutoff_timestamp = current_timestamp - (ttl_seconds as i64);
161
+
162
+
let result = sqlx::query("DELETE FROM handle_resolution_cache WHERE updated < ?1")
163
+
.bind(cutoff_timestamp)
164
+
.execute(pool)
165
+
.await?;
166
+
167
+
let deleted_count = result.rows_affected();
168
+
if deleted_count > 0 {
169
+
tracing::info!("Cleaned up {} expired cache entries", deleted_count);
170
+
}
171
+
172
+
Ok(deleted_count)
173
+
}
174
+
175
+
/// Get statistics about the handle resolution cache.
176
+
///
177
+
/// # Arguments
178
+
///
179
+
/// * `pool` - SQLite connection pool
180
+
///
181
+
/// # Returns
182
+
///
183
+
/// Returns a tuple of (total_entries, database_size_bytes).
184
+
///
185
+
/// # Example
186
+
///
187
+
/// ```no_run
188
+
/// use quickdid::sqlite_schema::get_cache_stats;
189
+
/// use sqlx::SqlitePool;
190
+
///
191
+
/// # async fn example() -> anyhow::Result<()> {
192
+
/// let pool = SqlitePool::connect("sqlite:./quickdid.db").await?;
193
+
/// let (total_entries, size_bytes) = get_cache_stats(&pool).await?;
194
+
/// println!("Cache has {} entries, {} bytes", total_entries, size_bytes);
195
+
/// # Ok(())
196
+
/// # }
197
+
/// ```
198
+
pub async fn get_cache_stats(pool: &SqlitePool) -> Result<(i64, i64)> {
199
+
// Get total entries
200
+
let total_entries: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
201
+
.fetch_one(pool)
202
+
.await?;
203
+
204
+
// Get database page size and page count to calculate total size
205
+
let page_size: i64 = sqlx::query_scalar("PRAGMA page_size")
206
+
.fetch_one(pool)
207
+
.await?;
208
+
209
+
let page_count: i64 = sqlx::query_scalar("PRAGMA page_count")
210
+
.fetch_one(pool)
211
+
.await?;
212
+
213
+
let size_bytes = page_size * page_count;
214
+
215
+
Ok((total_entries, size_bytes))
216
+
}
217
+
218
+
/// Clean up old entries from the handle resolution queue.
219
+
///
220
+
/// This function removes entries that are older than the specified age.
221
+
///
222
+
/// # Arguments
223
+
///
224
+
/// * `pool` - SQLite connection pool
225
+
/// * `max_age_seconds` - Maximum age in seconds for queue entries to be kept
226
+
///
227
+
/// # Returns
228
+
///
229
+
/// Returns the number of entries deleted.
230
+
///
231
+
/// # Example
232
+
///
233
+
/// ```no_run
234
+
/// use quickdid::sqlite_schema::cleanup_queue_entries;
235
+
/// use sqlx::SqlitePool;
236
+
///
237
+
/// # async fn example() -> anyhow::Result<()> {
238
+
/// let pool = SqlitePool::connect("sqlite:./quickdid.db").await?;
239
+
/// let deleted_count = cleanup_queue_entries(&pool, 86400).await?; // 1 day
240
+
/// println!("Deleted {} old queue entries", deleted_count);
241
+
/// # Ok(())
242
+
/// # }
243
+
/// ```
244
+
pub async fn cleanup_queue_entries(pool: &SqlitePool, max_age_seconds: u64) -> Result<u64> {
245
+
let current_timestamp = std::time::SystemTime::now()
246
+
.duration_since(std::time::UNIX_EPOCH)
247
+
.unwrap_or_default()
248
+
.as_secs() as i64;
249
+
250
+
let cutoff_timestamp = current_timestamp - (max_age_seconds as i64);
251
+
252
+
let result = sqlx::query("DELETE FROM handle_resolution_queue WHERE queued_at < ?1")
253
+
.bind(cutoff_timestamp)
254
+
.execute(pool)
255
+
.await?;
256
+
257
+
let deleted_count = result.rows_affected();
258
+
if deleted_count > 0 {
259
+
tracing::info!("Cleaned up {} old queue entries", deleted_count);
260
+
}
261
+
262
+
Ok(deleted_count)
263
+
}
264
+
265
+
/// Get statistics about the handle resolution queue.
266
+
///
267
+
/// # Arguments
268
+
///
269
+
/// * `pool` - SQLite connection pool
270
+
///
271
+
/// # Returns
272
+
///
273
+
/// Returns the total number of entries in the queue.
274
+
///
275
+
/// # Example
276
+
///
277
+
/// ```no_run
278
+
/// use quickdid::sqlite_schema::get_queue_stats;
279
+
/// use sqlx::SqlitePool;
280
+
///
281
+
/// # async fn example() -> anyhow::Result<()> {
282
+
/// let pool = SqlitePool::connect("sqlite:./quickdid.db").await?;
283
+
/// let total = get_queue_stats(&pool).await?;
284
+
/// println!("Queue: {} total entries", total);
285
+
/// # Ok(())
286
+
/// # }
287
+
/// ```
288
+
pub async fn get_queue_stats(pool: &SqlitePool) -> Result<i64> {
289
+
// Get total entries
290
+
let total_entries: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_queue")
291
+
.fetch_one(pool)
292
+
.await?;
293
+
294
+
Ok(total_entries)
295
+
}
296
+
297
+
#[cfg(test)]
298
+
mod tests {
299
+
use super::*;
300
+
301
+
#[tokio::test]
302
+
async fn test_create_sqlite_pool_memory() {
303
+
let pool = create_sqlite_pool("sqlite::memory:")
304
+
.await
305
+
.expect("Failed to create in-memory SQLite pool");
306
+
307
+
// Verify the table was created
308
+
let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
309
+
.fetch_one(&pool)
310
+
.await
311
+
.expect("Failed to query table");
312
+
313
+
assert_eq!(count, 0);
314
+
}
315
+
316
+
#[tokio::test]
317
+
async fn test_cleanup_expired_entries() {
318
+
let pool = create_sqlite_pool("sqlite::memory:")
319
+
.await
320
+
.expect("Failed to create in-memory SQLite pool");
321
+
322
+
// Insert a test entry that's already expired
323
+
let old_timestamp = std::time::SystemTime::now()
324
+
.duration_since(std::time::UNIX_EPOCH)
325
+
.unwrap()
326
+
.as_secs() as i64
327
+
- 3600; // 1 hour ago
328
+
329
+
sqlx::query(
330
+
"INSERT INTO handle_resolution_cache (key, result, created, updated) VALUES (1, ?1, ?2, ?2)"
331
+
)
332
+
.bind(&b"test_data"[..])
333
+
.bind(old_timestamp)
334
+
.execute(&pool)
335
+
.await
336
+
.expect("Failed to insert test data");
337
+
338
+
// Clean up entries older than 30 minutes (1800 seconds)
339
+
let deleted = cleanup_expired_entries(&pool, 1800)
340
+
.await
341
+
.expect("Failed to cleanup expired entries");
342
+
343
+
assert_eq!(deleted, 1);
344
+
345
+
// Verify the entry was deleted
346
+
let count: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
347
+
.fetch_one(&pool)
348
+
.await
349
+
.expect("Failed to query table");
350
+
351
+
assert_eq!(count, 0);
352
+
}
353
+
354
+
#[tokio::test]
355
+
async fn test_get_cache_stats() {
356
+
let pool = create_sqlite_pool("sqlite::memory:")
357
+
.await
358
+
.expect("Failed to create in-memory SQLite pool");
359
+
360
+
// Insert a test entry
361
+
let current_timestamp = std::time::SystemTime::now()
362
+
.duration_since(std::time::UNIX_EPOCH)
363
+
.unwrap()
364
+
.as_secs() as i64;
365
+
366
+
sqlx::query(
367
+
"INSERT INTO handle_resolution_cache (key, result, created, updated) VALUES (1, ?1, ?2, ?2)"
368
+
)
369
+
.bind(&b"test_data"[..])
370
+
.bind(current_timestamp)
371
+
.execute(&pool)
372
+
.await
373
+
.expect("Failed to insert test data");
374
+
375
+
let (total_entries, size_bytes) = get_cache_stats(&pool)
376
+
.await
377
+
.expect("Failed to get cache stats");
378
+
379
+
assert_eq!(total_entries, 1);
380
+
assert!(size_bytes > 0);
381
+
}
382
+
}
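
The doc comments above note that `cleanup_expired_entries` and `cleanup_queue_entries` should be called periodically. A minimal sketch of how a caller might schedule them with `tokio::time::interval` follows; the one-hour interval, the TTL values, and the `run_cleanup_loop` name are illustrative assumptions, not values or code taken from QuickDID itself.

```rust
// Illustrative sketch only: QuickDID's actual scheduling is not shown in this diff.
// The interval and TTL values below are assumptions for demonstration.
use std::time::Duration;

use sqlx::SqlitePool;

async fn run_cleanup_loop(pool: SqlitePool) {
    // Run maintenance once per hour (assumed interval).
    let mut ticker = tokio::time::interval(Duration::from_secs(3600));
    loop {
        ticker.tick().await;
        // Drop cache entries not updated in the last 90 days.
        if let Err(e) = quickdid::sqlite_schema::cleanup_expired_entries(&pool, 7_776_000).await {
            tracing::warn!("cache cleanup failed: {}", e);
        }
        // Drop queue entries older than one day.
        if let Err(e) = quickdid::sqlite_schema::cleanup_queue_entries(&pool, 86_400).await {
            tracing::warn!("queue cleanup failed: {}", e);
        }
    }
}
```

A deployment would typically `tokio::spawn` this loop next to the HTTP server, reusing the same `SqlitePool`.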
+2
-3
src/test_helpers.rs
+2
-3
src/test_helpers.rs
···
1
1
//! Test helper utilities for QuickDID tests
2
-
#![cfg(test)]
3
2
4
3
use crate::cache::create_redis_pool;
5
4
use deadpool_redis::Pool;
6
5
7
6
/// Helper function to get a Redis pool for testing.
8
-
///
7
+
///
9
8
/// Returns None if TEST_REDIS_URL is not set, logging a skip message.
10
9
/// This consolidates the repeated Redis test setup code.
11
10
pub(crate) fn get_test_redis_pool() -> Option<Pool> {
···
33
32
None => return,
34
33
}
35
34
};
36
-
}
35
+
}
+364
test-scripts/docker-test.sh
+364
test-scripts/docker-test.sh
···
1
+
#!/bin/bash
2
+
3
+
# Comprehensive test script for Telegraf/TimescaleDB metrics setup
4
+
# This script validates the entire metrics pipeline
5
+
6
+
set -e
7
+
8
+
echo "========================================="
9
+
echo "Telegraf/TimescaleDB Metrics Test Suite"
10
+
echo "========================================="
11
+
echo ""
12
+
13
+
# Check if Docker is running
14
+
if ! docker info > /dev/null 2>&1; then
15
+
echo "โ Docker is not running. Please start Docker first."
16
+
exit 1
17
+
fi
18
+
19
+
# Function to wait for a service to be healthy
20
+
wait_for_service() {
21
+
local service=$1
22
+
local max_attempts=30
23
+
local attempt=1
24
+
25
+
echo -n "Waiting for $service to be healthy"
26
+
while [ $attempt -le $max_attempts ]; do
27
+
if docker-compose ps $service | grep -q "healthy"; then
28
+
echo " โ
"
29
+
return 0
30
+
fi
31
+
echo -n "."
32
+
sleep 2
33
+
attempt=$((attempt + 1))
34
+
done
35
+
echo " โ"
36
+
echo "Service $service failed to become healthy after $max_attempts attempts"
37
+
return 1
38
+
}
39
+
40
+
# Function to run SQL query
41
+
run_query() {
42
+
docker exec -i timescaledb psql -U postgres -d metrics -t -c "$1" 2>/dev/null
43
+
}
44
+
45
+
# Function to check table exists
46
+
check_table() {
47
+
local table=$1
48
+
local result=$(run_query "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = '$table');")
49
+
if [[ "$result" =~ "t" ]]; then
50
+
echo "โ
Table '$table' exists"
51
+
return 0
52
+
else
53
+
echo "โ Table '$table' does not exist"
54
+
return 1
55
+
fi
56
+
}
57
+
58
+
# Navigate to the metrics-stack directory (create if needed)
59
+
if [ ! -d "metrics-stack" ]; then
60
+
echo "Creating metrics-stack directory..."
61
+
mkdir -p metrics-stack/telegraf
62
+
mkdir -p metrics-stack/test-scripts
63
+
mkdir -p metrics-stack/init-scripts
64
+
fi
65
+
66
+
cd metrics-stack
67
+
68
+
# Create .env file if it doesn't exist
69
+
if [ ! -f ".env" ]; then
70
+
echo "Creating .env file..."
71
+
cat > .env << 'EOF'
72
+
# PostgreSQL/TimescaleDB Configuration
73
+
POSTGRES_DB=metrics
74
+
POSTGRES_USER=postgres
75
+
POSTGRES_PASSWORD=secretpassword
76
+
77
+
# Telegraf Database User
78
+
TELEGRAF_DB_USER=postgres
79
+
TELEGRAF_DB_PASSWORD=secretpassword
80
+
81
+
# TimescaleDB Settings
82
+
TIMESCALE_TELEMETRY=off
83
+
EOF
84
+
fi
85
+
86
+
# Copy configuration files if they don't exist
87
+
if [ ! -f "telegraf/telegraf.conf" ]; then
88
+
echo "Creating telegraf.conf..."
89
+
cat > telegraf/telegraf.conf << 'EOF'
90
+
[agent]
91
+
interval = "10s"
92
+
round_interval = true
93
+
metric_batch_size = 1000
94
+
metric_buffer_limit = 10000
95
+
collection_jitter = "0s"
96
+
flush_interval = "10s"
97
+
flush_jitter = "0s"
98
+
precision = ""
99
+
debug = false
100
+
quiet = false
101
+
hostname = "telegraf-agent"
102
+
omit_hostname = false
103
+
104
+
[[inputs.statsd]]
105
+
service_address = ":8125"
106
+
protocol = "udp"
107
+
delete_gauges = true
108
+
delete_counters = true
109
+
delete_sets = true
110
+
delete_timings = true
111
+
percentiles = [50, 90, 95, 99]
112
+
metric_separator = "."
113
+
allowed_pending_messages = 10000
114
+
datadog_extensions = true
115
+
datadog_distributions = true
116
+
117
+
[[outputs.postgresql]]
118
+
connection = "host=timescaledb user=${TELEGRAF_DB_USER} password=${TELEGRAF_DB_PASSWORD} dbname=${POSTGRES_DB} sslmode=disable"
119
+
schema = "public"
120
+
create_templates = [
121
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
122
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
123
+
]
124
+
tags_as_jsonb = true
125
+
fields_as_jsonb = false
126
+
EOF
127
+
fi
128
+
129
+
# Copy docker-compose.yml if it doesn't exist
130
+
if [ ! -f "docker-compose.yml" ]; then
131
+
echo "Creating docker-compose.yml..."
132
+
cat > docker-compose.yml << 'EOF'
133
+
version: '3.8'
134
+
135
+
services:
136
+
timescaledb:
137
+
image: timescale/timescaledb:latest-pg17
138
+
container_name: timescaledb
139
+
restart: unless-stopped
140
+
environment:
141
+
POSTGRES_DB: ${POSTGRES_DB}
142
+
POSTGRES_USER: ${POSTGRES_USER}
143
+
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
144
+
TIMESCALE_TELEMETRY: ${TIMESCALE_TELEMETRY}
145
+
ports:
146
+
- "5442:5432"
147
+
volumes:
148
+
- timescale_data:/home/postgres/pgdata/data
149
+
- ./init-scripts:/docker-entrypoint-initdb.d:ro
150
+
command:
151
+
- postgres
152
+
- -c
153
+
- shared_buffers=256MB
154
+
- -c
155
+
- effective_cache_size=1GB
156
+
- -c
157
+
- maintenance_work_mem=64MB
158
+
- -c
159
+
- work_mem=8MB
160
+
healthcheck:
161
+
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
162
+
interval: 10s
163
+
timeout: 5s
164
+
retries: 5
165
+
networks:
166
+
- metrics_network
167
+
168
+
telegraf:
169
+
image: telegraf:1.35
170
+
container_name: telegraf
171
+
restart: unless-stopped
172
+
environment:
173
+
TELEGRAF_DB_USER: ${TELEGRAF_DB_USER}
174
+
TELEGRAF_DB_PASSWORD: ${TELEGRAF_DB_PASSWORD}
175
+
POSTGRES_DB: ${POSTGRES_DB}
176
+
ports:
177
+
- "8125:8125/udp"
178
+
volumes:
179
+
- ./telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro
180
+
depends_on:
181
+
timescaledb:
182
+
condition: service_healthy
183
+
networks:
184
+
- metrics_network
185
+
command: ["telegraf", "--config", "/etc/telegraf/telegraf.conf"]
186
+
187
+
networks:
188
+
metrics_network:
189
+
driver: bridge
190
+
191
+
volumes:
192
+
timescale_data:
193
+
EOF
194
+
fi
195
+
196
+
# Create init script
197
+
if [ ! -f "init-scripts/01-init.sql" ]; then
198
+
echo "Creating init script..."
199
+
cat > init-scripts/01-init.sql << 'EOF'
200
+
-- Enable TimescaleDB extension
201
+
CREATE EXTENSION IF NOT EXISTS timescaledb;
202
+
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
203
+
EOF
204
+
fi
205
+
206
+
echo ""
207
+
echo "Step 1: Starting Docker services..."
208
+
echo "========================================="
209
+
docker-compose down -v 2>/dev/null || true
210
+
docker-compose up -d
211
+
212
+
echo ""
213
+
echo "Step 2: Waiting for services to be healthy..."
214
+
echo "========================================="
215
+
wait_for_service timescaledb
216
+
sleep 5 # Extra time for Telegraf to connect
217
+
218
+
echo ""
219
+
echo "Step 3: Sending test metrics..."
220
+
echo "========================================="
221
+
222
+
# Send various types of metrics
223
+
echo "Sending counter metrics..."
224
+
for i in {1..5}; do
225
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
226
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
227
+
done
228
+
229
+
echo "Sending gauge metrics..."
230
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
231
+
sleep 1
232
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
233
+
234
+
echo "Sending timing metrics..."
235
+
for i in {1..10}; do
236
+
duration=$((RANDOM % 100 + 10))
237
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
238
+
done
239
+
240
+
echo "Sending histogram metrics..."
241
+
for i in {1..5}; do
242
+
resolution_time=$((RANDOM % 500 + 50))
243
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
244
+
done
245
+
246
+
echo "Waiting 15 seconds for Telegraf to flush metrics..."
247
+
sleep 15
248
+
249
+
echo ""
250
+
echo "Step 4: Verifying table creation..."
251
+
echo "========================================="
252
+
253
+
# Check if tables were created
254
+
check_table "quickdid.http.request.count"
255
+
check_table "quickdid.http.request.duration_ms"
256
+
check_table "quickdid.resolver.rate_limit.available_permits"
257
+
check_table "quickdid.resolver.resolution_time"
258
+
259
+
echo ""
260
+
echo "Step 5: Verifying data insertion..."
261
+
echo "========================================="
262
+
263
+
# Check row counts
264
+
for table in "quickdid.http.request.count" "quickdid.http.request.duration_ms" "quickdid.resolver.rate_limit.available_permits" "quickdid.resolver.resolution_time"; do
265
+
count=$(run_query "SELECT COUNT(*) FROM \"$table\";" | tr -d ' ')
266
+
if [ "$count" -gt 0 ]; then
267
+
echo "โ
Table '$table' has $count rows"
268
+
else
269
+
echo "โ Table '$table' is empty"
270
+
fi
271
+
done
272
+
273
+
echo ""
274
+
echo "Step 6: Testing JSONB tag queries..."
275
+
echo "========================================="
276
+
277
+
# Test JSONB tag filtering
278
+
result=$(run_query "SELECT COUNT(*) FROM \"quickdid.http.request.count\" WHERE tags->>'method' = 'GET';" | tr -d ' ')
279
+
if [ "$result" -gt 0 ]; then
280
+
echo "โ
JSONB tag filtering works (found $result GET requests)"
281
+
else
282
+
echo "โ JSONB tag filtering failed"
283
+
fi
284
+
285
+
echo ""
286
+
echo "Step 7: Testing TimescaleDB functions..."
287
+
echo "========================================="
288
+
289
+
# Test time_bucket function
290
+
result=$(run_query "SELECT COUNT(*) FROM (SELECT time_bucket('1 minute', time) FROM \"quickdid.http.request.count\" GROUP BY 1) t;" | tr -d ' ')
291
+
if [ "$result" -gt 0 ]; then
292
+
echo "โ
time_bucket function works"
293
+
else
294
+
echo "โ time_bucket function failed"
295
+
fi
296
+
297
+
# Check if hypertables were created
298
+
hypertable_count=$(run_query "SELECT COUNT(*) FROM timescaledb_information.hypertables WHERE hypertable_name LIKE 'quickdid%';" | tr -d ' ')
299
+
if [ "$hypertable_count" -gt 0 ]; then
300
+
echo "โ
Found $hypertable_count hypertables"
301
+
else
302
+
echo "โ No hypertables found"
303
+
fi
304
+
305
+
echo ""
306
+
echo "Step 8: Running comprehensive query tests..."
307
+
echo "========================================="
308
+
309
+
# Run the verify-queries.sql script if it exists
310
+
if [ -f "../test-scripts/verify-queries.sql" ]; then
311
+
echo "Running verify-queries.sql..."
312
+
docker exec -i timescaledb psql -U postgres -d metrics < ../test-scripts/verify-queries.sql > query_results.txt 2>&1
313
+
if [ $? -eq 0 ]; then
314
+
echo "โ
All queries executed successfully"
315
+
echo " Results saved to query_results.txt"
316
+
else
317
+
echo "โ Some queries failed. Check query_results.txt for details"
318
+
fi
319
+
else
320
+
echo "โ ๏ธ verify-queries.sql not found, skipping comprehensive query tests"
321
+
fi
322
+
323
+
echo ""
324
+
echo "========================================="
325
+
echo "Test Summary"
326
+
echo "========================================="
327
+
328
+
# Generate summary
329
+
failures=0
330
+
successes=0
331
+
332
+
# Count successes and failures from the output
333
+
if check_table "quickdid.http.request.count" > /dev/null 2>&1; then
334
+
successes=$((successes + 1))
335
+
else
336
+
failures=$((failures + 1))
337
+
fi
338
+
339
+
if [ "$hypertable_count" -gt 0 ]; then
340
+
successes=$((successes + 1))
341
+
else
342
+
failures=$((failures + 1))
343
+
fi
344
+
345
+
echo ""
346
+
if [ $failures -eq 0 ]; then
347
+
echo "โ
All tests passed successfully!"
348
+
echo ""
349
+
echo "You can now:"
350
+
echo "1. Connect to the database: docker exec -it timescaledb psql -U postgres -d metrics"
351
+
echo "2. View logs: docker-compose logs -f"
352
+
echo "3. Send more metrics: echo 'metric.name:value|type|#tag:value' | nc -u -w0 localhost 8125"
353
+
echo "4. Stop services: docker-compose down"
354
+
else
355
+
echo "โ ๏ธ Some tests failed. Please check the output above for details."
356
+
echo ""
357
+
echo "Troubleshooting tips:"
358
+
echo "1. Check Telegraf logs: docker-compose logs telegraf"
359
+
echo "2. Check TimescaleDB logs: docker-compose logs timescaledb"
360
+
echo "3. Verify connectivity: docker exec telegraf telegraf --test"
361
+
fi
362
+
363
+
echo ""
364
+
echo "Test complete!"
+44
test-scripts/send-metrics.sh
+44
test-scripts/send-metrics.sh
···
1
+
#!/bin/bash
2
+
3
+
# Send test metrics to StatsD/Telegraf
4
+
5
+
echo "Sending test metrics to StatsD on localhost:8125..."
6
+
7
+
# Counter metrics
8
+
for i in {1..10}; do
9
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
10
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
11
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:404" | nc -u -w0 localhost 8125
12
+
done
13
+
14
+
# Gauge metrics
15
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
16
+
echo "quickdid.resolver.rate_limit.available_permits:8|g" | nc -u -w0 localhost 8125
17
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
18
+
19
+
# Timing metrics (in milliseconds)
20
+
for i in {1..20}; do
21
+
duration=$((RANDOM % 100 + 10))
22
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
23
+
done
24
+
25
+
for i in {1..10}; do
26
+
duration=$((RANDOM % 200 + 50))
27
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
28
+
done
29
+
30
+
# Histogram metrics
31
+
for i in {1..15}; do
32
+
resolution_time=$((RANDOM % 500 + 50))
33
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
34
+
echo "quickdid.resolver.resolution_time:$((resolution_time * 2))|h|#resolver:base" | nc -u -w0 localhost 8125
35
+
done
36
+
37
+
# Cache metrics
38
+
echo "quickdid.cache.hit.count:45|c|#cache_type:redis" | nc -u -w0 localhost 8125
39
+
echo "quickdid.cache.miss.count:5|c|#cache_type:redis" | nc -u -w0 localhost 8125
40
+
echo "quickdid.cache.size:1024|g|#cache_type:memory" | nc -u -w0 localhost 8125
41
+
42
+
echo "Metrics sent! Wait 15 seconds for Telegraf to flush..."
43
+
sleep 15
44
+
echo "Done!"
+145
test-scripts/verify-queries.sql
+145
test-scripts/verify-queries.sql
···
1
+
-- Test script to verify all metrics queries work correctly
2
+
-- Run this after sending test metrics with send-metrics.sh
3
+
4
+
\echo '===== CHECKING AVAILABLE TABLES ====='
5
+
SELECT table_name
6
+
FROM information_schema.tables
7
+
WHERE table_schema = 'public'
8
+
AND table_name LIKE 'quickdid%'
9
+
ORDER BY table_name;
10
+
11
+
\echo ''
12
+
\echo '===== CHECKING TABLE STRUCTURES ====='
13
+
\echo 'Structure of quickdid.http.request.count table:'
14
+
\d "quickdid.http.request.count"
15
+
16
+
\echo ''
17
+
\echo 'Structure of quickdid.http.request.duration_ms table:'
18
+
\d "quickdid.http.request.duration_ms"
19
+
20
+
\echo ''
21
+
\echo '===== QUERY 1: Recent HTTP Request Counts ====='
22
+
SELECT
23
+
time,
24
+
tags,
25
+
tags->>'method' as method,
26
+
tags->>'path' as path,
27
+
tags->>'status' as status,
28
+
value
29
+
FROM "quickdid.http.request.count"
30
+
WHERE time > NOW() - INTERVAL '1 hour'
31
+
ORDER BY time DESC
32
+
LIMIT 10;
33
+
34
+
\echo ''
35
+
\echo '===== QUERY 2: HTTP Request Duration Statistics by Endpoint ====='
36
+
SELECT
37
+
time_bucket('1 minute', time) AS minute,
38
+
tags->>'method' as method,
39
+
tags->>'path' as path,
40
+
tags->>'status' as status,
41
+
COUNT(*) as request_count,
42
+
AVG(mean) as avg_duration_ms,
43
+
MAX(p99) as p99_duration_ms,
44
+
MIN(mean) as min_duration_ms
45
+
FROM "quickdid.http.request.duration_ms"
46
+
WHERE time > NOW() - INTERVAL '1 hour'
47
+
AND tags IS NOT NULL
48
+
GROUP BY minute, tags->>'method', tags->>'path', tags->>'status'
49
+
ORDER BY minute DESC
50
+
LIMIT 10;
51
+
52
+
\echo ''
53
+
\echo '===== QUERY 3: Rate Limiter Status Over Time ====='
54
+
SELECT
55
+
time,
56
+
value as available_permits
57
+
FROM "quickdid.resolver.rate_limit.available_permits"
58
+
WHERE time > NOW() - INTERVAL '1 hour'
59
+
ORDER BY time DESC
60
+
LIMIT 10;
61
+
62
+
\echo ''
63
+
\echo '===== QUERY 4: Resolver Performance Comparison ====='
64
+
SELECT
65
+
tags->>'resolver' as resolver_type,
66
+
COUNT(*) as sample_count,
67
+
AVG(mean) as avg_resolution_time_ms,
68
+
MAX(p99) as p99_resolution_time_ms,
69
+
MIN(mean) as min_resolution_time_ms
70
+
FROM "quickdid.resolver.resolution_time"
71
+
WHERE time > NOW() - INTERVAL '1 hour'
72
+
AND tags->>'resolver' IS NOT NULL
73
+
GROUP BY tags->>'resolver'
74
+
ORDER BY avg_resolution_time_ms;
75
+
76
+
\echo ''
77
+
\echo '===== QUERY 5: Cache Hit Rate Analysis ====='
78
+
WITH cache_stats AS (
79
+
SELECT
80
+
'hits' as metric_type,
81
+
SUM(value) as total_count
82
+
FROM "quickdid.cache.hit.count"
83
+
WHERE time > NOW() - INTERVAL '1 hour'
84
+
UNION ALL
85
+
SELECT
86
+
'misses' as metric_type,
87
+
SUM(value) as total_count
88
+
FROM "quickdid.cache.miss.count"
89
+
WHERE time > NOW() - INTERVAL '1 hour'
90
+
)
91
+
SELECT
92
+
SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) as total_hits,
93
+
SUM(CASE WHEN metric_type = 'misses' THEN total_count ELSE 0 END) as total_misses,
94
+
CASE
95
+
WHEN SUM(total_count) > 0 THEN
96
+
ROUND(100.0 * SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) / SUM(total_count), 2)
97
+
ELSE 0
98
+
END as hit_rate_percentage
99
+
FROM cache_stats;
100
+
101
+
\echo ''
102
+
\echo '===== QUERY 6: Hypertable Information ====='
103
+
SELECT
104
+
hypertable_schema,
105
+
hypertable_name,
106
+
owner,
107
+
num_dimensions,
108
+
num_chunks,
109
+
compression_enabled
110
+
FROM timescaledb_information.hypertables
111
+
WHERE hypertable_name LIKE 'quickdid%'
112
+
ORDER BY hypertable_name;
113
+
114
+
\echo ''
115
+
\echo '===== QUERY 7: HTTP Error Rate by Endpoint ====='
116
+
WITH status_counts AS (
117
+
SELECT
118
+
time_bucket('5 minutes', time) as period,
119
+
tags->>'path' as path,
120
+
CASE
121
+
WHEN (tags->>'status')::int >= 400 THEN 'error'
122
+
ELSE 'success'
123
+
END as status_category,
124
+
SUM(value) as request_count
125
+
FROM "quickdid.http.request.count"
126
+
WHERE time > NOW() - INTERVAL '1 hour'
127
+
GROUP BY period, path, status_category
128
+
)
129
+
SELECT
130
+
period,
131
+
path,
132
+
SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) as error_count,
133
+
SUM(CASE WHEN status_category = 'success' THEN request_count ELSE 0 END) as success_count,
134
+
CASE
135
+
WHEN SUM(request_count) > 0 THEN
136
+
ROUND(100.0 * SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) / SUM(request_count), 2)
137
+
ELSE 0
138
+
END as error_rate_percentage
139
+
FROM status_counts
140
+
GROUP BY period, path
141
+
HAVING SUM(request_count) > 0
142
+
ORDER BY period DESC, error_rate_percentage DESC;
143
+
144
+
\echo ''
145
+
\echo '===== TEST COMPLETED ====='
+1
www/.well-known/atproto-did
+1
www/.well-known/atproto-did
···
1
+
did:web:quickdid.smokesignal.tools
+15
www/.well-known/did.json
+15
www/.well-known/did.json
···
1
+
{
2
+
"@context": [
3
+
"https://www.w3.org/ns/did/v1",
4
+
"https://w3id.org/security/multikey/v1"
5
+
],
6
+
"id": "did:web:quickdid.smokesignal.tools",
7
+
"verificationMethod": [],
8
+
"service": [
9
+
{
10
+
"id": "#quickdid",
11
+
"type": "QuickDIDService",
12
+
"serviceEndpoint": "https://quickdid.smokesignal.tools"
13
+
}
14
+
]
15
+
}
+74
www/README.md
+74
www/README.md
···
1
+
# QuickDID Static Files Directory
2
+
3
+
This directory contains static files that are served by QuickDID. By default, QuickDID serves files from the `www` directory, but this can be configured using the `STATIC_FILES_DIR` environment variable.
4
+
5
+
## Directory Structure
6
+
7
+
```
8
+
www/
9
+
├── .well-known/
10
+
│   ├── atproto-did       # AT Protocol DID identifier
11
+
│   └── did.json          # DID document
12
+
├── index.html            # Landing page
13
+
└── README.md             # This file
14
+
```
15
+
16
+
## Files
17
+
18
+
### `.well-known/atproto-did`
19
+
Contains the service's DID identifier (e.g., `did:web:example.com`). This file is used by AT Protocol clients to discover the service's DID.
20
+
21
+
### `.well-known/did.json`
22
+
Contains the DID document with verification methods and service endpoints. This is a JSON-LD document following the W3C DID specification.
23
+
24
+
### `index.html`
25
+
The landing page shown when users visit the root URL. This provides information about the service and available endpoints.
26
+
27
+
## Customization
28
+
29
+
### Using the Generation Script
30
+
31
+
You can generate the `.well-known` files for your deployment using the provided script:
32
+
33
+
```bash
34
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
35
+
```
36
+
37
+
This will create the appropriate files based on your domain.
38
+
39
+
### Manual Customization
40
+
41
+
1. **Update `.well-known/atproto-did`**: Replace with your service's DID
42
+
2. **Update `.well-known/did.json`**: Add your public key to the `verificationMethod` array if needed
43
+
3. **Customize `index.html`**: Modify the landing page to match your branding
44
+
45
+
### Docker Deployment
46
+
47
+
When using Docker, you can mount custom static files:
48
+
49
+
```yaml
50
+
volumes:
51
+
- ./custom-www:/app/www:ro
52
+
```
53
+
54
+
Or just override specific files:
55
+
56
+
```yaml
57
+
volumes:
58
+
- ./custom-index.html:/app/www/index.html:ro
59
+
- ./custom-wellknown:/app/www/.well-known:ro
60
+
```
61
+
62
+
### Environment Variable
63
+
64
+
You can change the static files directory using:
65
+
66
+
```bash
67
+
STATIC_FILES_DIR=/path/to/custom/www
68
+
```
69
+
70
+
## Security Notes
71
+
72
+
- Static files are served with automatic MIME type detection
73
+
- The `.well-known` files are crucial for AT Protocol compatibility
74
+
- Ensure proper permissions on mounted volumes in production
+4
www/css/pico.classless.green.min.css
+4
www/css/pico.classless.green.min.css
···
1
+
@charset "UTF-8";/*!
2
+
* Pico CSS ✨ v2.1.1 (https://picocss.com)
3
+
* Copyright 2019-2025 - Licensed under MIT
4
+
*/:host,:root{--pico-font-family-emoji:"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--pico-font-family-sans-serif:system-ui,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,Helvetica,Arial,"Helvetica Neue",sans-serif,var(--pico-font-family-emoji);--pico-font-family-monospace:ui-monospace,SFMono-Regular,"SF Mono",Menlo,Consolas,"Liberation Mono",monospace,var(--pico-font-family-emoji);--pico-font-family:var(--pico-font-family-sans-serif);--pico-line-height:1.5;--pico-font-weight:400;--pico-font-size:100%;--pico-text-underline-offset:0.1rem;--pico-border-radius:0.25rem;--pico-border-width:0.0625rem;--pico-outline-width:0.125rem;--pico-transition:0.2s ease-in-out;--pico-spacing:1rem;--pico-typography-spacing-vertical:1rem;--pico-block-spacing-vertical:var(--pico-spacing);--pico-block-spacing-horizontal:var(--pico-spacing);--pico-form-element-spacing-vertical:0.75rem;--pico-form-element-spacing-horizontal:1rem;--pico-group-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-group-box-shadow-focus-with-button:0 0 0 var(--pico-outline-width) var(--pico-primary-focus);--pico-group-box-shadow-focus-with-input:0 0 0 0.0625rem var(--pico-form-element-border-color);--pico-modal-overlay-backdrop-filter:blur(0.375rem);--pico-nav-element-spacing-vertical:1rem;--pico-nav-element-spacing-horizontal:0.5rem;--pico-nav-link-spacing-vertical:0.5rem;--pico-nav-link-spacing-horizontal:0.5rem;--pico-nav-breadcrumb-divider:">";--pico-icon-checkbox:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(255, 255, 255)' stroke-width='4' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-minus:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(255, 255, 255)' stroke-width='4' stroke-linecap='round' stroke-linejoin='round'%3E%3Cline x1='5' y1='12' x2='19' y2='12'%3E%3C/line%3E%3C/svg%3E");--pico-icon-chevron:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-date:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Crect x='3' y='4' width='18' height='18' rx='2' ry='2'%3E%3C/rect%3E%3Cline x1='16' y1='2' x2='16' y2='6'%3E%3C/line%3E%3Cline x1='8' y1='2' x2='8' y2='6'%3E%3C/line%3E%3Cline x1='3' y1='10' x2='21' y2='10'%3E%3C/line%3E%3C/svg%3E");--pico-icon-time:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cpolyline points='12 6 12 12 16 14'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-search:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='1.5' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='11' cy='11' r='8'%3E%3C/circle%3E%3Cline x1='21' y1='21' x2='16.65' 
y2='16.65'%3E%3C/line%3E%3C/svg%3E");--pico-icon-close:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='3' stroke-linecap='round' stroke-linejoin='round'%3E%3Cline x1='18' y1='6' x2='6' y2='18'%3E%3C/line%3E%3Cline x1='6' y1='6' x2='18' y2='18'%3E%3C/line%3E%3C/svg%3E");--pico-icon-loading:url("data:image/svg+xml,%3Csvg fill='none' height='24' width='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg' %3E%3Cstyle%3E g %7B animation: rotate 2s linear infinite; transform-origin: center center; %7D circle %7B stroke-dasharray: 75,100; stroke-dashoffset: -5; animation: dash 1.5s ease-in-out infinite; stroke-linecap: round; %7D @keyframes rotate %7B 0%25 %7B transform: rotate(0deg); %7D 100%25 %7B transform: rotate(360deg); %7D %7D @keyframes dash %7B 0%25 %7B stroke-dasharray: 1,100; stroke-dashoffset: 0; %7D 50%25 %7B stroke-dasharray: 44.5,100; stroke-dashoffset: -17.5; %7D 100%25 %7B stroke-dasharray: 44.5,100; stroke-dashoffset: -62; %7D %7D %3C/style%3E%3Cg%3E%3Ccircle cx='12' cy='12' r='10' fill='none' stroke='rgb(136, 145, 164)' stroke-width='4' /%3E%3C/g%3E%3C/svg%3E")}@media (min-width:576px){:host,:root{--pico-font-size:106.25%}}@media (min-width:768px){:host,:root{--pico-font-size:112.5%}}@media (min-width:1024px){:host,:root{--pico-font-size:118.75%}}@media (min-width:1280px){:host,:root{--pico-font-size:125%}}@media (min-width:1536px){:host,:root{--pico-font-size:131.25%}}a{--pico-text-decoration:underline}small{--pico-font-size:0.875em}h1,h2,h3,h4,h5,h6{--pico-font-weight:700}h1{--pico-font-size:2rem;--pico-line-height:1.125;--pico-typography-spacing-top:3rem}h2{--pico-font-size:1.75rem;--pico-line-height:1.15;--pico-typography-spacing-top:2.625rem}h3{--pico-font-size:1.5rem;--pico-line-height:1.175;--pico-typography-spacing-top:2.25rem}h4{--pico-font-size:1.25rem;--pico-line-height:1.2;--pico-typography-spacing-top:1.874rem}h5{--pico-font-size:1.125rem;--pico-line-height:1.225;--pico-typography-spacing-top:1.6875rem}h6{--pico-font-size:1rem;--pico-line-height:1.25;--pico-typography-spacing-top:1.5rem}tfoot td,tfoot th,thead td,thead th{--pico-font-weight:600;--pico-border-width:0.1875rem}code,kbd,pre,samp{--pico-font-family:var(--pico-font-family-monospace)}kbd{--pico-font-weight:bolder}:where(select,textarea),input:not([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-outline-width:0.0625rem}[type=search]{--pico-border-radius:5rem}[type=checkbox],[type=radio]{--pico-border-width:0.125rem}[type=checkbox][role=switch]{--pico-border-width:0.1875rem}[role=search]{--pico-border-radius:5rem}[role=group] [role=button],[role=group] [type=button],[role=group] [type=submit],[role=group] button,[role=search] [role=button],[role=search] [type=button],[role=search] [type=submit],[role=search] button{--pico-form-element-spacing-horizontal:2rem}details summary[role=button]::after{filter:brightness(0) invert(1)}[aria-busy=true]:not(input,select,textarea):is(button,[type=submit],[type=button],[type=reset],[role=button])::before{filter:brightness(0) invert(1)}:host(:not([data-theme=dark])),:root:not([data-theme=dark]),[data-theme=light]{color-scheme:light;--pico-background-color:#fff;--pico-color:#373c44;--pico-text-selection-color:rgba(71, 164, 23, 0.25);--pico-muted-color:#646b79;--pico-muted-border-color:rgb(231, 234, 
239.5);--pico-primary:#33790f;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(51, 121, 15, 0.5);--pico-primary-hover:#265e09;--pico-primary-hover-background:#33790f;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(71, 164, 23, 0.5);--pico-primary-inverse:#fff;--pico-secondary:#5d6b89;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(93, 107, 137, 0.5);--pico-secondary-hover:#48536b;--pico-secondary-hover-background:#48536b;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(93, 107, 137, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#181c25;--pico-contrast-background:#181c25;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(24, 28, 37, 0.5);--pico-contrast-hover:#000;--pico-contrast-hover-background:#000;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-secondary-hover);--pico-contrast-focus:rgba(93, 107, 137, 0.25);--pico-contrast-inverse:#fff;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(129, 145, 181, 0.01698),0.0335rem 0.067rem 0.402rem rgba(129, 145, 181, 0.024),0.0625rem 0.125rem 0.75rem rgba(129, 145, 181, 0.03),0.1125rem 0.225rem 1.35rem rgba(129, 145, 181, 0.036),0.2085rem 0.417rem 2.502rem rgba(129, 145, 181, 0.04302),0.5rem 1rem 6rem rgba(129, 145, 181, 0.06),0 0 0 0.0625rem rgba(129, 145, 181, 0.015);--pico-h1-color:#2d3138;--pico-h2-color:#373c44;--pico-h3-color:#424751;--pico-h4-color:#4d535e;--pico-h5-color:#5c6370;--pico-h6-color:#646b79;--pico-mark-background-color:rgb(252.5, 230.5, 191.5);--pico-mark-color:#0f1114;--pico-ins-color:rgb(28.5, 105.5, 84);--pico-del-color:rgb(136, 56.5, 53);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(243, 244.5, 246.75);--pico-code-color:#646b79;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(251, 251.5, 252.25);--pico-form-element-selected-background-color:#dfe3eb;--pico-form-element-border-color:#cfd5e2;--pico-form-element-color:#23262c;--pico-form-element-placeholder-color:var(--pico-muted-color);--pico-form-element-active-background-color:#fff;--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(183.5, 105.5, 106.5);--pico-form-element-invalid-active-border-color:rgb(200.25, 79.25, 72.25);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:rgb(76, 154.5, 137.5);--pico-form-element-valid-active-border-color:rgb(39, 152.75, 
118.75);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#bfc7d9;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-range-border-color:#dfe3eb;--pico-range-active-border-color:#bfc7d9;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:var(--pico-background-color);--pico-card-border-color:var(--pico-muted-border-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(251, 251.5, 252.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(232, 234, 237, 0.75);--pico-progress-background-color:#dfe3eb;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(76, 154.5, 137.5)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(200.25, 79.25, 72.25)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}:host(:not([data-theme=dark])) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),:root:not([data-theme=dark]) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),[data-theme=light] input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}@media only screen and (prefers-color-scheme:dark){:host(:not([data-theme])),:root:not([data-theme]){color-scheme:dark;--pico-background-color:rgb(19, 22.5, 30.5);--pico-color:#c2c7d0;--pico-text-selection-color:rgba(78, 179, 27, 0.1875);--pico-muted-color:#7b8495;--pico-muted-border-color:#202632;--pico-primary:#4eb31b;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(78, 179, 27, 0.5);--pico-primary-hover:#5dd121;--pico-primary-hover-background:#409614;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(78, 179, 27, 0.375);--pico-primary-inverse:#fff;--pico-secondary:#969eaf;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(150, 158, 175, 
0.5);--pico-secondary-hover:#b3b9c5;--pico-secondary-hover-background:#5d6b89;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(144, 158, 190, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#dfe3eb;--pico-contrast-background:#eff1f4;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(223, 227, 235, 0.5);--pico-contrast-hover:#fff;--pico-contrast-hover-background:#fff;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-contrast-hover);--pico-contrast-focus:rgba(207, 213, 226, 0.25);--pico-contrast-inverse:#000;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(7, 8.5, 12, 0.01698),0.0335rem 0.067rem 0.402rem rgba(7, 8.5, 12, 0.024),0.0625rem 0.125rem 0.75rem rgba(7, 8.5, 12, 0.03),0.1125rem 0.225rem 1.35rem rgba(7, 8.5, 12, 0.036),0.2085rem 0.417rem 2.502rem rgba(7, 8.5, 12, 0.04302),0.5rem 1rem 6rem rgba(7, 8.5, 12, 0.06),0 0 0 0.0625rem rgba(7, 8.5, 12, 0.015);--pico-h1-color:#f0f1f3;--pico-h2-color:#e0e3e7;--pico-h3-color:#c2c7d0;--pico-h4-color:#b3b9c5;--pico-h5-color:#a4acba;--pico-h6-color:#8891a4;--pico-mark-background-color:#014063;--pico-mark-color:#fff;--pico-ins-color:#62af9a;--pico-del-color:rgb(205.5, 126, 123);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(26, 30.5, 40.25);--pico-code-color:#8891a4;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(28, 33, 43.5);--pico-form-element-selected-background-color:#2a3140;--pico-form-element-border-color:#2a3140;--pico-form-element-color:#e0e3e7;--pico-form-element-placeholder-color:#8891a4;--pico-form-element-active-background-color:rgb(26, 30.5, 40.25);--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(149.5, 74, 80);--pico-form-element-invalid-active-border-color:rgb(183.25, 63.5, 59);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:#2a7b6f;--pico-form-element-valid-active-border-color:rgb(22, 137, 105.5);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#333c4e;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 
0);--pico-range-border-color:#202632;--pico-range-active-border-color:#2a3140;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:#181c25;--pico-card-border-color:var(--pico-card-background-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(26, 30.5, 40.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(7.5, 8.5, 10, 0.75);--pico-progress-background-color:#202632;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(42, 123, 111)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(149.5, 74, 80)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}:host(:not([data-theme])) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),:root:not([data-theme]) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}}[data-theme=dark]{color-scheme:dark;--pico-background-color:rgb(19, 22.5, 30.5);--pico-color:#c2c7d0;--pico-text-selection-color:rgba(78, 179, 27, 0.1875);--pico-muted-color:#7b8495;--pico-muted-border-color:#202632;--pico-primary:#4eb31b;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(78, 179, 27, 0.5);--pico-primary-hover:#5dd121;--pico-primary-hover-background:#409614;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(78, 179, 27, 0.375);--pico-primary-inverse:#fff;--pico-secondary:#969eaf;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(150, 158, 175, 0.5);--pico-secondary-hover:#b3b9c5;--pico-secondary-hover-background:#5d6b89;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(144, 158, 190, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#dfe3eb;--pico-contrast-background:#eff1f4;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(223, 227, 235, 0.5);--pico-contrast-hover:#fff;--pico-contrast-hover-background:#fff;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-contrast-hover);--pico-contrast-focus:rgba(207, 213, 226, 
0.25);--pico-contrast-inverse:#000;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(7, 8.5, 12, 0.01698),0.0335rem 0.067rem 0.402rem rgba(7, 8.5, 12, 0.024),0.0625rem 0.125rem 0.75rem rgba(7, 8.5, 12, 0.03),0.1125rem 0.225rem 1.35rem rgba(7, 8.5, 12, 0.036),0.2085rem 0.417rem 2.502rem rgba(7, 8.5, 12, 0.04302),0.5rem 1rem 6rem rgba(7, 8.5, 12, 0.06),0 0 0 0.0625rem rgba(7, 8.5, 12, 0.015);--pico-h1-color:#f0f1f3;--pico-h2-color:#e0e3e7;--pico-h3-color:#c2c7d0;--pico-h4-color:#b3b9c5;--pico-h5-color:#a4acba;--pico-h6-color:#8891a4;--pico-mark-background-color:#014063;--pico-mark-color:#fff;--pico-ins-color:#62af9a;--pico-del-color:rgb(205.5, 126, 123);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(26, 30.5, 40.25);--pico-code-color:#8891a4;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(28, 33, 43.5);--pico-form-element-selected-background-color:#2a3140;--pico-form-element-border-color:#2a3140;--pico-form-element-color:#e0e3e7;--pico-form-element-placeholder-color:#8891a4;--pico-form-element-active-background-color:rgb(26, 30.5, 40.25);--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(149.5, 74, 80);--pico-form-element-invalid-active-border-color:rgb(183.25, 63.5, 59);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:#2a7b6f;--pico-form-element-valid-active-border-color:rgb(22, 137, 105.5);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#333c4e;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-range-border-color:#202632;--pico-range-active-border-color:#2a3140;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:#181c25;--pico-card-border-color:var(--pico-card-background-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(26, 30.5, 40.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(7.5, 8.5, 10, 0.75);--pico-progress-background-color:#202632;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(42, 123, 111)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 
12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(149.5, 74, 80)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}[data-theme=dark] input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}[type=checkbox],[type=radio],[type=range],progress{accent-color:var(--pico-primary)}*,::after,::before{box-sizing:border-box;background-repeat:no-repeat}::after,::before{text-decoration:inherit;vertical-align:inherit}:where(:host),:where(:root){-webkit-tap-highlight-color:transparent;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%;background-color:var(--pico-background-color);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:var(--pico-font-size);line-height:var(--pico-line-height);font-family:var(--pico-font-family);text-underline-offset:var(--pico-text-underline-offset);text-rendering:optimizeLegibility;overflow-wrap:break-word;-moz-tab-size:4;-o-tab-size:4;tab-size:4}body{width:100%;margin:0}main{display:block}body>footer,body>header,body>main{width:100%;margin-right:auto;margin-left:auto;padding:var(--pico-block-spacing-vertical) var(--pico-block-spacing-horizontal)}@media (min-width:576px){body>footer,body>header,body>main{max-width:510px;padding-right:0;padding-left:0}}@media (min-width:768px){body>footer,body>header,body>main{max-width:700px}}@media (min-width:1024px){body>footer,body>header,body>main{max-width:950px}}@media (min-width:1280px){body>footer,body>header,body>main{max-width:1200px}}@media (min-width:1536px){body>footer,body>header,body>main{max-width:1450px}}section{margin-bottom:var(--pico-block-spacing-vertical)}b,strong{font-weight:bolder}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}address,blockquote,dl,ol,p,pre,table,ul{margin-top:0;margin-bottom:var(--pico-typography-spacing-vertical);color:var(--pico-color);font-style:normal;font-weight:var(--pico-font-weight)}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:var(--pico-typography-spacing-vertical);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:var(--pico-font-size);line-height:var(--pico-line-height);font-family:var(--pico-font-family)}h1{--pico-color:var(--pico-h1-color)}h2{--pico-color:var(--pico-h2-color)}h3{--pico-color:var(--pico-h3-color)}h4{--pico-color:var(--pico-h4-color)}h5{--pico-color:var(--pico-h5-color)}h6{--pico-color:var(--pico-h6-color)}:where(article,address,blockquote,dl,figure,form,ol,p,pre,table,ul)~:is(h1,h2,h3,h4,h5,h6){margin-top:var(--pico-typography-spacing-top)}p{margin-bottom:var(--pico-typography-spacing-vertical)}hgroup{margin-bottom:var(--pico-typography-spacing-vertical)}hgroup>*{margin-top:0;margin-bottom:0}hgroup>:not(:first-child):last-child{--pico-color:var(--pico-muted-color);--pico-font-weight:unset;font-size:1rem}:where(ol,ul) li{margin-bottom:calc(var(--pico-typography-spacing-vertical) * .25)}:where(dl,ol,ul) :where(dl,ol,ul){margin:0;margin-top:calc(var(--pico-typography-spacing-vertical) * .25)}ul li{list-style:square}mark{padding:.125rem 
.25rem;background-color:var(--pico-mark-background-color);color:var(--pico-mark-color);vertical-align:baseline}blockquote{display:block;margin:var(--pico-typography-spacing-vertical) 0;padding:var(--pico-spacing);border-right:none;border-left:.25rem solid var(--pico-blockquote-border-color);border-inline-start:0.25rem solid var(--pico-blockquote-border-color);border-inline-end:none}blockquote footer{margin-top:calc(var(--pico-typography-spacing-vertical) * .5);color:var(--pico-blockquote-footer-color)}abbr[title]{border-bottom:1px dotted;text-decoration:none;cursor:help}ins{color:var(--pico-ins-color);text-decoration:none}del{color:var(--pico-del-color)}::-moz-selection{background-color:var(--pico-text-selection-color)}::selection{background-color:var(--pico-text-selection-color)}:where(a:not([role=button])),[role=link]{--pico-color:var(--pico-primary);--pico-background-color:transparent;--pico-underline:var(--pico-primary-underline);outline:0;background-color:var(--pico-background-color);color:var(--pico-color);-webkit-text-decoration:var(--pico-text-decoration);text-decoration:var(--pico-text-decoration);text-decoration-color:var(--pico-underline);text-underline-offset:0.125em;transition:background-color var(--pico-transition),color var(--pico-transition),box-shadow var(--pico-transition),-webkit-text-decoration var(--pico-transition);transition:background-color var(--pico-transition),color var(--pico-transition),text-decoration var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),color var(--pico-transition),text-decoration var(--pico-transition),box-shadow var(--pico-transition),-webkit-text-decoration var(--pico-transition)}:where(a:not([role=button])):is([aria-current]:not([aria-current=false]),:hover,:active,:focus),[role=link]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){--pico-color:var(--pico-primary-hover);--pico-underline:var(--pico-primary-hover-underline);--pico-text-decoration:underline}:where(a:not([role=button])):focus-visible,[role=link]:focus-visible{box-shadow:0 0 0 var(--pico-outline-width) var(--pico-primary-focus)}a[role=button]{display:inline-block}button{margin:0;overflow:visible;font-family:inherit;text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[role=button],[type=button],[type=file]::file-selector-button,[type=reset],[type=submit],button{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);--pico-color:var(--pico-primary-inverse);--pico-box-shadow:var(--pico-button-box-shadow, 0 0 0 rgba(0, 0, 0, 0));padding:var(--pico-form-element-spacing-vertical) var(--pico-form-element-spacing-horizontal);border:var(--pico-border-width) solid var(--pico-border-color);border-radius:var(--pico-border-radius);outline:0;background-color:var(--pico-background-color);box-shadow:var(--pico-box-shadow);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:1rem;line-height:var(--pico-line-height);text-align:center;text-decoration:none;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none;transition:background-color var(--pico-transition),border-color var(--pico-transition),color var(--pico-transition),box-shadow 
var(--pico-transition)}[role=button]:is(:hover,:active,:focus),[role=button]:is([aria-current]:not([aria-current=false])),[type=button]:is(:hover,:active,:focus),[type=button]:is([aria-current]:not([aria-current=false])),[type=file]::file-selector-button:is(:hover,:active,:focus),[type=file]::file-selector-button:is([aria-current]:not([aria-current=false])),[type=reset]:is(:hover,:active,:focus),[type=reset]:is([aria-current]:not([aria-current=false])),[type=submit]:is(:hover,:active,:focus),[type=submit]:is([aria-current]:not([aria-current=false])),button:is(:hover,:active,:focus),button:is([aria-current]:not([aria-current=false])){--pico-background-color:var(--pico-primary-hover-background);--pico-border-color:var(--pico-primary-hover-border);--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0));--pico-color:var(--pico-primary-inverse)}[role=button]:focus,[role=button]:is([aria-current]:not([aria-current=false])):focus,[type=button]:focus,[type=button]:is([aria-current]:not([aria-current=false])):focus,[type=file]::file-selector-button:focus,[type=file]::file-selector-button:is([aria-current]:not([aria-current=false])):focus,[type=reset]:focus,[type=reset]:is([aria-current]:not([aria-current=false])):focus,[type=submit]:focus,[type=submit]:is([aria-current]:not([aria-current=false])):focus,button:focus,button:is([aria-current]:not([aria-current=false])):focus{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-primary-focus)}[type=button],[type=reset],[type=submit]{margin-bottom:var(--pico-spacing)}[type=file]::file-selector-button,[type=reset]{--pico-background-color:var(--pico-secondary-background);--pico-border-color:var(--pico-secondary-border);--pico-color:var(--pico-secondary-inverse);cursor:pointer}[type=file]::file-selector-button:is([aria-current]:not([aria-current=false]),:hover,:active,:focus),[type=reset]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){--pico-background-color:var(--pico-secondary-hover-background);--pico-border-color:var(--pico-secondary-hover-border);--pico-color:var(--pico-secondary-inverse)}[type=file]::file-selector-button:focus,[type=reset]:focus{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-secondary-focus)}:where(button,[type=submit],[type=reset],[type=button],[role=button])[disabled],:where(fieldset[disabled]) :is(button,[type=submit],[type=button],[type=reset],[role=button]){opacity:.5;pointer-events:none}:where(table){width:100%;border-collapse:collapse;border-spacing:0;text-indent:0}td,th{padding:calc(var(--pico-spacing)/ 2) var(--pico-spacing);border-bottom:var(--pico-border-width) solid var(--pico-table-border-color);background-color:var(--pico-background-color);color:var(--pico-color);font-weight:var(--pico-font-weight);text-align:left;text-align:start}tfoot td,tfoot th{border-top:var(--pico-border-width) solid var(--pico-table-border-color);border-bottom:0}table.striped tbody tr:nth-child(odd) td,table.striped tbody tr:nth-child(odd) 
th{background-color:var(--pico-table-row-stripped-background-color)}:where(audio,canvas,iframe,img,svg,video){vertical-align:middle}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}:where(iframe){border-style:none}img{max-width:100%;height:auto;border-style:none}:where(svg:not([fill])){fill:currentColor}svg:not(:host),svg:not(:root){overflow:hidden}code,kbd,pre,samp{font-size:.875em;font-family:var(--pico-font-family)}pre code,pre samp{font-size:inherit;font-family:inherit}pre{-ms-overflow-style:scrollbar;overflow:auto}code,kbd,pre,samp{border-radius:var(--pico-border-radius);background:var(--pico-code-background-color);color:var(--pico-code-color);font-weight:var(--pico-font-weight);line-height:initial}code,kbd,samp{display:inline-block;padding:.375rem}pre{display:block;margin-bottom:var(--pico-spacing);overflow-x:auto}pre>code,pre>samp{display:block;padding:var(--pico-spacing);background:0 0;line-height:var(--pico-line-height)}kbd{background-color:var(--pico-code-kbd-background-color);color:var(--pico-code-kbd-color);vertical-align:baseline}figure{display:block;margin:0;padding:0}figure figcaption{padding:calc(var(--pico-spacing) * .5) 0;color:var(--pico-muted-color)}hr{height:0;margin:var(--pico-typography-spacing-vertical) 0;border:0;border-top:1px solid var(--pico-muted-border-color);color:inherit}[hidden],template{display:none!important}canvas{display:inline-block}input,optgroup,select,textarea{margin:0;font-size:1rem;line-height:var(--pico-line-height);font-family:inherit;letter-spacing:inherit}input{overflow:visible}select{text-transform:none}legend{max-width:100%;padding:0;color:inherit;white-space:normal}textarea{overflow:auto}[type=checkbox],[type=radio]{padding:0}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}::-moz-focus-inner{padding:0;border-style:none}:-moz-focusring{outline:0}:-moz-ui-invalid{box-shadow:none}::-ms-expand{display:none}[type=file],[type=range]{padding:0;border-width:0}input:not([type=checkbox],[type=radio],[type=range]){height:calc(1rem * var(--pico-line-height) + var(--pico-form-element-spacing-vertical) * 2 + var(--pico-border-width) * 2)}fieldset{width:100%;margin:0;margin-bottom:var(--pico-spacing);padding:0;border:0}fieldset legend,label{display:block;margin-bottom:calc(var(--pico-spacing) * .375);color:var(--pico-color);font-weight:var(--pico-form-label-font-weight,var(--pico-font-weight))}fieldset legend{margin-bottom:calc(var(--pico-spacing) * .5)}button[type=submit],input:not([type=checkbox],[type=radio]),select,textarea{width:100%}input:not([type=checkbox],[type=radio],[type=range],[type=file]),select,textarea{-webkit-appearance:none;-moz-appearance:none;appearance:none;padding:var(--pico-form-element-spacing-vertical) var(--pico-form-element-spacing-horizontal)}input,select,textarea{--pico-background-color:var(--pico-form-element-background-color);--pico-border-color:var(--pico-form-element-border-color);--pico-color:var(--pico-form-element-color);--pico-box-shadow:none;border:var(--pico-border-width) solid var(--pico-border-color);border-radius:var(--pico-border-radius);outline:0;background-color:var(--pico-background-color);box-shadow:var(--pico-box-shadow);color:var(--pico-color);font-weight:var(--pico-font-weight);transition:background-color var(--pico-transition),border-color 
var(--pico-transition),color var(--pico-transition),box-shadow var(--pico-transition)}:where(select,textarea):not([readonly]):is(:active,:focus),input:not([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[readonly]):is(:active,:focus){--pico-background-color:var(--pico-form-element-active-background-color)}:where(select,textarea):not([readonly]):is(:active,:focus),input:not([type=submit],[type=button],[type=reset],[role=switch],[readonly]):is(:active,:focus){--pico-border-color:var(--pico-form-element-active-border-color)}:where(select,textarea):not([readonly]):focus,input:not([type=submit],[type=button],[type=reset],[type=range],[type=file],[readonly]):focus{--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-focus-color)}:where(fieldset[disabled]) :is(input:not([type=submit],[type=button],[type=reset]),select,textarea),input:not([type=submit],[type=button],[type=reset])[disabled],label[aria-disabled=true],select[disabled],textarea[disabled]{opacity:var(--pico-form-element-disabled-opacity);pointer-events:none}label[aria-disabled=true] input[disabled]{opacity:1}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid]{padding-right:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem)!important;padding-left:var(--pico-form-element-spacing-horizontal);padding-inline-start:var(--pico-form-element-spacing-horizontal)!important;padding-inline-end:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem)!important;background-position:center right .75rem;background-size:1rem auto;background-repeat:no-repeat}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid=false]:not(select){background-image:var(--pico-icon-valid)}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid=true]:not(select){background-image:var(--pico-icon-invalid)}:where(input,select,textarea)[aria-invalid=false]{--pico-border-color:var(--pico-form-element-valid-border-color)}:where(input,select,textarea)[aria-invalid=false]:is(:active,:focus){--pico-border-color:var(--pico-form-element-valid-active-border-color)!important}:where(input,select,textarea)[aria-invalid=false]:is(:active,:focus):not([type=checkbox],[type=radio]){--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-valid-focus-color)!important}:where(input,select,textarea)[aria-invalid=true]{--pico-border-color:var(--pico-form-element-invalid-border-color)}:where(input,select,textarea)[aria-invalid=true]:is(:active,:focus){--pico-border-color:var(--pico-form-element-invalid-active-border-color)!important}:where(input,select,textarea)[aria-invalid=true]:is(:active,:focus):not([type=checkbox],[type=radio]){--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-invalid-focus-color)!important}[dir=rtl] :where(input,select,textarea):not([type=checkbox],[type=radio]):is([aria-invalid],[aria-invalid=true],[aria-invalid=false]){background-position:center left 
.75rem}input::-webkit-input-placeholder,input::placeholder,select:invalid,textarea::-webkit-input-placeholder,textarea::placeholder{color:var(--pico-form-element-placeholder-color);opacity:1}input:not([type=checkbox],[type=radio]),select,textarea{margin-bottom:var(--pico-spacing)}select::-ms-expand{border:0;background-color:transparent}select:not([multiple],[size]){padding-right:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem);padding-left:var(--pico-form-element-spacing-horizontal);padding-inline-start:var(--pico-form-element-spacing-horizontal);padding-inline-end:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem);background-image:var(--pico-icon-chevron);background-position:center right .75rem;background-size:1rem auto;background-repeat:no-repeat}select[multiple] option:checked{background:var(--pico-form-element-selected-background-color);color:var(--pico-form-element-color)}[dir=rtl] select:not([multiple],[size]){background-position:center left .75rem}textarea{display:block;resize:vertical}textarea[aria-invalid]{--pico-icon-height:calc(1rem * var(--pico-line-height) + var(--pico-form-element-spacing-vertical) * 2 + var(--pico-border-width) * 2);background-position:top right .75rem!important;background-size:1rem var(--pico-icon-height)!important}:where(input,select,textarea,fieldset)+small{display:block;width:100%;margin-top:calc(var(--pico-spacing) * -.75);margin-bottom:var(--pico-spacing);color:var(--pico-muted-color)}:where(input,select,textarea,fieldset)[aria-invalid=false]+small{color:var(--pico-ins-color)}:where(input,select,textarea,fieldset)[aria-invalid=true]+small{color:var(--pico-del-color)}label>:where(input,select,textarea){margin-top:calc(var(--pico-spacing) * .25)}label:has([type=checkbox],[type=radio]){width:-moz-fit-content;width:fit-content;cursor:pointer}[type=checkbox],[type=radio]{-webkit-appearance:none;-moz-appearance:none;appearance:none;width:1.25em;height:1.25em;margin-top:-.125em;margin-inline-end:.5em;border-width:var(--pico-border-width);vertical-align:middle;cursor:pointer}[type=checkbox]::-ms-check,[type=radio]::-ms-check{display:none}[type=checkbox]:checked,[type=checkbox]:checked:active,[type=checkbox]:checked:focus,[type=radio]:checked,[type=radio]:checked:active,[type=radio]:checked:focus{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);background-image:var(--pico-icon-checkbox);background-position:center;background-size:.75em auto;background-repeat:no-repeat}[type=checkbox]~label,[type=radio]~label{display:inline-block;margin-bottom:0;cursor:pointer}[type=checkbox]~label:not(:last-of-type),[type=radio]~label:not(:last-of-type){margin-inline-end:1em}[type=checkbox]:indeterminate{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);background-image:var(--pico-icon-minus);background-position:center;background-size:.75em auto;background-repeat:no-repeat}[type=radio]{border-radius:50%}[type=radio]:checked,[type=radio]:checked:active,[type=radio]:checked:focus{--pico-background-color:var(--pico-primary-inverse);border-width:.35em;background-image:none}[type=checkbox][role=switch]{--pico-background-color:var(--pico-switch-background-color);--pico-color:var(--pico-switch-color);width:2.25em;height:1.25em;border:var(--pico-border-width) solid 
var(--pico-border-color);border-radius:1.25em;background-color:var(--pico-background-color);line-height:1.25em}[type=checkbox][role=switch]:not([aria-invalid]){--pico-border-color:var(--pico-switch-background-color)}[type=checkbox][role=switch]:before{display:block;aspect-ratio:1;height:100%;border-radius:50%;background-color:var(--pico-color);box-shadow:var(--pico-switch-thumb-box-shadow);content:"";transition:margin .1s ease-in-out}[type=checkbox][role=switch]:focus{--pico-background-color:var(--pico-switch-background-color);--pico-border-color:var(--pico-switch-background-color)}[type=checkbox][role=switch]:checked{--pico-background-color:var(--pico-switch-checked-background-color);--pico-border-color:var(--pico-switch-checked-background-color);background-image:none}[type=checkbox][role=switch]:checked::before{margin-inline-start:calc(2.25em - 1.25em)}[type=checkbox][role=switch][disabled]{--pico-background-color:var(--pico-border-color)}[type=checkbox][aria-invalid=false]:checked,[type=checkbox][aria-invalid=false]:checked:active,[type=checkbox][aria-invalid=false]:checked:focus,[type=checkbox][role=switch][aria-invalid=false]:checked,[type=checkbox][role=switch][aria-invalid=false]:checked:active,[type=checkbox][role=switch][aria-invalid=false]:checked:focus{--pico-background-color:var(--pico-form-element-valid-border-color)}[type=checkbox]:checked:active[aria-invalid=true],[type=checkbox]:checked:focus[aria-invalid=true],[type=checkbox]:checked[aria-invalid=true],[type=checkbox][role=switch]:checked:active[aria-invalid=true],[type=checkbox][role=switch]:checked:focus[aria-invalid=true],[type=checkbox][role=switch]:checked[aria-invalid=true]{--pico-background-color:var(--pico-form-element-invalid-border-color)}[type=checkbox][aria-invalid=false]:checked,[type=checkbox][aria-invalid=false]:checked:active,[type=checkbox][aria-invalid=false]:checked:focus,[type=checkbox][role=switch][aria-invalid=false]:checked,[type=checkbox][role=switch][aria-invalid=false]:checked:active,[type=checkbox][role=switch][aria-invalid=false]:checked:focus,[type=radio][aria-invalid=false]:checked,[type=radio][aria-invalid=false]:checked:active,[type=radio][aria-invalid=false]:checked:focus{--pico-border-color:var(--pico-form-element-valid-border-color)}[type=checkbox]:checked:active[aria-invalid=true],[type=checkbox]:checked:focus[aria-invalid=true],[type=checkbox]:checked[aria-invalid=true],[type=checkbox][role=switch]:checked:active[aria-invalid=true],[type=checkbox][role=switch]:checked:focus[aria-invalid=true],[type=checkbox][role=switch]:checked[aria-invalid=true],[type=radio]:checked:active[aria-invalid=true],[type=radio]:checked:focus[aria-invalid=true],[type=radio]:checked[aria-invalid=true]{--pico-border-color:var(--pico-form-element-invalid-border-color)}[type=color]::-webkit-color-swatch-wrapper{padding:0}[type=color]::-moz-focus-inner{padding:0}[type=color]::-webkit-color-swatch{border:0;border-radius:calc(var(--pico-border-radius) * .5)}[type=color]::-moz-color-swatch{border:0;border-radius:calc(var(--pico-border-radius) * .5)}input:not([type=checkbox],[type=radio],[type=range],[type=file]):is([type=date],[type=datetime-local],[type=month],[type=time],[type=week]){--pico-icon-position:0.75rem;--pico-icon-width:1rem;padding-right:calc(var(--pico-icon-width) + var(--pico-icon-position));background-image:var(--pico-icon-date);background-position:center right var(--pico-icon-position);background-size:var(--pico-icon-width) 
auto;background-repeat:no-repeat}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=time]{background-image:var(--pico-icon-time)}[type=date]::-webkit-calendar-picker-indicator,[type=datetime-local]::-webkit-calendar-picker-indicator,[type=month]::-webkit-calendar-picker-indicator,[type=time]::-webkit-calendar-picker-indicator,[type=week]::-webkit-calendar-picker-indicator{width:var(--pico-icon-width);margin-right:calc(var(--pico-icon-width) * -1);margin-left:var(--pico-icon-position);opacity:0}@-moz-document url-prefix(){[type=date],[type=datetime-local],[type=month],[type=time],[type=week]{padding-right:var(--pico-form-element-spacing-horizontal)!important;background-image:none!important}}[dir=rtl] :is([type=date],[type=datetime-local],[type=month],[type=time],[type=week]){text-align:right}[type=file]{--pico-color:var(--pico-muted-color);margin-left:calc(var(--pico-outline-width) * -1);padding:calc(var(--pico-form-element-spacing-vertical) * .5) 0;padding-left:var(--pico-outline-width);border:0;border-radius:0;background:0 0}[type=file]::file-selector-button{margin-right:calc(var(--pico-spacing)/ 2);padding:calc(var(--pico-form-element-spacing-vertical) * .5) var(--pico-form-element-spacing-horizontal)}[type=file]:is(:hover,:active,:focus)::file-selector-button{--pico-background-color:var(--pico-secondary-hover-background);--pico-border-color:var(--pico-secondary-hover-border)}[type=file]:focus::file-selector-button{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-secondary-focus)}[type=range]{-webkit-appearance:none;-moz-appearance:none;appearance:none;width:100%;height:1.25rem;background:0 0}[type=range]::-webkit-slider-runnable-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-webkit-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-moz-range-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-moz-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-ms-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-ms-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-webkit-slider-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-webkit-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform var(--pico-transition)}[type=range]::-moz-range-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-moz-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform 
var(--pico-transition)}[type=range]::-ms-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-ms-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform var(--pico-transition)}[type=range]:active,[type=range]:focus-within{--pico-range-border-color:var(--pico-range-active-border-color);--pico-range-thumb-color:var(--pico-range-thumb-active-color)}[type=range]:active::-webkit-slider-thumb{transform:scale(1.25)}[type=range]:active::-moz-range-thumb{transform:scale(1.25)}[type=range]:active::-ms-thumb{transform:scale(1.25)}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search]{padding-inline-start:calc(var(--pico-form-element-spacing-horizontal) + 1.75rem);background-image:var(--pico-icon-search);background-position:center left calc(var(--pico-form-element-spacing-horizontal) + .125rem);background-size:1rem auto;background-repeat:no-repeat}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid]{padding-inline-start:calc(var(--pico-form-element-spacing-horizontal) + 1.75rem)!important;background-position:center left 1.125rem,center right .75rem}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid=false]{background-image:var(--pico-icon-search),var(--pico-icon-valid)}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid=true]{background-image:var(--pico-icon-search),var(--pico-icon-invalid)}[dir=rtl] :where(input):not([type=checkbox],[type=radio],[type=range],[type=file])[type=search]{background-position:center right 1.125rem}[dir=rtl] :where(input):not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid]{background-position:center right 1.125rem,center left .75rem}details{display:block;margin-bottom:var(--pico-spacing)}details summary{line-height:1rem;list-style-type:none;cursor:pointer;transition:color var(--pico-transition)}details summary:not([role]){color:var(--pico-accordion-close-summary-color)}details summary::-webkit-details-marker{display:none}details summary::marker{display:none}details summary::-moz-list-bullet{list-style-type:none}details summary::after{display:block;width:1rem;height:1rem;margin-inline-start:calc(var(--pico-spacing,1rem) * .5);float:right;transform:rotate(-90deg);background-image:var(--pico-icon-chevron);background-position:right center;background-size:1rem auto;background-repeat:no-repeat;content:"";transition:transform var(--pico-transition)}details summary:focus{outline:0}details summary:focus:not([role]){color:var(--pico-accordion-active-summary-color)}details summary:focus-visible:not([role]){outline:var(--pico-outline-width) solid var(--pico-primary-focus);outline-offset:calc(var(--pico-spacing,1rem) * 0.5);color:var(--pico-primary)}details summary[role=button]{width:100%;text-align:left}details summary[role=button]::after{height:calc(1rem * var(--pico-line-height,1.5))}details[open]>summary{margin-bottom:var(--pico-spacing)}details[open]>summary:not([role]):not(:focus){color:var(--pico-accordion-open-summary-color)}details[open]>summary::after{transform:rotate(0)}[dir=rtl] details summary{text-align:right}[dir=rtl] details summary::after{float:left;background-position:left 
center}article{margin-bottom:var(--pico-block-spacing-vertical);padding:var(--pico-block-spacing-vertical) var(--pico-block-spacing-horizontal);border-radius:var(--pico-border-radius);background:var(--pico-card-background-color);box-shadow:var(--pico-card-box-shadow)}article>footer,article>header{margin-right:calc(var(--pico-block-spacing-horizontal) * -1);margin-left:calc(var(--pico-block-spacing-horizontal) * -1);padding:calc(var(--pico-block-spacing-vertical) * .66) var(--pico-block-spacing-horizontal);background-color:var(--pico-card-sectioning-background-color)}article>header{margin-top:calc(var(--pico-block-spacing-vertical) * -1);margin-bottom:var(--pico-block-spacing-vertical);border-bottom:var(--pico-border-width) solid var(--pico-card-border-color);border-top-right-radius:var(--pico-border-radius);border-top-left-radius:var(--pico-border-radius)}article>footer{margin-top:var(--pico-block-spacing-vertical);margin-bottom:calc(var(--pico-block-spacing-vertical) * -1);border-top:var(--pico-border-width) solid var(--pico-card-border-color);border-bottom-right-radius:var(--pico-border-radius);border-bottom-left-radius:var(--pico-border-radius)}[role=group],[role=search]{display:inline-flex;position:relative;width:100%;margin-bottom:var(--pico-spacing);border-radius:var(--pico-border-radius);box-shadow:var(--pico-group-box-shadow,0 0 0 transparent);vertical-align:middle;transition:box-shadow var(--pico-transition)}[role=group] input:not([type=checkbox],[type=radio]),[role=group] select,[role=group]>*,[role=search] input:not([type=checkbox],[type=radio]),[role=search] select,[role=search]>*{position:relative;flex:1 1 auto;margin-bottom:0}[role=group] input:not([type=checkbox],[type=radio]):not(:first-child),[role=group] select:not(:first-child),[role=group]>:not(:first-child),[role=search] input:not([type=checkbox],[type=radio]):not(:first-child),[role=search] select:not(:first-child),[role=search]>:not(:first-child){margin-left:0;border-top-left-radius:0;border-bottom-left-radius:0}[role=group] input:not([type=checkbox],[type=radio]):not(:last-child),[role=group] select:not(:last-child),[role=group]>:not(:last-child),[role=search] input:not([type=checkbox],[type=radio]):not(:last-child),[role=search] select:not(:last-child),[role=search]>:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}[role=group] input:not([type=checkbox],[type=radio]):focus,[role=group] select:focus,[role=group]>:focus,[role=search] input:not([type=checkbox],[type=radio]):focus,[role=search] select:focus,[role=search]>:focus{z-index:2}[role=group] [role=button]:not(:first-child),[role=group] [type=button]:not(:first-child),[role=group] [type=reset]:not(:first-child),[role=group] [type=submit]:not(:first-child),[role=group] button:not(:first-child),[role=group] input:not([type=checkbox],[type=radio]):not(:first-child),[role=group] select:not(:first-child),[role=search] [role=button]:not(:first-child),[role=search] [type=button]:not(:first-child),[role=search] [type=reset]:not(:first-child),[role=search] [type=submit]:not(:first-child),[role=search] button:not(:first-child),[role=search] input:not([type=checkbox],[type=radio]):not(:first-child),[role=search] select:not(:first-child){margin-left:calc(var(--pico-border-width) * -1)}[role=group] [role=button],[role=group] [type=button],[role=group] [type=reset],[role=group] [type=submit],[role=group] button,[role=search] [role=button],[role=search] [type=button],[role=search] [type=reset],[role=search] [type=submit],[role=search] 
button{width:auto}@supports selector(:has(*)){[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus),[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus){--pico-group-box-shadow:var(--pico-group-box-shadow-focus-with-button)}[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) input:not([type=checkbox],[type=radio]),[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) select,[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) input:not([type=checkbox],[type=radio]),[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) select{border-color:transparent}[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus),[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus){--pico-group-box-shadow:var(--pico-group-box-shadow-focus-with-input)}[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [role=button],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=button],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=submit],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) button,[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [role=button],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=button],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=submit],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) button{--pico-button-box-shadow:0 0 0 var(--pico-border-width) var(--pico-primary-border);--pico-button-hover-box-shadow:0 0 0 var(--pico-border-width) var(--pico-primary-hover-border)}[role=group] [role=button]:focus,[role=group] [type=button]:focus,[role=group] [type=reset]:focus,[role=group] [type=submit]:focus,[role=group] button:focus,[role=search] [role=button]:focus,[role=search] [type=button]:focus,[role=search] [type=reset]:focus,[role=search] [type=submit]:focus,[role=search] button:focus{box-shadow:none}}[role=search]>:first-child{border-top-left-radius:5rem;border-bottom-left-radius:5rem}[role=search]>:last-child{border-top-right-radius:5rem;border-bottom-right-radius:5rem}[aria-busy=true]:not(input,select,textarea,html,form){white-space:nowrap}[aria-busy=true]:not(input,select,textarea,html,form)::before{display:inline-block;width:1em;height:1em;background-image:var(--pico-icon-loading);background-size:1em auto;background-repeat:no-repeat;content:"";vertical-align:-.125em}[aria-busy=true]:not(input,select,textarea,html,form):not(:empty)::before{margin-inline-end:calc(var(--pico-spacing) * 
.5)}[aria-busy=true]:not(input,select,textarea,html,form):empty{text-align:center}[role=button][aria-busy=true],[type=button][aria-busy=true],[type=reset][aria-busy=true],[type=submit][aria-busy=true],a[aria-busy=true],button[aria-busy=true]{pointer-events:none}:host,:root{--pico-scrollbar-width:0px}dialog{display:flex;z-index:999;position:fixed;top:0;right:0;bottom:0;left:0;align-items:center;justify-content:center;width:inherit;min-width:100%;height:inherit;min-height:100%;padding:0;border:0;-webkit-backdrop-filter:var(--pico-modal-overlay-backdrop-filter);backdrop-filter:var(--pico-modal-overlay-backdrop-filter);background-color:var(--pico-modal-overlay-background-color);color:var(--pico-color)}dialog>article{width:100%;max-height:calc(100vh - var(--pico-spacing) * 2);margin:var(--pico-spacing);overflow:auto}@media (min-width:576px){dialog>article{max-width:510px}}@media (min-width:768px){dialog>article{max-width:700px}}dialog>article>header>*{margin-bottom:0}dialog>article>header :is(a,button)[rel=prev]{margin:0;margin-left:var(--pico-spacing);padding:0;float:right}dialog>article>footer{text-align:right}dialog>article>footer [role=button],dialog>article>footer button{margin-bottom:0}dialog>article>footer [role=button]:not(:first-of-type),dialog>article>footer button:not(:first-of-type){margin-left:calc(var(--pico-spacing) * .5)}dialog>article :is(a,button)[rel=prev]{display:block;width:1rem;height:1rem;margin-top:calc(var(--pico-spacing) * -1);margin-bottom:var(--pico-spacing);margin-left:auto;border:none;background-image:var(--pico-icon-close);background-position:center;background-size:auto 1rem;background-repeat:no-repeat;background-color:transparent;opacity:.5;transition:opacity var(--pico-transition)}dialog>article :is(a,button)[rel=prev]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){opacity:1}dialog:not([open]),dialog[open=false]{display:none}:where(nav li)::before{float:left;content:"โ"}nav,nav ul{display:flex}nav{justify-content:space-between;overflow:visible}nav ol,nav ul{align-items:center;margin-bottom:0;padding:0;list-style:none}nav ol:first-of-type,nav ul:first-of-type{margin-left:calc(var(--pico-nav-element-spacing-horizontal) * -1)}nav ol:last-of-type,nav ul:last-of-type{margin-right:calc(var(--pico-nav-element-spacing-horizontal) * -1)}nav li{display:inline-block;margin:0;padding:var(--pico-nav-element-spacing-vertical) var(--pico-nav-element-spacing-horizontal)}nav li :where(a,[role=link]){display:inline-block;margin:calc(var(--pico-nav-link-spacing-vertical) * -1) calc(var(--pico-nav-link-spacing-horizontal) * -1);padding:var(--pico-nav-link-spacing-vertical) var(--pico-nav-link-spacing-horizontal);border-radius:var(--pico-border-radius)}nav li :where(a,[role=link]):not(:hover){text-decoration:none}nav li [role=button],nav li [type=button],nav li button,nav li input:not([type=checkbox],[type=radio],[type=range],[type=file]),nav li select{height:auto;margin-right:inherit;margin-bottom:0;margin-left:inherit;padding:calc(var(--pico-nav-link-spacing-vertical) - var(--pico-border-width) * 2) var(--pico-nav-link-spacing-horizontal)}nav[aria-label=breadcrumb]{align-items:center;justify-content:start}nav[aria-label=breadcrumb] ul li:not(:first-child){margin-inline-start:var(--pico-nav-link-spacing-horizontal)}nav[aria-label=breadcrumb] ul li a{margin:calc(var(--pico-nav-link-spacing-vertical) * -1) 0;margin-inline-start:calc(var(--pico-nav-link-spacing-horizontal) * -1)}nav[aria-label=breadcrumb] ul 
li:not(:last-child)::after{display:inline-block;position:absolute;width:calc(var(--pico-nav-link-spacing-horizontal) * 4);margin:0 calc(var(--pico-nav-link-spacing-horizontal) * -1);content:var(--pico-nav-breadcrumb-divider);color:var(--pico-muted-color);text-align:center;text-decoration:none;white-space:nowrap}nav[aria-label=breadcrumb] a[aria-current]:not([aria-current=false]){background-color:transparent;color:inherit;text-decoration:none;pointer-events:none}aside li,aside nav,aside ol,aside ul{display:block}aside li{padding:calc(var(--pico-nav-element-spacing-vertical) * .5) var(--pico-nav-element-spacing-horizontal)}aside li a{display:block}aside li [role=button]{margin:inherit}[dir=rtl] nav[aria-label=breadcrumb] ul li:not(:last-child) ::after{content:"\\"}progress{display:inline-block;vertical-align:baseline}progress{-webkit-appearance:none;-moz-appearance:none;display:inline-block;appearance:none;width:100%;height:.5rem;margin-bottom:calc(var(--pico-spacing) * .5);overflow:hidden;border:0;border-radius:var(--pico-border-radius);background-color:var(--pico-progress-background-color);color:var(--pico-progress-color)}progress::-webkit-progress-bar{border-radius:var(--pico-border-radius);background:0 0}progress[value]::-webkit-progress-value{background-color:var(--pico-progress-color);-webkit-transition:inline-size var(--pico-transition);transition:inline-size var(--pico-transition)}progress::-moz-progress-bar{background-color:var(--pico-progress-color)}@media (prefers-reduced-motion:no-preference){progress:indeterminate{background:var(--pico-progress-background-color) linear-gradient(to right,var(--pico-progress-color) 30%,var(--pico-progress-background-color) 30%) top left/150% 150% no-repeat;animation:progress-indeterminate 1s linear infinite}progress:indeterminate[value]::-webkit-progress-value{background-color:transparent}progress:indeterminate::-moz-progress-bar{background-color:transparent}}@media (prefers-reduced-motion:no-preference){[dir=rtl] progress:indeterminate{animation-direction:reverse}}@keyframes progress-indeterminate{0%{background-position:200% 0}100%{background-position:-200% 0}}[data-tooltip]{position:relative}[data-tooltip]:not(a,button,input,[role=button]){border-bottom:1px dotted;text-decoration:none;cursor:help}[data-tooltip]::after,[data-tooltip]::before,[data-tooltip][data-placement=top]::after,[data-tooltip][data-placement=top]::before{display:block;z-index:99;position:absolute;bottom:100%;left:50%;padding:.25rem .5rem;overflow:hidden;transform:translate(-50%,-.25rem);border-radius:var(--pico-border-radius);background:var(--pico-tooltip-background-color);content:attr(data-tooltip);color:var(--pico-tooltip-color);font-style:normal;font-weight:var(--pico-font-weight);font-size:.875rem;text-decoration:none;text-overflow:ellipsis;white-space:nowrap;opacity:0;pointer-events:none}[data-tooltip]::after,[data-tooltip][data-placement=top]::after{padding:0;transform:translate(-50%,0);border-top:.3rem solid;border-right:.3rem solid transparent;border-left:.3rem solid transparent;border-radius:0;background-color:transparent;content:"";color:var(--pico-tooltip-background-color)}[data-tooltip][data-placement=bottom]::after,[data-tooltip][data-placement=bottom]::before{top:100%;bottom:auto;transform:translate(-50%,.25rem)}[data-tooltip][data-placement=bottom]:after{transform:translate(-50%,-.3rem);border:.3rem solid transparent;border-bottom:.3rem 
solid}[data-tooltip][data-placement=left]::after,[data-tooltip][data-placement=left]::before{top:50%;right:100%;bottom:auto;left:auto;transform:translate(-.25rem,-50%)}[data-tooltip][data-placement=left]:after{transform:translate(.3rem,-50%);border:.3rem solid transparent;border-left:.3rem solid}[data-tooltip][data-placement=right]::after,[data-tooltip][data-placement=right]::before{top:50%;right:auto;bottom:auto;left:100%;transform:translate(.25rem,-50%)}[data-tooltip][data-placement=right]:after{transform:translate(-.3rem,-50%);border:.3rem solid transparent;border-right:.3rem solid}[data-tooltip]:focus::after,[data-tooltip]:focus::before,[data-tooltip]:hover::after,[data-tooltip]:hover::before{opacity:1}@media (hover:hover) and (pointer:fine){[data-tooltip]:focus::after,[data-tooltip]:focus::before,[data-tooltip]:hover::after,[data-tooltip]:hover::before{--pico-tooltip-slide-to:translate(-50%, -0.25rem);transform:translate(-50%,.75rem);animation-duration:.2s;animation-fill-mode:forwards;animation-name:tooltip-slide;opacity:0}[data-tooltip]:focus::after,[data-tooltip]:hover::after{--pico-tooltip-caret-slide-to:translate(-50%, 0rem);transform:translate(-50%,-.25rem);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=bottom]:focus::after,[data-tooltip][data-placement=bottom]:focus::before,[data-tooltip][data-placement=bottom]:hover::after,[data-tooltip][data-placement=bottom]:hover::before{--pico-tooltip-slide-to:translate(-50%, 0.25rem);transform:translate(-50%,-.75rem);animation-name:tooltip-slide}[data-tooltip][data-placement=bottom]:focus::after,[data-tooltip][data-placement=bottom]:hover::after{--pico-tooltip-caret-slide-to:translate(-50%, -0.3rem);transform:translate(-50%,-.5rem);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=left]:focus::after,[data-tooltip][data-placement=left]:focus::before,[data-tooltip][data-placement=left]:hover::after,[data-tooltip][data-placement=left]:hover::before{--pico-tooltip-slide-to:translate(-0.25rem, -50%);transform:translate(.75rem,-50%);animation-name:tooltip-slide}[data-tooltip][data-placement=left]:focus::after,[data-tooltip][data-placement=left]:hover::after{--pico-tooltip-caret-slide-to:translate(0.3rem, -50%);transform:translate(.05rem,-50%);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=right]:focus::after,[data-tooltip][data-placement=right]:focus::before,[data-tooltip][data-placement=right]:hover::after,[data-tooltip][data-placement=right]:hover::before{--pico-tooltip-slide-to:translate(0.25rem, -50%);transform:translate(-.75rem,-50%);animation-name:tooltip-slide}[data-tooltip][data-placement=right]:focus::after,[data-tooltip][data-placement=right]:hover::after{--pico-tooltip-caret-slide-to:translate(-0.3rem, -50%);transform:translate(-.05rem,-50%);animation-name:tooltip-caret-slide}}@keyframes tooltip-slide{to{transform:var(--pico-tooltip-slide-to);opacity:1}}@keyframes tooltip-caret-slide{50%{opacity:0}to{transform:var(--pico-tooltip-caret-slide-to);opacity:1}}[aria-controls]{cursor:pointer}[aria-disabled=true],[disabled]{cursor:not-allowed}[aria-hidden=false][hidden]{display:initial}[aria-hidden=false][hidden]:not(:focus){clip:rect(0,0,0,0);position:absolute}[tabindex],a,area,button,input,label,select,summary,textarea{-ms-touch-action:manipulation}[dir=rtl]{direction:rtl}@media 
(prefers-reduced-motion:reduce){:not([aria-busy=true]),:not([aria-busy=true])::after,:not([aria-busy=true])::before{background-attachment:initial!important;animation-duration:1ms!important;animation-delay:-1ms!important;animation-iteration-count:1!important;scroll-behavior:auto!important;transition-delay:0s!important;transition-duration:0s!important}}
+224
www/index.html
+224
www/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
4
+
<head>
5
+
<meta charset="utf-8">
6
+
<meta name="viewport" content="width=device-width, initial-scale=1">
7
+
<meta name="color-scheme" content="light dark">
8
+
9
+
<!-- Primary Meta Tags -->
10
+
<title>QuickDID - AT Protocol Identity Resolution Service</title>
11
+
<meta name="title" content="QuickDID - AT Protocol Identity Resolution Service">
12
+
<meta name="description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
13
+
<meta name="keywords" content="ATProtocol, Bluesky, DID, handle resolution, decentralized identity, atproto">
14
+
<meta name="author" content="Nick Gerakines">
15
+
16
+
<!-- Open Graph / Facebook -->
17
+
<meta property="og:type" content="website">
18
+
<meta property="og:url" content="https://quickdid.smokesignal.tools/">
19
+
<meta property="og:title" content="QuickDID - AT Protocol Identity Resolution Service">
20
+
<meta property="og:description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
21
+
<meta property="og:site_name" content="QuickDID">
22
+
23
+
<!-- Twitter -->
24
+
<meta property="twitter:card" content="summary_large_image">
25
+
<meta property="twitter:url" content="https://quickdid.smokesignal.tools/">
26
+
<meta property="twitter:title" content="QuickDID - AT Protocol Identity Resolution Service">
27
+
<meta property="twitter:description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
28
+
29
+
<!-- Additional Meta Tags -->
30
+
<meta name="robots" content="index, follow">
31
+
<meta name="language" content="English">
32
+
<meta name="theme-color" content="#1976d2">
33
+
<link rel="canonical" href="https://quickdid.smokesignal.tools/">
34
+
35
+
<!-- Stylesheet -->
36
+
<link rel="stylesheet" href="/css/pico.classless.green.min.css">
37
+
<style>
38
+
.resolver-form {
39
+
margin: 2rem 0;
40
+
padding: 1.5rem;
41
+
background: var(--pico-card-background-color);
42
+
border-radius: var(--pico-border-radius);
43
+
border: 1px solid var(--pico-muted-border-color);
44
+
}
45
+
46
+
.resolver-result {
47
+
margin-top: 1rem;
48
+
padding: 1.5rem;
49
+
background: var(--pico-code-background-color);
50
+
border-radius: var(--pico-border-radius);
51
+
border: 1px solid var(--pico-muted-border-color);
52
+
}
53
+
54
+
.result-content {
55
+
background: transparent;
56
+
padding: 1rem;
57
+
overflow-x: auto;
58
+
white-space: pre-wrap;
59
+
word-break: break-word;
60
+
}
61
+
62
+
code {
63
+
padding: 0.25rem 0.5rem;
64
+
background: var(--pico-code-background-color);
65
+
border-radius: var(--pico-border-radius);
66
+
}
67
+
68
+
span {
69
+
display: inline-block;
70
+
padding: 0.25rem 0.5rem;
71
+
background: var(--pico-primary);
72
+
color: var(--pico-primary-inverse);
73
+
border-radius: var(--pico-border-radius);
74
+
font-size: 0.875rem;
75
+
font-weight: bold;
76
+
margin-right: 0.5rem;
77
+
}
78
+
79
+
.endpoint-section {
80
+
margin-bottom: 3rem;
81
+
}
82
+
</style>
83
+
</head>
84
+
85
+
<body>
86
+
<header>
87
+
<hgroup>
88
+
<h1>QuickDID</h1>
89
+
<p>AT Protocol Identity Resolution Service</p>
90
+
</hgroup>
91
+
</header>
92
+
<main>
93
+
<p>QuickDID provides high-performance resolution services for the AT Protocol ecosystem.</p>
94
+
95
+
<h2>Available Endpoints</h2>
96
+
97
+
<section class="endpoint-section">
98
+
<h3>GET /xrpc/com.atproto.identity.resolveHandle</h3>
99
+
<p>Resolve an AT Protocol handle to its DID</p>
100
+
<p>Parameters: <code>?handle={handle}</code></p>
101
+
102
+
<h4>Try It Out</h4>
103
+
<form id="handleResolveForm" class="resolver-form">
104
+
<label for="handleInput">
105
+
Enter an AT Protocol handle to resolve:
106
+
<input type="text" id="handleInput" name="handle" placeholder="e.g., alice.bsky.social" required>
107
+
</label>
108
+
<button type="submit">Resolve Handle</button>
109
+
</form>
110
+
111
+
<div id="handleResult" class="resolver-result" style="display: none;">
112
+
<h4>Result</h4>
113
+
<pre id="handleResultContent" class="result-content"></pre>
114
+
</div>
115
+
116
+
<h4>Example Usage</h4>
117
+
<code>curl "https://quickdid.smokesignal.tools/xrpc/com.atproto.identity.resolveHandle?handle=ngerakines.me"</code>
118
+
</section>
119
+
120
+
<section class="endpoint-section">
121
+
<h3>GET /xrpc/com.atproto.lexicon.resolveLexicon</h3>
122
+
<p>Resolve an AT Protocol lexicon (NSID) to its schema</p>
123
+
<p>Parameters: <code>?nsid={nsid}</code></p>
124
+
125
+
<h4>Try It Out</h4>
126
+
<form id="lexiconResolveForm" class="resolver-form">
127
+
<label for="nsidInput">
128
+
Enter an AT Protocol NSID to resolve:
129
+
<input type="text" id="nsidInput" name="nsid" placeholder="e.g., app.bsky.feed.post" required>
130
+
</label>
131
+
<button type="submit">Resolve Lexicon</button>
132
+
</form>
133
+
134
+
<div id="lexiconResult" class="resolver-result" style="display: none;">
135
+
<h4>Result</h4>
136
+
<pre id="lexiconResultContent" class="result-content"></pre>
137
+
</div>
138
+
139
+
<h4>Example Usage</h4>
140
+
<code>curl "https://quickdid.smokesignal.tools/xrpc/com.atproto.lexicon.resolveLexicon?nsid=app.bsky.feed.post"</code>
141
+
</section>
142
+
143
+
<h2>Documentation</h2>
144
+
<p>
145
+
For more information, visit the
146
+
<a href="https://tangled.sh/@smokesignal.events/quickdid" target="_blank">
147
+
QuickDID repository
148
+
</a>
149
+
.
150
+
</p>
151
+
</main>
152
+
153
+
<script>
154
+
// Handle form submission for handle resolution
155
+
document.getElementById('handleResolveForm').addEventListener('submit', async (e) => {
156
+
e.preventDefault();
157
+
158
+
const handle = document.getElementById('handleInput').value.trim();
159
+
const resultDiv = document.getElementById('handleResult');
160
+
const resultContent = document.getElementById('handleResultContent');
161
+
162
+
// Show loading state
163
+
resultDiv.style.display = 'block';
164
+
resultContent.textContent = 'Loading...';
165
+
166
+
try {
167
+
// Build the request URL
168
+
const url = `/xrpc/com.atproto.identity.resolveHandle?handle=${encodeURIComponent(handle)}`;
169
+
170
+
// Make the GET request
171
+
const response = await fetch(url);
172
+
const data = await response.json();
173
+
174
+
// Display the result
175
+
if (response.ok) {
176
+
resultContent.textContent = JSON.stringify(data, null, 2);
177
+
resultContent.style.color = '';
178
+
} else {
179
+
resultContent.textContent = `Error: ${JSON.stringify(data, null, 2)}`;
180
+
resultContent.style.color = '#d32f2f';
181
+
}
182
+
} catch (error) {
183
+
resultContent.textContent = `Network Error: ${error.message}`;
184
+
resultContent.style.color = '#d32f2f';
185
+
}
186
+
});
187
+
188
+
// Handle form submission for lexicon resolution
189
+
document.getElementById('lexiconResolveForm').addEventListener('submit', async (e) => {
190
+
e.preventDefault();
191
+
192
+
const nsid = document.getElementById('nsidInput').value.trim();
193
+
const resultDiv = document.getElementById('lexiconResult');
194
+
const resultContent = document.getElementById('lexiconResultContent');
195
+
196
+
// Show loading state
197
+
resultDiv.style.display = 'block';
198
+
resultContent.textContent = 'Loading...';
199
+
200
+
try {
201
+
// Build the request URL
202
+
const url = `/xrpc/com.atproto.lexicon.resolveLexicon?nsid=${encodeURIComponent(nsid)}`;
203
+
204
+
// Make the GET request
205
+
const response = await fetch(url);
206
+
const data = await response.json();
207
+
208
+
// Display the result
209
+
if (response.ok) {
210
+
resultContent.textContent = JSON.stringify(data, null, 2);
211
+
resultContent.style.color = '';
212
+
} else {
213
+
resultContent.textContent = `Error: ${JSON.stringify(data, null, 2)}`;
214
+
resultContent.style.color = '#d32f2f';
215
+
}
216
+
} catch (error) {
217
+
resultContent.textContent = `Network Error: ${error.message}`;
218
+
resultContent.style.color = '#d32f2f';
219
+
}
220
+
});
221
+
</script>
222
+
</body>
223
+
224
+
</html>
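
Outside the browser forms above, the same two endpoints can be exercised from a small script. A minimal sketch, assuming Node 18+ (for the global fetch) and the public host used in the page's own curl examples; the exact response fields are whatever the service returns (resolveHandle is expected to include the resolved DID):

// Sketch: call the two endpoints shown on the QuickDID landing page.
// Assumes Node 18+ (global fetch) and the host from the page's curl examples.
const BASE = 'https://quickdid.smokesignal.tools';

async function resolveHandle(handle) {
  // Same endpoint the landing-page form submits to.
  const url = `${BASE}/xrpc/com.atproto.identity.resolveHandle?handle=${encodeURIComponent(handle)}`;
  const res = await fetch(url);
  if (!res.ok) throw new Error(`resolveHandle failed: HTTP ${res.status}`);
  return res.json(); // expected to contain the resolved DID
}

async function resolveLexicon(nsid) {
  const url = `${BASE}/xrpc/com.atproto.lexicon.resolveLexicon?nsid=${encodeURIComponent(nsid)}`;
  const res = await fetch(url);
  if (!res.ok) throw new Error(`resolveLexicon failed: HTTP ${res.status}`);
  return res.json(); // the resolved lexicon schema
}

// Usage mirrors the curl examples on the page.
resolveHandle('ngerakines.me').then(console.log).catch(console.error);
resolveLexicon('app.bsky.feed.post').then(console.log).catch(console.error);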