.dockerignore (+37)
···
+ # Git
+ .git
+ .gitignore
+
+ # Documentation
+ *.md
+ docs/
+ LICENSE
+
+ # Development files
+ .vscode/
+ .env
+ .env.local
+ *.log
+
+ # Build artifacts
+ target/
+ Dockerfile
+ .dockerignore
+
+ # Test files
+ tests/
+ benches/
+
+ # Scripts (except the ones we need)
+ *.sh
+
+ # SQLite databases
+ *.db
+ *.db-*
+
+ # OS files
+ .DS_Store
+ Thumbs.db
+
+ # Keep the www directory for static files
+ !www/
.env.example (+12 -8)
···
  # QuickDID Environment Configuration Template
  # Copy this file to .env and customize for your deployment
- #
- # IMPORTANT: Never commit .env files with real SERVICE_KEY values

  # ============================================================================
  # REQUIRED CONFIGURATION
···
  # - quickdid.example.com:8080
  # - localhost:3007
  HTTP_EXTERNAL=quickdid.example.com
-
- # Private key for service identity (REQUIRED)
- # SECURITY: Generate a new key for each environment
- # NEVER commit real keys to version control
- SERVICE_KEY=did:key:YOUR_PRIVATE_KEY_HERE

  # ============================================================================
  # NETWORK CONFIGURATION
···
  QUEUE_BUFFER_SIZE=1000

  # ============================================================================
+ # STATIC FILES CONFIGURATION
+ # ============================================================================
+
+ # Directory for serving static files (default: www)
+ # This should contain:
+ # - index.html (landing page)
+ # - .well-known/atproto-did (service DID)
+ # - .well-known/did.json (DID document)
+ # Docker default: /app/www
+ STATIC_FILES_DIR=www
+
+ # ============================================================================
  # LOGGING
  # ============================================================================
···
  # ============================================================================

  # HTTP_EXTERNAL=localhost:3007
- # SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
  # RUST_LOG=debug
  # CACHE_TTL_MEMORY=60
  # CACHE_TTL_REDIS=300
.gitignore (+3)
CHANGELOG.md (+77)
···

  ## [Unreleased]

+ ## [1.0.0-rc.5] - 2025-09-10
+
+ ### Added
+ - Bidirectional caching support for handle-to-DID and DID-to-handle lookups in Redis resolver
+ - `purge` method to HandleResolver trait for removing entries by handle or DID
+ - `set` method to HandleResolver trait for manual cache updates
+ - Jetstream consumer integration for real-time cache updates from AT Protocol firehose
+ - QuickDidEventHandler module for processing Account and Identity events
+ - Static file serving with www directory support for landing page and well-known files
+ - Comprehensive test coverage for new bidirectional cache operations
+
+ ### Changed
+ - Handle normalization to lowercase throughout the system for consistency
+ - Updated all resolver implementations to chain `purge` and `set` calls through the stack
+ - Enhanced documentation to reflect Jetstream configuration and bidirectional caching
+ - Improved production deployment guide with real-time sync recommendations
+
+ ### Fixed
+ - Handle case sensitivity issues - all handles now normalized to lowercase
+ - Cache consistency between handle and DID lookups
+ - Event processing error handling in Jetstream consumer
+
+ ## [1.0.0-rc.4] - 2025-09-08
+
+ ### Added
+ - Metrics system with pluggable adapters (StatsD support) for monitoring and observability
+ - Proactive refresh resolver for keeping cached entries fresh before expiration
+ - Redis queue deduplication to prevent duplicate handle resolution work items
+ - Configurable bind address for StatsD UDP socket supporting both IPv4 and IPv6
+ - CORS headers support for cross-origin requests
+ - OPTIONS method handling for preflight requests
+ - Resolution timing measurements for performance monitoring
+ - Comprehensive metrics tracking including counters, gauges, and timings
+ - Telegraf and TimescaleDB integration guide for metrics aggregation
+ - Railway deployment resources for production environments
+
+ ### Changed
+ - Replaced chrono with httpdate for more efficient HTTP date formatting
+ - Refactored handle resolver to include resolution time measurements
+ - Improved handle resolution view architecture
+ - Enhanced documentation with metrics configuration and deployment guides
+
+ ### Fixed
+ - Minor typo in feature commit message ("fesature" corrected to "feature")
+
+ ## [1.0.0-rc.3] - 2025-09-06
+
+ ### Added
+ - SQLite support for persistent caching and queue processing
+ - Rate limiting with semaphore-based concurrency control (`RESOLVER_MAX_CONCURRENT`)
+ - Timeout support for rate limit permit acquisition (`RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`)
+ - SQLite queue adapter with work shedding capabilities (`QUEUE_SQLITE_MAX_SIZE`)
+ - Comprehensive error system with unique identifiers (e.g., `error-quickdid-config-1`)
+ - 12-factor app compliance with environment-only configuration
+
+ ### Changed
+ - Configuration now exclusively uses environment variables (removed clap dependency)
+ - Command-line arguments limited to `--version` and `--help` only
+ - Improved error handling with strongly-typed errors using `thiserror` throughout
+ - Enhanced documentation with accurate configuration defaults and examples
+ - Updated README with complete architecture overview and deployment strategies
+ - Cache priority system: Redis → SQLite → Memory (first available)
+
+ ### Fixed
+ - Error messages now consistently follow `error-quickdid-<domain>-<number>` format
+ - Configuration validation for all TTL and timeout values
+ - Documentation inconsistencies in CLAUDE.md development guide
+ - Queue adapter validation to include 'sqlite' option
+
+ ### Removed
+ - `clap` crate dependency (replaced with simple argument handling)
+ - `anyhow!()` macro usage in favor of proper error types
+ - Command-line configuration options (following 12-factor methodology)
+
  ## [1.0.0-rc.2] - 2025-09-05

  ### Changed
···
  - Unnecessary feature flags (axum macros, deadpool-redis script)
  - 4 unused dependencies reducing compilation time

+ [1.0.0-rc.5]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.5
+ [1.0.0-rc.4]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.4
+ [1.0.0-rc.3]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.3
  [1.0.0-rc.2]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.2
  [1.0.0-rc.1]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.1
CLAUDE.md (+164 -33)
···
  # QuickDID - Development Guide for Claude

  ## Overview
- QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides handle-to-DID resolution with Redis-backed caching and queue processing.
+ QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides bidirectional handle-to-DID and DID-to-handle resolution with multi-layer caching (Redis, SQLite, in-memory), queue processing, metrics support, proactive cache refreshing, and real-time cache updates via Jetstream consumer.
+
+ ## Configuration
+
+ QuickDID follows the 12-factor app methodology and uses environment variables exclusively for configuration. There are no command-line arguments except for `--version` and `--help`.
+
+ Configuration is validated at startup, and the service will exit with specific error codes if validation fails:
+ - `error-quickdid-config-1`: Missing required environment variable
+ - `error-quickdid-config-2`: Invalid configuration value
+ - `error-quickdid-config-3`: Invalid TTL value (must be positive)
+ - `error-quickdid-config-4`: Invalid timeout value (must be positive)
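The environment-only pattern described above amounts to `std::env` reads with fail-fast validation. A minimal sketch of that startup check, assuming a hypothetical `Config` struct and `load` helper rather than QuickDID's actual loader:

```rust
use std::env;

// Hypothetical config loader illustrating the error-code convention above.
#[derive(Debug)]
struct Config {
    http_external: String,
    http_port: u16,
}

fn load() -> Result<Config, String> {
    // error-quickdid-config-1: missing required environment variable
    let http_external = env::var("HTTP_EXTERNAL").map_err(|_| {
        "error-quickdid-config-1 Missing required environment variable: HTTP_EXTERNAL".to_string()
    })?;

    // error-quickdid-config-2: invalid configuration value
    let http_port = env::var("HTTP_PORT")
        .unwrap_or_else(|_| "8080".to_string())
        .parse::<u16>()
        .map_err(|_| "error-quickdid-config-2 Invalid configuration value: HTTP_PORT".to_string())?;

    Ok(Config { http_external, http_port })
}

fn main() {
    // Validate at startup and exit with an error identifier on failure.
    match load() {
        Ok(config) => println!("configured for {} on port {}", config.http_external, config.http_port),
        Err(message) => {
            eprintln!("{message}");
            std::process::exit(1);
        }
    }
}
```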

  ## Common Commands

···
  # Build the project
  cargo build

- # Run in debug mode
- cargo run
+ # Run in debug mode (requires environment variables)
+ HTTP_EXTERNAL=localhost:3007 cargo run

  # Run tests
  cargo test
···
  # Type checking
  cargo check

- # Run with environment variables
- HTTP_EXTERNAL=localhost:3007 SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK cargo run
+ # Linting
+ cargo clippy
+
+ # Show version
+ cargo run -- --version
+
+ # Show help
+ cargo run -- --help
  ```

  ### Development with VS Code
···

  ### Core Components

- 1. **Handle Resolution** (`src/handle_resolver.rs`)
+ 1. **Handle Resolution** (`src/handle_resolver/`)
    - `BaseHandleResolver`: Core resolution using DNS and HTTP
-   - `CachingHandleResolver`: In-memory caching layer
-   - `RedisHandleResolver`: Redis-backed persistent caching with 90-day TTL
+   - `RateLimitedHandleResolver`: Semaphore-based rate limiting with optional timeout
+   - `CachingHandleResolver`: In-memory caching layer with bidirectional support
+   - `RedisHandleResolver`: Redis-backed persistent caching with bidirectional lookups
+   - `SqliteHandleResolver`: SQLite-backed persistent caching with bidirectional support
+   - `ProactiveRefreshResolver`: Automatically refreshes cache entries before expiration
+   - All resolvers implement `HandleResolver` trait with:
+     - `resolve`: Handle-to-DID resolution
+     - `purge`: Remove entries by handle or DID
+     - `set`: Manually update handle-to-DID mappings
    - Uses binary serialization via `HandleResolutionResult` for space efficiency
+   - Resolution stack: Cache → ProactiveRefresh (optional) → RateLimited (optional) → Base → DNS/HTTP
+   - Includes resolution timing measurements for metrics
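A rough sketch of the trait shape those three operations imply; the real definitions in `src/handle_resolver/` may differ in exact signatures, and `ResolverError` here is a placeholder:

```rust
use async_trait::async_trait;

// Placeholder error type; the real code uses per-domain thiserror enums.
#[derive(Debug)]
pub struct ResolverError(pub String);

// Sketch of the HandleResolver trait described above. Each caching layer
// wraps an inner resolver, so resolve/purge/set chain down the stack.
#[async_trait]
pub trait HandleResolver: Send + Sync {
    /// Handle-to-DID resolution.
    async fn resolve(&self, handle: &str) -> Result<String, ResolverError>;

    /// Remove cached entries by handle or DID, in both directions.
    async fn purge(&self, handle_or_did: &str) -> Result<(), ResolverError>;

    /// Manually update a handle-to-DID mapping (handles lowercased first).
    async fn set(&self, handle: &str, did: &str) -> Result<(), ResolverError>;
}
```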

  2. **Binary Serialization** (`src/handle_resolution_result.rs`)
    - Compact storage format using bincode
    - Strips DID prefixes for did:web and did:plc methods
    - Stores: timestamp (u64), method type (i16), payload (String)
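Assuming the record is exactly those three fields, the bincode round-trip looks roughly like this (field names, the method tag value, and the bincode 1.x API are illustrative assumptions):

```rust
use serde::{Deserialize, Serialize};

// Sketch of the compact cache record: storing "did:plc:abc123" as a method
// tag plus payload "abc123" drops the constant prefix from every entry.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct HandleResolutionResult {
    timestamp: u64,  // seconds since the Unix epoch
    method: i16,     // DID method tag (e.g. one value for did:plc, another for did:web)
    payload: String, // DID body with the method prefix stripped
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let record = HandleResolutionResult {
        timestamp: 1_725_000_000,
        method: 1, // hypothetical tag for did:plc
        payload: "abc123".to_string(),
    };
    let bytes = bincode::serialize(&record)?;
    let decoded: HandleResolutionResult = bincode::deserialize(&bytes)?;
    assert_eq!(record, decoded);
    println!("{} bytes on the wire", bytes.len());
    Ok(())
}
```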

- 3. **Queue System** (`src/queue_adapter.rs`)
-   - Supports MPSC (in-process) and Redis adapters
+ 3. **Queue System** (`src/queue/`)
+   - Supports MPSC (in-process), Redis, SQLite, and no-op adapters
    - `HandleResolutionWork` items processed asynchronously
    - Redis uses reliable queue pattern (LPUSH/RPOPLPUSH/LREM)
+   - SQLite provides persistent queue with work shedding capabilities

  4. **HTTP Server** (`src/http/`)
    - XRPC endpoints for AT Protocol compatibility
    - Health check endpoint
-   - DID document serving via .well-known
+   - Static file serving from configurable directory (default: www)
+   - Serves .well-known files as static content
+   - CORS headers support for cross-origin requests
+   - Cache-Control headers with configurable max-age and stale directives
+   - ETag support with configurable seed for cache invalidation
+
+ 5. **Metrics System** (`src/metrics.rs`)
+   - Pluggable metrics publishing with StatsD support
+   - Tracks counters, gauges, and timings
+   - Configurable tags for environment/service identification
+   - No-op adapter for development environments
+   - Metrics for Jetstream event processing
+
+ 6. **Jetstream Consumer** (`src/jetstream_handler.rs`)
+   - Consumes AT Protocol firehose events via WebSocket
+   - Processes Account events (purges deleted/deactivated accounts)
+   - Processes Identity events (updates handle-to-DID mappings)
+   - Automatic reconnection with exponential backoff
+   - Comprehensive metrics for event processing
+   - Spawned as cancellable task using task manager
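The Account/Identity split in the Jetstream consumer might look like the sketch below. `JetstreamEvent` is a hypothetical stand-in for whatever `atproto-jetstream` actually delivers, and the real handler drives the resolver stack's `purge`/`set` rather than a bare map:

```rust
use std::collections::HashMap;

// Hypothetical event shapes standing in for atproto-jetstream's real types.
enum JetstreamEvent {
    Account { did: String, active: bool },
    Identity { did: String, handle: String },
}

// Deactivated or deleted accounts get purged; Identity events refresh the
// handle-to-DID mapping, with handles normalized to lowercase.
fn handle_event(event: JetstreamEvent, cache: &mut HashMap<String, String>) {
    match event {
        JetstreamEvent::Account { did, active } if !active => {
            cache.retain(|_, cached_did| *cached_did != did);
        }
        JetstreamEvent::Account { .. } => {} // active account: nothing to purge
        JetstreamEvent::Identity { did, handle } => {
            cache.insert(handle.to_lowercase(), did);
        }
    }
}

fn main() {
    let mut cache = HashMap::new();
    handle_event(
        JetstreamEvent::Identity {
            did: "did:plc:abc123".into(),
            handle: "Alice.example.com".into(),
        },
        &mut cache,
    );
    assert_eq!(cache.get("alice.example.com").map(String::as_str), Some("did:plc:abc123"));
}
```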

  ## Key Technical Details

···
  - Other DID methods stored with full identifier

  ### Redis Integration
- - **Caching**: Uses MetroHash64 for key generation, stores binary data
+ - **Bidirectional Caching**:
+   - Stores both handle→DID and DID→handle mappings
+   - Uses MetroHash64 for key generation
+   - Binary data storage for efficiency
+   - Automatic synchronization of both directions
  - **Queuing**: Reliable queue with processing/dead letter queues
  - **Key Prefixes**: Configurable via `QUEUE_REDIS_PREFIX` environment variable

  ### Handle Resolution Flow
- 1. Check Redis cache (if configured)
- 2. Fall back to in-memory cache
+ 1. Check cache (Redis/SQLite/in-memory based on configuration)
+ 2. If cache miss and rate limiting enabled:
+    - Acquire semaphore permit (with optional timeout)
+    - If timeout configured and exceeded, return error
  3. Perform DNS TXT lookup or HTTP well-known query
- 4. Cache result with appropriate TTL
+ 4. Cache result with appropriate TTL in both directions (handle→DID and DID→handle)
  5. Return DID or error
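Step 2 is plain tokio semaphore gating. A minimal sketch; the permit count and timeout below are example values standing in for `RESOLVER_MAX_CONCURRENT` and `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`:

```rust
use std::time::Duration;
use tokio::sync::Semaphore;
use tokio::time::timeout;

#[tokio::main]
async fn main() {
    // Example: at most 2 resolutions in flight at once.
    let limiter = Semaphore::new(2);

    // Example: wait at most 250ms for a permit before giving up.
    match timeout(Duration::from_millis(250), limiter.acquire()).await {
        Ok(Ok(_permit)) => {
            // Permit held for this scope: perform the DNS/HTTP resolution here.
            println!("permit acquired, resolving...");
        }
        Ok(Err(_closed)) => eprintln!("semaphore closed"),
        Err(_elapsed) => eprintln!("rate limit permit acquisition timed out"),
    }
}
```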

+ ### Cache Management Operations
+ - **Purge**: Removes entries by either handle or DID
+   - Uses `atproto_identity::resolve::parse_input` for identifier detection
+   - Removes both handle→DID and DID→handle mappings
+   - Chains through all resolver layers
+ - **Set**: Manually updates handle-to-DID mappings
+   - Updates both directions in cache
+   - Normalizes handles to lowercase
+   - Chains through all resolver layers
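The bidirectional invariant behind `purge` and `set` is that every write and delete touches both maps. A toy in-memory version (the real layers do the same bookkeeping against Redis and SQLite, with TTLs):

```rust
use std::collections::HashMap;

// Toy bidirectional cache: set and purge always touch both directions.
#[derive(Default)]
struct BidiCache {
    handle_to_did: HashMap<String, String>,
    did_to_handle: HashMap<String, String>,
}

impl BidiCache {
    fn set(&mut self, handle: &str, did: &str) {
        let handle = handle.to_lowercase(); // normalization rule
        self.handle_to_did.insert(handle.clone(), did.to_string());
        self.did_to_handle.insert(did.to_string(), handle);
    }

    // Accepts either a handle or a DID and removes both mappings.
    fn purge(&mut self, subject: &str) {
        if let Some(did) = self.handle_to_did.remove(&subject.to_lowercase()) {
            self.did_to_handle.remove(&did);
        } else if let Some(handle) = self.did_to_handle.remove(subject) {
            self.handle_to_did.remove(&handle);
        }
    }
}

fn main() {
    let mut cache = BidiCache::default();
    cache.set("Alice.example.com", "did:plc:abc123");
    cache.purge("did:plc:abc123"); // purging by DID also drops the handle entry
    assert!(cache.handle_to_did.is_empty() && cache.did_to_handle.is_empty());
}
```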
+
  ## Environment Variables

  ### Required
  - `HTTP_EXTERNAL`: External hostname for service endpoints (e.g., `localhost:3007`)
- - `SERVICE_KEY`: Private key for service identity (DID format)

- ### Optional
+ ### Optional - Core Configuration
  - `HTTP_PORT`: Server port (default: 8080)
  - `PLC_HOSTNAME`: PLC directory hostname (default: plc.directory)
+ - `RUST_LOG`: Logging level (e.g., debug, info)
+ - `STATIC_FILES_DIR`: Directory for serving static files (default: www)
+
+ ### Optional - Caching
  - `REDIS_URL`: Redis connection URL for caching
- - `QUEUE_ADAPTER`: Queue type - 'mpsc' or 'redis' (default: mpsc)
+ - `SQLITE_URL`: SQLite database URL for caching (e.g., `sqlite:./quickdid.db`)
+ - `CACHE_TTL_MEMORY`: TTL for in-memory cache in seconds (default: 600)
+ - `CACHE_TTL_REDIS`: TTL for Redis cache in seconds (default: 7776000)
+ - `CACHE_TTL_SQLITE`: TTL for SQLite cache in seconds (default: 7776000)
+
+ ### Optional - Queue Configuration
+ - `QUEUE_ADAPTER`: Queue type - 'mpsc', 'redis', 'sqlite', 'noop', or 'none' (default: mpsc)
  - `QUEUE_REDIS_PREFIX`: Redis key prefix for queues (default: queue:handleresolver:)
- - `QUEUE_WORKER_ID`: Worker ID for Redis queue (auto-generated if not set)
- - `RUST_LOG`: Logging level (e.g., debug, info)
+ - `QUEUE_WORKER_ID`: Worker ID for queue operations (default: worker1)
+ - `QUEUE_BUFFER_SIZE`: Buffer size for MPSC queue (default: 1000)
+ - `QUEUE_SQLITE_MAX_SIZE`: Max queue size for SQLite work shedding (default: 10000)
+ - `QUEUE_REDIS_TIMEOUT`: Redis blocking timeout in seconds (default: 5)
+ - `QUEUE_REDIS_DEDUP_ENABLED`: Enable queue deduplication to prevent duplicate handles (default: false)
+ - `QUEUE_REDIS_DEDUP_TTL`: TTL for deduplication keys in seconds (default: 60)
+
+ ### Optional - Rate Limiting
+ - `RESOLVER_MAX_CONCURRENT`: Maximum concurrent handle resolutions (default: 0 = disabled)
+ - `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`: Timeout for acquiring rate limit permit in ms (default: 0 = no timeout)
+
+ ### Optional - HTTP Cache Control
+ - `CACHE_MAX_AGE`: Max-age for Cache-Control header in seconds (default: 86400)
+ - `CACHE_STALE_IF_ERROR`: Stale-if-error directive in seconds (default: 172800)
+ - `CACHE_STALE_WHILE_REVALIDATE`: Stale-while-revalidate directive in seconds (default: 86400)
+ - `CACHE_MAX_STALE`: Max-stale directive in seconds (default: 86400)
+ - `ETAG_SEED`: Seed value for ETag generation (default: application version)
+
+ ### Optional - Metrics
+ - `METRICS_ADAPTER`: Metrics adapter type - 'noop' or 'statsd' (default: noop)
+ - `METRICS_STATSD_HOST`: StatsD host and port (required when METRICS_ADAPTER=statsd, e.g., localhost:8125)
+ - `METRICS_STATSD_BIND`: Bind address for StatsD UDP socket (default: [::]:0 for IPv6, can use 0.0.0.0:0 for IPv4)
+ - `METRICS_PREFIX`: Prefix for all metrics (default: quickdid)
+ - `METRICS_TAGS`: Comma-separated tags (e.g., env:prod,service:quickdid)
+
+ ### Optional - Proactive Refresh
+ - `PROACTIVE_REFRESH_ENABLED`: Enable proactive cache refreshing (default: false)
+ - `PROACTIVE_REFRESH_THRESHOLD`: Refresh when TTL remaining is below this threshold (0.0-1.0, default: 0.8)
+
+ ### Optional - Jetstream Consumer
+ - `JETSTREAM_ENABLED`: Enable Jetstream consumer for real-time cache updates (default: false)
+ - `JETSTREAM_HOSTNAME`: Jetstream WebSocket hostname (default: jetstream.atproto.tools)

  ## Error Handling

···

  error-quickdid-<domain>-<number> <message>: <details>

- Example errors:
+ Current error domains and examples:

- * error-quickdid-resolve-1 Multiple DIDs resolved for method
- * error-quickdid-plc-1 HTTP request failed: https://google.com/ Not Found
- * error-quickdid-key-1 Error decoding key: invalid
+ * `config`: Configuration errors (e.g., error-quickdid-config-1 Missing required environment variable)
+ * `resolve`: Handle resolution errors (e.g., error-quickdid-resolve-1 Failed to resolve subject)
+ * `queue`: Queue operation errors (e.g., error-quickdid-queue-1 Failed to push to queue)
+ * `cache`: Cache-related errors (e.g., error-quickdid-cache-1 Redis pool creation failed)
+ * `result`: Serialization errors (e.g., error-quickdid-result-1 System time error)
+ * `task`: Task processing errors (e.g., error-quickdid-task-1 Queue adapter health check failed)

  Errors should be represented as enums using the `thiserror` library.
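For example, a `config`-domain enum in that style might look like this; the variants and messages are illustrations of the documented format, not the crate's actual definitions:

```rust
use thiserror::Error;

// Example enum following the error-quickdid-<domain>-<number> convention.
#[derive(Debug, Error)]
pub enum ConfigError {
    #[error("error-quickdid-config-1 Missing required environment variable: {0}")]
    MissingEnvVar(String),

    #[error("error-quickdid-config-2 Invalid configuration value: {0}")]
    InvalidValue(String),

    #[error("error-quickdid-config-3 Invalid TTL value (must be positive): {0}")]
    InvalidTtl(i64),
}

fn main() {
    let err = ConfigError::MissingEnvVar("HTTP_EXTERNAL".into());
    println!("{err}"); // error-quickdid-config-1 Missing required environment variable: HTTP_EXTERNAL
}
```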

···
  ### Test Coverage Areas
  - Handle resolution with various DID methods
  - Binary serialization/deserialization
- - Redis caching and expiration
+ - Redis caching and expiration with bidirectional lookups
  - Queue processing logic
  - HTTP endpoint responses
+ - Jetstream event handler processing
+ - Purge and set operations across resolver layers

  ## Development Patterns

  ### Error Handling
- - Uses `anyhow::Result` for error propagation
- - Graceful fallbacks when Redis is unavailable
+ - Uses strongly-typed errors with `thiserror` for all modules
+ - Each error has a unique identifier following the pattern `error-quickdid-<domain>-<number>`
+ - Graceful fallbacks when Redis/SQLite is unavailable
  - Detailed tracing for debugging
+ - Avoid using `anyhow!()` or `bail!()` macros - use proper error types instead

  ### Performance Optimizations
  - Binary serialization reduces storage by ~40%
  - MetroHash64 for fast key generation
  - Connection pooling for Redis
  - Configurable TTLs for cache entries
+ - Rate limiting via semaphore-based concurrency control
+ - HTTP caching with ETag and Cache-Control headers
+ - Resolution timing metrics for performance monitoring

  ### Code Style
  - Follow existing Rust idioms and patterns
  - Use `tracing` for logging, not `println!`
  - Prefer `Arc` for shared state across async tasks
  - Handle errors explicitly, avoid `.unwrap()` in production code
+ - Use `httpdate` crate for HTTP date formatting (not `chrono`)

  ## Common Tasks

···
  3. Add test cases for the new method type

  ### Modifying Cache TTL
- - For in-memory: Pass TTL to `CachingHandleResolver::new()`
- - For Redis: Modify `RedisHandleResolver::ttl_seconds()`
+ - For in-memory: Set `CACHE_TTL_MEMORY` environment variable
+ - For Redis: Set `CACHE_TTL_REDIS` environment variable
+ - For SQLite: Set `CACHE_TTL_SQLITE` environment variable
+
+ ### Configuring Metrics
+ 1. Set `METRICS_ADAPTER=statsd` and `METRICS_STATSD_HOST=localhost:8125`
+ 2. Configure tags with `METRICS_TAGS=env:prod,service:quickdid`
+ 3. Use Telegraf + TimescaleDB for aggregation (see `docs/telegraf-timescaledb-metrics-guide.md`)
+ 4. Railway deployment resources available in `railway-resources/telegraf/`
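Under the hood, a StatsD adapter built on `cadence` is wired up roughly like this; the sink setup and metric names are illustrative, not QuickDID's actual keys:

```rust
use std::net::UdpSocket;

use cadence::prelude::*;
use cadence::{StatsdClient, UdpMetricSink, DEFAULT_PORT};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // METRICS_STATSD_BIND analogue: local socket for outbound UDP.
    let socket = UdpSocket::bind("0.0.0.0:0")?;

    // METRICS_STATSD_HOST analogue (DEFAULT_PORT is 8125).
    let sink = UdpMetricSink::from(("localhost", DEFAULT_PORT), socket)?;

    // METRICS_PREFIX analogue: every key is emitted as quickdid.<name>.
    let client = StatsdClient::from_sink("quickdid", sink);

    client.incr("resolve.cache_hit")?;
    client.time("resolve.duration_ms", 42u64)?;
    Ok(())
}
```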

  ### Debugging Resolution Issues
  1. Enable debug logging: `RUST_LOG=debug`
- 2. Check Redis cache: `redis-cli GET "handle:<hash>"`
- 3. Monitor queue processing in logs
- 4. Verify DNS/HTTP connectivity to AT Protocol infrastructure
+ 2. Check Redis cache:
+    - Handle lookup: `redis-cli GET "handle:<hash>"`
+    - DID lookup: `redis-cli GET "handle:<hash>"` (same key format)
+ 3. Check SQLite cache: `sqlite3 quickdid.db "SELECT * FROM handle_resolution_cache;"`
+ 4. Monitor queue processing in logs
+ 5. Check rate limiting: Look for "Rate limit permit acquisition timed out" errors
+ 6. Verify DNS/HTTP connectivity to AT Protocol infrastructure
+ 7. Monitor metrics for resolution timing and cache hit rates
+ 8. Check Jetstream consumer status:
+    - Look for "Jetstream consumer" log entries
+    - Monitor `jetstream.*` metrics
+    - Check reconnection attempts in logs

  ## Dependencies
  - `atproto-identity`: Core AT Protocol identity resolution
+ - `atproto-jetstream`: AT Protocol Jetstream event consumer
  - `bincode`: Binary serialization
  - `deadpool-redis`: Redis connection pooling
  - `metrohash`: Fast non-cryptographic hashing
  - `tokio`: Async runtime
- - `axum`: Web framework
+ - `axum`: Web framework
+ - `httpdate`: HTTP date formatting (replacing chrono)
+ - `cadence`: StatsD metrics client
+ - `thiserror`: Error handling
Cargo.lock (+647 -674)
···
  version = 4

  [[package]]
- name = "addr2line"
- version = "0.24.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
- dependencies = [
- "gimli",
- ]
-
- [[package]]
- name = "adler2"
- version = "2.0.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
-
- [[package]]
  name = "aho-corasick"
- version = "1.1.3"
+ version = "1.1.4"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+ checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
  dependencies = [
  "memchr",
  ]
···
  checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"

  [[package]]
- name = "android-tzdata"
- version = "0.1.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
-
- [[package]]
- name = "android_system_properties"
- version = "0.1.5"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
- dependencies = [
- "libc",
- ]
-
- [[package]]
- name = "anstream"
- version = "0.6.20"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192"
- dependencies = [
- "anstyle",
- "anstyle-parse",
- "anstyle-query",
- "anstyle-wincon",
- "colorchoice",
- "is_terminal_polyfill",
- "utf8parse",
- ]
-
- [[package]]
- name = "anstyle"
- version = "1.0.11"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd"
-
- [[package]]
- name = "anstyle-parse"
- version = "0.2.7"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2"
- dependencies = [
- "utf8parse",
- ]
-
- [[package]]
- name = "anstyle-query"
- version = "1.1.4"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2"
- dependencies = [
- "windows-sys 0.60.2",
- ]
-
- [[package]]
- name = "anstyle-wincon"
- version = "3.0.10"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a"
- dependencies = [
- "anstyle",
- "once_cell_polyfill",
- "windows-sys 0.60.2",
- ]
-
- [[package]]
  name = "anyhow"
- version = "1.0.99"
+ version = "1.0.100"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
+ checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"

  [[package]]
  name = "arc-swap"
···
  dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
  ]

  [[package]]
···
  checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"

  [[package]]
+ name = "atproto-client"
+ version = "0.13.0"
+ source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+ dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-identity",
+ "atproto-oauth",
+ "atproto-record",
+ "bytes",
+ "reqwest",
+ "reqwest-chain",
+ "reqwest-middleware",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tracing",
+ "urlencoding",
+ ]
+
+ [[package]]
  name = "atproto-identity"
- version = "0.11.3"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "aaac8751c7e4329a95714c01d9e47d22d94bc8c96e78079098312235128acb9f"
+ version = "0.13.0"
+ source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
  dependencies = [
  "anyhow",
  "async-trait",
···
  "serde",
  "serde_ipld_dagcbor",
  "serde_json",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
+ "tokio",
+ "tracing",
+ "url",
+ "urlencoding",
+ ]
+
+ [[package]]
+ name = "atproto-jetstream"
+ version = "0.13.0"
+ source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+ dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-identity",
+ "futures",
+ "http",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tokio-util",
+ "tokio-websockets",
+ "tracing",
+ "tracing-subscriber",
+ "urlencoding",
+ "zstd",
+ ]
+
+ [[package]]
+ name = "atproto-lexicon"
+ version = "0.13.0"
+ source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+ dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-client",
+ "atproto-identity",
+ "hickory-resolver",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tracing",
+ ]
+
+ [[package]]
+ name = "atproto-oauth"
+ version = "0.13.0"
+ source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+ dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-identity",
+ "base64",
+ "chrono",
+ "ecdsa",
+ "elliptic-curve",
+ "k256",
+ "lru",
+ "multibase",
+ "p256",
+ "p384",
+ "rand 0.8.5",
+ "reqwest",
+ "reqwest-chain",
+ "reqwest-middleware",
+ "serde",
+ "serde_ipld_dagcbor",
+ "serde_json",
+ "sha2",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
+ "ulid",
+ ]
+
+ [[package]]
+ name = "atproto-record"
+ version = "0.13.0"
+ source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+ dependencies = [
+ "anyhow",
+ "atproto-identity",
+ "base64",
+ "chrono",
+ "cid",
+ "multihash",
+ "rand 0.8.5",
+ "serde",
+ "serde_ipld_dagcbor",
+ "serde_json",
+ "sha2",
+ "thiserror 2.0.17",
  ]

  [[package]]
···

  [[package]]
  name = "axum"
- version = "0.8.4"
+ version = "0.8.6"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5"
+ checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871"
  dependencies = [
  "axum-core",
  "bytes",
···
  "mime",
  "percent-encoding",
  "pin-project-lite",
- "rustversion",
- "serde",
+ "serde_core",
  "serde_json",
  "serde_path_to_error",
  "serde_urlencoded",
···

  [[package]]
  name = "axum-core"
- version = "0.5.2"
+ version = "0.5.5"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6"
+ checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
  dependencies = [
  "bytes",
  "futures-core",
···
  "http-body-util",
  "mime",
  "pin-project-lite",
- "rustversion",
  "sync_wrapper",
  "tower-layer",
  "tower-service",
···

  [[package]]
  name = "backon"
- version = "1.5.2"
+ version = "1.6.0"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "592277618714fbcecda9a02ba7a8781f319d26532a88553bbacc77ba5d2b3a8d"
+ checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef"
  dependencies = [
  "fastrand",
  ]

  [[package]]
- name = "backtrace"
- version = "0.3.75"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
- dependencies = [
- "addr2line",
- "cfg-if",
- "libc",
- "miniz_oxide",
- "object",
- "rustc-demangle",
- "windows-targets 0.52.6",
- ]
-
- [[package]]
  name = "base-x"
  version = "0.2.11"
  source = "registry+https://github.com/rust-lang/crates.io-index"
···
  checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"

  [[package]]
+ name = "base256emoji"
+ version = "1.0.2"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c"
+ dependencies = [
+ "const-str",
+ "match-lookup",
+ ]
+
+ [[package]]
  name = "base64"
  version = "0.22.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
···

  [[package]]
  name = "bitflags"
- version = "2.9.4"
+ version = "2.10.0"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
+ checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"
  dependencies = [
- "serde",
+ "serde_core",
  ]

  [[package]]
···
  checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"

  [[package]]
+ name = "cadence"
+ version = "1.6.0"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "3075f133bee430b7644c54fb629b9b4420346ffa275a45c81a6babe8b09b4f51"
+ dependencies = [
+ "crossbeam-channel",
+ ]
+
+ [[package]]
  name = "cbor4ii"
  version = "0.2.14"
  source = "registry+https://github.com/rust-lang/crates.io-index"
···

  [[package]]
  name = "cc"
- version = "1.2.36"
+ version = "1.2.44"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54"
+ checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3"
  dependencies = [
  "find-msvc-tools",
+ "jobserver",
+ "libc",
  "shlex",
  ]

  [[package]]
  name = "cfg-if"
- version = "1.0.3"
+ version = "1.0.4"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
+ checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"

  [[package]]
  name = "cfg_aliases"
···

  [[package]]
  name = "chrono"
- version = "0.4.41"
+ version = "0.4.42"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
+ checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2"
  dependencies = [
- "android-tzdata",
- "iana-time-zone",
  "num-traits",
- "windows-link",
+ "serde",
  ]

  [[package]]
···
  "serde_bytes",
  "unsigned-varint",
  ]
-
- [[package]]
- name = "clap"
- version = "4.5.47"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931"
- dependencies = [
- "clap_builder",
- "clap_derive",
- ]
-
- [[package]]
- name = "clap_builder"
- version = "4.5.47"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6"
- dependencies = [
- "anstream",
- "anstyle",
- "clap_lex",
- "strsim",
- ]
-
- [[package]]
- name = "clap_derive"
- version = "4.5.47"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c"
- dependencies = [
- "heck",
- "proc-macro2",
- "quote",
- "syn",
- ]
-
- [[package]]
- name = "clap_lex"
- version = "0.7.5"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
-
- [[package]]
- name = "colorchoice"
- version = "1.0.4"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"

  [[package]]
  name = "combine"
···
  checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"

  [[package]]
+ name = "const-str"
+ version = "0.4.3"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3"
+
+ [[package]]
  name = "core-foundation"
  version = "0.9.4"
  source = "registry+https://github.com/rust-lang/crates.io-index"
···
  checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
  dependencies = [
  "data-encoding",
- "syn",
+ "syn 2.0.108",
  ]

  [[package]]
···
  dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
  ]

  [[package]]
···
  "digest",
  "elliptic-curve",
  "rfc6979",
+ "serdect",
  "signature",
  "spki",
  ]
···
  "heck",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
  ]

  [[package]]
···

  [[package]]
  name = "errno"
- version = "0.3.13"
+ version = "0.3.14"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
+ checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
  dependencies = [
  "libc",
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
  ]

  [[package]]
···

  [[package]]
  name = "find-msvc-tools"
- version = "0.1.1"
+ version = "0.1.4"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d"
+ checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127"

  [[package]]
  name = "flume"
···
  ]

  [[package]]
+ name = "futures"
+ version = "0.3.31"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+ dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+ ]
+
+ [[package]]
  name = "futures-channel"
  version = "0.3.31"
  source = "registry+https://github.com/rust-lang/crates.io-index"
···
  dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
  ]

  [[package]]
···
  source = "registry+https://github.com/rust-lang/crates.io-index"
  checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
  dependencies = [
+ "futures-channel",
  "futures-core",
  "futures-io",
  "futures-macro",
···
  ]

  [[package]]
- name = "generator"
- version = "0.8.7"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2"
- dependencies = [
- "cc",
- "cfg-if",
- "libc",
- "log",
- "rustversion",
- "windows",
- ]
-
- [[package]]
  name = "generic-array"
- version = "0.14.7"
+ version = "0.14.9"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+ checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2"
  dependencies = [
  "typenum",
  "version_check",
···
  "cfg-if",
  "js-sys",
  "libc",
- "wasi 0.11.1+wasi-snapshot-preview1",
+ "wasi",
  "wasm-bindgen",
  ]

  [[package]]
  name = "getrandom"
- version = "0.3.3"
+ version = "0.3.4"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
+ checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
  dependencies = [
  "cfg-if",
  "js-sys",
  "libc",
  "r-efi",
- "wasi 0.14.3+wasi-0.2.4",
+ "wasip2",
  "wasm-bindgen",
  ]
-
- [[package]]
- name = "gimli"
- version = "0.31.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"

  [[package]]
  name = "group"
···
  ]

  [[package]]
+ name = "hashbrown"
+ version = "0.16.0"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
+
+ [[package]]
  name = "hashlink"
  version = "0.10.0"
  source = "registry+https://github.com/rust-lang/crates.io-index"
  checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
  dependencies = [
- "hashbrown",
+ "hashbrown 0.15.5",
  ]

  [[package]]
···
  "once_cell",
  "rand 0.9.2",
  "ring",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
  "tinyvec",
  "tokio",
  "tracing",
···
  "rand 0.9.2",
  "resolv-conf",
  "smallvec",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
  ]
···

  [[package]]
  name = "home"
- version = "0.5.11"
+ version = "0.5.12"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf"
+ checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d"
  dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
  ]

  [[package]]
···
  ]

  [[package]]
+ name = "http-range-header"
+ version = "0.4.2"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c"
+
+ [[package]]
  name = "httparse"
  version = "1.10.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
···

  [[package]]
  name = "hyper-util"
- version = "0.1.16"
+ version = "0.1.17"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
+ checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8"
  dependencies = [
  "base64",
  "bytes",
···
  "libc",
  "percent-encoding",
  "pin-project-lite",
- "socket2 0.6.0",
+ "socket2 0.6.1",
  "system-configuration",
  "tokio",
  "tower-service",
···
  ]

  [[package]]
- name = "iana-time-zone"
- version = "0.1.63"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
- dependencies = [
- "android_system_properties",
- "core-foundation-sys",
- "iana-time-zone-haiku",
- "js-sys",
- "log",
- "wasm-bindgen",
- "windows-core",
- ]
-
- [[package]]
- name = "iana-time-zone-haiku"
- version = "0.1.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
- dependencies = [
- "cc",
- ]
-
- [[package]]
  name = "icu_collections"
- version = "2.0.0"
+ version = "2.1.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47"
+ checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
  dependencies = [
  "displaydoc",
  "potential_utf",
···

  [[package]]
  name = "icu_locale_core"
- version = "2.0.0"
+ version = "2.1.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a"
+ checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
  dependencies = [
  "displaydoc",
  "litemap",
···

  [[package]]
  name = "icu_normalizer"
- version = "2.0.0"
+ version = "2.1.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979"
+ checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
  dependencies = [
- "displaydoc",
  "icu_collections",
  "icu_normalizer_data",
  "icu_properties",
···

  [[package]]
  name = "icu_normalizer_data"
- version = "2.0.0"
+ version = "2.1.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3"
+ checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"

  [[package]]
  name = "icu_properties"
- version = "2.0.1"
+ version = "2.1.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b"
+ checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99"
  dependencies = [
- "displaydoc",
  "icu_collections",
  "icu_locale_core",
  "icu_properties_data",
  "icu_provider",
- "potential_utf",
  "zerotrie",
  "zerovec",
  ]

  [[package]]
  name = "icu_properties_data"
- version = "2.0.1"
+ version = "2.1.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632"
+ checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899"

  [[package]]
  name = "icu_provider"
- version = "2.0.0"
+ version = "2.1.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af"
+ checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
  dependencies = [
  "displaydoc",
  "icu_locale_core",
- "stable_deref_trait",
- "tinystr",
  "writeable",
  "yoke",
  "zerofrom",
···

  [[package]]
  name = "indexmap"
- version = "2.11.0"
+ version = "2.12.0"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9"
+ checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
  dependencies = [
  "equivalent",
- "hashbrown",
- ]
-
- [[package]]
- name = "io-uring"
- version = "0.7.10"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b"
- dependencies = [
- "bitflags",
- "cfg-if",
- "libc",
+ "hashbrown 0.16.0",
  ]

  [[package]]
···
  ]

  [[package]]
- name = "is_terminal_polyfill"
- version = "1.70.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf"
-
- [[package]]
  name = "itoa"
  version = "1.0.15"
  source = "registry+https://github.com/rust-lang/crates.io-index"
  checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"

  [[package]]
+ name = "jobserver"
+ version = "0.1.34"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33"
+ dependencies = [
+ "getrandom 0.3.4",
+ "libc",
+ ]
+
+ [[package]]
  name = "js-sys"
- version = "0.3.78"
+ version = "0.3.82"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "0c0b063578492ceec17683ef2f8c5e89121fbd0b172cbc280635ab7567db2738"
+ checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65"
  dependencies = [
  "once_cell",
  "wasm-bindgen",
···

  [[package]]
  name = "libc"
- version = "0.2.175"
+ version = "0.2.177"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
+ checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"

  [[package]]
  name = "libm"
···

  [[package]]
  name = "libredox"
- version = "0.1.9"
+ version = "0.1.10"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3"
+ checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb"
  dependencies = [
  "bitflags",
  "libc",
···

  [[package]]
  name = "linux-raw-sys"
- version = "0.9.4"
+ version = "0.11.0"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
+ checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039"

  [[package]]
  name = "litemap"
- version = "0.8.0"
+ version = "0.8.1"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
+ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"

  [[package]]
  name = "lock_api"
- version = "0.4.13"
+ version = "0.4.14"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
+ checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
  dependencies = [
- "autocfg",
  "scopeguard",
  ]

···
  checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"

  [[package]]
- name = "loom"
- version = "0.7.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca"
- dependencies = [
- "cfg-if",
- "generator",
- "scoped-tls",
- "tracing",
- "tracing-subscriber",
- ]
-
- [[package]]
  name = "lru"
  version = "0.12.5"
  source = "registry+https://github.com/rust-lang/crates.io-index"
  checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
  dependencies = [
- "hashbrown",
+ "hashbrown 0.15.5",
  ]

  [[package]]
···
  version = "0.1.2"
  source = "registry+https://github.com/rust-lang/crates.io-index"
  checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
+
+ [[package]]
+ name = "match-lookup"
+ version = "0.1.1"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e"
+ dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+ ]

  [[package]]
  name = "matchers"
···

  [[package]]
  name = "memchr"
- version = "2.7.5"
+ version = "2.7.6"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
+ checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"

  [[package]]
  name = "metrohash"
···
  checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"

  [[package]]
- name = "miniz_oxide"
- version = "0.8.9"
+ name = "mime_guess"
+ version = "2.0.5"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
+ checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
  dependencies = [
- "adler2",
+ "mime",
+ "unicase",
  ]

  [[package]]
  name = "mio"
- version = "1.0.4"
+ version = "1.1.0"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
+ checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873"
  dependencies = [
  "libc",
- "wasi 0.11.1+wasi-snapshot-preview1",
- "windows-sys 0.59.0",
+ "wasi",
+ "windows-sys 0.61.2",
  ]

  [[package]]
  name = "moka"
- version = "0.12.10"
+ version = "0.12.11"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926"
+ checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077"
  dependencies = [
  "crossbeam-channel",
  "crossbeam-epoch",
  "crossbeam-utils",
- "loom",
+ "equivalent",
  "parking_lot",
  "portable-atomic",
  "rustc_version",
  "smallvec",
  "tagptr",
- "thiserror 1.0.69",
  "uuid",
  ]

  [[package]]
  name = "multibase"
- version = "0.9.1"
+ version = "0.9.2"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404"
+ checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77"
  dependencies = [
  "base-x",
+ "base256emoji",
  "data-encoding",
  "data-encoding-macro",
  ]
···

  [[package]]
  name = "nu-ansi-term"
- version = "0.50.1"
+ version = "0.50.3"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399"
+ checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
  dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.61.2",
  ]

  [[package]]
···

  [[package]]
  name = "num-bigint-dig"
- version = "0.8.4"
+ version = "0.8.5"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
+ checksum = "82c79c15c05d4bf82b6f5ef163104cc81a760d8e874d38ac50ab67c8877b647b"
  dependencies = [
- "byteorder",
  "lazy_static",
  "libm",
  "num-integer",
···
  ]

  [[package]]
- name = "object"
- version = "0.36.7"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
- dependencies = [
- "memchr",
- ]
-
- [[package]]
  name = "once_cell"
  version = "1.21.3"
  source = "registry+https://github.com/rust-lang/crates.io-index"
···
  ]

  [[package]]
- name = "once_cell_polyfill"
- version = "1.70.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad"
-
- [[package]]
  name = "openssl"
- version = "0.10.73"
+ version = "0.10.74"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
+ checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654"
  dependencies = [
  "bitflags",
  "cfg-if",
···
  dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
  ]

  [[package]]
···

  [[package]]
  name = "openssl-sys"
- version = "0.9.109"
+ version = "0.9.110"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
+ checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2"
  dependencies = [
  "cc",
  "libc",
···
  "ecdsa",
  "elliptic-curve",
  "primeorder",
+ "serdect",
  "sha2",
  ]

···
  "ecdsa",
  "elliptic-curve",
  "primeorder",
+ "serdect",
  "sha2",
  ]

···

  [[package]]
  name = "parking_lot"
- version = "0.12.4"
+ version = "0.12.5"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13"
+ checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a"
  dependencies = [
  "lock_api",
  "parking_lot_core",
···

  [[package]]
  name = "parking_lot_core"
- version = "0.9.11"
+ version = "0.9.12"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5"
+ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1"
  dependencies = [
  "cfg-if",
  "libc",
  "redox_syscall",
  "smallvec",
- "windows-targets 0.52.6",
+ "windows-link 0.2.1",
  ]

  [[package]]
···

  [[package]]
  name = "potential_utf"
- version = "0.1.3"
+ version = "0.1.4"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a"
+ checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
  dependencies = [
  "zerovec",
  ]
···
  checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6"
  dependencies = [
  "elliptic-curve",
+ "serdect",
  ]

  [[package]]
  name = "proc-macro2"
- version = "1.0.101"
+ version = "1.0.103"
  source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
+ checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
  dependencies = [
2001
1954
"unicode-ident",
2002
1955
]
2003
1956
2004
1957
[[package]]
2005
1958
name = "quickdid"
2006
-
version = "1.0.0-rc.2"
1959
+
version = "1.0.0-rc.5"
2007
1960
dependencies = [
2008
1961
"anyhow",
2009
1962
"async-trait",
2010
1963
"atproto-identity",
1964
+
"atproto-jetstream",
1965
+
"atproto-lexicon",
2011
1966
"axum",
2012
1967
"bincode",
2013
-
"clap",
1968
+
"cadence",
2014
1969
"deadpool-redis",
1970
+
"httpdate",
2015
1971
"metrohash",
1972
+
"once_cell",
2016
1973
"reqwest",
2017
1974
"serde",
2018
1975
"serde_json",
2019
1976
"sqlx",
2020
-
"thiserror 2.0.16",
1977
+
"thiserror 2.0.17",
2021
1978
"tokio",
2022
1979
"tokio-util",
1980
+
"tower-http",
2023
1981
"tracing",
2024
1982
"tracing-subscriber",
2025
1983
]
···
2037
1995
"quinn-udp",
2038
1996
"rustc-hash",
2039
1997
"rustls",
2040
-
"socket2 0.6.0",
2041
-
"thiserror 2.0.16",
1998
+
"socket2 0.6.1",
1999
+
"thiserror 2.0.17",
2042
2000
"tokio",
2043
2001
"tracing",
2044
2002
"web-time",
···
2051
2009
checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
2052
2010
dependencies = [
2053
2011
"bytes",
2054
-
"getrandom 0.3.3",
2012
+
"getrandom 0.3.4",
2055
2013
"lru-slab",
2056
2014
"rand 0.9.2",
2057
2015
"ring",
···
2059
2017
"rustls",
2060
2018
"rustls-pki-types",
2061
2019
"slab",
2062
-
"thiserror 2.0.16",
2020
+
"thiserror 2.0.17",
2063
2021
"tinyvec",
2064
2022
"tracing",
2065
2023
"web-time",
···
2074
2032
"cfg_aliases",
2075
2033
"libc",
2076
2034
"once_cell",
2077
-
"socket2 0.6.0",
2035
+
"socket2 0.6.1",
2078
2036
"tracing",
2079
2037
"windows-sys 0.60.2",
2080
2038
]
2081
2039
2082
2040
[[package]]
2083
2041
name = "quote"
2084
-
version = "1.0.40"
2042
+
version = "1.0.41"
2085
2043
source = "registry+https://github.com/rust-lang/crates.io-index"
2086
-
checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
2044
+
checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1"
2087
2045
dependencies = [
2088
2046
"proc-macro2",
2089
2047
]
···
2150
2108
source = "registry+https://github.com/rust-lang/crates.io-index"
2151
2109
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
2152
2110
dependencies = [
2153
-
"getrandom 0.3.3",
2111
+
"getrandom 0.3.4",
2154
2112
]
2155
2113
2156
2114
[[package]]
2157
2115
name = "redis"
2158
-
version = "0.32.5"
2116
+
version = "0.32.7"
2159
2117
source = "registry+https://github.com/rust-lang/crates.io-index"
2160
-
checksum = "7cd3650deebc68526b304898b192fa4102a4ef0b9ada24da096559cb60e0eef8"
2118
+
checksum = "014cc767fefab6a3e798ca45112bccad9c6e0e218fbd49720042716c73cfef44"
2161
2119
dependencies = [
2162
2120
"arc-swap",
2163
2121
"backon",
···
2173
2131
"rustls",
2174
2132
"rustls-native-certs",
2175
2133
"ryu",
2176
-
"socket2 0.6.0",
2134
+
"socket2 0.6.1",
2177
2135
"tokio",
2178
2136
"tokio-rustls",
2179
2137
"tokio-util",
···
2182
2140
2183
2141
[[package]]
2184
2142
name = "redox_syscall"
2185
-
version = "0.5.17"
2143
+
version = "0.5.18"
2186
2144
source = "registry+https://github.com/rust-lang/crates.io-index"
2187
-
checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77"
2145
+
checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
2188
2146
dependencies = [
2189
2147
"bitflags",
2190
2148
]
2191
2149
2192
2150
[[package]]
2193
2151
name = "regex-automata"
2194
-
version = "0.4.10"
2152
+
version = "0.4.13"
2195
2153
source = "registry+https://github.com/rust-lang/crates.io-index"
2196
-
checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
2154
+
checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"
2197
2155
dependencies = [
2198
2156
"aho-corasick",
2199
2157
"memchr",
···
2202
2160
2203
2161
[[package]]
2204
2162
name = "regex-syntax"
2205
-
version = "0.8.6"
2163
+
version = "0.8.8"
2206
2164
source = "registry+https://github.com/rust-lang/crates.io-index"
2207
-
checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
2165
+
checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
2208
2166
2209
2167
[[package]]
2210
2168
name = "reqwest"
2211
-
version = "0.12.23"
2169
+
version = "0.12.24"
2212
2170
source = "registry+https://github.com/rust-lang/crates.io-index"
2213
-
checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
2171
+
checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
2214
2172
dependencies = [
2215
2173
"base64",
2216
2174
"bytes",
2217
2175
"encoding_rs",
2218
2176
"futures-core",
2177
+
"futures-util",
2219
2178
"h2",
2220
2179
"http",
2221
2180
"http-body",
···
2227
2186
"js-sys",
2228
2187
"log",
2229
2188
"mime",
2189
+
"mime_guess",
2230
2190
"native-tls",
2231
2191
"percent-encoding",
2232
2192
"pin-project-lite",
···
2251
2211
]
2252
2212
2253
2213
[[package]]
2214
+
name = "reqwest-chain"
2215
+
version = "1.0.0"
2216
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2217
+
checksum = "da5c014fb79a8227db44a0433d748107750d2550b7fca55c59a3d7ee7d2ee2b2"
2218
+
dependencies = [
2219
+
"anyhow",
2220
+
"async-trait",
2221
+
"http",
2222
+
"reqwest-middleware",
2223
+
]
2224
+
2225
+
[[package]]
2226
+
name = "reqwest-middleware"
2227
+
version = "0.4.2"
2228
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2229
+
checksum = "57f17d28a6e6acfe1733fe24bcd30774d13bffa4b8a22535b4c8c98423088d4e"
2230
+
dependencies = [
2231
+
"anyhow",
2232
+
"async-trait",
2233
+
"http",
2234
+
"reqwest",
2235
+
"serde",
2236
+
"thiserror 1.0.69",
2237
+
"tower-service",
2238
+
]
2239
+
2240
+
[[package]]
2254
2241
name = "resolv-conf"
2255
-
version = "0.7.4"
2242
+
version = "0.7.5"
2256
2243
source = "registry+https://github.com/rust-lang/crates.io-index"
2257
-
checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3"
2244
+
checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799"
2258
2245
2259
2246
[[package]]
2260
2247
name = "rfc6979"
···
2301
2288
]
2302
2289
2303
2290
[[package]]
2304
-
name = "rustc-demangle"
2305
-
version = "0.1.26"
2306
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2307
-
checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
2308
-
2309
-
[[package]]
2310
2291
name = "rustc-hash"
2311
2292
version = "2.1.1"
2312
2293
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2323
2304
2324
2305
[[package]]
2325
2306
name = "rustix"
2326
-
version = "1.0.8"
2307
+
version = "1.1.2"
2327
2308
source = "registry+https://github.com/rust-lang/crates.io-index"
2328
-
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
2309
+
checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e"
2329
2310
dependencies = [
2330
2311
"bitflags",
2331
2312
"errno",
2332
2313
"libc",
2333
2314
"linux-raw-sys",
2334
-
"windows-sys 0.60.2",
2315
+
"windows-sys 0.61.2",
2335
2316
]
2336
2317
2337
2318
[[package]]
2338
2319
name = "rustls"
2339
-
version = "0.23.31"
2320
+
version = "0.23.34"
2340
2321
source = "registry+https://github.com/rust-lang/crates.io-index"
2341
-
checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc"
2322
+
checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7"
2342
2323
dependencies = [
2343
2324
"once_cell",
2344
2325
"ring",
···
2350
2331
2351
2332
[[package]]
2352
2333
name = "rustls-native-certs"
2353
-
version = "0.8.1"
2334
+
version = "0.8.2"
2354
2335
source = "registry+https://github.com/rust-lang/crates.io-index"
2355
-
checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3"
2336
+
checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923"
2356
2337
dependencies = [
2357
2338
"openssl-probe",
2358
2339
"rustls-pki-types",
2359
2340
"schannel",
2360
-
"security-framework 3.3.0",
2341
+
"security-framework 3.5.1",
2361
2342
]
2362
2343
2363
2344
[[package]]
2364
2345
name = "rustls-pki-types"
2365
-
version = "1.12.0"
2346
+
version = "1.13.0"
2366
2347
source = "registry+https://github.com/rust-lang/crates.io-index"
2367
-
checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79"
2348
+
checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a"
2368
2349
dependencies = [
2369
2350
"web-time",
2370
2351
"zeroize",
···
2372
2353
2373
2354
[[package]]
2374
2355
name = "rustls-webpki"
2375
-
version = "0.103.4"
2356
+
version = "0.103.8"
2376
2357
source = "registry+https://github.com/rust-lang/crates.io-index"
2377
-
checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc"
2358
+
checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52"
2378
2359
dependencies = [
2379
2360
"ring",
2380
2361
"rustls-pki-types",
···
2395
2376
2396
2377
[[package]]
2397
2378
name = "schannel"
2398
-
version = "0.1.27"
2379
+
version = "0.1.28"
2399
2380
source = "registry+https://github.com/rust-lang/crates.io-index"
2400
-
checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d"
2381
+
checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1"
2401
2382
dependencies = [
2402
-
"windows-sys 0.59.0",
2383
+
"windows-sys 0.61.2",
2403
2384
]
2404
2385
2405
2386
[[package]]
2406
-
name = "scoped-tls"
2407
-
version = "1.0.1"
2408
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2409
-
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
2410
-
2411
-
[[package]]
2412
2387
name = "scopeguard"
2413
2388
version = "1.2.0"
2414
2389
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2444
2419
2445
2420
[[package]]
2446
2421
name = "security-framework"
2447
-
version = "3.3.0"
2422
+
version = "3.5.1"
2448
2423
source = "registry+https://github.com/rust-lang/crates.io-index"
2449
-
checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c"
2424
+
checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef"
2450
2425
dependencies = [
2451
2426
"bitflags",
2452
2427
"core-foundation 0.10.1",
···
2457
2432
2458
2433
[[package]]
2459
2434
name = "security-framework-sys"
2460
-
version = "2.14.0"
2435
+
version = "2.15.0"
2461
2436
source = "registry+https://github.com/rust-lang/crates.io-index"
2462
-
checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32"
2437
+
checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0"
2463
2438
dependencies = [
2464
2439
"core-foundation-sys",
2465
2440
"libc",
···
2467
2442
2468
2443
[[package]]
2469
2444
name = "semver"
2470
-
version = "1.0.26"
2445
+
version = "1.0.27"
2471
2446
source = "registry+https://github.com/rust-lang/crates.io-index"
2472
-
checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0"
2447
+
checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
2473
2448
2474
2449
[[package]]
2475
2450
name = "serde"
2476
-
version = "1.0.219"
2451
+
version = "1.0.228"
2477
2452
source = "registry+https://github.com/rust-lang/crates.io-index"
2478
-
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
2453
+
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
2479
2454
dependencies = [
2455
+
"serde_core",
2480
2456
"serde_derive",
2481
2457
]
2482
2458
2483
2459
[[package]]
2484
2460
name = "serde_bytes"
2485
-
version = "0.11.17"
2461
+
version = "0.11.19"
2486
2462
source = "registry+https://github.com/rust-lang/crates.io-index"
2487
-
checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96"
2463
+
checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
2488
2464
dependencies = [
2489
2465
"serde",
2466
+
"serde_core",
2467
+
]
2468
+
2469
+
[[package]]
2470
+
name = "serde_core"
2471
+
version = "1.0.228"
2472
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2473
+
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
2474
+
dependencies = [
2475
+
"serde_derive",
2490
2476
]
2491
2477
2492
2478
[[package]]
2493
2479
name = "serde_derive"
2494
-
version = "1.0.219"
2480
+
version = "1.0.228"
2495
2481
source = "registry+https://github.com/rust-lang/crates.io-index"
2496
-
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
2482
+
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
2497
2483
dependencies = [
2498
2484
"proc-macro2",
2499
2485
"quote",
2500
-
"syn",
2486
+
"syn 2.0.108",
2501
2487
]
2502
2488
2503
2489
[[package]]
2504
2490
name = "serde_ipld_dagcbor"
2505
-
version = "0.6.3"
2491
+
version = "0.6.4"
2506
2492
source = "registry+https://github.com/rust-lang/crates.io-index"
2507
-
checksum = "99600723cf53fb000a66175555098db7e75217c415bdd9a16a65d52a19dcc4fc"
2493
+
checksum = "46182f4f08349a02b45c998ba3215d3f9de826246ba02bb9dddfe9a2a2100778"
2508
2494
dependencies = [
2509
2495
"cbor4ii",
2510
2496
"ipld-core",
···
2514
2500
2515
2501
[[package]]
2516
2502
name = "serde_json"
2517
-
version = "1.0.143"
2503
+
version = "1.0.145"
2518
2504
source = "registry+https://github.com/rust-lang/crates.io-index"
2519
-
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
2505
+
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
2520
2506
dependencies = [
2521
2507
"itoa",
2522
2508
"memchr",
2523
2509
"ryu",
2524
2510
"serde",
2511
+
"serde_core",
2525
2512
]
2526
2513
2527
2514
[[package]]
2528
2515
name = "serde_path_to_error"
2529
-
version = "0.1.17"
2516
+
version = "0.1.20"
2530
2517
source = "registry+https://github.com/rust-lang/crates.io-index"
2531
-
checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a"
2518
+
checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457"
2532
2519
dependencies = [
2533
2520
"itoa",
2534
2521
"serde",
2522
+
"serde_core",
2535
2523
]
2536
2524
2537
2525
[[package]]
···
2613
2601
]
2614
2602
2615
2603
[[package]]
2604
+
name = "simdutf8"
2605
+
version = "0.1.5"
2606
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2607
+
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
2608
+
2609
+
[[package]]
2616
2610
name = "slab"
2617
2611
version = "0.4.11"
2618
2612
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2639
2633
2640
2634
[[package]]
2641
2635
name = "socket2"
2642
-
version = "0.6.0"
2636
+
version = "0.6.1"
2643
2637
source = "registry+https://github.com/rust-lang/crates.io-index"
2644
-
checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807"
2638
+
checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881"
2645
2639
dependencies = [
2646
2640
"libc",
2647
-
"windows-sys 0.59.0",
2641
+
"windows-sys 0.60.2",
2648
2642
]
2649
2643
2650
2644
[[package]]
···
2687
2681
dependencies = [
2688
2682
"base64",
2689
2683
"bytes",
2690
-
"chrono",
2691
2684
"crc",
2692
2685
"crossbeam-queue",
2693
2686
"either",
···
2696
2689
"futures-intrusive",
2697
2690
"futures-io",
2698
2691
"futures-util",
2699
-
"hashbrown",
2692
+
"hashbrown 0.15.5",
2700
2693
"hashlink",
2701
2694
"indexmap",
2702
2695
"log",
···
2707
2700
"serde_json",
2708
2701
"sha2",
2709
2702
"smallvec",
2710
-
"thiserror 2.0.16",
2703
+
"thiserror 2.0.17",
2711
2704
"tokio",
2712
2705
"tokio-stream",
2713
2706
"tracing",
···
2724
2717
"quote",
2725
2718
"sqlx-core",
2726
2719
"sqlx-macros-core",
2727
-
"syn",
2720
+
"syn 2.0.108",
2728
2721
]
2729
2722
2730
2723
[[package]]
···
2747
2740
"sqlx-mysql",
2748
2741
"sqlx-postgres",
2749
2742
"sqlx-sqlite",
2750
-
"syn",
2743
+
"syn 2.0.108",
2751
2744
"tokio",
2752
2745
"url",
2753
2746
]
···
2763
2756
"bitflags",
2764
2757
"byteorder",
2765
2758
"bytes",
2766
-
"chrono",
2767
2759
"crc",
2768
2760
"digest",
2769
2761
"dotenvy",
···
2790
2782
"smallvec",
2791
2783
"sqlx-core",
2792
2784
"stringprep",
2793
-
"thiserror 2.0.16",
2785
+
"thiserror 2.0.17",
2794
2786
"tracing",
2795
2787
"whoami",
2796
2788
]
···
2805
2797
"base64",
2806
2798
"bitflags",
2807
2799
"byteorder",
2808
-
"chrono",
2809
2800
"crc",
2810
2801
"dotenvy",
2811
2802
"etcetera",
···
2828
2819
"smallvec",
2829
2820
"sqlx-core",
2830
2821
"stringprep",
2831
-
"thiserror 2.0.16",
2822
+
"thiserror 2.0.17",
2832
2823
"tracing",
2833
2824
"whoami",
2834
2825
]
···
2840
2831
checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea"
2841
2832
dependencies = [
2842
2833
"atoi",
2843
-
"chrono",
2844
2834
"flume",
2845
2835
"futures-channel",
2846
2836
"futures-core",
···
2853
2843
"serde",
2854
2844
"serde_urlencoded",
2855
2845
"sqlx-core",
2856
-
"thiserror 2.0.16",
2846
+
"thiserror 2.0.17",
2857
2847
"tracing",
2858
2848
"url",
2859
2849
]
2860
2850
2861
2851
[[package]]
2862
2852
name = "stable_deref_trait"
2863
-
version = "1.2.0"
2853
+
version = "1.2.1"
2864
2854
source = "registry+https://github.com/rust-lang/crates.io-index"
2865
-
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
2855
+
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
2866
2856
2867
2857
[[package]]
2868
2858
name = "stringprep"
···
2876
2866
]
2877
2867
2878
2868
[[package]]
2879
-
name = "strsim"
2880
-
version = "0.11.1"
2869
+
name = "subtle"
2870
+
version = "2.6.1"
2881
2871
source = "registry+https://github.com/rust-lang/crates.io-index"
2882
-
checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f"
2872
+
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
2883
2873
2884
2874
[[package]]
2885
-
name = "subtle"
2886
-
version = "2.6.1"
2875
+
name = "syn"
2876
+
version = "1.0.109"
2887
2877
source = "registry+https://github.com/rust-lang/crates.io-index"
2888
-
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
2878
+
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
2879
+
dependencies = [
2880
+
"proc-macro2",
2881
+
"quote",
2882
+
"unicode-ident",
2883
+
]
2889
2884
2890
2885
[[package]]
2891
2886
name = "syn"
2892
-
version = "2.0.106"
2887
+
version = "2.0.108"
2893
2888
source = "registry+https://github.com/rust-lang/crates.io-index"
2894
-
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
2889
+
checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917"
2895
2890
dependencies = [
2896
2891
"proc-macro2",
2897
2892
"quote",
···
2915
2910
dependencies = [
2916
2911
"proc-macro2",
2917
2912
"quote",
2918
-
"syn",
2913
+
"syn 2.0.108",
2919
2914
]
2920
2915
2921
2916
[[package]]
···
2947
2942
2948
2943
[[package]]
2949
2944
name = "tempfile"
2950
-
version = "3.21.0"
2945
+
version = "3.23.0"
2951
2946
source = "registry+https://github.com/rust-lang/crates.io-index"
2952
-
checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e"
2947
+
checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16"
2953
2948
dependencies = [
2954
2949
"fastrand",
2955
-
"getrandom 0.3.3",
2950
+
"getrandom 0.3.4",
2956
2951
"once_cell",
2957
2952
"rustix",
2958
-
"windows-sys 0.60.2",
2953
+
"windows-sys 0.61.2",
2959
2954
]
2960
2955
2961
2956
[[package]]
···
2969
2964
2970
2965
[[package]]
2971
2966
name = "thiserror"
2972
-
version = "2.0.16"
2967
+
version = "2.0.17"
2973
2968
source = "registry+https://github.com/rust-lang/crates.io-index"
2974
-
checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
2969
+
checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8"
2975
2970
dependencies = [
2976
-
"thiserror-impl 2.0.16",
2971
+
"thiserror-impl 2.0.17",
2977
2972
]
2978
2973
2979
2974
[[package]]
···
2984
2979
dependencies = [
2985
2980
"proc-macro2",
2986
2981
"quote",
2987
-
"syn",
2982
+
"syn 2.0.108",
2988
2983
]
2989
2984
2990
2985
[[package]]
2991
2986
name = "thiserror-impl"
2992
-
version = "2.0.16"
2987
+
version = "2.0.17"
2993
2988
source = "registry+https://github.com/rust-lang/crates.io-index"
2994
-
checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
2989
+
checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
2995
2990
dependencies = [
2996
2991
"proc-macro2",
2997
2992
"quote",
2998
-
"syn",
2993
+
"syn 2.0.108",
2999
2994
]
3000
2995
3001
2996
[[package]]
···
3009
3004
3010
3005
[[package]]
3011
3006
name = "tinystr"
3012
-
version = "0.8.1"
3007
+
version = "0.8.2"
3013
3008
source = "registry+https://github.com/rust-lang/crates.io-index"
3014
-
checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b"
3009
+
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
3015
3010
dependencies = [
3016
3011
"displaydoc",
3017
3012
"zerovec",
···
3034
3029
3035
3030
[[package]]
3036
3031
name = "tokio"
3037
-
version = "1.47.1"
3032
+
version = "1.48.0"
3038
3033
source = "registry+https://github.com/rust-lang/crates.io-index"
3039
-
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
3034
+
checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
3040
3035
dependencies = [
3041
-
"backtrace",
3042
3036
"bytes",
3043
-
"io-uring",
3044
3037
"libc",
3045
3038
"mio",
3039
+
"parking_lot",
3046
3040
"pin-project-lite",
3047
3041
"signal-hook-registry",
3048
-
"slab",
3049
-
"socket2 0.6.0",
3042
+
"socket2 0.6.1",
3050
3043
"tokio-macros",
3051
-
"windows-sys 0.59.0",
3044
+
"windows-sys 0.61.2",
3052
3045
]
3053
3046
3054
3047
[[package]]
3055
3048
name = "tokio-macros"
3056
-
version = "2.5.0"
3049
+
version = "2.6.0"
3057
3050
source = "registry+https://github.com/rust-lang/crates.io-index"
3058
-
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
3051
+
checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
3059
3052
dependencies = [
3060
3053
"proc-macro2",
3061
3054
"quote",
3062
-
"syn",
3055
+
"syn 2.0.108",
3063
3056
]
3064
3057
3065
3058
[[package]]
···
3074
3067
3075
3068
[[package]]
3076
3069
name = "tokio-rustls"
3077
-
version = "0.26.2"
3070
+
version = "0.26.4"
3078
3071
source = "registry+https://github.com/rust-lang/crates.io-index"
3079
-
checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b"
3072
+
checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
3080
3073
dependencies = [
3081
3074
"rustls",
3082
3075
"tokio",
···
3095
3088
3096
3089
[[package]]
3097
3090
name = "tokio-util"
3098
-
version = "0.7.16"
3091
+
version = "0.7.17"
3099
3092
source = "registry+https://github.com/rust-lang/crates.io-index"
3100
-
checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5"
3093
+
checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594"
3101
3094
dependencies = [
3102
3095
"bytes",
3103
3096
"futures-core",
···
3108
3101
]
3109
3102
3110
3103
[[package]]
3104
+
name = "tokio-websockets"
3105
+
version = "0.11.4"
3106
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3107
+
checksum = "9fcaf159b4e7a376b05b5bfd77bfd38f3324f5fce751b4213bfc7eaa47affb4e"
3108
+
dependencies = [
3109
+
"base64",
3110
+
"bytes",
3111
+
"futures-core",
3112
+
"futures-sink",
3113
+
"http",
3114
+
"httparse",
3115
+
"rand 0.9.2",
3116
+
"ring",
3117
+
"rustls-native-certs",
3118
+
"rustls-pki-types",
3119
+
"simdutf8",
3120
+
"tokio",
3121
+
"tokio-rustls",
3122
+
"tokio-util",
3123
+
]
3124
+
3125
+
[[package]]
3111
3126
name = "tower"
3112
3127
version = "0.5.2"
3113
3128
source = "registry+https://github.com/rust-lang/crates.io-index"
···
3131
3146
dependencies = [
3132
3147
"bitflags",
3133
3148
"bytes",
3149
+
"futures-core",
3134
3150
"futures-util",
3135
3151
"http",
3136
3152
"http-body",
3153
+
"http-body-util",
3154
+
"http-range-header",
3155
+
"httpdate",
3137
3156
"iri-string",
3157
+
"mime",
3158
+
"mime_guess",
3159
+
"percent-encoding",
3138
3160
"pin-project-lite",
3161
+
"tokio",
3162
+
"tokio-util",
3139
3163
"tower",
3140
3164
"tower-layer",
3141
3165
"tower-service",
3166
+
"tracing",
3142
3167
]
3143
3168
3144
3169
[[package]]
···
3173
3198
dependencies = [
3174
3199
"proc-macro2",
3175
3200
"quote",
3176
-
"syn",
3201
+
"syn 2.0.108",
3177
3202
]
3178
3203
3179
3204
[[package]]
···
3223
3248
3224
3249
[[package]]
3225
3250
name = "typenum"
3226
-
version = "1.18.0"
3251
+
version = "1.19.0"
3227
3252
source = "registry+https://github.com/rust-lang/crates.io-index"
3228
-
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
3253
+
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
3254
+
3255
+
[[package]]
3256
+
name = "ulid"
3257
+
version = "1.2.1"
3258
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3259
+
checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe"
3260
+
dependencies = [
3261
+
"rand 0.9.2",
3262
+
"web-time",
3263
+
]
3264
+
3265
+
[[package]]
3266
+
name = "unicase"
3267
+
version = "2.8.1"
3268
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3269
+
checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
3229
3270
3230
3271
[[package]]
3231
3272
name = "unicode-bidi"
···
3235
3276
3236
3277
[[package]]
3237
3278
name = "unicode-ident"
3238
-
version = "1.0.18"
3279
+
version = "1.0.22"
3239
3280
source = "registry+https://github.com/rust-lang/crates.io-index"
3240
-
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
3281
+
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
3241
3282
3242
3283
[[package]]
3243
3284
name = "unicode-normalization"
3244
-
version = "0.1.24"
3285
+
version = "0.1.25"
3245
3286
source = "registry+https://github.com/rust-lang/crates.io-index"
3246
-
checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
3287
+
checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8"
3247
3288
dependencies = [
3248
3289
"tinyvec",
3249
3290
]
3250
3291
3251
3292
[[package]]
3252
3293
name = "unicode-properties"
3253
-
version = "0.1.3"
3294
+
version = "0.1.4"
3254
3295
source = "registry+https://github.com/rust-lang/crates.io-index"
3255
-
checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0"
3296
+
checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d"
3256
3297
3257
3298
[[package]]
3258
3299
name = "unsigned-varint"
···
3285
3326
]
3286
3327
3287
3328
[[package]]
3329
+
name = "urlencoding"
3330
+
version = "2.1.3"
3331
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3332
+
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
3333
+
3334
+
[[package]]
3288
3335
name = "utf8_iter"
3289
3336
version = "1.0.4"
3290
3337
source = "registry+https://github.com/rust-lang/crates.io-index"
3291
3338
checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
3292
3339
3293
3340
[[package]]
3294
-
name = "utf8parse"
3295
-
version = "0.2.2"
3296
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3297
-
checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821"
3298
-
3299
-
[[package]]
3300
3341
name = "uuid"
3301
3342
version = "1.18.1"
3302
3343
source = "registry+https://github.com/rust-lang/crates.io-index"
3303
3344
checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2"
3304
3345
dependencies = [
3305
-
"getrandom 0.3.3",
3346
+
"getrandom 0.3.4",
3306
3347
"js-sys",
3307
3348
"wasm-bindgen",
3308
3349
]
···
3347
3388
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
3348
3389
3349
3390
[[package]]
3350
-
name = "wasi"
3351
-
version = "0.14.3+wasi-0.2.4"
3391
+
name = "wasip2"
3392
+
version = "1.0.1+wasi-0.2.4"
3352
3393
source = "registry+https://github.com/rust-lang/crates.io-index"
3353
-
checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95"
3394
+
checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
3354
3395
dependencies = [
3355
3396
"wit-bindgen",
3356
3397
]
···
3363
3404
3364
3405
[[package]]
3365
3406
name = "wasm-bindgen"
3366
-
version = "0.2.101"
3407
+
version = "0.2.105"
3367
3408
source = "registry+https://github.com/rust-lang/crates.io-index"
3368
-
checksum = "7e14915cadd45b529bb8d1f343c4ed0ac1de926144b746e2710f9cd05df6603b"
3409
+
checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60"
3369
3410
dependencies = [
3370
3411
"cfg-if",
3371
3412
"once_cell",
···
3375
3416
]
3376
3417
3377
3418
[[package]]
3378
-
name = "wasm-bindgen-backend"
3379
-
version = "0.2.101"
3380
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3381
-
checksum = "e28d1ba982ca7923fd01448d5c30c6864d0a14109560296a162f80f305fb93bb"
3382
-
dependencies = [
3383
-
"bumpalo",
3384
-
"log",
3385
-
"proc-macro2",
3386
-
"quote",
3387
-
"syn",
3388
-
"wasm-bindgen-shared",
3389
-
]
3390
-
3391
-
[[package]]
3392
3419
name = "wasm-bindgen-futures"
3393
-
version = "0.4.51"
3420
+
version = "0.4.55"
3394
3421
source = "registry+https://github.com/rust-lang/crates.io-index"
3395
-
checksum = "0ca85039a9b469b38336411d6d6ced91f3fc87109a2a27b0c197663f5144dffe"
3422
+
checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0"
3396
3423
dependencies = [
3397
3424
"cfg-if",
3398
3425
"js-sys",
···
3403
3430
3404
3431
[[package]]
3405
3432
name = "wasm-bindgen-macro"
3406
-
version = "0.2.101"
3433
+
version = "0.2.105"
3407
3434
source = "registry+https://github.com/rust-lang/crates.io-index"
3408
-
checksum = "7c3d463ae3eff775b0c45df9da45d68837702ac35af998361e2c84e7c5ec1b0d"
3435
+
checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2"
3409
3436
dependencies = [
3410
3437
"quote",
3411
3438
"wasm-bindgen-macro-support",
···
3413
3440
3414
3441
[[package]]
3415
3442
name = "wasm-bindgen-macro-support"
3416
-
version = "0.2.101"
3443
+
version = "0.2.105"
3417
3444
source = "registry+https://github.com/rust-lang/crates.io-index"
3418
-
checksum = "7bb4ce89b08211f923caf51d527662b75bdc9c9c7aab40f86dcb9fb85ac552aa"
3445
+
checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc"
3419
3446
dependencies = [
3447
+
"bumpalo",
3420
3448
"proc-macro2",
3421
3449
"quote",
3422
-
"syn",
3423
-
"wasm-bindgen-backend",
3450
+
"syn 2.0.108",
3424
3451
"wasm-bindgen-shared",
3425
3452
]
3426
3453
3427
3454
[[package]]
3428
3455
name = "wasm-bindgen-shared"
3429
-
version = "0.2.101"
3456
+
version = "0.2.105"
3430
3457
source = "registry+https://github.com/rust-lang/crates.io-index"
3431
-
checksum = "f143854a3b13752c6950862c906306adb27c7e839f7414cec8fea35beab624c1"
3458
+
checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76"
3432
3459
dependencies = [
3433
3460
"unicode-ident",
3434
3461
]
3435
3462
3436
3463
[[package]]
3437
3464
name = "web-sys"
3438
-
version = "0.3.78"
3465
+
version = "0.3.82"
3439
3466
source = "registry+https://github.com/rust-lang/crates.io-index"
3440
-
checksum = "77e4b637749ff0d92b8fad63aa1f7cff3cbe125fd49c175cd6345e7272638b12"
3467
+
checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1"
3441
3468
dependencies = [
3442
3469
"js-sys",
3443
3470
"wasm-bindgen",
···
3455
3482
3456
3483
[[package]]
3457
3484
name = "webpki-roots"
3458
-
version = "1.0.2"
3485
+
version = "1.0.4"
3459
3486
source = "registry+https://github.com/rust-lang/crates.io-index"
3460
-
checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2"
3487
+
checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e"
3461
3488
dependencies = [
3462
3489
"rustls-pki-types",
3463
3490
]
···
3474
3501
3475
3502
[[package]]
3476
3503
name = "widestring"
3477
-
version = "1.2.0"
3478
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3479
-
checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d"
3480
-
3481
-
[[package]]
3482
-
name = "windows"
3483
-
version = "0.61.3"
3484
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3485
-
checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893"
3486
-
dependencies = [
3487
-
"windows-collections",
3488
-
"windows-core",
3489
-
"windows-future",
3490
-
"windows-link",
3491
-
"windows-numerics",
3492
-
]
3493
-
3494
-
[[package]]
3495
-
name = "windows-collections"
3496
-
version = "0.2.0"
3497
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3498
-
checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8"
3499
-
dependencies = [
3500
-
"windows-core",
3501
-
]
3502
-
3503
-
[[package]]
3504
-
name = "windows-core"
3505
-
version = "0.61.2"
3506
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3507
-
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
3508
-
dependencies = [
3509
-
"windows-implement",
3510
-
"windows-interface",
3511
-
"windows-link",
3512
-
"windows-result",
3513
-
"windows-strings",
3514
-
]
3515
-
3516
-
[[package]]
3517
-
name = "windows-future"
3518
-
version = "0.2.1"
3504
+
version = "1.2.1"
3519
3505
source = "registry+https://github.com/rust-lang/crates.io-index"
3520
-
checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e"
3521
-
dependencies = [
3522
-
"windows-core",
3523
-
"windows-link",
3524
-
"windows-threading",
3525
-
]
3526
-
3527
-
[[package]]
3528
-
name = "windows-implement"
3529
-
version = "0.60.0"
3530
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3531
-
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
3532
-
dependencies = [
3533
-
"proc-macro2",
3534
-
"quote",
3535
-
"syn",
3536
-
]
3537
-
3538
-
[[package]]
3539
-
name = "windows-interface"
3540
-
version = "0.59.1"
3541
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3542
-
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
3543
-
dependencies = [
3544
-
"proc-macro2",
3545
-
"quote",
3546
-
"syn",
3547
-
]
3506
+
checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471"
3548
3507
3549
3508
[[package]]
3550
3509
name = "windows-link"
···
3553
3512
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
3554
3513
3555
3514
[[package]]
3556
-
name = "windows-numerics"
3557
-
version = "0.2.0"
3515
+
name = "windows-link"
3516
+
version = "0.2.1"
3558
3517
source = "registry+https://github.com/rust-lang/crates.io-index"
3559
-
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
3560
-
dependencies = [
3561
-
"windows-core",
3562
-
"windows-link",
3563
-
]
3518
+
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
3564
3519
3565
3520
[[package]]
3566
3521
name = "windows-registry"
···
3568
3523
source = "registry+https://github.com/rust-lang/crates.io-index"
3569
3524
checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e"
3570
3525
dependencies = [
3571
-
"windows-link",
3526
+
"windows-link 0.1.3",
3572
3527
"windows-result",
3573
3528
"windows-strings",
3574
3529
]
···
3579
3534
source = "registry+https://github.com/rust-lang/crates.io-index"
3580
3535
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
3581
3536
dependencies = [
3582
-
"windows-link",
3537
+
"windows-link 0.1.3",
3583
3538
]
3584
3539
3585
3540
[[package]]
···
3588
3543
source = "registry+https://github.com/rust-lang/crates.io-index"
3589
3544
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
3590
3545
dependencies = [
3591
-
"windows-link",
3546
+
"windows-link 0.1.3",
3592
3547
]
3593
3548
3594
3549
[[package]]
···
3611
3566
3612
3567
[[package]]
3613
3568
name = "windows-sys"
3614
-
version = "0.59.0"
3569
+
version = "0.60.2"
3615
3570
source = "registry+https://github.com/rust-lang/crates.io-index"
3616
-
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
3571
+
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
3617
3572
dependencies = [
3618
-
"windows-targets 0.52.6",
3573
+
"windows-targets 0.53.5",
3619
3574
]
3620
3575
3621
3576
[[package]]
3622
3577
name = "windows-sys"
3623
-
version = "0.60.2"
3578
+
version = "0.61.2"
3624
3579
source = "registry+https://github.com/rust-lang/crates.io-index"
3625
-
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
3580
+
checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
3626
3581
dependencies = [
3627
-
"windows-targets 0.53.3",
3582
+
"windows-link 0.2.1",
3628
3583
]
3629
3584
3630
3585
[[package]]
···
3660
3615
3661
3616
[[package]]
3662
3617
name = "windows-targets"
3663
-
version = "0.53.3"
3618
+
version = "0.53.5"
3664
3619
source = "registry+https://github.com/rust-lang/crates.io-index"
3665
-
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
3620
+
checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
3666
3621
dependencies = [
3667
-
"windows-link",
3668
-
"windows_aarch64_gnullvm 0.53.0",
3669
-
"windows_aarch64_msvc 0.53.0",
3670
-
"windows_i686_gnu 0.53.0",
3671
-
"windows_i686_gnullvm 0.53.0",
3672
-
"windows_i686_msvc 0.53.0",
3673
-
"windows_x86_64_gnu 0.53.0",
3674
-
"windows_x86_64_gnullvm 0.53.0",
3675
-
"windows_x86_64_msvc 0.53.0",
3676
-
]
3677
-
3678
-
[[package]]
3679
-
name = "windows-threading"
3680
-
version = "0.1.0"
3681
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3682
-
checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6"
3683
-
dependencies = [
3684
-
"windows-link",
3622
+
"windows-link 0.2.1",
3623
+
"windows_aarch64_gnullvm 0.53.1",
3624
+
"windows_aarch64_msvc 0.53.1",
3625
+
"windows_i686_gnu 0.53.1",
3626
+
"windows_i686_gnullvm 0.53.1",
3627
+
"windows_i686_msvc 0.53.1",
3628
+
"windows_x86_64_gnu 0.53.1",
3629
+
"windows_x86_64_gnullvm 0.53.1",
3630
+
"windows_x86_64_msvc 0.53.1",
3685
3631
]
3686
3632
3687
3633
[[package]]
···
3698
3644
3699
3645
[[package]]
3700
3646
name = "windows_aarch64_gnullvm"
3701
-
version = "0.53.0"
3647
+
version = "0.53.1"
3702
3648
source = "registry+https://github.com/rust-lang/crates.io-index"
3703
-
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
3649
+
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
3704
3650
3705
3651
[[package]]
3706
3652
name = "windows_aarch64_msvc"
···
3716
3662
3717
3663
[[package]]
3718
3664
name = "windows_aarch64_msvc"
3719
-
version = "0.53.0"
3665
+
version = "0.53.1"
3720
3666
source = "registry+https://github.com/rust-lang/crates.io-index"
3721
-
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
3667
+
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
3722
3668
3723
3669
[[package]]
3724
3670
name = "windows_i686_gnu"
···
3734
3680
3735
3681
[[package]]
3736
3682
name = "windows_i686_gnu"
3737
-
version = "0.53.0"
3683
+
version = "0.53.1"
3738
3684
source = "registry+https://github.com/rust-lang/crates.io-index"
3739
-
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
3685
+
checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3"
3740
3686
3741
3687
[[package]]
3742
3688
name = "windows_i686_gnullvm"
···
3746
3692
3747
3693
[[package]]
3748
3694
name = "windows_i686_gnullvm"
3749
-
version = "0.53.0"
3695
+
version = "0.53.1"
3750
3696
source = "registry+https://github.com/rust-lang/crates.io-index"
3751
-
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
3697
+
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
3752
3698
3753
3699
[[package]]
3754
3700
name = "windows_i686_msvc"
···
3764
3710
3765
3711
[[package]]
3766
3712
name = "windows_i686_msvc"
3767
-
version = "0.53.0"
3713
+
version = "0.53.1"
3768
3714
source = "registry+https://github.com/rust-lang/crates.io-index"
3769
-
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
3715
+
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
3770
3716
3771
3717
[[package]]
3772
3718
name = "windows_x86_64_gnu"
···
3782
3728
3783
3729
[[package]]
3784
3730
name = "windows_x86_64_gnu"
3785
-
version = "0.53.0"
3731
+
version = "0.53.1"
3786
3732
source = "registry+https://github.com/rust-lang/crates.io-index"
3787
-
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
3733
+
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
3788
3734
3789
3735
[[package]]
3790
3736
name = "windows_x86_64_gnullvm"
···
3800
3746
3801
3747
[[package]]
3802
3748
name = "windows_x86_64_gnullvm"
3803
-
version = "0.53.0"
3749
+
version = "0.53.1"
3804
3750
source = "registry+https://github.com/rust-lang/crates.io-index"
3805
-
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
3751
+
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
3806
3752
3807
3753
[[package]]
3808
3754
name = "windows_x86_64_msvc"
···
3818
3764
3819
3765
[[package]]
3820
3766
name = "windows_x86_64_msvc"
3821
-
version = "0.53.0"
3767
+
version = "0.53.1"
3822
3768
source = "registry+https://github.com/rust-lang/crates.io-index"
3823
-
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
3769
+
checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
3824
3770
3825
3771
[[package]]
3826
3772
name = "winreg"
···
3834
3780
3835
3781
[[package]]
3836
3782
name = "wit-bindgen"
3837
-
version = "0.45.1"
3783
+
version = "0.46.0"
3838
3784
source = "registry+https://github.com/rust-lang/crates.io-index"
3839
-
checksum = "5c573471f125075647d03df72e026074b7203790d41351cd6edc96f46bcccd36"
3785
+
checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
3840
3786
3841
3787
[[package]]
3842
3788
name = "writeable"
3843
-
version = "0.6.1"
3789
+
version = "0.6.2"
3844
3790
source = "registry+https://github.com/rust-lang/crates.io-index"
3845
-
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
3791
+
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
3846
3792
3847
3793
[[package]]
3848
3794
name = "yoke"
3849
-
version = "0.8.0"
3795
+
version = "0.8.1"
3850
3796
source = "registry+https://github.com/rust-lang/crates.io-index"
3851
-
checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc"
3797
+
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
3852
3798
dependencies = [
3853
-
"serde",
3854
3799
"stable_deref_trait",
3855
3800
"yoke-derive",
3856
3801
"zerofrom",
···
3858
3803
3859
3804
[[package]]
3860
3805
name = "yoke-derive"
3861
-
version = "0.8.0"
3806
+
version = "0.8.1"
3862
3807
source = "registry+https://github.com/rust-lang/crates.io-index"
3863
-
checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6"
3808
+
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
3864
3809
dependencies = [
3865
3810
"proc-macro2",
3866
3811
"quote",
3867
-
"syn",
3812
+
"syn 2.0.108",
3868
3813
"synstructure",
3869
3814
]
3870
3815
3871
3816
[[package]]
3872
3817
name = "zerocopy"
3873
-
version = "0.8.26"
3818
+
version = "0.8.27"
3874
3819
source = "registry+https://github.com/rust-lang/crates.io-index"
3875
-
checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f"
3820
+
checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c"
3876
3821
dependencies = [
3877
3822
"zerocopy-derive",
3878
3823
]
3879
3824
3880
3825
[[package]]
3881
3826
name = "zerocopy-derive"
3882
-
version = "0.8.26"
3827
+
version = "0.8.27"
3883
3828
source = "registry+https://github.com/rust-lang/crates.io-index"
3884
-
checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
3829
+
checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831"
3885
3830
dependencies = [
3886
3831
"proc-macro2",
3887
3832
"quote",
3888
-
"syn",
3833
+
"syn 2.0.108",
3889
3834
]
3890
3835
3891
3836
[[package]]
···
3905
3850
dependencies = [
3906
3851
"proc-macro2",
3907
3852
"quote",
3908
-
"syn",
3853
+
"syn 2.0.108",
3909
3854
"synstructure",
3910
3855
]
3911
3856
3912
3857
[[package]]
3913
3858
name = "zeroize"
3914
-
version = "1.8.1"
3859
+
version = "1.8.2"
3915
3860
source = "registry+https://github.com/rust-lang/crates.io-index"
3916
-
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
3861
+
checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
3917
3862
3918
3863
[[package]]
3919
3864
name = "zerotrie"
3920
-
version = "0.2.2"
3865
+
version = "0.2.3"
3921
3866
source = "registry+https://github.com/rust-lang/crates.io-index"
3922
-
checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595"
3867
+
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
3923
3868
dependencies = [
3924
3869
"displaydoc",
3925
3870
"yoke",
···
3928
3873
3929
3874
[[package]]
3930
3875
name = "zerovec"
3931
-
version = "0.11.4"
3876
+
version = "0.11.5"
3932
3877
source = "registry+https://github.com/rust-lang/crates.io-index"
3933
-
checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b"
3878
+
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
3934
3879
dependencies = [
3935
3880
"yoke",
3936
3881
"zerofrom",
···
3939
3884
3940
3885
[[package]]
3941
3886
name = "zerovec-derive"
3942
-
version = "0.11.1"
3887
+
version = "0.11.2"
3943
3888
source = "registry+https://github.com/rust-lang/crates.io-index"
3944
-
checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f"
3889
+
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
3945
3890
dependencies = [
3946
3891
"proc-macro2",
3947
3892
"quote",
3948
-
"syn",
3893
+
"syn 2.0.108",
3894
+
]
3895
+
3896
+
[[package]]
3897
+
name = "zstd"
3898
+
version = "0.13.3"
3899
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3900
+
checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a"
3901
+
dependencies = [
3902
+
"zstd-safe",
3903
+
]
3904
+
3905
+
[[package]]
3906
+
name = "zstd-safe"
3907
+
version = "7.2.4"
3908
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3909
+
checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d"
3910
+
dependencies = [
3911
+
"zstd-sys",
3912
+
]
3913
+
3914
+
[[package]]
3915
+
name = "zstd-sys"
3916
+
version = "2.0.16+zstd.1.5.7"
3917
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3918
+
checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748"
3919
+
dependencies = [
3920
+
"cc",
3921
+
"pkg-config",
3949
3922
]
Cargo.toml (+13 -6)
···
 [package]
 name = "quickdid"
-version = "1.0.0-rc.2"
+version = "1.0.0-rc.5"
 edition = "2024"
 authors = ["Nick Gerakines <nick.gerakines@gmail.com>"]
 description = "A fast and scalable com.atproto.identity.resolveHandle service"
···
 [dependencies]
 anyhow = "1.0"
 async-trait = "0.1"
-atproto-identity = { version = "0.11.3" }
+atproto-identity = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
+atproto-jetstream = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
+atproto-lexicon = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
 axum = { version = "0.8" }
-bincode = { version = "2.0.1", features = ["serde"] }
-clap = { version = "4", features = ["derive", "env"] }
+bincode = { version = "2.0", features = ["serde"] }
+cadence = "1.6"
 deadpool-redis = { version = "0.22", features = ["connection-manager", "tokio-comp", "tokio-rustls-comp"] }
-metrohash = "1.0.7"
+httpdate = "1.0"
+metrohash = "1.0"
 reqwest = { version = "0.12", features = ["json"] }
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite", "chrono"] }
+sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
 thiserror = "2.0"
 tokio = { version = "1.35", features = ["rt-multi-thread", "macros", "signal", "sync", "time", "net", "fs"] }
 tokio-util = { version = "0.7", features = ["rt"] }
+tower-http = { version = "0.6", features = ["fs"] }
 tracing = "0.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+
+[dev-dependencies]
+once_cell = "1.20"
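
Because the three atproto-* crates now come from a git source rather than crates.io, Cargo pins the exact commit in `Cargo.lock`. As a hedged illustration of routine maintenance (the package names come from the manifest above; these commands are not part of this change), refreshing those pins would look like:

```bash
# Re-resolve only the git-sourced crates to their latest commits
cargo update -p atproto-identity -p atproto-jetstream -p atproto-lexicon

# Or re-resolve everything, which produces lockfile churn like the diff above
cargo update
```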
Dockerfile (+6 -5)
···
 # syntax=docker/dockerfile:1.4
-FROM rust:1.89-slim AS builder
+FROM rust:1.90-slim-bookworm AS builder

 RUN apt-get update && apt-get install -y \
     pkg-config \
···
 WORKDIR /app
 COPY Cargo.lock Cargo.toml ./

-ARG GIT_HASH=0
-ENV GIT_HASH=$GIT_HASH
-
 COPY src ./src
 RUN cargo build --bin quickdid --release

···
 LABEL org.opencontainers.image.licenses="MIT"
 LABEL org.opencontainers.image.authors="Nick Gerakines <nick.gerakines@gmail.com>"
 LABEL org.opencontainers.image.source="https://tangled.sh/@smokesignal.events/quickdid"
-LABEL org.opencontainers.image.version="1.0.0-rc.2"
+LABEL org.opencontainers.image.version="1.0.0-rc.5"

 WORKDIR /app
 COPY --from=builder /app/target/release/quickdid /app/quickdid

+# Copy static files for serving
+COPY www /app/www
+
 ENV HTTP_PORT=8080
+ENV STATIC_FILES_DIR=/app/www
 ENV RUST_LOG=info
 ENV RUST_BACKTRACE=full
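
A quick local smoke test of this image might look like the following; the tag is arbitrary, `HTTP_EXTERNAL` is the one required variable (see the README below), and `HTTP_PORT` and `STATIC_FILES_DIR` already default inside the image:

```bash
# Build the image (the www directory is copied in at build time)
docker build -t quickdid:1.0.0-rc.5 .

# Run it, exposing the default HTTP_PORT=8080
docker run --rm -p 8080:8080 \
  -e HTTP_EXTERNAL=localhost:8080 \
  quickdid:1.0.0-rc.5
```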
README.md (+211 -17)
···
 # QuickDID

-QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides blazing-fast handle-to-DID resolution with intelligent caching strategies, supporting both in-memory and Redis-backed persistent caching with binary serialization for optimal storage efficiency.
+QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides blazing-fast handle-to-DID resolution with intelligent caching strategies, supporting in-memory, Redis-backed, and SQLite-backed persistent caching with binary serialization for optimal storage efficiency. The service includes proactive cache refreshing to maintain optimal performance and comprehensive metrics support for production monitoring.

-Built with minimal dependencies and optimized for production use, QuickDID delivers exceptional performance while maintaining a lean footprint.
+Built following the 12-factor app methodology with minimal dependencies and optimized for production use, QuickDID delivers exceptional performance while maintaining a lean footprint. Configuration is handled exclusively through environment variables, with only `--version` and `--help` command-line arguments supported.

 ## ⚠️ Production Disclaimer

 **This project is a release candidate and has not been fully vetted for production use.** While it includes comprehensive error handling and has been designed with production features in mind, more thorough testing is necessary before deploying in critical environments. Use at your own risk and conduct appropriate testing for your use case.

+## Performance
+
+QuickDID is designed for high throughput and low latency:
+
+- **Binary serialization** reduces cache storage by ~40% compared to JSON
+- **Rate limiting** protects upstream services from being overwhelmed
+- **Work shedding** in SQLite queue adapter prevents unbounded growth
+- **Configurable TTLs** allow fine-tuning cache freshness vs. performance
+- **Connection pooling** for Redis minimizes connection overhead
+
 ## Features

 - **Fast Handle Resolution**: Resolves AT Protocol handles to DIDs using DNS TXT records and HTTP well-known endpoints
-- **Multi-Layer Caching**: In-memory caching with configurable TTL and Redis-backed persistent caching (90-day TTL)
+- **Bidirectional Caching**: Supports both handle-to-DID and DID-to-handle lookups with automatic cache synchronization
+- **Multi-Layer Caching**: Flexible caching with three tiers:
+  - In-memory caching with configurable TTL (default: 600 seconds)
+  - Redis-backed persistent caching (default: 90-day TTL)
+  - SQLite-backed persistent caching (default: 90-day TTL)
+- **Jetstream Consumer**: Real-time cache updates from AT Protocol firehose:
+  - Processes Account and Identity events
+  - Automatically purges deleted/deactivated accounts
+  - Updates handle-to-DID mappings in real-time
+  - Comprehensive metrics for event processing
+  - Automatic reconnection with backoff
+- **HTTP Caching**: Client-side caching support (illustrated in the example after this list) with:
+  - ETag generation with configurable seed for cache invalidation
+  - Cache-Control headers with max-age, stale-while-revalidate, and stale-if-error directives
+  - CORS headers for cross-origin requests
+- **Rate Limiting**: Semaphore-based concurrency control with optional timeout to protect upstream services
 - **Binary Serialization**: Compact storage format reduces cache size by ~40% compared to JSON
-- **Queue Processing**: Asynchronous handle resolution with support for MPSC, Redis, and no-op queue adapters
+- **Queue Processing**: Asynchronous handle resolution with multiple adapters:
+  - MPSC (in-memory, default)
+  - Redis (distributed)
+  - SQLite (persistent with work shedding)
+  - No-op (testing)
+- **Metrics & Monitoring**:
+  - StatsD metrics support for counters, gauges, and timings
+  - Resolution timing measurements
+  - Jetstream event processing metrics
+  - Configurable tags for environment/service identification
+  - Integration guides for Telegraf and TimescaleDB
+  - Configurable bind address for StatsD UDP socket (IPv4/IPv6)
+- **Proactive Cache Refresh**:
+  - Automatically refreshes cache entries before expiration
+  - Configurable refresh threshold
+  - Prevents cache misses for frequently accessed handles
+  - Metrics tracking for refresh operations
+- **Queue Deduplication**:
+  - Redis-based deduplication for queue items
+  - Prevents duplicate handle resolution work
+  - Configurable TTL for deduplication keys
+- **Cache Management APIs**:
+  - `purge` method for removing entries by handle or DID
+  - `set` method for manually updating handle-to-DID mappings
+  - Chainable operations across resolver layers
 - **AT Protocol Compatible**: Implements XRPC endpoints for seamless integration with AT Protocol infrastructure
-- **Comprehensive Error Handling**: Includes health checks and graceful shutdown support
+- **Comprehensive Error Handling**: Structured errors with unique identifiers (e.g., `error-quickdid-config-1`), health checks, and graceful shutdown
+- **12-Factor App**: Environment-based configuration following cloud-native best practices
 - **Minimal Dependencies**: Optimized dependency tree for faster compilation and reduced attack surface
-- **Predictable Worker IDs**: Simple default worker identification for distributed deployments

22
71
## Building
23
72
···
25
74
26
75
- Rust 1.70 or later
27
76
- Redis (optional, for persistent caching and distributed queuing)
77
+
- SQLite 3.35+ (optional, for single-instance persistent caching)
28
78
29
79
### Build Commands
30
80
···
45
95
46
96
## Minimum Configuration
47
97
48
-
QuickDID requires the following environment variables to run:
98
+
QuickDID requires minimal configuration to run. Configuration is validated at startup, and the service will exit with specific error codes if validation fails.
49
99
50
100
### Required
51
101
52
102
- `HTTP_EXTERNAL`: External hostname for service endpoints (e.g., `localhost:3007`)
53
-
- `SERVICE_KEY`: Private key for service identity in DID format (e.g., `did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK`)
54
103
55
104
### Example Minimal Setup
56
105
57
106
```bash
58
-
HTTP_EXTERNAL=localhost:3007 \
59
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
60
-
cargo run
107
+
HTTP_EXTERNAL=localhost:3007 cargo run
108
+
```
109
+
110
+
### Static Files
111
+
112
+
QuickDID serves static files from the `www` directory by default. This includes:
113
+
- Landing page (`index.html`)
114
+
- AT Protocol well-known files (`.well-known/atproto-did` and `.well-known/did.json`)
115
+
116
+
Generate the `.well-known` files for your deployment:
117
+
118
+
```bash
119
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
61
120
```
62
121
63
122
The minimal setup above will start QuickDID with:
64
123
- HTTP server on port 8080 (default)
65
-
- In-memory caching only (300-second TTL)
124
+
- In-memory caching only (600-second default TTL)
66
125
- MPSC queue adapter for async processing
67
126
- Default worker ID: "worker1"
68
127
- Connection to plc.directory for DID resolution
128
+
- Rate limiting disabled (default)
69
129
70
130
### Optional Configuration
71
131
72
132
For production deployments, consider these additional environment variables:
73
133
134
+
#### Network & Service
74
135
- `HTTP_PORT`: Server port (default: 8080)
75
-
- `REDIS_URL`: Redis connection URL for persistent caching (e.g., `redis://localhost:6379`)
76
-
- `QUEUE_ADAPTER`: Queue type - 'mpsc', 'redis', or 'noop' (default: mpsc)
77
-
- `QUEUE_WORKER_ID`: Worker identifier for distributed queue processing (default: worker1)
78
136
- `PLC_HOSTNAME`: PLC directory hostname (default: plc.directory)
137
+
- `USER_AGENT`: HTTP User-Agent for outgoing requests
138
+
- `DNS_NAMESERVERS`: Custom DNS servers (comma-separated)
139
+
140
+
#### Caching
141
+
- `REDIS_URL`: Redis connection URL (e.g., `redis://localhost:6379`)
142
+
- `SQLITE_URL`: SQLite database URL (e.g., `sqlite:./quickdid.db`)
143
+
- `CACHE_TTL_MEMORY`: In-memory cache TTL in seconds (default: 600)
144
+
- `CACHE_TTL_REDIS`: Redis cache TTL in seconds (default: 7776000 = 90 days)
145
+
- `CACHE_TTL_SQLITE`: SQLite cache TTL in seconds (default: 7776000 = 90 days)
146
+
147
+
#### Queue Processing
148
+
- `QUEUE_ADAPTER`: Queue type - 'mpsc', 'redis', 'sqlite', 'noop', or 'none' (default: mpsc)
149
+
- `QUEUE_WORKER_ID`: Worker identifier (default: worker1)
150
+
- `QUEUE_BUFFER_SIZE`: MPSC queue buffer size (default: 1000)
151
+
- `QUEUE_REDIS_PREFIX`: Redis key prefix for queues (default: queue:handleresolver:)
152
+
- `QUEUE_REDIS_TIMEOUT`: Redis blocking timeout in seconds (default: 5)
153
+
- `QUEUE_REDIS_DEDUP_ENABLED`: Enable queue deduplication (default: false)
154
+
- `QUEUE_REDIS_DEDUP_TTL`: TTL for deduplication keys in seconds (default: 60)
155
+
- `QUEUE_SQLITE_MAX_SIZE`: Max SQLite queue size for work shedding (default: 10000)
156
+
157
+
#### Rate Limiting
158
+
- `RESOLVER_MAX_CONCURRENT`: Maximum concurrent handle resolutions (default: 0 = disabled)
159
+
- `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`: Timeout for acquiring rate limit permit in ms (default: 0 = no timeout)
160
+
161
+
#### HTTP Cache Control
162
+
- `CACHE_MAX_AGE`: Max-age for Cache-Control header in seconds (default: 86400)
163
+
- `CACHE_STALE_IF_ERROR`: Stale-if-error directive in seconds (default: 172800)
164
+
- `CACHE_STALE_WHILE_REVALIDATE`: Stale-while-revalidate in seconds (default: 86400)
165
+
- `CACHE_MAX_STALE`: Max-stale directive in seconds (default: 86400)
166
+
- `ETAG_SEED`: Seed value for ETag generation (default: application version)
167
+
168
+
#### Metrics
169
+
- `METRICS_ADAPTER`: Metrics adapter type - 'noop' or 'statsd' (default: noop)
170
+
- `METRICS_STATSD_HOST`: StatsD host and port (required when METRICS_ADAPTER=statsd)
171
+
- `METRICS_STATSD_BIND`: Bind address for StatsD UDP socket (default: [::]:0 for IPv6, can use 0.0.0.0:0 for IPv4)
172
+
- `METRICS_PREFIX`: Prefix for all metrics (default: quickdid)
173
+
- `METRICS_TAGS`: Comma-separated tags (e.g., env:prod,service:quickdid)
174
+
175
+
#### Proactive Refresh
176
+
- `PROACTIVE_REFRESH_ENABLED`: Enable proactive cache refreshing (default: false)
177
+
- `PROACTIVE_REFRESH_THRESHOLD`: Refresh when TTL remaining is below this threshold (0.0-1.0, default: 0.8)
178
+
179
+
#### Jetstream Consumer
180
+
- `JETSTREAM_ENABLED`: Enable Jetstream consumer for real-time cache updates (default: false)
181
+
- `JETSTREAM_HOSTNAME`: Jetstream WebSocket hostname (default: jetstream.atproto.tools)
182
+
183
+
#### Static Files
184
+
- `STATIC_FILES_DIR`: Directory for serving static files (default: www)
185
+
186
+
#### Logging
79
187
- `RUST_LOG`: Logging level (e.g., debug, info, warn, error)
80
188
81
-
### Production Example
189
+
### Production Examples
82
190
191
+
#### Redis-based with Metrics and Jetstream (Multi-instance/HA)
83
192
```bash
84
193
HTTP_EXTERNAL=quickdid.example.com \
85
-
SERVICE_KEY=did:key:yourkeyhere \
86
194
HTTP_PORT=3000 \
87
195
REDIS_URL=redis://localhost:6379 \
196
+
CACHE_TTL_REDIS=86400 \
88
197
QUEUE_ADAPTER=redis \
89
198
QUEUE_WORKER_ID=prod-worker-1 \
199
+
RESOLVER_MAX_CONCURRENT=100 \
200
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 \
201
+
METRICS_ADAPTER=statsd \
202
+
METRICS_STATSD_HOST=localhost:8125 \
203
+
METRICS_PREFIX=quickdid \
204
+
METRICS_TAGS=env:prod,service:quickdid \
205
+
CACHE_MAX_AGE=86400 \
206
+
JETSTREAM_ENABLED=true \
207
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools \
90
208
RUST_LOG=info \
91
209
./target/release/quickdid
92
210
```
93
211
212
+
#### SQLite-based (Single-instance)
213
+
```bash
214
+
HTTP_EXTERNAL=quickdid.example.com \
215
+
HTTP_PORT=3000 \
216
+
SQLITE_URL=sqlite:./quickdid.db \
217
+
CACHE_TTL_SQLITE=86400 \
218
+
QUEUE_ADAPTER=sqlite \
219
+
QUEUE_SQLITE_MAX_SIZE=10000 \
220
+
RESOLVER_MAX_CONCURRENT=50 \
221
+
RUST_LOG=info \
222
+
./target/release/quickdid
223
+
```
224
+
225
+
## Architecture
226
+
227
+
QuickDID uses a layered architecture for optimal performance:
228
+
229
+
```
230
+
Request → Cache Layer → Proactive Refresh → Rate Limiter → Base Resolver → DNS/HTTP
231
+
              ↓                  ↓                ↓              ↓
232
+
        Memory/Redis/        Background       Semaphore     AT Protocol
233
+
           SQLite            Refresher        (optional)   Infrastructure
234
+
              ↑
235
+
    Jetstream Consumer ← Real-time Updates from AT Protocol Firehose
236
+
```
237
+
238
+
### Cache Priority
239
+
QuickDID checks caches in this order:
240
+
1. Redis (if configured) - Best for distributed deployments
241
+
2. SQLite (if configured) - Best for single-instance with persistence
242
+
3. In-memory (fallback) - Always available
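A hybrid setup simply configures more than one tier; a minimal sketch with illustrative values:

```bash
# Redis is consulted first, then SQLite, then the in-memory fallback
REDIS_URL=redis://localhost:6379
SQLITE_URL=sqlite:./quickdid.db
CACHE_TTL_MEMORY=600
```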
243
+
244
+
### Real-time Cache Updates
245
+
When Jetstream is enabled, QuickDID maintains cache consistency by:
246
+
- Processing Account events to purge deleted/deactivated accounts
247
+
- Processing Identity events to update handle-to-DID mappings
248
+
- Automatically reconnecting with exponential backoff on connection failures
249
+
- Tracking metrics for successful and failed event processing
250
+
251
+
### Deployment Strategies
252
+
253
+
- **Single-instance**: Use SQLite for both caching and queuing
254
+
- **Multi-instance/HA**: Use Redis for distributed caching and queuing
255
+
- **Development**: Use in-memory caching with MPSC queuing
256
+
- **Real-time sync**: Enable Jetstream consumer for live cache updates
257
+
94
258
## API Endpoints
95
259
96
260
- `GET /_health` - Health check endpoint
97
261
- `GET /xrpc/com.atproto.identity.resolveHandle` - Resolve handle to DID
98
262
- `GET /.well-known/atproto-did` - Serve DID document for the service
263
+
- `OPTIONS /*` - CORS preflight support for all endpoints
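A quick way to exercise these endpoints is with curl (hostname and handle are placeholders):

```bash
# Health check
curl -s http://localhost:8080/_health

# Resolve a handle to a DID (returns a JSON body containing the DID)
curl -s "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.com"
```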
264
+
265
+
## Docker Deployment
266
+
267
+
QuickDID can be deployed using Docker. See the [production deployment guide](docs/production-deployment.md) for detailed Docker and Docker Compose configurations.
268
+
269
+
### Quick Docker Setup
270
+
271
+
```bash
272
+
# Build the image
273
+
docker build -t quickdid:latest .
274
+
275
+
# Run with environment file
276
+
docker run -d \
277
+
--name quickdid \
278
+
--env-file .env \
279
+
-p 8080:8080 \
280
+
quickdid:latest
281
+
```
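Once the container is running, a minimal smoke test might look like:

```bash
curl -s http://localhost:8080/_health
docker logs quickdid --tail 20
```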
282
+
283
+
## Documentation
284
+
285
+
- [Configuration Reference](docs/configuration-reference.md) - Complete list of all configuration options
286
+
- [Production Deployment Guide](docs/production-deployment.md) - Docker, monitoring, and production best practices
287
+
- [Metrics Guide](docs/telegraf-timescaledb-metrics-guide.md) - Setting up metrics with Telegraf and TimescaleDB
288
+
- [Development Guide](CLAUDE.md) - Architecture details and development patterns
289
+
290
+
## Railway Deployment
291
+
292
+
QuickDID includes Railway deployment resources in the `railway-resources/` directory for easy deployment with metrics support via Telegraf. See the deployment configurations for one-click deployment options.
99
293
100
294
## License
101
295
+41
docker-compose.yml
+41
docker-compose.yml
···
1
+
version: '3.8'
2
+
3
+
services:
4
+
quickdid:
5
+
image: quickdid:latest
6
+
build: .
7
+
ports:
8
+
- "3007:8080"
9
+
environment:
10
+
- HTTP_EXTERNAL=localhost:3007
11
+
- HTTP_PORT=8080
12
+
- RUST_LOG=info
13
+
# Optional: Override the static files directory
14
+
# - STATIC_FILES_DIR=/app/custom-www
15
+
volumes:
16
+
# Optional: Mount custom static files from host
17
+
# - ./custom-www:/app/custom-www:ro
18
+
# Optional: Mount custom .well-known files
19
+
# - ./www/.well-known:/app/www/.well-known:ro
20
+
# Optional: Use SQLite for caching
21
+
# - ./data:/app/data
22
+
# Add these under the service's 'environment:' key above:
23
+
# SQLite cache configuration
24
+
# - SQLITE_URL=sqlite:/app/data/quickdid.db
25
+
# - CACHE_TTL_SQLITE=86400
26
+
27
+
# Redis cache configuration (if using external Redis)
28
+
# - REDIS_URL=redis://redis:6379
29
+
# - CACHE_TTL_REDIS=86400
30
+
# - QUEUE_ADAPTER=redis
31
+
32
+
# Optional: Redis service for caching
33
+
# redis:
34
+
# image: redis:7-alpine
35
+
# ports:
36
+
# - "6379:6379"
37
+
# volumes:
38
+
# - redis-data:/data
39
+
40
+
volumes:
41
+
redis-data:
+735
-44
docs/configuration-reference.md
+735
-44
docs/configuration-reference.md
···
8
8
- [Network Configuration](#network-configuration)
9
9
- [Caching Configuration](#caching-configuration)
10
10
- [Queue Configuration](#queue-configuration)
11
-
- [Security Configuration](#security-configuration)
12
-
- [Advanced Configuration](#advanced-configuration)
11
+
- [Rate Limiting Configuration](#rate-limiting-configuration)
12
+
- [HTTP Caching Configuration](#http-caching-configuration)
13
+
- [Metrics Configuration](#metrics-configuration)
14
+
- [Proactive Refresh Configuration](#proactive-refresh-configuration)
15
+
- [Jetstream Consumer Configuration](#jetstream-consumer-configuration)
16
+
- [Static Files Configuration](#static-files-configuration)
13
17
- [Configuration Examples](#configuration-examples)
14
18
- [Validation Rules](#validation-rules)
15
19
···
40
44
**Constraints**:
41
45
- Must be a valid hostname or hostname:port combination
42
46
- Port (if specified) must be between 1-65535
43
-
- Used to generate service DID (did:web:{HTTP_EXTERNAL})
44
-
45
-
### `SERVICE_KEY`
46
-
47
-
**Required**: Yes
48
-
**Type**: String
49
-
**Format**: DID private key
50
-
**Security**: SENSITIVE - Never commit to version control
51
-
52
-
The private key for the service's AT Protocol identity. This key is used to sign responses and authenticate the service.
53
-
54
-
**Examples**:
55
-
```bash
56
-
# did:key format (Ed25519)
57
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
58
-
59
-
# did:plc format
60
-
SERVICE_KEY=did:plc:xyz123abc456def789
61
-
```
62
-
63
-
**Constraints**:
64
-
- Must be a valid DID format
65
-
- Must include the private key component
66
-
- Should be stored securely (e.g., secrets manager, encrypted storage)
67
47
68
48
## Network Configuration
69
49
···
378
358
QUEUE_REDIS_TIMEOUT=30 # Minimal polling, slow shutdown
379
359
```
380
360
361
+
### `QUEUE_REDIS_DEDUP_ENABLED`
362
+
363
+
**Required**: No
364
+
**Type**: Boolean
365
+
**Default**: `false`
366
+
367
+
Enable deduplication for Redis queue to prevent duplicate handles from being queued multiple times within the TTL window. When enabled, uses Redis SET with TTL to track handles currently being processed.
368
+
369
+
**Examples**:
370
+
```bash
371
+
# Enable deduplication (recommended for production)
372
+
QUEUE_REDIS_DEDUP_ENABLED=true
373
+
374
+
# Disable deduplication (default)
375
+
QUEUE_REDIS_DEDUP_ENABLED=false
376
+
```
377
+
378
+
**Use cases**:
379
+
- **Production**: Enable to prevent duplicate work and reduce load
380
+
- **High-traffic**: Essential to avoid processing the same handle multiple times
381
+
- **Development**: Can be disabled for simpler debugging
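The mechanism can be sketched with redis-cli; the key name below is illustrative, but the `SET ... NX EX` pattern is what "Redis SET with TTL" refers to:

```bash
# First enqueue claims the dedup key (NX = only if absent, EX = TTL in seconds)
redis-cli SET "dedup:alice.example.com" 1 NX EX 60   # -> OK (handle is queued)

# A duplicate within the TTL window fails to claim the key and is skipped
redis-cli SET "dedup:alice.example.com" 1 NX EX 60   # -> (nil)
```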
382
+
383
+
### `QUEUE_REDIS_DEDUP_TTL`
384
+
385
+
**Required**: No
386
+
**Type**: Integer (seconds)
387
+
**Default**: `60`
388
+
**Range**: 10-300 (recommended)
389
+
**Constraints**: Must be > 0 when deduplication is enabled
390
+
391
+
TTL for Redis queue deduplication keys in seconds. Determines how long to prevent duplicate handle resolution requests.
392
+
393
+
**Examples**:
394
+
```bash
395
+
# Quick deduplication window (10 seconds)
396
+
QUEUE_REDIS_DEDUP_TTL=10
397
+
398
+
# Default (1 minute)
399
+
QUEUE_REDIS_DEDUP_TTL=60
400
+
401
+
# Extended deduplication (5 minutes)
402
+
QUEUE_REDIS_DEDUP_TTL=300
403
+
```
404
+
405
+
**Recommendations**:
406
+
- **Fast processing**: 10-30 seconds
407
+
- **Normal processing**: 60 seconds (default)
408
+
- **Slow processing or high load**: 120-300 seconds
409
+
381
410
### `QUEUE_WORKER_ID`
382
411
383
412
**Required**: No
···
453
482
- **Disk space concerns**: Lower values (1000-5000)
454
483
- **High ingestion rate**: Higher values (50000-1000000)
455
484
485
+
## Rate Limiting Configuration
486
+
487
+
### `RESOLVER_MAX_CONCURRENT`
488
+
489
+
**Required**: No
490
+
**Type**: Integer
491
+
**Default**: `0` (disabled)
492
+
**Range**: 0-10000
493
+
**Constraints**: Must be between 0 and 10000
494
+
495
+
Maximum concurrent handle resolutions allowed. When set to a value greater than 0, enables semaphore-based rate limiting to protect upstream DNS and HTTP services from being overwhelmed.
496
+
497
+
**How it works**:
498
+
- Uses a semaphore to limit concurrent resolutions
499
+
- Applied between the base resolver and caching layers
500
+
- Requests wait for an available permit before resolution
501
+
- Helps prevent overwhelming upstream services
502
+
503
+
**Examples**:
504
+
```bash
505
+
# Disabled (default)
506
+
RESOLVER_MAX_CONCURRENT=0
507
+
508
+
# Light rate limiting
509
+
RESOLVER_MAX_CONCURRENT=10
510
+
511
+
# Moderate rate limiting
512
+
RESOLVER_MAX_CONCURRENT=50
513
+
514
+
# Heavy traffic with rate limiting
515
+
RESOLVER_MAX_CONCURRENT=100
516
+
517
+
# Maximum allowed
518
+
RESOLVER_MAX_CONCURRENT=10000
519
+
```
520
+
521
+
**Recommendations**:
522
+
- **Development**: 0 (disabled) or 10-50 for testing
523
+
- **Production (low traffic)**: 50-100
524
+
- **Production (high traffic)**: 100-500
525
+
- **Production (very high traffic)**: 500-1000
526
+
- **Testing rate limiting**: 1-5 to observe behavior
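The limiter can be observed from the outside by issuing parallel requests and comparing latencies; a rough sketch (endpoint and handle are placeholders, and note that cached handles bypass the limiter, so a realistic test needs uncached handles):

```bash
# With RESOLVER_MAX_CONCURRENT=5, 50 parallel requests queue behind permits
seq 1 50 | xargs -P 50 -I{} curl -s -o /dev/null \
  -w "%{http_code} %{time_total}s\n" \
  "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.com"
```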
527
+
528
+
**Placement in resolver stack**:
529
+
```
530
+
Request → Cache → RateLimited → Base → DNS/HTTP
531
+
```
532
+
533
+
### `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`
534
+
535
+
**Required**: No
536
+
**Type**: Integer (milliseconds)
537
+
**Default**: `0` (no timeout)
538
+
**Range**: 0-60000
539
+
**Constraints**: Must be between 0 and 60000 (60 seconds max)
540
+
541
+
Timeout for acquiring a rate limit permit in milliseconds. When set to a value greater than 0, requests will timeout if they cannot acquire a permit within the specified time, preventing them from waiting indefinitely when the rate limiter is at capacity.
542
+
543
+
**How it works**:
544
+
- Applied when `RESOLVER_MAX_CONCURRENT` is enabled (> 0)
545
+
- Uses `tokio::time::timeout` to limit permit acquisition time
546
+
- Returns an error if timeout expires before permit is acquired
547
+
- Prevents request queue buildup during high load
548
+
549
+
**Examples**:
550
+
```bash
551
+
# No timeout (default)
552
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=0
553
+
554
+
# Quick timeout for responsive failures (100ms)
555
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=100
556
+
557
+
# Moderate timeout (1 second)
558
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=1000
559
+
560
+
# Longer timeout for production (5 seconds)
561
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000
562
+
563
+
# Maximum allowed (60 seconds)
564
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=60000
565
+
```
566
+
567
+
**Recommendations**:
568
+
- **Development**: 100-1000ms for quick feedback
569
+
- **Production (low latency)**: 1000-5000ms
570
+
- **Production (high latency tolerance)**: 5000-30000ms
571
+
- **Testing**: 100ms to quickly identify bottlenecks
572
+
- **0**: Use when you want requests to wait indefinitely
573
+
574
+
**Error behavior**:
575
+
When a timeout occurs, the request fails with:
576
+
```
577
+
Rate limit permit acquisition timed out after {timeout}ms
578
+
```
579
+
580
+
## Metrics Configuration
581
+
582
+
### `METRICS_ADAPTER`
583
+
584
+
**Required**: No
585
+
**Type**: String
586
+
**Default**: `noop`
587
+
**Values**: `noop`, `statsd`
588
+
589
+
Metrics adapter type for collecting and publishing metrics.
590
+
591
+
**Options**:
592
+
- `noop`: No metrics collection (default)
593
+
- `statsd`: Send metrics to StatsD server
594
+
595
+
**Examples**:
596
+
```bash
597
+
# No metrics (default)
598
+
METRICS_ADAPTER=noop
599
+
600
+
# Enable StatsD metrics
601
+
METRICS_ADAPTER=statsd
602
+
```
603
+
604
+
### `METRICS_STATSD_HOST`
605
+
606
+
**Required**: Yes (when METRICS_ADAPTER=statsd)
607
+
**Type**: String
608
+
**Format**: hostname:port
609
+
610
+
StatsD server host and port for metrics collection.
611
+
612
+
**Examples**:
613
+
```bash
614
+
# Local StatsD
615
+
METRICS_STATSD_HOST=localhost:8125
616
+
617
+
# Remote StatsD
618
+
METRICS_STATSD_HOST=statsd.example.com:8125
619
+
620
+
# Docker network
621
+
METRICS_STATSD_HOST=statsd:8125
622
+
```
623
+
624
+
### `METRICS_STATSD_BIND`
625
+
626
+
**Required**: No
627
+
**Type**: String
628
+
**Default**: `[::]:0`
629
+
630
+
Bind address for StatsD UDP socket. Controls which local address to bind for sending UDP packets.
631
+
632
+
**Examples**:
633
+
```bash
634
+
# IPv6 any address, random port (default)
635
+
METRICS_STATSD_BIND=[::]:0
636
+
637
+
# IPv4 any address, random port
638
+
METRICS_STATSD_BIND=0.0.0.0:0
639
+
640
+
# Specific interface
641
+
METRICS_STATSD_BIND=192.168.1.100:0
642
+
643
+
# Specific port
644
+
METRICS_STATSD_BIND=[::]:8126
645
+
```
646
+
647
+
### `METRICS_PREFIX`
648
+
649
+
**Required**: No
650
+
**Type**: String
651
+
**Default**: `quickdid`
652
+
653
+
Prefix for all metrics. Used to namespace metrics in your monitoring system.
654
+
655
+
**Examples**:
656
+
```bash
657
+
# Default
658
+
METRICS_PREFIX=quickdid
659
+
660
+
# Environment-specific
661
+
METRICS_PREFIX=prod.quickdid
662
+
METRICS_PREFIX=staging.quickdid
663
+
664
+
# Region-specific
665
+
METRICS_PREFIX=us-east-1.quickdid
666
+
METRICS_PREFIX=eu-west-1.quickdid
667
+
668
+
# Service-specific
669
+
METRICS_PREFIX=api.quickdid
670
+
```
671
+
672
+
### `METRICS_TAGS`
673
+
674
+
**Required**: No
675
+
**Type**: String (comma-separated key:value pairs)
676
+
**Default**: None
677
+
678
+
Default tags for all metrics. Added to all metrics for filtering and grouping.
679
+
680
+
**Examples**:
681
+
```bash
682
+
# Basic tags
683
+
METRICS_TAGS=env:production,service:quickdid
684
+
685
+
# Detailed tags
686
+
METRICS_TAGS=env:production,service:quickdid,region:us-east-1,version:1.0.0
687
+
688
+
# Deployment-specific
689
+
METRICS_TAGS=env:staging,cluster:k8s-staging,namespace:quickdid
690
+
```
691
+
692
+
**Common tag patterns**:
693
+
- `env`: Environment (production, staging, development)
694
+
- `service`: Service name
695
+
- `region`: Geographic region
696
+
- `version`: Application version
697
+
- `cluster`: Kubernetes cluster name
698
+
- `instance`: Instance identifier
699
+
700
+
## Proactive Refresh Configuration
701
+
702
+
### `PROACTIVE_REFRESH_ENABLED`
703
+
704
+
**Required**: No
705
+
**Type**: Boolean
706
+
**Default**: `false`
707
+
708
+
Enable proactive cache refresh for frequently accessed handles. When enabled, cache entries that have reached the refresh threshold will be queued for background refresh to keep the cache warm.
709
+
710
+
**Examples**:
711
+
```bash
712
+
# Enable proactive refresh (recommended for production)
713
+
PROACTIVE_REFRESH_ENABLED=true
714
+
715
+
# Disable proactive refresh (default)
716
+
PROACTIVE_REFRESH_ENABLED=false
717
+
```
718
+
719
+
**Benefits**:
720
+
- Prevents cache misses for popular handles
721
+
- Maintains consistent response times
722
+
- Reduces latency spikes during cache expiration
723
+
724
+
**Considerations**:
725
+
- Increases background processing load
726
+
- More DNS/HTTP requests to upstream services
727
+
- Best for high-traffic services with predictable access patterns
728
+
729
+
### `PROACTIVE_REFRESH_THRESHOLD`
730
+
731
+
**Required**: No
732
+
**Type**: Float
733
+
**Default**: `0.8`
734
+
**Range**: 0.0-1.0
735
+
**Constraints**: Must be between 0.0 and 1.0
736
+
737
+
Threshold, expressed as a fraction (0.0-1.0) of the cache TTL, at which to trigger proactive refresh. For example, 0.8 means refresh when an entry has lived for 80% of its TTL.
738
+
739
+
**Examples**:
740
+
```bash
741
+
# Very aggressive (refresh at 50% of TTL)
742
+
PROACTIVE_REFRESH_THRESHOLD=0.5
743
+
744
+
# Moderate (refresh at 70% of TTL)
745
+
PROACTIVE_REFRESH_THRESHOLD=0.7
746
+
747
+
# Default (refresh at 80% of TTL)
748
+
PROACTIVE_REFRESH_THRESHOLD=0.8
749
+
750
+
# Conservative (refresh at 90% of TTL)
751
+
PROACTIVE_REFRESH_THRESHOLD=0.9
752
+
753
+
# Very conservative (refresh at 95% of TTL)
754
+
PROACTIVE_REFRESH_THRESHOLD=0.95
755
+
```
756
+
757
+
**Recommendations**:
758
+
- **High-traffic services**: 0.5-0.7 (aggressive refresh)
759
+
- **Normal traffic**: 0.8 (default, balanced)
760
+
- **Low traffic**: 0.9-0.95 (conservative)
761
+
- **Development**: 0.5 (test refresh behavior)
762
+
763
+
**Impact on different cache TTLs**:
764
+
- TTL=600s (10 min), threshold=0.8: Refresh after 8 minutes
765
+
- TTL=3600s (1 hour), threshold=0.8: Refresh after 48 minutes
766
+
- TTL=86400s (1 day), threshold=0.8: Refresh after 19.2 hours
767
+
768
+
## Jetstream Consumer Configuration
769
+
770
+
### `JETSTREAM_ENABLED`
771
+
772
+
**Required**: No
773
+
**Type**: Boolean
774
+
**Default**: `false`
775
+
776
+
Enable Jetstream consumer for real-time cache updates from the AT Protocol firehose. When enabled, QuickDID connects to the Jetstream WebSocket service to receive live updates about account and identity changes.
777
+
778
+
**How it works**:
779
+
- Subscribes to Account and Identity events from the firehose
780
+
- Processes Account events to purge deleted/deactivated accounts
781
+
- Processes Identity events to update handle-to-DID mappings
782
+
- Automatically reconnects with exponential backoff on connection failures
783
+
- Tracks metrics for successful and failed event processing
784
+
785
+
**Examples**:
786
+
```bash
787
+
# Enable Jetstream consumer (recommended for production)
788
+
JETSTREAM_ENABLED=true
789
+
790
+
# Disable Jetstream consumer (default)
791
+
JETSTREAM_ENABLED=false
792
+
```
793
+
794
+
**Benefits**:
795
+
- Real-time cache synchronization with AT Protocol network
796
+
- Automatic removal of deleted/deactivated accounts
797
+
- Immediate handle change updates
798
+
- Reduces stale data in cache
799
+
800
+
**Considerations**:
801
+
- Requires stable WebSocket connection
802
+
- Increases network traffic (incoming events)
803
+
- Best for services requiring up-to-date handle mappings
804
+
- Automatically handles reconnection on failures
805
+
806
+
### `JETSTREAM_HOSTNAME`
807
+
808
+
**Required**: No
809
+
**Type**: String
810
+
**Default**: `jetstream.atproto.tools`
811
+
812
+
The hostname of the Jetstream WebSocket service to connect to for real-time AT Protocol events. Only used when `JETSTREAM_ENABLED=true`.
813
+
814
+
**Examples**:
815
+
```bash
816
+
# Production firehose (default)
817
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
818
+
819
+
# Staging environment
820
+
JETSTREAM_HOSTNAME=jetstream-staging.atproto.tools
821
+
822
+
# Local development firehose
823
+
JETSTREAM_HOSTNAME=localhost:6008
824
+
825
+
# Custom deployment
826
+
JETSTREAM_HOSTNAME=jetstream.example.com
827
+
```
828
+
829
+
**Event Processing**:
830
+
- **Account events**:
831
+
- `status: deleted` → Purges handle and DID from all caches
832
+
- `status: deactivated` → Purges handle and DID from all caches
833
+
- Other statuses → Ignored
834
+
835
+
- **Identity events**:
836
+
- Updates handle-to-DID mapping in cache
837
+
- Removes old handle mapping if changed
838
+
- Maintains bidirectional cache consistency
839
+
840
+
**Metrics Tracked** (when metrics are enabled):
841
+
- `jetstream.events.received`: Total events received
842
+
- `jetstream.events.processed`: Successfully processed events
843
+
- `jetstream.events.failed`: Failed event processing
844
+
- `jetstream.connections.established`: Successful connections
845
+
- `jetstream.connections.failed`: Failed connection attempts
846
+
847
+
**Reconnection Behavior**:
848
+
- Initial retry delay: 1 second
849
+
- Maximum retry delay: 60 seconds
850
+
- Exponential backoff with jitter
851
+
- Automatic recovery on transient failures
852
+
853
+
**Recommendations**:
854
+
- **Production**: Use default `jetstream.atproto.tools`
855
+
- **Development**: Consider local firehose for testing
856
+
- **High availability**: Monitor connection metrics
857
+
- **Network issues**: Check WebSocket connectivity
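For manual inspection, a generic WebSocket client can be pointed at the stream; the `/subscribe` path follows the public Jetstream API, though the exact parameters a given deployment accepts are an assumption here:

```bash
# Stream a few raw events from the firehose (requires websocat)
websocat "wss://jetstream.atproto.tools/subscribe" | head -n 5
```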
858
+
859
+
## Static Files Configuration
860
+
861
+
### `STATIC_FILES_DIR`
862
+
863
+
**Required**: No
864
+
**Type**: String (directory path)
865
+
**Default**: `www`
866
+
867
+
Directory path for serving static files. This directory should contain the landing page and AT Protocol well-known files.
868
+
869
+
**Directory Structure**:
870
+
```
871
+
www/
872
+
├── index.html              # Landing page
873
+
├── .well-known/
874
+
│   ├── atproto-did         # Service DID identifier
875
+
│   └── did.json            # DID document
876
+
└── (other static assets)
877
+
```
878
+
879
+
**Examples**:
880
+
```bash
881
+
# Default (relative to working directory)
882
+
STATIC_FILES_DIR=www
883
+
884
+
# Absolute path
885
+
STATIC_FILES_DIR=/var/www/quickdid
886
+
887
+
# Docker container path
888
+
STATIC_FILES_DIR=/app/www
889
+
890
+
# Custom directory
891
+
STATIC_FILES_DIR=./public
892
+
```
893
+
894
+
**Docker Volume Mounting**:
895
+
```yaml
896
+
volumes:
897
+
# Mount entire custom directory
898
+
- ./custom-www:/app/www:ro
899
+
900
+
# Mount specific files
901
+
- ./custom-index.html:/app/www/index.html:ro
902
+
- ./well-known:/app/www/.well-known:ro
903
+
```
904
+
905
+
**Generating Well-Known Files**:
906
+
```bash
907
+
# Generate .well-known files for your domain
908
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
909
+
```
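Once deployed, the files can be verified directly (domain is a placeholder):

```bash
curl -s https://your-domain.com/.well-known/atproto-did
curl -s https://your-domain.com/.well-known/did.json
```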
910
+
911
+
## HTTP Caching Configuration
912
+
913
+
### `CACHE_MAX_AGE`
914
+
915
+
**Required**: No
916
+
**Type**: Integer (seconds)
917
+
**Default**: `86400` (24 hours)
918
+
**Range**: 0-31536000 (0 to 1 year)
919
+
920
+
Maximum age for HTTP Cache-Control header in seconds. When set to 0, the Cache-Control header is disabled and will not be added to responses. This controls how long clients and intermediate caches can cache responses.
921
+
922
+
**Examples**:
923
+
```bash
924
+
# Default (24 hours)
925
+
CACHE_MAX_AGE=86400
926
+
927
+
# Aggressive caching (7 days)
928
+
CACHE_MAX_AGE=604800
929
+
930
+
# Conservative caching (1 hour)
931
+
CACHE_MAX_AGE=3600
932
+
933
+
# Disable Cache-Control header
934
+
CACHE_MAX_AGE=0
935
+
```
936
+
937
+
### `CACHE_STALE_IF_ERROR`
938
+
939
+
**Required**: No
940
+
**Type**: Integer (seconds)
941
+
**Default**: `172800` (48 hours)
942
+
943
+
Allows stale content to be served if the backend encounters an error. This provides resilience during service outages.
944
+
945
+
**Examples**:
946
+
```bash
947
+
# Default (48 hours)
948
+
CACHE_STALE_IF_ERROR=172800
949
+
950
+
# Extended error tolerance (7 days)
951
+
CACHE_STALE_IF_ERROR=604800
952
+
953
+
# Minimal error tolerance (1 hour)
954
+
CACHE_STALE_IF_ERROR=3600
955
+
```
956
+
957
+
### `CACHE_STALE_WHILE_REVALIDATE`
958
+
959
+
**Required**: No
960
+
**Type**: Integer (seconds)
961
+
**Default**: `86400` (24 hours)
962
+
963
+
Allows stale content to be served while fresh content is being fetched in the background. This improves perceived performance.
964
+
965
+
**Examples**:
966
+
```bash
967
+
# Default (24 hours)
968
+
CACHE_STALE_WHILE_REVALIDATE=86400
969
+
970
+
# Quick revalidation (1 hour)
971
+
CACHE_STALE_WHILE_REVALIDATE=3600
972
+
973
+
# Extended revalidation (7 days)
974
+
CACHE_STALE_WHILE_REVALIDATE=604800
975
+
```
976
+
977
+
### `CACHE_MAX_STALE`
978
+
979
+
**Required**: No
980
+
**Type**: Integer (seconds)
981
+
**Default**: `172800` (48 hours)
982
+
983
+
Maximum time a client will accept stale responses. This provides an upper bound on how old cached content can be.
984
+
985
+
**Examples**:
986
+
```bash
987
+
# Default (48 hours)
988
+
CACHE_MAX_STALE=172800
989
+
990
+
# Extended staleness (7 days)
991
+
CACHE_MAX_STALE=604800
992
+
993
+
# Strict freshness (1 hour)
994
+
CACHE_MAX_STALE=3600
995
+
```
996
+
997
+
### `CACHE_MIN_FRESH`
998
+
999
+
**Required**: No
1000
+
**Type**: Integer (seconds)
1001
+
**Default**: `3600` (1 hour)
1002
+
1003
+
Minimum time a response must remain fresh. Clients will not accept responses that will expire within this time.
1004
+
1005
+
**Examples**:
1006
+
```bash
1007
+
# Default (1 hour)
1008
+
CACHE_MIN_FRESH=3600
1009
+
1010
+
# Strict freshness (24 hours)
1011
+
CACHE_MIN_FRESH=86400
1012
+
1013
+
# Relaxed freshness (5 minutes)
1014
+
CACHE_MIN_FRESH=300
1015
+
```
1016
+
1017
+
**Cache-Control Header Format**:
1018
+
1019
+
When `CACHE_MAX_AGE` is greater than 0, the following Cache-Control header is added to responses:
1020
+
```
1021
+
Cache-Control: public, max-age=86400, stale-while-revalidate=86400, stale-if-error=172800, max-stale=172800, min-fresh=3600
1022
+
```
1023
+
1024
+
**Recommendations**:
1025
+
- **High-traffic services**: Use longer max-age (86400-604800) to reduce load
1026
+
- **Frequently changing data**: Use shorter max-age (3600-14400)
1027
+
- **Critical services**: Set higher stale-if-error for resilience
1028
+
- **Performance-sensitive**: Enable stale-while-revalidate for better UX
1029
+
- **Disable caching**: Set CACHE_MAX_AGE=0 for real-time data
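The effective header can be inspected with a HEAD request against a running instance (endpoint is illustrative):

```bash
curl -sI "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.com" \
  | grep -iE "cache-control|etag"
```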
1030
+
1031
+
### `ETAG_SEED`
1032
+
1033
+
**Required**: No
1034
+
**Type**: String
1035
+
**Default**: Application version (from `CARGO_PKG_VERSION`)
1036
+
1037
+
Seed value for ETAG generation to allow cache invalidation. This value is incorporated into ETAG checksums, allowing server administrators to invalidate client-cached responses after major changes or deployments.
1038
+
1039
+
**How it works**:
1040
+
- Combined with response content to generate ETAG checksums
1041
+
- Uses MetroHash64 for fast, non-cryptographic hashing
1042
+
- Generates weak ETags (W/"hash") for HTTP caching
1043
+
- Changing the seed invalidates all client caches
1044
+
1045
+
**Examples**:
1046
+
```bash
1047
+
# Default (uses application version)
1048
+
# ETAG_SEED is automatically set to the version
1049
+
1050
+
# Deployment-specific seed
1051
+
ETAG_SEED=prod-2024-01-15
1052
+
1053
+
# Version with timestamp
1054
+
ETAG_SEED=v1.0.0-1705344000
1055
+
1056
+
# Environment-specific
1057
+
ETAG_SEED=staging-v2
1058
+
1059
+
# Force cache invalidation after config change
1060
+
ETAG_SEED=config-update-2024-01-15
1061
+
```
1062
+
1063
+
**Use cases**:
1064
+
- **Major configuration changes**: Update seed to invalidate all cached responses
1065
+
- **Data migration**: Force clients to refetch after backend changes
1066
+
- **Security updates**: Ensure clients get fresh data after security fixes
1067
+
- **A/B testing**: Different seeds for different deployment groups
1068
+
- **Rollback scenarios**: Revert to previous seed to restore cache behavior
1069
+
1070
+
**Recommendations**:
1071
+
- **Default**: Use the application version (automatic)
1072
+
- **Production**: Include deployment date or config version
1073
+
- **Staging**: Use environment-specific seeds
1074
+
- **After incidents**: Update seed to force fresh data
1075
+
- **Routine deployments**: Keep the same seed if no data changes
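A conditional request shows the seed's effect in practice; answering `304 Not Modified` for a matching `If-None-Match` is standard HTTP behavior, assumed here rather than quoted from the source:

```bash
# Capture the current weak ETag
ETAG=$(curl -sI "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.com" \
  | awk 'tolower($1) == "etag:" {print $2}' | tr -d '\r')

# With an unchanged seed, the server can answer 304 Not Modified
curl -s -o /dev/null -w "%{http_code}\n" -H "If-None-Match: $ETAG" \
  "http://localhost:8080/xrpc/com.atproto.identity.resolveHandle?handle=example.com"
```

After changing `ETAG_SEED` and redeploying, the same request should return `200` with a new ETag.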
1076
+
456
1077
## Configuration Examples
457
1078
458
1079
### Minimal Development Configuration
···
460
1081
```bash
461
1082
# .env.development
462
1083
HTTP_EXTERNAL=localhost:3007
463
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
464
1084
RUST_LOG=debug
465
1085
```
466
1086
···
470
1090
# .env.production.redis
471
1091
# Required
472
1092
HTTP_EXTERNAL=quickdid.example.com
473
-
SERVICE_KEY=${SECRET_SERVICE_KEY} # From secrets manager
474
1093
475
1094
# Network
476
1095
HTTP_PORT=8080
···
485
1104
QUEUE_ADAPTER=redis
486
1105
QUEUE_REDIS_TIMEOUT=5
487
1106
QUEUE_BUFFER_SIZE=5000
1107
+
QUEUE_REDIS_DEDUP_ENABLED=true # Prevent duplicate work
1108
+
QUEUE_REDIS_DEDUP_TTL=60
1109
+
1110
+
# Rate Limiting (optional, recommended for production)
1111
+
RESOLVER_MAX_CONCURRENT=100
1112
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 # 5 second timeout
1113
+
1114
+
# Metrics (optional, recommended for production)
1115
+
METRICS_ADAPTER=statsd
1116
+
METRICS_STATSD_HOST=localhost:8125
1117
+
METRICS_PREFIX=quickdid
1118
+
METRICS_TAGS=env:prod,service:quickdid
1119
+
1120
+
# Proactive Refresh (optional, recommended for high-traffic)
1121
+
PROACTIVE_REFRESH_ENABLED=true
1122
+
PROACTIVE_REFRESH_THRESHOLD=0.8
1123
+
1124
+
# Jetstream Consumer (optional, recommended for real-time sync)
1125
+
JETSTREAM_ENABLED=true
1126
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1127
+
1128
+
# HTTP Caching (Cache-Control headers)
1129
+
CACHE_MAX_AGE=86400 # 24 hours
1130
+
CACHE_STALE_IF_ERROR=172800 # 48 hours
1131
+
CACHE_STALE_WHILE_REVALIDATE=86400 # 24 hours
488
1132
489
1133
# Logging
490
1134
RUST_LOG=info
···
496
1140
# .env.production.sqlite
497
1141
# Required
498
1142
HTTP_EXTERNAL=quickdid.example.com
499
-
SERVICE_KEY=${SECRET_SERVICE_KEY} # From secrets manager
500
1143
501
1144
# Network
502
1145
HTTP_PORT=8080
···
512
1155
QUEUE_BUFFER_SIZE=5000
513
1156
QUEUE_SQLITE_MAX_SIZE=10000
514
1157
1158
+
# Rate Limiting (optional, recommended for production)
1159
+
RESOLVER_MAX_CONCURRENT=100
1160
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 # 5 second timeout
1161
+
1162
+
# Jetstream Consumer (optional, recommended for real-time sync)
1163
+
JETSTREAM_ENABLED=true
1164
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1165
+
1166
+
# HTTP Caching (Cache-Control headers)
1167
+
CACHE_MAX_AGE=86400 # 24 hours
1168
+
CACHE_STALE_IF_ERROR=172800 # 48 hours
1169
+
CACHE_STALE_WHILE_REVALIDATE=86400 # 24 hours
1170
+
515
1171
# Logging
516
1172
RUST_LOG=info
517
1173
```
···
522
1178
# .env.ha.redis
523
1179
# Required
524
1180
HTTP_EXTERNAL=quickdid.example.com
525
-
SERVICE_KEY=${SECRET_SERVICE_KEY}
526
1181
527
1182
# Network
528
1183
HTTP_PORT=8080
···
539
1194
QUEUE_REDIS_PREFIX=prod:queue:
540
1195
QUEUE_WORKER_ID=${HOSTNAME:-worker1}
541
1196
QUEUE_REDIS_TIMEOUT=10
1197
+
QUEUE_REDIS_DEDUP_ENABLED=true # Essential for multi-instance
1198
+
QUEUE_REDIS_DEDUP_TTL=120 # Longer TTL for HA
542
1199
543
1200
# Performance
544
1201
QUEUE_BUFFER_SIZE=10000
545
1202
1203
+
# Rate Limiting (important for HA deployments)
1204
+
RESOLVER_MAX_CONCURRENT=500
1205
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=10000 # 10 second timeout for HA
1206
+
1207
+
# Metrics (recommended for HA monitoring)
1208
+
METRICS_ADAPTER=statsd
1209
+
METRICS_STATSD_HOST=statsd:8125
1210
+
METRICS_PREFIX=quickdid.prod
1211
+
METRICS_TAGS=env:prod,service:quickdid,cluster:ha
1212
+
1213
+
# Proactive Refresh (recommended for HA)
1214
+
PROACTIVE_REFRESH_ENABLED=true
1215
+
PROACTIVE_REFRESH_THRESHOLD=0.7 # More aggressive for HA
1216
+
1217
+
# Jetstream Consumer (recommended for real-time sync in HA)
1218
+
JETSTREAM_ENABLED=true
1219
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1220
+
546
1221
# Logging
547
1222
RUST_LOG=warn
548
1223
```
···
553
1228
# .env.hybrid
554
1229
# Required
555
1230
HTTP_EXTERNAL=quickdid.example.com
556
-
SERVICE_KEY=${SECRET_SERVICE_KEY}
557
1231
558
1232
# Network
559
1233
HTTP_PORT=8080
···
584
1258
image: quickdid:latest
585
1259
environment:
586
1260
HTTP_EXTERNAL: quickdid.example.com
587
-
SERVICE_KEY: ${SERVICE_KEY}
588
1261
HTTP_PORT: 8080
589
1262
REDIS_URL: redis://redis:6379/0
590
1263
CACHE_TTL_MEMORY: 600
591
1264
CACHE_TTL_REDIS: 86400
592
1265
QUEUE_ADAPTER: redis
593
1266
QUEUE_REDIS_TIMEOUT: 5
1267
+
JETSTREAM_ENABLED: true
1268
+
JETSTREAM_HOSTNAME: jetstream.atproto.tools
594
1269
RUST_LOG: info
595
1270
ports:
596
1271
- "8080:8080"
···
613
1288
image: quickdid:latest
614
1289
environment:
615
1290
HTTP_EXTERNAL: quickdid.example.com
616
-
SERVICE_KEY: ${SERVICE_KEY}
617
1291
HTTP_PORT: 8080
618
1292
SQLITE_URL: sqlite:/data/quickdid.db
619
1293
CACHE_TTL_MEMORY: 600
···
621
1295
QUEUE_ADAPTER: sqlite
622
1296
QUEUE_BUFFER_SIZE: 5000
623
1297
QUEUE_SQLITE_MAX_SIZE: 10000
1298
+
JETSTREAM_ENABLED: true
1299
+
JETSTREAM_HOSTNAME: jetstream.atproto.tools
624
1300
RUST_LOG: info
625
1301
ports:
626
1302
- "8080:8080"
···
639
1315
### Required Fields
640
1316
641
1317
1. **HTTP_EXTERNAL**: Must be provided
642
-
2. **SERVICE_KEY**: Must be provided
643
1319
644
1320
### Value Constraints
645
1321
···
655
1331
- Must be one of: `mpsc`, `redis`, `sqlite`, `noop`, `none`
656
1332
- Case-sensitive
657
1333
658
-
4. **Port** (`HTTP_PORT`):
1334
+
4. **Rate Limiting** (`RESOLVER_MAX_CONCURRENT`):
1335
+
- Must be between 0 and 10000
1336
+
- 0 = disabled (default)
1337
+
- Values > 10000 will fail validation
1338
+
1339
+
5. **Rate Limiting Timeout** (`RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`):
1340
+
- Must be between 0 and 60000 (milliseconds)
1341
+
- 0 = no timeout (default)
1342
+
- Values > 60000 will fail validation
1343
+
1344
+
6. **Port** (`HTTP_PORT`):
659
1345
- Must be valid port number (1-65535)
660
1346
- Ports < 1024 require elevated privileges
661
1347
···
674
1360
675
1361
```bash
676
1362
# Validate configuration
677
-
HTTP_EXTERNAL=test SERVICE_KEY=test quickdid --help
1363
+
HTTP_EXTERNAL=test quickdid --help
678
1364
679
1365
# Test with specific values
680
1366
CACHE_TTL_MEMORY=0 quickdid --help # Will fail validation
681
1367
682
1368
# Check parsed configuration (with debug logging)
683
-
RUST_LOG=debug HTTP_EXTERNAL=test SERVICE_KEY=test quickdid
1369
+
RUST_LOG=debug HTTP_EXTERNAL=test quickdid
684
1370
```
685
1371
686
1372
## Best Practices
687
1373
688
1374
### Security
689
1375
690
-
1. **Never commit SERVICE_KEY** to version control
691
-
2. Use environment-specific key management (Vault, AWS Secrets, etc.)
692
-
3. Rotate SERVICE_KEY regularly
693
-
4. Use TLS for Redis connections in production (`rediss://`)
1376
+
1. Use environment-specific configuration management
1377
+
2. Use TLS for Redis connections in production (`rediss://`)
1378
+
3. Never commit sensitive configuration to version control
694
1379
4. Implement network segmentation for Redis access
695
1380
696
1381
### Performance
···
707
1392
2. **Single-instance deployments**: Use SQLite for persistent caching and queuing
708
1393
3. **Development/testing**: Use memory-only caching with MPSC queuing
709
1394
4. **Hybrid setups**: Configure both Redis and SQLite for redundancy
710
-
5. **Queue adapter guidelines**:
1395
+
5. **Real-time sync**: Enable Jetstream consumer for live cache updates
1396
+
6. **Queue adapter guidelines**:
711
1397
- Redis: Best for multi-instance deployments with distributed processing
712
1398
- SQLite: Best for single-instance deployments needing persistence
713
1399
- MPSC: Best for single-instance deployments without persistence needs
714
-
6. **Cache TTL guidelines**:
1400
+
7. **Cache TTL guidelines**:
715
1401
- Redis: Shorter TTLs (1-7 days) for frequently updated handles
716
1402
- SQLite: Longer TTLs (7-90 days) for stable single-instance caching
717
1403
- Memory: Short TTLs (5-30 minutes) as fallback
1404
+
8. **Jetstream guidelines**:
1405
+
- Production: Enable for real-time cache synchronization
1406
+
- High-traffic: Essential for reducing stale data
1407
+
- Development: Can be disabled for simpler testing
1408
+
- Monitor WebSocket connection health in production
718
1409
719
1410
### Monitoring
720
1411
···
726
1417
### Deployment
727
1418
728
1419
1. Use `.env` files for local development
729
-
2. Use secrets management for production SERVICE_KEY
1420
+
2. Use secrets management for production configurations
730
1421
3. Set resource limits in container orchestration
731
1422
4. Use health checks to monitor service availability
732
1423
5. Implement gradual rollouts with feature flags
+182
-19
docs/production-deployment.md
+182
-19
docs/production-deployment.md
···
42
42
# - localhost:3007 (for testing only)
43
43
HTTP_EXTERNAL=quickdid.example.com
44
44
45
-
# Private key for service identity (DID format)
46
-
# Generate a new key for production using atproto-identity tools
47
-
# SECURITY: Keep this key secure and never commit to version control
48
-
# Example formats:
49
-
# - did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
50
-
# - did:plc:xyz123abc456
51
-
SERVICE_KEY=did:key:YOUR_PRODUCTION_KEY_HERE
52
-
53
45
# ----------------------------------------------------------------------------
54
46
# NETWORK CONFIGURATION
55
47
# ----------------------------------------------------------------------------
···
133
125
# Higher = less polling overhead, slower shutdown
134
126
QUEUE_REDIS_TIMEOUT=5
135
127
128
+
# Enable deduplication for Redis queue to prevent duplicate handles (default: false)
129
+
# When enabled, uses Redis SET with TTL to track handles being processed
130
+
# Prevents the same handle from being queued multiple times within the TTL window
131
+
QUEUE_REDIS_DEDUP_ENABLED=false
132
+
133
+
# TTL for Redis queue deduplication keys in seconds (default: 60)
134
+
# Range: 10-300 recommended
135
+
# Determines how long to prevent duplicate handle resolution requests
136
+
QUEUE_REDIS_DEDUP_TTL=60
137
+
136
138
# Worker ID for Redis queue (defaults to "worker1")
137
139
# Set this for predictable worker identification in multi-instance deployments
138
140
# Examples: worker-001, prod-us-east-1, $(hostname)
···
158
160
# Identifies your service to other AT Protocol services
159
161
# Default: Auto-generated with current version from Cargo.toml
160
162
# Format: quickdid/{version} (+https://github.com/smokesignal.events/quickdid)
161
-
USER_AGENT=quickdid/1.0.0-rc.2 (+https://quickdid.example.com)
163
+
USER_AGENT=quickdid/1.0.0-rc.5 (+https://quickdid.example.com)
162
164
163
165
# Custom DNS nameservers (comma-separated)
164
166
# Use for custom DNS resolution or to bypass local DNS
···
184
186
# RUST_LOG_FORMAT=json
185
187
186
188
# ----------------------------------------------------------------------------
189
+
# RATE LIMITING CONFIGURATION
190
+
# ----------------------------------------------------------------------------
191
+
192
+
# Maximum concurrent handle resolutions (default: 0 = disabled)
193
+
# When > 0, enables semaphore-based rate limiting
194
+
# Range: 0-10000 (0 = disabled)
195
+
# Protects upstream DNS/HTTP services from being overwhelmed
196
+
RESOLVER_MAX_CONCURRENT=0
197
+
198
+
# Timeout for acquiring rate limit permit in milliseconds (default: 0 = no timeout)
199
+
# When > 0, requests will timeout if they can't acquire a permit within this time
200
+
# Range: 0-60000 (max 60 seconds)
201
+
# Prevents requests from waiting indefinitely when rate limiter is at capacity
202
+
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=0
203
+
204
+
# ----------------------------------------------------------------------------
205
+
# HTTP CACHING CONFIGURATION
206
+
# ----------------------------------------------------------------------------
207
+
208
+
# ETAG seed for cache invalidation (default: application version)
209
+
# Used to generate ETAG checksums for HTTP responses
210
+
# Changing this value invalidates all client-cached responses
211
+
# Examples:
212
+
# - prod-2024-01-15 (deployment-specific)
213
+
# - v1.0.0-1705344000 (version with timestamp)
214
+
# - config-update-2024-01-15 (after configuration changes)
215
+
# Default uses the application version from Cargo.toml
216
+
# ETAG_SEED=prod-2024-01-15
217
+
218
+
# Maximum age for HTTP Cache-Control header in seconds (default: 86400 = 24 hours)
219
+
# Set to 0 to disable Cache-Control header
220
+
# Controls how long clients and intermediate caches can cache responses
221
+
CACHE_MAX_AGE=86400
222
+
223
+
# Stale-if-error directive for Cache-Control in seconds (default: 172800 = 48 hours)
224
+
# Allows stale content to be served if backend errors occur
225
+
# Provides resilience during service outages
226
+
CACHE_STALE_IF_ERROR=172800
227
+
228
+
# Stale-while-revalidate directive for Cache-Control in seconds (default: 86400 = 24 hours)
229
+
# Allows stale content to be served while fetching fresh content in background
230
+
# Improves perceived performance for users
231
+
CACHE_STALE_WHILE_REVALIDATE=86400
232
+
233
+
# Max-stale directive for Cache-Control in seconds (default: 172800 = 48 hours)
234
+
# Maximum time client will accept stale responses
235
+
# Provides upper bound on cached content age
236
+
CACHE_MAX_STALE=172800
237
+
238
+
# Min-fresh directive for Cache-Control in seconds (default: 3600 = 1 hour)
239
+
# Minimum time response must remain fresh
240
+
# Clients won't accept responses expiring within this time
241
+
CACHE_MIN_FRESH=3600
242
+
243
+
# ----------------------------------------------------------------------------
244
+
# METRICS CONFIGURATION
245
+
# ----------------------------------------------------------------------------
246
+
247
+
# Metrics adapter type: 'noop' or 'statsd' (default: noop)
248
+
# - 'noop': No metrics collection (default)
249
+
# - 'statsd': Send metrics to StatsD server
250
+
METRICS_ADAPTER=statsd
251
+
252
+
# StatsD host and port (required when METRICS_ADAPTER=statsd)
253
+
# Format: hostname:port
254
+
# Examples:
255
+
# - localhost:8125 (local StatsD)
256
+
# - statsd.example.com:8125 (remote StatsD)
257
+
METRICS_STATSD_HOST=localhost:8125
258
+
259
+
# Bind address for StatsD UDP socket (default: [::]:0)
260
+
# Controls which local address to bind for sending UDP packets
261
+
# Examples:
262
+
# - [::]:0 (IPv6 any address, random port - default)
263
+
# - 0.0.0.0:0 (IPv4 any address, random port)
264
+
# - 192.168.1.100:0 (specific interface)
265
+
METRICS_STATSD_BIND=[::]:0
266
+
267
+
# Prefix for all metrics (default: quickdid)
268
+
# Used to namespace metrics in your monitoring system
269
+
# Examples:
270
+
# - quickdid (default)
271
+
# - prod.quickdid
272
+
# - us-east-1.quickdid
273
+
METRICS_PREFIX=quickdid
274
+
275
+
# Tags for all metrics (comma-separated key:value pairs)
276
+
# Added to all metrics for filtering and grouping
277
+
# Examples:
278
+
# - env:production,service:quickdid
279
+
# - env:staging,region:us-east-1,version:1.0.0
280
+
METRICS_TAGS=env:production,service:quickdid
281
+
282
+
# ----------------------------------------------------------------------------
283
+
# PROACTIVE REFRESH CONFIGURATION
284
+
# ----------------------------------------------------------------------------
285
+
286
+
# Enable proactive cache refresh (default: false)
287
+
# When enabled, cache entries nearing expiration are automatically refreshed
288
+
# in the background to prevent cache misses for frequently accessed handles
289
+
PROACTIVE_REFRESH_ENABLED=false
290
+
291
+
# Threshold for proactive refresh as percentage of TTL (default: 0.8)
292
+
# Range: 0.0-1.0 (0% to 100% of TTL)
293
+
# Example: 0.8 means refresh when 80% of TTL has elapsed
294
+
# Lower values = more aggressive refreshing, higher load
295
+
# Higher values = less aggressive refreshing, more cache misses
296
+
PROACTIVE_REFRESH_THRESHOLD=0.8
297
+
298
+
# ----------------------------------------------------------------------------
299
+
# JETSTREAM CONSUMER CONFIGURATION
300
+
# ----------------------------------------------------------------------------
301
+
302
+
# Enable Jetstream consumer for real-time cache updates (default: false)
303
+
# When enabled, connects to AT Protocol firehose for live updates
304
+
# Processes Account events (deleted/deactivated) and Identity events (handle changes)
305
+
# Automatically reconnects with exponential backoff on connection failures
306
+
JETSTREAM_ENABLED=false
307
+
308
+
# Jetstream WebSocket hostname (default: jetstream.atproto.tools)
309
+
# The firehose service to connect to for real-time AT Protocol events
310
+
# Examples:
311
+
# - jetstream.atproto.tools (production firehose)
312
+
# - jetstream-staging.atproto.tools (staging environment)
313
+
# - localhost:6008 (local development)
314
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
315
+
316
+
# ----------------------------------------------------------------------------
317
+
# STATIC FILES CONFIGURATION
318
+
# ----------------------------------------------------------------------------
319
+
320
+
# Directory path for serving static files (default: www)
321
+
# This directory should contain:
322
+
# - index.html (landing page)
323
+
# - .well-known/atproto-did (service DID identifier)
324
+
# - .well-known/did.json (DID document)
325
+
# In Docker, this defaults to /app/www
326
+
# You can mount custom files via Docker volumes
327
+
STATIC_FILES_DIR=/app/www
328
+
329
+
# ----------------------------------------------------------------------------
187
330
# PERFORMANCE TUNING
188
331
# ----------------------------------------------------------------------------
189
332
···
288
431
289
432
## Docker Compose Setup
290
433
291
-
### Redis-based Production Setup
434
+
### Redis-based Production Setup with Jetstream
292
435
293
-
Create a `docker-compose.yml` file for a complete production setup with Redis:
436
+
Create a `docker-compose.yml` file for a complete production setup with Redis and optional Jetstream consumer:
294
437
295
438
```yaml
296
439
version: '3.8'
···
379
522
driver: local
380
523
```
381
524
382
-
### SQLite-based Single-Instance Setup
525
+
### SQLite-based Single-Instance Setup with Jetstream
383
526
384
-
For single-instance deployments without Redis, create a simpler `docker-compose.sqlite.yml`:
527
+
For single-instance deployments without Redis, create a simpler `docker-compose.sqlite.yml` with optional Jetstream consumer:
385
528
386
529
```yaml
387
530
version: '3.8'
···
392
535
container_name: quickdid-sqlite
393
536
environment:
394
537
HTTP_EXTERNAL: quickdid.example.com
395
-
SERVICE_KEY: ${SERVICE_KEY}
396
538
HTTP_PORT: 8080
397
539
SQLITE_URL: sqlite:/data/quickdid.db
398
540
CACHE_TTL_MEMORY: 600
···
400
542
QUEUE_ADAPTER: sqlite
401
543
QUEUE_BUFFER_SIZE: 5000
402
544
QUEUE_SQLITE_MAX_SIZE: 10000
545
+
# Optional: Enable Jetstream for real-time cache updates
546
+
# JETSTREAM_ENABLED: true
547
+
# JETSTREAM_HOSTNAME: jetstream.atproto.tools
403
548
RUST_LOG: info
404
549
ports:
405
550
- "8080:8080"
···
593
738
594
739
### 1. Secrets Protection
595
740
596
-
- **Never commit** the `SERVICE_KEY` to version control
741
+
- **Never commit** sensitive configuration to version control
597
742
- Store keys in a secure secret management system (e.g., HashiCorp Vault, AWS Secrets Manager)
598
743
- Rotate keys regularly
599
744
- Use different keys for different environments
···
638
783
docker logs quickdid
639
784
640
785
# Verify environment variables
641
-
docker exec quickdid env | grep -E "HTTP_EXTERNAL|SERVICE_KEY"
786
+
docker exec quickdid env | grep -E "HTTP_EXTERNAL|HTTP_PORT"
642
787
643
788
# Test Redis connectivity
644
789
docker exec quickdid redis-cli -h redis ping
···
781
926
2. **SQLite** (persistent, best for single-instance)
782
927
3. **Memory** (fast, but lost on restart)
783
928
929
+
**Real-time Updates with Jetstream**: When `JETSTREAM_ENABLED=true`, QuickDID:
930
+
- Connects to AT Protocol firehose for live cache updates
931
+
- Processes Account events to purge deleted/deactivated accounts
932
+
- Processes Identity events to update handle-to-DID mappings
933
+
- Automatically reconnects with exponential backoff on failures
934
+
- Tracks metrics for successful and failed event processing
935
+
784
936
**Recommendations by Deployment Type**:
785
937
- **Single instance, persistent**: Use SQLite for both caching and queuing (`SQLITE_URL=sqlite:./quickdid.db`, `QUEUE_ADAPTER=sqlite`)
786
938
- **Multi-instance, HA**: Use Redis for both caching and queuing (`REDIS_URL=redis://redis:6379/0`, `QUEUE_ADAPTER=redis`)
939
+
- **Real-time sync**: Enable Jetstream consumer (`JETSTREAM_ENABLED=true`) for live cache updates
787
940
- **Testing/development**: Use memory-only caching with MPSC queuing (`QUEUE_ADAPTER=mpsc`)
788
941
- **Hybrid**: Configure both Redis and SQLite for redundancy
789
942
···
823
976
### Required Fields
824
977
825
978
- **HTTP_EXTERNAL**: Must be provided
826
-
- **SERVICE_KEY**: Must be provided
827
980
828
981
### Value Constraints
829
982
···
839
992
- Must be one of: `mpsc`, `redis`, `sqlite`, `noop`, `none`
840
993
- Case-sensitive
841
994
995
+
4. **Rate Limiting** (`RESOLVER_MAX_CONCURRENT`):
996
+
- Must be between 0 and 10000
997
+
- 0 = disabled (default)
998
+
- When > 0, limits concurrent handle resolutions
999
+
1000
+
5. **Rate Limiting Timeout** (`RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`):
1001
+
- Must be between 0 and 60000 (milliseconds)
1002
+
- 0 = no timeout (default)
1003
+
- Maximum: 60000ms (60 seconds)
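Putting both constraints together, a minimal rate-limited configuration within these bounds looks like this (values illustrative):

```bash
# Allow at most 100 concurrent resolutions; fail permit acquisition after 2 seconds
HTTP_EXTERNAL=quickdid.example.com \
RESOLVER_MAX_CONCURRENT=100 \
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=2000 \
quickdid
```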
1004
+
842
1005
### Validation Errors
843
1006
844
1007
If validation fails, QuickDID will exit with one of these error codes:
···
852
1015
853
1016
```bash
854
1017
# Show configuration help without starting the service
855
-
HTTP_EXTERNAL=test SERVICE_KEY=test quickdid --help
1018
+
HTTP_EXTERNAL=test quickdid --help
856
1019
857
1020
# Test with specific values (startup will fail validation)
858
1021
CACHE_TTL_MEMORY=0 HTTP_EXTERNAL=test quickdid
859
1022
860
1023
# Debug configuration parsing
861
-
RUST_LOG=debug HTTP_EXTERNAL=test SERVICE_KEY=test quickdid
1024
+
RUST_LOG=debug HTTP_EXTERNAL=test quickdid
862
1025
```
863
1026
864
1027
## Support and Resources
+714
docs/telegraf-timescaledb-metrics-guide.md
+714
docs/telegraf-timescaledb-metrics-guide.md
···
1
+
# Telegraf and TimescaleDB Metrics Collection Guide
2
+
3
+
This guide demonstrates how to set up a metrics collection pipeline using Telegraf to collect StatsD metrics and store them in PostgreSQL with TimescaleDB using Docker Compose.
4
+
5
+
## Overview
6
+
7
+
This setup creates a metrics pipeline that:
8
+
- Collects StatsD metrics via Telegraf on UDP port 8125
9
+
- Creates individual PostgreSQL tables for each metric type
10
+
- Stores metric tags as JSONB for flexible querying
11
+
- Automatically creates hypertables for time-series optimization
12
+
- Provides a complete Docker Compose configuration for easy deployment
13
+
14
+
## Important Note on Table Structure
15
+
16
+
The Telegraf PostgreSQL output plugin with the configuration in this guide creates **individual tables for each metric name**. For example:
17
+
- `quickdid.http.request.count` becomes table `"quickdid.http.request.count"`
18
+
- `quickdid.resolver.rate_limit.available_permits` becomes table `"quickdid.resolver.rate_limit.available_permits"`
19
+
20
+
Each table has the following structure:
21
+
- `time` (timestamptz) - The timestamp of the metric
22
+
- `tags` (jsonb) - All tags stored as a JSON object
23
+
- Metric-specific value columns (e.g., `value`, `mean`, `99_percentile`)
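As a rough illustration, the auto-created table for a counter metric has approximately this shape (the plugin derives the exact column types from the incoming fields, so treat this as a sketch rather than the literal DDL):

```sql
-- Illustrative only: approximate shape of an auto-created counter table
CREATE TABLE "quickdid.http.request.count" (
    "time" timestamptz NOT NULL,
    tags   jsonb,
    value  bigint
);
```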
24
+
25
+
## Prerequisites
26
+
27
+
- Docker and Docker Compose installed
28
+
- Basic understanding of StatsD metrics format
29
+
- Familiarity with PostgreSQL/TimescaleDB concepts
30
+
31
+
## Project Structure
32
+
33
+
Create the following directory structure:
34
+
35
+
```
36
+
metrics-stack/
37
+
├── docker-compose.yml
38
+
├── telegraf/
39
+
│   └── telegraf.conf
40
+
├── test-scripts/
41
+
│   ├── send-metrics.sh
42
+
│   └── verify-queries.sql
43
+
└── .env
44
+
```
45
+
46
+
## Configuration Files
47
+
48
+
### 1. Environment Variables (.env)
49
+
50
+
Create a `.env` file to store sensitive configuration:
51
+
52
+
```env
53
+
# PostgreSQL/TimescaleDB Configuration
54
+
POSTGRES_DB=metrics
55
+
POSTGRES_USER=postgres
56
+
POSTGRES_PASSWORD=secretpassword
57
+
58
+
# Telegraf Database User
59
+
TELEGRAF_DB_USER=postgres
60
+
TELEGRAF_DB_PASSWORD=secretpassword
61
+
62
+
# TimescaleDB Settings
63
+
TIMESCALE_TELEMETRY=off
64
+
```
65
+
66
+
### 2. Telegraf Configuration (telegraf/telegraf.conf)
67
+
68
+
Create the Telegraf configuration file:
69
+
70
+
```toml
71
+
# Global Telegraf Agent Configuration
72
+
[agent]
73
+
interval = "10s"
74
+
round_interval = true
75
+
metric_batch_size = 1000
76
+
metric_buffer_limit = 10000
77
+
collection_jitter = "0s"
78
+
flush_interval = "10s"
79
+
flush_jitter = "0s"
80
+
precision = ""
81
+
debug = false
82
+
quiet = false
83
+
hostname = "telegraf-agent"
84
+
omit_hostname = false
85
+
86
+
# StatsD Input Plugin
87
+
[[inputs.statsd]]
88
+
service_address = ":8125" # Listen on UDP port 8125 for StatsD metrics
89
+
protocol = "udp"
90
+
delete_gauges = true
91
+
delete_counters = true
92
+
delete_sets = true
93
+
delete_timings = true
94
+
percentiles = [50, 90, 95, 99]
95
+
metric_separator = "."
96
+
allowed_pending_messages = 10000
97
+
datadog_extensions = true
98
+
datadog_distributions = true
99
+
100
+
# PostgreSQL (TimescaleDB) Output Plugin
101
+
[[outputs.postgresql]]
102
+
connection = "host=timescaledb user=${TELEGRAF_DB_USER} password=${TELEGRAF_DB_PASSWORD} dbname=${POSTGRES_DB} sslmode=disable"
103
+
schema = "public"
104
+
105
+
# Create individual tables for each metric with hypertable support
106
+
create_templates = [
107
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
108
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
109
+
]
110
+
111
+
# Store all tags as JSONB for flexible querying
112
+
tags_as_jsonb = true
113
+
114
+
# Keep fields as separate columns for better performance on aggregations
115
+
fields_as_jsonb = false
116
+
```
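Before wiring this into Compose, you can optionally dry-run the file with Telegraf's `--test` flag, which runs the inputs once and prints parsed metrics to stdout without writing to any output (environment values below match the `.env` sketch above):

```bash
docker run --rm \
  -e TELEGRAF_DB_USER=postgres \
  -e TELEGRAF_DB_PASSWORD=secretpassword \
  -e POSTGRES_DB=metrics \
  -v "$PWD/telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro" \
  telegraf:1.35 telegraf --config /etc/telegraf/telegraf.conf --test
```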
117
+
118
+
### 3. Docker Compose Configuration (docker-compose.yml)
119
+
120
+
Create the Docker Compose file:
121
+
122
+
```yaml
123
+
version: '3.8'
124
+
125
+
services:
126
+
timescaledb:
127
+
image: timescale/timescaledb:latest-pg17
128
+
container_name: timescaledb
129
+
restart: unless-stopped
130
+
environment:
131
+
POSTGRES_DB: ${POSTGRES_DB}
132
+
POSTGRES_USER: ${POSTGRES_USER}
133
+
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
134
+
TIMESCALE_TELEMETRY: ${TIMESCALE_TELEMETRY}
135
+
ports:
136
+
- "5442:5432"
137
+
volumes:
138
+
- timescale_data:/var/lib/postgresql/data
139
+
- ./init-scripts:/docker-entrypoint-initdb.d:ro
140
+
command:
141
+
- postgres
142
+
- -c
143
+
- shared_buffers=1GB
144
+
- -c
145
+
- effective_cache_size=3GB
146
+
- -c
147
+
- maintenance_work_mem=512MB
148
+
- -c
149
+
- work_mem=32MB
150
+
- -c
151
+
- timescaledb.max_background_workers=8
152
+
- -c
153
+
- max_parallel_workers_per_gather=2
154
+
- -c
155
+
- max_parallel_workers=8
156
+
healthcheck:
157
+
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
158
+
interval: 10s
159
+
timeout: 5s
160
+
retries: 5
161
+
networks:
162
+
- metrics_network
163
+
164
+
telegraf:
165
+
image: telegraf:1.35
166
+
container_name: telegraf
167
+
restart: unless-stopped
168
+
environment:
169
+
TELEGRAF_DB_USER: ${TELEGRAF_DB_USER}
170
+
TELEGRAF_DB_PASSWORD: ${TELEGRAF_DB_PASSWORD}
171
+
POSTGRES_DB: ${POSTGRES_DB}
172
+
ports:
173
+
- "8125:8125/udp" # StatsD UDP port
174
+
volumes:
175
+
- ./telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro
176
+
depends_on:
177
+
timescaledb:
178
+
condition: service_healthy
179
+
networks:
180
+
- metrics_network
181
+
command: ["telegraf", "--config", "/etc/telegraf/telegraf.conf"]
182
+
183
+
redis:
184
+
image: redis:7-alpine
185
+
container_name: redis
186
+
restart: unless-stopped
187
+
ports:
188
+
- "6379:6379"
189
+
volumes:
190
+
- redis_data:/data
191
+
command: redis-server --appendonly yes --appendfsync everysec
192
+
healthcheck:
193
+
test: ["CMD", "redis-cli", "ping"]
194
+
interval: 10s
195
+
timeout: 5s
196
+
retries: 5
197
+
networks:
198
+
- metrics_network
199
+
200
+
networks:
201
+
metrics_network:
202
+
driver: bridge
203
+
204
+
volumes:
205
+
timescale_data:
206
+
redis_data:
207
+
```
208
+
209
+
### 4. Database Initialization Script (optional)
210
+
211
+
Create `init-scripts/01-init.sql` to set up the TimescaleDB extension:
212
+
213
+
```sql
214
+
-- Enable TimescaleDB extension
215
+
CREATE EXTENSION IF NOT EXISTS timescaledb;
216
+
217
+
-- Enable additional useful extensions
218
+
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
219
+
```
220
+
221
+
## Test Scripts
222
+
223
+
### 1. Send Test Metrics Script (test-scripts/send-metrics.sh)
224
+
225
+
Create a script to send various types of metrics:
226
+
227
+
```bash
228
+
#!/bin/bash
229
+
230
+
# Send test metrics to StatsD/Telegraf
231
+
232
+
echo "Sending test metrics to StatsD on localhost:8125..."
233
+
234
+
# Counter metrics
235
+
for i in {1..10}; do
236
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
237
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
238
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:404" | nc -u -w0 localhost 8125
239
+
done
240
+
241
+
# Gauge metrics
242
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
243
+
echo "quickdid.resolver.rate_limit.available_permits:8|g" | nc -u -w0 localhost 8125
244
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
245
+
246
+
# Timing metrics (in milliseconds)
247
+
for i in {1..20}; do
248
+
duration=$((RANDOM % 100 + 10))
249
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
250
+
done
251
+
252
+
for i in {1..10}; do
253
+
duration=$((RANDOM % 200 + 50))
254
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
255
+
done
256
+
257
+
# Histogram metrics
258
+
for i in {1..15}; do
259
+
resolution_time=$((RANDOM % 500 + 50))
260
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
261
+
echo "quickdid.resolver.resolution_time:$((resolution_time * 2))|h|#resolver:base" | nc -u -w0 localhost 8125
262
+
done
263
+
264
+
# Cache metrics
265
+
echo "quickdid.cache.hit.count:45|c|#cache_type:redis" | nc -u -w0 localhost 8125
266
+
echo "quickdid.cache.miss.count:5|c|#cache_type:redis" | nc -u -w0 localhost 8125
267
+
echo "quickdid.cache.size:1024|g|#cache_type:memory" | nc -u -w0 localhost 8125
268
+
269
+
echo "Metrics sent! Wait 15 seconds for Telegraf to flush..."
270
+
sleep 15
271
+
echo "Done!"
272
+
```
273
+
274
+
### 2. Verify Queries Script (test-scripts/verify-queries.sql)
275
+
276
+
Create a SQL script to verify all queries work correctly:
277
+
278
+
```sql
279
+
-- Test script to verify all metrics queries work correctly
280
+
-- Run this after sending test metrics with send-metrics.sh
281
+
282
+
\echo '===== CHECKING AVAILABLE TABLES ====='
283
+
SELECT table_name
284
+
FROM information_schema.tables
285
+
WHERE table_schema = 'public'
286
+
AND table_name LIKE 'quickdid%'
287
+
ORDER BY table_name;
288
+
289
+
\echo ''
290
+
\echo '===== CHECKING TABLE STRUCTURES ====='
291
+
\echo 'Structure of quickdid.http.request.count table:'
292
+
\d "quickdid.http.request.count"
293
+
294
+
\echo ''
295
+
\echo 'Structure of quickdid.http.request.duration_ms table:'
296
+
\d "quickdid.http.request.duration_ms"
297
+
298
+
\echo ''
299
+
\echo '===== QUERY 1: Recent HTTP Request Counts ====='
300
+
SELECT
301
+
time,
302
+
tags,
303
+
tags->>'method' as method,
304
+
tags->>'path' as path,
305
+
tags->>'status' as status,
306
+
value
307
+
FROM "quickdid.http.request.count"
308
+
WHERE time > NOW() - INTERVAL '1 hour'
309
+
ORDER BY time DESC
310
+
LIMIT 10;
311
+
312
+
\echo ''
313
+
\echo '===== QUERY 2: HTTP Request Duration Statistics by Endpoint ====='
314
+
SELECT
315
+
time_bucket('1 minute', time) AS minute,
316
+
tags->>'method' as method,
317
+
tags->>'path' as path,
318
+
tags->>'status' as status,
319
+
COUNT(*) as request_count,
320
+
AVG(mean) as avg_duration_ms,
321
+
MAX("99_percentile") as p99_duration_ms,
322
+
MIN(mean) as min_duration_ms
323
+
FROM "quickdid.http.request.duration_ms"
324
+
WHERE time > NOW() - INTERVAL '1 hour'
325
+
AND tags IS NOT NULL
326
+
GROUP BY minute, tags->>'method', tags->>'path', tags->>'status'
327
+
ORDER BY minute DESC
328
+
LIMIT 10;
329
+
330
+
\echo ''
331
+
\echo '===== QUERY 3: Rate Limiter Status Over Time ====='
332
+
SELECT
333
+
time,
334
+
value as available_permits
335
+
FROM "quickdid.resolver.rate_limit.available_permits"
336
+
WHERE time > NOW() - INTERVAL '1 hour'
337
+
ORDER BY time DESC
338
+
LIMIT 10;
339
+
340
+
\echo ''
341
+
\echo '===== QUERY 4: Resolver Performance Comparison ====='
342
+
SELECT
343
+
tags->>'resolver' as resolver_type,
344
+
COUNT(*) as sample_count,
345
+
AVG(mean) as avg_resolution_time_ms,
346
+
MAX("99_percentile") as p99_resolution_time_ms,
347
+
MIN(mean) as min_resolution_time_ms
348
+
FROM "quickdid.resolver.resolution_time"
349
+
WHERE time > NOW() - INTERVAL '1 hour'
350
+
AND tags->>'resolver' IS NOT NULL
351
+
GROUP BY tags->>'resolver'
352
+
ORDER BY avg_resolution_time_ms;
353
+
354
+
\echo ''
355
+
\echo '===== QUERY 5: Cache Hit Rate Analysis ====='
356
+
WITH cache_stats AS (
357
+
SELECT
358
+
'hits' as metric_type,
359
+
SUM(value) as total_count
360
+
FROM "quickdid.cache.hit.count"
361
+
WHERE time > NOW() - INTERVAL '1 hour'
362
+
UNION ALL
363
+
SELECT
364
+
'misses' as metric_type,
365
+
SUM(value) as total_count
366
+
FROM "quickdid.cache.miss.count"
367
+
WHERE time > NOW() - INTERVAL '1 hour'
368
+
)
369
+
SELECT
370
+
SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) as total_hits,
371
+
SUM(CASE WHEN metric_type = 'misses' THEN total_count ELSE 0 END) as total_misses,
372
+
CASE
373
+
WHEN SUM(total_count) > 0 THEN
374
+
ROUND(100.0 * SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) / SUM(total_count), 2)
375
+
ELSE 0
376
+
END as hit_rate_percentage
377
+
FROM cache_stats;
378
+
379
+
\echo ''
380
+
\echo '===== QUERY 6: Hypertable Information ====='
381
+
SELECT
382
+
hypertable_schema,
383
+
hypertable_name,
384
+
owner,
385
+
num_dimensions,
386
+
num_chunks,
387
+
compression_enabled
388
+
FROM timescaledb_information.hypertables
389
+
WHERE hypertable_name LIKE 'quickdid%'
390
+
ORDER BY hypertable_name;
391
+
392
+
\echo ''
393
+
\echo '===== QUERY 7: HTTP Error Rate by Endpoint ====='
394
+
WITH status_counts AS (
395
+
SELECT
396
+
time_bucket('5 minutes', time) as period,
397
+
tags->>'path' as path,
398
+
CASE
399
+
WHEN (tags->>'status')::int >= 400 THEN 'error'
400
+
ELSE 'success'
401
+
END as status_category,
402
+
SUM(value) as request_count
403
+
FROM "quickdid.http.request.count"
404
+
WHERE time > NOW() - INTERVAL '1 hour'
405
+
GROUP BY period, path, status_category
406
+
)
407
+
SELECT
408
+
period,
409
+
path,
410
+
SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) as error_count,
411
+
SUM(CASE WHEN status_category = 'success' THEN request_count ELSE 0 END) as success_count,
412
+
CASE
413
+
WHEN SUM(request_count) > 0 THEN
414
+
ROUND(100.0 * SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) / SUM(request_count), 2)
415
+
ELSE 0
416
+
END as error_rate_percentage
417
+
FROM status_counts
418
+
GROUP BY period, path
419
+
HAVING SUM(request_count) > 0
420
+
ORDER BY period DESC, error_rate_percentage DESC;
421
+
422
+
\echo ''
423
+
\echo '===== TEST COMPLETED ====='
424
+
```
425
+
426
+
## Usage
427
+
428
+
### Starting the Stack
429
+
430
+
1. Navigate to your project directory:
431
+
```bash
432
+
cd metrics-stack
433
+
```
434
+
435
+
2. Make the test scripts executable:
436
+
```bash
437
+
chmod +x test-scripts/send-metrics.sh
438
+
```
439
+
440
+
3. Start the services:
441
+
```bash
442
+
docker-compose up -d
443
+
```
444
+
445
+
4. Check the logs to ensure everything is running:
446
+
```bash
447
+
docker-compose logs -f
448
+
```
449
+
450
+
5. Wait for services to be fully ready (about 30 seconds)
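If you would rather script the wait than sleep a fixed time, you can poll the database container's health status (container name as defined in the Compose file above):

```bash
# Block until Docker reports the TimescaleDB container as healthy
until [ "$(docker inspect -f '{{.State.Health.Status}}' timescaledb)" = "healthy" ]; do
  sleep 2
done
echo "timescaledb is healthy"
```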
451
+
452
+
### Running the Test Suite
453
+
454
+
1. Send test metrics:
455
+
```bash
456
+
./test-scripts/send-metrics.sh
457
+
```
458
+
459
+
2. Verify all queries work:
460
+
```bash
461
+
docker exec -i timescaledb psql -U postgres -d metrics < test-scripts/verify-queries.sql
462
+
```
463
+
464
+
### Manual Querying
465
+
466
+
Connect to TimescaleDB to run queries manually:
467
+
468
+
```bash
469
+
# Connect to the database
470
+
docker exec -it timescaledb psql -U postgres -d metrics
471
+
472
+
# List all metric tables
473
+
\dt "quickdid*"
474
+
475
+
# Describe a specific table structure
476
+
\d "quickdid.http.request.duration_ms"
477
+
478
+
# Query with JSONB tag filtering
479
+
SELECT
480
+
time,
481
+
tags->>'method' as method,
482
+
mean as avg_ms,
483
+
"99_percentile" as p99_ms
484
+
FROM "quickdid.http.request.duration_ms"
485
+
WHERE tags @> '{"method": "GET"}'::jsonb
486
+
AND time > NOW() - INTERVAL '1 hour'
487
+
ORDER BY time DESC
488
+
LIMIT 10;
489
+
```
490
+
491
+
## Advanced Configuration
492
+
493
+
### Continuous Aggregates for Performance
494
+
495
+
Create continuous aggregates for frequently queried data:
496
+
497
+
```sql
498
+
-- Create hourly aggregates for HTTP metrics
499
+
CREATE MATERIALIZED VIEW http_metrics_hourly
500
+
WITH (timescaledb.continuous) AS
501
+
SELECT
502
+
time_bucket('1 hour', time) AS hour,
503
+
tags->>'method' as method,
504
+
tags->>'path' as path,
505
+
tags->>'status' as status,
506
+
COUNT(*) as request_count,
507
+
AVG(mean) as avg_duration_ms,
508
+
MAX("99_percentile") as p99_duration_ms,
509
+
MIN(mean) as min_duration_ms
510
+
FROM "quickdid.http.request.duration_ms"
511
+
WHERE tags IS NOT NULL
512
+
GROUP BY hour, method, path, status
513
+
WITH NO DATA;
514
+
515
+
-- Add refresh policy
516
+
SELECT add_continuous_aggregate_policy('http_metrics_hourly',
517
+
start_offset => INTERVAL '3 hours',
518
+
end_offset => INTERVAL '1 hour',
519
+
schedule_interval => INTERVAL '1 hour');
520
+
521
+
-- Manually refresh to populate initial data
522
+
CALL refresh_continuous_aggregate('http_metrics_hourly', NULL, NULL);
523
+
524
+
-- Query the aggregate
525
+
SELECT * FROM http_metrics_hourly
526
+
ORDER BY hour DESC, request_count DESC
527
+
LIMIT 20;
528
+
```
529
+
530
+
### Data Retention Policies
531
+
532
+
Set up automatic data retention:
533
+
534
+
```sql
535
+
-- Add retention policy to drop data older than 30 days
536
+
SELECT add_retention_policy('"quickdid.http.request.count"', INTERVAL '30 days');
537
+
SELECT add_retention_policy('"quickdid.http.request.duration_ms"', INTERVAL '30 days');
538
+
539
+
-- View retention policies
540
+
SELECT js.* FROM timescaledb_information.job_stats js
541
+
JOIN timescaledb_information.jobs j ON js.job_id = j.job_id
542
+
WHERE j.proc_name LIKE '%retention%';
543
+
```
544
+
545
+
### Compression for Storage Optimization
546
+
547
+
Enable compression for older data:
548
+
549
+
```sql
550
+
-- Enable compression on a hypertable
551
+
ALTER TABLE "quickdid.http.request.duration_ms" SET (
552
+
timescaledb.compress,
553
+
timescaledb.compress_segmentby = 'tags'
554
+
);
555
+
556
+
-- Add compression policy (compress chunks older than 7 days)
557
+
SELECT add_compression_policy('"quickdid.http.request.duration_ms"', INTERVAL '7 days');
558
+
559
+
-- Manually compress old chunks
560
+
SELECT compress_chunk(format('%I.%I', c.chunk_schema, c.chunk_name)::regclass)
561
+
FROM timescaledb_information.chunks c
562
+
WHERE c.hypertable_name = 'quickdid.http.request.duration_ms'
563
+
AND c.range_end < NOW() - INTERVAL '7 days'
564
+
AND NOT c.is_compressed;
565
+
566
+
-- Check compression status
567
+
SELECT
568
+
total_chunks,
569
+
number_compressed_chunks,
570
+
pg_size_pretty(before_compression_total_bytes) as before,
571
+
pg_size_pretty(after_compression_total_bytes) as after
572
+
FROM hypertable_compression_stats(
573
+
'"quickdid.http.request.duration_ms"');
574
+
```
575
+
576
+
## Monitoring and Maintenance
577
+
578
+
### Health Checks
579
+
580
+
```sql
581
+
-- Check chunk distribution
582
+
SELECT
583
+
hypertable_name,
584
+
chunk_name,
585
+
range_start,
586
+
range_end,
587
+
is_compressed,
588
+
pg_size_pretty(pg_total_relation_size(format('%I.%I', chunk_schema, chunk_name)::regclass)) as size
589
+
FROM timescaledb_information.chunks
590
+
WHERE hypertable_name LIKE 'quickdid%'
591
+
ORDER BY hypertable_name, range_start DESC
592
+
LIMIT 20;
593
+
594
+
-- Check background jobs
595
+
SELECT
596
+
j.job_id,
597
+
j.application_name,
598
+
j.proc_name,
599
+
j.schedule_interval,
600
+
js.last_run_started_at,
601
+
js.last_successful_finish,
602
+
js.next_start
603
+
FROM timescaledb_information.jobs j
JOIN timescaledb_information.job_stats js ON js.job_id = j.job_id
604
+
ORDER BY j.job_id;
605
+
606
+
-- Check table sizes
607
+
SELECT
608
+
hypertable_name,
609
+
pg_size_pretty(
610
+
hypertable_size(format('%I.%I', hypertable_schema, hypertable_name)::regclass)
611
+
) as total_size
612
+
FROM timescaledb_information.hypertables
613
+
WHERE hypertable_name LIKE 'quickdid%';
614
+
```
615
+
616
+
### Troubleshooting
617
+
618
+
1. **Tables not being created:**
619
+
- Check Telegraf logs: `docker-compose logs telegraf | grep -i error`
620
+
- Verify PostgreSQL connectivity: `docker exec telegraf telegraf --test`
621
+
- Ensure metrics are being received: `docker-compose logs telegraf | grep statsd`
622
+
623
+
2. **Queries returning no data:**
624
+
- Verify tables exist: `\dt "quickdid*"` in psql
625
+
- Check table contents: `SELECT COUNT(*) FROM "quickdid.http.request.count";`
626
+
- Verify time ranges in WHERE clauses
627
+
628
+
3. **Performance issues:**
629
+
- Check if hypertables are created: Query `timescaledb_information.hypertables`
630
+
- Verify compression is working if enabled
631
+
- Consider creating appropriate indexes on JSONB paths:
632
+
```sql
633
+
CREATE INDEX idx_http_method ON "quickdid.http.request.duration_ms" ((tags->>'method'));
634
+
CREATE INDEX idx_http_path ON "quickdid.http.request.duration_ms" ((tags->>'path'));
635
+
```
636
+
637
+
## Integration with QuickDID
638
+
639
+
To integrate with QuickDID, configure it to send metrics to the Telegraf StatsD endpoint:
640
+
641
+
```bash
642
+
# Set environment variables for QuickDID
643
+
export METRICS_ADAPTER=statsd
644
+
export METRICS_STATSD_HOST=localhost:8125
645
+
export METRICS_PREFIX=quickdid.
646
+
export METRICS_TAGS=env:production,service:quickdid
647
+
648
+
# Start QuickDID
649
+
cargo run
650
+
```
651
+
652
+
QuickDID will automatically send metrics to Telegraf, which will store them in TimescaleDB for analysis.
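To confirm the pipeline end to end, you can send a single hand-crafted metric and check that its table materializes (the metric name here is arbitrary):

```bash
echo "quickdid.integration.test:1|c" | nc -u -w0 localhost 8125
sleep 15  # allow one Telegraf flush_interval to pass
docker exec timescaledb psql -U postgres -d metrics \
  -c 'SELECT COUNT(*) FROM "quickdid.integration.test";'
```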
653
+
654
+
## Key Differences from Generic Metrics Table Approach
655
+
656
+
This configuration creates **individual tables per metric** instead of a single generic metrics table. Benefits include:
657
+
658
+
1. **Better performance**: Each metric has its own optimized schema
659
+
2. **Clearer data model**: Tables directly represent metrics
660
+
3. **Easier querying**: No need to filter by metric name
661
+
4. **Type safety**: Each metric's fields have appropriate types
662
+
5. **Efficient compression**: Per-metric compression strategies
663
+
664
+
Trade-offs:
665
+
- More tables to manage (mitigated by TimescaleDB automation)
666
+
- Need to know metric names upfront for queries
667
+
- Schema changes require table alterations
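The second trade-off shows up most clearly in cross-metric queries: because every metric lives in its own table, combining metrics takes an explicit `UNION ALL` rather than a filter on a metric-name column. A small sketch using the cache tables from earlier:

```sql
-- With a single generic metrics table this would be a WHERE clause;
-- with per-metric tables, each table is named explicitly
SELECT 'hit' AS kind, time, value FROM "quickdid.cache.hit.count"
UNION ALL
SELECT 'miss' AS kind, time, value FROM "quickdid.cache.miss.count"
ORDER BY time DESC
LIMIT 20;
```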
668
+
669
+
## Security Considerations
670
+
671
+
1. **Use strong passwords:** Update the default passwords in `.env`
672
+
2. **Enable SSL:** Configure `sslmode=require` in production
673
+
3. **Network isolation:** Use Docker networks to isolate services
674
+
4. **Access control:** Create separate database users with minimal permissions:
675
+
```sql
676
+
CREATE USER metrics_reader WITH PASSWORD 'readonly_password';
677
+
GRANT CONNECT ON DATABASE metrics TO metrics_reader;
678
+
GRANT USAGE ON SCHEMA public TO metrics_reader;
679
+
GRANT SELECT ON ALL TABLES IN SCHEMA public TO metrics_reader;
680
+
```
681
+
5. **Regular updates:** Keep Docker images updated for security patches
682
+
683
+
## Performance Tuning
684
+
685
+
### PostgreSQL/TimescaleDB Settings
686
+
687
+
The docker-compose.yml includes optimized settings. Adjust based on your hardware:
688
+
689
+
- `shared_buffers`: 25% of system RAM
690
+
- `effective_cache_size`: 75% of system RAM
691
+
- `maintenance_work_mem`: 5% of system RAM
692
+
- `work_mem`: RAM / max_connections / 2
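On a 16 GB host, for example, those heuristics work out roughly to the following overrides (illustrative numbers, assuming PostgreSQL's default max_connections of 100):

```yaml
command:
  - postgres
  - -c
  - shared_buffers=4GB          # ~25% of 16 GB
  - -c
  - effective_cache_size=12GB   # ~75% of 16 GB
  - -c
  - maintenance_work_mem=800MB  # ~5% of 16 GB
  - -c
  - work_mem=80MB               # 16 GB / 100 connections / 2
```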
693
+
694
+
### Telegraf Buffer Settings
695
+
696
+
For high-volume metrics, adjust in telegraf.conf:
697
+
698
+
```toml
699
+
[agent]
700
+
metric_batch_size = 5000 # Increase for high volume
701
+
metric_buffer_limit = 100000 # Increase buffer size
702
+
flush_interval = "5s" # Decrease for more frequent writes
703
+
```
704
+
705
+
## Conclusion
706
+
707
+
This setup provides a robust metrics collection and storage solution with:
708
+
- **Individual metric tables** for optimal performance and clarity
709
+
- **JSONB tag storage** for flexible querying
710
+
- **TimescaleDB hypertables** for efficient time-series storage
711
+
- **Comprehensive test suite** to verify functionality
712
+
- **Production-ready configuration** with compression and retention policies
713
+
714
+
The system correctly handles StatsD metrics from QuickDID and provides powerful querying capabilities through PostgreSQL's JSONB support and TimescaleDB's time-series functions.
+59
generate-wellknown.sh
+59
generate-wellknown.sh
···
1
+
#!/bin/bash
2
+
3
+
# Script to generate .well-known static files based on QuickDID configuration
4
+
# Usage: HTTP_EXTERNAL=quickdid.smokesignal.tools ./generate-wellknown.sh
5
+
#
6
+
# Note: Since we no longer process SERVICE_KEY, you'll need to manually
7
+
# add the public key to the did.json file if you need DID document support.
8
+
9
+
set -e
10
+
11
+
# Check required environment variables
12
+
if [ -z "$HTTP_EXTERNAL" ]; then
13
+
echo "Error: HTTP_EXTERNAL environment variable is required"
14
+
echo "Usage: HTTP_EXTERNAL=example.com ./generate-wellknown.sh"
15
+
exit 1
16
+
fi
17
+
18
+
# Ensure www/.well-known directory exists
19
+
mkdir -p www/.well-known
20
+
21
+
# Generate service DID from HTTP_EXTERNAL
22
+
if [[ "$HTTP_EXTERNAL" == *":"* ]]; then
23
+
# Contains port - URL encode the colon
24
+
SERVICE_DID="did:web:${HTTP_EXTERNAL//:/%3A}"
25
+
else
26
+
SERVICE_DID="did:web:$HTTP_EXTERNAL"
27
+
fi
28
+
29
+
echo "Generating .well-known files for $SERVICE_DID"
30
+
31
+
# Write atproto-did file
32
+
echo "$SERVICE_DID" > www/.well-known/atproto-did
33
+
echo "Created: www/.well-known/atproto-did"
34
+
35
+
# Create a basic did.json template
36
+
# Note: You'll need to manually add the publicKeyMultibase if you need DID document support
37
+
38
+
cat > www/.well-known/did.json <<EOF
39
+
{
40
+
"@context": [
41
+
"https://www.w3.org/ns/did/v1",
42
+
"https://w3id.org/security/multikey/v1"
43
+
],
44
+
"id": "$SERVICE_DID",
45
+
"verificationMethod": [],
46
+
"service": [
47
+
{
48
+
"id": "${SERVICE_DID}#quickdid",
49
+
"type": "QuickDIDService",
50
+
"serviceEndpoint": "https://${HTTP_EXTERNAL}"
51
+
}
52
+
]
53
+
}
54
+
EOF
55
+
56
+
echo "Created: www/.well-known/did.json"
57
+
echo ""
58
+
echo "Note: The did.json file is a basic template. If you need DID document support,"
59
+
echo "you'll need to manually add the verificationMethod with your public key."
+18
railway-resources/telegraf/Dockerfile
+18
railway-resources/telegraf/Dockerfile
···
1
+
# Telegraf Dockerfile for Railway Deployment
2
+
FROM telegraf:1.33-alpine
3
+
4
+
# Install additional packages for health checks
5
+
RUN apk add --no-cache curl postgresql-client
6
+
7
+
# Create directories for custom configs
8
+
RUN mkdir -p /etc/telegraf/telegraf.d
9
+
10
+
# Copy main configuration
11
+
COPY railway-resources/telegraf/telegraf.conf /etc/telegraf/telegraf.conf
12
+
13
+
# Health check - test configuration validity
14
+
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
15
+
CMD telegraf --config /etc/telegraf/telegraf.conf --test || exit 1
16
+
17
+
# Run telegraf with custom config
18
+
CMD ["telegraf", "--config", "/etc/telegraf/telegraf.conf", "--config-directory", "/etc/telegraf/telegraf.d"]
+48
railway-resources/telegraf/railway.toml
+48
railway-resources/telegraf/railway.toml
···
1
+
# Railway configuration for Telegraf service
2
+
# This file configures how Railway builds and deploys the Telegraf metrics collector
3
+
4
+
[build]
5
+
# Use Dockerfile for building
6
+
builder = "DOCKERFILE"
7
+
dockerfilePath = "railway-resources/telegraf/Dockerfile"
8
+
9
+
[deploy]
10
+
# Start command (handled by Dockerfile CMD)
11
+
startCommand = "telegraf --config /etc/telegraf/telegraf.conf"
12
+
13
+
# No health check path for Telegraf (uses container health check)
14
+
# healthcheckPath = ""
15
+
16
+
# Restart policy
17
+
restartPolicyType = "ALWAYS"
18
+
restartPolicyMaxRetries = 10
19
+
20
+
# Resource limits
21
+
memoryLimitMB = 1024
22
+
cpuLimitCores = 1
23
+
24
+
# Scaling (Telegraf should be singleton)
25
+
minReplicas = 1
26
+
maxReplicas = 1
27
+
28
+
# Graceful shutdown
29
+
stopTimeout = 10
30
+
31
+
# Service configuration for StatsD UDP endpoint
32
+
[[services]]
33
+
name = "telegraf-statsd"
34
+
port = 8125
35
+
protocol = "UDP"
36
+
internalPort = 8125
37
+
38
+
# Service configuration for Telegraf HTTP API (optional)
39
+
[[services]]
40
+
name = "telegraf-http"
41
+
port = 8086
42
+
protocol = "HTTP"
43
+
internalPort = 8086
44
+
45
+
# Environment-specific settings
46
+
[environments.production]
47
+
memoryLimitMB = 512
48
+
cpuLimitCores = 1
+77
railway-resources/telegraf/telegraf.conf
+77
railway-resources/telegraf/telegraf.conf
···
1
+
# Telegraf Configuration for QuickDID Metrics Collection
2
+
# Optimized for Railway deployment with TimescaleDB
3
+
4
+
# Global tags applied to all metrics
5
+
[global_tags]
6
+
environment = "${ENVIRONMENT:-production}"
7
+
service = "quickdid"
8
+
region = "${RAILWAY_REGION:-us-west1}"
9
+
deployment_id = "${RAILWAY_DEPLOYMENT_ID:-unknown}"
10
+
11
+
# Agent configuration
12
+
[agent]
13
+
## Default data collection interval
14
+
interval = "10s"
15
+
16
+
## Rounds collection interval to interval
17
+
round_interval = true
18
+
19
+
## Telegraf will send metrics to outputs in batches of at most metric_batch_size metrics.
20
+
metric_batch_size = 1000
21
+
22
+
## Maximum number of unwritten metrics per output
23
+
metric_buffer_limit = 10000
24
+
25
+
## Collection jitter is used to jitter the collection by a random amount
26
+
collection_jitter = "0s"
27
+
28
+
## Default flushing interval for all outputs
29
+
flush_interval = "10s"
30
+
31
+
## Jitter the flush interval by a random amount
32
+
flush_jitter = "0s"
33
+
34
+
## Precision of timestamps
35
+
precision = "1ms"
36
+
37
+
## Log level
38
+
debug = ${TELEGRAF_DEBUG:-false}
39
+
quiet = ${TELEGRAF_QUIET:-false}
40
+
41
+
## Override default hostname
42
+
hostname = "${HOSTNAME:-telegraf}"
43
+
44
+
## If true, do not set the "host" tag in the telegraf agent
45
+
omit_hostname = false
46
+
47
+
###############################################################################
48
+
# INPUT PLUGINS #
49
+
###############################################################################
50
+
51
+
# StatsD Server - receives metrics from QuickDID
52
+
[[inputs.statsd]]
53
+
service_address = ":8125" # Listen on UDP port 8125 for StatsD metrics
54
+
protocol = "udp"
55
+
delete_gauges = true
56
+
delete_counters = true
57
+
delete_sets = true
58
+
delete_timings = true
59
+
percentiles = [50, 90, 95, 99]
60
+
metric_separator = "."
61
+
allowed_pending_messages = 100
62
+
datadog_extensions = true
63
+
datadog_distributions = true
64
+
65
+
[[outputs.postgresql]]
66
+
connection = "${DATABASE_URL}"
67
+
68
+
schema = "public"
69
+
70
+
create_templates = [
71
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
72
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
73
+
]
74
+
75
+
tags_as_jsonb = true
76
+
77
+
fields_as_jsonb = false
+401
-70
src/bin/quickdid.rs
+401
-70
src/bin/quickdid.rs
···
1
1
use anyhow::Result;
2
2
use atproto_identity::{
3
3
config::{CertificateBundles, DnsNameservers},
4
-
key::{identify_key, to_public},
5
4
resolve::HickoryDnsResolver,
6
5
};
7
-
use clap::Parser;
6
+
use atproto_jetstream::{Consumer as JetstreamConsumer, ConsumerTaskConfig};
7
+
use atproto_lexicon::resolve::{DefaultLexiconResolver, LexiconResolver};
8
8
use quickdid::{
9
9
cache::create_redis_pool,
10
-
config::{Args, Config},
10
+
config::Config,
11
11
handle_resolver::{
12
-
create_base_resolver, create_caching_resolver, create_redis_resolver_with_ttl,
13
-
create_sqlite_resolver_with_ttl,
12
+
create_base_resolver, create_caching_resolver,
13
+
create_proactive_refresh_resolver_with_metrics, create_rate_limited_resolver_with_timeout,
14
+
create_redis_resolver_with_ttl, create_sqlite_resolver_with_ttl,
14
15
},
15
-
sqlite_schema::create_sqlite_pool,
16
16
handle_resolver_task::{HandleResolverTaskConfig, create_handle_resolver_task_with_config},
17
17
http::{AppContext, create_router},
18
+
jetstream_handler::QuickDidEventHandler,
19
+
lexicon_resolver::create_redis_lexicon_resolver_with_ttl,
20
+
metrics::create_metrics_publisher,
18
21
queue::{
19
22
HandleResolutionWork, QueueAdapter, create_mpsc_queue_from_channel, create_noop_queue,
20
-
create_redis_queue, create_sqlite_queue, create_sqlite_queue_with_max_size,
23
+
create_redis_queue, create_redis_queue_with_dedup, create_sqlite_queue,
24
+
create_sqlite_queue_with_max_size,
21
25
},
26
+
sqlite_schema::create_sqlite_pool,
22
27
task_manager::spawn_cancellable_task,
23
28
};
24
-
use serde_json::json;
25
29
use std::sync::Arc;
26
30
use tokio::signal;
27
31
use tokio_util::{sync::CancellationToken, task::TaskTracker};
···
55
59
}
56
60
}
57
61
62
+
/// Simple command-line argument handling for --version and --help
63
+
fn handle_simple_args() -> bool {
64
+
let args: Vec<String> = std::env::args().collect();
65
+
66
+
if args.len() > 1 {
67
+
match args[1].as_str() {
68
+
"--version" | "-V" => {
69
+
println!("quickdid {}", env!("CARGO_PKG_VERSION"));
70
+
return true;
71
+
}
72
+
"--help" | "-h" => {
73
+
println!("QuickDID - AT Protocol Identity Resolver Service");
74
+
println!("Version: {}", env!("CARGO_PKG_VERSION"));
75
+
println!();
76
+
println!("USAGE:");
77
+
println!(" quickdid [OPTIONS]");
78
+
println!();
79
+
println!("OPTIONS:");
80
+
println!(" -h, --help Print help information");
81
+
println!(" -V, --version Print version information");
82
+
println!();
83
+
println!("ENVIRONMENT VARIABLES:");
84
+
println!(
85
+
" HTTP_EXTERNAL External hostname for service endpoints (required)"
86
+
);
87
+
println!(" HTTP_PORT HTTP server port (default: 8080)");
88
+
println!(" PLC_HOSTNAME PLC directory hostname (default: plc.directory)");
89
+
println!(
90
+
" USER_AGENT HTTP User-Agent header (auto-generated with version)"
91
+
);
92
+
println!(" DNS_NAMESERVERS Custom DNS nameservers (comma-separated IPs)");
93
+
println!(
94
+
" CERTIFICATE_BUNDLES Additional CA certificates (comma-separated paths)"
95
+
);
96
+
println!();
97
+
println!(" CACHING:");
98
+
println!(" REDIS_URL Redis URL for handle resolution caching");
99
+
println!(
100
+
" SQLITE_URL SQLite database URL for handle resolution caching"
101
+
);
102
+
println!(
103
+
" CACHE_TTL_MEMORY TTL for in-memory cache in seconds (default: 600)"
104
+
);
105
+
println!(
106
+
" CACHE_TTL_REDIS TTL for Redis cache in seconds (default: 7776000)"
107
+
);
108
+
println!(
109
+
" CACHE_TTL_SQLITE TTL for SQLite cache in seconds (default: 7776000)"
110
+
);
111
+
println!();
112
+
println!(" QUEUE CONFIGURATION:");
113
+
println!(
114
+
" QUEUE_ADAPTER Queue adapter: 'mpsc', 'redis', 'sqlite', 'noop' (default: mpsc)"
115
+
);
116
+
println!(" QUEUE_REDIS_URL Redis URL for queue adapter");
117
+
println!(
118
+
" QUEUE_REDIS_PREFIX Redis key prefix for queues (default: queue:handleresolver:)"
119
+
);
120
+
println!(" QUEUE_REDIS_TIMEOUT Queue blocking timeout in seconds (default: 5)");
121
+
println!(
122
+
" QUEUE_REDIS_DEDUP_ENABLED Enable queue deduplication (default: false)"
123
+
);
124
+
println!(" QUEUE_REDIS_DEDUP_TTL TTL for dedup keys in seconds (default: 60)");
125
+
println!(" QUEUE_WORKER_ID Worker ID for Redis queue (default: worker1)");
126
+
println!(" QUEUE_BUFFER_SIZE Buffer size for MPSC queue (default: 1000)");
127
+
println!(" QUEUE_SQLITE_MAX_SIZE Maximum SQLite queue size (default: 10000)");
128
+
println!();
129
+
println!(" RATE LIMITING:");
130
+
println!(
131
+
" RESOLVER_MAX_CONCURRENT Maximum concurrent resolutions (default: 0 = disabled)"
132
+
);
133
+
println!(
134
+
" RESOLVER_MAX_CONCURRENT_TIMEOUT_MS Timeout for acquiring permits in ms (default: 0 = no timeout)"
135
+
);
136
+
println!();
137
+
println!(" METRICS:");
138
+
println!(
139
+
" METRICS_ADAPTER Metrics adapter: 'noop' or 'statsd' (default: noop)"
140
+
);
141
+
println!(
142
+
" METRICS_STATSD_HOST StatsD host when using statsd adapter (e.g., localhost:8125)"
143
+
);
144
+
println!(
145
+
" METRICS_STATSD_BIND Bind address for StatsD UDP socket (default: [::]:0)"
146
+
);
147
+
println!(" METRICS_PREFIX Prefix for all metrics (default: quickdid)");
148
+
println!(
149
+
" METRICS_TAGS Default tags for metrics (comma-separated key:value pairs)"
150
+
);
151
+
println!();
152
+
println!(" PROACTIVE CACHE REFRESH:");
153
+
println!(
154
+
" PROACTIVE_REFRESH_ENABLED Enable proactive cache refresh (default: false)"
155
+
);
156
+
println!(
157
+
" PROACTIVE_REFRESH_THRESHOLD Threshold as percentage of TTL (0.0-1.0, default: 0.8)"
158
+
);
159
+
println!();
160
+
println!(" JETSTREAM:");
161
+
println!(" JETSTREAM_ENABLED Enable Jetstream consumer (default: false)");
162
+
println!(
163
+
" JETSTREAM_HOSTNAME Jetstream hostname (default: jetstream.atproto.tools)"
164
+
);
165
+
println!();
166
+
println!(
167
+
"For more information, visit: https://github.com/smokesignal.events/quickdid"
168
+
);
169
+
return true;
170
+
}
171
+
_ => {}
172
+
}
173
+
}
174
+
175
+
false
176
+
}
177
+
58
178
#[tokio::main]
59
179
async fn main() -> Result<()> {
180
+
// Handle --version and --help
181
+
if handle_simple_args() {
182
+
return Ok(());
183
+
}
184
+
60
185
// Initialize tracing
61
186
tracing_subscriber::registry()
62
187
.with(
···
67
192
.with(tracing_subscriber::fmt::layer())
68
193
.init();
69
194
70
-
let args = Args::parse();
71
-
let config = Config::from_args(args)?;
195
+
let config = Config::from_env()?;
72
196
73
197
// Validate configuration
74
198
config.validate()?;
75
199
76
200
tracing::info!("Starting QuickDID service on port {}", config.http_port);
77
-
tracing::info!("Service DID: {}", config.service_did);
78
201
tracing::info!(
79
202
"Cache TTL - Memory: {}s, Redis: {}s, SQLite: {}s",
80
203
config.cache_ttl_memory,
···
109
232
// Create DNS resolver
110
233
let dns_resolver = HickoryDnsResolver::create_resolver(dns_nameservers.as_ref());
111
234
112
-
// Process service key
113
-
let private_service_key_data = identify_key(&config.service_key)?;
114
-
let public_service_key_data = to_public(&private_service_key_data)?;
115
-
let public_service_key = public_service_key_data.to_string();
235
+
// Clone DNS resolver for lexicon resolution before wrapping in Arc
236
+
let lexicon_dns_resolver = dns_resolver.clone();
237
+
238
+
// Wrap DNS resolver in Arc for handle resolution
239
+
let dns_resolver_arc = Arc::new(dns_resolver);
116
240
117
-
// Create service DID document
118
-
let service_document = json!({
119
-
"@context": vec!["https://www.w3.org/ns/did/v1", "https://w3id.org/security/multikey/v1"],
120
-
"id": config.service_did.clone(),
121
-
"verificationMethod": [{
122
-
"id": format!("{}#atproto", config.service_did),
123
-
"type": "Multikey",
124
-
"controller": config.service_did.clone(),
125
-
"publicKeyMultibase": public_service_key
126
-
}],
127
-
"service": []
128
-
});
241
+
// Create metrics publisher based on configuration
242
+
let metrics_publisher = create_metrics_publisher(&config).map_err(|e| {
243
+
tracing::error!("Failed to create metrics publisher: {}", e);
244
+
anyhow::anyhow!("Failed to create metrics publisher: {}", e)
245
+
})?;
129
246
130
-
// Create DNS resolver Arc for sharing
131
-
let dns_resolver_arc = Arc::new(dns_resolver);
247
+
tracing::info!(
248
+
"Metrics publisher created with {} adapter",
249
+
config.metrics_adapter
250
+
);
251
+
252
+
metrics_publisher.gauge("server", 1).await;
132
253
133
254
// Create base handle resolver using factory function
134
-
let base_handle_resolver = create_base_resolver(dns_resolver_arc.clone(), http_client.clone());
255
+
let mut base_handle_resolver = create_base_resolver(
256
+
dns_resolver_arc.clone(),
257
+
http_client.clone(),
258
+
metrics_publisher.clone(),
259
+
);
260
+
261
+
// Apply rate limiting if configured
262
+
if config.resolver_max_concurrent > 0 {
263
+
let timeout_info = if config.resolver_max_concurrent_timeout_ms > 0 {
264
+
format!(", {}ms timeout", config.resolver_max_concurrent_timeout_ms)
265
+
} else {
266
+
String::new()
267
+
};
268
+
tracing::info!(
269
+
"Applying rate limiting to handle resolver (max {} concurrent resolutions{})",
270
+
config.resolver_max_concurrent,
271
+
timeout_info
272
+
);
273
+
base_handle_resolver = create_rate_limited_resolver_with_timeout(
274
+
base_handle_resolver,
275
+
config.resolver_max_concurrent,
276
+
config.resolver_max_concurrent_timeout_ms,
277
+
metrics_publisher.clone(),
278
+
);
279
+
}
135
280
136
281
// Create Redis pool if configured
137
282
let redis_pool = config
···
146
291
None
147
292
};
148
293
149
-
// Create handle resolver with cache priority: Redis > SQLite > In-memory
150
-
let handle_resolver: Arc<dyn quickdid::handle_resolver::HandleResolver> =
151
-
if let Some(pool) = redis_pool {
152
-
tracing::info!(
153
-
"Using Redis-backed handle resolver with {}-second cache TTL",
154
-
config.cache_ttl_redis
155
-
);
156
-
create_redis_resolver_with_ttl(base_handle_resolver, pool, config.cache_ttl_redis)
157
-
} else if let Some(pool) = sqlite_pool {
158
-
tracing::info!(
159
-
"Using SQLite-backed handle resolver with {}-second cache TTL",
160
-
config.cache_ttl_sqlite
161
-
);
162
-
create_sqlite_resolver_with_ttl(base_handle_resolver, pool, config.cache_ttl_sqlite)
163
-
} else {
164
-
tracing::info!(
165
-
"Using in-memory handle resolver with {}-second cache TTL",
166
-
config.cache_ttl_memory
167
-
);
168
-
create_caching_resolver(base_handle_resolver, config.cache_ttl_memory)
169
-
};
170
-
171
294
// Create task tracker and cancellation token
172
295
let tracker = TaskTracker::new();
173
296
let token = CancellationToken::new();
174
297
175
-
// Setup background handle resolution task and get the queue adapter
298
+
// Create the queue adapter first (needed for proactive refresh)
176
299
let handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>> = {
177
300
// Create queue adapter based on configuration
178
301
let adapter: Arc<dyn QueueAdapter<HandleResolutionWork>> = match config
···
189
312
if let Some(url) = queue_redis_url {
190
313
if let Some(pool) = try_create_redis_pool(url, "queue adapter") {
191
314
tracing::info!(
192
-
"Creating Redis queue adapter with prefix: {}",
193
-
config.queue_redis_prefix
315
+
"Creating Redis queue adapter with prefix: {}, dedup: {}, dedup_ttl: {}s",
316
+
config.queue_redis_prefix,
317
+
config.queue_redis_dedup_enabled,
318
+
config.queue_redis_dedup_ttl
194
319
);
195
-
create_redis_queue::<HandleResolutionWork>(
196
-
pool,
197
-
config.queue_worker_id.clone(),
198
-
config.queue_redis_prefix.clone(),
199
-
config.queue_redis_timeout,
200
-
)
320
+
if config.queue_redis_dedup_enabled {
321
+
create_redis_queue_with_dedup::<HandleResolutionWork>(
322
+
pool,
323
+
config.queue_worker_id.clone(),
324
+
config.queue_redis_prefix.clone(),
325
+
config.queue_redis_timeout,
326
+
true,
327
+
config.queue_redis_dedup_ttl,
328
+
)
329
+
} else {
330
+
create_redis_queue::<HandleResolutionWork>(
331
+
pool,
332
+
config.queue_worker_id.clone(),
333
+
config.queue_redis_prefix.clone(),
334
+
config.queue_redis_timeout,
335
+
)
336
+
}
201
337
} else {
202
338
tracing::warn!("Falling back to MPSC queue adapter");
203
339
// Fall back to MPSC if Redis fails
···
232
368
create_sqlite_queue::<HandleResolutionWork>(pool)
233
369
}
234
370
} else {
235
-
tracing::warn!("Failed to create SQLite pool for queue, falling back to MPSC queue adapter");
371
+
tracing::warn!(
372
+
"Failed to create SQLite pool for queue, falling back to MPSC queue adapter"
373
+
);
236
374
// Fall back to MPSC if SQLite fails
237
375
let (handle_sender, handle_receiver) =
238
376
tokio::sync::mpsc::channel::<HandleResolutionWork>(
···
272
410
}
273
411
};
274
412
275
-
// Keep a reference to the adapter for the AppContext
276
-
let adapter_for_context = adapter.clone();
413
+
adapter
414
+
};
415
+
416
+
// Create handle resolver with cache priority: Redis > SQLite > In-memory
417
+
let (mut handle_resolver, cache_ttl): (
418
+
Arc<dyn quickdid::handle_resolver::HandleResolver>,
419
+
u64,
420
+
) = if let Some(ref pool) = redis_pool {
421
+
tracing::info!(
422
+
"Using Redis-backed handle resolver with {}-second cache TTL",
423
+
config.cache_ttl_redis
424
+
);
425
+
(
426
+
create_redis_resolver_with_ttl(
427
+
base_handle_resolver,
428
+
pool.clone(),
429
+
config.cache_ttl_redis,
430
+
metrics_publisher.clone(),
431
+
),
432
+
config.cache_ttl_redis,
433
+
)
434
+
} else if let Some(pool) = sqlite_pool {
435
+
tracing::info!(
436
+
"Using SQLite-backed handle resolver with {}-second cache TTL",
437
+
config.cache_ttl_sqlite
438
+
);
439
+
(
440
+
create_sqlite_resolver_with_ttl(
441
+
base_handle_resolver,
442
+
pool,
443
+
config.cache_ttl_sqlite,
444
+
metrics_publisher.clone(),
445
+
),
446
+
config.cache_ttl_sqlite,
447
+
)
448
+
} else {
449
+
tracing::info!(
450
+
"Using in-memory handle resolver with {}-second cache TTL",
451
+
config.cache_ttl_memory
452
+
);
453
+
(
454
+
create_caching_resolver(
455
+
base_handle_resolver,
456
+
config.cache_ttl_memory,
457
+
metrics_publisher.clone(),
458
+
),
459
+
config.cache_ttl_memory,
460
+
)
461
+
};
462
+
463
+
// Apply proactive refresh if enabled
464
+
if config.proactive_refresh_enabled && !matches!(config.queue_adapter.as_str(), "noop" | "none")
465
+
{
466
+
tracing::info!(
467
+
"Enabling proactive cache refresh with {}% threshold",
468
+
(config.proactive_refresh_threshold * 100.0) as u32
469
+
);
470
+
handle_resolver = create_proactive_refresh_resolver_with_metrics(
471
+
handle_resolver,
472
+
handle_queue.clone(),
473
+
metrics_publisher.clone(),
474
+
cache_ttl,
475
+
config.proactive_refresh_threshold,
476
+
);
477
+
} else if config.proactive_refresh_enabled {
478
+
tracing::warn!(
479
+
"Proactive refresh enabled but queue adapter is no-op, skipping proactive refresh"
480
+
);
481
+
}
482
+
483
+
// Create lexicon resolver with Redis caching if available
484
+
let lexicon_resolver: Arc<dyn LexiconResolver> = {
485
+
let base_lexicon_resolver: Arc<dyn LexiconResolver> = Arc::new(
486
+
DefaultLexiconResolver::new(http_client.clone(), lexicon_dns_resolver),
487
+
);
488
+
489
+
if let Some(ref pool) = redis_pool {
490
+
tracing::info!(
491
+
"Using Redis-backed lexicon resolver with {}-second cache TTL",
492
+
config.cache_ttl_redis
493
+
);
494
+
create_redis_lexicon_resolver_with_ttl(
495
+
base_lexicon_resolver,
496
+
pool.clone(),
497
+
config.cache_ttl_redis,
498
+
metrics_publisher.clone(),
499
+
)
500
+
} else {
501
+
tracing::info!("Using base lexicon resolver without caching");
502
+
base_lexicon_resolver
503
+
}
504
+
};
505
+
506
+
// Setup background handle resolution task
507
+
{
508
+
let adapter_for_task = handle_queue.clone();
277
509
278
510
// Only spawn handle resolver task if not using noop adapter
279
511
if !matches!(config.queue_adapter.as_str(), "noop" | "none") {
···
284
516
285
517
// Create and start handle resolver task
286
518
let handle_task = create_handle_resolver_task_with_config(
287
-
adapter,
519
+
adapter_for_task,
288
520
handle_resolver.clone(),
289
521
token.clone(),
290
522
handle_task_config,
523
+
metrics_publisher.clone(),
291
524
);
292
525
293
526
// Spawn the handle resolver task
···
320
553
} else {
321
554
tracing::info!("Background handle resolution task disabled (using no-op adapter)");
322
555
}
323
-
324
-
// Return the adapter to be used in AppContext
325
-
adapter_for_context
326
556
};
327
557
328
558
// Create app context with the queue adapter
329
559
let app_context = AppContext::new(
330
-
service_document,
331
-
config.service_did.clone(),
332
560
handle_resolver.clone(),
333
561
handle_queue,
562
+
lexicon_resolver,
563
+
metrics_publisher.clone(),
564
+
config.etag_seed.clone(),
565
+
config.cache_control_header.clone(),
566
+
config.static_files_dir.clone(),
334
567
);
335
568
336
569
// Create router
···
377
610
signal_token.cancel();
378
611
tracing::info!("Signal handler task completed");
379
612
});
613
+
}
614
+
615
+
// Start Jetstream consumer if enabled
616
+
if config.jetstream_enabled {
617
+
let jetstream_resolver = handle_resolver.clone();
618
+
let jetstream_metrics = metrics_publisher.clone();
619
+
let jetstream_hostname = config.jetstream_hostname.clone();
620
+
let jetstream_user_agent = config.user_agent.clone();
621
+
622
+
spawn_cancellable_task(
623
+
&tracker,
624
+
token.clone(),
625
+
"jetstream_consumer",
626
+
move |cancel_token| async move {
627
+
tracing::info!(hostname = %jetstream_hostname, "Starting Jetstream consumer");
628
+
629
+
// Create event handler
630
+
let event_handler = Arc::new(QuickDidEventHandler::new(
631
+
jetstream_resolver,
632
+
jetstream_metrics.clone(),
633
+
));
634
+
635
+
// Reconnection loop
636
+
let mut reconnect_count = 0u32;
637
+
let max_reconnects_per_minute = 5;
638
+
let reconnect_window = std::time::Duration::from_secs(60);
639
+
let mut last_disconnect = std::time::Instant::now() - reconnect_window;
640
+
641
+
while !cancel_token.is_cancelled() {
642
+
let now = std::time::Instant::now();
643
+
if now.duration_since(last_disconnect) < reconnect_window {
644
+
reconnect_count += 1;
645
+
if reconnect_count > max_reconnects_per_minute {
646
+
tracing::warn!(
647
+
count = reconnect_count,
648
+
"Too many Jetstream reconnects, waiting 60 seconds"
649
+
);
650
+
tokio::time::sleep(reconnect_window).await;
651
+
reconnect_count = 0;
652
+
last_disconnect = now;
653
+
continue;
654
+
}
655
+
} else {
656
+
reconnect_count = 0;
657
+
}
658
+
659
+
// Create consumer configuration
660
+
let consumer_config = ConsumerTaskConfig {
661
+
user_agent: jetstream_user_agent.clone(),
662
+
compression: false,
663
+
zstd_dictionary_location: String::new(),
664
+
jetstream_hostname: jetstream_hostname.clone(),
665
+
// Listen to the "community.lexicon.collection.fake" collection
666
+
// so that we keep an active connection open but only for
667
+
// account and identity events.
668
+
collections: vec!["community.lexicon.collection.fake".to_string()],
669
+
dids: vec![],
670
+
max_message_size_bytes: None,
671
+
cursor: None,
672
+
require_hello: true,
673
+
};
674
+
675
+
let consumer = JetstreamConsumer::new(consumer_config);
676
+
677
+
// Register event handler
678
+
if let Err(e) = consumer.register_handler(event_handler.clone()).await {
679
+
tracing::error!(error = ?e, "Failed to register Jetstream event handler");
680
+
continue;
681
+
}
682
+
683
+
// Run consumer with cancellation support
684
+
match consumer.run_background(cancel_token.clone()).await {
685
+
Ok(()) => {
686
+
tracing::info!("Jetstream consumer stopped normally");
687
+
if cancel_token.is_cancelled() {
688
+
break;
689
+
}
690
+
last_disconnect = std::time::Instant::now();
691
+
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
692
+
}
693
+
Err(e) => {
694
+
tracing::error!(error = ?e, "Jetstream consumer connection failed, will reconnect");
695
+
jetstream_metrics.incr("jetstream.connection.error").await;
696
+
last_disconnect = std::time::Instant::now();
697
+
698
+
if !cancel_token.is_cancelled() {
699
+
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
700
+
}
701
+
}
702
+
}
703
+
}
704
+
705
+
tracing::info!("Jetstream consumer task shutting down");
706
+
Ok(())
707
+
},
708
+
);
709
+
} else {
710
+
tracing::info!("Jetstream consumer disabled");
380
711
}
381
712
382
713
// Start HTTP server with cancellation support
+11
-3
src/cache.rs
+11
-3
src/cache.rs
···
1
1
//! Redis cache utilities for QuickDID
2
2
3
-
use anyhow::Result;
4
3
use deadpool_redis::{Config, Pool, Runtime};
4
+
use thiserror::Error;
5
+
6
+
/// Cache-specific errors
7
+
#[derive(Debug, Error)]
8
+
pub enum CacheError {
9
+
/// Redis pool creation failed
10
+
#[error("error-quickdid-cache-1 Redis pool creation failed: {0}")]
11
+
RedisPoolCreationFailed(String),
12
+
}
5
13
6
14
/// Create a Redis connection pool from a Redis URL.
7
15
///
···
14
22
/// Returns an error if:
15
23
/// - The Redis URL is invalid
16
24
/// - Pool creation fails
17
-
pub fn create_redis_pool(redis_url: &str) -> Result<Pool> {
25
+
pub fn create_redis_pool(redis_url: &str) -> Result<Pool, CacheError> {
18
26
let config = Config::from_url(redis_url);
19
27
let pool = config
20
28
.create_pool(Some(Runtime::Tokio1))
21
-
.map_err(|e| anyhow::anyhow!("error-quickdid-cache-1 Redis pool creation failed: {}", e))?;
29
+
.map_err(|e| CacheError::RedisPoolCreationFailed(e.to_string()))?;
22
30
Ok(pool)
23
31
}
+255
-380
src/config.rs
+255
-380
src/config.rs
···
5
5
//!
6
6
//! ## Configuration Sources
7
7
//!
8
-
//! Configuration can be provided through:
9
-
//! - Environment variables (highest priority)
10
-
//! - Command-line arguments
11
-
//! - Default values (lowest priority)
8
+
//! Configuration is provided exclusively through environment variables following
9
+
//! the 12-factor app methodology.
12
10
//!
13
11
//! ## Example
14
12
//!
15
13
//! ```bash
16
14
//! # Minimal configuration
17
15
//! HTTP_EXTERNAL=quickdid.example.com \
18
-
//! SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
19
16
//! quickdid
20
17
//!
21
18
//! # Full configuration with Redis and custom settings
22
19
//! HTTP_EXTERNAL=quickdid.example.com \
23
-
//! SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
24
20
//! HTTP_PORT=3000 \
25
21
//! REDIS_URL=redis://localhost:6379 \
26
22
//! CACHE_TTL_MEMORY=300 \
···
30
26
//! quickdid
31
27
//! ```
32
28
33
-
use atproto_identity::config::optional_env;
34
-
use clap::Parser;
29
+
use std::env;
35
30
use thiserror::Error;
36
31
37
32
/// Configuration-specific errors following the QuickDID error format
···
41
36
pub enum ConfigError {
42
37
/// Missing required environment variable or command-line argument
43
38
///
44
-
/// Example: When SERVICE_KEY or HTTP_EXTERNAL are not provided
39
+
/// Example: When HTTP_EXTERNAL is not provided
45
40
#[error("error-quickdid-config-1 Missing required environment variable: {0}")]
46
41
MissingRequired(String),
47
42
48
43
/// Invalid configuration value that doesn't meet expected format or constraints
49
44
///
50
-
/// Example: Invalid QUEUE_ADAPTER value (must be 'mpsc', 'redis', or 'noop')
45
+
/// Example: Invalid QUEUE_ADAPTER value (must be 'mpsc', 'redis', 'sqlite', 'noop', or 'none')
51
46
#[error("error-quickdid-config-2 Invalid configuration value: {0}")]
52
47
InvalidValue(String),
53
48
···
64
59
InvalidTimeout(String),
65
60
}
66
61
67
-
#[derive(Parser, Clone)]
68
-
#[command(
69
-
name = "quickdid",
70
-
about = "QuickDID - AT Protocol Identity Resolver Service",
71
-
long_about = "
72
-
A fast identity resolution service for the AT Protocol ecosystem.
73
-
This service provides identity resolution endpoints and handle resolution
74
-
capabilities with in-memory caching.
75
-
76
-
FEATURES:
77
-
- AT Protocol identity resolution and DID document management
78
-
- Handle resolution with in-memory caching
79
-
- DID:web identity publishing via .well-known endpoints
80
-
- Health check endpoint
81
-
82
-
ENVIRONMENT VARIABLES:
83
-
SERVICE_KEY Private key for service identity (required)
84
-
HTTP_EXTERNAL External hostname for service endpoints (required)
85
-
HTTP_PORT HTTP server port (default: 8080)
86
-
PLC_HOSTNAME PLC directory hostname (default: plc.directory)
87
-
USER_AGENT HTTP User-Agent header (auto-generated with version)
88
-
DNS_NAMESERVERS Custom DNS nameservers (comma-separated IPs)
89
-
CERTIFICATE_BUNDLES Additional CA certificates (comma-separated paths)
90
-
91
-
CACHING:
92
-
REDIS_URL Redis URL for handle resolution caching (optional)
93
-
SQLITE_URL SQLite database URL for handle resolution caching (optional)
94
-
CACHE_TTL_MEMORY TTL for in-memory cache in seconds (default: 600)
95
-
CACHE_TTL_REDIS TTL for Redis cache in seconds (default: 7776000 = 90 days)
96
-
CACHE_TTL_SQLITE TTL for SQLite cache in seconds (default: 7776000 = 90 days)
97
-
98
-
QUEUE CONFIGURATION:
99
-
QUEUE_ADAPTER Queue adapter: 'mpsc', 'redis', 'sqlite', 'noop', 'none' (default: mpsc)
100
-
QUEUE_REDIS_URL Redis URL for queue adapter (uses REDIS_URL if not set)
101
-
QUEUE_REDIS_PREFIX Redis key prefix for queues (default: queue:handleresolver:)
102
-
QUEUE_REDIS_TIMEOUT Queue blocking timeout in seconds (default: 5)
103
-
QUEUE_WORKER_ID Worker ID for Redis queue (default: worker1)
104
-
QUEUE_BUFFER_SIZE Buffer size for MPSC queue (default: 1000)
105
-
QUEUE_SQLITE_MAX_SIZE Maximum SQLite queue size for work shedding (default: 10000, 0=unlimited)
106
-
"
107
-
)]
108
-
/// Command-line arguments and environment variables configuration
109
-
pub struct Args {
110
-
/// HTTP server port to bind to
111
-
///
112
-
/// Examples: "8080", "3000", "80"
113
-
/// Constraints: Must be a valid port number (1-65535)
114
-
#[arg(long, env = "HTTP_PORT", default_value = "8080")]
115
-
pub http_port: String,
116
-
117
-
/// PLC directory hostname for DID resolution
118
-
///
119
-
/// Examples: "plc.directory", "test.plc.directory"
120
-
/// Use "plc.directory" for production
121
-
#[arg(long, env = "PLC_HOSTNAME", default_value = "plc.directory")]
122
-
pub plc_hostname: String,
123
-
124
-
/// External hostname for service endpoints (REQUIRED)
125
-
///
126
-
/// Examples:
127
-
/// - "quickdid.example.com" (standard)
128
-
/// - "quickdid.example.com:8080" (with port)
129
-
/// - "localhost:3007" (development)
130
-
#[arg(long, env = "HTTP_EXTERNAL")]
131
-
pub http_external: Option<String>,
132
-
133
-
/// Private key for service identity in DID format (REQUIRED)
134
-
///
135
-
/// Examples:
136
-
/// - "did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK"
137
-
/// - "did:plc:xyz123abc456"
138
-
///
139
-
/// SECURITY: Keep this key secure and never commit to version control
140
-
#[arg(long, env = "SERVICE_KEY")]
141
-
pub service_key: Option<String>,
142
-
143
-
/// HTTP User-Agent header for outgoing requests
144
-
///
145
-
/// Example: `quickdid/1.0.0 (+https://quickdid.example.com)`
146
-
/// Default: Auto-generated with current version
147
-
#[arg(long, env = "USER_AGENT")]
148
-
pub user_agent: Option<String>,
149
-
150
-
/// Custom DNS nameservers (comma-separated IP addresses)
151
-
///
152
-
/// Examples:
153
-
/// - "8.8.8.8,8.8.4.4" (Google DNS)
154
-
/// - "1.1.1.1,1.0.0.1" (Cloudflare DNS)
155
-
/// - "192.168.1.1" (Local DNS)
156
-
#[arg(long, env = "DNS_NAMESERVERS")]
157
-
pub dns_nameservers: Option<String>,
158
-
159
-
/// Additional CA certificates (comma-separated file paths)
160
-
///
161
-
/// Examples:
162
-
/// - "/etc/ssl/certs/custom-ca.pem"
163
-
/// - "/certs/ca1.pem,/certs/ca2.pem"
164
-
///
165
-
/// Use for custom or internal certificate authorities
166
-
#[arg(long, env = "CERTIFICATE_BUNDLES")]
167
-
pub certificate_bundles: Option<String>,
168
-
169
-
/// Redis connection URL for caching
170
-
///
171
-
/// Examples:
172
-
/// - "redis://localhost:6379/0" (local, no auth)
173
-
/// - "redis://user:pass@redis.example.com:6379/0" (with auth)
174
-
/// - "rediss://secure-redis.example.com:6380/0" (TLS)
175
-
///
176
-
/// Benefits: Persistent cache, distributed caching, better performance
177
-
#[arg(long, env = "REDIS_URL")]
178
-
pub redis_url: Option<String>,
179
-
180
-
/// SQLite database URL for caching
181
-
///
182
-
/// Examples:
183
-
/// - "sqlite:./quickdid.db" (file-based database)
184
-
/// - "sqlite::memory:" (in-memory database for testing)
185
-
/// - "sqlite:/path/to/cache.db" (absolute path)
186
-
///
187
-
/// Benefits: Persistent cache, single-file storage, no external dependencies
188
-
#[arg(long, env = "SQLITE_URL")]
189
-
pub sqlite_url: Option<String>,
190
-
191
-
/// Queue adapter type for background processing
192
-
///
193
-
/// Values:
194
-
/// - "mpsc": In-memory multi-producer single-consumer queue
195
-
/// - "redis": Redis-backed distributed queue
196
-
/// - "sqlite": SQLite-backed persistent queue
197
-
/// - "noop": Disable queue processing (for testing)
198
-
/// - "none": Alias for "noop"
199
-
///
200
-
/// Default: "mpsc" for single-instance deployments
201
-
#[arg(long, env = "QUEUE_ADAPTER", default_value = "mpsc")]
202
-
pub queue_adapter: String,
203
-
204
-
/// Redis URL specifically for queue operations
205
-
///
206
-
/// Falls back to REDIS_URL if not specified
207
-
/// Use when separating cache and queue Redis instances
208
-
#[arg(long, env = "QUEUE_REDIS_URL")]
209
-
pub queue_redis_url: Option<String>,
210
-
211
-
/// Redis key prefix for queue operations
212
-
///
213
-
/// Examples:
214
-
/// - "queue:handleresolver:" (default)
215
-
/// - "prod:queue:hr:" (environment-specific)
216
-
/// - "quickdid:v1:queue:" (version-specific)
217
-
///
218
-
/// Use to namespace queues when sharing Redis
219
-
#[arg(
220
-
long,
221
-
env = "QUEUE_REDIS_PREFIX",
222
-
default_value = "queue:handleresolver:"
223
-
)]
224
-
pub queue_redis_prefix: String,
225
-
226
-
/// Worker ID for Redis queue operations
227
-
///
228
-
/// Examples: "worker-001", "prod-us-east-1", "quickdid-1"
229
-
/// Default: "worker1"
230
-
///
231
-
/// Use for identifying specific workers in logs
232
-
#[arg(long, env = "QUEUE_WORKER_ID")]
233
-
pub queue_worker_id: Option<String>,
234
-
235
-
/// Buffer size for MPSC queue
236
-
///
237
-
/// Range: 100-100000 (recommended)
238
-
/// Default: 1000
239
-
///
240
-
/// Increase for high-traffic deployments
241
-
#[arg(long, env = "QUEUE_BUFFER_SIZE", default_value = "1000")]
242
-
pub queue_buffer_size: usize,
243
-
244
-
/// TTL for in-memory cache in seconds
245
-
///
246
-
/// Range: 60-3600 (recommended)
247
-
/// Default: 600 (10 minutes)
248
-
///
249
-
/// Lower values = fresher data, more resolution requests
250
-
/// Higher values = better performance, potentially stale data
251
-
#[arg(long, env = "CACHE_TTL_MEMORY", default_value = "600")]
252
-
pub cache_ttl_memory: u64,
253
-
254
-
/// TTL for Redis cache in seconds
255
-
///
256
-
/// Range: 3600-31536000 (1 hour to 1 year)
257
-
/// Default: 7776000 (90 days)
258
-
///
259
-
/// Recommendation: 86400 (1 day) for frequently changing data
260
-
#[arg(long, env = "CACHE_TTL_REDIS", default_value = "7776000")]
261
-
pub cache_ttl_redis: u64,
262
-
263
-
/// TTL for SQLite cache in seconds
264
-
///
265
-
/// Range: 3600-31536000 (1 hour to 1 year)
266
-
/// Default: 7776000 (90 days)
267
-
///
268
-
/// Recommendation: 86400 (1 day) for frequently changing data
269
-
#[arg(long, env = "CACHE_TTL_SQLITE", default_value = "7776000")]
270
-
pub cache_ttl_sqlite: u64,
62
+
/// Helper function to get an environment variable with an optional default
63
+
fn get_env_or_default(key: &str, default: Option<&str>) -> Option<String> {
64
+
match env::var(key) {
65
+
Ok(val) if !val.is_empty() => Some(val),
66
+
_ => default.map(String::from),
67
+
}
68
+
}
271
69
272
-
/// Redis blocking timeout for queue operations in seconds
273
-
///
274
-
/// Range: 1-60 (recommended)
275
-
/// Default: 5
276
-
///
277
-
/// Lower values = more responsive to shutdown
278
-
/// Higher values = less Redis polling overhead
279
-
#[arg(long, env = "QUEUE_REDIS_TIMEOUT", default_value = "5")]
280
-
pub queue_redis_timeout: u64,
281
-
282
-
/// Maximum queue size for SQLite adapter (work shedding)
283
-
///
284
-
/// Range: 100-1000000 (recommended)
285
-
/// Default: 10000
286
-
///
287
-
/// When the SQLite queue exceeds this limit, the oldest entries are deleted
288
-
/// to maintain the queue size. This prevents unbounded queue growth while
289
-
/// preserving the most recently queued work items.
290
-
///
291
-
/// Set to 0 to disable work shedding (unlimited queue size)
292
-
#[arg(long, env = "QUEUE_SQLITE_MAX_SIZE", default_value = "10000")]
293
-
pub queue_sqlite_max_size: u64,
70
+
/// Helper function to parse an environment variable as a specific type
71
+
fn parse_env<T: std::str::FromStr>(key: &str, default: T) -> Result<T, ConfigError>
72
+
where
73
+
T::Err: std::fmt::Display,
74
+
{
75
+
match env::var(key) {
76
+
Ok(val) if !val.is_empty() => val
77
+
.parse::<T>()
78
+
.map_err(|e| ConfigError::InvalidValue(format!("{}: {}", key, e))),
79
+
_ => Ok(default),
80
+
}
294
81
}
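// A minimal sketch (not part of the diff) of how the two helpers above
// compose; variable names and values are illustrative only.
#[allow(dead_code)]
fn example_helper_usage() -> Result<(), ConfigError> {
    // Falls back to "8080" when HTTP_PORT is unset or empty.
    let port = get_env_or_default("HTTP_PORT", Some("8080")).unwrap();
    // Parses CACHE_TTL_MEMORY as u64, defaulting to 600 when absent;
    // a malformed value surfaces as ConfigError::InvalidValue.
    let ttl: u64 = parse_env("CACHE_TTL_MEMORY", 600)?;
    let _ = (port, ttl);
    Ok(())
}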
295
82
296
83
/// Validated configuration for QuickDID service
297
84
///
298
85
/// This struct contains all configuration after validation and processing.
299
-
/// Use `Config::from_args()` to create from command-line arguments and environment variables.
86
+
/// Use `Config::from_env()` to create from environment variables.
300
87
///
301
88
/// ## Example
302
89
///
303
90
/// ```rust,no_run
304
-
/// use quickdid::config::{Args, Config};
305
-
/// use clap::Parser;
91
+
/// use quickdid::config::Config;
306
92
///
307
93
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
308
-
/// let args = Args::parse();
309
-
/// let config = Config::from_args(args)?;
94
+
/// let config = Config::from_env()?;
310
95
/// config.validate()?;
311
96
///
312
97
/// println!("Service running at: {}", config.http_external);
313
-
/// println!("Service DID: {}", config.service_did);
314
98
/// # Ok(())
315
99
/// # }
316
100
/// ```
···
325
109
/// External hostname for service endpoints (e.g., "quickdid.example.com")
326
110
pub http_external: String,
327
111
328
-
/// Private key for service identity (e.g., "did:key:z42tm...")
329
-
pub service_key: String,
330
-
331
112
/// HTTP User-Agent for outgoing requests (e.g., "quickdid/1.0.0 (+https://...)")
332
113
pub user_agent: String,
333
-
334
-
/// Derived service DID (e.g., "did:web:quickdid.example.com")
335
-
/// Automatically generated from http_external with proper encoding
336
-
pub service_did: String,
337
114
338
115
/// Custom DNS nameservers, comma-separated (e.g., "8.8.8.8,8.8.4.4")
339
116
pub dns_nameservers: Option<String>,
···
374
151
/// Redis blocking timeout for queue operations in seconds (e.g., 5)
375
152
pub queue_redis_timeout: u64,
376
153
154
+
/// Enable deduplication for Redis queue to prevent duplicate handles
155
+
/// Default: false
156
+
pub queue_redis_dedup_enabled: bool,
157
+
158
+
/// TTL for Redis queue deduplication keys in seconds
159
+
/// Default: 60 (1 minute)
160
+
pub queue_redis_dedup_ttl: u64,
161
+
377
162
/// Maximum queue size for SQLite adapter work shedding (e.g., 10000)
378
163
/// When exceeded, oldest entries are deleted to maintain this limit.
379
164
/// Set to 0 to disable work shedding (unlimited queue size).
380
165
pub queue_sqlite_max_size: u64,
166
+
167
+
/// Maximum concurrent handle resolutions allowed (rate limiting).
168
+
/// When set to > 0, enables rate limiting using a semaphore.
169
+
/// Default: 0 (disabled)
170
+
pub resolver_max_concurrent: usize,
171
+
172
+
/// Timeout for acquiring rate limit permit in milliseconds.
173
+
/// When set to > 0, requests time out if they cannot acquire a permit within this window.
174
+
/// Default: 0 (no timeout)
175
+
pub resolver_max_concurrent_timeout_ms: u64,
176
+
177
+
/// Seed value for ETag generation to allow cache invalidation.
178
+
/// This value is incorporated into ETag checksums, allowing server admins
179
+
/// to invalidate client-cached responses after major changes.
180
+
/// Default: application version
181
+
pub etag_seed: String,
182
+
183
+
/// Maximum age for HTTP cache control in seconds.
184
+
/// When set to 0, Cache-Control header is disabled.
185
+
/// Default: 86400 (24 hours)
186
+
pub cache_max_age: u64,
187
+
188
+
/// Stale-if-error directive for Cache-Control in seconds.
189
+
/// Allows stale content to be served if backend errors occur.
190
+
/// Default: 172800 (48 hours)
191
+
pub cache_stale_if_error: u64,
192
+
193
+
/// Stale-while-revalidate directive for Cache-Control in seconds.
194
+
/// Allows stale content to be served while fetching fresh content.
195
+
/// Default: 86400 (24 hours)
196
+
pub cache_stale_while_revalidate: u64,
197
+
198
+
/// Max-stale directive for Cache-Control in seconds.
199
+
/// Maximum time client will accept stale responses.
200
+
/// Default: 172800 (48 hours)
201
+
pub cache_max_stale: u64,
202
+
203
+
/// Min-fresh directive for Cache-Control in seconds.
204
+
/// Minimum time response must remain fresh.
205
+
/// Default: 3600 (1 hour)
206
+
pub cache_min_fresh: u64,
207
+
208
+
/// Pre-calculated Cache-Control header value.
209
+
/// Calculated at startup for efficiency.
210
+
/// None if cache_max_age is 0 (disabled).
211
+
pub cache_control_header: Option<String>,
212
+
213
+
/// Metrics adapter type: "noop" or "statsd"
214
+
/// Default: "noop" (no metrics collection)
215
+
pub metrics_adapter: String,
216
+
217
+
/// StatsD host for metrics collection (e.g., "localhost:8125")
218
+
/// Required when metrics_adapter is "statsd"
219
+
pub metrics_statsd_host: Option<String>,
220
+
221
+
/// Bind address for StatsD UDP socket (e.g., "0.0.0.0:0" for IPv4 or "[::]:0" for IPv6)
222
+
/// Default: "[::]:0" (IPv6 any address, random port)
223
+
pub metrics_statsd_bind: String,
224
+
225
+
/// Metrics prefix for all metrics (e.g., "quickdid")
226
+
/// Default: "quickdid"
227
+
pub metrics_prefix: String,
228
+
229
+
/// Default tags for all metrics (comma-separated key:value pairs)
230
+
/// Example: "env:production,service:quickdid"
231
+
pub metrics_tags: Option<String>,
232
+
233
+
/// Enable proactive cache refresh for frequently accessed handles.
234
+
/// When enabled, cache entries that have reached the refresh threshold
235
+
/// will be queued for background refresh to keep the cache warm.
236
+
/// Default: false
237
+
pub proactive_refresh_enabled: bool,
238
+
239
+
/// Threshold as a percentage (0.0-1.0) of cache TTL when to trigger proactive refresh.
240
+
/// For example, 0.8 means refresh when an entry has lived for 80% of its TTL.
241
+
/// Default: 0.8 (80%)
242
+
pub proactive_refresh_threshold: f64,
243
+
244
+
/// Directory path for serving static files.
245
+
/// When set, the root handler will serve files from this directory.
246
+
/// Default: "www" (relative to working directory)
247
+
pub static_files_dir: String,
248
+
249
+
/// Enable Jetstream consumer for AT Protocol events.
250
+
/// When enabled, the service will consume Account and Identity events
251
+
/// to maintain cache consistency.
252
+
/// Default: false
253
+
pub jetstream_enabled: bool,
254
+
255
+
/// Jetstream WebSocket hostname for consuming AT Protocol events.
256
+
/// Example: "jetstream.atproto.tools" or "jetstream1.us-west.bsky.network"
257
+
/// Default: "jetstream.atproto.tools"
258
+
pub jetstream_hostname: String,
381
259
}
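// Example environment (hypothetical values) exercising the fields added in
// this change; defaults apply when a variable is unset, and
// METRICS_STATSD_HOST becomes required once METRICS_ADAPTER=statsd:
//
//   RESOLVER_MAX_CONCURRENT=100
//   RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=500
//   QUEUE_REDIS_DEDUP_ENABLED=true
//   QUEUE_REDIS_DEDUP_TTL=60
//   METRICS_ADAPTER=statsd
//   METRICS_STATSD_HOST=localhost:8125
//   PROACTIVE_REFRESH_ENABLED=true
//   PROACTIVE_REFRESH_THRESHOLD=0.8
//   STATIC_FILES_DIR=www
//   JETSTREAM_ENABLED=true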
382
260
383
261
impl Config {
384
-
/// Create a validated Config from command-line arguments and environment variables
262
+
/// Create a validated Config from environment variables
385
263
///
386
264
/// This method:
387
-
/// 1. Processes command-line arguments with environment variable fallbacks
388
-
/// 2. Validates required fields (HTTP_EXTERNAL and SERVICE_KEY)
389
-
/// 3. Generates derived values (service_did from http_external)
390
-
/// 4. Applies defaults where appropriate
391
-
///
392
-
/// ## Priority Order
393
-
///
394
-
/// 1. Command-line arguments (highest priority)
395
-
/// 2. Environment variables
396
-
/// 3. Default values (lowest priority)
265
+
/// 1. Reads configuration from environment variables
266
+
/// 2. Validates required fields (HTTP_EXTERNAL)
267
+
/// 3. Applies defaults where appropriate
397
268
///
398
269
/// ## Example
399
270
///
400
271
/// ```rust,no_run
401
-
/// use quickdid::config::{Args, Config};
402
-
/// use clap::Parser;
272
+
/// use quickdid::config::Config;
403
273
///
404
274
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
405
-
/// // Parse from environment and command-line
406
-
/// let args = Args::parse();
407
-
/// let config = Config::from_args(args)?;
275
+
/// // Parse from environment variables
276
+
/// let config = Config::from_env()?;
408
277
///
409
-
/// // The service DID is automatically generated from HTTP_EXTERNAL
410
-
/// assert!(config.service_did.starts_with("did:web:"));
411
278
/// # Ok(())
412
279
/// # }
413
280
/// ```
···
416
283
///
417
284
/// Returns `ConfigError::MissingRequired` if:
418
285
/// - HTTP_EXTERNAL is not provided
419
-
/// - SERVICE_KEY is not provided
420
-
pub fn from_args(args: Args) -> Result<Self, ConfigError> {
421
-
let http_external = args
422
-
.http_external
423
-
.or_else(|| {
424
-
let env_val = optional_env("HTTP_EXTERNAL");
425
-
if env_val.is_empty() {
426
-
None
427
-
} else {
428
-
Some(env_val)
429
-
}
430
-
})
286
+
pub fn from_env() -> Result<Self, ConfigError> {
287
+
// Required fields
288
+
let http_external = env::var("HTTP_EXTERNAL")
289
+
.ok()
290
+
.filter(|s| !s.is_empty())
431
291
.ok_or_else(|| ConfigError::MissingRequired("HTTP_EXTERNAL".to_string()))?;
432
292
433
-
let service_key = args
434
-
.service_key
435
-
.or_else(|| {
436
-
let env_val = optional_env("SERVICE_KEY");
437
-
if env_val.is_empty() {
438
-
None
439
-
} else {
440
-
Some(env_val)
441
-
}
442
-
})
443
-
.ok_or_else(|| ConfigError::MissingRequired("SERVICE_KEY".to_string()))?;
444
-
293
+
// Generate default user agent
445
294
let default_user_agent = format!(
446
295
"quickdid/{} (+https://github.com/smokesignal.events/quickdid)",
447
296
env!("CARGO_PKG_VERSION")
448
297
);
449
298
450
-
let user_agent = args
451
-
.user_agent
452
-
.or_else(|| {
453
-
let env_val = optional_env("USER_AGENT");
454
-
if env_val.is_empty() {
455
-
None
456
-
} else {
457
-
Some(env_val)
458
-
}
459
-
})
460
-
.unwrap_or(default_user_agent);
461
-
462
-
let service_did = if http_external.contains(':') {
463
-
let encoded_external = http_external.replace(':', "%3A");
464
-
format!("did:web:{}", encoded_external)
465
-
} else {
466
-
format!("did:web:{}", http_external)
299
+
let mut config = Config {
300
+
http_port: get_env_or_default("HTTP_PORT", Some("8080")).unwrap(),
301
+
plc_hostname: get_env_or_default("PLC_HOSTNAME", Some("plc.directory")).unwrap(),
302
+
http_external,
303
+
user_agent: get_env_or_default("USER_AGENT", None).unwrap_or(default_user_agent),
304
+
dns_nameservers: get_env_or_default("DNS_NAMESERVERS", None),
305
+
certificate_bundles: get_env_or_default("CERTIFICATE_BUNDLES", None),
306
+
redis_url: get_env_or_default("REDIS_URL", None),
307
+
sqlite_url: get_env_or_default("SQLITE_URL", None),
308
+
queue_adapter: get_env_or_default("QUEUE_ADAPTER", Some("mpsc")).unwrap(),
309
+
queue_redis_url: get_env_or_default("QUEUE_REDIS_URL", None),
310
+
queue_redis_prefix: get_env_or_default(
311
+
"QUEUE_REDIS_PREFIX",
312
+
Some("queue:handleresolver:"),
313
+
)
314
+
.unwrap(),
315
+
queue_worker_id: get_env_or_default("QUEUE_WORKER_ID", Some("worker1")).unwrap(),
316
+
queue_buffer_size: parse_env("QUEUE_BUFFER_SIZE", 1000)?,
317
+
cache_ttl_memory: parse_env("CACHE_TTL_MEMORY", 600)?,
318
+
cache_ttl_redis: parse_env("CACHE_TTL_REDIS", 7776000)?,
319
+
cache_ttl_sqlite: parse_env("CACHE_TTL_SQLITE", 7776000)?,
320
+
queue_redis_timeout: parse_env("QUEUE_REDIS_TIMEOUT", 5)?,
321
+
queue_redis_dedup_enabled: parse_env("QUEUE_REDIS_DEDUP_ENABLED", false)?,
322
+
queue_redis_dedup_ttl: parse_env("QUEUE_REDIS_DEDUP_TTL", 60)?,
323
+
queue_sqlite_max_size: parse_env("QUEUE_SQLITE_MAX_SIZE", 10000)?,
324
+
resolver_max_concurrent: parse_env("RESOLVER_MAX_CONCURRENT", 0)?,
325
+
resolver_max_concurrent_timeout_ms: parse_env("RESOLVER_MAX_CONCURRENT_TIMEOUT_MS", 0)?,
326
+
etag_seed: get_env_or_default("ETAG_SEED", Some(env!("CARGO_PKG_VERSION"))).unwrap(),
327
+
cache_max_age: parse_env("CACHE_MAX_AGE", 86400)?, // 24 hours
328
+
cache_stale_if_error: parse_env("CACHE_STALE_IF_ERROR", 172800)?, // 48 hours
329
+
cache_stale_while_revalidate: parse_env("CACHE_STALE_WHILE_REVALIDATE", 86400)?, // 24 hours
330
+
cache_max_stale: parse_env("CACHE_MAX_STALE", 172800)?, // 48 hours
331
+
cache_min_fresh: parse_env("CACHE_MIN_FRESH", 3600)?, // 1 hour
332
+
cache_control_header: None, // Will be calculated below
333
+
metrics_adapter: get_env_or_default("METRICS_ADAPTER", Some("noop")).unwrap(),
334
+
metrics_statsd_host: get_env_or_default("METRICS_STATSD_HOST", None),
335
+
metrics_statsd_bind: get_env_or_default("METRICS_STATSD_BIND", Some("[::]:0")).unwrap(),
336
+
metrics_prefix: get_env_or_default("METRICS_PREFIX", Some("quickdid")).unwrap(),
337
+
metrics_tags: get_env_or_default("METRICS_TAGS", None),
338
+
proactive_refresh_enabled: parse_env("PROACTIVE_REFRESH_ENABLED", false)?,
339
+
proactive_refresh_threshold: parse_env("PROACTIVE_REFRESH_THRESHOLD", 0.8)?,
340
+
static_files_dir: get_env_or_default("STATIC_FILES_DIR", Some("www")).unwrap(),
341
+
jetstream_enabled: parse_env("JETSTREAM_ENABLED", false)?,
342
+
jetstream_hostname: get_env_or_default(
343
+
"JETSTREAM_HOSTNAME",
344
+
Some("jetstream.atproto.tools"),
345
+
)
346
+
.unwrap(),
467
347
};
468
348
469
-
Ok(Config {
470
-
http_port: args.http_port,
471
-
plc_hostname: args.plc_hostname,
472
-
http_external,
473
-
service_key,
474
-
user_agent,
475
-
service_did,
476
-
dns_nameservers: args.dns_nameservers.or_else(|| {
477
-
let env_val = optional_env("DNS_NAMESERVERS");
478
-
if env_val.is_empty() {
479
-
None
480
-
} else {
481
-
Some(env_val)
482
-
}
483
-
}),
484
-
certificate_bundles: args.certificate_bundles.or_else(|| {
485
-
let env_val = optional_env("CERTIFICATE_BUNDLES");
486
-
if env_val.is_empty() {
487
-
None
488
-
} else {
489
-
Some(env_val)
490
-
}
491
-
}),
492
-
redis_url: args.redis_url.or_else(|| {
493
-
let env_val = optional_env("REDIS_URL");
494
-
if env_val.is_empty() {
495
-
None
496
-
} else {
497
-
Some(env_val)
498
-
}
499
-
}),
500
-
sqlite_url: args.sqlite_url.or_else(|| {
501
-
let env_val = optional_env("SQLITE_URL");
502
-
if env_val.is_empty() {
503
-
None
504
-
} else {
505
-
Some(env_val)
506
-
}
507
-
}),
508
-
queue_adapter: args.queue_adapter,
509
-
queue_redis_url: args.queue_redis_url.or_else(|| {
510
-
let env_val = optional_env("QUEUE_REDIS_URL");
511
-
if env_val.is_empty() {
512
-
None
513
-
} else {
514
-
Some(env_val)
515
-
}
516
-
}),
517
-
queue_redis_prefix: args.queue_redis_prefix,
518
-
queue_worker_id: args.queue_worker_id
519
-
.or_else(|| {
520
-
let env_val = optional_env("QUEUE_WORKER_ID");
521
-
if env_val.is_empty() {
522
-
None
523
-
} else {
524
-
Some(env_val)
525
-
}
526
-
})
527
-
.unwrap_or_else(|| "worker1".to_string()),
528
-
queue_buffer_size: args.queue_buffer_size,
529
-
cache_ttl_memory: args.cache_ttl_memory,
530
-
cache_ttl_redis: args.cache_ttl_redis,
531
-
cache_ttl_sqlite: args.cache_ttl_sqlite,
532
-
queue_redis_timeout: args.queue_redis_timeout,
533
-
queue_sqlite_max_size: args.queue_sqlite_max_size,
534
-
})
349
+
// Calculate the Cache-Control header value if enabled
350
+
config.cache_control_header = config.calculate_cache_control_header();
351
+
352
+
Ok(config)
353
+
}
354
+
355
+
/// Calculate the Cache-Control header value based on configuration.
356
+
/// Returns None if cache_max_age is 0 (disabled).
357
+
fn calculate_cache_control_header(&self) -> Option<String> {
358
+
if self.cache_max_age == 0 {
359
+
return None;
360
+
}
361
+
362
+
Some(format!(
363
+
"public, max-age={}, stale-while-revalidate={}, stale-if-error={}, max-stale={}, min-fresh={}",
364
+
self.cache_max_age,
365
+
self.cache_stale_while_revalidate,
366
+
self.cache_stale_if_error,
367
+
self.cache_max_stale,
368
+
self.cache_min_fresh
369
+
))
535
370
}
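// For reference, the defaults above produce this header value (derived
// directly from the format string):
//
//   public, max-age=86400, stale-while-revalidate=86400, stale-if-error=172800, max-stale=172800, min-fresh=3600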
536
371
537
372
/// Validate the configuration for correctness and consistency
···
539
374
/// Checks:
540
375
/// - Cache TTL values are positive (> 0)
541
376
/// - Queue timeout is positive (> 0)
542
-
/// - Queue adapter is a valid value ('mpsc', 'redis', 'noop', 'none')
377
+
/// - Queue adapter is a valid value ('mpsc', 'redis', 'sqlite', 'noop', 'none')
543
378
///
544
379
/// ## Example
545
380
///
546
381
/// ```rust,no_run
547
-
/// # use quickdid::config::{Args, Config};
548
-
/// # use clap::Parser;
382
+
/// # use quickdid::config::Config;
549
383
/// # fn main() -> Result<(), Box<dyn std::error::Error>> {
550
-
/// # let args = Args::parse();
551
-
/// let config = Config::from_args(args)?;
384
+
/// let config = Config::from_env()?;
552
385
/// config.validate()?; // Ensures all values are valid
553
386
/// # Ok(())
554
387
/// # }
···
578
411
if self.queue_redis_timeout == 0 {
579
412
return Err(ConfigError::InvalidTimeout(
580
413
"QUEUE_REDIS_TIMEOUT must be > 0".to_string(),
414
+
));
415
+
}
416
+
if self.queue_redis_dedup_enabled && self.queue_redis_dedup_ttl == 0 {
417
+
return Err(ConfigError::InvalidTtl(
418
+
"QUEUE_REDIS_DEDUP_TTL must be > 0 when deduplication is enabled".to_string(),
581
419
));
582
420
}
583
421
match self.queue_adapter.as_str() {
584
422
"mpsc" | "redis" | "sqlite" | "noop" | "none" => {}
585
423
_ => {
586
424
return Err(ConfigError::InvalidValue(format!(
587
-
"Invalid QUEUE_ADAPTER '{}', must be 'mpsc', 'redis', 'sqlite', or 'noop'",
425
+
"Invalid QUEUE_ADAPTER '{}', must be 'mpsc', 'redis', 'sqlite', 'noop', or 'none'",
588
426
self.queue_adapter
589
427
)));
590
428
}
591
429
}
430
+
if self.resolver_max_concurrent > 10000 {
431
+
return Err(ConfigError::InvalidValue(
432
+
"RESOLVER_MAX_CONCURRENT must be between 0 and 10000".to_string(),
433
+
));
434
+
}
435
+
if self.resolver_max_concurrent_timeout_ms > 60000 {
436
+
return Err(ConfigError::InvalidTimeout(
437
+
"RESOLVER_MAX_CONCURRENT_TIMEOUT_MS must be <= 60000 (60 seconds)".to_string(),
438
+
));
439
+
}
440
+
441
+
// Validate metrics configuration
442
+
match self.metrics_adapter.as_str() {
443
+
"noop" | "statsd" => {}
444
+
_ => {
445
+
return Err(ConfigError::InvalidValue(format!(
446
+
"Invalid METRICS_ADAPTER '{}', must be 'noop' or 'statsd'",
447
+
self.metrics_adapter
448
+
)));
449
+
}
450
+
}
451
+
452
+
// If statsd is configured, ensure host is provided
453
+
if self.metrics_adapter == "statsd" && self.metrics_statsd_host.is_none() {
454
+
return Err(ConfigError::MissingRequired(
455
+
"METRICS_STATSD_HOST is required when METRICS_ADAPTER is 'statsd'".to_string(),
456
+
));
457
+
}
458
+
459
+
// Validate proactive refresh threshold
460
+
if self.proactive_refresh_threshold < 0.0 || self.proactive_refresh_threshold > 1.0 {
461
+
return Err(ConfigError::InvalidValue(format!(
462
+
"PROACTIVE_REFRESH_THRESHOLD must be between 0.0 and 1.0, got {}",
463
+
self.proactive_refresh_threshold
464
+
)));
465
+
}
466
+
592
467
Ok(())
593
468
}
594
469
}
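// Sketch (assumes a test process where mutating the environment is safe):
// from_env() itself accepts an unknown adapter; validate() rejects it.
//
//     std::env::set_var("HTTP_EXTERNAL", "localhost:3007");
//     std::env::set_var("QUEUE_ADAPTER", "bogus");
//     let config = Config::from_env().unwrap();
//     assert!(matches!(config.validate(), Err(ConfigError::InvalidValue(_))));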
+6
-3
src/handle_resolution_result.rs
···
11
11
/// Errors that can occur during handle resolution result operations
12
12
#[derive(Debug, Error)]
13
13
pub enum HandleResolutionError {
14
-
#[error("error-quickdid-resolution-1 System time error: {0}")]
14
+
/// System time error when getting timestamp
15
+
#[error("error-quickdid-result-1 System time error: {0}")]
15
16
SystemTime(String),
16
17
17
-
#[error("error-quickdid-serialization-1 Failed to serialize resolution result: {0}")]
18
+
/// Failed to serialize resolution result to binary format
19
+
#[error("error-quickdid-result-2 Failed to serialize resolution result: {0}")]
18
20
Serialization(String),
19
21
20
-
#[error("error-quickdid-serialization-2 Failed to deserialize resolution result: {0}")]
22
+
/// Failed to deserialize resolution result from binary format
23
+
#[error("error-quickdid-result-3 Failed to deserialize resolution result: {0}")]
21
24
Deserialization(String),
22
25
}
23
26
+54
-4
src/handle_resolver/base.rs
···
5
5
6
6
use super::errors::HandleResolverError;
7
7
use super::traits::HandleResolver;
8
+
use crate::metrics::SharedMetricsPublisher;
8
9
use async_trait::async_trait;
9
10
use atproto_identity::resolve::{DnsResolver, resolve_subject};
10
11
use reqwest::Client;
11
12
use std::sync::Arc;
13
+
use std::time::{SystemTime, UNIX_EPOCH};
12
14
13
15
/// Base handle resolver that performs actual resolution via DNS and HTTP.
14
16
///
···
24
26
/// use reqwest::Client;
25
27
/// use atproto_identity::resolve::HickoryDnsResolver;
26
28
/// use quickdid::handle_resolver::{create_base_resolver, HandleResolver};
29
+
/// use quickdid::metrics::NoOpMetricsPublisher;
27
30
///
28
31
/// # async fn example() {
29
32
/// let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
30
33
/// let http_client = Client::new();
34
+
/// let metrics = Arc::new(NoOpMetricsPublisher);
31
35
///
32
36
/// let resolver = create_base_resolver(
33
37
/// dns_resolver,
34
38
/// http_client,
39
+
/// metrics,
35
40
/// );
36
41
///
37
-
/// let did = resolver.resolve("alice.bsky.social").await.unwrap();
42
+
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
43
+
/// println!("Resolved {} at {}", did, timestamp);
38
44
/// # }
39
45
/// ```
40
46
pub(super) struct BaseHandleResolver {
···
43
49
44
50
/// HTTP client for DID document retrieval and well-known endpoint queries.
45
51
http_client: Client,
52
+
53
+
/// Metrics publisher for telemetry.
54
+
metrics: SharedMetricsPublisher,
46
55
}
47
56
48
57
#[async_trait]
49
58
impl HandleResolver for BaseHandleResolver {
50
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
51
-
resolve_subject(&self.http_client, &*self.dns_resolver, s)
59
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
60
+
let start_time = std::time::Instant::now();
61
+
62
+
// Perform DNS/HTTP resolution
63
+
let result = resolve_subject(&self.http_client, &*self.dns_resolver, s)
52
64
.await
53
-
.map_err(|e| HandleResolverError::ResolutionFailed(e.to_string()))
65
+
.map_err(|e| HandleResolverError::ResolutionFailed(e.to_string()));
66
+
67
+
let duration_ms = start_time.elapsed().as_millis() as u64;
68
+
69
+
// Publish metrics
70
+
71
+
match result {
72
+
Ok(did) => {
73
+
self.metrics
74
+
.time_with_tags(
75
+
"resolver.base.duration_ms",
76
+
duration_ms,
77
+
&[("success", "1")],
78
+
)
79
+
.await;
80
+
81
+
let timestamp = SystemTime::now()
82
+
.duration_since(UNIX_EPOCH)
83
+
.map_err(|e| {
84
+
HandleResolverError::ResolutionFailed(format!("System time error: {}", e))
85
+
})?
86
+
.as_secs();
87
+
88
+
Ok((did, timestamp))
89
+
}
90
+
Err(e) => {
91
+
self.metrics
92
+
.time_with_tags(
93
+
"resolver.base.duration_ms",
94
+
duration_ms,
95
+
&[("success", "0")],
96
+
)
97
+
.await;
98
+
Err(e)
99
+
}
100
+
}
54
101
}
55
102
}
56
103
···
63
110
///
64
111
/// * `dns_resolver` - DNS resolver for TXT record lookups
65
112
/// * `http_client` - HTTP client for well-known endpoint queries
113
+
/// * `metrics` - Metrics publisher for telemetry
66
114
pub fn create_base_resolver(
67
115
dns_resolver: Arc<dyn DnsResolver>,
68
116
http_client: Client,
117
+
metrics: SharedMetricsPublisher,
69
118
) -> Arc<dyn HandleResolver> {
70
119
Arc::new(BaseHandleResolver {
71
120
dns_resolver,
72
121
http_client,
122
+
metrics,
73
123
})
74
124
}
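// Note on the telemetry emitted above: every resolution publishes a single
// timing sample, resolver.base.duration_ms, tagged success=1 or success=0,
// so latency can be split by outcome downstream. The u64 in the returned
// tuple is the Unix timestamp (seconds) at which resolution succeeded.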
+3
src/handle_resolver/errors.rs
+70
-13
src/handle_resolver/memory.rs
···
6
6
7
7
use super::errors::HandleResolverError;
8
8
use super::traits::HandleResolver;
9
+
use crate::metrics::SharedMetricsPublisher;
9
10
use async_trait::async_trait;
10
-
use std::time::{SystemTime, UNIX_EPOCH};
11
11
use std::collections::HashMap;
12
12
use std::sync::Arc;
13
+
use std::time::{SystemTime, UNIX_EPOCH};
13
14
use tokio::sync::RwLock;
14
15
15
16
/// Result of a handle resolution cached in memory.
···
32
33
/// ```no_run
33
34
/// use std::sync::Arc;
34
35
/// use quickdid::handle_resolver::{create_caching_resolver, create_base_resolver, HandleResolver};
36
+
/// use quickdid::metrics::NoOpMetricsPublisher;
35
37
///
36
38
/// # async fn example() {
37
39
/// # use atproto_identity::resolve::HickoryDnsResolver;
38
40
/// # use reqwest::Client;
39
41
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
40
42
/// # let http_client = Client::new();
41
-
/// let base_resolver = create_base_resolver(dns_resolver, http_client);
43
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
44
+
/// let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
42
45
/// let caching_resolver = create_caching_resolver(
43
46
/// base_resolver,
44
-
/// 300 // 5 minute TTL
47
+
/// 300, // 5 minute TTL
48
+
/// metrics
45
49
/// );
46
50
///
47
51
/// // First call hits the underlying resolver
48
-
/// let did1 = caching_resolver.resolve("alice.bsky.social").await.unwrap();
52
+
/// let (did1, timestamp1) = caching_resolver.resolve("alice.bsky.social").await.unwrap();
49
53
///
50
54
/// // Second call returns cached result
51
-
/// let did2 = caching_resolver.resolve("alice.bsky.social").await.unwrap();
55
+
/// let (did2, timestamp2) = caching_resolver.resolve("alice.bsky.social").await.unwrap();
52
56
/// # }
53
57
/// ```
54
58
pub(super) struct CachingHandleResolver {
55
59
inner: Arc<dyn HandleResolver>,
56
60
cache: Arc<RwLock<HashMap<String, ResolveHandleResult>>>,
57
61
ttl_seconds: u64,
62
+
metrics: SharedMetricsPublisher,
58
63
}
59
64
60
65
impl CachingHandleResolver {
···
64
69
///
65
70
/// * `inner` - The underlying resolver to use for actual resolution
66
71
/// * `ttl_seconds` - How long to cache results in seconds
67
-
pub fn new(inner: Arc<dyn HandleResolver>, ttl_seconds: u64) -> Self {
72
+
/// * `metrics` - Metrics publisher for telemetry
73
+
pub fn new(
74
+
inner: Arc<dyn HandleResolver>,
75
+
ttl_seconds: u64,
76
+
metrics: SharedMetricsPublisher,
77
+
) -> Self {
68
78
Self {
69
79
inner,
70
80
cache: Arc::new(RwLock::new(HashMap::new())),
71
81
ttl_seconds,
82
+
metrics,
72
83
}
73
84
}
74
85
···
87
98
88
99
#[async_trait]
89
100
impl HandleResolver for CachingHandleResolver {
90
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
101
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
91
102
let handle = s.to_string();
92
103
93
104
// Check cache first
···
98
109
ResolveHandleResult::Found(timestamp, did) => {
99
110
if !self.is_expired(*timestamp) {
100
111
tracing::debug!("Cache hit for handle {}: {}", handle, did);
101
-
return Ok(did.clone());
112
+
self.metrics.incr("resolver.memory.cache_hit").await;
113
+
return Ok((did.clone(), *timestamp));
102
114
}
103
115
tracing::debug!("Cache entry expired for handle {}", handle);
116
+
self.metrics.incr("resolver.memory.cache_expired").await;
104
117
}
105
118
ResolveHandleResult::NotFound(timestamp, error) => {
106
119
if !self.is_expired(*timestamp) {
···
109
122
handle,
110
123
error
111
124
);
125
+
self.metrics
126
+
.incr("resolver.memory.cache_hit_not_resolved")
127
+
.await;
112
128
return Err(HandleResolverError::HandleNotFoundCached(error.clone()));
113
129
}
114
130
tracing::debug!("Cache entry expired for handle {}", handle);
131
+
self.metrics.incr("resolver.memory.cache_expired").await;
115
132
}
116
133
}
117
134
}
···
119
136
120
137
// Not in cache or expired, resolve through inner resolver
121
138
tracing::debug!("Cache miss for handle {}, resolving...", handle);
139
+
self.metrics.incr("resolver.memory.cache_miss").await;
122
140
let result = self.inner.resolve(s).await;
123
-
let timestamp = Self::current_timestamp();
124
141
125
142
// Store in cache
126
143
{
127
144
let mut cache = self.cache.write().await;
128
145
match &result {
129
-
Ok(did) => {
146
+
Ok((did, timestamp)) => {
130
147
cache.insert(
131
148
handle.clone(),
132
-
ResolveHandleResult::Found(timestamp, did.clone()),
149
+
ResolveHandleResult::Found(*timestamp, did.clone()),
133
150
);
151
+
self.metrics.incr("resolver.memory.cache_set").await;
134
152
tracing::debug!(
135
153
"Cached successful resolution for handle {}: {}",
136
154
handle,
···
138
156
);
139
157
}
140
158
Err(e) => {
159
+
let timestamp = Self::current_timestamp();
141
160
cache.insert(
142
161
handle.clone(),
143
162
ResolveHandleResult::NotFound(timestamp, e.to_string()),
144
163
);
164
+
self.metrics.incr("resolver.memory.cache_set_error").await;
145
165
tracing::debug!("Cached failed resolution for handle {}: {}", handle, e);
146
166
}
147
167
}
168
+
169
+
// Track cache size
170
+
let cache_size = cache.len() as u64;
171
+
self.metrics
172
+
.gauge("resolver.memory.cache_entries", cache_size)
173
+
.await;
148
174
}
149
175
150
176
result
151
177
}
178
+
179
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
180
+
// Normalize the handle to lowercase
181
+
let handle = handle.to_lowercase();
182
+
183
+
// Update the in-memory cache
184
+
{
185
+
let mut cache = self.cache.write().await;
186
+
let timestamp = Self::current_timestamp();
187
+
cache.insert(
188
+
handle.clone(),
189
+
ResolveHandleResult::Found(timestamp, did.to_string()),
190
+
);
191
+
self.metrics.incr("resolver.memory.set").await;
192
+
tracing::debug!("Set handle {} -> DID {} in memory cache", handle, did);
193
+
194
+
// Track cache size
195
+
let cache_size = cache.len() as u64;
196
+
self.metrics
197
+
.gauge("resolver.memory.cache_entries", cache_size)
198
+
.await;
199
+
}
200
+
201
+
// Chain to inner resolver
202
+
self.inner.set(&handle, did).await
203
+
}
152
204
}
153
205
154
206
/// Create a new in-memory caching handle resolver.
···
160
212
///
161
213
/// * `inner` - The underlying resolver to use for actual resolution
162
214
/// * `ttl_seconds` - How long to cache results in seconds
215
+
/// * `metrics` - Metrics publisher for telemetry
163
216
///
164
217
/// # Example
165
218
///
166
219
/// ```no_run
167
220
/// use std::sync::Arc;
168
221
/// use quickdid::handle_resolver::{create_base_resolver, create_caching_resolver, HandleResolver};
222
+
/// use quickdid::metrics::NoOpMetricsPublisher;
169
223
///
170
224
/// # async fn example() {
171
225
/// # use atproto_identity::resolve::HickoryDnsResolver;
172
226
/// # use reqwest::Client;
173
227
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
174
228
/// # let http_client = Client::new();
229
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
175
230
/// let base = create_base_resolver(
176
231
/// dns_resolver,
177
232
/// http_client,
233
+
/// metrics.clone(),
178
234
/// );
179
235
///
180
-
/// let resolver = create_caching_resolver(base, 300); // 5 minute TTL
236
+
/// let resolver = create_caching_resolver(base, 300, metrics); // 5 minute TTL
181
237
/// let (did, _timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
182
238
/// # }
183
239
/// ```
184
240
pub fn create_caching_resolver(
185
241
inner: Arc<dyn HandleResolver>,
186
242
ttl_seconds: u64,
243
+
metrics: SharedMetricsPublisher,
187
244
) -> Arc<dyn HandleResolver> {
188
-
Arc::new(CachingHandleResolver::new(inner, ttl_seconds))
245
+
Arc::new(CachingHandleResolver::new(inner, ttl_seconds, metrics))
189
246
}
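// Sketch (hypothetical handle and DID): manually priming this cache layer,
// e.g. from an AT Protocol identity event. `set` records the mapping here
// with a fresh timestamp and then chains to the inner resolver.
//
//     resolver.set("alice.bsky.social", "did:plc:abc123xyz").await?;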
+13
-1
src/handle_resolver/mod.rs
···
9
9
//! implementations:
10
10
//!
11
11
//! - [`BaseHandleResolver`]: Core resolver that performs actual DNS/HTTP lookups
12
+
//! - [`RateLimitedHandleResolver`]: Rate limiting wrapper using semaphore-based concurrency control
12
13
//! - [`CachingHandleResolver`]: In-memory caching wrapper with configurable TTL
13
14
//! - [`RedisHandleResolver`]: Redis-backed persistent caching with binary serialization
14
15
//! - [`SqliteHandleResolver`]: SQLite-backed persistent caching for single-instance deployments
···
18
19
//! ```no_run
19
20
//! use std::sync::Arc;
20
21
//! use quickdid::handle_resolver::{create_base_resolver, create_caching_resolver, HandleResolver};
22
+
//! use quickdid::metrics::NoOpMetricsPublisher;
21
23
//!
22
24
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
23
25
//! # use atproto_identity::resolve::HickoryDnsResolver;
24
26
//! # use reqwest::Client;
25
27
//! # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
26
28
//! # let http_client = Client::new();
29
+
//! # let metrics = Arc::new(NoOpMetricsPublisher);
27
30
//! // Create base resolver using factory function
28
31
//! let base = create_base_resolver(
29
32
//! dns_resolver,
30
33
//! http_client,
34
+
//! metrics.clone(),
31
35
//! );
32
36
//!
33
37
//! // Wrap with in-memory caching
34
-
//! let resolver = create_caching_resolver(base, 300);
38
+
//! let resolver = create_caching_resolver(base, 300, metrics);
35
39
//!
36
40
//! // Resolve a handle
37
41
//! let (did, _timestamp) = resolver.resolve("alice.bsky.social").await?;
···
43
47
mod base;
44
48
mod errors;
45
49
mod memory;
50
+
mod proactive_refresh;
51
+
mod rate_limited;
46
52
mod redis;
47
53
mod sqlite;
48
54
mod traits;
···
54
60
// Factory functions for creating resolvers
55
61
pub use base::create_base_resolver;
56
62
pub use memory::create_caching_resolver;
63
+
pub use proactive_refresh::{
64
+
ProactiveRefreshResolver, create_proactive_refresh_resolver,
65
+
create_proactive_refresh_resolver_dyn, create_proactive_refresh_resolver_with_metrics,
66
+
create_proactive_refresh_resolver_with_threshold,
67
+
};
68
+
pub use rate_limited::{create_rate_limited_resolver, create_rate_limited_resolver_with_timeout};
57
69
pub use redis::{create_redis_resolver, create_redis_resolver_with_ttl};
58
70
pub use sqlite::{create_sqlite_resolver, create_sqlite_resolver_with_ttl};
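// Sketch (assumed wiring) composing the exported factories in the layering
// the module docs describe: cache in front, rate limiting between cache and
// base.
//
//     let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
//     let limited = create_rate_limited_resolver(base, 10, metrics.clone());
//     let resolver = create_caching_resolver(limited, 300, metrics);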
+438
src/handle_resolver/proactive_refresh.rs
···
1
+
use crate::handle_resolution_result::HandleResolutionResult;
2
+
use crate::handle_resolver::{HandleResolver, HandleResolverError};
3
+
use crate::metrics::MetricsPublisher;
4
+
use crate::queue::{HandleResolutionWork, QueueAdapter};
5
+
use async_trait::async_trait;
6
+
use std::sync::Arc;
7
+
use std::time::{SystemTime, UNIX_EPOCH};
8
+
use tracing::{debug, trace};
9
+
10
+
/// Create a ProactiveRefreshResolver with default 80% threshold
11
+
///
12
+
/// # Arguments
13
+
/// * `inner` - The inner resolver to wrap
14
+
/// * `queue` - The queue adapter for background refresh tasks
15
+
/// * `cache_ttl` - The TTL in seconds for cache entries
16
+
pub fn create_proactive_refresh_resolver<R, Q>(
17
+
inner: Arc<R>,
18
+
queue: Arc<Q>,
19
+
cache_ttl: u64,
20
+
) -> Arc<ProactiveRefreshResolver<R, Q>>
21
+
where
22
+
R: HandleResolver + Send + Sync + 'static,
23
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync + 'static,
24
+
{
25
+
Arc::new(ProactiveRefreshResolver::new(inner, queue, cache_ttl))
26
+
}
27
+
28
+
/// Create a ProactiveRefreshResolver with custom threshold
29
+
///
30
+
/// # Arguments
31
+
/// * `inner` - The inner resolver to wrap
32
+
/// * `queue` - The queue adapter for background refresh tasks
33
+
/// * `cache_ttl` - The TTL in seconds for cache entries
34
+
/// * `threshold` - The threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
35
+
pub fn create_proactive_refresh_resolver_with_threshold<R, Q>(
36
+
inner: Arc<R>,
37
+
queue: Arc<Q>,
38
+
cache_ttl: u64,
39
+
threshold: f64,
40
+
) -> Arc<ProactiveRefreshResolver<R, Q>>
41
+
where
42
+
R: HandleResolver + Send + Sync + 'static,
43
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync + 'static,
44
+
{
45
+
Arc::new(ProactiveRefreshResolver::with_threshold(
46
+
inner, queue, cache_ttl, threshold,
47
+
))
48
+
}
49
+
50
+
/// Wrapper struct for dynamic dispatch with proactive refresh
51
+
/// This works with trait objects instead of concrete types
52
+
pub struct DynProactiveRefreshResolver {
53
+
inner: Arc<dyn HandleResolver>,
54
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
55
+
metrics: Option<Arc<dyn MetricsPublisher>>,
56
+
#[allow(dead_code)]
57
+
cache_ttl: u64,
58
+
#[allow(dead_code)]
59
+
refresh_threshold: f64,
60
+
}
61
+
62
+
impl DynProactiveRefreshResolver {
63
+
pub fn new(
64
+
inner: Arc<dyn HandleResolver>,
65
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
66
+
cache_ttl: u64,
67
+
refresh_threshold: f64,
68
+
) -> Self {
69
+
Self::with_metrics(inner, queue, None, cache_ttl, refresh_threshold)
70
+
}
71
+
72
+
pub fn with_metrics(
73
+
inner: Arc<dyn HandleResolver>,
74
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
75
+
metrics: Option<Arc<dyn MetricsPublisher>>,
76
+
cache_ttl: u64,
77
+
refresh_threshold: f64,
78
+
) -> Self {
79
+
Self {
80
+
inner,
81
+
queue,
82
+
metrics,
83
+
cache_ttl,
84
+
refresh_threshold: refresh_threshold.clamp(0.0, 1.0),
85
+
}
86
+
}
87
+
88
+
async fn maybe_queue_for_refresh(&self, handle: &str, resolve_time: u64) {
89
+
// If resolution took less than 5ms, it was probably a cache hit
90
+
if resolve_time < 5000 {
91
+
trace!(
92
+
handle = handle,
93
+
resolve_time_us = resolve_time,
94
+
"Fast resolution detected, considering proactive refresh"
95
+
);
96
+
97
+
if let Some(metrics) = &self.metrics {
98
+
metrics.incr("proactive_refresh.cache_hit_detected").await;
99
+
}
100
+
101
+
// Simple heuristic: queue for refresh with some probability
102
+
let now = SystemTime::now()
103
+
.duration_since(UNIX_EPOCH)
104
+
.unwrap_or_default()
105
+
.as_secs();
106
+
107
+
// Sample coarsely: only queue when the current epoch second is a multiple of 60 (roughly 1 in 60 requests)
108
+
if now % 60 == 0 {
109
+
let work = HandleResolutionWork {
110
+
handle: handle.to_string(),
111
+
};
112
+
113
+
if let Err(e) = self.queue.push(work).await {
114
+
debug!(
115
+
handle = handle,
116
+
error = %e,
117
+
"Failed to queue handle for proactive refresh"
118
+
);
119
+
if let Some(metrics) = &self.metrics {
120
+
metrics.incr("proactive_refresh.queue_error").await;
121
+
}
122
+
} else {
123
+
debug!(handle = handle, "Queued handle for proactive refresh");
124
+
if let Some(metrics) = &self.metrics {
125
+
metrics.incr("proactive_refresh.queued").await;
126
+
}
127
+
}
128
+
}
129
+
}
130
+
}
131
+
}
132
+
133
+
#[async_trait]
134
+
impl HandleResolver for DynProactiveRefreshResolver {
135
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
136
+
// Resolve through the inner resolver
137
+
let (did, resolve_time) = self.inner.resolve(handle).await?;
138
+
139
+
// Check if we should queue for refresh based on resolution time
140
+
self.maybe_queue_for_refresh(handle, resolve_time).await;
141
+
142
+
Ok((did, resolve_time))
143
+
}
144
+
145
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
146
+
// Simply chain to inner resolver - no proactive refresh needed for manual sets
147
+
self.inner.set(handle, did).await
148
+
}
149
+
}
150
+
151
+
/// Create a ProactiveRefreshResolver with custom threshold using trait objects
152
+
/// This version works with dyn HandleResolver and dyn QueueAdapter
153
+
///
154
+
/// # Arguments
155
+
/// * `inner` - The inner resolver to wrap
156
+
/// * `queue` - The queue adapter for background refresh tasks
157
+
/// * `cache_ttl` - The TTL in seconds for cache entries
158
+
/// * `threshold` - The threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
159
+
pub fn create_proactive_refresh_resolver_dyn(
160
+
inner: Arc<dyn HandleResolver>,
161
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
162
+
cache_ttl: u64,
163
+
threshold: f64,
164
+
) -> Arc<dyn HandleResolver> {
165
+
Arc::new(DynProactiveRefreshResolver::new(
166
+
inner, queue, cache_ttl, threshold,
167
+
))
168
+
}
169
+
170
+
/// Create a ProactiveRefreshResolver with metrics support
171
+
pub fn create_proactive_refresh_resolver_with_metrics(
172
+
inner: Arc<dyn HandleResolver>,
173
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
174
+
metrics: Arc<dyn MetricsPublisher>,
175
+
cache_ttl: u64,
176
+
threshold: f64,
177
+
) -> Arc<dyn HandleResolver> {
178
+
Arc::new(DynProactiveRefreshResolver::with_metrics(
179
+
inner,
180
+
queue,
181
+
Some(metrics),
182
+
cache_ttl,
183
+
threshold,
184
+
))
185
+
}
186
+
187
+
/// A handle resolver that proactively refreshes cache entries when they reach
188
+
/// a certain staleness threshold (default 80% of TTL).
189
+
///
190
+
/// This resolver wraps another resolver and checks successful resolutions from cache.
191
+
/// When a cached entry has lived for more than the threshold percentage of its TTL,
192
+
/// it queues the handle for background refresh to keep the cache warm.
193
+
///
194
+
/// Note: Due to the current trait design, this implementation uses the resolution time
195
+
/// as a heuristic. When resolve_time is 0 (instant cache hit), it may queue for refresh.
196
+
/// For full functionality, the trait would need to expose cache timestamps.
197
+
pub struct ProactiveRefreshResolver<R: HandleResolver, Q: QueueAdapter<HandleResolutionWork>> {
198
+
inner: Arc<R>,
199
+
queue: Arc<Q>,
200
+
/// TTL in seconds for cache entries
201
+
cache_ttl: u64,
202
+
/// Threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
203
+
/// Default is 0.8 (80%)
204
+
refresh_threshold: f64,
205
+
}
206
+
207
+
impl<R: HandleResolver, Q: QueueAdapter<HandleResolutionWork>> ProactiveRefreshResolver<R, Q> {
208
+
pub fn new(inner: Arc<R>, queue: Arc<Q>, cache_ttl: u64) -> Self {
209
+
Self::with_threshold(inner, queue, cache_ttl, 0.8)
210
+
}
211
+
212
+
pub fn with_threshold(
213
+
inner: Arc<R>,
214
+
queue: Arc<Q>,
215
+
cache_ttl: u64,
216
+
refresh_threshold: f64,
217
+
) -> Self {
218
+
Self {
219
+
inner,
220
+
queue,
221
+
cache_ttl,
222
+
refresh_threshold: refresh_threshold.clamp(0.0, 1.0),
223
+
}
224
+
}
225
+
226
+
/// Check if a cached entry needs proactive refresh based on its age
227
+
#[allow(dead_code)]
228
+
fn needs_refresh(&self, result: &HandleResolutionResult) -> bool {
229
+
let now = SystemTime::now()
230
+
.duration_since(UNIX_EPOCH)
231
+
.unwrap_or_default()
232
+
.as_secs();
233
+
234
+
let age = now.saturating_sub(result.timestamp);
235
+
let threshold = (self.cache_ttl as f64 * self.refresh_threshold) as u64;
236
+
237
+
let needs_refresh = age >= threshold;
238
+
239
+
if needs_refresh {
240
+
debug!(
241
+
handle = ?result.to_did(),
242
+
age_seconds = age,
243
+
threshold_seconds = threshold,
244
+
cache_ttl = self.cache_ttl,
245
+
"Cache entry needs proactive refresh"
246
+
);
247
+
} else {
248
+
trace!(
249
+
handle = ?result.to_did(),
250
+
age_seconds = age,
251
+
threshold_seconds = threshold,
252
+
"Cache entry still fresh"
253
+
);
254
+
}
255
+
256
+
needs_refresh
257
+
}
258
+
259
+
/// Queue a handle for background refresh
260
+
async fn queue_for_refresh(&self, handle: &str) {
261
+
let work = HandleResolutionWork {
262
+
handle: handle.to_string(),
263
+
};
264
+
265
+
match self.queue.push(work).await {
266
+
Ok(_) => {
267
+
debug!(handle = handle, "Queued handle for proactive refresh");
268
+
}
269
+
Err(e) => {
270
+
// Don't fail the request if we can't queue for refresh
271
+
debug!(
272
+
handle = handle,
273
+
error = %e,
274
+
"Failed to queue handle for proactive refresh"
275
+
);
276
+
}
277
+
}
278
+
}
279
+
280
+
/// Check if we should queue for refresh based on resolution time
281
+
///
282
+
/// This is a heuristic approach:
283
+
/// - If resolve_time is very low (< 5ms), it was likely a cache hit
284
+
/// - We probabilistically queue for refresh based on time since service start
285
+
///
286
+
/// For proper implementation, the HandleResolver trait would need to expose
287
+
/// cache metadata or return HandleResolutionResult directly.
288
+
async fn maybe_queue_for_refresh(&self, handle: &str, resolve_time: u64) {
289
+
// If resolution took less than 5ms, it was probably a cache hit
290
+
if resolve_time < 5000 {
291
+
// Use a simple probabilistic approach for demonstration
292
+
// In production, you'd want access to the actual cache timestamp
293
+
trace!(
294
+
handle = handle,
295
+
resolve_time_us = resolve_time,
296
+
"Fast resolution detected, considering proactive refresh"
297
+
);
298
+
299
+
// Queue for refresh with some probability to avoid overwhelming the queue
300
+
// This is a simplified approach - ideally we'd have access to cache metadata
301
+
let now = SystemTime::now()
302
+
.duration_since(UNIX_EPOCH)
303
+
.unwrap_or_default()
304
+
.as_secs();
305
+
306
+
// Simple heuristic: only queue when the current epoch second is a multiple of 60 (roughly 1 in 60 requests)
307
+
if now % 60 == 0 {
308
+
self.queue_for_refresh(handle).await;
309
+
}
310
+
}
311
+
}
312
+
}
313
+
314
+
#[async_trait]
315
+
impl<R, Q> HandleResolver for ProactiveRefreshResolver<R, Q>
316
+
where
317
+
R: HandleResolver + Send + Sync,
318
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync,
319
+
{
320
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
321
+
// Resolve through the inner resolver
322
+
let (did, resolve_time) = self.inner.resolve(handle).await?;
323
+
324
+
// Check if we should queue for refresh based on resolution time
325
+
self.maybe_queue_for_refresh(handle, resolve_time).await;
326
+
327
+
Ok((did, resolve_time))
328
+
}
329
+
}
330
+
331
+
#[cfg(test)]
332
+
mod tests {
333
+
use super::*;
334
+
use crate::handle_resolution_result::DidMethodType;
335
+
336
+
#[test]
337
+
fn test_needs_refresh_calculation() {
338
+
// Create a resolver with 100 second TTL and 80% threshold
339
+
let inner = Arc::new(MockResolver);
340
+
let queue = Arc::new(MockQueueAdapter);
341
+
let resolver = ProactiveRefreshResolver::new(inner, queue, 100);
342
+
343
+
let now = SystemTime::now()
344
+
.duration_since(UNIX_EPOCH)
345
+
.unwrap()
346
+
.as_secs();
347
+
348
+
// Test entry that's 50% through TTL (should not refresh)
349
+
let fresh_result = HandleResolutionResult {
350
+
timestamp: now - 50,
351
+
method_type: DidMethodType::Plc,
352
+
payload: "alice123".to_string(),
353
+
};
354
+
assert!(!resolver.needs_refresh(&fresh_result));
355
+
356
+
// Test entry that's 80% through TTL (should refresh)
357
+
let stale_result = HandleResolutionResult {
358
+
timestamp: now - 80,
359
+
method_type: DidMethodType::Plc,
360
+
payload: "alice123".to_string(),
361
+
};
362
+
assert!(resolver.needs_refresh(&stale_result));
363
+
364
+
// Test entry that's 90% through TTL (should definitely refresh)
365
+
let very_stale_result = HandleResolutionResult {
366
+
timestamp: now - 90,
367
+
method_type: DidMethodType::Plc,
368
+
payload: "alice123".to_string(),
369
+
};
370
+
assert!(resolver.needs_refresh(&very_stale_result));
371
+
}
372
+
373
+
#[test]
374
+
fn test_custom_threshold() {
375
+
let inner = Arc::new(MockResolver);
376
+
let queue = Arc::new(MockQueueAdapter);
377
+
378
+
// Create resolver with 50% threshold
379
+
let resolver = ProactiveRefreshResolver::with_threshold(inner, queue, 100, 0.5);
380
+
381
+
let now = SystemTime::now()
382
+
.duration_since(UNIX_EPOCH)
383
+
.unwrap()
384
+
.as_secs();
385
+
386
+
// Test entry that's 40% through TTL (should not refresh with 50% threshold)
387
+
let result_40 = HandleResolutionResult {
388
+
timestamp: now - 40,
389
+
method_type: DidMethodType::Plc,
390
+
payload: "alice123".to_string(),
391
+
};
392
+
assert!(!resolver.needs_refresh(&result_40));
393
+
394
+
// Test entry that's 60% through TTL (should refresh with 50% threshold)
395
+
let result_60 = HandleResolutionResult {
396
+
timestamp: now - 60,
397
+
method_type: DidMethodType::Plc,
398
+
payload: "alice123".to_string(),
399
+
};
400
+
assert!(resolver.needs_refresh(&result_60));
401
+
}
402
+
403
+
// Mock resolver for testing
404
+
struct MockResolver;
405
+
406
+
#[async_trait]
407
+
impl HandleResolver for MockResolver {
408
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
409
+
Ok((format!("did:plc:{}", handle), 1000))
410
+
}
411
+
}
412
+
413
+
// Mock queue adapter for testing
414
+
struct MockQueueAdapter;
415
+
416
+
#[async_trait]
417
+
impl QueueAdapter<HandleResolutionWork> for MockQueueAdapter {
418
+
async fn pull(&self) -> Option<HandleResolutionWork> {
419
+
None
420
+
}
421
+
422
+
async fn push(&self, _work: HandleResolutionWork) -> crate::queue::Result<()> {
423
+
Ok(())
424
+
}
425
+
426
+
async fn ack(&self, _item: &HandleResolutionWork) -> crate::queue::Result<()> {
427
+
Ok(())
428
+
}
429
+
430
+
async fn try_push(&self, _work: HandleResolutionWork) -> crate::queue::Result<()> {
431
+
Ok(())
432
+
}
433
+
434
+
async fn is_healthy(&self) -> bool {
435
+
true
436
+
}
437
+
}
438
+
}
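// Sketch (assumed wiring): enabling proactive refresh over an existing stack.
// `queue` is whichever QueueAdapter<HandleResolutionWork> the deployment
// uses; 600 and 0.8 mirror the documented cache TTL and threshold defaults.
//
//     let refreshing = create_proactive_refresh_resolver_dyn(resolver, queue, 600, 0.8);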
+294
src/handle_resolver/rate_limited.rs
···
1
+
//! Rate-limited handle resolver implementation.
2
+
//!
3
+
//! This module provides a handle resolver wrapper that limits concurrent
4
+
//! resolution requests using a semaphore to implement basic rate limiting.
5
+
6
+
use super::errors::HandleResolverError;
7
+
use super::traits::HandleResolver;
8
+
use crate::metrics::SharedMetricsPublisher;
9
+
use async_trait::async_trait;
10
+
use std::sync::Arc;
11
+
use std::time::Duration;
12
+
use tokio::sync::Semaphore;
13
+
use tokio::time::timeout;
14
+
15
+
/// Rate-limited handle resolver that constrains concurrent resolutions.
16
+
///
17
+
/// This resolver wraps an inner resolver and uses a semaphore to limit
18
+
/// the number of concurrent resolution requests. This provides basic
19
+
/// rate limiting and protects upstream services from being overwhelmed.
20
+
///
21
+
/// # Architecture
22
+
///
23
+
/// The rate limiter should be placed between the base resolver and any
24
+
/// caching layers:
25
+
/// ```text
26
+
/// Request -> Cache -> RateLimited -> Base -> DNS/HTTP
27
+
/// ```
28
+
///
29
+
/// # Example
30
+
///
31
+
/// ```no_run
32
+
/// use std::sync::Arc;
33
+
/// use quickdid::handle_resolver::{
34
+
/// create_base_resolver,
35
+
/// create_rate_limited_resolver,
36
+
/// HandleResolver,
37
+
/// };
38
+
/// use quickdid::metrics::NoOpMetricsPublisher;
39
+
///
40
+
/// # async fn example() {
41
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
42
+
/// # use reqwest::Client;
43
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
44
+
/// # let http_client = Client::new();
45
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
46
+
/// // Create base resolver
47
+
/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
48
+
///
49
+
/// // Wrap with rate limiting (max 10 concurrent resolutions)
50
+
/// let rate_limited = create_rate_limited_resolver(base, 10, metrics);
51
+
///
52
+
/// // Use the rate-limited resolver
53
+
/// let (did, timestamp) = rate_limited.resolve("alice.bsky.social").await.unwrap();
54
+
/// # }
55
+
/// ```
56
+
pub(super) struct RateLimitedHandleResolver {
57
+
/// Inner resolver that performs actual resolution.
58
+
inner: Arc<dyn HandleResolver>,
59
+
60
+
/// Semaphore for limiting concurrent resolutions.
61
+
semaphore: Arc<Semaphore>,
62
+
63
+
/// Optional timeout for acquiring permits (in milliseconds).
64
+
/// When None or 0, no timeout is applied.
65
+
timeout_ms: Option<u64>,
66
+
67
+
/// Metrics publisher for telemetry.
68
+
metrics: SharedMetricsPublisher,
69
+
}
70
+
71
+
impl RateLimitedHandleResolver {
72
+
/// Create a new rate-limited resolver.
73
+
///
74
+
/// # Arguments
75
+
///
76
+
/// * `inner` - The inner resolver to wrap
77
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
78
+
/// * `metrics` - Metrics publisher for telemetry
79
+
pub fn new(
80
+
inner: Arc<dyn HandleResolver>,
81
+
max_concurrent: usize,
82
+
metrics: SharedMetricsPublisher,
83
+
) -> Self {
84
+
Self {
85
+
inner,
86
+
semaphore: Arc::new(Semaphore::new(max_concurrent)),
87
+
timeout_ms: None,
88
+
metrics,
89
+
}
90
+
}
91
+
92
+
/// Create a new rate-limited resolver with timeout.
93
+
///
94
+
/// # Arguments
95
+
///
96
+
/// * `inner` - The inner resolver to wrap
97
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
98
+
/// * `timeout_ms` - Timeout in milliseconds for acquiring permits (0 = no timeout)
99
+
/// * `metrics` - Metrics publisher for telemetry
100
+
pub fn new_with_timeout(
101
+
inner: Arc<dyn HandleResolver>,
102
+
max_concurrent: usize,
103
+
timeout_ms: u64,
104
+
metrics: SharedMetricsPublisher,
105
+
) -> Self {
106
+
Self {
107
+
inner,
108
+
semaphore: Arc::new(Semaphore::new(max_concurrent)),
109
+
timeout_ms: if timeout_ms > 0 {
110
+
Some(timeout_ms)
111
+
} else {
112
+
None
113
+
},
114
+
metrics,
115
+
}
116
+
}
117
+
}
118
+
119
+
#[async_trait]
120
+
impl HandleResolver for RateLimitedHandleResolver {
121
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
122
+
let permit_start = std::time::Instant::now();
123
+
124
+
// Publish the number of available permits (a proxy for rate limiter pressure)
125
+
let available_permits = self.semaphore.available_permits();
126
+
self.metrics
127
+
.gauge(
128
+
"resolver.rate_limit.available_permits",
129
+
available_permits as u64,
130
+
)
131
+
.await;
132
+
133
+
// Acquire a permit from the semaphore, with optional timeout
134
+
let _permit = match self.timeout_ms {
135
+
Some(timeout_ms) if timeout_ms > 0 => {
136
+
// Apply timeout when acquiring permit
137
+
let duration = Duration::from_millis(timeout_ms);
138
+
match timeout(duration, self.semaphore.acquire()).await {
139
+
Ok(Ok(permit)) => {
140
+
let wait_ms = permit_start.elapsed().as_millis() as u64;
141
+
self.metrics
142
+
.time("resolver.rate_limit.permit_acquired", wait_ms)
143
+
.await;
144
+
permit
145
+
}
146
+
Ok(Err(e)) => {
147
+
// Semaphore error (e.g., closed)
148
+
self.metrics.incr("resolver.rate_limit.permit_error").await;
149
+
return Err(HandleResolverError::ResolutionFailed(format!(
150
+
"Failed to acquire rate limit permit: {}",
151
+
e
152
+
)));
153
+
}
154
+
Err(_) => {
155
+
// Timeout occurred
156
+
self.metrics
157
+
.incr("resolver.rate_limit.permit_timeout")
158
+
.await;
159
+
return Err(HandleResolverError::ResolutionFailed(format!(
160
+
"Rate limit permit acquisition timed out after {}ms",
161
+
timeout_ms
162
+
)));
163
+
}
164
+
}
165
+
}
166
+
_ => {
167
+
// No timeout configured, wait indefinitely
168
+
match self.semaphore.acquire().await {
169
+
Ok(permit) => {
170
+
let wait_ms = permit_start.elapsed().as_millis() as u64;
171
+
self.metrics
172
+
.time("resolver.rate_limit.permit_acquired", wait_ms)
173
+
.await;
174
+
permit
175
+
}
176
+
Err(e) => {
177
+
self.metrics.incr("resolver.rate_limit.permit_error").await;
178
+
return Err(HandleResolverError::ResolutionFailed(format!(
179
+
"Failed to acquire rate limit permit: {}",
180
+
e
181
+
)));
182
+
}
183
+
}
184
+
}
185
+
};
186
+
187
+
// With permit acquired, forward to inner resolver
188
+
self.inner.resolve(s).await
189
+
}
190
+
191
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
192
+
// Set operations don't need rate limiting since they're typically administrative
193
+
// and don't involve network calls to external services
194
+
self.inner.set(handle, did).await
195
+
}
196
+
}
197
+
198
+
/// Create a rate-limited handle resolver.
199
+
///
200
+
/// This factory function creates a new [`RateLimitedHandleResolver`] that wraps
201
+
/// the provided inner resolver with concurrency limiting.
202
+
///
203
+
/// # Arguments
204
+
///
205
+
/// * `inner` - The resolver to wrap with rate limiting
206
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
207
+
/// * `metrics` - Metrics publisher for telemetry
208
+
///
209
+
/// # Returns
210
+
///
211
+
/// An `Arc<dyn HandleResolver>` that can be used wherever a handle resolver is needed.
212
+
///
213
+
/// # Example
214
+
///
215
+
/// ```no_run
216
+
/// use std::sync::Arc;
217
+
/// use quickdid::handle_resolver::{
218
+
/// create_base_resolver,
219
+
/// create_rate_limited_resolver,
220
+
/// };
221
+
///
222
+
/// # async fn example() {
223
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
224
+
/// # use reqwest::Client;
225
+
/// # use quickdid::metrics::NoOpMetricsPublisher;
226
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
227
+
/// # let http_client = Client::new();
228
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
229
+
/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
230
+
/// let rate_limited = create_rate_limited_resolver(base, 10, metrics);
231
+
/// # }
232
+
/// ```
233
+
pub fn create_rate_limited_resolver(
234
+
inner: Arc<dyn HandleResolver>,
235
+
max_concurrent: usize,
236
+
metrics: SharedMetricsPublisher,
237
+
) -> Arc<dyn HandleResolver> {
238
+
Arc::new(RateLimitedHandleResolver::new(
239
+
inner,
240
+
max_concurrent,
241
+
metrics,
242
+
))
243
+
}
244
+
245
+
/// Create a rate-limited handle resolver with timeout.
246
+
///
247
+
/// This factory function creates a new [`RateLimitedHandleResolver`] that wraps
248
+
/// the provided inner resolver with concurrency limiting and timeout for permit acquisition.
249
+
///
250
+
/// # Arguments
251
+
///
252
+
/// * `inner` - The resolver to wrap with rate limiting
253
+
/// * `max_concurrent` - Maximum number of concurrent resolutions allowed
254
+
/// * `timeout_ms` - Timeout in milliseconds for acquiring permits (0 = no timeout)
255
+
/// * `metrics` - Metrics publisher for telemetry
256
+
///
257
+
/// # Returns
258
+
///
259
+
/// An `Arc<dyn HandleResolver>` that can be used wherever a handle resolver is needed.
260
+
///
261
+
/// # Example
262
+
///
263
+
/// ```no_run
264
+
/// use std::sync::Arc;
265
+
/// use quickdid::handle_resolver::{
266
+
/// create_base_resolver,
267
+
/// create_rate_limited_resolver_with_timeout,
268
+
/// };
269
+
///
270
+
/// # async fn example() {
271
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
272
+
/// # use reqwest::Client;
273
+
/// # use quickdid::metrics::NoOpMetricsPublisher;
274
+
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
275
+
/// # let http_client = Client::new();
276
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
277
+
/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
278
+
/// // Rate limit with 10 concurrent resolutions and 5 second timeout
279
+
/// let rate_limited = create_rate_limited_resolver_with_timeout(base, 10, 5000, metrics);
280
+
/// # }
281
+
/// ```
282
+
pub fn create_rate_limited_resolver_with_timeout(
283
+
inner: Arc<dyn HandleResolver>,
284
+
max_concurrent: usize,
285
+
timeout_ms: u64,
286
+
metrics: SharedMetricsPublisher,
287
+
) -> Arc<dyn HandleResolver> {
288
+
Arc::new(RateLimitedHandleResolver::new_with_timeout(
289
+
inner,
290
+
max_concurrent,
291
+
timeout_ms,
292
+
metrics,
293
+
))
294
+
}
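Stripped of the metrics plumbing, the resolver above is the standard Tokio pattern of bounding concurrency with a semaphore and optionally timing out the wait for a permit. A self-contained sketch of that pattern, with illustrative limits and durations rather than the crate's types:

```rust
use std::sync::Arc;
use std::time::Duration;
use tokio::{sync::Semaphore, time::timeout};

#[tokio::main]
async fn main() {
    // Allow at most 2 concurrent "resolutions".
    let sem = Arc::new(Semaphore::new(2));
    let mut tasks = Vec::new();

    for i in 0..4 {
        let sem = sem.clone();
        tasks.push(tokio::spawn(async move {
            // Wait up to 500ms for a permit, as new_with_timeout does.
            match timeout(Duration::from_millis(500), sem.acquire()).await {
                Ok(Ok(_permit)) => {
                    // The permit is held for the duration of the work,
                    // then released when _permit drops.
                    tokio::time::sleep(Duration::from_millis(100)).await;
                    println!("task {i}: resolved");
                }
                Ok(Err(_)) => eprintln!("task {i}: semaphore closed"),
                Err(_) => eprintln!("task {i}: timed out waiting for permit"),
            }
        }));
    }

    for t in tasks {
        let _ = t.await;
    }
}
```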
+580
-19
src/handle_resolver/redis.rs
+580
-19
src/handle_resolver/redis.rs
···
7
7
use super::errors::HandleResolverError;
8
8
use super::traits::HandleResolver;
9
9
use crate::handle_resolution_result::HandleResolutionResult;
10
+
use crate::metrics::SharedMetricsPublisher;
10
11
use async_trait::async_trait;
12
+
use atproto_identity::resolve::{InputType, parse_input};
11
13
use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
12
14
use metrohash::MetroHash64;
13
15
use std::hash::Hasher as _;
···
33
35
/// use std::sync::Arc;
34
36
/// use deadpool_redis::Pool;
35
37
/// use quickdid::handle_resolver::{create_base_resolver, create_redis_resolver, HandleResolver};
38
+
/// use quickdid::metrics::NoOpMetricsPublisher;
36
39
///
37
40
/// # async fn example() {
38
41
/// # use atproto_identity::resolve::HickoryDnsResolver;
39
42
/// # use reqwest::Client;
40
43
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
41
44
/// # let http_client = Client::new();
42
-
/// # let base_resolver = create_base_resolver(dns_resolver, http_client);
45
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
46
+
/// # let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
43
47
/// # let redis_pool: Pool = todo!();
44
48
/// // Create with default 90-day TTL
45
49
/// let resolver = create_redis_resolver(
46
50
/// base_resolver,
47
-
/// redis_pool
51
+
/// redis_pool,
52
+
/// metrics
48
53
/// );
49
54
/// # }
50
55
/// ```
···
57
62
key_prefix: String,
58
63
/// TTL for cache entries in seconds
59
64
ttl_seconds: u64,
65
+
/// Metrics publisher for telemetry
66
+
metrics: SharedMetricsPublisher,
60
67
}
61
68
62
69
impl RedisHandleResolver {
63
70
/// Create a new Redis-backed handle resolver with default 90-day TTL.
64
-
fn new(inner: Arc<dyn HandleResolver>, pool: RedisPool) -> Self {
65
-
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60) // 90 days default
71
+
fn new(
72
+
inner: Arc<dyn HandleResolver>,
73
+
pool: RedisPool,
74
+
metrics: SharedMetricsPublisher,
75
+
) -> Self {
76
+
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
66
77
}
67
78
68
79
/// Create a new Redis-backed handle resolver with custom TTL.
69
-
fn with_ttl(inner: Arc<dyn HandleResolver>, pool: RedisPool, ttl_seconds: u64) -> Self {
70
-
Self::with_full_config(inner, pool, "handle:".to_string(), ttl_seconds)
80
+
fn with_ttl(
81
+
inner: Arc<dyn HandleResolver>,
82
+
pool: RedisPool,
83
+
ttl_seconds: u64,
84
+
metrics: SharedMetricsPublisher,
85
+
) -> Self {
86
+
Self::with_full_config(inner, pool, "handle:".to_string(), ttl_seconds, metrics)
71
87
}
72
88
73
89
/// Create a new Redis-backed handle resolver with full configuration.
···
76
92
pool: RedisPool,
77
93
key_prefix: String,
78
94
ttl_seconds: u64,
95
+
metrics: SharedMetricsPublisher,
79
96
) -> Self {
80
97
Self {
81
98
inner,
82
99
pool,
83
100
key_prefix,
84
101
ttl_seconds,
102
+
metrics,
85
103
}
86
104
}
87
105
···
100
118
fn ttl_seconds(&self) -> u64 {
101
119
self.ttl_seconds
102
120
}
121
+
122
+
/// Purge a handle and its associated DID from the cache.
123
+
///
124
+
/// This method removes both the handle->DID mapping and the reverse DID->handle mapping.
125
+
async fn purge_handle(&self, handle: &str) -> Result<(), HandleResolverError> {
126
+
let handle_key = self.make_key(handle);
127
+
128
+
match self.pool.get().await {
129
+
Ok(mut conn) => {
130
+
// First, try to get the cached result to find the associated DID
131
+
let cached: Option<Vec<u8>> = match conn.get(&handle_key).await {
132
+
Ok(value) => value,
133
+
Err(e) => {
134
+
tracing::warn!("Failed to get handle from Redis for purging: {}", e);
135
+
self.metrics.incr("resolver.redis.purge_get_error").await;
136
+
None
137
+
}
138
+
};
139
+
140
+
// If we found a cached result, extract the DID and delete both keys
141
+
if let Some(cached_bytes) = cached {
142
+
if let Ok(cached_result) = HandleResolutionResult::from_bytes(&cached_bytes) {
143
+
if let Some(did) = cached_result.to_did() {
144
+
let did_key = self.make_key(&did);
145
+
146
+
// Delete both the handle key and the DID key
147
+
let _: Result<(), _> = conn.del(&[&handle_key, &did_key]).await;
148
+
149
+
tracing::debug!("Purged handle {} and associated DID {}", handle, did);
150
+
self.metrics
151
+
.incr("resolver.redis.purge_handle_success")
152
+
.await;
153
+
} else {
154
+
// Just delete the handle key if no DID was resolved
155
+
let _: Result<(), _> = conn.del(&handle_key).await;
156
+
tracing::debug!("Purged unresolved handle {}", handle);
157
+
self.metrics
158
+
.incr("resolver.redis.purge_handle_unresolved")
159
+
.await;
160
+
}
161
+
} else {
162
+
// If we can't deserialize, just delete the handle key
163
+
let _: Result<(), _> = conn.del(&handle_key).await;
164
+
tracing::warn!("Purged handle {} with undeserializable data", handle);
165
+
self.metrics
166
+
.incr("resolver.redis.purge_handle_corrupt")
167
+
.await;
168
+
}
169
+
} else {
170
+
tracing::debug!("Handle {} not found in cache for purging", handle);
171
+
self.metrics
172
+
.incr("resolver.redis.purge_handle_not_found")
173
+
.await;
174
+
}
175
+
176
+
Ok(())
177
+
}
178
+
Err(e) => {
179
+
tracing::warn!("Failed to get Redis connection for purging: {}", e);
180
+
self.metrics
181
+
.incr("resolver.redis.purge_connection_error")
182
+
.await;
183
+
Err(HandleResolverError::ResolutionFailed(format!(
184
+
"Redis connection error: {}",
185
+
e
186
+
)))
187
+
}
188
+
}
189
+
}
190
+
191
+
/// Purge a DID and its associated handle from the cache.
192
+
///
193
+
/// This method removes both the DID->handle mapping and the handle->DID mapping.
194
+
async fn purge_did(&self, did: &str) -> Result<(), HandleResolverError> {
195
+
let did_key = self.make_key(did);
196
+
197
+
match self.pool.get().await {
198
+
Ok(mut conn) => {
199
+
// First, try to get the associated handle from the reverse mapping
200
+
let handle_bytes: Option<Vec<u8>> = match conn.get(&did_key).await {
201
+
Ok(value) => value,
202
+
Err(e) => {
203
+
tracing::warn!("Failed to get DID from Redis for purging: {}", e);
204
+
self.metrics.incr("resolver.redis.purge_get_error").await;
205
+
None
206
+
}
207
+
};
208
+
209
+
// If we found a handle, delete both keys
210
+
if let Some(handle_bytes) = handle_bytes {
211
+
if let Ok(handle) = String::from_utf8(handle_bytes) {
212
+
let handle_key = self.make_key(&handle);
213
+
214
+
// Delete both the DID key and the handle key
215
+
let _: Result<(), _> = conn.del(&[&did_key, &handle_key]).await;
216
+
217
+
tracing::debug!("Purged DID {} and associated handle {}", did, handle);
218
+
self.metrics.incr("resolver.redis.purge_did_success").await;
219
+
} else {
220
+
// If we can't parse the handle, just delete the DID key
221
+
let _: Result<(), _> = conn.del(&did_key).await;
222
+
tracing::warn!("Purged DID {} with unparseable handle data", did);
223
+
self.metrics.incr("resolver.redis.purge_did_corrupt").await;
224
+
}
225
+
} else {
226
+
tracing::debug!("DID {} not found in cache for purging", did);
227
+
self.metrics
228
+
.incr("resolver.redis.purge_did_not_found")
229
+
.await;
230
+
}
231
+
232
+
Ok(())
233
+
}
234
+
Err(e) => {
235
+
tracing::warn!("Failed to get Redis connection for purging: {}", e);
236
+
self.metrics
237
+
.incr("resolver.redis.purge_connection_error")
238
+
.await;
239
+
Err(HandleResolverError::ResolutionFailed(format!(
240
+
"Redis connection error: {}",
241
+
e
242
+
)))
243
+
}
244
+
}
245
+
}
103
246
}
104
247
105
248
#[async_trait]
106
249
impl HandleResolver for RedisHandleResolver {
107
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
250
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
108
251
let handle = s.to_string();
109
252
let key = self.make_key(&handle);
110
253
···
115
258
let cached: Option<Vec<u8>> = match conn.get(&key).await {
116
259
Ok(value) => value,
117
260
Err(e) => {
261
+
self.metrics.incr("resolver.redis.get_error").await;
118
262
tracing::warn!("Failed to get handle from Redis cache: {}", e);
119
263
None
120
264
}
···
126
270
Ok(cached_result) => {
127
271
if let Some(did) = cached_result.to_did() {
128
272
tracing::debug!("Cache hit for handle {}: {}", handle, did);
129
-
return Ok(did);
273
+
self.metrics.incr("resolver.redis.cache_hit").await;
274
+
return Ok((did, cached_result.timestamp));
130
275
} else {
131
276
tracing::debug!("Cache hit (not resolved) for handle {}", handle);
277
+
self.metrics
278
+
.incr("resolver.redis.cache_hit_not_resolved")
279
+
.await;
132
280
return Err(HandleResolverError::HandleNotFound);
133
281
}
134
282
}
···
138
286
handle,
139
287
e
140
288
);
289
+
self.metrics.incr("resolver.redis.deserialize_error").await;
141
290
// Fall through to re-resolve if deserialization fails
142
291
}
143
292
}
···
145
294
146
295
// Not in cache, resolve through inner resolver
147
296
tracing::debug!("Cache miss for handle {}, resolving...", handle);
297
+
self.metrics.incr("resolver.redis.cache_miss").await;
148
298
let result = self.inner.resolve(s).await;
149
299
150
300
// Create and serialize resolution result
151
301
let resolution_result = match &result {
152
-
Ok(did) => {
302
+
Ok((did, _timestamp)) => {
153
303
tracing::debug!(
154
304
"Caching successful resolution for handle {}: {}",
155
305
handle,
···
159
309
Ok(res) => res,
160
310
Err(e) => {
161
311
tracing::warn!("Failed to create resolution result: {}", e);
312
+
self.metrics
313
+
.incr("resolver.redis.result_create_error")
314
+
.await;
162
315
return result;
163
316
}
164
317
}
···
169
322
Ok(res) => res,
170
323
Err(err) => {
171
324
tracing::warn!("Failed to create not_resolved result: {}", err);
325
+
self.metrics
326
+
.incr("resolver.redis.result_create_error")
327
+
.await;
172
328
return result;
173
329
}
174
330
}
···
184
340
.await
185
341
{
186
342
tracing::warn!("Failed to cache handle resolution in Redis: {}", e);
343
+
self.metrics.incr("resolver.redis.cache_set_error").await;
344
+
} else {
345
+
self.metrics.incr("resolver.redis.cache_set").await;
346
+
347
+
// For successful resolutions, also store reverse DID -> handle mapping
348
+
if let Ok((did, _)) = &result {
349
+
let did_key = self.make_key(did);
350
+
if let Err(e) = conn
351
+
.set_ex::<_, _, ()>(
352
+
&did_key,
353
+
handle.as_bytes(),
354
+
self.ttl_seconds(),
355
+
)
356
+
.await
357
+
{
358
+
tracing::warn!(
359
+
"Failed to cache reverse DID->handle mapping in Redis: {}",
360
+
e
361
+
);
362
+
self.metrics
363
+
.incr("resolver.redis.reverse_cache_set_error")
364
+
.await;
365
+
} else {
366
+
tracing::debug!(
367
+
"Cached reverse mapping for DID {}: {}",
368
+
did,
369
+
handle
370
+
);
371
+
self.metrics.incr("resolver.redis.reverse_cache_set").await;
372
+
}
373
+
}
187
374
}
188
375
}
189
376
Err(e) => {
···
192
379
handle,
193
380
e
194
381
);
382
+
self.metrics.incr("resolver.redis.serialize_error").await;
195
383
}
196
384
}
197
385
···
203
391
"Failed to get Redis connection, falling back to uncached resolution: {}",
204
392
e
205
393
);
394
+
self.metrics.incr("resolver.redis.connection_error").await;
206
395
self.inner.resolve(s).await
207
396
}
208
397
}
209
398
}
399
+
400
+
async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
401
+
// Use atproto_identity's parse_input to properly identify the input type
402
+
let parsed_input = parse_input(subject)
403
+
.map_err(|_| HandleResolverError::InvalidSubject(subject.to_string()))?;
404
+
match parsed_input {
405
+
InputType::Handle(handle) => {
406
+
// It's a handle, purge using the lowercase version
407
+
self.purge_handle(&handle.to_lowercase()).await
408
+
}
409
+
InputType::Plc(did) | InputType::Web(did) => {
410
+
// It's a DID, purge the DID
411
+
self.purge_did(&did).await
412
+
}
413
+
}
414
+
}
415
+
416
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
417
+
// Normalize the handle to lowercase
418
+
let handle = handle.to_lowercase();
419
+
let handle_key = self.make_key(&handle);
420
+
let did_key = self.make_key(did);
421
+
422
+
match self.pool.get().await {
423
+
Ok(mut conn) => {
424
+
// Create a resolution result for the successful mapping
425
+
let resolution_result = match HandleResolutionResult::success(did) {
426
+
Ok(res) => res,
427
+
Err(e) => {
428
+
tracing::warn!(
429
+
"Failed to create resolution result for set operation: {}",
430
+
e
431
+
);
432
+
self.metrics
433
+
.incr("resolver.redis.set_result_create_error")
434
+
.await;
435
+
return Err(HandleResolverError::InvalidSubject(format!(
436
+
"Failed to create resolution result: {}",
437
+
e
438
+
)));
439
+
}
440
+
};
441
+
442
+
// Serialize to bytes
443
+
match resolution_result.to_bytes() {
444
+
Ok(bytes) => {
445
+
// Set the handle -> DID mapping with expiration
446
+
if let Err(e) = conn
447
+
.set_ex::<_, _, ()>(&handle_key, bytes, self.ttl_seconds())
448
+
.await
449
+
{
450
+
tracing::warn!("Failed to set handle->DID mapping in Redis: {}", e);
451
+
self.metrics.incr("resolver.redis.set_cache_error").await;
452
+
return Err(HandleResolverError::ResolutionFailed(format!(
453
+
"Failed to set cache: {}",
454
+
e
455
+
)));
456
+
}
457
+
458
+
// Set the reverse DID -> handle mapping
459
+
if let Err(e) = conn
460
+
.set_ex::<_, _, ()>(&did_key, handle.as_bytes(), self.ttl_seconds())
461
+
.await
462
+
{
463
+
tracing::warn!("Failed to set DID->handle mapping in Redis: {}", e);
464
+
self.metrics
465
+
.incr("resolver.redis.set_reverse_cache_error")
466
+
.await;
467
+
// Don't fail the operation, but log the warning
468
+
}
469
+
470
+
tracing::debug!("Set handle {} -> DID {} mapping in cache", handle, did);
471
+
self.metrics.incr("resolver.redis.set_success").await;
472
+
Ok(())
473
+
}
474
+
Err(e) => {
475
+
tracing::warn!(
476
+
"Failed to serialize resolution result for set operation: {}",
477
+
e
478
+
);
479
+
self.metrics
480
+
.incr("resolver.redis.set_serialize_error")
481
+
.await;
482
+
Err(HandleResolverError::InvalidSubject(format!(
483
+
"Failed to serialize: {}",
484
+
e
485
+
)))
486
+
}
487
+
}
488
+
}
489
+
Err(e) => {
490
+
tracing::warn!("Failed to get Redis connection for set operation: {}", e);
491
+
self.metrics
492
+
.incr("resolver.redis.set_connection_error")
493
+
.await;
494
+
Err(HandleResolverError::ResolutionFailed(format!(
495
+
"Redis connection error: {}",
496
+
e
497
+
)))
498
+
}
499
+
}
500
+
}
210
501
}
211
502
212
503
/// Create a new Redis-backed handle resolver with default 90-day TTL.
···
215
506
///
216
507
/// * `inner` - The underlying resolver to use for actual resolution
217
508
/// * `pool` - Redis connection pool
509
+
/// * `metrics` - Metrics publisher for telemetry
218
510
///
219
511
/// # Example
220
512
///
···
222
514
/// use std::sync::Arc;
223
515
/// use quickdid::handle_resolver::{create_base_resolver, create_redis_resolver, HandleResolver};
224
516
/// use quickdid::cache::create_redis_pool;
517
+
/// use quickdid::metrics::NoOpMetricsPublisher;
225
518
///
226
519
/// # async fn example() -> anyhow::Result<()> {
227
520
/// # use atproto_identity::resolve::HickoryDnsResolver;
228
521
/// # use reqwest::Client;
229
522
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
230
523
/// # let http_client = Client::new();
524
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
231
525
/// let base = create_base_resolver(
232
526
/// dns_resolver,
233
527
/// http_client,
528
+
/// metrics.clone(),
234
529
/// );
235
530
///
236
531
/// let pool = create_redis_pool("redis://localhost:6379")?;
237
-
/// let resolver = create_redis_resolver(base, pool);
238
-
/// let did = resolver.resolve("alice.bsky.social").await.unwrap();
532
+
/// let resolver = create_redis_resolver(base, pool, metrics);
533
+
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
239
534
/// # Ok(())
240
535
/// # }
241
536
/// ```
242
537
pub fn create_redis_resolver(
243
538
inner: Arc<dyn HandleResolver>,
244
539
pool: RedisPool,
540
+
metrics: SharedMetricsPublisher,
245
541
) -> Arc<dyn HandleResolver> {
246
-
Arc::new(RedisHandleResolver::new(inner, pool))
542
+
Arc::new(RedisHandleResolver::new(inner, pool, metrics))
247
543
}
248
544
249
545
/// Create a new Redis-backed handle resolver with custom TTL.
···
253
549
/// * `inner` - The underlying resolver to use for actual resolution
254
550
/// * `pool` - Redis connection pool
255
551
/// * `ttl_seconds` - TTL for cache entries in seconds
552
+
/// * `metrics` - Metrics publisher for telemetry
256
553
pub fn create_redis_resolver_with_ttl(
257
554
inner: Arc<dyn HandleResolver>,
258
555
pool: RedisPool,
259
556
ttl_seconds: u64,
557
+
metrics: SharedMetricsPublisher,
260
558
) -> Arc<dyn HandleResolver> {
261
-
Arc::new(RedisHandleResolver::with_ttl(inner, pool, ttl_seconds))
559
+
Arc::new(RedisHandleResolver::with_ttl(
560
+
inner,
561
+
pool,
562
+
ttl_seconds,
563
+
metrics,
564
+
))
262
565
}
263
566
264
567
#[cfg(test)]
···
274
577
275
578
#[async_trait]
276
579
impl HandleResolver for MockHandleResolver {
277
-
async fn resolve(&self, _handle: &str) -> Result<String, HandleResolverError> {
580
+
async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
278
581
if self.should_fail {
279
582
Err(HandleResolverError::MockResolutionFailure)
280
583
} else {
281
-
Ok(self.expected_did.clone())
584
+
let timestamp = std::time::SystemTime::now()
585
+
.duration_since(std::time::UNIX_EPOCH)
586
+
.unwrap_or_default()
587
+
.as_secs();
588
+
Ok((self.expected_did.clone(), timestamp))
282
589
}
283
590
}
284
591
}
···
296
603
expected_did: "did:plc:testuser123".to_string(),
297
604
});
298
605
606
+
// Create metrics publisher
607
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
608
+
299
609
// Create Redis-backed resolver with a unique key prefix for testing
300
-
let test_prefix = format!("test:handle:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
610
+
let test_prefix = format!(
611
+
"test:handle:{}:",
612
+
std::time::SystemTime::now()
613
+
.duration_since(std::time::UNIX_EPOCH)
614
+
.unwrap()
615
+
.as_nanos()
616
+
);
301
617
let redis_resolver = RedisHandleResolver::with_full_config(
302
618
mock_resolver,
303
619
pool.clone(),
304
620
test_prefix.clone(),
305
621
3600,
622
+
metrics,
306
623
);
307
624
308
625
let test_handle = "alice.bsky.social";
309
626
310
627
// First resolution - should call inner resolver
311
-
let result1 = redis_resolver.resolve(test_handle).await.unwrap();
628
+
let (result1, _timestamp1) = redis_resolver.resolve(test_handle).await.unwrap();
312
629
assert_eq!(result1, "did:plc:testuser123");
313
630
314
631
// Second resolution - should hit cache
315
-
let result2 = redis_resolver.resolve(test_handle).await.unwrap();
632
+
let (result2, _timestamp2) = redis_resolver.resolve(test_handle).await.unwrap();
316
633
assert_eq!(result2, "did:plc:testuser123");
317
634
318
635
// Clean up test data
···
326
643
}
327
644
328
645
#[tokio::test]
646
+
async fn test_redis_handle_resolver_bidirectional_purge() {
647
+
let pool = match crate::test_helpers::get_test_redis_pool() {
648
+
Some(p) => p,
649
+
None => return,
650
+
};
651
+
652
+
// Create mock resolver
653
+
let mock_resolver = Arc::new(MockHandleResolver {
654
+
should_fail: false,
655
+
expected_did: "did:plc:testuser456".to_string(),
656
+
});
657
+
658
+
// Create metrics publisher
659
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
660
+
661
+
// Create Redis-backed resolver with a unique key prefix for testing
662
+
let test_prefix = format!(
663
+
"test:handle:{}:",
664
+
std::time::SystemTime::now()
665
+
.duration_since(std::time::UNIX_EPOCH)
666
+
.unwrap()
667
+
.as_nanos()
668
+
);
669
+
let redis_resolver = RedisHandleResolver::with_full_config(
670
+
mock_resolver,
671
+
pool.clone(),
672
+
test_prefix.clone(),
673
+
3600,
674
+
metrics,
675
+
);
676
+
677
+
let test_handle = "bob.bsky.social";
678
+
let expected_did = "did:plc:testuser456";
679
+
680
+
// First resolution - should call inner resolver and cache both directions
681
+
let (result1, _) = redis_resolver.resolve(test_handle).await.unwrap();
682
+
assert_eq!(result1, expected_did);
683
+
684
+
// Verify both keys exist in Redis
685
+
if let Ok(mut conn) = pool.get().await {
686
+
let mut h = MetroHash64::default();
687
+
h.write(test_handle.as_bytes());
688
+
let handle_key = format!("{}{}", test_prefix, h.finish());
689
+
690
+
let mut h2 = MetroHash64::default();
691
+
h2.write(expected_did.as_bytes());
692
+
let did_key = format!("{}{}", test_prefix, h2.finish());
693
+
694
+
// Check handle -> DID mapping exists
695
+
let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
696
+
assert!(handle_exists, "Handle key should exist in cache");
697
+
698
+
// Check DID -> handle mapping exists
699
+
let did_exists: bool = conn.exists(&did_key).await.unwrap();
700
+
assert!(did_exists, "DID key should exist in cache");
701
+
702
+
// Test purge by handle using the trait method
703
+
redis_resolver.purge(test_handle).await.unwrap();
704
+
705
+
// Verify both keys were deleted
706
+
let handle_exists_after: bool = conn.exists(&handle_key).await.unwrap();
707
+
assert!(
708
+
!handle_exists_after,
709
+
"Handle key should be deleted after purge"
710
+
);
711
+
712
+
let did_exists_after: bool = conn.exists(&did_key).await.unwrap();
713
+
assert!(!did_exists_after, "DID key should be deleted after purge");
714
+
}
715
+
716
+
// Re-resolve to cache again
717
+
let (result2, _) = redis_resolver.resolve(test_handle).await.unwrap();
718
+
assert_eq!(result2, expected_did);
719
+
720
+
// Test purge by DID using the trait method
721
+
redis_resolver.purge(expected_did).await.unwrap();
722
+
723
+
// Verify both keys were deleted again
724
+
if let Ok(mut conn) = pool.get().await {
725
+
let mut h = MetroHash64::default();
726
+
h.write(test_handle.as_bytes());
727
+
let handle_key = format!("{}{}", test_prefix, h.finish());
728
+
729
+
let mut h2 = MetroHash64::default();
730
+
h2.write(expected_did.as_bytes());
731
+
let did_key = format!("{}{}", test_prefix, h2.finish());
732
+
733
+
let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
734
+
assert!(
735
+
!handle_exists,
736
+
"Handle key should be deleted after DID purge"
737
+
);
738
+
739
+
let did_exists: bool = conn.exists(&did_key).await.unwrap();
740
+
assert!(!did_exists, "DID key should be deleted after DID purge");
741
+
}
742
+
}
743
+
744
+
#[tokio::test]
745
+
async fn test_redis_handle_resolver_purge_input_types() {
746
+
let pool = match crate::test_helpers::get_test_redis_pool() {
747
+
Some(p) => p,
748
+
None => return,
749
+
};
750
+
751
+
// Create mock resolver
752
+
let mock_resolver = Arc::new(MockHandleResolver {
753
+
should_fail: false,
754
+
expected_did: "did:plc:testuser789".to_string(),
755
+
});
756
+
757
+
// Create metrics publisher
758
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
759
+
760
+
// Create Redis-backed resolver with a unique key prefix for testing
761
+
let test_prefix = format!(
762
+
"test:handle:{}:",
763
+
std::time::SystemTime::now()
764
+
.duration_since(std::time::UNIX_EPOCH)
765
+
.unwrap()
766
+
.as_nanos()
767
+
);
768
+
let redis_resolver = RedisHandleResolver::with_full_config(
769
+
mock_resolver,
770
+
pool.clone(),
771
+
test_prefix.clone(),
772
+
3600,
773
+
metrics,
774
+
);
775
+
776
+
// Test different input formats
777
+
let test_cases = vec![
778
+
("alice.bsky.social", "alice.bsky.social"), // Handle
779
+
("ALICE.BSKY.SOCIAL", "alice.bsky.social"), // Handle (uppercase)
780
+
("did:plc:abc123", "did:plc:abc123"), // PLC DID
781
+
("did:web:example.com", "did:web:example.com"), // Web DID
782
+
];
783
+
784
+
for (input, expected_key) in test_cases {
785
+
// Resolve first to cache it
786
+
if !input.starts_with("did:") {
787
+
let _ = redis_resolver.resolve(input).await;
788
+
}
789
+
790
+
// Test purging with different input formats
791
+
let result = redis_resolver.purge(input).await;
792
+
assert!(result.is_ok(), "Failed to purge {}: {:?}", input, result);
793
+
794
+
// Verify the key was handled correctly based on type
795
+
if let Ok(mut conn) = pool.get().await {
796
+
let mut h = MetroHash64::default();
797
+
h.write(expected_key.as_bytes());
798
+
let key = format!("{}{}", test_prefix, h.finish());
799
+
800
+
// After purge, key should not exist
801
+
let exists: bool = conn.exists(&key).await.unwrap_or(false);
802
+
assert!(!exists, "Key for {} should not exist after purge", input);
803
+
}
804
+
}
805
+
}
806
+
807
+
#[tokio::test]
808
+
async fn test_redis_handle_resolver_set_method() {
809
+
let pool = match crate::test_helpers::get_test_redis_pool() {
810
+
Some(p) => p,
811
+
None => return,
812
+
};
813
+
814
+
// Create mock resolver
815
+
let mock_resolver = Arc::new(MockHandleResolver {
816
+
should_fail: false,
817
+
expected_did: "did:plc:old".to_string(),
818
+
});
819
+
820
+
// Create metrics publisher
821
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
822
+
823
+
// Create Redis-backed resolver with a unique key prefix for testing
824
+
let test_prefix = format!(
825
+
"test:handle:{}:",
826
+
std::time::SystemTime::now()
827
+
.duration_since(std::time::UNIX_EPOCH)
828
+
.unwrap()
829
+
.as_nanos()
830
+
);
831
+
let redis_resolver = RedisHandleResolver::with_full_config(
832
+
mock_resolver,
833
+
pool.clone(),
834
+
test_prefix.clone(),
835
+
3600,
836
+
metrics,
837
+
);
838
+
839
+
let test_handle = "charlie.bsky.social";
840
+
let test_did = "did:plc:newuser123";
841
+
842
+
// Set the mapping using the trait method
843
+
redis_resolver.set(test_handle, test_did).await.unwrap();
844
+
845
+
// Verify the mapping by resolving the handle
846
+
let (resolved_did, _) = redis_resolver.resolve(test_handle).await.unwrap();
847
+
assert_eq!(resolved_did, test_did);
848
+
849
+
// Test that uppercase handles are normalized
850
+
redis_resolver
851
+
.set("DAVE.BSKY.SOCIAL", "did:plc:dave456")
852
+
.await
853
+
.unwrap();
854
+
let (resolved_did2, _) = redis_resolver.resolve("dave.bsky.social").await.unwrap();
855
+
assert_eq!(resolved_did2, "did:plc:dave456");
856
+
857
+
// Verify both forward and reverse mappings exist
858
+
if let Ok(mut conn) = pool.get().await {
859
+
let mut h = MetroHash64::default();
860
+
h.write(test_handle.as_bytes());
861
+
let handle_key = format!("{}{}", test_prefix, h.finish());
862
+
863
+
let mut h2 = MetroHash64::default();
864
+
h2.write(test_did.as_bytes());
865
+
let did_key = format!("{}{}", test_prefix, h2.finish());
866
+
867
+
// Check both keys exist
868
+
let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
869
+
assert!(handle_exists, "Handle key should exist after set");
870
+
871
+
let did_exists: bool = conn.exists(&did_key).await.unwrap();
872
+
assert!(did_exists, "DID key should exist after set");
873
+
874
+
// Clean up test data
875
+
let _: Result<(), _> = conn.del(&[&handle_key, &did_key]).await;
876
+
}
877
+
}
878
+
879
+
#[tokio::test]
329
880
async fn test_redis_handle_resolver_cache_error() {
330
881
let pool = match crate::test_helpers::get_test_redis_pool() {
331
882
Some(p) => p,
···
337
888
should_fail: true,
338
889
expected_did: String::new(),
339
890
});
891
+
892
+
// Create metrics publisher
893
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
340
894
341
895
// Create Redis-backed resolver with a unique key prefix for testing
342
-
let test_prefix = format!("test:handle:{}:", std::time::SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos());
896
+
let test_prefix = format!(
897
+
"test:handle:{}:",
898
+
std::time::SystemTime::now()
899
+
.duration_since(std::time::UNIX_EPOCH)
900
+
.unwrap()
901
+
.as_nanos()
902
+
);
343
903
let redis_resolver = RedisHandleResolver::with_full_config(
344
904
mock_resolver,
345
905
pool.clone(),
346
906
test_prefix.clone(),
347
907
3600,
908
+
metrics,
348
909
);
349
910
350
911
let test_handle = "error.bsky.social";
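Two Redis entries back each successful resolution: the handle-derived key stores a serialized HandleResolutionResult, while the DID-derived key stores the raw handle bytes, which is what lets purge walk from either side of the mapping and delete both. The tests recompute keys the same way make_key does; a sketch of that derivation under the same assumptions (MetroHash64 over the identifier bytes, appended to the configured prefix):

```rust
use metrohash::MetroHash64;
use std::hash::Hasher as _;

/// Recompute a cache key the way the resolver's make_key does:
/// configured prefix + MetroHash64 of the identifier bytes.
fn cache_key(prefix: &str, subject: &str) -> String {
    let mut h = MetroHash64::default();
    h.write(subject.as_bytes());
    format!("{}{}", prefix, h.finish())
}

fn main() {
    // Forward and reverse entries for one mapping share the prefix.
    println!("{}", cache_key("handle:", "alice.bsky.social"));
    println!("{}", cache_key("handle:", "did:plc:abc123"));
}
```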
+221
-66
src/handle_resolver/sqlite.rs
+221
-66
src/handle_resolver/sqlite.rs
···
7
7
use super::errors::HandleResolverError;
8
8
use super::traits::HandleResolver;
9
9
use crate::handle_resolution_result::HandleResolutionResult;
10
+
use crate::metrics::SharedMetricsPublisher;
10
11
use async_trait::async_trait;
11
12
use metrohash::MetroHash64;
12
13
use sqlx::{Row, SqlitePool};
···
35
36
/// use std::sync::Arc;
36
37
/// use sqlx::SqlitePool;
37
38
/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
39
+
/// use quickdid::metrics::NoOpMetricsPublisher;
38
40
///
39
41
/// # async fn example() {
40
42
/// # use atproto_identity::resolve::HickoryDnsResolver;
41
43
/// # use reqwest::Client;
42
44
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
43
45
/// # let http_client = Client::new();
44
-
/// # let base_resolver = create_base_resolver(dns_resolver, http_client);
46
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
47
+
/// # let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
45
48
/// # let sqlite_pool: SqlitePool = todo!();
46
49
/// // Create with default 90-day TTL
47
50
/// let resolver = create_sqlite_resolver(
48
51
/// base_resolver,
49
-
/// sqlite_pool
52
+
/// sqlite_pool,
53
+
/// metrics
50
54
/// );
51
55
/// # }
52
56
/// ```
···
57
61
pool: SqlitePool,
58
62
/// TTL for cache entries in seconds
59
63
ttl_seconds: u64,
64
+
/// Metrics publisher for telemetry
65
+
metrics: SharedMetricsPublisher,
60
66
}
61
67
62
68
impl SqliteHandleResolver {
63
69
/// Create a new SQLite-backed handle resolver with default 90-day TTL.
64
-
fn new(inner: Arc<dyn HandleResolver>, pool: SqlitePool) -> Self {
65
-
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60) // 90 days default
70
+
fn new(
71
+
inner: Arc<dyn HandleResolver>,
72
+
pool: SqlitePool,
73
+
metrics: SharedMetricsPublisher,
74
+
) -> Self {
75
+
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
66
76
}
67
77
68
78
/// Create a new SQLite-backed handle resolver with custom TTL.
69
-
fn with_ttl(inner: Arc<dyn HandleResolver>, pool: SqlitePool, ttl_seconds: u64) -> Self {
79
+
fn with_ttl(
80
+
inner: Arc<dyn HandleResolver>,
81
+
pool: SqlitePool,
82
+
ttl_seconds: u64,
83
+
metrics: SharedMetricsPublisher,
84
+
) -> Self {
70
85
Self {
71
86
inner,
72
87
pool,
73
88
ttl_seconds,
89
+
metrics,
74
90
}
75
91
}
76
92
···
98
114
99
115
#[async_trait]
100
116
impl HandleResolver for SqliteHandleResolver {
101
-
async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
117
+
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
102
118
let handle = s.to_string();
103
119
let key = self.make_key(&handle) as i64; // SQLite uses signed integers
104
120
105
121
// Try to get from SQLite cache first
106
-
let cached_result = sqlx::query(
107
-
"SELECT result, updated FROM handle_resolution_cache WHERE key = ?1"
108
-
)
109
-
.bind(key)
110
-
.fetch_optional(&self.pool)
111
-
.await;
122
+
let cached_result =
123
+
sqlx::query("SELECT result, updated FROM handle_resolution_cache WHERE key = ?1")
124
+
.bind(key)
125
+
.fetch_optional(&self.pool)
126
+
.await;
112
127
113
128
match cached_result {
114
129
Ok(Some(row)) => {
···
122
137
Ok(cached_result) => {
123
138
if let Some(did) = cached_result.to_did() {
124
139
tracing::debug!("Cache hit for handle {}: {}", handle, did);
125
-
return Ok(did);
140
+
self.metrics.incr("resolver.sqlite.cache_hit").await;
141
+
return Ok((did, cached_result.timestamp));
126
142
} else {
127
143
tracing::debug!("Cache hit (not resolved) for handle {}", handle);
144
+
self.metrics
145
+
.incr("resolver.sqlite.cache_hit_not_resolved")
146
+
.await;
128
147
return Err(HandleResolverError::HandleNotFound);
129
148
}
130
149
}
···
134
153
handle,
135
154
e
136
155
);
156
+
self.metrics.incr("resolver.sqlite.deserialize_error").await;
137
157
// Fall through to re-resolve if deserialization fails
138
158
}
139
159
}
140
160
} else {
141
161
tracing::debug!("Cache entry expired for handle {}", handle);
162
+
self.metrics.incr("resolver.sqlite.cache_expired").await;
142
163
// Entry is expired, we'll re-resolve and update it
143
164
}
144
165
}
145
166
Ok(None) => {
146
167
tracing::debug!("Cache miss for handle {}, resolving...", handle);
168
+
self.metrics.incr("resolver.sqlite.cache_miss").await;
147
169
}
148
170
Err(e) => {
149
171
tracing::warn!("Failed to query SQLite cache for handle {}: {}", handle, e);
172
+
self.metrics.incr("resolver.sqlite.query_error").await;
150
173
// Fall through to resolve without caching on database error
151
174
}
152
175
}
···
156
179
157
180
// Create and serialize resolution result
158
181
let resolution_result = match &result {
159
-
Ok(did) => {
182
+
Ok((did, _timestamp)) => {
160
183
tracing::debug!(
161
184
"Caching successful resolution for handle {}: {}",
162
185
handle,
···
166
189
Ok(res) => res,
167
190
Err(e) => {
168
191
tracing::warn!("Failed to create resolution result: {}", e);
192
+
self.metrics
193
+
.incr("resolver.sqlite.result_create_error")
194
+
.await;
169
195
return result;
170
196
}
171
197
}
···
176
202
Ok(res) => res,
177
203
Err(err) => {
178
204
tracing::warn!("Failed to create not_resolved result: {}", err);
205
+
self.metrics
206
+
.incr("resolver.sqlite.result_create_error")
207
+
.await;
179
208
return result;
180
209
}
181
210
}
···
198
227
ON CONFLICT(key) DO UPDATE SET
199
228
result = excluded.result,
200
229
updated = excluded.updated
201
-
"#
230
+
"#,
202
231
)
203
232
.bind(key)
204
233
.bind(&bytes)
···
209
238
210
239
if let Err(e) = query_result {
211
240
tracing::warn!("Failed to cache handle resolution in SQLite: {}", e);
241
+
self.metrics.incr("resolver.sqlite.cache_set_error").await;
242
+
} else {
243
+
self.metrics.incr("resolver.sqlite.cache_set").await;
212
244
}
213
245
}
214
246
Err(e) => {
···
217
249
handle,
218
250
e
219
251
);
252
+
self.metrics.incr("resolver.sqlite.serialize_error").await;
220
253
}
221
254
}
222
255
223
256
result
224
257
}
258
+
259
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
260
+
// Normalize the handle to lowercase
261
+
let handle = handle.to_lowercase();
262
+
263
+
// Update the SQLite cache
264
+
if let Ok(mut conn) = self.pool.acquire().await {
265
+
// Create a resolution result for the successful mapping
266
+
let resolution_result = match HandleResolutionResult::success(did) {
267
+
Ok(res) => res,
268
+
Err(e) => {
269
+
tracing::warn!(
270
+
"Failed to create resolution result for set operation: {}",
271
+
e
272
+
);
273
+
self.metrics
274
+
.incr("resolver.sqlite.set_result_create_error")
275
+
.await;
276
+
// Still chain to inner resolver even if we can't cache
277
+
return self.inner.set(&handle, did).await;
278
+
}
279
+
};
280
+
281
+
// Serialize to bytes
282
+
match resolution_result.to_bytes() {
283
+
Ok(bytes) => {
284
+
// Insert or update the cache entry
285
+
let timestamp = std::time::SystemTime::now()
286
+
.duration_since(std::time::UNIX_EPOCH)
287
+
.unwrap_or_default()
288
+
.as_secs() as i64;
289
+
290
+
let expires_at = timestamp + self.ttl_seconds as i64;
291
+
292
+
match sqlx::query(
293
+
"INSERT OR REPLACE INTO handle_resolution_cache (handle, resolved_value, created_at, expires_at) VALUES (?, ?, ?, ?)"
294
+
)
295
+
.bind(&handle)
296
+
.bind(&bytes)
297
+
.bind(timestamp)
298
+
.bind(expires_at)
299
+
.execute(&mut *conn)
300
+
.await
301
+
{
302
+
Ok(_) => {
303
+
tracing::debug!("Set handle {} -> DID {} in SQLite cache", handle, did);
304
+
self.metrics.incr("resolver.sqlite.set_success").await;
305
+
}
306
+
Err(e) => {
307
+
tracing::warn!("Failed to set handle->DID mapping in SQLite: {}", e);
308
+
self.metrics.incr("resolver.sqlite.set_cache_error").await;
309
+
// Still chain to inner resolver even if cache update fails
310
+
}
311
+
}
312
+
}
313
+
Err(e) => {
314
+
tracing::warn!(
315
+
"Failed to serialize resolution result for set operation: {}",
316
+
e
317
+
);
318
+
self.metrics
319
+
.incr("resolver.sqlite.set_serialize_error")
320
+
.await;
321
+
// Still chain to inner resolver even if serialization fails
322
+
}
323
+
}
324
+
} else {
325
+
tracing::warn!("Failed to get SQLite connection for set operation");
326
+
self.metrics
327
+
.incr("resolver.sqlite.set_connection_error")
328
+
.await;
329
+
}
330
+
331
+
// Chain to inner resolver
332
+
self.inner.set(&handle, did).await
333
+
}
225
334
}
226
335
227
336
/// Create a new SQLite-backed handle resolver with default 90-day TTL.
···
230
339
///
231
340
/// * `inner` - The underlying resolver to use for actual resolution
232
341
/// * `pool` - SQLite connection pool
342
+
/// * `metrics` - Metrics publisher for telemetry
233
343
///
234
344
/// # Example
235
345
///
···
237
347
/// use std::sync::Arc;
238
348
/// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
239
349
/// use quickdid::sqlite_schema::create_sqlite_pool;
350
+
/// use quickdid::metrics::NoOpMetricsPublisher;
240
351
///
241
352
/// # async fn example() -> anyhow::Result<()> {
242
353
/// # use atproto_identity::resolve::HickoryDnsResolver;
243
354
/// # use reqwest::Client;
244
355
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
245
356
/// # let http_client = Client::new();
357
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
246
358
/// let base = create_base_resolver(
247
359
/// dns_resolver,
248
360
/// http_client,
361
+
/// metrics.clone(),
249
362
/// );
250
363
///
251
364
/// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
252
-
/// let resolver = create_sqlite_resolver(base, pool);
253
-
/// let did = resolver.resolve("alice.bsky.social").await.unwrap();
365
+
/// let resolver = create_sqlite_resolver(base, pool, metrics);
366
+
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
254
367
/// # Ok(())
255
368
/// # }
256
369
/// ```
257
370
pub fn create_sqlite_resolver(
258
371
inner: Arc<dyn HandleResolver>,
259
372
pool: SqlitePool,
373
+
metrics: SharedMetricsPublisher,
260
374
) -> Arc<dyn HandleResolver> {
261
-
Arc::new(SqliteHandleResolver::new(inner, pool))
375
+
Arc::new(SqliteHandleResolver::new(inner, pool, metrics))
262
376
}
263
377
264
378
/// Create a new SQLite-backed handle resolver with custom TTL.
···
268
382
/// * `inner` - The underlying resolver to use for actual resolution
269
383
/// * `pool` - SQLite connection pool
270
384
/// * `ttl_seconds` - TTL for cache entries in seconds
385
+
/// * `metrics` - Metrics publisher for telemetry
271
386
pub fn create_sqlite_resolver_with_ttl(
272
387
inner: Arc<dyn HandleResolver>,
273
388
pool: SqlitePool,
274
389
ttl_seconds: u64,
390
+
metrics: SharedMetricsPublisher,
275
391
) -> Arc<dyn HandleResolver> {
276
-
Arc::new(SqliteHandleResolver::with_ttl(inner, pool, ttl_seconds))
392
+
Arc::new(SqliteHandleResolver::with_ttl(
393
+
inner,
394
+
pool,
395
+
ttl_seconds,
396
+
metrics,
397
+
))
277
398
}
278
399
279
400
#[cfg(test)]
···
289
410
290
411
#[async_trait]
291
412
impl HandleResolver for MockHandleResolver {
292
-
async fn resolve(&self, _handle: &str) -> Result<String, HandleResolverError> {
413
+
async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
293
414
if self.should_fail {
294
415
Err(HandleResolverError::MockResolutionFailure)
295
416
} else {
296
-
Ok(self.expected_did.clone())
417
+
let timestamp = std::time::SystemTime::now()
418
+
.duration_since(std::time::UNIX_EPOCH)
419
+
.unwrap_or_default()
420
+
.as_secs();
421
+
Ok((self.expected_did.clone(), timestamp))
297
422
}
298
423
}
299
424
}
···
316
441
expected_did: "did:plc:testuser123".to_string(),
317
442
});
318
443
444
+
// Create metrics publisher
445
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
446
+
319
447
// Create SQLite-backed resolver
320
-
let sqlite_resolver = SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600);
448
+
let sqlite_resolver =
449
+
SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);
321
450
322
451
let test_handle = "alice.bsky.social";
323
452
let expected_key = sqlite_resolver.make_key(test_handle) as i64;
···
330
459
assert_eq!(initial_count, 0);
331
460
332
461
// First resolution - should call inner resolver and cache the result
333
-
let result1 = sqlite_resolver.resolve(test_handle).await.unwrap();
462
+
let (result1, _timestamp1) = sqlite_resolver.resolve(test_handle).await.unwrap();
334
463
assert_eq!(result1, "did:plc:testuser123");
335
464
336
465
// Verify record was inserted
337
-
let count_after_first: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
338
-
.fetch_one(&pool)
339
-
.await
340
-
.expect("Failed to query count after first resolution");
466
+
let count_after_first: i64 =
467
+
sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
468
+
.fetch_one(&pool)
469
+
.await
470
+
.expect("Failed to query count after first resolution");
341
471
assert_eq!(count_after_first, 1);
342
472
343
473
// Verify the cached record has correct key and non-empty result
344
-
let cached_record = sqlx::query("SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1")
345
-
.bind(expected_key)
346
-
.fetch_one(&pool)
347
-
.await
348
-
.expect("Failed to fetch cached record");
349
-
474
+
let cached_record = sqlx::query(
475
+
"SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1",
476
+
)
477
+
.bind(expected_key)
478
+
.fetch_one(&pool)
479
+
.await
480
+
.expect("Failed to fetch cached record");
481
+
350
482
let cached_key: i64 = cached_record.get("key");
351
483
let cached_result: Vec<u8> = cached_record.get("result");
352
484
let cached_created: i64 = cached_record.get("created");
353
485
let cached_updated: i64 = cached_record.get("updated");
354
486
355
487
assert_eq!(cached_key, expected_key);
356
-
assert!(!cached_result.is_empty(), "Cached result should not be empty");
488
+
assert!(
489
+
!cached_result.is_empty(),
490
+
"Cached result should not be empty"
491
+
);
357
492
assert!(cached_created > 0, "Created timestamp should be positive");
358
493
assert!(cached_updated > 0, "Updated timestamp should be positive");
359
-
assert_eq!(cached_created, cached_updated, "Created and updated should be equal on first insert");
494
+
assert_eq!(
495
+
cached_created, cached_updated,
496
+
"Created and updated should be equal on first insert"
497
+
);
360
498
361
499
// Verify we can deserialize the cached result
362
-
let resolution_result = crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
363
-
.expect("Failed to deserialize cached result");
500
+
let resolution_result =
501
+
crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
502
+
.expect("Failed to deserialize cached result");
364
503
let cached_did = resolution_result.to_did().expect("Should have a DID");
365
504
assert_eq!(cached_did, "did:plc:testuser123");
366
505
367
506
// Second resolution - should hit cache (no additional database insert)
368
-
let result2 = sqlite_resolver.resolve(test_handle).await.unwrap();
507
+
let (result2, _timestamp2) = sqlite_resolver.resolve(test_handle).await.unwrap();
369
508
assert_eq!(result2, "did:plc:testuser123");
370
509
371
510
// Verify count hasn't changed (cache hit, no new insert)
372
-
let count_after_second: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
373
-
.fetch_one(&pool)
374
-
.await
375
-
.expect("Failed to query count after second resolution");
511
+
let count_after_second: i64 =
512
+
sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
513
+
.fetch_one(&pool)
514
+
.await
515
+
.expect("Failed to query count after second resolution");
376
516
assert_eq!(count_after_second, 1);
377
517
}
378
518
···
394
534
expected_did: String::new(),
395
535
});
396
536
537
+
// Create metrics publisher
538
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
539
+
397
540
// Create SQLite-backed resolver
398
-
let sqlite_resolver = SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600);
541
+
let sqlite_resolver =
542
+
             SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);

         let test_handle = "error.bsky.social";
         let expected_key = sqlite_resolver.make_key(test_handle) as i64;
···
         // First resolution - should fail and cache the failure
         let result1 = sqlite_resolver.resolve(test_handle).await;
         assert!(result1.is_err());

         // Match the specific error type we expect
         match result1 {
-            Err(HandleResolverError::MockResolutionFailure) => {},
+            Err(HandleResolverError::MockResolutionFailure) => {}
             other => panic!("Expected MockResolutionFailure, got {:?}", other),
         }

         // Verify the failure was cached
-        let count_after_first: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
-            .fetch_one(&pool)
-            .await
-            .expect("Failed to query count after first resolution");
+        let count_after_first: i64 =
+            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
+                .fetch_one(&pool)
+                .await
+                .expect("Failed to query count after first resolution");
         assert_eq!(count_after_first, 1);

         // Verify the cached error record
-        let cached_record = sqlx::query("SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1")
-            .bind(expected_key)
-            .fetch_one(&pool)
-            .await
-            .expect("Failed to fetch cached error record");
+        let cached_record = sqlx::query(
+            "SELECT key, result, created, updated FROM handle_resolution_cache WHERE key = ?1",
+        )
+        .bind(expected_key)
+        .fetch_one(&pool)
+        .await
+        .expect("Failed to fetch cached error record");

         let cached_key: i64 = cached_record.get("key");
         let cached_result: Vec<u8> = cached_record.get("result");
         let cached_created: i64 = cached_record.get("created");
         let cached_updated: i64 = cached_record.get("updated");

         assert_eq!(cached_key, expected_key);
-        assert!(!cached_result.is_empty(), "Cached error result should not be empty");
+        assert!(
+            !cached_result.is_empty(),
+            "Cached error result should not be empty"
+        );
         assert!(cached_created > 0, "Created timestamp should be positive");
         assert!(cached_updated > 0, "Updated timestamp should be positive");
-        assert_eq!(cached_created, cached_updated, "Created and updated should be equal on first insert");
+        assert_eq!(
+            cached_created, cached_updated,
+            "Created and updated should be equal on first insert"
+        );

         // Verify we can deserialize the cached error result
-        let resolution_result = crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
-            .expect("Failed to deserialize cached error result");
+        let resolution_result =
+            crate::handle_resolution_result::HandleResolutionResult::from_bytes(&cached_result)
+                .expect("Failed to deserialize cached error result");
         let cached_did = resolution_result.to_did();
         assert!(cached_did.is_none(), "Error result should have no DID");

         // Second resolution - should hit cache with error (no additional database operations)
         let result2 = sqlite_resolver.resolve(test_handle).await;
         assert!(result2.is_err());

         // Match the specific error type we expect from cache
         match result2 {
-            Err(HandleResolverError::HandleNotFound) => {}, // Cache returns HandleNotFound for "not resolved"
+            Err(HandleResolverError::HandleNotFound) => {} // Cache returns HandleNotFound for "not resolved"
             other => panic!("Expected HandleNotFound from cache, got {:?}", other),
         }

         // Verify count hasn't changed (cache hit, no new operations)
-        let count_after_second: i64 = sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
-            .fetch_one(&pool)
-            .await
-            .expect("Failed to query count after second resolution");
+        let count_after_second: i64 =
+            sqlx::query_scalar("SELECT COUNT(*) FROM handle_resolution_cache")
+                .fetch_one(&pool)
+                .await
+                .expect("Failed to query count after second resolution");
         assert_eq!(count_after_second, 1);
     }
 }
src/handle_resolver/traits.rs (+105 -5)
···
 /// ```no_run
 /// use async_trait::async_trait;
 /// use quickdid::handle_resolver::{HandleResolver, HandleResolverError};
+/// use std::time::{SystemTime, UNIX_EPOCH};
 ///
 /// struct MyResolver;
 ///
 /// #[async_trait]
 /// impl HandleResolver for MyResolver {
-///     async fn resolve(&self, s: &str) -> Result<String, HandleResolverError> {
+///     async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
 ///         // Custom resolution logic
-///         Ok(format!("did:plc:{}", s.replace('.', "")))
+///         let did = format!("did:plc:{}", s.replace('.', ""));
+///         let timestamp = SystemTime::now()
+///             .duration_since(UNIX_EPOCH)
+///             .unwrap()
+///             .as_secs();
+///         Ok((did, timestamp))
 ///     }
 /// }
 /// ```
 #[async_trait]
 pub trait HandleResolver: Send + Sync {
-    /// Resolve a handle to its DID.
+    /// Resolve a handle to its DID with timestamp.
     ///
     /// # Arguments
     ///
···
     ///
     /// # Returns
     ///
-    /// The resolved DID on success, or an error if resolution fails.
+    /// A tuple containing:
+    /// - The resolved DID string
+    /// - The resolution timestamp as UNIX epoch seconds
     ///
     /// # Errors
     ///
···
     /// - The handle cannot be resolved
     /// - Network errors occur during resolution
     /// - The handle is invalid or doesn't exist
-    async fn resolve(&self, s: &str) -> Result<String, HandleResolverError>;
+    async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError>;
+
+    /// Purge a handle or DID from the cache.
+    ///
+    /// This method removes cached entries for the given subject, which can be
+    /// either a handle (e.g., "alice.bsky.social") or a DID (e.g., "did:plc:xyz123").
+    /// Implementations should handle bidirectional purging where applicable.
+    ///
+    /// # Arguments
+    ///
+    /// * `subject` - Either a handle or DID to purge from cache
+    ///
+    /// # Returns
+    ///
+    /// Ok(()) if the purge was successful or if the subject wasn't cached.
+    /// Most implementations will simply return Ok(()) as a no-op.
+    ///
+    /// # Default Implementation
+    ///
+    /// The default implementation is a no-op that always returns Ok(()).
+    async fn purge(&self, _subject: &str) -> Result<(), HandleResolverError> {
+        Ok(())
+    }
+
+    /// Set a handle-to-DID mapping in the cache.
+    ///
+    /// This method allows manually setting or updating a cached mapping between
+    /// a handle and its corresponding DID. This is useful for pre-populating
+    /// caches or updating stale entries.
+    ///
+    /// # Arguments
+    ///
+    /// * `handle` - The handle to cache (e.g., "alice.bsky.social")
+    /// * `did` - The DID to associate with the handle (e.g., "did:plc:xyz123")
+    ///
+    /// # Returns
+    ///
+    /// Ok(()) if the mapping was successfully set or if the implementation
+    /// doesn't support manual cache updates. Most implementations will simply
+    /// return Ok(()) as a no-op.
+    ///
+    /// # Default Implementation
+    ///
+    /// The default implementation is a no-op that always returns Ok(()).
+    async fn set(&self, _handle: &str, _did: &str) -> Result<(), HandleResolverError> {
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Simple test resolver that doesn't cache anything
+    struct NoOpTestResolver;
+
+    #[async_trait]
+    impl HandleResolver for NoOpTestResolver {
+        async fn resolve(&self, _s: &str) -> Result<(String, u64), HandleResolverError> {
+            Ok(("did:test:123".to_string(), 1234567890))
+        }
+        // Uses default purge and set implementations
+    }
+
+    #[tokio::test]
+    async fn test_default_purge_implementation() {
+        let resolver = NoOpTestResolver;
+
+        // Default implementation should always return Ok(())
+        assert!(resolver.purge("alice.bsky.social").await.is_ok());
+        assert!(resolver.purge("did:plc:xyz123").await.is_ok());
+        assert!(resolver.purge("").await.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_default_set_implementation() {
+        let resolver = NoOpTestResolver;
+
+        // Default implementation should always return Ok(())
+        assert!(
+            resolver
+                .set("alice.bsky.social", "did:plc:xyz123")
+                .await
+                .is_ok()
+        );
+        assert!(
+            resolver
+                .set("bob.example.com", "did:web:example.com")
+                .await
+                .is_ok()
+        );
+        assert!(resolver.set("", "").await.is_ok());
+    }
 }
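
The default no-op `purge` and `set` above let leaf resolvers ignore cache maintenance entirely, while caching layers override both and chain the call to their inner resolver so a purge or update propagates through the whole stack, as the changelog describes. A minimal sketch of that chaining pattern, assuming only the trait as defined in this diff (the `MemoryCachingResolver` type is a hypothetical illustration, not part of the crate):

    use async_trait::async_trait;
    use quickdid::handle_resolver::{HandleResolver, HandleResolverError};
    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};
    use std::time::{SystemTime, UNIX_EPOCH};

    /// Hypothetical in-memory caching layer that chains purge/set through the stack.
    struct MemoryCachingResolver {
        inner: Arc<dyn HandleResolver>,
        // lowercase handle -> (did, resolved-at epoch seconds)
        cache: Mutex<HashMap<String, (String, u64)>>,
    }

    #[async_trait]
    impl HandleResolver for MemoryCachingResolver {
        async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
            let key = s.to_lowercase(); // handles are normalized to lowercase in this release
            if let Some(hit) = self.cache.lock().unwrap().get(&key).cloned() {
                return Ok(hit);
            }
            let (did, ts) = self.inner.resolve(&key).await?;
            self.cache.lock().unwrap().insert(key, (did.clone(), ts));
            Ok((did, ts))
        }

        async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
            let key = subject.to_lowercase();
            // Drop local entries whether the subject matches a handle or a DID value...
            self.cache
                .lock()
                .unwrap()
                .retain(|handle, entry| handle != &key && entry.0 != key);
            // ...then chain the purge to the inner resolver.
            self.inner.purge(subject).await
        }

        async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
            let now = SystemTime::now()
                .duration_since(UNIX_EPOCH)
                .unwrap_or_default()
                .as_secs();
            self.cache
                .lock()
                .unwrap()
                .insert(handle.to_lowercase(), (did.to_string(), now));
            self.inner.set(handle, did).await
        }
    }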
src/handle_resolver_task.rs (+144 -93)
···
 //! and ensures resolved handles are cached for efficient subsequent lookups.

 use crate::handle_resolver::HandleResolver;
+use crate::metrics::SharedMetricsPublisher;
 use crate::queue::{HandleResolutionWork, QueueAdapter};
 use anyhow::Result;
 use std::sync::Arc;
···
 /// Handle resolver task errors
 #[derive(Error, Debug)]
 pub(crate) enum HandleResolverError {
-    #[error("Queue adapter health check failed: adapter is not healthy")]
+    /// Queue adapter health check failed
+    #[error("error-quickdid-task-1 Queue adapter health check failed: adapter is not healthy")]
     QueueAdapterUnhealthy,
 }
···
     }
 }

-/// Metrics for handle resolution processing
-#[derive(Debug, Default)]
-pub(crate) struct HandleResolverMetrics {
-    pub total_processed: std::sync::atomic::AtomicU64,
-    pub total_succeeded: std::sync::atomic::AtomicU64,
-    pub total_failed: std::sync::atomic::AtomicU64,
-    pub total_cached: std::sync::atomic::AtomicU64,
-}
-
 /// Handle resolver task processor
 pub(crate) struct HandleResolverTask {
     adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
     handle_resolver: Arc<dyn HandleResolver>,
     cancel_token: CancellationToken,
     config: HandleResolverTaskConfig,
-    metrics: Arc<HandleResolverMetrics>,
+    metrics_publisher: SharedMetricsPublisher,
 }

 impl HandleResolverTask {
···
         adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
         handle_resolver: Arc<dyn HandleResolver>,
         cancel_token: CancellationToken,
+        metrics_publisher: SharedMetricsPublisher,
     ) -> Self {
         let config = HandleResolverTaskConfig::default();
         Self {
···
             handle_resolver,
             cancel_token,
             config,
-            metrics: Arc::new(HandleResolverMetrics::default()),
+            metrics_publisher,
         }
     }
···
         handle_resolver: Arc<dyn HandleResolver>,
         cancel_token: CancellationToken,
         config: HandleResolverTaskConfig,
+        metrics_publisher: SharedMetricsPublisher,
     ) -> Self {
         Self {
             adapter,
             handle_resolver,
             cancel_token,
             config,
-            metrics: Arc::new(HandleResolverMetrics::default()),
+            metrics_publisher,
         }
     }
···
         // All work has been processed
         info!("All handle resolutions completed");
+        info!("Handle resolver task processor stopped");

-        info!(
-            total_processed = self
-                .metrics
-                .total_processed
-                .load(std::sync::atomic::Ordering::Relaxed),
-            total_succeeded = self
-                .metrics
-                .total_succeeded
-                .load(std::sync::atomic::Ordering::Relaxed),
-            total_failed = self
-                .metrics
-                .total_failed
-                .load(std::sync::atomic::Ordering::Relaxed),
-            total_cached = self
-                .metrics
-                .total_cached
-                .load(std::sync::atomic::Ordering::Relaxed),
-            "Handle resolver task processor stopped"
-        );
+        Ok(())
+    }

-        Ok(())
+    /// Check if an error represents a soft failure (handle not found)
+    /// rather than a real error condition.
+    ///
+    /// These atproto_identity library errors indicate the handle doesn't support
+    /// the specific resolution method, which is normal and expected:
+    /// - error-atproto-identity-resolve-4: DNS resolution failed (no records)
+    /// - error-atproto-identity-resolve-5: HTTP resolution failed (hostname not found)
+    /// - error-atproto-identity-resolve-6: HTTP resolution returned a non-DID response
+    fn is_soft_failure(error_str: &str) -> bool {
+        // Check for specific atproto_identity error codes that indicate "not found"
+        // rather than actual failures
+        if error_str.starts_with("error-atproto-identity-resolve-4") {
+            // DNS resolution - check if it's a "no records" scenario
+            error_str.contains("NoRecordsFound")
+        } else if error_str.starts_with("error-atproto-identity-resolve-6") {
+            // HTTP resolution - check if it's a DID format issue
+            error_str.contains("expected DID format")
+        } else if error_str.starts_with("error-atproto-identity-resolve-5") {
+            // HTTP resolution - check if it's a hostname lookup failure
+            error_str.contains("No address associated with hostname")
+                || error_str.contains("failed to lookup address information")
+        } else {
+            false
+        }
     }

     /// Process a single handle resolution work item
···
         let duration_ms = start_time.elapsed().as_millis() as u64;

-        // Update metrics
-        self.metrics
-            .total_processed
-            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+        // Publish metrics
+        self.metrics_publisher
+            .incr("task.handle_resolution.processed")
+            .await;
+        self.metrics_publisher
+            .time("task.handle_resolution.duration_ms", duration_ms)
+            .await;

         match result {
-            Ok(Ok(did)) => {
-                self.metrics
-                    .total_succeeded
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
-                self.metrics
-                    .total_cached
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+            Ok(Ok((did, _timestamp))) => {
+                // Publish success metrics
+                self.metrics_publisher
+                    .incr("task.handle_resolution.success")
+                    .await;
+                self.metrics_publisher
+                    .incr("task.handle_resolution.cached")
+                    .await;

-                info!(
+                debug!(
                     handle = %work.handle,
                     did = %did,
                     duration_ms = duration_ms,
···
                 );
             }
             Ok(Err(e)) => {
-                self.metrics
-                    .total_failed
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+                let error_str = e.to_string();
+
+                if Self::is_soft_failure(&error_str) {
+                    // This is a soft failure - the handle simply doesn't support
+                    // this resolution method; publish not-found metrics
+                    self.metrics_publisher
+                        .incr("task.handle_resolution.not_found")
+                        .await;
+
+                    debug!(
+                        handle = %work.handle,
+                        error = %error_str,
+                        duration_ms = duration_ms,
+                        "Handle not found (soft failure)"
+                    );
+                } else {
+                    // This is a real error; publish failure metrics
+                    self.metrics_publisher
+                        .incr("task.handle_resolution.failed")
+                        .await;

-                error!(
-                    handle = %work.handle,
-                    error = %e,
-                    duration_ms = duration_ms,
-                    "Handle resolution failed"
-                );
+                    error!(
+                        handle = %work.handle,
+                        error = %error_str,
+                        duration_ms = duration_ms,
+                        "Handle resolution failed"
+                    );
+                }
             }
             Err(_) => {
-                self.metrics
-                    .total_failed
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+                // Publish timeout metrics
+                self.metrics_publisher
+                    .incr("task.handle_resolution.timeout")
+                    .await;

                 error!(
                     handle = %work.handle,
···
 /// * `adapter` - Queue adapter for work items
 /// * `handle_resolver` - Handle resolver implementation
 /// * `cancel_token` - Token for graceful shutdown
+/// * `metrics_publisher` - Metrics publisher for telemetry
 pub fn create_handle_resolver_task(
     adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
     handle_resolver: Arc<dyn HandleResolver>,
     cancel_token: CancellationToken,
+    metrics_publisher: SharedMetricsPublisher,
 ) -> HandleResolverTaskHandle {
     HandleResolverTaskHandle {
-        task: HandleResolverTask::new(adapter, handle_resolver, cancel_token),
+        task: HandleResolverTask::new(adapter, handle_resolver, cancel_token, metrics_publisher),
     }
 }
···
 /// * `handle_resolver` - Handle resolver implementation
 /// * `cancel_token` - Token for graceful shutdown
 /// * `config` - Task configuration
+/// * `metrics_publisher` - Metrics publisher for telemetry
 pub fn create_handle_resolver_task_with_config(
     adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
     handle_resolver: Arc<dyn HandleResolver>,
     cancel_token: CancellationToken,
     config: HandleResolverTaskConfig,
+    metrics_publisher: SharedMetricsPublisher,
 ) -> HandleResolverTaskHandle {
     HandleResolverTaskHandle {
-        task: HandleResolverTask::with_config(adapter, handle_resolver, cancel_token, config),
+        task: HandleResolverTask::with_config(
+            adapter,
+            handle_resolver,
+            cancel_token,
+            config,
+            metrics_publisher,
+        ),
     }
 }
···
     async fn resolve(
         &self,
         handle: &str,
-    ) -> Result<String, crate::handle_resolver::HandleResolverError> {
+    ) -> Result<(String, u64), crate::handle_resolver::HandleResolverError> {
         if self.should_fail {
             Err(crate::handle_resolver::HandleResolverError::MockResolutionFailure)
         } else {
-            Ok(format!("did:plc:{}", handle.replace('.', "")))
+            let timestamp = std::time::SystemTime::now()
+                .duration_since(std::time::UNIX_EPOCH)
+                .unwrap_or_default()
+                .as_secs();
+            Ok((format!("did:plc:{}", handle.replace('.', "")), timestamp))
         }
     }
 }
···
         // Create cancellation token
         let cancel_token = CancellationToken::new();
+
+        // Create metrics publisher
+        let metrics_publisher = Arc::new(crate::metrics::NoOpMetricsPublisher);

         // Create task with custom config
         let config = HandleResolverTaskConfig {
···
             handle_resolver,
             cancel_token.clone(),
             config,
+            metrics_publisher,
         );

         // Create handle resolution work
···
         // Send work to queue
         sender.send(work).await.unwrap();
-
-        // Get metrics reference before moving task
-        let metrics = task.metrics.clone();

         // Run task for a short time
         let task_handle = tokio::spawn(async move { task.run().await });
···
         // Wait for task to complete
         let _ = task_handle.await;

-        // Verify metrics
-        assert_eq!(
-            metrics
-                .total_processed
-                .load(std::sync::atomic::Ordering::Relaxed),
-            1
-        );
-        assert_eq!(
-            metrics
-                .total_succeeded
-                .load(std::sync::atomic::Ordering::Relaxed),
-            1
-        );
+        // Test passes if task runs without panic
     }

-    #[tokio::test]
-    async fn test_handle_resolver_metrics() {
-        use std::sync::atomic::Ordering;
+    #[test]
+    fn test_is_soft_failure() {
+        // Test DNS NoRecordsFound pattern (error-atproto-identity-resolve-4)
+        let dns_no_records = "error-atproto-identity-resolve-4 DNS resolution failed: ResolveError { kind: Proto(ProtoError { kind: NoRecordsFound { query: Query { name: Name(\"_atproto.noahshachtman.bsky.social.railway.internal.\"), query_type: TXT, query_class: IN }, soa: None, ns: None, negative_ttl: None, response_code: NotImp, trusted: true, authorities: None } }) }";
+        assert!(HandleResolverTask::is_soft_failure(dns_no_records));
+
+        // Test HTTP hostname not found pattern (error-atproto-identity-resolve-5)
+        let http_no_hostname = "error-atproto-identity-resolve-5 HTTP resolution failed: reqwest::Error { kind: Request, url: \"https://mattie.thegem.city/.well-known/atproto-did\", source: hyper_util::client::legacy::Error(Connect, ConnectError(\"dns error\", Custom { kind: Uncategorized, error: \"failed to lookup address information: No address associated with hostname\" })) }";
+        assert!(HandleResolverTask::is_soft_failure(http_no_hostname));
+
+        // Test alternate HTTP hostname failure message
+        let http_lookup_failed = "error-atproto-identity-resolve-5 HTTP resolution failed: reqwest::Error { kind: Request, url: \"https://example.com/.well-known/atproto-did\", source: hyper_util::client::legacy::Error(Connect, ConnectError(\"dns error\", Custom { kind: Uncategorized, error: \"failed to lookup address information\" })) }";
+        assert!(HandleResolverTask::is_soft_failure(http_lookup_failed));
+
+        // Test HTTP invalid DID format (error-atproto-identity-resolve-6) - like reuters.com
+        let http_invalid_did = "error-atproto-identity-resolve-6 Invalid HTTP resolution response: expected DID format";
+        assert!(HandleResolverTask::is_soft_failure(http_invalid_did));
+
+        // Test weratedogs.com case
+        let weratedogs_error = "error-atproto-identity-resolve-6 Invalid HTTP resolution response: expected DID format";
+        assert!(HandleResolverTask::is_soft_failure(weratedogs_error));
+
+        // Test DNS error that is NOT a soft failure (different DNS error)
+        let dns_real_error = "error-atproto-identity-resolve-4 DNS resolution failed: timeout";
+        assert!(!HandleResolverTask::is_soft_failure(dns_real_error));

-        let metrics = HandleResolverMetrics::default();
+        // Test HTTP error that is NOT a soft failure (connection timeout)
+        let http_timeout =
+            "error-atproto-identity-resolve-5 HTTP resolution failed: connection timeout";
+        assert!(!HandleResolverTask::is_soft_failure(http_timeout));

-        // Test initial values
-        assert_eq!(metrics.total_processed.load(Ordering::Relaxed), 0);
-        assert_eq!(metrics.total_succeeded.load(Ordering::Relaxed), 0);
-        assert_eq!(metrics.total_failed.load(Ordering::Relaxed), 0);
-        assert_eq!(metrics.total_cached.load(Ordering::Relaxed), 0);
+        // Test HTTP error that is NOT a soft failure (500 error)
+        let http_500 = "error-atproto-identity-resolve-5 HTTP resolution failed: status code 500";
+        assert!(!HandleResolverTask::is_soft_failure(http_500));

-        // Test incrementing
-        metrics.total_processed.fetch_add(1, Ordering::Relaxed);
-        metrics.total_succeeded.fetch_add(1, Ordering::Relaxed);
-        metrics.total_cached.fetch_add(1, Ordering::Relaxed);
+        // Test QuickDID errors should never be soft failures
+        let quickdid_error =
+            "error-quickdid-resolve-1 Failed to resolve subject: internal server error";
+        assert!(!HandleResolverTask::is_soft_failure(quickdid_error));

-        assert_eq!(metrics.total_processed.load(Ordering::Relaxed), 1);
-        assert_eq!(metrics.total_succeeded.load(Ordering::Relaxed), 1);
-        assert_eq!(metrics.total_cached.load(Ordering::Relaxed), 1);
+        // Test other atproto_identity error codes should not be soft failures
+        let other_atproto_error = "error-atproto-identity-resolve-1 Some other error";
+        assert!(!HandleResolverTask::is_soft_failure(other_atproto_error));
     }
 }
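
With the in-process counters gone, callers construct the task with whatever publisher they have; the no-op publisher used in the test above is enough when telemetry is disabled. A wiring sketch under the assumption that the factory and publisher are exported at these module paths (adapter and resolver construction elided with `todo!()`):

    use std::sync::Arc;
    use tokio_util::sync::CancellationToken;

    // Module paths below are assumptions based on this diff, not confirmed exports.
    use quickdid::handle_resolver::HandleResolver;
    use quickdid::handle_resolver_task::create_handle_resolver_task;
    use quickdid::metrics::NoOpMetricsPublisher;
    use quickdid::queue::{HandleResolutionWork, QueueAdapter};

    async fn spawn_task() {
        let adapter: Arc<dyn QueueAdapter<HandleResolutionWork>> = todo!();
        let resolver: Arc<dyn HandleResolver> = todo!();
        let cancel_token = CancellationToken::new();

        // A no-op publisher satisfies the new required parameter when telemetry is off.
        let _handle = create_handle_resolver_task(
            adapter,
            resolver,
            cancel_token.clone(),
            Arc::new(NoOpMetricsPublisher),
        );

        // ... drive the task, then cancel on shutdown:
        cancel_token.cancel();
    }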
src/http/handle_xrpc_resolve_handle.rs (+289 -28)
···
 use std::sync::Arc;
+use std::time::{Duration, SystemTime, UNIX_EPOCH};

 use crate::{
     handle_resolver::HandleResolver,
+    http::AppContext,
+    metrics::SharedMetricsPublisher,
     queue::{HandleResolutionWork, QueueAdapter},
 };

 use atproto_identity::resolve::{InputType, parse_input};
 use axum::{
     extract::{Query, State},
-    http::StatusCode,
-    response::{IntoResponse, Json},
+    http::{HeaderMap, HeaderValue, StatusCode, header},
+    response::{IntoResponse, Json, Response},
 };
+use metrohash::MetroHash64;
 use serde::{Deserialize, Serialize};
+use std::hash::Hasher;

 #[derive(Deserialize)]
 pub(super) struct ResolveHandleParams {
···
     message: String,
 }

+/// Represents the result of a handle resolution
+enum ResolutionResult {
+    Success {
+        did: String,
+        timestamp: u64,
+        etag: String,
+    },
+    Error {
+        error: String,
+        message: String,
+        timestamp: u64,
+        etag: String,
+    },
+}
+
+struct ResolutionResultView {
+    result: ResolutionResult,
+    cache_control: Option<String>,
+    if_none_match: Option<HeaderValue>,
+    if_modified_since: Option<HeaderValue>,
+}
+
+impl IntoResponse for ResolutionResultView {
+    fn into_response(self) -> Response {
+        let (last_modified, etag) = match &self.result {
+            ResolutionResult::Success {
+                timestamp, etag, ..
+            } => (*timestamp, etag),
+            ResolutionResult::Error {
+                timestamp, etag, ..
+            } => (*timestamp, etag),
+        };
+
+        let mut headers = HeaderMap::new();
+
+        // WARNING: this swallows errors
+        if let Ok(etag_value) = HeaderValue::from_str(etag) {
+            headers.insert(header::ETAG, etag_value);
+        }
+
+        // Add Last-Modified header
+        let last_modified_date = format_http_date(last_modified);
+        // WARNING: this swallows errors
+        if let Ok(last_modified_value) = HeaderValue::from_str(&last_modified_date) {
+            headers.insert(header::LAST_MODIFIED, last_modified_value);
+        }
+
+        // Add Cache-Control header if configured
+        if let Some(cache_control) = &self.cache_control {
+            // WARNING: this swallows errors
+            if let Ok(cache_control_value) = HeaderValue::from_str(cache_control) {
+                headers.insert(header::CACHE_CONTROL, cache_control_value);
+            }
+        }
+
+        headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
+        headers.insert(
+            header::ACCESS_CONTROL_ALLOW_HEADERS,
+            HeaderValue::from_static("*"),
+        );
+        headers.insert(
+            header::ACCESS_CONTROL_ALLOW_METHODS,
+            HeaderValue::from_static("GET, HEAD, OPTIONS"),
+        );
+        headers.insert(
+            header::ACCESS_CONTROL_ALLOW_ORIGIN,
+            HeaderValue::from_static("*"),
+        );
+        headers.insert(
+            header::ACCESS_CONTROL_EXPOSE_HEADERS,
+            HeaderValue::from_static("*"),
+        );
+        headers.insert(
+            header::ACCESS_CONTROL_MAX_AGE,
+            HeaderValue::from_static("86400"),
+        );
+        headers.insert(
+            "Access-Control-Request-Headers",
+            HeaderValue::from_static("*"),
+        );
+        headers.insert(
+            "Access-Control-Request-Method",
+            HeaderValue::from_static("GET"),
+        );
+
+        if let ResolutionResult::Success { .. } = self.result {
+            let fresh = self
+                .if_modified_since
+                .and_then(|inner_header_value| match inner_header_value.to_str() {
+                    Ok(value) => Some(value.to_string()),
+                    Err(_) => None,
+                })
+                .and_then(|inner_str_value| parse_http_date(&inner_str_value))
+                .is_some_and(|inner_if_modified_since| last_modified <= inner_if_modified_since);
+
+            if fresh {
+                return (StatusCode::NOT_MODIFIED, headers).into_response();
+            }
+        }
+
+        let fresh = self
+            .if_none_match
+            .is_some_and(|if_none_match_value| if_none_match_value == etag);
+        if fresh {
+            return (StatusCode::NOT_MODIFIED, headers).into_response();
+        }
+
+        match &self.result {
+            ResolutionResult::Success { did, .. } => (
+                StatusCode::OK,
+                headers,
+                Json(ResolveHandleResponse { did: did.clone() }),
+            )
+                .into_response(),
+            ResolutionResult::Error { error, message, .. } => (
+                StatusCode::BAD_REQUEST,
+                headers,
+                Json(ErrorResponse {
+                    error: error.clone(),
+                    message: message.clone(),
+                }),
+            )
+                .into_response(),
+        }
+    }
+}
+
+/// Calculate a weak ETag for the given content using MetroHash64 with a seed
+fn calculate_etag(content: &str, seed: &str) -> String {
+    let mut hasher = MetroHash64::new();
+    hasher.write(seed.as_bytes());
+    hasher.write(content.as_bytes());
+    let hash = hasher.finish();
+    format!("W/\"{:x}\"", hash)
+}
+
+/// Format a UNIX timestamp as an HTTP date string (RFC 7231)
+fn format_http_date(timestamp: u64) -> String {
+    let system_time = UNIX_EPOCH + Duration::from_secs(timestamp);
+    httpdate::fmt_http_date(system_time)
+}
+
+/// Parse an HTTP date string (RFC 7231) into a UNIX timestamp
+fn parse_http_date(date_str: &str) -> Option<u64> {
+    httpdate::parse_http_date(date_str)
+        .ok()
+        .and_then(|system_time| system_time.duration_since(UNIX_EPOCH).ok())
+        .map(|duration| duration.as_secs())
+}
+
 pub(super) async fn handle_xrpc_resolve_handle(
+    headers: HeaderMap,
     Query(params): Query<ResolveHandleParams>,
+    State(app_context): State<AppContext>,
     State(handle_resolver): State<Arc<dyn HandleResolver>>,
     State(queue): State<Arc<dyn QueueAdapter<HandleResolutionWork>>>,
-) -> Result<impl IntoResponse, (StatusCode, Json<ErrorResponse>)> {
+    State(metrics): State<SharedMetricsPublisher>,
+) -> impl IntoResponse {
+    let validating = params.validate.is_some();
+    let queueing = params.queue.is_some();
+
     // Validate that handle is provided
     let handle = match params.handle {
         Some(h) => h,
         None => {
-            return Err((
+            metrics
+                .incr_with_tags(
+                    "xrpc.com.atproto.identity.resolveHandle.invalid_handle",
+                    &[("reason", "missing")],
+                )
+                .await;
+            return (
                 StatusCode::BAD_REQUEST,
                 Json(ErrorResponse {
                     error: "InvalidRequest".to_string(),
                     message: "Error: Params must have the property \"handle\"".to_string(),
                 }),
-            ));
+            )
+                .into_response();
         }
     };

     // Validate that the input is a handle and not a DID
     let handle = match parse_input(&handle) {
-        Ok(InputType::Handle(value)) => value,
+        Ok(InputType::Handle(value)) => value.to_lowercase(),
         Ok(InputType::Plc(_)) | Ok(InputType::Web(_)) => {
             // It's a DID, not a handle
-            return Err((
+            metrics
+                .incr_with_tags(
+                    "xrpc.com.atproto.identity.resolveHandle.invalid_handle",
+                    &[("reason", "did")],
+                )
+                .await;
+            return (
                 StatusCode::BAD_REQUEST,
                 Json(ErrorResponse {
                     error: "InvalidRequest".to_string(),
                     message: "Error: handle must be a valid handle".to_string(),
                 }),
-            ));
+            )
+                .into_response();
         }
         Err(_) => {
-            return Err((
+            metrics
+                .incr_with_tags(
+                    "xrpc.com.atproto.identity.resolveHandle.invalid_handle",
+                    &[("reason", "error")],
+                )
+                .await;
+            return (
                 StatusCode::BAD_REQUEST,
                 Json(ErrorResponse {
                     error: "InvalidRequest".to_string(),
                     message: "Error: handle must be a valid handle".to_string(),
                 }),
-            ));
+            )
+                .into_response();
         }
     };

-    if params.validate.is_some() {
-        return Ok(StatusCode::NO_CONTENT.into_response());
+    if validating {
+        metrics
+            .incr("xrpc.com.atproto.identity.resolveHandle")
+            .await;
+        return StatusCode::NO_CONTENT.into_response();
     }

-    if params.queue.is_some() {
+    if queueing {
         // Create work item
         let work = HandleResolutionWork::new(handle.clone());

         // Queue the work
         match queue.push(work).await {
             Ok(()) => {
+                metrics
+                    .incr("xrpc.com.atproto.identity.resolveHandle")
+                    .await;
                 tracing::debug!("Queued handle resolution for {}", handle);
             }
             Err(e) => {
+                metrics
+                    .incr("xrpc.com.atproto.identity.resolveHandle.queue_failure")
+                    .await;
                 tracing::error!("Failed to queue handle resolution: {}", e);
             }
         }

-        return Ok(StatusCode::NO_CONTENT.into_response());
+        return StatusCode::NO_CONTENT.into_response();
     }

-    tracing::debug!("Resolving handle: {}", handle);
+    tracing::debug!(handle, "Resolving handle");
+
+    // Get conditional request headers
+    let if_none_match = headers.get(header::IF_NONE_MATCH).cloned();
+    let if_modified_since = headers.get(header::IF_MODIFIED_SINCE).cloned();
+
+    // Perform the resolution and build the response
+    let result = match handle_resolver.resolve(&handle).await {
+        Ok((did, timestamp)) => {
+            tracing::debug!(handle, did, "Found cached DID for handle");

-    match handle_resolver.resolve(&handle).await {
-        Ok(did) => {
-            tracing::debug!("Found cached DID for handle {}: {}", handle, did);
-            Ok(Json(ResolveHandleResponse { did }).into_response())
+            metrics
+                .incr_with_tags("handle.resolution.request", &[("success", "1")])
+                .await;
+
+            let etag = calculate_etag(&did, app_context.etag_seed());
+            ResolutionResult::Success {
+                did,
+                timestamp,
+                etag,
+            }
         }
-        Err(_) => {
-            // {"error":"InvalidRequest","message":"Unable to resolve handle"}
-            Err((
-                StatusCode::BAD_REQUEST,
-                Json(ErrorResponse {
-                    error: "InvalidRequest".to_string(),
-                    message: "Unable to resolve handle".to_string(),
-                }),
-            ))
+        Err(err) => {
+            tracing::debug!(error = ?err, handle, "Error resolving handle");
+            metrics
+                .incr_with_tags("handle.resolution.request", &[("success", "0")])
+                .await;
+            let error_content = format!("error:{}:{}", handle, err);
+            let etag = calculate_etag(&error_content, app_context.etag_seed());
+            let timestamp = SystemTime::now()
+                .duration_since(UNIX_EPOCH)
+                .unwrap_or_default()
+                .as_secs();
+            ResolutionResult::Error {
+                error: "InvalidRequest".to_string(),
+                message: "Unable to resolve handle".to_string(),
+                timestamp,
+                etag,
+            }
         }
+    };
+
+    ResolutionResultView {
+        result,
+        cache_control: app_context.cache_control_header().map(|s| s.to_string()),
+        if_none_match,
+        if_modified_since,
     }
+    .into_response()
+}
+
+pub(super) async fn handle_xrpc_resolve_handle_options() -> impl IntoResponse {
+    let mut headers = HeaderMap::new();
+
+    // Add CORS and Allow headers for OPTIONS request
+    headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
+    headers.insert(
+        header::ACCESS_CONTROL_ALLOW_HEADERS,
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_ALLOW_METHODS,
+        HeaderValue::from_static("GET, HEAD, OPTIONS"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_ALLOW_ORIGIN,
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_EXPOSE_HEADERS,
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_MAX_AGE,
+        HeaderValue::from_static("86400"),
+    );
+    headers.insert(
+        "Access-Control-Request-Headers",
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        "Access-Control-Request-Method",
+        HeaderValue::from_static("GET"),
+    );
+
+    (StatusCode::NO_CONTENT, headers)
 }
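
Because the handler now emits a weak ETag plus a Last-Modified date and honors `If-None-Match`/`If-Modified-Since`, clients can revalidate instead of re-downloading. A client-side sketch (assuming `reqwest` with its `json` feature as the HTTP client; the host is a placeholder, the path comes from the router in this diff):

    use reqwest::{Client, StatusCode};

    async fn revalidate(etag_from_last_response: &str) -> anyhow::Result<()> {
        let resp = Client::new()
            .get("https://quickdid.example.com/xrpc/com.atproto.identity.resolveHandle")
            .query(&[("handle", "alice.bsky.social")])
            .header("If-None-Match", etag_from_last_response)
            .send()
            .await?;

        if resp.status() == StatusCode::NOT_MODIFIED {
            // 304: the cached DID is still fresh; the response has no body.
            return Ok(());
        }

        // 200: a fresh resolution; the new ETag arrives in the response headers.
        let body: serde_json::Value = resp.json().await?;
        println!("resolved did: {}", body["did"]);
        Ok(())
    }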
src/http/handle_xrpc_resolve_lexicon.rs (+126)
···
+use std::sync::Arc;
+
+use atproto_lexicon::resolve::LexiconResolver;
+use axum::{
+    extract::{Query, State},
+    http::{HeaderMap, HeaderValue, StatusCode, header},
+    response::{IntoResponse, Json},
+};
+use serde::{Deserialize, Serialize};
+
+use crate::metrics::SharedMetricsPublisher;
+
+#[derive(Deserialize)]
+pub(super) struct ResolveLexiconParams {
+    nsid: Option<String>,
+}
+
+#[derive(Serialize)]
+pub(super) struct ErrorResponse {
+    error: String,
+    message: String,
+}
+
+pub(super) async fn handle_xrpc_resolve_lexicon(
+    Query(params): Query<ResolveLexiconParams>,
+    State(lexicon_resolver): State<Arc<dyn LexiconResolver>>,
+    State(metrics): State<SharedMetricsPublisher>,
+) -> impl IntoResponse {
+    // Validate that nsid is provided
+    let nsid = match params.nsid {
+        Some(n) => n,
+        None => {
+            metrics
+                .incr_with_tags(
+                    "xrpc.com.atproto.lexicon.resolveLexicon.invalid_nsid",
+                    &[("reason", "missing")],
+                )
+                .await;
+            return (
+                StatusCode::BAD_REQUEST,
+                Json(ErrorResponse {
+                    error: "InvalidRequest".to_string(),
+                    message: "Error: Params must have the property \"nsid\"".to_string(),
+                }),
+            )
+                .into_response();
+        }
+    };
+
+    tracing::debug!(nsid, "Resolving lexicon");
+
+    // Perform the lexicon resolution
+    match lexicon_resolver.resolve(&nsid).await {
+        Ok(resolved) => {
+            tracing::debug!(nsid, "Successfully resolved lexicon");
+
+            metrics
+                .incr_with_tags("lexicon.resolution.request", &[("success", "1")])
+                .await;
+
+            let mut headers = HeaderMap::new();
+            add_cors_headers(&mut headers);
+
+            // The resolved value is already a serde_json::Value, so just return it as JSON
+            (StatusCode::OK, headers, Json(resolved)).into_response()
+        }
+        Err(err) => {
+            tracing::debug!(error = ?err, nsid, "Error resolving lexicon");
+
+            metrics
+                .incr_with_tags("lexicon.resolution.request", &[("success", "0")])
+                .await;
+
+            let mut headers = HeaderMap::new();
+            add_cors_headers(&mut headers);
+
+            (
+                StatusCode::BAD_REQUEST,
+                headers,
+                Json(ErrorResponse {
+                    error: "LexiconNotFound".to_string(),
+                    message: "No lexicon was resolved for the NSID".to_string(),
+                }),
+            )
+                .into_response()
+        }
+    }
+}
+
+pub(super) async fn handle_xrpc_resolve_lexicon_options() -> impl IntoResponse {
+    let mut headers = HeaderMap::new();
+    add_cors_headers(&mut headers);
+    (StatusCode::NO_CONTENT, headers)
+}
+
+fn add_cors_headers(headers: &mut HeaderMap) {
+    headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
+    headers.insert(
+        header::ACCESS_CONTROL_ALLOW_HEADERS,
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_ALLOW_METHODS,
+        HeaderValue::from_static("GET, HEAD, OPTIONS"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_ALLOW_ORIGIN,
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_EXPOSE_HEADERS,
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        header::ACCESS_CONTROL_MAX_AGE,
+        HeaderValue::from_static("86400"),
+    );
+    headers.insert(
+        "Access-Control-Request-Headers",
+        HeaderValue::from_static("*"),
+    );
+    headers.insert(
+        "Access-Control-Request-Method",
+        HeaderValue::from_static("GET"),
+    );
+}
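
The handler above is generic over `atproto_lexicon`'s `LexiconResolver` trait; its signature can be read off the Redis implementation later in this diff. A minimal stub under that assumption, handy for exercising the endpoint in tests (`StaticLexiconResolver` and the sample NSID are hypothetical):

    use async_trait::async_trait;
    use atproto_lexicon::resolve::LexiconResolver;
    use serde_json::json;

    /// Hypothetical fixed-response resolver for tests.
    struct StaticLexiconResolver;

    #[async_trait]
    impl LexiconResolver for StaticLexiconResolver {
        async fn resolve(&self, nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
            if nsid == "com.example.test" {
                // A trimmed-down lexicon document shape, for illustration only.
                Ok(json!({ "lexicon": 1, "id": nsid, "defs": {} }))
            } else {
                Err(anyhow::anyhow!("unknown NSID: {}", nsid))
            }
        }
    }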
src/http/mod.rs (+1)

src/http/server.rs (+84 -45)
···
 use crate::handle_resolver::HandleResolver;
+use crate::metrics::SharedMetricsPublisher;
 use crate::queue::{HandleResolutionWork, QueueAdapter};
+use atproto_lexicon::resolve::LexiconResolver;
 use axum::{
     Router,
-    extract::State,
-    response::{Html, IntoResponse, Json, Response},
+    extract::{MatchedPath, State},
+    http::Request,
+    middleware::{self, Next},
+    response::{Json, Response},
     routing::get,
-    http::StatusCode,
 };
 use serde_json::json;
 use std::sync::Arc;
+use std::time::Instant;
+use tower_http::services::ServeDir;

 pub(crate) struct InnerAppContext {
-    pub(crate) service_document: serde_json::Value,
-    pub(crate) service_did: String,
     pub(crate) handle_resolver: Arc<dyn HandleResolver>,
     pub(crate) handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
+    pub(crate) lexicon_resolver: Arc<dyn LexiconResolver>,
+    pub(crate) metrics: SharedMetricsPublisher,
+    pub(crate) etag_seed: String,
+    pub(crate) cache_control_header: Option<String>,
+    pub(crate) static_files_dir: String,
 }

 #[derive(Clone)]
···
 impl AppContext {
     /// Create a new AppContext with the provided configuration.
     pub fn new(
-        service_document: serde_json::Value,
-        service_did: String,
         handle_resolver: Arc<dyn HandleResolver>,
         handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
+        lexicon_resolver: Arc<dyn LexiconResolver>,
+        metrics: SharedMetricsPublisher,
+        etag_seed: String,
+        cache_control_header: Option<String>,
+        static_files_dir: String,
     ) -> Self {
         Self(Arc::new(InnerAppContext {
-            service_document,
-            service_did,
             handle_resolver,
             handle_queue,
+            lexicon_resolver,
+            metrics,
+            etag_seed,
+            cache_control_header,
+            static_files_dir,
         }))
     }

     // Internal accessor methods for handlers
-    pub(super) fn service_document(&self) -> &serde_json::Value {
-        &self.0.service_document
+    pub(super) fn etag_seed(&self) -> &str {
+        &self.0.etag_seed
+    }
+
+    pub(super) fn cache_control_header(&self) -> Option<&str> {
+        self.0.cache_control_header.as_deref()
     }

-    pub(super) fn service_did(&self) -> &str {
-        &self.0.service_did
+    pub(super) fn static_files_dir(&self) -> &str {
+        &self.0.static_files_dir
     }
 }
···
     handle_queue,
     Arc<dyn QueueAdapter<HandleResolutionWork>>
 );
+impl_from_ref!(AppContext, lexicon_resolver, Arc<dyn LexiconResolver>);
+impl_from_ref!(AppContext, metrics, SharedMetricsPublisher);
+
+/// Middleware to track HTTP request metrics
+async fn metrics_middleware(
+    State(metrics): State<SharedMetricsPublisher>,
+    matched_path: Option<MatchedPath>,
+    request: Request<axum::body::Body>,
+    next: Next,
+) -> Response {
+    let start = Instant::now();
+    let method = request.method().to_string();
+    let path = matched_path
+        .as_ref()
+        .map(|p| p.as_str().to_string())
+        .unwrap_or_else(|| "unknown".to_string());
+
+    // Process the request
+    let response = next.run(request).await;
+
+    // Calculate duration
+    let duration_ms = start.elapsed().as_millis() as u64;
+    let status_code = response.status().as_u16().to_string();
+
+    // Publish metrics with tags
+    metrics
+        .time_with_tags(
+            "http.request.duration_ms",
+            duration_ms,
+            &[
+                ("method", &method),
+                ("path", &path),
+                ("status", &status_code),
+            ],
+        )
+        .await;
+
+    response
+}

 pub fn create_router(app_context: AppContext) -> Router {
+    let static_dir = app_context.static_files_dir().to_string();
+
     Router::new()
-        .route("/", get(handle_index))
-        .route("/.well-known/did.json", get(handle_wellknown_did_json))
-        .route(
-            "/.well-known/atproto-did",
-            get(handle_wellknown_atproto_did),
-        )
         .route("/xrpc/_health", get(handle_xrpc_health))
         .route(
             "/xrpc/com.atproto.identity.resolveHandle",
-            get(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle),
+            get(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle)
+                .options(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle_options),
         )
+        .route(
+            "/xrpc/com.atproto.lexicon.resolveLexicon",
+            get(super::handle_xrpc_resolve_lexicon::handle_xrpc_resolve_lexicon)
+                .options(super::handle_xrpc_resolve_lexicon::handle_xrpc_resolve_lexicon_options),
+        )
+        .fallback_service(ServeDir::new(static_dir))
+        .layer(middleware::from_fn_with_state(
+            app_context.0.metrics.clone(),
+            metrics_middleware,
+        ))
         .with_state(app_context)
-}
-
-pub(super) async fn handle_index() -> Html<&'static str> {
-    Html(
-        r#"<!DOCTYPE html>
-<html>
-<head>
-    <title>QuickDID</title>
-</head>
-<body>
-    <h1>QuickDID</h1>
-    <p>AT Protocol Identity Resolution Service</p>
-</body>
-</html>"#,
-    )
-}
-
-pub(super) async fn handle_wellknown_did_json(
-    State(context): State<AppContext>,
-) -> Json<serde_json::Value> {
-    Json(context.service_document().clone())
-}
-
-pub(super) async fn handle_wellknown_atproto_did(State(context): State<AppContext>) -> Response {
-    (StatusCode::OK, context.service_did().to_string()).into_response()
 }

 pub(super) async fn handle_xrpc_health() -> Json<serde_json::Value> {
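
The reworked `AppContext::new` now takes the full dependency set, and the `ServeDir` fallback replaces the old hard-coded index and well-known handlers. A construction sketch with the dependencies elided (`todo!()` placeholders; the `quickdid::http` paths and the axum 0.7-style `serve` call are assumptions):

    use std::sync::Arc;

    // Paths assumed from this diff.
    use quickdid::http::{AppContext, create_router};

    async fn serve() -> anyhow::Result<()> {
        let context = AppContext::new(
            todo!(), // Arc<dyn HandleResolver>
            todo!(), // Arc<dyn QueueAdapter<HandleResolutionWork>>
            todo!(), // Arc<dyn LexiconResolver>
            todo!(), // SharedMetricsPublisher
            "change-me-etag-seed".to_string(),      // seeds the MetroHash64 ETags
            Some("public, max-age=60".to_string()), // optional Cache-Control value
            "www".to_string(),                      // directory served by the ServeDir fallback
        );

        let app = create_router(context);
        let listener = tokio::net::TcpListener::bind("0.0.0.0:3007").await?;
        axum::serve(listener, app).await?;
        Ok(())
    }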
src/jetstream_handler.rs (+360)
···
+//! Jetstream event handler for QuickDID
+//!
+//! This module provides the event handler for processing AT Protocol Jetstream events,
+//! specifically handling Account and Identity events to maintain cache consistency.
+
+use crate::handle_resolver::HandleResolver;
+use crate::metrics::MetricsPublisher;
+use anyhow::Result;
+use atproto_jetstream::{EventHandler, JetstreamEvent};
+use std::sync::Arc;
+use tracing::{debug, info, warn};
+
+/// Jetstream event handler for QuickDID
+///
+/// This handler processes AT Protocol events from the Jetstream firehose to keep
+/// the handle resolver cache in sync with the network state.
+///
+/// # Event Processing
+///
+/// ## Account Events
+/// - When an account is marked as "deleted" or "deactivated", the DID is purged from the cache
+/// - Metrics are tracked for successful and failed purge operations
+///
+/// ## Identity Events
+/// - When an identity event contains a handle, the handle-to-DID mapping is updated
+/// - When an identity event lacks a handle (indicating removal), the DID is purged
+/// - Metrics are tracked for successful and failed update/purge operations
+///
+/// # Example
+///
+/// ```no_run
+/// use quickdid::jetstream_handler::QuickDidEventHandler;
+/// use quickdid::handle_resolver::HandleResolver;
+/// use quickdid::metrics::MetricsPublisher;
+/// use std::sync::Arc;
+///
+/// # async fn example(resolver: Arc<dyn HandleResolver>, metrics: Arc<dyn MetricsPublisher>) {
+/// let handler = QuickDidEventHandler::new(resolver, metrics);
+/// // Register with a JetstreamConsumer
+/// # }
+/// ```
+pub struct QuickDidEventHandler {
+    resolver: Arc<dyn HandleResolver>,
+    metrics: Arc<dyn MetricsPublisher>,
+}
+
+impl QuickDidEventHandler {
+    /// Create a new Jetstream event handler
+    ///
+    /// # Arguments
+    ///
+    /// * `resolver` - The handle resolver to use for cache operations
+    /// * `metrics` - The metrics publisher for tracking event processing
+    pub fn new(resolver: Arc<dyn HandleResolver>, metrics: Arc<dyn MetricsPublisher>) -> Self {
+        Self { resolver, metrics }
+    }
+}
+
+#[async_trait::async_trait]
+impl EventHandler for QuickDidEventHandler {
+    fn handler_id(&self) -> String {
+        "quickdid_handler".to_string()
+    }
+
+    async fn handle_event(&self, event: JetstreamEvent) -> Result<()> {
+        match event {
+            JetstreamEvent::Account { did, kind, .. } => {
+                // If account kind is "deleted" or "deactivated", purge the DID
+                if kind == "deleted" || kind == "deactivated" {
+                    info!(did = %did, kind = %kind, "Purging account");
+                    match self.resolver.purge(&did).await {
+                        Ok(()) => {
+                            self.metrics.incr("jetstream.account.purged").await;
+                        }
+                        Err(e) => {
+                            warn!(did = %did, error = ?e, "Failed to purge DID");
+                            self.metrics.incr("jetstream.account.purge_error").await;
+                        }
+                    }
+                }
+                self.metrics.incr("jetstream.account.processed").await;
+            }
+            JetstreamEvent::Identity { did, identity, .. } => {
+                // Extract handle from identity JSON if available
+                if !identity.is_null() {
+                    if let Some(handle_value) = identity.get("handle") {
+                        if let Some(handle) = handle_value.as_str() {
+                            info!(handle = %handle, did = %did, "Updating identity mapping");
+                            match self.resolver.set(handle, &did).await {
+                                Ok(()) => {
+                                    self.metrics.incr("jetstream.identity.updated").await;
+                                }
+                                Err(e) => {
+                                    warn!(handle = %handle, did = %did, error = ?e, "Failed to update mapping");
+                                    self.metrics.incr("jetstream.identity.update_error").await;
+                                }
+                            }
+                        } else {
+                            // No handle or invalid handle, purge the DID
+                            info!(did = %did, "Purging identity without valid handle");
+                            match self.resolver.purge(&did).await {
+                                Ok(()) => {
+                                    self.metrics.incr("jetstream.identity.purged").await;
+                                }
+                                Err(e) => {
+                                    warn!(did = %did, error = ?e, "Failed to purge DID");
+                                    self.metrics.incr("jetstream.identity.purge_error").await;
+                                }
+                            }
+                        }
+                    } else {
+                        // No handle field, purge the DID
+                        info!(did = %did, "Purging identity without handle field");
+                        match self.resolver.purge(&did).await {
+                            Ok(()) => {
+                                self.metrics.incr("jetstream.identity.purged").await;
+                            }
+                            Err(e) => {
+                                warn!(did = %did, error = ?e, "Failed to purge DID");
+                                self.metrics.incr("jetstream.identity.purge_error").await;
+                            }
+                        }
+                    }
+                } else {
+                    // Null identity means removed, purge the DID
+                    info!(did = %did, "Purging identity with null info");
+                    match self.resolver.purge(&did).await {
+                        Ok(()) => {
+                            self.metrics.incr("jetstream.identity.purged").await;
+                        }
+                        Err(e) => {
+                            warn!(did = %did, error = ?e, "Failed to purge DID");
+                            self.metrics.incr("jetstream.identity.purge_error").await;
+                        }
+                    }
+                }
+                self.metrics.incr("jetstream.identity.processed").await;
+            }
+            _ => {
+                // Other event types we don't care about
+                debug!("Ignoring unhandled Jetstream event type");
+            }
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::handle_resolver::HandleResolverError;
+    use crate::metrics::NoOpMetricsPublisher;
+    use async_trait::async_trait;
+    use serde_json::json;
+
+    /// Mock resolver for testing
+    struct MockResolver {
+        purge_called: std::sync::Arc<std::sync::Mutex<Vec<String>>>,
+        set_called: std::sync::Arc<std::sync::Mutex<Vec<(String, String)>>>,
+    }
+
+    impl MockResolver {
+        fn new() -> Self {
+            Self {
+                purge_called: std::sync::Arc::new(std::sync::Mutex::new(Vec::new())),
+                set_called: std::sync::Arc::new(std::sync::Mutex::new(Vec::new())),
+            }
+        }
+
+        fn get_purge_calls(&self) -> Vec<String> {
+            self.purge_called.lock().unwrap().clone()
+        }
+
+        fn get_set_calls(&self) -> Vec<(String, String)> {
+            self.set_called.lock().unwrap().clone()
+        }
+    }
+
+    #[async_trait]
+    impl HandleResolver for MockResolver {
+        async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
+            unimplemented!("Not needed for tests")
+        }
+
+        async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
+            self.purge_called.lock().unwrap().push(subject.to_string());
+            Ok(())
+        }
+
+        async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
+            self.set_called
+                .lock()
+                .unwrap()
+                .push((handle.to_string(), did.to_string()));
+            Ok(())
+        }
+    }
+
+    #[tokio::test]
+    async fn test_account_deleted_event() {
+        let resolver = Arc::new(MockResolver::new());
+        let metrics = Arc::new(NoOpMetricsPublisher::new());
+        let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
+
+        // Create a deleted account event
+        let event = JetstreamEvent::Account {
+            did: "did:plc:test123".to_string(),
+            kind: "deleted".to_string(),
+            time_us: 0,
+            account: json!(null),
+        };
+
+        handler.handle_event(event).await.unwrap();
+
+        // Verify the DID was purged
+        let purge_calls = resolver.get_purge_calls();
+        assert_eq!(purge_calls.len(), 1);
+        assert_eq!(purge_calls[0], "did:plc:test123");
+    }
+
+    #[tokio::test]
+    async fn test_account_deactivated_event() {
+        let resolver = Arc::new(MockResolver::new());
+        let metrics = Arc::new(NoOpMetricsPublisher::new());
+        let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
+
+        // Create a deactivated account event
+        let event = JetstreamEvent::Account {
+            did: "did:plc:test456".to_string(),
+            kind: "deactivated".to_string(),
+            time_us: 0,
+            account: json!(null),
+        };
+
+        handler.handle_event(event).await.unwrap();
+
+        // Verify the DID was purged
+        let purge_calls = resolver.get_purge_calls();
+        assert_eq!(purge_calls.len(), 1);
+        assert_eq!(purge_calls[0], "did:plc:test456");
+    }
+
+    #[tokio::test]
+    async fn test_account_active_event() {
+        let resolver = Arc::new(MockResolver::new());
+        let metrics = Arc::new(NoOpMetricsPublisher::new());
+        let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
+
+        // Create an active account event (should not purge)
+        let event = JetstreamEvent::Account {
+            did: "did:plc:test789".to_string(),
+            kind: "active".to_string(),
+            time_us: 0,
+            account: json!(null),
+        };
+
+        handler.handle_event(event).await.unwrap();
+
+        // Verify the DID was NOT purged
+        let purge_calls = resolver.get_purge_calls();
+        assert_eq!(purge_calls.len(), 0);
+    }
+
+    #[tokio::test]
+    async fn test_identity_with_handle_event() {
+        let resolver = Arc::new(MockResolver::new());
+        let metrics = Arc::new(NoOpMetricsPublisher::new());
+        let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
+
+        // Create an identity event with a handle
+        let event = JetstreamEvent::Identity {
+            did: "did:plc:testuser".to_string(),
+            kind: "update".to_string(),
+            time_us: 0,
+            identity: json!({
+                "handle": "alice.bsky.social"
+            }),
+        };
+
+        handler.handle_event(event).await.unwrap();
+
+        // Verify the set method was called
+        let set_calls = resolver.get_set_calls();
+        assert_eq!(set_calls.len(), 1);
+        assert_eq!(
+            set_calls[0],
+            (
+                "alice.bsky.social".to_string(),
+                "did:plc:testuser".to_string()
+            )
+        );
+
+        // Verify no purge was called
+        let purge_calls = resolver.get_purge_calls();
+        assert_eq!(purge_calls.len(), 0);
+    }
+
+    #[tokio::test]
+    async fn test_identity_without_handle_event() {
+        let resolver = Arc::new(MockResolver::new());
+        let metrics = Arc::new(NoOpMetricsPublisher::new());
+        let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
+
+        // Create an identity event without a handle field
+        let event = JetstreamEvent::Identity {
+            did: "did:plc:nohandle".to_string(),
+            kind: "update".to_string(),
+            time_us: 0,
+            identity: json!({
+                "other_field": "value"
+            }),
+        };
+
+        handler.handle_event(event).await.unwrap();
+
+        // Verify the DID was purged
+        let purge_calls = resolver.get_purge_calls();
+        assert_eq!(purge_calls.len(), 1);
+        assert_eq!(purge_calls[0], "did:plc:nohandle");
+
+        // Verify set was not called
+        let set_calls = resolver.get_set_calls();
+        assert_eq!(set_calls.len(), 0);
+    }
+
+    #[tokio::test]
+    async fn test_identity_with_null_identity() {
+        let resolver = Arc::new(MockResolver::new());
+        let metrics = Arc::new(NoOpMetricsPublisher::new());
+        let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
+
+        // Create an identity event with null identity
+        let event = JetstreamEvent::Identity {
+            did: "did:plc:nullidentity".to_string(),
+            kind: "delete".to_string(),
+            time_us: 0,
+            identity: json!(null),
+        };
+
+        handler.handle_event(event).await.unwrap();
+
+        // Verify the DID was purged
+        let purge_calls = resolver.get_purge_calls();
+        assert_eq!(purge_calls.len(), 1);
+        assert_eq!(purge_calls[0], "did:plc:nullidentity");
+
+        // Verify set was not called
+        let set_calls = resolver.get_set_calls();
+        assert_eq!(set_calls.len(), 0);
+    }
+
+    #[tokio::test]
+    async fn test_handler_id() {
+        let resolver = Arc::new(MockResolver::new());
+        let metrics = Arc::new(NoOpMetricsPublisher::new());
+        let handler = QuickDidEventHandler::new(resolver, metrics);
+
+        assert_eq!(handler.handler_id(), "quickdid_handler");
+    }
+}
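
The tests above double as a usage reference: the handler can be driven directly with constructed events before it is registered with a consumer. A sketch feeding one identity event through it (module paths assumed from this diff):

    use std::sync::Arc;

    use atproto_jetstream::{EventHandler, JetstreamEvent};
    use serde_json::json;

    // Paths assumed from this diff.
    use quickdid::handle_resolver::HandleResolver;
    use quickdid::jetstream_handler::QuickDidEventHandler;
    use quickdid::metrics::NoOpMetricsPublisher;

    async fn feed_one(resolver: Arc<dyn HandleResolver>) -> anyhow::Result<()> {
        let handler = QuickDidEventHandler::new(resolver, Arc::new(NoOpMetricsPublisher::new()));

        // An identity event carrying a handle updates the handle-to-DID mapping;
        // a null identity or a missing handle field purges the DID instead.
        let event = JetstreamEvent::Identity {
            did: "did:plc:xyz123".to_string(),
            kind: "update".to_string(),
            time_us: 0,
            identity: json!({ "handle": "alice.bsky.social" }),
        };
        handler.handle_event(event).await
    }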
src/lexicon_resolver/mod.rs (+8)
···
+//! Lexicon resolution with caching support.
+//!
+//! This module provides implementations for resolving AT Protocol lexicons (NSIDs)
+//! to their schemas with various caching strategies.
+
+mod redis;
+
+pub use redis::{create_redis_lexicon_resolver, create_redis_lexicon_resolver_with_ttl};
+458
src/lexicon_resolver/redis.rs
+458
src/lexicon_resolver/redis.rs
···
1
+
//! Redis-backed caching lexicon resolver.
2
+
//!
3
+
//! This module provides a lexicon resolver that caches resolution results in Redis
4
+
//! with configurable expiration times. Redis caching provides persistence across
5
+
//! service restarts and allows sharing of cached results across multiple instances.
6
+
7
+
use crate::metrics::SharedMetricsPublisher;
8
+
use async_trait::async_trait;
9
+
use atproto_lexicon::resolve::LexiconResolver;
10
+
use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
11
+
use metrohash::MetroHash64;
12
+
use std::hash::Hasher as _;
13
+
use std::sync::Arc;
14
+
15
+
/// Redis-backed caching lexicon resolver.
16
+
///
17
+
/// This resolver caches lexicon resolution results in Redis with a configurable TTL.
18
+
/// Results are stored as JSON bytes to minimize storage overhead while maintaining
19
+
/// the schema structure.
20
+
///
21
+
/// # Features
22
+
///
23
+
/// - Persistent caching across service restarts
24
+
/// - Shared cache across multiple service instances
25
+
/// - Configurable TTL (default: 90 days)
26
+
/// - JSON storage format for lexicon schemas
27
+
/// - Graceful fallback if Redis is unavailable
28
+
///
29
+
/// # Example
30
+
///
31
+
/// ```no_run
32
+
/// use std::sync::Arc;
33
+
/// use deadpool_redis::Pool;
34
+
/// use atproto_lexicon::resolve::LexiconResolver;
35
+
/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
36
+
/// use quickdid::metrics::NoOpMetricsPublisher;
37
+
///
38
+
/// # async fn example() {
39
+
/// # let inner_resolver: Arc<dyn LexiconResolver> = todo!();
40
+
/// # let redis_pool: Pool = todo!();
41
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
42
+
/// // Create with default 90-day TTL
43
+
/// let resolver = create_redis_lexicon_resolver(
44
+
/// inner_resolver,
45
+
/// redis_pool,
46
+
/// metrics
47
+
/// );
48
+
/// # }
49
+
/// ```
50
+
pub(super) struct RedisLexiconResolver {
51
+
/// Base lexicon resolver to perform actual resolution
52
+
inner: Arc<dyn LexiconResolver>,
53
+
/// Redis connection pool
54
+
pool: RedisPool,
55
+
/// Redis key prefix for lexicon resolution cache
56
+
key_prefix: String,
57
+
/// TTL for cache entries in seconds
58
+
ttl_seconds: u64,
59
+
/// Metrics publisher for telemetry
60
+
metrics: SharedMetricsPublisher,
61
+
}
62
+
63
+
impl RedisLexiconResolver {
64
+
/// Create a new Redis-backed lexicon resolver with default 90-day TTL.
65
+
fn new(
66
+
inner: Arc<dyn LexiconResolver>,
67
+
pool: RedisPool,
68
+
metrics: SharedMetricsPublisher,
69
+
) -> Self {
70
+
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
71
+
}
72
+
73
+
/// Create a new Redis-backed lexicon resolver with custom TTL.
74
+
fn with_ttl(
75
+
inner: Arc<dyn LexiconResolver>,
76
+
pool: RedisPool,
77
+
ttl_seconds: u64,
78
+
metrics: SharedMetricsPublisher,
79
+
) -> Self {
80
+
Self::with_full_config(inner, pool, "lexicon:".to_string(), ttl_seconds, metrics)
81
+
}
82
+
83
+
/// Create a new Redis-backed lexicon resolver with full configuration.
84
+
fn with_full_config(
85
+
inner: Arc<dyn LexiconResolver>,
86
+
pool: RedisPool,
87
+
key_prefix: String,
88
+
ttl_seconds: u64,
89
+
metrics: SharedMetricsPublisher,
90
+
) -> Self {
91
+
Self {
92
+
inner,
93
+
pool,
94
+
key_prefix,
95
+
ttl_seconds,
96
+
metrics,
97
+
}
98
+
}
99
+
100
+
/// Generate the Redis key for an NSID.
101
+
///
102
+
/// Uses MetroHash64 to generate a consistent hash of the NSID
103
+
/// for use as the Redis key. This provides better key distribution
104
+
/// and avoids issues with special characters in NSIDs.
105
+
fn make_key(&self, nsid: &str) -> String {
106
+
let mut h = MetroHash64::default();
107
+
h.write(nsid.as_bytes());
108
+
format!("{}{}", self.key_prefix, h.finish())
109
+
}
110
+
111
+
/// Get the TTL in seconds.
112
+
fn ttl_seconds(&self) -> u64 {
113
+
self.ttl_seconds
114
+
}
115
+
}
116
+
117
+
#[async_trait]
118
+
impl LexiconResolver for RedisLexiconResolver {
119
+
async fn resolve(&self, nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
120
+
let key = self.make_key(nsid);
121
+
122
+
// Try to get from Redis cache first
123
+
match self.pool.get().await {
124
+
Ok(mut conn) => {
125
+
// Check if the key exists in Redis (stored as JSON bytes)
126
+
let cached: Option<Vec<u8>> = match conn.get(&key).await {
127
+
Ok(value) => value,
128
+
Err(e) => {
129
+
self.metrics.incr("lexicon_resolver.redis.get_error").await;
130
+
tracing::warn!("Failed to get NSID from Redis cache: {}", e);
131
+
None
132
+
}
133
+
};
134
+
135
+
if let Some(cached_bytes) = cached {
136
+
// Deserialize the cached JSON
137
+
match serde_json::from_slice::<serde_json::Value>(&cached_bytes) {
138
+
Ok(cached_value) => {
139
+
tracing::debug!("Cache hit for NSID {}", nsid);
140
+
self.metrics.incr("lexicon_resolver.redis.cache_hit").await;
141
+
return Ok(cached_value);
142
+
}
143
+
Err(e) => {
144
+
tracing::warn!(
145
+
"Failed to deserialize cached lexicon for NSID {}: {}",
146
+
nsid,
147
+
e
148
+
);
149
+
self.metrics
150
+
.incr("lexicon_resolver.redis.deserialize_error")
151
+
.await;
152
+
// Fall through to re-resolve if deserialization fails
153
+
}
154
+
}
155
+
}
156
+
157
+
// Not in cache, resolve through inner resolver
158
+
tracing::debug!("Cache miss for NSID {}, resolving...", nsid);
159
+
self.metrics.incr("lexicon_resolver.redis.cache_miss").await;
160
+
let result = self.inner.resolve(nsid).await;
161
+
162
+
// Cache successful result
163
+
if let Ok(ref schema) = result {
164
+
// Serialize to JSON bytes
165
+
match serde_json::to_vec(schema) {
166
+
Ok(bytes) => {
167
+
// Set with expiration (ignore errors to not fail the resolution)
168
+
if let Err(e) = conn
169
+
.set_ex::<_, _, ()>(&key, bytes, self.ttl_seconds())
170
+
.await
171
+
{
172
+
tracing::warn!(
173
+
"Failed to cache lexicon resolution in Redis: {}",
174
+
e
175
+
);
176
+
self.metrics
177
+
.incr("lexicon_resolver.redis.cache_set_error")
178
+
.await;
179
+
} else {
180
+
tracing::debug!("Cached lexicon for NSID {}", nsid);
181
+
self.metrics.incr("lexicon_resolver.redis.cache_set").await;
182
+
}
183
+
}
184
+
Err(e) => {
185
+
tracing::warn!(
186
+
"Failed to serialize lexicon result for NSID {}: {}",
187
+
nsid,
188
+
e
189
+
);
190
+
self.metrics
191
+
.incr("lexicon_resolver.redis.serialize_error")
192
+
.await;
193
+
}
194
+
}
195
+
}
196
+
197
+
result
198
+
}
199
+
Err(e) => {
200
+
// Redis connection failed, fall back to inner resolver
201
+
tracing::warn!(
202
+
"Failed to get Redis connection, falling back to uncached resolution: {}",
203
+
e
204
+
);
205
+
self.metrics
206
+
.incr("lexicon_resolver.redis.connection_error")
207
+
.await;
208
+
self.inner.resolve(nsid).await
209
+
}
210
+
}
211
+
}
212
+
}
213
+
214
+
/// Create a new Redis-backed lexicon resolver with default 90-day TTL.
215
+
///
216
+
/// # Arguments
217
+
///
218
+
/// * `inner` - The underlying resolver to use for actual resolution
219
+
/// * `pool` - Redis connection pool
220
+
/// * `metrics` - Metrics publisher for telemetry
221
+
///
222
+
/// # Example
223
+
///
224
+
/// ```no_run
225
+
/// use std::sync::Arc;
226
+
/// use atproto_lexicon::resolve::{DefaultLexiconResolver, LexiconResolver};
227
+
/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
228
+
/// use quickdid::cache::create_redis_pool;
229
+
/// use quickdid::metrics::NoOpMetricsPublisher;
230
+
///
231
+
/// # async fn example() -> anyhow::Result<()> {
232
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
233
+
/// # use reqwest::Client;
234
+
/// # let dns_resolver = HickoryDnsResolver::create_resolver(&[]);
235
+
/// # let http_client = Client::new();
236
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
237
+
/// let base: Arc<dyn LexiconResolver> = Arc::new(
238
+
/// DefaultLexiconResolver::new(http_client, dns_resolver)
239
+
/// );
240
+
///
241
+
/// let pool = create_redis_pool("redis://localhost:6379")?;
242
+
/// let resolver = create_redis_lexicon_resolver(base, pool, metrics);
243
+
/// let schema = resolver.resolve("app.bsky.feed.post").await.unwrap();
244
+
/// # Ok(())
245
+
/// # }
246
+
/// ```
247
+
pub fn create_redis_lexicon_resolver(
248
+
inner: Arc<dyn LexiconResolver>,
249
+
pool: RedisPool,
250
+
metrics: SharedMetricsPublisher,
251
+
) -> Arc<dyn LexiconResolver> {
252
+
Arc::new(RedisLexiconResolver::new(inner, pool, metrics))
253
+
}
254
+
255
+
/// Create a new Redis-backed lexicon resolver with custom TTL.
256
+
///
257
+
/// # Arguments
258
+
///
259
+
/// * `inner` - The underlying resolver to use for actual resolution
260
+
/// * `pool` - Redis connection pool
261
+
/// * `ttl_seconds` - TTL for cache entries in seconds
262
+
/// * `metrics` - Metrics publisher for telemetry
263
+
pub fn create_redis_lexicon_resolver_with_ttl(
264
+
inner: Arc<dyn LexiconResolver>,
265
+
pool: RedisPool,
266
+
ttl_seconds: u64,
267
+
metrics: SharedMetricsPublisher,
268
+
) -> Arc<dyn LexiconResolver> {
269
+
Arc::new(RedisLexiconResolver::with_ttl(
270
+
inner,
271
+
pool,
272
+
ttl_seconds,
273
+
metrics,
274
+
))
275
+
}
276
+
277
+
#[cfg(test)]
278
+
mod tests {
279
+
use super::*;
280
+
281
+
// Mock lexicon resolver for testing
282
+
#[derive(Clone)]
283
+
struct MockLexiconResolver {
284
+
should_fail: bool,
285
+
expected_schema: serde_json::Value,
286
+
}
287
+
288
+
#[async_trait]
289
+
impl LexiconResolver for MockLexiconResolver {
290
+
async fn resolve(&self, _nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
291
+
if self.should_fail {
292
+
Err(anyhow::anyhow!("Mock resolution failure"))
293
+
} else {
294
+
Ok(self.expected_schema.clone())
295
+
}
296
+
}
297
+
}
298
+
299
+
#[tokio::test]
300
+
async fn test_redis_lexicon_resolver_cache_hit() {
301
+
let pool = match crate::test_helpers::get_test_redis_pool() {
302
+
Some(p) => p,
303
+
None => return,
304
+
};
305
+
306
+
// Create mock resolver with sample schema
307
+
let schema = serde_json::json!({
308
+
"lexicon": 1,
309
+
"id": "app.bsky.feed.post",
310
+
"defs": {
311
+
"main": {
312
+
"type": "record",
313
+
"description": "A post record"
314
+
}
315
+
}
316
+
});
317
+
318
+
let mock_resolver = Arc::new(MockLexiconResolver {
319
+
should_fail: false,
320
+
expected_schema: schema.clone(),
321
+
});
322
+
323
+
// Create metrics publisher
324
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
325
+
326
+
// Create Redis-backed resolver with a unique key prefix for testing
327
+
let test_prefix = format!(
328
+
"test:lexicon:{}:",
329
+
std::time::SystemTime::now()
330
+
.duration_since(std::time::UNIX_EPOCH)
331
+
.unwrap()
332
+
.as_nanos()
333
+
);
334
+
let redis_resolver = RedisLexiconResolver::with_full_config(
335
+
mock_resolver,
336
+
pool.clone(),
337
+
test_prefix.clone(),
338
+
3600,
339
+
metrics,
340
+
);
341
+
342
+
let test_nsid = "app.bsky.feed.post";
343
+
344
+
// First resolution - should call inner resolver
345
+
let result1 = redis_resolver.resolve(test_nsid).await.unwrap();
346
+
assert_eq!(result1, schema);
347
+
348
+
// Second resolution - should hit cache
349
+
let result2 = redis_resolver.resolve(test_nsid).await.unwrap();
350
+
assert_eq!(result2, schema);
351
+
352
+
// Clean up test data
353
+
if let Ok(mut conn) = pool.get().await {
354
+
let mut h = MetroHash64::default();
355
+
h.write(test_nsid.as_bytes());
356
+
let key = format!("{}{}", test_prefix, h.finish());
357
+
let _: Result<(), _> = conn.del(key).await;
358
+
}
359
+
}
360
+
361
+
#[tokio::test]
362
+
async fn test_redis_lexicon_resolver_cache_miss() {
363
+
let pool = match crate::test_helpers::get_test_redis_pool() {
364
+
Some(p) => p,
365
+
None => return,
366
+
};
367
+
368
+
let schema = serde_json::json!({
369
+
"lexicon": 1,
370
+
"id": "com.example.test",
371
+
});
372
+
373
+
let mock_resolver = Arc::new(MockLexiconResolver {
374
+
should_fail: false,
375
+
expected_schema: schema.clone(),
376
+
});
377
+
378
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
379
+
380
+
let test_prefix = format!(
381
+
"test:lexicon:{}:",
382
+
std::time::SystemTime::now()
383
+
.duration_since(std::time::UNIX_EPOCH)
384
+
.unwrap()
385
+
.as_nanos()
386
+
);
387
+
let redis_resolver = RedisLexiconResolver::with_full_config(
388
+
mock_resolver,
389
+
pool.clone(),
390
+
test_prefix.clone(),
391
+
3600,
392
+
metrics,
393
+
);
394
+
395
+
let test_nsid = "com.example.test";
396
+
397
+
// Ensure key doesn't exist
398
+
if let Ok(mut conn) = pool.get().await {
399
+
let mut h = MetroHash64::default();
400
+
h.write(test_nsid.as_bytes());
401
+
let key = format!("{}{}", test_prefix, h.finish());
402
+
let _: Result<(), _> = conn.del(&key).await;
403
+
}
404
+
405
+
// Resolution should succeed and cache the result
406
+
let result = redis_resolver.resolve(test_nsid).await.unwrap();
407
+
assert_eq!(result, schema);
408
+
409
+
// Verify the result was cached
410
+
if let Ok(mut conn) = pool.get().await {
411
+
let mut h = MetroHash64::default();
412
+
h.write(test_nsid.as_bytes());
413
+
let key = format!("{}{}", test_prefix, h.finish());
414
+
let exists: bool = conn.exists(&key).await.unwrap();
415
+
assert!(exists, "Result should be cached");
416
+
417
+
// Clean up
418
+
let _: Result<(), _> = conn.del(key).await;
419
+
}
420
+
}
421
+
422
+
#[tokio::test]
423
+
async fn test_redis_lexicon_resolver_error_handling() {
424
+
let pool = match crate::test_helpers::get_test_redis_pool() {
425
+
Some(p) => p,
426
+
None => return,
427
+
};
428
+
429
+
// Create mock resolver that fails
430
+
let mock_resolver = Arc::new(MockLexiconResolver {
431
+
should_fail: true,
432
+
expected_schema: serde_json::Value::Null,
433
+
});
434
+
435
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
436
+
437
+
let test_prefix = format!(
438
+
"test:lexicon:{}:",
439
+
std::time::SystemTime::now()
440
+
.duration_since(std::time::UNIX_EPOCH)
441
+
.unwrap()
442
+
.as_nanos()
443
+
);
444
+
let redis_resolver = RedisLexiconResolver::with_full_config(
445
+
mock_resolver,
446
+
pool.clone(),
447
+
test_prefix,
448
+
3600,
449
+
metrics,
450
+
);
451
+
452
+
let test_nsid = "com.example.nonexistent";
453
+
454
+
// Resolution should fail
455
+
let result = redis_resolver.resolve(test_nsid).await;
456
+
assert!(result.is_err());
457
+
}
458
+
}
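
To make the keying scheme above concrete, here is a standalone sketch of the `make_key` logic: the NSID is hashed with MetroHash64 and the 64-bit digest is appended to the prefix, so raw NSIDs (with their dots and mixed case) never appear in Redis keys. Only the `metrohash` crate, already used by the resolver, is assumed:

```rust
use metrohash::MetroHash64;
use std::hash::Hasher as _;

/// Mirror of `RedisLexiconResolver::make_key`.
fn lexicon_cache_key(prefix: &str, nsid: &str) -> String {
    let mut h = MetroHash64::default();
    h.write(nsid.as_bytes());
    format!("{}{}", prefix, h.finish())
}

fn main() {
    // The same NSID always hashes to the same key, so cache hits are stable
    // across processes and restarts.
    let a = lexicon_cache_key("lexicon:", "app.bsky.feed.post");
    let b = lexicon_cache_key("lexicon:", "app.bsky.feed.post");
    assert_eq!(a, b);
    println!("{a}"); // e.g. "lexicon:<64-bit digest>"
}
```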
+3
src/lib.rs
+3
src/lib.rs
···
2
2
pub mod config; // Config and Args needed by binary
3
3
pub mod handle_resolver; // Only traits and factory functions exposed
4
4
pub mod http; // Only create_router exposed
5
+
pub mod jetstream_handler; // Jetstream event handler for AT Protocol events
6
+
pub mod lexicon_resolver; // Lexicon resolution with caching support
5
7
6
8
// Semi-public modules - needed by binary but with limited exposure
7
9
pub mod cache; // Only create_redis_pool exposed
8
10
pub mod handle_resolver_task; // Factory functions and TaskConfig exposed
11
+
pub mod metrics; // Metrics publishing trait and implementations
9
12
pub mod queue; // Queue adapter system with trait and factory functions
10
13
pub mod sqlite_schema; // SQLite schema management functions exposed
11
14
pub mod task_manager; // Only spawn_cancellable_task exposed
+547
src/metrics.rs
+547
src/metrics.rs
···
1
+
use crate::config::Config;
2
+
use async_trait::async_trait;
3
+
use cadence::{
4
+
BufferedUdpMetricSink, Counted, CountedExt, Gauged, Metric, QueuingMetricSink, StatsdClient,
5
+
Timed,
6
+
};
7
+
use std::net::UdpSocket;
8
+
use std::sync::Arc;
9
+
use thiserror::Error;
10
+
use tracing::{debug, error};
11
+
12
+
/// Trait for publishing metrics with counter and gauge support
13
+
/// Designed for minimal compatibility with cadence-style metrics
14
+
#[async_trait]
15
+
pub trait MetricsPublisher: Send + Sync {
16
+
/// Increment a counter by 1
17
+
async fn incr(&self, key: &str);
18
+
19
+
/// Increment a counter by a specific value
20
+
async fn count(&self, key: &str, value: u64);
21
+
22
+
/// Increment a counter with tags
23
+
async fn incr_with_tags(&self, key: &str, tags: &[(&str, &str)]);
24
+
25
+
/// Increment a counter by a specific value with tags
26
+
async fn count_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]);
27
+
28
+
/// Record a gauge value
29
+
async fn gauge(&self, key: &str, value: u64);
30
+
31
+
/// Record a gauge value with tags
32
+
async fn gauge_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]);
33
+
34
+
/// Record a timing in milliseconds
35
+
async fn time(&self, key: &str, millis: u64);
36
+
37
+
/// Record a timing with tags
38
+
async fn time_with_tags(&self, key: &str, millis: u64, tags: &[(&str, &str)]);
39
+
}
40
+
41
+
/// No-op implementation for development and testing
42
+
#[derive(Debug, Clone, Default)]
43
+
pub struct NoOpMetricsPublisher;
44
+
45
+
impl NoOpMetricsPublisher {
46
+
pub fn new() -> Self {
47
+
Self
48
+
}
49
+
}
50
+
51
+
#[async_trait]
52
+
impl MetricsPublisher for NoOpMetricsPublisher {
53
+
async fn incr(&self, _key: &str) {
54
+
// No-op
55
+
}
56
+
57
+
async fn count(&self, _key: &str, _value: u64) {
58
+
// No-op
59
+
}
60
+
61
+
async fn incr_with_tags(&self, _key: &str, _tags: &[(&str, &str)]) {
62
+
// No-op
63
+
}
64
+
65
+
async fn count_with_tags(&self, _key: &str, _value: u64, _tags: &[(&str, &str)]) {
66
+
// No-op
67
+
}
68
+
69
+
async fn gauge(&self, _key: &str, _value: u64) {
70
+
// No-op
71
+
}
72
+
73
+
async fn gauge_with_tags(&self, _key: &str, _value: u64, _tags: &[(&str, &str)]) {
74
+
// No-op
75
+
}
76
+
77
+
async fn time(&self, _key: &str, _millis: u64) {
78
+
// No-op
79
+
}
80
+
81
+
async fn time_with_tags(&self, _key: &str, _millis: u64, _tags: &[(&str, &str)]) {
82
+
// No-op
83
+
}
84
+
}
85
+
86
+
/// Statsd-backed metrics publisher using cadence
87
+
pub struct StatsdMetricsPublisher {
88
+
client: StatsdClient,
89
+
default_tags: Vec<(String, String)>,
90
+
}
91
+
92
+
impl StatsdMetricsPublisher {
93
+
/// Create a new StatsdMetricsPublisher with default configuration
94
+
pub fn new(host: &str, prefix: &str) -> Result<Self, Box<dyn std::error::Error>> {
95
+
Self::new_with_bind(host, prefix, "[::]:0")
96
+
}
97
+
98
+
/// Create a new StatsdMetricsPublisher with custom bind address
99
+
pub fn new_with_bind(
100
+
host: &str,
101
+
prefix: &str,
102
+
bind_addr: &str,
103
+
) -> Result<Self, Box<dyn std::error::Error>> {
104
+
Self::new_with_bind_and_tags(host, prefix, bind_addr, vec![])
105
+
}
106
+
107
+
/// Create a new StatsdMetricsPublisher with default tags
108
+
pub fn new_with_tags(
109
+
host: &str,
110
+
prefix: &str,
111
+
default_tags: Vec<(String, String)>,
112
+
) -> Result<Self, Box<dyn std::error::Error>> {
113
+
Self::new_with_bind_and_tags(host, prefix, "[::]:0", default_tags)
114
+
}
115
+
116
+
/// Create a new StatsdMetricsPublisher with custom bind address and tags
117
+
pub fn new_with_bind_and_tags(
118
+
host: &str,
119
+
prefix: &str,
120
+
bind_addr: &str,
121
+
default_tags: Vec<(String, String)>,
122
+
) -> Result<Self, Box<dyn std::error::Error>> {
123
+
tracing::info!(
124
+
"Creating StatsdMetricsPublisher: host={}, prefix={}, bind={}, tags={:?}",
125
+
host,
126
+
prefix,
127
+
bind_addr,
128
+
default_tags
129
+
);
130
+
131
+
let socket = UdpSocket::bind(bind_addr)?;
132
+
socket.set_nonblocking(true)?;
133
+
134
+
let buffered_sink = BufferedUdpMetricSink::from(host, socket)?;
135
+
let queuing_sink = QueuingMetricSink::builder()
136
+
.with_error_handler(move |error| {
137
+
error!("Failed to send metric via sink: {}", error);
138
+
})
139
+
.build(buffered_sink);
140
+
let client = StatsdClient::from_sink(prefix, queuing_sink);
141
+
142
+
tracing::info!(
143
+
"StatsdMetricsPublisher created successfully with bind address: {}",
144
+
bind_addr
145
+
);
146
+
Ok(Self {
147
+
client,
148
+
default_tags,
149
+
})
150
+
}
151
+
152
+
/// Create from an existing StatsdClient
153
+
pub fn from_client(client: StatsdClient) -> Self {
154
+
Self::from_client_with_tags(client, vec![])
155
+
}
156
+
157
+
/// Create from an existing StatsdClient with default tags
158
+
pub fn from_client_with_tags(
159
+
client: StatsdClient,
160
+
default_tags: Vec<(String, String)>,
161
+
) -> Self {
162
+
Self {
163
+
client,
164
+
default_tags,
165
+
}
166
+
}
167
+
168
+
/// Apply default tags to a builder
169
+
fn apply_default_tags<'a, M>(
170
+
&'a self,
171
+
mut builder: cadence::MetricBuilder<'a, 'a, M>,
172
+
) -> cadence::MetricBuilder<'a, 'a, M>
173
+
where
174
+
M: Metric + From<String>,
175
+
{
176
+
for (k, v) in &self.default_tags {
177
+
builder = builder.with_tag(k.as_str(), v.as_str());
178
+
}
179
+
builder
180
+
}
181
+
}
182
+
183
+
#[async_trait]
184
+
impl MetricsPublisher for StatsdMetricsPublisher {
185
+
async fn incr(&self, key: &str) {
186
+
debug!("Sending metric incr: {}", key);
187
+
if self.default_tags.is_empty() {
188
+
match self.client.incr(key) {
189
+
Ok(_) => debug!("Successfully sent metric: {}", key),
190
+
Err(e) => error!("Failed to send metric {}: {}", key, e),
191
+
}
192
+
} else {
193
+
let builder = self.client.incr_with_tags(key);
194
+
let builder = self.apply_default_tags(builder);
195
+
let _ = builder.send();
196
+
debug!("Sent metric with tags: {}", key);
197
+
}
198
+
}
199
+
200
+
async fn count(&self, key: &str, value: u64) {
201
+
if self.default_tags.is_empty() {
202
+
let _ = self.client.count(key, value);
203
+
} else {
204
+
let builder = self.client.count_with_tags(key, value);
205
+
let builder = self.apply_default_tags(builder);
206
+
let _ = builder.send();
207
+
}
208
+
}
209
+
210
+
async fn incr_with_tags(&self, key: &str, tags: &[(&str, &str)]) {
211
+
let mut builder = self.client.incr_with_tags(key);
212
+
builder = self.apply_default_tags(builder);
213
+
for (k, v) in tags {
214
+
builder = builder.with_tag(k, v);
215
+
}
216
+
let _ = builder.send();
217
+
}
218
+
219
+
async fn count_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
220
+
let mut builder = self.client.count_with_tags(key, value);
221
+
builder = self.apply_default_tags(builder);
222
+
for (k, v) in tags {
223
+
builder = builder.with_tag(k, v);
224
+
}
225
+
let _ = builder.send();
226
+
}
227
+
228
+
async fn gauge(&self, key: &str, value: u64) {
229
+
debug!("Sending metric gauge: {} = {}", key, value);
230
+
if self.default_tags.is_empty() {
231
+
match self.client.gauge(key, value) {
232
+
Ok(_) => debug!("Successfully sent gauge: {} = {}", key, value),
233
+
Err(e) => error!("Failed to send gauge {} = {}: {}", key, value, e),
234
+
}
235
+
} else {
236
+
let builder = self.client.gauge_with_tags(key, value);
237
+
let builder = self.apply_default_tags(builder);
238
+
let _ = builder.send();
239
+
debug!("Sent gauge with tags: {} = {}", key, value);
240
+
}
241
+
}
242
+
243
+
async fn gauge_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
244
+
let mut builder = self.client.gauge_with_tags(key, value);
245
+
builder = self.apply_default_tags(builder);
246
+
for (k, v) in tags {
247
+
builder = builder.with_tag(k, v);
248
+
}
249
+
let _ = builder.send();
250
+
}
251
+
252
+
async fn time(&self, key: &str, millis: u64) {
253
+
if self.default_tags.is_empty() {
254
+
let _ = self.client.time(key, millis);
255
+
} else {
256
+
let builder = self.client.time_with_tags(key, millis);
257
+
let builder = self.apply_default_tags(builder);
258
+
let _ = builder.send();
259
+
}
260
+
}
261
+
262
+
async fn time_with_tags(&self, key: &str, millis: u64, tags: &[(&str, &str)]) {
263
+
let mut builder = self.client.time_with_tags(key, millis);
264
+
builder = self.apply_default_tags(builder);
265
+
for (k, v) in tags {
266
+
builder = builder.with_tag(k, v);
267
+
}
268
+
let _ = builder.send();
269
+
}
270
+
}
271
+
272
+
/// Type alias for shared metrics publisher
273
+
pub type SharedMetricsPublisher = Arc<dyn MetricsPublisher>;
274
+
275
+
/// Metrics-specific errors
276
+
#[derive(Debug, Error)]
277
+
pub enum MetricsError {
278
+
/// Failed to create metrics publisher
279
+
#[error("error-quickdid-metrics-1 Failed to create metrics publisher: {0}")]
280
+
CreationFailed(String),
281
+
282
+
/// Invalid configuration for metrics
283
+
#[error("error-quickdid-metrics-2 Invalid metrics configuration: {0}")]
284
+
InvalidConfig(String),
285
+
}
286
+
287
+
/// Create a metrics publisher based on configuration
288
+
///
289
+
/// Returns either a no-op publisher or a StatsD publisher based on the
290
+
/// `metrics_adapter` configuration value.
291
+
///
292
+
/// ## Example
293
+
///
294
+
/// ```rust,no_run
295
+
/// use quickdid::config::Config;
296
+
/// use quickdid::metrics::create_metrics_publisher;
297
+
///
298
+
/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
299
+
/// let config = Config::from_env()?;
300
+
/// let metrics = create_metrics_publisher(&config)?;
301
+
///
302
+
/// // Use the metrics publisher
303
+
/// metrics.incr("request.count").await;
304
+
/// # Ok(())
305
+
/// # }
306
+
/// ```
307
+
pub fn create_metrics_publisher(config: &Config) -> Result<SharedMetricsPublisher, MetricsError> {
308
+
match config.metrics_adapter.as_str() {
309
+
"noop" => Ok(Arc::new(NoOpMetricsPublisher::new())),
310
+
"statsd" => {
311
+
let host = config.metrics_statsd_host.as_ref().ok_or_else(|| {
312
+
MetricsError::InvalidConfig(
313
+
"METRICS_STATSD_HOST is required when using statsd adapter".to_string(),
314
+
)
315
+
})?;
316
+
317
+
// Parse tags from comma-separated key:value pairs
318
+
let default_tags = if let Some(tags_str) = &config.metrics_tags {
319
+
tags_str
320
+
.split(',')
321
+
.filter_map(|tag| {
322
+
let parts: Vec<&str> = tag.trim().split(':').collect();
323
+
if parts.len() == 2 {
324
+
Some((parts[0].to_string(), parts[1].to_string()))
325
+
} else {
326
+
error!("Invalid tag format: {}", tag);
327
+
None
328
+
}
329
+
})
330
+
.collect()
331
+
} else {
332
+
vec![]
333
+
};
334
+
335
+
let publisher = StatsdMetricsPublisher::new_with_bind_and_tags(
336
+
host,
337
+
&config.metrics_prefix,
338
+
&config.metrics_statsd_bind,
339
+
default_tags,
340
+
)
341
+
.map_err(|e| MetricsError::CreationFailed(e.to_string()))?;
342
+
343
+
Ok(Arc::new(publisher))
344
+
}
345
+
_ => Err(MetricsError::InvalidConfig(format!(
346
+
"Unknown metrics adapter: {}",
347
+
config.metrics_adapter
348
+
))),
349
+
}
350
+
}
351
+
352
+
#[cfg(test)]
353
+
mod tests {
354
+
use super::*;
355
+
use once_cell::sync::Lazy;
356
+
use std::sync::Mutex;
357
+
358
+
// Use a mutex to serialize tests that modify environment variables
359
+
static ENV_MUTEX: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
360
+
361
+
#[tokio::test]
362
+
async fn test_noop_metrics() {
363
+
let metrics = NoOpMetricsPublisher::new();
364
+
365
+
// These should all be no-ops and not panic
366
+
metrics.incr("test.counter").await;
367
+
metrics.count("test.counter", 5).await;
368
+
metrics
369
+
.incr_with_tags("test.counter", &[("env", "test")])
370
+
.await;
371
+
metrics
372
+
.count_with_tags(
373
+
"test.counter",
374
+
10,
375
+
&[("env", "test"), ("service", "quickdid")],
376
+
)
377
+
.await;
378
+
metrics.gauge("test.gauge", 100).await;
379
+
metrics
380
+
.gauge_with_tags("test.gauge", 200, &[("host", "localhost")])
381
+
.await;
382
+
metrics.time("test.timing", 42).await;
383
+
metrics
384
+
.time_with_tags("test.timing", 84, &[("endpoint", "/resolve")])
385
+
.await;
386
+
}
387
+
388
+
#[tokio::test]
389
+
async fn test_shared_metrics() {
390
+
let metrics: SharedMetricsPublisher = Arc::new(NoOpMetricsPublisher::new());
391
+
392
+
// Verify it can be used as a shared reference
393
+
metrics.incr("shared.counter").await;
394
+
metrics.gauge("shared.gauge", 50).await;
395
+
396
+
// Verify it can be cloned
397
+
let metrics2 = Arc::clone(&metrics);
398
+
metrics2.count("cloned.counter", 3).await;
399
+
}
400
+
401
+
#[test]
402
+
fn test_create_noop_publisher() {
403
+
use std::env;
404
+
405
+
// Lock mutex to prevent concurrent environment variable modification
406
+
let _guard = ENV_MUTEX.lock().unwrap();
407
+
408
+
// Clean up any existing environment variables first
409
+
unsafe {
410
+
env::remove_var("METRICS_ADAPTER");
411
+
env::remove_var("METRICS_STATSD_HOST");
412
+
env::remove_var("METRICS_PREFIX");
413
+
env::remove_var("METRICS_TAGS");
414
+
}
415
+
416
+
// Set up environment for noop adapter
417
+
unsafe {
418
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
419
+
env::set_var("METRICS_ADAPTER", "noop");
420
+
}
421
+
422
+
let config = Config::from_env().unwrap();
423
+
let metrics = create_metrics_publisher(&config).unwrap();
424
+
425
+
// Should create successfully - actual type checking happens at compile time
426
+
assert!(Arc::strong_count(&metrics) == 1);
427
+
428
+
// Clean up
429
+
unsafe {
430
+
env::remove_var("METRICS_ADAPTER");
431
+
env::remove_var("HTTP_EXTERNAL");
432
+
}
433
+
}
434
+
435
+
#[test]
436
+
fn test_create_statsd_publisher() {
437
+
use std::env;
438
+
439
+
// Lock mutex to prevent concurrent environment variable modification
440
+
let _guard = ENV_MUTEX.lock().unwrap();
441
+
442
+
// Clean up any existing environment variables first
443
+
unsafe {
444
+
env::remove_var("METRICS_ADAPTER");
445
+
env::remove_var("METRICS_STATSD_HOST");
446
+
env::remove_var("METRICS_PREFIX");
447
+
env::remove_var("METRICS_TAGS");
448
+
}
449
+
450
+
// Set up environment for statsd adapter
451
+
unsafe {
452
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
453
+
env::set_var("METRICS_ADAPTER", "statsd");
454
+
env::set_var("METRICS_STATSD_HOST", "localhost:8125");
455
+
env::set_var("METRICS_PREFIX", "test");
456
+
env::set_var("METRICS_TAGS", "env:test,service:quickdid");
457
+
}
458
+
459
+
let config = Config::from_env().unwrap();
460
+
let metrics = create_metrics_publisher(&config).unwrap();
461
+
462
+
// Should create successfully
463
+
assert!(Arc::strong_count(&metrics) == 1);
464
+
465
+
// Clean up
466
+
unsafe {
467
+
env::remove_var("METRICS_ADAPTER");
468
+
env::remove_var("METRICS_STATSD_HOST");
469
+
env::remove_var("METRICS_PREFIX");
470
+
env::remove_var("METRICS_TAGS");
471
+
env::remove_var("HTTP_EXTERNAL");
472
+
}
473
+
}
474
+
475
+
#[test]
476
+
fn test_missing_statsd_host() {
477
+
use std::env;
478
+
479
+
// Lock mutex to prevent concurrent environment variable modification
480
+
let _guard = ENV_MUTEX.lock().unwrap();
481
+
482
+
// Clean up any existing environment variables first
483
+
unsafe {
484
+
env::remove_var("METRICS_ADAPTER");
485
+
env::remove_var("METRICS_STATSD_HOST");
486
+
env::remove_var("METRICS_PREFIX");
487
+
env::remove_var("METRICS_TAGS");
488
+
}
489
+
490
+
// Set up environment for statsd adapter without host
491
+
unsafe {
492
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
493
+
env::set_var("METRICS_ADAPTER", "statsd");
494
+
env::remove_var("METRICS_STATSD_HOST");
495
+
}
496
+
497
+
let config = Config::from_env().unwrap();
498
+
let result = create_metrics_publisher(&config);
499
+
500
+
// Should fail with invalid config error
501
+
assert!(result.is_err());
502
+
if let Err(e) = result {
503
+
assert!(matches!(e, MetricsError::InvalidConfig(_)));
504
+
}
505
+
506
+
// Clean up
507
+
unsafe {
508
+
env::remove_var("METRICS_ADAPTER");
509
+
env::remove_var("HTTP_EXTERNAL");
510
+
}
511
+
}
512
+
513
+
#[test]
514
+
fn test_invalid_adapter() {
515
+
use std::env;
516
+
517
+
// Lock mutex to prevent concurrent environment variable modification
518
+
let _guard = ENV_MUTEX.lock().unwrap();
519
+
520
+
// Clean up any existing environment variables first
521
+
unsafe {
522
+
env::remove_var("METRICS_ADAPTER");
523
+
env::remove_var("METRICS_STATSD_HOST");
524
+
env::remove_var("METRICS_PREFIX");
525
+
env::remove_var("METRICS_TAGS");
526
+
}
527
+
528
+
// Set up environment with invalid adapter
529
+
unsafe {
530
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
531
+
env::set_var("METRICS_ADAPTER", "invalid");
532
+
env::remove_var("METRICS_STATSD_HOST"); // Clean up from other tests
533
+
}
534
+
535
+
let config = Config::from_env().unwrap();
536
+
537
+
// Config validation should catch this
538
+
let validation_result = config.validate();
539
+
assert!(validation_result.is_err());
540
+
541
+
// Clean up
542
+
unsafe {
543
+
env::remove_var("METRICS_ADAPTER");
544
+
env::remove_var("HTTP_EXTERNAL");
545
+
}
546
+
}
547
+
}
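
The `METRICS_TAGS` handling in `create_metrics_publisher` is worth isolating: tags arrive as comma-separated `key:value` pairs, and malformed entries are dropped (with an error log) rather than failing startup. A standalone sketch of that parsing:

```rust
/// Parse "env:test,service:quickdid" into key/value pairs, skipping
/// malformed entries, as create_metrics_publisher does.
fn parse_metrics_tags(tags_str: &str) -> Vec<(String, String)> {
    tags_str
        .split(',')
        .filter_map(|tag| {
            let parts: Vec<&str> = tag.trim().split(':').collect();
            if parts.len() == 2 {
                Some((parts[0].to_string(), parts[1].to_string()))
            } else {
                None // the real code logs "Invalid tag format" here
            }
        })
        .collect()
}

fn main() {
    let tags = parse_metrics_tags("env:test, service:quickdid, broken");
    assert_eq!(
        tags,
        vec![
            ("env".into(), "test".into()),
            ("service".into(), "quickdid".into()),
        ]
    );
}
```

Note that a value containing a `:` (such as a host:port pair) splits into three parts and is rejected by the `len() == 2` check.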
+6
-6
src/queue/adapter.rs
+6
-6
src/queue/adapter.rs
···
3
3
//! This module defines the core `QueueAdapter` trait that provides a common
4
4
//! interface for different queue implementations (MPSC, Redis, SQLite, etc.).
5
5
6
-
use async_trait::async_trait;
7
6
use super::error::Result;
7
+
use async_trait::async_trait;
8
8
9
9
/// Generic trait for queue adapters that can work with any work type.
10
10
///
···
173
173
#[tokio::test]
174
174
async fn test_default_trait_methods() {
175
175
let queue = MockQueue::<String>::new();
176
-
176
+
177
177
// Test default ack implementation
178
178
assert!(queue.ack(&"test".to_string()).await.is_ok());
179
-
179
+
180
180
// Test default try_push implementation
181
181
assert!(queue.try_push("test".to_string()).await.is_ok());
182
-
182
+
183
183
// Test default depth implementation
184
184
assert_eq!(queue.depth().await, None);
185
-
185
+
186
186
// Test default is_healthy implementation
187
187
assert!(queue.is_healthy().await);
188
188
}
189
-
}
189
+
}
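
For orientation, a sketch of the smallest `QueueAdapter` implementation that leans on the default methods exercised by `test_default_trait_methods` above. The `pull`/`push` signatures are taken from the adapters in this diff; the Vec-backed storage and the trait bound shown are illustrative:

```rust
use async_trait::async_trait;
use tokio::sync::Mutex;

use quickdid::queue::{QueueAdapter, Result};

// Illustrative in-memory queue; not part of the crate.
struct VecQueue<T> {
    items: Mutex<Vec<T>>,
}

#[async_trait]
impl<T: Send + Sync + 'static> QueueAdapter<T> for VecQueue<T> {
    async fn pull(&self) -> Option<T> {
        // LIFO pop for brevity; the real adapters are FIFO.
        self.items.lock().await.pop()
    }

    async fn push(&self, work: T) -> Result<()> {
        self.items.lock().await.push(work);
        Ok(())
    }

    // ack, try_push, depth, and is_healthy fall back to the trait defaults:
    // ack is a no-op, try_push delegates to push, depth reports None, and
    // is_healthy reports true.
}
```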
+4
-4
src/queue/error.rs
+4
-4
src/queue/error.rs
···
29
29
30
30
/// Redis operation failed.
31
31
#[error("error-quickdid-queue-5 Redis operation failed: {operation}: {details}")]
32
-
RedisOperationFailed {
32
+
RedisOperationFailed {
33
33
/// The Redis operation that failed
34
-
operation: String,
34
+
operation: String,
35
35
/// Details about the failure
36
-
details: String
36
+
details: String,
37
37
},
38
38
39
39
/// Failed to serialize an item for storage.
···
73
73
assert!(err.to_string().contains("LPUSH"));
74
74
assert!(err.to_string().contains("connection timeout"));
75
75
}
76
-
}
76
+
}
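
The structured `RedisOperationFailed` variant keeps the failing command and its cause separate, while the `#[error(...)]` attribute folds both into the rendered message, as the unit test above checks. At a call site that looks like:

```rust
use quickdid::queue::QueueError;

fn main() {
    let err = QueueError::RedisOperationFailed {
        operation: "LPUSH".to_string(),
        details: "connection timeout".to_string(),
    };
    // Display carries the error code prefix plus both fields.
    let msg = err.to_string();
    assert!(msg.contains("error-quickdid-queue-5"));
    assert!(msg.contains("LPUSH"));
    assert!(msg.contains("connection timeout"));
}
```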
+77
-26
src/queue/factory.rs
+77
-26
src/queue/factory.rs
···
9
9
use tokio::sync::mpsc;
10
10
11
11
use super::{
12
-
adapter::QueueAdapter,
13
-
mpsc::MpscQueueAdapter,
14
-
noop::NoopQueueAdapter,
15
-
redis::RedisQueueAdapter,
16
-
sqlite::SqliteQueueAdapter,
12
+
adapter::QueueAdapter, mpsc::MpscQueueAdapter, noop::NoopQueueAdapter,
13
+
redis::RedisQueueAdapter, sqlite::SqliteQueueAdapter, work::DedupKey,
17
14
};
18
15
19
16
// ========= MPSC Queue Factories =========
···
84
81
/// # Examples
85
82
///
86
83
/// ```no_run
87
-
/// use quickdid::queue::create_redis_queue;
84
+
/// use quickdid::queue::{create_redis_queue, HandleResolutionWork};
88
85
/// use deadpool_redis::Config;
89
86
///
90
87
/// # async fn example() -> anyhow::Result<()> {
91
88
/// let cfg = Config::from_url("redis://localhost:6379");
92
89
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
93
90
///
94
-
/// let queue = create_redis_queue::<String>(
91
+
/// let queue = create_redis_queue::<HandleResolutionWork>(
95
92
/// pool,
96
93
/// "worker-1".to_string(),
97
94
/// "queue:myapp:".to_string(),
···
107
104
timeout_seconds: u64,
108
105
) -> Arc<dyn QueueAdapter<T>>
109
106
where
110
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
107
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
111
108
{
112
109
Arc::new(RedisQueueAdapter::new(
113
110
pool,
···
117
114
))
118
115
}
119
116
117
+
/// Create a new Redis-backed queue adapter with deduplication.
118
+
///
119
+
/// This creates a distributed queue with deduplication to prevent duplicate items
120
+
/// from being queued within the specified TTL window.
121
+
///
122
+
/// # Arguments
123
+
///
124
+
/// * `pool` - Redis connection pool
125
+
/// * `worker_id` - Worker identifier for this queue instance
126
+
/// * `key_prefix` - Redis key prefix for queue operations
127
+
/// * `timeout_seconds` - Timeout for blocking operations
128
+
/// * `dedup_enabled` - Whether to enable deduplication
129
+
/// * `dedup_ttl` - TTL for deduplication keys in seconds
130
+
///
131
+
/// # Examples
132
+
///
133
+
/// ```no_run
134
+
/// use quickdid::queue::{create_redis_queue_with_dedup, HandleResolutionWork};
135
+
/// use deadpool_redis::Config;
136
+
///
137
+
/// # async fn example() -> anyhow::Result<()> {
138
+
/// let cfg = Config::from_url("redis://localhost:6379");
139
+
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
140
+
///
141
+
/// let queue = create_redis_queue_with_dedup::<HandleResolutionWork>(
142
+
/// pool,
143
+
/// "worker-1".to_string(),
144
+
/// "queue:myapp:".to_string(),
145
+
/// 5,
146
+
/// true, // Enable deduplication
147
+
/// 60, // 60 second dedup window
148
+
/// );
149
+
/// # Ok(())
150
+
/// # }
151
+
/// ```
152
+
pub fn create_redis_queue_with_dedup<T>(
153
+
pool: RedisPool,
154
+
worker_id: String,
155
+
key_prefix: String,
156
+
timeout_seconds: u64,
157
+
dedup_enabled: bool,
158
+
dedup_ttl: u64,
159
+
) -> Arc<dyn QueueAdapter<T>>
160
+
where
161
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
162
+
{
163
+
Arc::new(RedisQueueAdapter::with_dedup(
164
+
pool,
165
+
worker_id,
166
+
key_prefix,
167
+
timeout_seconds,
168
+
dedup_enabled,
169
+
dedup_ttl,
170
+
))
171
+
}
172
+
120
173
// ========= SQLite Queue Factories =========
121
174
122
175
/// Create a new SQLite queue adapter with unlimited queue size.
···
218
271
#[tokio::test]
219
272
async fn test_create_mpsc_queue() {
220
273
let queue = create_mpsc_queue::<String>(10);
221
-
274
+
222
275
queue.push("test".to_string()).await.unwrap();
223
276
let item = queue.pull().await;
224
277
assert_eq!(item, Some("test".to_string()));
···
228
281
async fn test_create_mpsc_queue_from_channel() {
229
282
let (sender, receiver) = mpsc::channel(5);
230
283
let queue = create_mpsc_queue_from_channel(sender.clone(), receiver);
231
-
284
+
232
285
// Send via original sender
233
286
sender.send("external".to_string()).await.unwrap();
234
-
287
+
235
288
// Receive via queue
236
289
let item = queue.pull().await;
237
290
assert_eq!(item, Some("external".to_string()));
···
240
293
#[tokio::test]
241
294
async fn test_create_noop_queue() {
242
295
let queue = create_noop_queue::<String>();
243
-
296
+
244
297
// Should accept pushes
245
298
queue.push("ignored".to_string()).await.unwrap();
246
-
299
+
247
300
// Should report as healthy
248
301
assert!(queue.is_healthy().await);
249
-
302
+
250
303
// Should report depth as 0
251
304
assert_eq!(queue.depth().await, Some(0));
252
305
}
···
264
317
.expect("Failed to create schema");
265
318
266
319
let queue = create_sqlite_queue::<HandleResolutionWork>(pool);
267
-
320
+
268
321
let work = HandleResolutionWork::new("test.example.com".to_string());
269
322
queue.push(work.clone()).await.unwrap();
270
-
323
+
271
324
let pulled = queue.pull().await;
272
325
assert_eq!(pulled, Some(work));
273
326
}
···
286
339
287
340
// Create queue with small max size
288
341
let queue = create_sqlite_queue_with_max_size::<HandleResolutionWork>(pool, 5);
289
-
342
+
290
343
// Push items
291
344
for i in 0..10 {
292
345
let work = HandleResolutionWork::new(format!("test-{}.example.com", i));
293
346
queue.push(work).await.unwrap();
294
347
}
295
-
348
+
296
349
// Should have limited items due to work shedding
297
350
let depth = queue.depth().await.unwrap();
298
-
assert!(depth <= 5, "Queue should have at most 5 items after work shedding");
351
+
assert!(
352
+
depth <= 5,
353
+
"Queue should have at most 5 items after work shedding"
354
+
);
299
355
}
300
356
301
357
#[tokio::test]
···
316
372
.as_nanos()
317
373
);
318
374
319
-
let queue = create_redis_queue::<String>(
320
-
pool,
321
-
"test-worker".to_string(),
322
-
test_prefix,
323
-
1,
324
-
);
375
+
let queue = create_redis_queue::<String>(pool, "test-worker".to_string(), test_prefix, 1);
325
376
326
377
queue.push("test-item".to_string()).await.unwrap();
327
378
let pulled = queue.pull().await;
328
379
assert_eq!(pulled, Some("test-item".to_string()));
329
380
}
330
-
}
381
+
}
+3
-8
src/queue/mod.rs
+3
-8
src/queue/mod.rs
···
63
63
// Re-export core types
64
64
pub use adapter::QueueAdapter;
65
65
pub use error::{QueueError, Result};
66
-
pub use work::HandleResolutionWork;
66
+
pub use work::{DedupKey, HandleResolutionWork};
67
67
68
68
// Re-export implementations (with limited visibility)
69
69
pub use mpsc::MpscQueueAdapter;
···
73
73
74
74
// Re-export factory functions
75
75
pub use factory::{
76
-
create_mpsc_queue,
77
-
create_mpsc_queue_from_channel,
78
-
create_noop_queue,
79
-
create_redis_queue,
80
-
create_sqlite_queue,
81
-
create_sqlite_queue_with_max_size,
76
+
create_mpsc_queue, create_mpsc_queue_from_channel, create_noop_queue, create_redis_queue,
77
+
create_redis_queue_with_dedup, create_sqlite_queue, create_sqlite_queue_with_max_size,
82
78
};
83
-
+4
-4
src/queue/mpsc.rs
+4
-4
src/queue/mpsc.rs
···
6
6
7
7
use async_trait::async_trait;
8
8
use std::sync::Arc;
9
-
use tokio::sync::{mpsc, Mutex};
9
+
use tokio::sync::{Mutex, mpsc};
10
10
11
11
use super::adapter::QueueAdapter;
12
12
use super::error::{QueueError, Result};
···
204
204
#[tokio::test]
205
205
async fn test_mpsc_queue_health() {
206
206
let queue = MpscQueueAdapter::<String>::new(10);
207
-
207
+
208
208
// Queue should be healthy initially
209
209
assert!(queue.is_healthy().await);
210
210
···
212
212
let (sender, receiver) = mpsc::channel::<String>(10);
213
213
drop(receiver);
214
214
let closed_queue = MpscQueueAdapter::from_channel(sender, mpsc::channel(1).1);
215
-
215
+
216
216
// Push should fail on closed queue
217
217
let result = closed_queue.push("test".to_string()).await;
218
218
assert!(result.is_err());
···
283
283
// Ack should always succeed (no-op)
284
284
queue.ack(&item).await.unwrap();
285
285
}
286
-
}
286
+
}
+1
-1
src/queue/noop.rs
+1
-1
src/queue/noop.rs
+240
-12
src/queue/redis.rs
+240
-12
src/queue/redis.rs
···
10
10
11
11
use super::adapter::QueueAdapter;
12
12
use super::error::{QueueError, Result};
13
+
use super::work::DedupKey;
13
14
14
15
/// Redis-backed queue adapter implementation.
15
16
///
···
40
41
/// # Examples
41
42
///
42
43
/// ```no_run
43
-
/// use quickdid::queue::RedisQueueAdapter;
44
-
/// use quickdid::queue::QueueAdapter;
44
+
/// use quickdid::queue::{RedisQueueAdapter, QueueAdapter, HandleResolutionWork};
45
45
/// use deadpool_redis::Config;
46
46
///
47
47
/// # async fn example() -> anyhow::Result<()> {
···
50
50
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
51
51
///
52
52
/// // Create queue adapter
53
-
/// let queue = RedisQueueAdapter::<String>::new(
53
+
/// let queue = RedisQueueAdapter::<HandleResolutionWork>::new(
54
54
/// pool,
55
55
/// "worker-1".to_string(),
56
56
/// "queue:myapp:".to_string(),
···
58
58
/// );
59
59
///
60
60
/// // Use the queue
61
-
/// queue.push("work-item".to_string()).await?;
61
+
/// let work = HandleResolutionWork::new("alice.bsky.social".to_string());
62
+
/// queue.push(work.clone()).await?;
62
63
/// if let Some(item) = queue.pull().await {
63
64
/// // Process item
64
65
/// queue.ack(&item).await?;
···
78
79
key_prefix: String,
79
80
/// Timeout for blocking RPOPLPUSH operations (in seconds)
80
81
timeout_seconds: u64,
82
+
/// Enable deduplication to prevent duplicate items in queue
83
+
dedup_enabled: bool,
84
+
/// TTL for deduplication keys in seconds
85
+
dedup_ttl: u64,
81
86
/// Type marker for generic parameter
82
87
_phantom: std::marker::PhantomData<T>,
83
88
}
···
120
125
key_prefix: String,
121
126
timeout_seconds: u64,
122
127
) -> Self {
128
+
Self::with_dedup(
129
+
pool,
130
+
worker_id,
131
+
key_prefix,
132
+
timeout_seconds,
133
+
false,
134
+
60, // Default TTL of 60 seconds
135
+
)
136
+
}
137
+
138
+
/// Create a new Redis queue adapter with deduplication settings.
139
+
///
140
+
/// # Arguments
141
+
///
142
+
/// * `pool` - Redis connection pool
143
+
/// * `worker_id` - Unique identifier for this worker instance
144
+
/// * `key_prefix` - Redis key prefix for queue operations
145
+
/// * `timeout_seconds` - Timeout for blocking pull operations
146
+
/// * `dedup_enabled` - Whether to enable deduplication
147
+
/// * `dedup_ttl` - TTL for deduplication keys in seconds
148
+
pub fn with_dedup(
149
+
pool: RedisPool,
150
+
worker_id: String,
151
+
key_prefix: String,
152
+
timeout_seconds: u64,
153
+
dedup_enabled: bool,
154
+
dedup_ttl: u64,
155
+
) -> Self {
123
156
Self {
124
157
pool,
125
158
worker_id,
126
159
key_prefix,
127
160
timeout_seconds,
161
+
dedup_enabled,
162
+
dedup_ttl,
128
163
_phantom: std::marker::PhantomData,
129
164
}
130
165
}
···
138
173
fn worker_queue_key(&self) -> String {
139
174
format!("{}{}", self.key_prefix, self.worker_id)
140
175
}
176
+
177
+
/// Get the deduplication key for an item.
178
+
/// This key is used to track if an item is already queued.
179
+
fn dedup_key(&self, item_id: &str) -> String {
180
+
format!("{}dedup:{}", self.key_prefix, item_id)
181
+
}
182
+
183
+
/// Check and mark an item for deduplication.
184
+
/// Returns true if the item was successfully marked (not duplicate),
185
+
/// false if it was already in the deduplication set (duplicate).
186
+
async fn check_and_mark_dedup(
187
+
&self,
188
+
conn: &mut deadpool_redis::Connection,
189
+
item_id: &str,
190
+
) -> Result<bool> {
191
+
if !self.dedup_enabled {
192
+
return Ok(true); // Always allow if dedup is disabled
193
+
}
194
+
195
+
let dedup_key = self.dedup_key(item_id);
196
+
197
+
// Use SET NX EX to atomically set if not exists with expiry
198
+
// Returns OK if the key was set, Nil if it already existed
199
+
let result: Option<String> = deadpool_redis::redis::cmd("SET")
200
+
.arg(&dedup_key)
201
+
.arg("1")
202
+
.arg("NX") // Only set if not exists
203
+
.arg("EX") // Set expiry
204
+
.arg(self.dedup_ttl)
205
+
.query_async(conn)
206
+
.await
207
+
.map_err(|e| QueueError::RedisOperationFailed {
208
+
operation: "SET NX EX".to_string(),
209
+
details: e.to_string(),
210
+
})?;
211
+
212
+
// If result is Some("OK"), the key was set (not duplicate)
213
+
// If result is None, the key already existed (duplicate)
214
+
Ok(result.is_some())
215
+
}
141
216
}
142
217
143
218
#[async_trait]
144
219
impl<T> QueueAdapter<T> for RedisQueueAdapter<T>
145
220
where
146
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
221
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
147
222
{
148
223
async fn pull(&self) -> Option<T> {
149
224
match self.pool.get().await {
···
198
273
.get()
199
274
.await
200
275
.map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
276
+
277
+
// Check for deduplication if enabled
278
+
if self.dedup_enabled {
279
+
let dedup_id = work.dedup_key();
280
+
let is_new = self.check_and_mark_dedup(&mut conn, &dedup_id).await?;
281
+
282
+
if !is_new {
283
+
debug!(
284
+
dedup_key = %dedup_id,
285
+
"Item already queued, skipping duplicate"
286
+
);
287
+
return Ok(()); // Successfully deduplicated
288
+
}
289
+
}
201
290
202
291
let data = serde_json::to_vec(&work)
203
292
.map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
···
392
481
.unwrap()
393
482
.as_nanos()
394
483
);
395
-
let adapter = RedisQueueAdapter::<String>::new(
396
-
pool,
397
-
"test-worker-depth".to_string(),
398
-
test_prefix,
399
-
1,
400
-
);
484
+
let adapter =
485
+
RedisQueueAdapter::<String>::new(pool, "test-worker-depth".to_string(), test_prefix, 1);
401
486
402
487
// Initially empty
403
488
assert_eq!(adapter.depth().await, Some(0));
···
436
521
}
437
522
438
523
#[tokio::test]
524
+
async fn test_redis_queue_deduplication() {
525
+
use crate::queue::HandleResolutionWork;
526
+
527
+
let pool = match crate::test_helpers::get_test_redis_pool() {
528
+
Some(p) => p,
529
+
None => {
530
+
eprintln!("Skipping Redis test - no Redis connection available");
531
+
return;
532
+
}
533
+
};
534
+
535
+
let test_prefix = format!(
536
+
"test:queue:dedup:{}:",
537
+
std::time::SystemTime::now()
538
+
.duration_since(std::time::UNIX_EPOCH)
539
+
.unwrap()
540
+
.as_nanos()
541
+
);
542
+
543
+
// Create adapter with deduplication enabled
544
+
let adapter = RedisQueueAdapter::<HandleResolutionWork>::with_dedup(
545
+
pool.clone(),
546
+
"test-worker-dedup".to_string(),
547
+
test_prefix.clone(),
548
+
1,
549
+
true, // Enable deduplication
550
+
2, // 2 second TTL for quick testing
551
+
);
552
+
553
+
let work = HandleResolutionWork::new("alice.example.com".to_string());
554
+
555
+
// First push should succeed
556
+
adapter
557
+
.push(work.clone())
558
+
.await
559
+
.expect("First push should succeed");
560
+
561
+
// Second push of same item should be deduplicated (but still return Ok)
562
+
adapter
563
+
.push(work.clone())
564
+
.await
565
+
.expect("Second push should succeed (deduplicated)");
566
+
567
+
// Queue should only have one item
568
+
let depth = adapter.depth().await;
569
+
assert_eq!(
570
+
depth,
571
+
Some(1),
572
+
"Queue should only have one item after deduplication"
573
+
);
574
+
575
+
// Pull the item
576
+
let pulled = adapter.pull().await;
577
+
assert_eq!(pulled, Some(work.clone()));
578
+
579
+
// Queue should now be empty
580
+
let depth = adapter.depth().await;
581
+
assert_eq!(depth, Some(0), "Queue should be empty after pulling");
582
+
583
+
// Wait for dedup TTL to expire
584
+
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
585
+
586
+
// Should be able to push again after TTL expires
587
+
adapter
588
+
.push(work.clone())
589
+
.await
590
+
.expect("Push after TTL expiry should succeed");
591
+
592
+
let depth = adapter.depth().await;
593
+
assert_eq!(
594
+
depth,
595
+
Some(1),
596
+
"Queue should have one item after TTL expiry"
597
+
);
598
+
}
599
+
600
+
#[tokio::test]
601
+
async fn test_redis_queue_deduplication_disabled() {
602
+
use crate::queue::HandleResolutionWork;
603
+
604
+
let pool = match crate::test_helpers::get_test_redis_pool() {
605
+
Some(p) => p,
606
+
None => {
607
+
eprintln!("Skipping Redis test - no Redis connection available");
608
+
return;
609
+
}
610
+
};
611
+
612
+
let test_prefix = format!(
613
+
"test:queue:nodedup:{}:",
614
+
std::time::SystemTime::now()
615
+
.duration_since(std::time::UNIX_EPOCH)
616
+
.unwrap()
617
+
.as_nanos()
618
+
);
619
+
620
+
// Create adapter with deduplication disabled
621
+
let adapter = RedisQueueAdapter::<HandleResolutionWork>::with_dedup(
622
+
pool.clone(),
623
+
"test-worker-nodedup".to_string(),
624
+
test_prefix.clone(),
625
+
1,
626
+
false, // Disable deduplication
627
+
60,
628
+
);
629
+
630
+
let work = HandleResolutionWork::new("bob.example.com".to_string());
631
+
632
+
// Push same item twice
633
+
adapter
634
+
.push(work.clone())
635
+
.await
636
+
.expect("First push should succeed");
637
+
adapter
638
+
.push(work.clone())
639
+
.await
640
+
.expect("Second push should succeed");
641
+
642
+
// Queue should have two items (no deduplication)
643
+
let depth = adapter.depth().await;
644
+
assert_eq!(
645
+
depth,
646
+
Some(2),
647
+
"Queue should have two items when deduplication is disabled"
648
+
);
649
+
650
+
// Pull both items
651
+
let pulled1 = adapter.pull().await;
652
+
assert_eq!(pulled1, Some(work.clone()));
653
+
654
+
let pulled2 = adapter.pull().await;
655
+
assert_eq!(pulled2, Some(work.clone()));
656
+
657
+
// Queue should now be empty
658
+
let depth = adapter.depth().await;
659
+
assert_eq!(
660
+
depth,
661
+
Some(0),
662
+
"Queue should be empty after pulling all items"
663
+
);
664
+
}
665
+
666
+
#[tokio::test]
439
667
async fn test_redis_queue_serialization() {
440
668
use crate::queue::HandleResolutionWork;
441
669
···
471
699
// Ack
472
700
adapter.ack(&work).await.unwrap();
473
701
}
474
-
}
702
+
}
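
The dedup primitive above is a single atomic Redis command: `SET <key> 1 NX EX <ttl>` succeeds (returns `OK`) only for the first caller inside the TTL window, which is exactly the `Some`/`None` distinction `check_and_mark_dedup` relies on. A standalone sketch against a local Redis (the URL is an assumption):

```rust
use deadpool_redis::{Config, Runtime};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let pool = Config::from_url("redis://localhost:6379")
        .create_pool(Some(Runtime::Tokio1))?;
    let mut conn = pool.get().await?;

    for attempt in 1..=2 {
        let result: Option<String> = deadpool_redis::redis::cmd("SET")
            .arg("queue:example:dedup:alice.example.com")
            .arg("1")
            .arg("NX") // only set if the key does not already exist
            .arg("EX") // expire after the dedup window
            .arg(60)
            .query_async(&mut conn)
            .await?;
        // Attempt 1 prints "new = true"; attempt 2 inside the TTL prints
        // "new = false" (the duplicate).
        println!("attempt {attempt}: new = {}", result.is_some());
    }
    Ok(())
}
```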
+40
-37
src/queue/sqlite.rs
+40
-37
src/queue/sqlite.rs
···
165
165
let record = match sqlx::query(
166
166
"SELECT id, work FROM handle_resolution_queue
167
167
ORDER BY queued_at ASC
168
-
LIMIT 1"
168
+
LIMIT 1",
169
169
)
170
170
.fetch_optional(&mut *transaction)
171
171
.await
···
226
226
227
227
// Optimized approach: Insert first, then check if cleanup needed
228
228
// This avoids counting on every insert
229
-
sqlx::query(
230
-
"INSERT INTO handle_resolution_queue (work, queued_at) VALUES (?1, ?2)"
231
-
)
232
-
.bind(&work_json)
233
-
.bind(current_timestamp)
234
-
.execute(&self.pool)
235
-
.await
236
-
.map_err(|e| QueueError::PushFailed(format!("Failed to insert work item: {}", e)))?;
229
+
sqlx::query("INSERT INTO handle_resolution_queue (work, queued_at) VALUES (?1, ?2)")
230
+
.bind(&work_json)
231
+
.bind(current_timestamp)
232
+
.execute(&self.pool)
233
+
.await
234
+
.map_err(|e| QueueError::PushFailed(format!("Failed to insert work item: {}", e)))?;
237
235
238
236
// Implement optimized work shedding if max_size is configured
239
237
if self.max_size > 0 {
···
243
241
let approx_count: Option<i64> = sqlx::query_scalar(
244
242
"SELECT COUNT(*) FROM (
245
243
SELECT 1 FROM handle_resolution_queue LIMIT ?1
246
-
) AS limited_count"
244
+
) AS limited_count",
247
245
)
248
246
.bind(check_limit)
249
247
.fetch_one(&self.pool)
···
251
249
.map_err(|e| QueueError::PushFailed(format!("Failed to check queue size: {}", e)))?;
252
250
253
251
// Only perform cleanup if we're definitely over the limit
254
-
if let Some(count) = approx_count && count >= check_limit {
252
+
if let Some(count) = approx_count
253
+
&& count >= check_limit
254
+
{
255
255
// Perform batch cleanup - delete more than just the excess to reduce frequency
256
256
// Delete 20% more than needed to avoid frequent shedding
257
257
let target_size = (self.max_size as f64 * 0.8) as i64; // Keep 80% of max_size
258
258
let to_delete = count - target_size;
259
-
259
+
260
260
if to_delete > 0 {
261
261
// Optimized deletion: First get the cutoff id and timestamp
262
262
// This avoids the expensive subquery in the DELETE statement
263
263
let cutoff: Option<(i64, i64)> = sqlx::query_as(
264
264
"SELECT id, queued_at FROM handle_resolution_queue
265
265
ORDER BY queued_at ASC, id ASC
266
-
LIMIT 1 OFFSET ?1"
266
+
LIMIT 1 OFFSET ?1",
267
267
)
268
268
.bind(to_delete - 1)
269
269
.fetch_optional(&self.pool)
···
276
276
let deleted_result = sqlx::query(
277
277
"DELETE FROM handle_resolution_queue
278
278
WHERE queued_at < ?1
279
-
OR (queued_at = ?1 AND id <= ?2)"
279
+
OR (queued_at = ?1 AND id <= ?2)",
280
280
)
281
281
.bind(cutoff_timestamp)
282
282
.bind(cutoff_id)
283
283
.execute(&self.pool)
284
284
.await
285
-
.map_err(|e| QueueError::PushFailed(format!("Failed to delete excess entries: {}", e)))?;
285
+
.map_err(|e| {
286
+
QueueError::PushFailed(format!(
287
+
"Failed to delete excess entries: {}",
288
+
e
289
+
))
290
+
})?;
286
291
287
292
let deleted_count = deleted_result.rows_affected();
288
293
if deleted_count > 0 {
289
294
info!(
290
295
"Work shedding: deleted {} oldest entries (target size: {}, max: {})",
291
-
deleted_count,
292
-
target_size,
293
-
self.max_size
296
+
deleted_count, target_size, self.max_size
294
297
);
295
298
}
296
299
}
···
298
301
}
299
302
}
300
303
301
-
debug!("Pushed work item to SQLite queue (max_size: {})", self.max_size);
304
+
debug!(
305
+
"Pushed work item to SQLite queue (max_size: {})",
306
+
self.max_size
307
+
);
302
308
Ok(())
303
309
}
304
310
···
310
316
}
311
317
312
318
async fn depth(&self) -> Option<usize> {
313
-
match sqlx::query_scalar::<_, i64>(
314
-
"SELECT COUNT(*) FROM handle_resolution_queue"
315
-
)
316
-
.fetch_one(&self.pool)
317
-
.await
319
+
match sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM handle_resolution_queue")
320
+
.fetch_one(&self.pool)
321
+
.await
318
322
{
319
323
Ok(count) => Some(count as usize),
320
324
Err(e) => {
···
380
384
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::new(pool);
381
385
382
386
// Push multiple items
383
-
let handles = vec!["alice.example.com", "bob.example.com", "charlie.example.com"];
387
+
let handles = vec![
388
+
"alice.example.com",
389
+
"bob.example.com",
390
+
"charlie.example.com",
391
+
];
384
392
for handle in &handles {
385
393
let work = HandleResolutionWork::new(handle.to_string());
386
394
adapter.push(work).await.unwrap();
···
419
427
#[tokio::test]
420
428
async fn test_sqlite_queue_work_shedding() {
421
429
let pool = create_test_pool().await;
422
-
430
+
423
431
// Create adapter with small max_size for testing
424
432
let max_size = 10;
425
-
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(
426
-
pool.clone(),
427
-
max_size
428
-
);
433
+
let adapter =
434
+
SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(pool.clone(), max_size);
429
435
430
436
// Push items up to the limit (should not trigger shedding)
431
437
for i in 0..max_size {
···
446
452
// After triggering shedding, queue should be around 80% of max_size
447
453
let depth_after_shedding = adapter.depth().await.unwrap();
448
454
let expected_size = (max_size as f64 * 0.8) as usize;
449
-
455
+
450
456
// Allow some variance due to batch deletion
451
457
assert!(
452
458
depth_after_shedding <= expected_size + 1,
···
459
465
#[tokio::test]
460
466
async fn test_sqlite_queue_work_shedding_disabled() {
461
467
let pool = create_test_pool().await;
462
-
468
+
463
469
// Create adapter with max_size = 0 (disabled work shedding)
464
-
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(
465
-
pool,
466
-
0
467
-
);
470
+
let adapter = SqliteQueueAdapter::<HandleResolutionWork>::with_max_size(pool, 0);
468
471
469
472
// Push many items (should not trigger any shedding)
470
473
for i in 0..100 {
···
499
502
let pulled = adapter.pull().await;
500
503
assert_eq!(pulled, Some(work));
501
504
}
502
-
}
505
+
}
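
The shedding policy above is easiest to see as pure arithmetic: once the approximate count reaches `max_size`, the queue is trimmed down to 80% of `max_size`, so the next several pushes cannot re-trigger a cleanup. A self-contained restatement:

```rust
/// How many oldest entries the SQLite adapter deletes once the queue is
/// at or over its limit (mirrors the 0.8 factor in push above).
fn entries_to_shed(count: i64, max_size: usize) -> i64 {
    let target_size = (max_size as f64 * 0.8) as i64; // keep 80% of max_size
    (count - target_size).max(0)
}

fn main() {
    // With max_size = 10: hitting the limit sheds 2 entries, leaving 8,
    // which matches the variance allowed by test_sqlite_queue_work_shedding.
    assert_eq!(entries_to_shed(10, 10), 2);
    // Below the limit nothing is deleted (shedding is not even checked).
    assert_eq!(entries_to_shed(5, 10), 0);
}
```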
+43
-5
src/queue/work.rs
+43
-5
src/queue/work.rs
···
50
50
}
51
51
}
52
52
53
+
/// Trait for getting a unique deduplication key from a work item.
54
+
/// This is used by the Redis queue adapter to prevent duplicate items.
55
+
pub trait DedupKey {
56
+
/// Get a unique key for deduplication purposes.
57
+
/// This should return a consistent identifier for equivalent work items.
58
+
fn dedup_key(&self) -> String;
59
+
}
60
+
61
+
impl DedupKey for HandleResolutionWork {
62
+
fn dedup_key(&self) -> String {
63
+
// Use the handle itself as the dedup key
64
+
self.handle.clone()
65
+
}
66
+
}
67
+
68
+
// For testing purposes, implement DedupKey for String
69
+
#[cfg(test)]
70
+
impl DedupKey for String {
71
+
fn dedup_key(&self) -> String {
72
+
self.clone()
73
+
}
74
+
}
75
+
53
76
#[cfg(test)]
54
77
mod tests {
55
78
use super::*;
···
64
87
#[test]
65
88
fn test_handle_resolution_work_serialization() {
66
89
let work = HandleResolutionWork::new("bob.example.com".to_string());
67
-
90
+
68
91
// Test JSON serialization (which is what we actually use in the queue adapters)
69
92
let json = serde_json::to_string(&work).expect("Failed to serialize to JSON");
70
-
let deserialized: HandleResolutionWork =
93
+
let deserialized: HandleResolutionWork =
71
94
serde_json::from_str(&json).expect("Failed to deserialize from JSON");
72
95
assert_eq!(work, deserialized);
73
-
96
+
74
97
// Verify the JSON structure
75
98
let json_value: serde_json::Value = serde_json::from_str(&json).unwrap();
76
99
assert_eq!(json_value["handle"], "bob.example.com");
···
88
111
let work1 = HandleResolutionWork::new("alice.example.com".to_string());
89
112
let work2 = HandleResolutionWork::new("alice.example.com".to_string());
90
113
let work3 = HandleResolutionWork::new("bob.example.com".to_string());
91
-
114
+
92
115
assert_eq!(work1, work2);
93
116
assert_ne!(work1, work3);
94
117
}
95
-
}
118
+
119
+
#[test]
120
+
fn test_handle_resolution_work_dedup_key() {
121
+
let work1 = HandleResolutionWork::new("alice.example.com".to_string());
122
+
let work2 = HandleResolutionWork::new("alice.example.com".to_string());
123
+
let work3 = HandleResolutionWork::new("bob.example.com".to_string());
124
+
125
+
// Same handle should have same dedup key
126
+
assert_eq!(work1.dedup_key(), work2.dedup_key());
127
+
assert_eq!(work1.dedup_key(), "alice.example.com");
128
+
129
+
// Different handle should have different dedup key
130
+
assert_ne!(work1.dedup_key(), work3.dedup_key());
131
+
assert_eq!(work3.dedup_key(), "bob.example.com");
132
+
}
133
+
}
+14
-15
src/sqlite_schema.rs
+14
-15
src/sqlite_schema.rs
···
4
4
//! schema used by the SQLite-backed handle resolver cache.
5
5
6
6
use anyhow::Result;
7
-
use sqlx::{SqlitePool, migrate::MigrateDatabase, Sqlite};
7
+
use sqlx::{Sqlite, SqlitePool, migrate::MigrateDatabase};
8
8
use std::path::Path;
9
9
10
10
/// SQL schema for the handle resolution cache table.
···
60
60
tracing::info!("Initializing SQLite database: {}", database_url);
61
61
62
62
// Extract the database path from the URL for file-based databases
63
-
if let Some(path) = database_url.strip_prefix("sqlite:")
64
-
&& path != ":memory:"
65
-
&& !path.is_empty()
63
+
if let Some(path) = database_url.strip_prefix("sqlite:")
64
+
&& path != ":memory:"
65
+
&& !path.is_empty()
66
66
{
67
67
// Create the database file if it doesn't exist
68
68
if !Sqlite::database_exists(database_url).await? {
···
71
71
}
72
72
73
73
// Ensure the parent directory exists
74
-
if let Some(parent) = Path::new(path).parent()
75
-
&& !parent.exists()
74
+
if let Some(parent) = Path::new(path).parent()
75
+
&& !parent.exists()
76
76
{
77
77
tracing::info!("Creating directory: {}", parent.display());
78
78
std::fs::create_dir_all(parent)?;
···
114
114
sqlx::query(CREATE_HANDLE_RESOLUTION_CACHE_TABLE)
115
115
.execute(pool)
116
116
.await?;
117
-
117
+
118
118
sqlx::query(CREATE_HANDLE_RESOLUTION_QUEUE_TABLE)
119
119
.execute(pool)
120
120
.await?;
···
249
249
250
250
let cutoff_timestamp = current_timestamp - (max_age_seconds as i64);
251
251
252
-
let result = sqlx::query(
253
-
"DELETE FROM handle_resolution_queue WHERE queued_at < ?1"
254
-
)
255
-
.bind(cutoff_timestamp)
256
-
.execute(pool)
257
-
.await?;
252
+
let result = sqlx::query("DELETE FROM handle_resolution_queue WHERE queued_at < ?1")
253
+
.bind(cutoff_timestamp)
254
+
.execute(pool)
255
+
.await?;
258
256
259
257
let deleted_count = result.rows_affected();
260
258
if deleted_count > 0 {
···
325
323
let old_timestamp = std::time::SystemTime::now()
326
324
.duration_since(std::time::UNIX_EPOCH)
327
325
.unwrap()
328
-
.as_secs() as i64 - 3600; // 1 hour ago
326
+
.as_secs() as i64
327
+
- 3600; // 1 hour ago
329
328
330
329
sqlx::query(
331
330
"INSERT INTO handle_resolution_cache (key, result, created, updated) VALUES (1, ?1, ?2, ?2)"
···
380
379
assert_eq!(total_entries, 1);
381
380
assert!(size_bytes > 0);
382
381
}
383
-
}
382
+
}
+2
-2
src/test_helpers.rs
+2
-2
src/test_helpers.rs
···
4
4
use deadpool_redis::Pool;
5
5
6
6
/// Helper function to get a Redis pool for testing.
7
-
///
7
+
///
8
8
/// Returns None if TEST_REDIS_URL is not set, logging a skip message.
9
9
/// This consolidates the repeated Redis test setup code.
10
10
pub(crate) fn get_test_redis_pool() -> Option<Pool> {
···
32
32
None => return,
33
33
}
34
34
};
35
-
}
35
+
}
+364
test-scripts/docker-test.sh
+364
test-scripts/docker-test.sh
···
1
+
#!/bin/bash
2
+
3
+
# Comprehensive test script for Telegraf/TimescaleDB metrics setup
4
+
# This script validates the entire metrics pipeline
5
+
6
+
set -e
7
+
8
+
echo "========================================="
9
+
echo "Telegraf/TimescaleDB Metrics Test Suite"
10
+
echo "========================================="
11
+
echo ""
12
+
13
+
# Check if Docker is running
14
+
if ! docker info > /dev/null 2>&1; then
15
+
echo "โ Docker is not running. Please start Docker first."
16
+
exit 1
17
+
fi
18
+
19
+
# Function to wait for a service to be healthy
20
+
wait_for_service() {
21
+
local service=$1
22
+
local max_attempts=30
23
+
local attempt=1
24
+
25
+
echo -n "Waiting for $service to be healthy"
26
+
while [ $attempt -le $max_attempts ]; do
27
+
if docker-compose ps $service | grep -q "healthy"; then
28
+
echo " โ
"
29
+
return 0
30
+
fi
31
+
echo -n "."
32
+
sleep 2
33
+
attempt=$((attempt + 1))
34
+
done
35
+
echo " โ"
36
+
echo "Service $service failed to become healthy after $max_attempts attempts"
37
+
return 1
38
+
}
39
+
40
+
# Function to run SQL query
41
+
run_query() {
42
+
docker exec -i timescaledb psql -U postgres -d metrics -t -c "$1" 2>/dev/null
43
+
}
44
+
45
+
# Function to check table exists
46
+
check_table() {
47
+
local table=$1
48
+
local result=$(run_query "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = '$table');")
49
+
if [[ "$result" =~ "t" ]]; then
50
+
echo "โ
Table '$table' exists"
51
+
return 0
52
+
else
53
+
echo "โ Table '$table' does not exist"
54
+
return 1
55
+
fi
56
+
}
57
+
58
+
# Navigate to the metrics-stack directory (create if needed)
59
+
if [ ! -d "metrics-stack" ]; then
60
+
echo "Creating metrics-stack directory..."
61
+
mkdir -p metrics-stack/telegraf
62
+
mkdir -p metrics-stack/test-scripts
63
+
mkdir -p metrics-stack/init-scripts
64
+
fi
65
+
66
+
cd metrics-stack
67
+
68
+
# Create .env file if it doesn't exist
69
+
if [ ! -f ".env" ]; then
70
+
echo "Creating .env file..."
71
+
cat > .env << 'EOF'
72
+
# PostgreSQL/TimescaleDB Configuration
73
+
POSTGRES_DB=metrics
74
+
POSTGRES_USER=postgres
75
+
POSTGRES_PASSWORD=secretpassword
76
+
77
+
# Telegraf Database User
78
+
TELEGRAF_DB_USER=postgres
79
+
TELEGRAF_DB_PASSWORD=secretpassword
80
+
81
+
# TimescaleDB Settings
82
+
TIMESCALE_TELEMETRY=off
83
+
EOF
84
+
fi
85
+
86
+
# Copy configuration files if they don't exist
87
+
if [ ! -f "telegraf/telegraf.conf" ]; then
88
+
echo "Creating telegraf.conf..."
89
+
cat > telegraf/telegraf.conf << 'EOF'
90
+
[agent]
91
+
interval = "10s"
92
+
round_interval = true
93
+
metric_batch_size = 1000
94
+
metric_buffer_limit = 10000
95
+
collection_jitter = "0s"
96
+
flush_interval = "10s"
97
+
flush_jitter = "0s"
98
+
precision = ""
99
+
debug = false
100
+
quiet = false
101
+
hostname = "telegraf-agent"
102
+
omit_hostname = false
103
+
104
+
[[inputs.statsd]]
105
+
service_address = ":8125"
106
+
protocol = "udp"
107
+
delete_gauges = true
108
+
delete_counters = true
109
+
delete_sets = true
110
+
delete_timings = true
111
+
percentiles = [50, 90, 95, 99]
112
+
metric_separator = "."
113
+
allowed_pending_messages = 10000
114
+
datadog_extensions = true
115
+
datadog_distributions = true
116
+
117
+
[[outputs.postgresql]]
118
+
connection = "host=timescaledb user=${TELEGRAF_DB_USER} password=${TELEGRAF_DB_PASSWORD} dbname=${POSTGRES_DB} sslmode=disable"
119
+
schema = "public"
120
+
create_templates = [
121
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
122
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
123
+
]
124
+
tags_as_jsonb = true
125
+
fields_as_jsonb = false
126
+
EOF
127
+
fi
128
+
129
+
# Copy docker-compose.yml if it doesn't exist
130
+
if [ ! -f "docker-compose.yml" ]; then
131
+
echo "Creating docker-compose.yml..."
132
+
cat > docker-compose.yml << 'EOF'
133
+
version: '3.8'
134
+
135
+
services:
136
+
timescaledb:
137
+
image: timescale/timescaledb:latest-pg17
138
+
container_name: timescaledb
139
+
restart: unless-stopped
140
+
environment:
141
+
POSTGRES_DB: ${POSTGRES_DB}
142
+
POSTGRES_USER: ${POSTGRES_USER}
143
+
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
144
+
TIMESCALE_TELEMETRY: ${TIMESCALE_TELEMETRY}
145
+
ports:
146
+
- "5442:5432"
147
+
volumes:
148
+
- timescale_data:/home/postgres/pgdata/data
149
+
- ./init-scripts:/docker-entrypoint-initdb.d:ro
150
+
command:
151
+
- postgres
152
+
- -c
153
+
- shared_buffers=256MB
154
+
- -c
155
+
- effective_cache_size=1GB
156
+
- -c
157
+
- maintenance_work_mem=64MB
158
+
- -c
159
+
- work_mem=8MB
160
+
healthcheck:
161
+
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
162
+
interval: 10s
163
+
timeout: 5s
164
+
retries: 5
165
+
networks:
166
+
- metrics_network
167
+
168
+
telegraf:
169
+
image: telegraf:1.35
170
+
container_name: telegraf
171
+
restart: unless-stopped
172
+
environment:
173
+
TELEGRAF_DB_USER: ${TELEGRAF_DB_USER}
174
+
TELEGRAF_DB_PASSWORD: ${TELEGRAF_DB_PASSWORD}
175
+
POSTGRES_DB: ${POSTGRES_DB}
176
+
ports:
177
+
- "8125:8125/udp"
178
+
volumes:
179
+
- ./telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro
180
+
depends_on:
181
+
timescaledb:
182
+
condition: service_healthy
183
+
networks:
184
+
- metrics_network
185
+
command: ["telegraf", "--config", "/etc/telegraf/telegraf.conf"]
186
+
187
+
networks:
188
+
metrics_network:
189
+
driver: bridge
190
+
191
+
volumes:
192
+
timescale_data:
193
+
EOF
194
+
fi
195
+
196
+
# Create init script
197
+
if [ ! -f "init-scripts/01-init.sql" ]; then
198
+
echo "Creating init script..."
199
+
cat > init-scripts/01-init.sql << 'EOF'
200
+
-- Enable TimescaleDB extension
201
+
CREATE EXTENSION IF NOT EXISTS timescaledb;
202
+
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
203
+
EOF
204
+
fi
205
+
206
+
echo ""
207
+
echo "Step 1: Starting Docker services..."
208
+
echo "========================================="
209
+
docker-compose down -v 2>/dev/null || true
210
+
docker-compose up -d
211
+
212
+
echo ""
213
+
echo "Step 2: Waiting for services to be healthy..."
214
+
echo "========================================="
215
+
wait_for_service timescaledb
216
+
sleep 5 # Extra time for Telegraf to connect
217
+
218
+
echo ""
219
+
echo "Step 3: Sending test metrics..."
220
+
echo "========================================="
221
+
222
+
# Send various types of metrics
223
+
echo "Sending counter metrics..."
224
+
for i in {1..5}; do
225
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
226
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
227
+
done
228
+
229
+
echo "Sending gauge metrics..."
230
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
231
+
sleep 1
232
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
233
+
234
+
echo "Sending timing metrics..."
235
+
for i in {1..10}; do
236
+
duration=$((RANDOM % 100 + 10))
237
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
238
+
done
239
+
240
+
echo "Sending histogram metrics..."
241
+
for i in {1..5}; do
242
+
resolution_time=$((RANDOM % 500 + 50))
243
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
244
+
done
245
+
246
+
echo "Waiting 15 seconds for Telegraf to flush metrics..."
247
+
sleep 15
248
+
249
+
echo ""
250
+
echo "Step 4: Verifying table creation..."
251
+
echo "========================================="
252
+
253
+
# Check if tables were created
254
+
check_table "quickdid.http.request.count"
255
+
check_table "quickdid.http.request.duration_ms"
256
+
check_table "quickdid.resolver.rate_limit.available_permits"
257
+
check_table "quickdid.resolver.resolution_time"
258
+
259
+
echo ""
260
+
echo "Step 5: Verifying data insertion..."
261
+
echo "========================================="
262
+
263
+
# Check row counts
264
+
for table in "quickdid.http.request.count" "quickdid.http.request.duration_ms" "quickdid.resolver.rate_limit.available_permits" "quickdid.resolver.resolution_time"; do
265
+
count=$(run_query "SELECT COUNT(*) FROM \"$table\";" | tr -d ' ')
266
+
if [ "$count" -gt 0 ]; then
267
+
echo "โ
Table '$table' has $count rows"
268
+
else
269
+
echo "โ Table '$table' is empty"
270
+
fi
271
+
done
272
+
273
+
echo ""
274
+
echo "Step 6: Testing JSONB tag queries..."
275
+
echo "========================================="
276
+
277
+
# Test JSONB tag filtering
278
+
result=$(run_query "SELECT COUNT(*) FROM \"quickdid.http.request.count\" WHERE tags->>'method' = 'GET';" | tr -d ' ')
279
+
if [ "$result" -gt 0 ]; then
280
+
echo "โ
JSONB tag filtering works (found $result GET requests)"
281
+
else
282
+
echo "โ JSONB tag filtering failed"
283
+
fi
284
+
285
+
echo ""
286
+
echo "Step 7: Testing TimescaleDB functions..."
287
+
echo "========================================="
288
+
289
+
# Test time_bucket function
290
+
result=$(run_query "SELECT COUNT(*) FROM (SELECT time_bucket('1 minute', time) FROM \"quickdid.http.request.count\" GROUP BY 1) t;" | tr -d ' ')
291
+
if [ "$result" -gt 0 ]; then
292
+
echo "โ
time_bucket function works"
293
+
else
294
+
echo "โ time_bucket function failed"
295
+
fi
296
+
297
+
# Check if hypertables were created
298
+
hypertable_count=$(run_query "SELECT COUNT(*) FROM timescaledb_information.hypertables WHERE hypertable_name LIKE 'quickdid%';" | tr -d ' ')
299
+
if [ "$hypertable_count" -gt 0 ]; then
300
+
echo "โ
Found $hypertable_count hypertables"
301
+
else
302
+
echo "โ No hypertables found"
303
+
fi
304
+
305
+
echo ""
306
+
echo "Step 8: Running comprehensive query tests..."
307
+
echo "========================================="
308
+
309
+
# Run the verify-queries.sql script if it exists
310
+
if [ -f "../test-scripts/verify-queries.sql" ]; then
311
+
echo "Running verify-queries.sql..."
312
+
docker exec -i timescaledb psql -U postgres -d metrics < ../test-scripts/verify-queries.sql > query_results.txt 2>&1
313
+
if [ $? -eq 0 ]; then
314
+
echo "โ
All queries executed successfully"
315
+
echo " Results saved to query_results.txt"
316
+
else
317
+
echo "โ Some queries failed. Check query_results.txt for details"
318
+
fi
319
+
else
320
+
echo "โ ๏ธ verify-queries.sql not found, skipping comprehensive query tests"
321
+
fi
322
+
323
+
echo ""
324
+
echo "========================================="
325
+
echo "Test Summary"
326
+
echo "========================================="
327
+
328
+
# Generate summary
329
+
failures=0
330
+
successes=0
331
+
332
+
# Count successes and failures from the output
333
+
if check_table "quickdid.http.request.count" > /dev/null 2>&1; then
334
+
successes=$((successes + 1))
335
+
else
336
+
failures=$((failures + 1))
337
+
fi
338
+
339
+
if [ "$hypertable_count" -gt 0 ]; then
340
+
successes=$((successes + 1))
341
+
else
342
+
failures=$((failures + 1))
343
+
fi
344
+
345
+
echo ""
346
+
if [ $failures -eq 0 ]; then
347
+
echo "โ
All tests passed successfully!"
348
+
echo ""
349
+
echo "You can now:"
350
+
echo "1. Connect to the database: docker exec -it timescaledb psql -U postgres -d metrics"
351
+
echo "2. View logs: docker-compose logs -f"
352
+
echo "3. Send more metrics: echo 'metric.name:value|type|#tag:value' | nc -u -w0 localhost 8125"
353
+
echo "4. Stop services: docker-compose down"
354
+
else
355
+
echo "โ ๏ธ Some tests failed. Please check the output above for details."
356
+
echo ""
357
+
echo "Troubleshooting tips:"
358
+
echo "1. Check Telegraf logs: docker-compose logs telegraf"
359
+
echo "2. Check TimescaleDB logs: docker-compose logs timescaledb"
360
+
echo "3. Verify connectivity: docker exec telegraf telegraf --test"
361
+
fi
362
+
363
+
echo ""
364
+
echo "Test complete!"
+44
test-scripts/send-metrics.sh
+44
test-scripts/send-metrics.sh
···
1
+
#!/bin/bash
2
+
3
+
# Send test metrics to StatsD/Telegraf
4
+
5
+
echo "Sending test metrics to StatsD on localhost:8125..."
6
+
7
+
# Counter metrics
8
+
for i in {1..10}; do
9
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
10
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
11
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:404" | nc -u -w0 localhost 8125
12
+
done
13
+
14
+
# Gauge metrics
15
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
16
+
echo "quickdid.resolver.rate_limit.available_permits:8|g" | nc -u -w0 localhost 8125
17
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
18
+
19
+
# Timing metrics (in milliseconds)
20
+
for i in {1..20}; do
21
+
duration=$((RANDOM % 100 + 10))
22
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
23
+
done
24
+
25
+
for i in {1..10}; do
26
+
duration=$((RANDOM % 200 + 50))
27
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
28
+
done
29
+
30
+
# Histogram metrics
31
+
for i in {1..15}; do
32
+
resolution_time=$((RANDOM % 500 + 50))
33
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
34
+
echo "quickdid.resolver.resolution_time:$((resolution_time * 2))|h|#resolver:base" | nc -u -w0 localhost 8125
35
+
done
36
+
37
+
# Cache metrics
38
+
echo "quickdid.cache.hit.count:45|c|#cache_type:redis" | nc -u -w0 localhost 8125
39
+
echo "quickdid.cache.miss.count:5|c|#cache_type:redis" | nc -u -w0 localhost 8125
40
+
echo "quickdid.cache.size:1024|g|#cache_type:memory" | nc -u -w0 localhost 8125
41
+
42
+
echo "Metrics sent! Wait 15 seconds for Telegraf to flush..."
43
+
sleep 15
44
+
echo "Done!"
+145
test-scripts/verify-queries.sql
+145
test-scripts/verify-queries.sql
···
1
+
-- Test script to verify all metrics queries work correctly
2
+
-- Run this after sending test metrics with send-metrics.sh
3
+
4
+
\echo '===== CHECKING AVAILABLE TABLES ====='
5
+
SELECT table_name
6
+
FROM information_schema.tables
7
+
WHERE table_schema = 'public'
8
+
AND table_name LIKE 'quickdid%'
9
+
ORDER BY table_name;
10
+
11
+
\echo ''
12
+
\echo '===== CHECKING TABLE STRUCTURES ====='
13
+
\echo 'Structure of quickdid.http.request.count table:'
14
+
\d "quickdid.http.request.count"
15
+
16
+
\echo ''
17
+
\echo 'Structure of quickdid.http.request.duration_ms table:'
18
+
\d "quickdid.http.request.duration_ms"
19
+
20
+
\echo ''
21
+
\echo '===== QUERY 1: Recent HTTP Request Counts ====='
22
+
SELECT
23
+
time,
24
+
tags,
25
+
tags->>'method' as method,
26
+
tags->>'path' as path,
27
+
tags->>'status' as status,
28
+
value
29
+
FROM "quickdid.http.request.count"
30
+
WHERE time > NOW() - INTERVAL '1 hour'
31
+
ORDER BY time DESC
32
+
LIMIT 10;
33
+
34
+
\echo ''
35
+
\echo '===== QUERY 2: HTTP Request Duration Statistics by Endpoint ====='
36
+
SELECT
37
+
time_bucket('1 minute', time) AS minute,
38
+
tags->>'method' as method,
39
+
tags->>'path' as path,
40
+
tags->>'status' as status,
41
+
COUNT(*) as request_count,
42
+
AVG(mean) as avg_duration_ms,
43
+
MAX(p99) as p99_duration_ms,
44
+
MIN(mean) as min_duration_ms
45
+
FROM "quickdid.http.request.duration_ms"
46
+
WHERE time > NOW() - INTERVAL '1 hour'
47
+
AND tags IS NOT NULL
48
+
GROUP BY minute, tags->>'method', tags->>'path', tags->>'status'
49
+
ORDER BY minute DESC
50
+
LIMIT 10;
51
+
52
+
\echo ''
53
+
\echo '===== QUERY 3: Rate Limiter Status Over Time ====='
54
+
SELECT
55
+
time,
56
+
value as available_permits
57
+
FROM "quickdid.resolver.rate_limit.available_permits"
58
+
WHERE time > NOW() - INTERVAL '1 hour'
59
+
ORDER BY time DESC
60
+
LIMIT 10;
61
+
62
+
\echo ''
63
+
\echo '===== QUERY 4: Resolver Performance Comparison ====='
64
+
SELECT
65
+
tags->>'resolver' as resolver_type,
66
+
COUNT(*) as sample_count,
67
+
AVG(mean) as avg_resolution_time_ms,
68
+
MAX(p99) as p99_resolution_time_ms,
69
+
MIN(mean) as min_resolution_time_ms
70
+
FROM "quickdid.resolver.resolution_time"
71
+
WHERE time > NOW() - INTERVAL '1 hour'
72
+
AND tags->>'resolver' IS NOT NULL
73
+
GROUP BY tags->>'resolver'
74
+
ORDER BY avg_resolution_time_ms;
75
+
76
+
\echo ''
77
+
\echo '===== QUERY 5: Cache Hit Rate Analysis ====='
78
+
WITH cache_stats AS (
79
+
SELECT
80
+
'hits' as metric_type,
81
+
SUM(value) as total_count
82
+
FROM "quickdid.cache.hit.count"
83
+
WHERE time > NOW() - INTERVAL '1 hour'
84
+
UNION ALL
85
+
SELECT
86
+
'misses' as metric_type,
87
+
SUM(value) as total_count
88
+
FROM "quickdid.cache.miss.count"
89
+
WHERE time > NOW() - INTERVAL '1 hour'
90
+
)
91
+
SELECT
92
+
SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) as total_hits,
93
+
SUM(CASE WHEN metric_type = 'misses' THEN total_count ELSE 0 END) as total_misses,
94
+
CASE
95
+
WHEN SUM(total_count) > 0 THEN
96
+
ROUND(100.0 * SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) / SUM(total_count), 2)
97
+
ELSE 0
98
+
END as hit_rate_percentage
99
+
FROM cache_stats;
100
+
101
+
\echo ''
102
+
\echo '===== QUERY 6: Hypertable Information ====='
103
+
SELECT
104
+
hypertable_schema,
105
+
hypertable_name,
106
+
owner,
107
+
num_dimensions,
108
+
num_chunks,
109
+
compression_enabled
110
+
FROM timescaledb_information.hypertables
111
+
WHERE hypertable_name LIKE 'quickdid%'
112
+
ORDER BY hypertable_name;
113
+
114
+
\echo ''
115
+
\echo '===== QUERY 7: HTTP Error Rate by Endpoint ====='
116
+
WITH status_counts AS (
117
+
SELECT
118
+
time_bucket('5 minutes', time) as period,
119
+
tags->>'path' as path,
120
+
CASE
121
+
WHEN (tags->>'status')::int >= 400 THEN 'error'
122
+
ELSE 'success'
123
+
END as status_category,
124
+
SUM(value) as request_count
125
+
FROM "quickdid.http.request.count"
126
+
WHERE time > NOW() - INTERVAL '1 hour'
127
+
GROUP BY period, path, status_category
128
+
)
129
+
SELECT
130
+
period,
131
+
path,
132
+
SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) as error_count,
133
+
SUM(CASE WHEN status_category = 'success' THEN request_count ELSE 0 END) as success_count,
134
+
CASE
135
+
WHEN SUM(request_count) > 0 THEN
136
+
ROUND(100.0 * SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) / SUM(request_count), 2)
137
+
ELSE 0
138
+
END as error_rate_percentage
139
+
FROM status_counts
140
+
GROUP BY period, path
141
+
HAVING SUM(request_count) > 0
142
+
ORDER BY period DESC, error_rate_percentage DESC;
143
+
144
+
\echo ''
145
+
\echo '===== TEST COMPLETED ====='
+1
www/.well-known/atproto-did
+1
www/.well-known/atproto-did
···
1
+
did:web:quickdid.smokesignal.tools
+15
www/.well-known/did.json
+15
www/.well-known/did.json
···
1
+
{
2
+
"@context": [
3
+
"https://www.w3.org/ns/did/v1",
4
+
"https://w3id.org/security/multikey/v1"
5
+
],
6
+
"id": "did:web:quickdid.smokesignal.tools",
7
+
"verificationMethod": [],
8
+
"service": [
9
+
{
10
+
"id": "#quickdid",
11
+
"type": "QuickDIDService",
12
+
"serviceEndpoint": "https://quickdid.smokesignal.tools"
13
+
}
14
+
]
15
+
}
+74
www/README.md
+74
www/README.md
···
1
+
# QuickDID Static Files Directory
2
+
3
+
This directory contains static files that are served by QuickDID. By default, QuickDID serves files from the `www` directory, but this can be configured using the `STATIC_FILES_DIR` environment variable.
4
+
5
+
## Directory Structure
6
+
7
+
```
8
+
www/
9
+
├── .well-known/
10
+
│   ├── atproto-did    # AT Protocol DID identifier
11
+
│   └── did.json       # DID document
12
+
├── index.html         # Landing page
13
+
└── README.md          # This file
14
+
```
15
+
16
+
## Files
17
+
18
+
### `.well-known/atproto-did`
19
+
Contains the service's DID identifier (e.g., `did:web:example.com`). This file is used by AT Protocol clients to discover the service's DID.
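For example, a client can fetch it directly over HTTPS (the domain below is a placeholder):

```bash
curl https://example.com/.well-known/atproto-did
# did:web:example.com
```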
20
+
21
+
### `.well-known/did.json`
22
+
Contains the DID document with verification methods and service endpoints. This is a JSON-LD document following the W3C DID specification.
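If you add a public key, a `verificationMethod` entry generally looks like the following sketch — the fragment name and key value are placeholders, and `Multikey` matches the `w3id.org/security/multikey/v1` context this document already declares:

```json
{
  "id": "did:web:example.com#atproto",
  "type": "Multikey",
  "controller": "did:web:example.com",
  "publicKeyMultibase": "zQ3shPLACEHOLDER"
}
```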
23
+
24
+
### `index.html`
25
+
The landing page shown when users visit the root URL. This provides information about the service and available endpoints.
26
+
27
+
## Customization
28
+
29
+
### Using the Generation Script
30
+
31
+
You can generate the `.well-known` files for your deployment using the provided script:
32
+
33
+
```bash
34
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
35
+
```
36
+
37
+
This will create the appropriate files based on your domain.
38
+
39
+
### Manual Customization
40
+
41
+
1. **Update `.well-known/atproto-did`**: Replace with your service's DID
42
+
2. **Update `.well-known/did.json`**: Add your public key to the `verificationMethod` array if needed
43
+
3. **Customize `index.html`**: Modify the landing page to match your branding
44
+
45
+
### Docker Deployment
46
+
47
+
When using Docker, you can mount custom static files:
48
+
49
+
```yaml
50
+
volumes:
51
+
- ./custom-www:/app/www:ro
52
+
```
53
+
54
+
Or just override specific files:
55
+
56
+
```yaml
57
+
volumes:
58
+
- ./custom-index.html:/app/www/index.html:ro
59
+
- ./custom-wellknown:/app/www/.well-known:ro
60
+
```
61
+
62
+
### Environment Variable
63
+
64
+
You can change the static files directory using:
65
+
66
+
```bash
67
+
STATIC_FILES_DIR=/path/to/custom/www
68
+
```
69
+
70
+
## Security Notes
71
+
72
+
- Static files are served with automatic MIME type detection
73
+
- The `.well-known` files are crucial for AT Protocol compatibility
74
+
- Ensure proper permissions on mounted volumes in production
+4
www/css/pico.classless.green.min.css
+4
www/css/pico.classless.green.min.css
···
1
+
@charset "UTF-8";/*!
2
+
* Pico CSS ✨ v2.1.1 (https://picocss.com)
3
+
* Copyright 2019-2025 - Licensed under MIT
4
+
*/:host,:root{--pico-font-family-emoji:"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--pico-font-family-sans-serif:system-ui,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,Helvetica,Arial,"Helvetica Neue",sans-serif,var(--pico-font-family-emoji);--pico-font-family-monospace:ui-monospace,SFMono-Regular,"SF Mono",Menlo,Consolas,"Liberation Mono",monospace,var(--pico-font-family-emoji);--pico-font-family:var(--pico-font-family-sans-serif);--pico-line-height:1.5;--pico-font-weight:400;--pico-font-size:100%;--pico-text-underline-offset:0.1rem;--pico-border-radius:0.25rem;--pico-border-width:0.0625rem;--pico-outline-width:0.125rem;--pico-transition:0.2s ease-in-out;--pico-spacing:1rem;--pico-typography-spacing-vertical:1rem;--pico-block-spacing-vertical:var(--pico-spacing);--pico-block-spacing-horizontal:var(--pico-spacing);--pico-form-element-spacing-vertical:0.75rem;--pico-form-element-spacing-horizontal:1rem;--pico-group-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-group-box-shadow-focus-with-button:0 0 0 var(--pico-outline-width) var(--pico-primary-focus);--pico-group-box-shadow-focus-with-input:0 0 0 0.0625rem var(--pico-form-element-border-color);--pico-modal-overlay-backdrop-filter:blur(0.375rem);--pico-nav-element-spacing-vertical:1rem;--pico-nav-element-spacing-horizontal:0.5rem;--pico-nav-link-spacing-vertical:0.5rem;--pico-nav-link-spacing-horizontal:0.5rem;--pico-nav-breadcrumb-divider:">";--pico-icon-checkbox:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(255, 255, 255)' stroke-width='4' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-minus:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(255, 255, 255)' stroke-width='4' stroke-linecap='round' stroke-linejoin='round'%3E%3Cline x1='5' y1='12' x2='19' y2='12'%3E%3C/line%3E%3C/svg%3E");--pico-icon-chevron:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-date:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Crect x='3' y='4' width='18' height='18' rx='2' ry='2'%3E%3C/rect%3E%3Cline x1='16' y1='2' x2='16' y2='6'%3E%3C/line%3E%3Cline x1='8' y1='2' x2='8' y2='6'%3E%3C/line%3E%3Cline x1='3' y1='10' x2='21' y2='10'%3E%3C/line%3E%3C/svg%3E");--pico-icon-time:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cpolyline points='12 6 12 12 16 14'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-search:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='1.5' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='11' cy='11' r='8'%3E%3C/circle%3E%3Cline x1='21' y1='21' x2='16.65' 
y2='16.65'%3E%3C/line%3E%3C/svg%3E");--pico-icon-close:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='3' stroke-linecap='round' stroke-linejoin='round'%3E%3Cline x1='18' y1='6' x2='6' y2='18'%3E%3C/line%3E%3Cline x1='6' y1='6' x2='18' y2='18'%3E%3C/line%3E%3C/svg%3E");--pico-icon-loading:url("data:image/svg+xml,%3Csvg fill='none' height='24' width='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg' %3E%3Cstyle%3E g %7B animation: rotate 2s linear infinite; transform-origin: center center; %7D circle %7B stroke-dasharray: 75,100; stroke-dashoffset: -5; animation: dash 1.5s ease-in-out infinite; stroke-linecap: round; %7D @keyframes rotate %7B 0%25 %7B transform: rotate(0deg); %7D 100%25 %7B transform: rotate(360deg); %7D %7D @keyframes dash %7B 0%25 %7B stroke-dasharray: 1,100; stroke-dashoffset: 0; %7D 50%25 %7B stroke-dasharray: 44.5,100; stroke-dashoffset: -17.5; %7D 100%25 %7B stroke-dasharray: 44.5,100; stroke-dashoffset: -62; %7D %7D %3C/style%3E%3Cg%3E%3Ccircle cx='12' cy='12' r='10' fill='none' stroke='rgb(136, 145, 164)' stroke-width='4' /%3E%3C/g%3E%3C/svg%3E")}@media (min-width:576px){:host,:root{--pico-font-size:106.25%}}@media (min-width:768px){:host,:root{--pico-font-size:112.5%}}@media (min-width:1024px){:host,:root{--pico-font-size:118.75%}}@media (min-width:1280px){:host,:root{--pico-font-size:125%}}@media (min-width:1536px){:host,:root{--pico-font-size:131.25%}}a{--pico-text-decoration:underline}small{--pico-font-size:0.875em}h1,h2,h3,h4,h5,h6{--pico-font-weight:700}h1{--pico-font-size:2rem;--pico-line-height:1.125;--pico-typography-spacing-top:3rem}h2{--pico-font-size:1.75rem;--pico-line-height:1.15;--pico-typography-spacing-top:2.625rem}h3{--pico-font-size:1.5rem;--pico-line-height:1.175;--pico-typography-spacing-top:2.25rem}h4{--pico-font-size:1.25rem;--pico-line-height:1.2;--pico-typography-spacing-top:1.874rem}h5{--pico-font-size:1.125rem;--pico-line-height:1.225;--pico-typography-spacing-top:1.6875rem}h6{--pico-font-size:1rem;--pico-line-height:1.25;--pico-typography-spacing-top:1.5rem}tfoot td,tfoot th,thead td,thead th{--pico-font-weight:600;--pico-border-width:0.1875rem}code,kbd,pre,samp{--pico-font-family:var(--pico-font-family-monospace)}kbd{--pico-font-weight:bolder}:where(select,textarea),input:not([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-outline-width:0.0625rem}[type=search]{--pico-border-radius:5rem}[type=checkbox],[type=radio]{--pico-border-width:0.125rem}[type=checkbox][role=switch]{--pico-border-width:0.1875rem}[role=search]{--pico-border-radius:5rem}[role=group] [role=button],[role=group] [type=button],[role=group] [type=submit],[role=group] button,[role=search] [role=button],[role=search] [type=button],[role=search] [type=submit],[role=search] button{--pico-form-element-spacing-horizontal:2rem}details summary[role=button]::after{filter:brightness(0) invert(1)}[aria-busy=true]:not(input,select,textarea):is(button,[type=submit],[type=button],[type=reset],[role=button])::before{filter:brightness(0) invert(1)}:host(:not([data-theme=dark])),:root:not([data-theme=dark]),[data-theme=light]{color-scheme:light;--pico-background-color:#fff;--pico-color:#373c44;--pico-text-selection-color:rgba(71, 164, 23, 0.25);--pico-muted-color:#646b79;--pico-muted-border-color:rgb(231, 234, 
239.5);--pico-primary:#33790f;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(51, 121, 15, 0.5);--pico-primary-hover:#265e09;--pico-primary-hover-background:#33790f;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(71, 164, 23, 0.5);--pico-primary-inverse:#fff;--pico-secondary:#5d6b89;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(93, 107, 137, 0.5);--pico-secondary-hover:#48536b;--pico-secondary-hover-background:#48536b;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(93, 107, 137, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#181c25;--pico-contrast-background:#181c25;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(24, 28, 37, 0.5);--pico-contrast-hover:#000;--pico-contrast-hover-background:#000;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-secondary-hover);--pico-contrast-focus:rgba(93, 107, 137, 0.25);--pico-contrast-inverse:#fff;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(129, 145, 181, 0.01698),0.0335rem 0.067rem 0.402rem rgba(129, 145, 181, 0.024),0.0625rem 0.125rem 0.75rem rgba(129, 145, 181, 0.03),0.1125rem 0.225rem 1.35rem rgba(129, 145, 181, 0.036),0.2085rem 0.417rem 2.502rem rgba(129, 145, 181, 0.04302),0.5rem 1rem 6rem rgba(129, 145, 181, 0.06),0 0 0 0.0625rem rgba(129, 145, 181, 0.015);--pico-h1-color:#2d3138;--pico-h2-color:#373c44;--pico-h3-color:#424751;--pico-h4-color:#4d535e;--pico-h5-color:#5c6370;--pico-h6-color:#646b79;--pico-mark-background-color:rgb(252.5, 230.5, 191.5);--pico-mark-color:#0f1114;--pico-ins-color:rgb(28.5, 105.5, 84);--pico-del-color:rgb(136, 56.5, 53);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(243, 244.5, 246.75);--pico-code-color:#646b79;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(251, 251.5, 252.25);--pico-form-element-selected-background-color:#dfe3eb;--pico-form-element-border-color:#cfd5e2;--pico-form-element-color:#23262c;--pico-form-element-placeholder-color:var(--pico-muted-color);--pico-form-element-active-background-color:#fff;--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(183.5, 105.5, 106.5);--pico-form-element-invalid-active-border-color:rgb(200.25, 79.25, 72.25);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:rgb(76, 154.5, 137.5);--pico-form-element-valid-active-border-color:rgb(39, 152.75, 
118.75);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#bfc7d9;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-range-border-color:#dfe3eb;--pico-range-active-border-color:#bfc7d9;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:var(--pico-background-color);--pico-card-border-color:var(--pico-muted-border-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(251, 251.5, 252.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(232, 234, 237, 0.75);--pico-progress-background-color:#dfe3eb;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(76, 154.5, 137.5)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(200.25, 79.25, 72.25)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}:host(:not([data-theme=dark])) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),:root:not([data-theme=dark]) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),[data-theme=light] input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}@media only screen and (prefers-color-scheme:dark){:host(:not([data-theme])),:root:not([data-theme]){color-scheme:dark;--pico-background-color:rgb(19, 22.5, 30.5);--pico-color:#c2c7d0;--pico-text-selection-color:rgba(78, 179, 27, 0.1875);--pico-muted-color:#7b8495;--pico-muted-border-color:#202632;--pico-primary:#4eb31b;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(78, 179, 27, 0.5);--pico-primary-hover:#5dd121;--pico-primary-hover-background:#409614;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(78, 179, 27, 0.375);--pico-primary-inverse:#fff;--pico-secondary:#969eaf;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(150, 158, 175, 
0.5);--pico-secondary-hover:#b3b9c5;--pico-secondary-hover-background:#5d6b89;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(144, 158, 190, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#dfe3eb;--pico-contrast-background:#eff1f4;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(223, 227, 235, 0.5);--pico-contrast-hover:#fff;--pico-contrast-hover-background:#fff;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-contrast-hover);--pico-contrast-focus:rgba(207, 213, 226, 0.25);--pico-contrast-inverse:#000;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(7, 8.5, 12, 0.01698),0.0335rem 0.067rem 0.402rem rgba(7, 8.5, 12, 0.024),0.0625rem 0.125rem 0.75rem rgba(7, 8.5, 12, 0.03),0.1125rem 0.225rem 1.35rem rgba(7, 8.5, 12, 0.036),0.2085rem 0.417rem 2.502rem rgba(7, 8.5, 12, 0.04302),0.5rem 1rem 6rem rgba(7, 8.5, 12, 0.06),0 0 0 0.0625rem rgba(7, 8.5, 12, 0.015);--pico-h1-color:#f0f1f3;--pico-h2-color:#e0e3e7;--pico-h3-color:#c2c7d0;--pico-h4-color:#b3b9c5;--pico-h5-color:#a4acba;--pico-h6-color:#8891a4;--pico-mark-background-color:#014063;--pico-mark-color:#fff;--pico-ins-color:#62af9a;--pico-del-color:rgb(205.5, 126, 123);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(26, 30.5, 40.25);--pico-code-color:#8891a4;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(28, 33, 43.5);--pico-form-element-selected-background-color:#2a3140;--pico-form-element-border-color:#2a3140;--pico-form-element-color:#e0e3e7;--pico-form-element-placeholder-color:#8891a4;--pico-form-element-active-background-color:rgb(26, 30.5, 40.25);--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(149.5, 74, 80);--pico-form-element-invalid-active-border-color:rgb(183.25, 63.5, 59);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:#2a7b6f;--pico-form-element-valid-active-border-color:rgb(22, 137, 105.5);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#333c4e;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 
0);--pico-range-border-color:#202632;--pico-range-active-border-color:#2a3140;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:#181c25;--pico-card-border-color:var(--pico-card-background-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(26, 30.5, 40.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(7.5, 8.5, 10, 0.75);--pico-progress-background-color:#202632;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(42, 123, 111)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(149.5, 74, 80)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}:host(:not([data-theme])) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),:root:not([data-theme]) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}}[data-theme=dark]{color-scheme:dark;--pico-background-color:rgb(19, 22.5, 30.5);--pico-color:#c2c7d0;--pico-text-selection-color:rgba(78, 179, 27, 0.1875);--pico-muted-color:#7b8495;--pico-muted-border-color:#202632;--pico-primary:#4eb31b;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(78, 179, 27, 0.5);--pico-primary-hover:#5dd121;--pico-primary-hover-background:#409614;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(78, 179, 27, 0.375);--pico-primary-inverse:#fff;--pico-secondary:#969eaf;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(150, 158, 175, 0.5);--pico-secondary-hover:#b3b9c5;--pico-secondary-hover-background:#5d6b89;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(144, 158, 190, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#dfe3eb;--pico-contrast-background:#eff1f4;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(223, 227, 235, 0.5);--pico-contrast-hover:#fff;--pico-contrast-hover-background:#fff;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-contrast-hover);--pico-contrast-focus:rgba(207, 213, 226, 
0.25);--pico-contrast-inverse:#000;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(7, 8.5, 12, 0.01698),0.0335rem 0.067rem 0.402rem rgba(7, 8.5, 12, 0.024),0.0625rem 0.125rem 0.75rem rgba(7, 8.5, 12, 0.03),0.1125rem 0.225rem 1.35rem rgba(7, 8.5, 12, 0.036),0.2085rem 0.417rem 2.502rem rgba(7, 8.5, 12, 0.04302),0.5rem 1rem 6rem rgba(7, 8.5, 12, 0.06),0 0 0 0.0625rem rgba(7, 8.5, 12, 0.015);--pico-h1-color:#f0f1f3;--pico-h2-color:#e0e3e7;--pico-h3-color:#c2c7d0;--pico-h4-color:#b3b9c5;--pico-h5-color:#a4acba;--pico-h6-color:#8891a4;--pico-mark-background-color:#014063;--pico-mark-color:#fff;--pico-ins-color:#62af9a;--pico-del-color:rgb(205.5, 126, 123);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(26, 30.5, 40.25);--pico-code-color:#8891a4;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(28, 33, 43.5);--pico-form-element-selected-background-color:#2a3140;--pico-form-element-border-color:#2a3140;--pico-form-element-color:#e0e3e7;--pico-form-element-placeholder-color:#8891a4;--pico-form-element-active-background-color:rgb(26, 30.5, 40.25);--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(149.5, 74, 80);--pico-form-element-invalid-active-border-color:rgb(183.25, 63.5, 59);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:#2a7b6f;--pico-form-element-valid-active-border-color:rgb(22, 137, 105.5);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#333c4e;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-range-border-color:#202632;--pico-range-active-border-color:#2a3140;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:#181c25;--pico-card-border-color:var(--pico-card-background-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(26, 30.5, 40.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(7.5, 8.5, 10, 0.75);--pico-progress-background-color:#202632;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(42, 123, 111)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 
12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(149.5, 74, 80)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}[data-theme=dark] input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}[type=checkbox],[type=radio],[type=range],progress{accent-color:var(--pico-primary)}*,::after,::before{box-sizing:border-box;background-repeat:no-repeat}::after,::before{text-decoration:inherit;vertical-align:inherit}:where(:host),:where(:root){-webkit-tap-highlight-color:transparent;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%;background-color:var(--pico-background-color);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:var(--pico-font-size);line-height:var(--pico-line-height);font-family:var(--pico-font-family);text-underline-offset:var(--pico-text-underline-offset);text-rendering:optimizeLegibility;overflow-wrap:break-word;-moz-tab-size:4;-o-tab-size:4;tab-size:4}body{width:100%;margin:0}main{display:block}body>footer,body>header,body>main{width:100%;margin-right:auto;margin-left:auto;padding:var(--pico-block-spacing-vertical) var(--pico-block-spacing-horizontal)}@media (min-width:576px){body>footer,body>header,body>main{max-width:510px;padding-right:0;padding-left:0}}@media (min-width:768px){body>footer,body>header,body>main{max-width:700px}}@media (min-width:1024px){body>footer,body>header,body>main{max-width:950px}}@media (min-width:1280px){body>footer,body>header,body>main{max-width:1200px}}@media (min-width:1536px){body>footer,body>header,body>main{max-width:1450px}}section{margin-bottom:var(--pico-block-spacing-vertical)}b,strong{font-weight:bolder}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}address,blockquote,dl,ol,p,pre,table,ul{margin-top:0;margin-bottom:var(--pico-typography-spacing-vertical);color:var(--pico-color);font-style:normal;font-weight:var(--pico-font-weight)}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:var(--pico-typography-spacing-vertical);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:var(--pico-font-size);line-height:var(--pico-line-height);font-family:var(--pico-font-family)}h1{--pico-color:var(--pico-h1-color)}h2{--pico-color:var(--pico-h2-color)}h3{--pico-color:var(--pico-h3-color)}h4{--pico-color:var(--pico-h4-color)}h5{--pico-color:var(--pico-h5-color)}h6{--pico-color:var(--pico-h6-color)}:where(article,address,blockquote,dl,figure,form,ol,p,pre,table,ul)~:is(h1,h2,h3,h4,h5,h6){margin-top:var(--pico-typography-spacing-top)}p{margin-bottom:var(--pico-typography-spacing-vertical)}hgroup{margin-bottom:var(--pico-typography-spacing-vertical)}hgroup>*{margin-top:0;margin-bottom:0}hgroup>:not(:first-child):last-child{--pico-color:var(--pico-muted-color);--pico-font-weight:unset;font-size:1rem}:where(ol,ul) li{margin-bottom:calc(var(--pico-typography-spacing-vertical) * .25)}:where(dl,ol,ul) :where(dl,ol,ul){margin:0;margin-top:calc(var(--pico-typography-spacing-vertical) * .25)}ul li{list-style:square}mark{padding:.125rem 
.25rem;background-color:var(--pico-mark-background-color);color:var(--pico-mark-color);vertical-align:baseline}blockquote{display:block;margin:var(--pico-typography-spacing-vertical) 0;padding:var(--pico-spacing);border-right:none;border-left:.25rem solid var(--pico-blockquote-border-color);border-inline-start:0.25rem solid var(--pico-blockquote-border-color);border-inline-end:none}blockquote footer{margin-top:calc(var(--pico-typography-spacing-vertical) * .5);color:var(--pico-blockquote-footer-color)}abbr[title]{border-bottom:1px dotted;text-decoration:none;cursor:help}ins{color:var(--pico-ins-color);text-decoration:none}del{color:var(--pico-del-color)}::-moz-selection{background-color:var(--pico-text-selection-color)}::selection{background-color:var(--pico-text-selection-color)}:where(a:not([role=button])),[role=link]{--pico-color:var(--pico-primary);--pico-background-color:transparent;--pico-underline:var(--pico-primary-underline);outline:0;background-color:var(--pico-background-color);color:var(--pico-color);-webkit-text-decoration:var(--pico-text-decoration);text-decoration:var(--pico-text-decoration);text-decoration-color:var(--pico-underline);text-underline-offset:0.125em;transition:background-color var(--pico-transition),color var(--pico-transition),box-shadow var(--pico-transition),-webkit-text-decoration var(--pico-transition);transition:background-color var(--pico-transition),color var(--pico-transition),text-decoration var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),color var(--pico-transition),text-decoration var(--pico-transition),box-shadow var(--pico-transition),-webkit-text-decoration var(--pico-transition)}:where(a:not([role=button])):is([aria-current]:not([aria-current=false]),:hover,:active,:focus),[role=link]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){--pico-color:var(--pico-primary-hover);--pico-underline:var(--pico-primary-hover-underline);--pico-text-decoration:underline}:where(a:not([role=button])):focus-visible,[role=link]:focus-visible{box-shadow:0 0 0 var(--pico-outline-width) var(--pico-primary-focus)}a[role=button]{display:inline-block}button{margin:0;overflow:visible;font-family:inherit;text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[role=button],[type=button],[type=file]::file-selector-button,[type=reset],[type=submit],button{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);--pico-color:var(--pico-primary-inverse);--pico-box-shadow:var(--pico-button-box-shadow, 0 0 0 rgba(0, 0, 0, 0));padding:var(--pico-form-element-spacing-vertical) var(--pico-form-element-spacing-horizontal);border:var(--pico-border-width) solid var(--pico-border-color);border-radius:var(--pico-border-radius);outline:0;background-color:var(--pico-background-color);box-shadow:var(--pico-box-shadow);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:1rem;line-height:var(--pico-line-height);text-align:center;text-decoration:none;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none;transition:background-color var(--pico-transition),border-color var(--pico-transition),color var(--pico-transition),box-shadow 
var(--pico-transition)}[role=button]:is(:hover,:active,:focus),[role=button]:is([aria-current]:not([aria-current=false])),[type=button]:is(:hover,:active,:focus),[type=button]:is([aria-current]:not([aria-current=false])),[type=file]::file-selector-button:is(:hover,:active,:focus),[type=file]::file-selector-button:is([aria-current]:not([aria-current=false])),[type=reset]:is(:hover,:active,:focus),[type=reset]:is([aria-current]:not([aria-current=false])),[type=submit]:is(:hover,:active,:focus),[type=submit]:is([aria-current]:not([aria-current=false])),button:is(:hover,:active,:focus),button:is([aria-current]:not([aria-current=false])){--pico-background-color:var(--pico-primary-hover-background);--pico-border-color:var(--pico-primary-hover-border);--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0));--pico-color:var(--pico-primary-inverse)}[role=button]:focus,[role=button]:is([aria-current]:not([aria-current=false])):focus,[type=button]:focus,[type=button]:is([aria-current]:not([aria-current=false])):focus,[type=file]::file-selector-button:focus,[type=file]::file-selector-button:is([aria-current]:not([aria-current=false])):focus,[type=reset]:focus,[type=reset]:is([aria-current]:not([aria-current=false])):focus,[type=submit]:focus,[type=submit]:is([aria-current]:not([aria-current=false])):focus,button:focus,button:is([aria-current]:not([aria-current=false])):focus{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-primary-focus)}[type=button],[type=reset],[type=submit]{margin-bottom:var(--pico-spacing)}[type=file]::file-selector-button,[type=reset]{--pico-background-color:var(--pico-secondary-background);--pico-border-color:var(--pico-secondary-border);--pico-color:var(--pico-secondary-inverse);cursor:pointer}[type=file]::file-selector-button:is([aria-current]:not([aria-current=false]),:hover,:active,:focus),[type=reset]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){--pico-background-color:var(--pico-secondary-hover-background);--pico-border-color:var(--pico-secondary-hover-border);--pico-color:var(--pico-secondary-inverse)}[type=file]::file-selector-button:focus,[type=reset]:focus{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-secondary-focus)}:where(button,[type=submit],[type=reset],[type=button],[role=button])[disabled],:where(fieldset[disabled]) :is(button,[type=submit],[type=button],[type=reset],[role=button]){opacity:.5;pointer-events:none}:where(table){width:100%;border-collapse:collapse;border-spacing:0;text-indent:0}td,th{padding:calc(var(--pico-spacing)/ 2) var(--pico-spacing);border-bottom:var(--pico-border-width) solid var(--pico-table-border-color);background-color:var(--pico-background-color);color:var(--pico-color);font-weight:var(--pico-font-weight);text-align:left;text-align:start}tfoot td,tfoot th{border-top:var(--pico-border-width) solid var(--pico-table-border-color);border-bottom:0}table.striped tbody tr:nth-child(odd) td,table.striped tbody tr:nth-child(odd) 
th{background-color:var(--pico-table-row-stripped-background-color)}:where(audio,canvas,iframe,img,svg,video){vertical-align:middle}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}:where(iframe){border-style:none}img{max-width:100%;height:auto;border-style:none}:where(svg:not([fill])){fill:currentColor}svg:not(:host),svg:not(:root){overflow:hidden}code,kbd,pre,samp{font-size:.875em;font-family:var(--pico-font-family)}pre code,pre samp{font-size:inherit;font-family:inherit}pre{-ms-overflow-style:scrollbar;overflow:auto}code,kbd,pre,samp{border-radius:var(--pico-border-radius);background:var(--pico-code-background-color);color:var(--pico-code-color);font-weight:var(--pico-font-weight);line-height:initial}code,kbd,samp{display:inline-block;padding:.375rem}pre{display:block;margin-bottom:var(--pico-spacing);overflow-x:auto}pre>code,pre>samp{display:block;padding:var(--pico-spacing);background:0 0;line-height:var(--pico-line-height)}kbd{background-color:var(--pico-code-kbd-background-color);color:var(--pico-code-kbd-color);vertical-align:baseline}figure{display:block;margin:0;padding:0}figure figcaption{padding:calc(var(--pico-spacing) * .5) 0;color:var(--pico-muted-color)}hr{height:0;margin:var(--pico-typography-spacing-vertical) 0;border:0;border-top:1px solid var(--pico-muted-border-color);color:inherit}[hidden],template{display:none!important}canvas{display:inline-block}input,optgroup,select,textarea{margin:0;font-size:1rem;line-height:var(--pico-line-height);font-family:inherit;letter-spacing:inherit}input{overflow:visible}select{text-transform:none}legend{max-width:100%;padding:0;color:inherit;white-space:normal}textarea{overflow:auto}[type=checkbox],[type=radio]{padding:0}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}::-moz-focus-inner{padding:0;border-style:none}:-moz-focusring{outline:0}:-moz-ui-invalid{box-shadow:none}::-ms-expand{display:none}[type=file],[type=range]{padding:0;border-width:0}input:not([type=checkbox],[type=radio],[type=range]){height:calc(1rem * var(--pico-line-height) + var(--pico-form-element-spacing-vertical) * 2 + var(--pico-border-width) * 2)}fieldset{width:100%;margin:0;margin-bottom:var(--pico-spacing);padding:0;border:0}fieldset legend,label{display:block;margin-bottom:calc(var(--pico-spacing) * .375);color:var(--pico-color);font-weight:var(--pico-form-label-font-weight,var(--pico-font-weight))}fieldset legend{margin-bottom:calc(var(--pico-spacing) * .5)}button[type=submit],input:not([type=checkbox],[type=radio]),select,textarea{width:100%}input:not([type=checkbox],[type=radio],[type=range],[type=file]),select,textarea{-webkit-appearance:none;-moz-appearance:none;appearance:none;padding:var(--pico-form-element-spacing-vertical) var(--pico-form-element-spacing-horizontal)}input,select,textarea{--pico-background-color:var(--pico-form-element-background-color);--pico-border-color:var(--pico-form-element-border-color);--pico-color:var(--pico-form-element-color);--pico-box-shadow:none;border:var(--pico-border-width) solid var(--pico-border-color);border-radius:var(--pico-border-radius);outline:0;background-color:var(--pico-background-color);box-shadow:var(--pico-box-shadow);color:var(--pico-color);font-weight:var(--pico-font-weight);transition:background-color var(--pico-transition),border-color 
var(--pico-transition),color var(--pico-transition),box-shadow var(--pico-transition)}:where(select,textarea):not([readonly]):is(:active,:focus),input:not([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[readonly]):is(:active,:focus){--pico-background-color:var(--pico-form-element-active-background-color)}:where(select,textarea):not([readonly]):is(:active,:focus),input:not([type=submit],[type=button],[type=reset],[role=switch],[readonly]):is(:active,:focus){--pico-border-color:var(--pico-form-element-active-border-color)}:where(select,textarea):not([readonly]):focus,input:not([type=submit],[type=button],[type=reset],[type=range],[type=file],[readonly]):focus{--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-focus-color)}:where(fieldset[disabled]) :is(input:not([type=submit],[type=button],[type=reset]),select,textarea),input:not([type=submit],[type=button],[type=reset])[disabled],label[aria-disabled=true],select[disabled],textarea[disabled]{opacity:var(--pico-form-element-disabled-opacity);pointer-events:none}label[aria-disabled=true] input[disabled]{opacity:1}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid]{padding-right:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem)!important;padding-left:var(--pico-form-element-spacing-horizontal);padding-inline-start:var(--pico-form-element-spacing-horizontal)!important;padding-inline-end:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem)!important;background-position:center right .75rem;background-size:1rem auto;background-repeat:no-repeat}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid=false]:not(select){background-image:var(--pico-icon-valid)}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid=true]:not(select){background-image:var(--pico-icon-invalid)}:where(input,select,textarea)[aria-invalid=false]{--pico-border-color:var(--pico-form-element-valid-border-color)}:where(input,select,textarea)[aria-invalid=false]:is(:active,:focus){--pico-border-color:var(--pico-form-element-valid-active-border-color)!important}:where(input,select,textarea)[aria-invalid=false]:is(:active,:focus):not([type=checkbox],[type=radio]){--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-valid-focus-color)!important}:where(input,select,textarea)[aria-invalid=true]{--pico-border-color:var(--pico-form-element-invalid-border-color)}:where(input,select,textarea)[aria-invalid=true]:is(:active,:focus){--pico-border-color:var(--pico-form-element-invalid-active-border-color)!important}:where(input,select,textarea)[aria-invalid=true]:is(:active,:focus):not([type=checkbox],[type=radio]){--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-invalid-focus-color)!important}[dir=rtl] :where(input,select,textarea):not([type=checkbox],[type=radio]):is([aria-invalid],[aria-invalid=true],[aria-invalid=false]){background-position:center left 
.75rem}input::-webkit-input-placeholder,input::placeholder,select:invalid,textarea::-webkit-input-placeholder,textarea::placeholder{color:var(--pico-form-element-placeholder-color);opacity:1}input:not([type=checkbox],[type=radio]),select,textarea{margin-bottom:var(--pico-spacing)}select::-ms-expand{border:0;background-color:transparent}select:not([multiple],[size]){padding-right:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem);padding-left:var(--pico-form-element-spacing-horizontal);padding-inline-start:var(--pico-form-element-spacing-horizontal);padding-inline-end:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem);background-image:var(--pico-icon-chevron);background-position:center right .75rem;background-size:1rem auto;background-repeat:no-repeat}select[multiple] option:checked{background:var(--pico-form-element-selected-background-color);color:var(--pico-form-element-color)}[dir=rtl] select:not([multiple],[size]){background-position:center left .75rem}textarea{display:block;resize:vertical}textarea[aria-invalid]{--pico-icon-height:calc(1rem * var(--pico-line-height) + var(--pico-form-element-spacing-vertical) * 2 + var(--pico-border-width) * 2);background-position:top right .75rem!important;background-size:1rem var(--pico-icon-height)!important}:where(input,select,textarea,fieldset)+small{display:block;width:100%;margin-top:calc(var(--pico-spacing) * -.75);margin-bottom:var(--pico-spacing);color:var(--pico-muted-color)}:where(input,select,textarea,fieldset)[aria-invalid=false]+small{color:var(--pico-ins-color)}:where(input,select,textarea,fieldset)[aria-invalid=true]+small{color:var(--pico-del-color)}label>:where(input,select,textarea){margin-top:calc(var(--pico-spacing) * .25)}label:has([type=checkbox],[type=radio]){width:-moz-fit-content;width:fit-content;cursor:pointer}[type=checkbox],[type=radio]{-webkit-appearance:none;-moz-appearance:none;appearance:none;width:1.25em;height:1.25em;margin-top:-.125em;margin-inline-end:.5em;border-width:var(--pico-border-width);vertical-align:middle;cursor:pointer}[type=checkbox]::-ms-check,[type=radio]::-ms-check{display:none}[type=checkbox]:checked,[type=checkbox]:checked:active,[type=checkbox]:checked:focus,[type=radio]:checked,[type=radio]:checked:active,[type=radio]:checked:focus{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);background-image:var(--pico-icon-checkbox);background-position:center;background-size:.75em auto;background-repeat:no-repeat}[type=checkbox]~label,[type=radio]~label{display:inline-block;margin-bottom:0;cursor:pointer}[type=checkbox]~label:not(:last-of-type),[type=radio]~label:not(:last-of-type){margin-inline-end:1em}[type=checkbox]:indeterminate{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);background-image:var(--pico-icon-minus);background-position:center;background-size:.75em auto;background-repeat:no-repeat}[type=radio]{border-radius:50%}[type=radio]:checked,[type=radio]:checked:active,[type=radio]:checked:focus{--pico-background-color:var(--pico-primary-inverse);border-width:.35em;background-image:none}[type=checkbox][role=switch]{--pico-background-color:var(--pico-switch-background-color);--pico-color:var(--pico-switch-color);width:2.25em;height:1.25em;border:var(--pico-border-width) solid 
var(--pico-border-color);border-radius:1.25em;background-color:var(--pico-background-color);line-height:1.25em}[type=checkbox][role=switch]:not([aria-invalid]){--pico-border-color:var(--pico-switch-background-color)}[type=checkbox][role=switch]:before{display:block;aspect-ratio:1;height:100%;border-radius:50%;background-color:var(--pico-color);box-shadow:var(--pico-switch-thumb-box-shadow);content:"";transition:margin .1s ease-in-out}[type=checkbox][role=switch]:focus{--pico-background-color:var(--pico-switch-background-color);--pico-border-color:var(--pico-switch-background-color)}[type=checkbox][role=switch]:checked{--pico-background-color:var(--pico-switch-checked-background-color);--pico-border-color:var(--pico-switch-checked-background-color);background-image:none}[type=checkbox][role=switch]:checked::before{margin-inline-start:calc(2.25em - 1.25em)}[type=checkbox][role=switch][disabled]{--pico-background-color:var(--pico-border-color)}[type=checkbox][aria-invalid=false]:checked,[type=checkbox][aria-invalid=false]:checked:active,[type=checkbox][aria-invalid=false]:checked:focus,[type=checkbox][role=switch][aria-invalid=false]:checked,[type=checkbox][role=switch][aria-invalid=false]:checked:active,[type=checkbox][role=switch][aria-invalid=false]:checked:focus{--pico-background-color:var(--pico-form-element-valid-border-color)}[type=checkbox]:checked:active[aria-invalid=true],[type=checkbox]:checked:focus[aria-invalid=true],[type=checkbox]:checked[aria-invalid=true],[type=checkbox][role=switch]:checked:active[aria-invalid=true],[type=checkbox][role=switch]:checked:focus[aria-invalid=true],[type=checkbox][role=switch]:checked[aria-invalid=true]{--pico-background-color:var(--pico-form-element-invalid-border-color)}[type=checkbox][aria-invalid=false]:checked,[type=checkbox][aria-invalid=false]:checked:active,[type=checkbox][aria-invalid=false]:checked:focus,[type=checkbox][role=switch][aria-invalid=false]:checked,[type=checkbox][role=switch][aria-invalid=false]:checked:active,[type=checkbox][role=switch][aria-invalid=false]:checked:focus,[type=radio][aria-invalid=false]:checked,[type=radio][aria-invalid=false]:checked:active,[type=radio][aria-invalid=false]:checked:focus{--pico-border-color:var(--pico-form-element-valid-border-color)}[type=checkbox]:checked:active[aria-invalid=true],[type=checkbox]:checked:focus[aria-invalid=true],[type=checkbox]:checked[aria-invalid=true],[type=checkbox][role=switch]:checked:active[aria-invalid=true],[type=checkbox][role=switch]:checked:focus[aria-invalid=true],[type=checkbox][role=switch]:checked[aria-invalid=true],[type=radio]:checked:active[aria-invalid=true],[type=radio]:checked:focus[aria-invalid=true],[type=radio]:checked[aria-invalid=true]{--pico-border-color:var(--pico-form-element-invalid-border-color)}[type=color]::-webkit-color-swatch-wrapper{padding:0}[type=color]::-moz-focus-inner{padding:0}[type=color]::-webkit-color-swatch{border:0;border-radius:calc(var(--pico-border-radius) * .5)}[type=color]::-moz-color-swatch{border:0;border-radius:calc(var(--pico-border-radius) * .5)}input:not([type=checkbox],[type=radio],[type=range],[type=file]):is([type=date],[type=datetime-local],[type=month],[type=time],[type=week]){--pico-icon-position:0.75rem;--pico-icon-width:1rem;padding-right:calc(var(--pico-icon-width) + var(--pico-icon-position));background-image:var(--pico-icon-date);background-position:center right var(--pico-icon-position);background-size:var(--pico-icon-width) 
auto;background-repeat:no-repeat}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=time]{background-image:var(--pico-icon-time)}[type=date]::-webkit-calendar-picker-indicator,[type=datetime-local]::-webkit-calendar-picker-indicator,[type=month]::-webkit-calendar-picker-indicator,[type=time]::-webkit-calendar-picker-indicator,[type=week]::-webkit-calendar-picker-indicator{width:var(--pico-icon-width);margin-right:calc(var(--pico-icon-width) * -1);margin-left:var(--pico-icon-position);opacity:0}@-moz-document url-prefix(){[type=date],[type=datetime-local],[type=month],[type=time],[type=week]{padding-right:var(--pico-form-element-spacing-horizontal)!important;background-image:none!important}}[dir=rtl] :is([type=date],[type=datetime-local],[type=month],[type=time],[type=week]){text-align:right}[type=file]{--pico-color:var(--pico-muted-color);margin-left:calc(var(--pico-outline-width) * -1);padding:calc(var(--pico-form-element-spacing-vertical) * .5) 0;padding-left:var(--pico-outline-width);border:0;border-radius:0;background:0 0}[type=file]::file-selector-button{margin-right:calc(var(--pico-spacing)/ 2);padding:calc(var(--pico-form-element-spacing-vertical) * .5) var(--pico-form-element-spacing-horizontal)}[type=file]:is(:hover,:active,:focus)::file-selector-button{--pico-background-color:var(--pico-secondary-hover-background);--pico-border-color:var(--pico-secondary-hover-border)}[type=file]:focus::file-selector-button{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-secondary-focus)}[type=range]{-webkit-appearance:none;-moz-appearance:none;appearance:none;width:100%;height:1.25rem;background:0 0}[type=range]::-webkit-slider-runnable-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-webkit-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-moz-range-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-moz-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-ms-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-ms-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-webkit-slider-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-webkit-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform var(--pico-transition)}[type=range]::-moz-range-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-moz-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform 
var(--pico-transition)}[type=range]::-ms-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-ms-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform var(--pico-transition)}[type=range]:active,[type=range]:focus-within{--pico-range-border-color:var(--pico-range-active-border-color);--pico-range-thumb-color:var(--pico-range-thumb-active-color)}[type=range]:active::-webkit-slider-thumb{transform:scale(1.25)}[type=range]:active::-moz-range-thumb{transform:scale(1.25)}[type=range]:active::-ms-thumb{transform:scale(1.25)}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search]{padding-inline-start:calc(var(--pico-form-element-spacing-horizontal) + 1.75rem);background-image:var(--pico-icon-search);background-position:center left calc(var(--pico-form-element-spacing-horizontal) + .125rem);background-size:1rem auto;background-repeat:no-repeat}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid]{padding-inline-start:calc(var(--pico-form-element-spacing-horizontal) + 1.75rem)!important;background-position:center left 1.125rem,center right .75rem}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid=false]{background-image:var(--pico-icon-search),var(--pico-icon-valid)}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid=true]{background-image:var(--pico-icon-search),var(--pico-icon-invalid)}[dir=rtl] :where(input):not([type=checkbox],[type=radio],[type=range],[type=file])[type=search]{background-position:center right 1.125rem}[dir=rtl] :where(input):not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid]{background-position:center right 1.125rem,center left .75rem}details{display:block;margin-bottom:var(--pico-spacing)}details summary{line-height:1rem;list-style-type:none;cursor:pointer;transition:color var(--pico-transition)}details summary:not([role]){color:var(--pico-accordion-close-summary-color)}details summary::-webkit-details-marker{display:none}details summary::marker{display:none}details summary::-moz-list-bullet{list-style-type:none}details summary::after{display:block;width:1rem;height:1rem;margin-inline-start:calc(var(--pico-spacing,1rem) * .5);float:right;transform:rotate(-90deg);background-image:var(--pico-icon-chevron);background-position:right center;background-size:1rem auto;background-repeat:no-repeat;content:"";transition:transform var(--pico-transition)}details summary:focus{outline:0}details summary:focus:not([role]){color:var(--pico-accordion-active-summary-color)}details summary:focus-visible:not([role]){outline:var(--pico-outline-width) solid var(--pico-primary-focus);outline-offset:calc(var(--pico-spacing,1rem) * 0.5);color:var(--pico-primary)}details summary[role=button]{width:100%;text-align:left}details summary[role=button]::after{height:calc(1rem * var(--pico-line-height,1.5))}details[open]>summary{margin-bottom:var(--pico-spacing)}details[open]>summary:not([role]):not(:focus){color:var(--pico-accordion-open-summary-color)}details[open]>summary::after{transform:rotate(0)}[dir=rtl] details summary{text-align:right}[dir=rtl] details summary::after{float:left;background-position:left 
center}article{margin-bottom:var(--pico-block-spacing-vertical);padding:var(--pico-block-spacing-vertical) var(--pico-block-spacing-horizontal);border-radius:var(--pico-border-radius);background:var(--pico-card-background-color);box-shadow:var(--pico-card-box-shadow)}article>footer,article>header{margin-right:calc(var(--pico-block-spacing-horizontal) * -1);margin-left:calc(var(--pico-block-spacing-horizontal) * -1);padding:calc(var(--pico-block-spacing-vertical) * .66) var(--pico-block-spacing-horizontal);background-color:var(--pico-card-sectioning-background-color)}article>header{margin-top:calc(var(--pico-block-spacing-vertical) * -1);margin-bottom:var(--pico-block-spacing-vertical);border-bottom:var(--pico-border-width) solid var(--pico-card-border-color);border-top-right-radius:var(--pico-border-radius);border-top-left-radius:var(--pico-border-radius)}article>footer{margin-top:var(--pico-block-spacing-vertical);margin-bottom:calc(var(--pico-block-spacing-vertical) * -1);border-top:var(--pico-border-width) solid var(--pico-card-border-color);border-bottom-right-radius:var(--pico-border-radius);border-bottom-left-radius:var(--pico-border-radius)}[role=group],[role=search]{display:inline-flex;position:relative;width:100%;margin-bottom:var(--pico-spacing);border-radius:var(--pico-border-radius);box-shadow:var(--pico-group-box-shadow,0 0 0 transparent);vertical-align:middle;transition:box-shadow var(--pico-transition)}[role=group] input:not([type=checkbox],[type=radio]),[role=group] select,[role=group]>*,[role=search] input:not([type=checkbox],[type=radio]),[role=search] select,[role=search]>*{position:relative;flex:1 1 auto;margin-bottom:0}[role=group] input:not([type=checkbox],[type=radio]):not(:first-child),[role=group] select:not(:first-child),[role=group]>:not(:first-child),[role=search] input:not([type=checkbox],[type=radio]):not(:first-child),[role=search] select:not(:first-child),[role=search]>:not(:first-child){margin-left:0;border-top-left-radius:0;border-bottom-left-radius:0}[role=group] input:not([type=checkbox],[type=radio]):not(:last-child),[role=group] select:not(:last-child),[role=group]>:not(:last-child),[role=search] input:not([type=checkbox],[type=radio]):not(:last-child),[role=search] select:not(:last-child),[role=search]>:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}[role=group] input:not([type=checkbox],[type=radio]):focus,[role=group] select:focus,[role=group]>:focus,[role=search] input:not([type=checkbox],[type=radio]):focus,[role=search] select:focus,[role=search]>:focus{z-index:2}[role=group] [role=button]:not(:first-child),[role=group] [type=button]:not(:first-child),[role=group] [type=reset]:not(:first-child),[role=group] [type=submit]:not(:first-child),[role=group] button:not(:first-child),[role=group] input:not([type=checkbox],[type=radio]):not(:first-child),[role=group] select:not(:first-child),[role=search] [role=button]:not(:first-child),[role=search] [type=button]:not(:first-child),[role=search] [type=reset]:not(:first-child),[role=search] [type=submit]:not(:first-child),[role=search] button:not(:first-child),[role=search] input:not([type=checkbox],[type=radio]):not(:first-child),[role=search] select:not(:first-child){margin-left:calc(var(--pico-border-width) * -1)}[role=group] [role=button],[role=group] [type=button],[role=group] [type=reset],[role=group] [type=submit],[role=group] button,[role=search] [role=button],[role=search] [type=button],[role=search] [type=reset],[role=search] [type=submit],[role=search] 
button{width:auto}@supports selector(:has(*)){[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus),[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus){--pico-group-box-shadow:var(--pico-group-box-shadow-focus-with-button)}[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) input:not([type=checkbox],[type=radio]),[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) select,[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) input:not([type=checkbox],[type=radio]),[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) select{border-color:transparent}[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus),[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus){--pico-group-box-shadow:var(--pico-group-box-shadow-focus-with-input)}[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [role=button],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=button],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=submit],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) button,[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [role=button],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=button],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=submit],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) button{--pico-button-box-shadow:0 0 0 var(--pico-border-width) var(--pico-primary-border);--pico-button-hover-box-shadow:0 0 0 var(--pico-border-width) var(--pico-primary-hover-border)}[role=group] [role=button]:focus,[role=group] [type=button]:focus,[role=group] [type=reset]:focus,[role=group] [type=submit]:focus,[role=group] button:focus,[role=search] [role=button]:focus,[role=search] [type=button]:focus,[role=search] [type=reset]:focus,[role=search] [type=submit]:focus,[role=search] button:focus{box-shadow:none}}[role=search]>:first-child{border-top-left-radius:5rem;border-bottom-left-radius:5rem}[role=search]>:last-child{border-top-right-radius:5rem;border-bottom-right-radius:5rem}[aria-busy=true]:not(input,select,textarea,html,form){white-space:nowrap}[aria-busy=true]:not(input,select,textarea,html,form)::before{display:inline-block;width:1em;height:1em;background-image:var(--pico-icon-loading);background-size:1em auto;background-repeat:no-repeat;content:"";vertical-align:-.125em}[aria-busy=true]:not(input,select,textarea,html,form):not(:empty)::before{margin-inline-end:calc(var(--pico-spacing) * 
.5)}[aria-busy=true]:not(input,select,textarea,html,form):empty{text-align:center}[role=button][aria-busy=true],[type=button][aria-busy=true],[type=reset][aria-busy=true],[type=submit][aria-busy=true],a[aria-busy=true],button[aria-busy=true]{pointer-events:none}:host,:root{--pico-scrollbar-width:0px}dialog{display:flex;z-index:999;position:fixed;top:0;right:0;bottom:0;left:0;align-items:center;justify-content:center;width:inherit;min-width:100%;height:inherit;min-height:100%;padding:0;border:0;-webkit-backdrop-filter:var(--pico-modal-overlay-backdrop-filter);backdrop-filter:var(--pico-modal-overlay-backdrop-filter);background-color:var(--pico-modal-overlay-background-color);color:var(--pico-color)}dialog>article{width:100%;max-height:calc(100vh - var(--pico-spacing) * 2);margin:var(--pico-spacing);overflow:auto}@media (min-width:576px){dialog>article{max-width:510px}}@media (min-width:768px){dialog>article{max-width:700px}}dialog>article>header>*{margin-bottom:0}dialog>article>header :is(a,button)[rel=prev]{margin:0;margin-left:var(--pico-spacing);padding:0;float:right}dialog>article>footer{text-align:right}dialog>article>footer [role=button],dialog>article>footer button{margin-bottom:0}dialog>article>footer [role=button]:not(:first-of-type),dialog>article>footer button:not(:first-of-type){margin-left:calc(var(--pico-spacing) * .5)}dialog>article :is(a,button)[rel=prev]{display:block;width:1rem;height:1rem;margin-top:calc(var(--pico-spacing) * -1);margin-bottom:var(--pico-spacing);margin-left:auto;border:none;background-image:var(--pico-icon-close);background-position:center;background-size:auto 1rem;background-repeat:no-repeat;background-color:transparent;opacity:.5;transition:opacity var(--pico-transition)}dialog>article :is(a,button)[rel=prev]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){opacity:1}dialog:not([open]),dialog[open=false]{display:none}:where(nav li)::before{float:left;content:"\200b"}nav,nav ul{display:flex}nav{justify-content:space-between;overflow:visible}nav ol,nav ul{align-items:center;margin-bottom:0;padding:0;list-style:none}nav ol:first-of-type,nav ul:first-of-type{margin-left:calc(var(--pico-nav-element-spacing-horizontal) * -1)}nav ol:last-of-type,nav ul:last-of-type{margin-right:calc(var(--pico-nav-element-spacing-horizontal) * -1)}nav li{display:inline-block;margin:0;padding:var(--pico-nav-element-spacing-vertical) var(--pico-nav-element-spacing-horizontal)}nav li :where(a,[role=link]){display:inline-block;margin:calc(var(--pico-nav-link-spacing-vertical) * -1) calc(var(--pico-nav-link-spacing-horizontal) * -1);padding:var(--pico-nav-link-spacing-vertical) var(--pico-nav-link-spacing-horizontal);border-radius:var(--pico-border-radius)}nav li :where(a,[role=link]):not(:hover){text-decoration:none}nav li [role=button],nav li [type=button],nav li button,nav li input:not([type=checkbox],[type=radio],[type=range],[type=file]),nav li select{height:auto;margin-right:inherit;margin-bottom:0;margin-left:inherit;padding:calc(var(--pico-nav-link-spacing-vertical) - var(--pico-border-width) * 2) var(--pico-nav-link-spacing-horizontal)}nav[aria-label=breadcrumb]{align-items:center;justify-content:start}nav[aria-label=breadcrumb] ul li:not(:first-child){margin-inline-start:var(--pico-nav-link-spacing-horizontal)}nav[aria-label=breadcrumb] ul li a{margin:calc(var(--pico-nav-link-spacing-vertical) * -1) 0;margin-inline-start:calc(var(--pico-nav-link-spacing-horizontal) * -1)}nav[aria-label=breadcrumb] ul
li:not(:last-child)::after{display:inline-block;position:absolute;width:calc(var(--pico-nav-link-spacing-horizontal) * 4);margin:0 calc(var(--pico-nav-link-spacing-horizontal) * -1);content:var(--pico-nav-breadcrumb-divider);color:var(--pico-muted-color);text-align:center;text-decoration:none;white-space:nowrap}nav[aria-label=breadcrumb] a[aria-current]:not([aria-current=false]){background-color:transparent;color:inherit;text-decoration:none;pointer-events:none}aside li,aside nav,aside ol,aside ul{display:block}aside li{padding:calc(var(--pico-nav-element-spacing-vertical) * .5) var(--pico-nav-element-spacing-horizontal)}aside li a{display:block}aside li [role=button]{margin:inherit}[dir=rtl] nav[aria-label=breadcrumb] ul li:not(:last-child) ::after{content:"\\"}progress{display:inline-block;vertical-align:baseline}progress{-webkit-appearance:none;-moz-appearance:none;display:inline-block;appearance:none;width:100%;height:.5rem;margin-bottom:calc(var(--pico-spacing) * .5);overflow:hidden;border:0;border-radius:var(--pico-border-radius);background-color:var(--pico-progress-background-color);color:var(--pico-progress-color)}progress::-webkit-progress-bar{border-radius:var(--pico-border-radius);background:0 0}progress[value]::-webkit-progress-value{background-color:var(--pico-progress-color);-webkit-transition:inline-size var(--pico-transition);transition:inline-size var(--pico-transition)}progress::-moz-progress-bar{background-color:var(--pico-progress-color)}@media (prefers-reduced-motion:no-preference){progress:indeterminate{background:var(--pico-progress-background-color) linear-gradient(to right,var(--pico-progress-color) 30%,var(--pico-progress-background-color) 30%) top left/150% 150% no-repeat;animation:progress-indeterminate 1s linear infinite}progress:indeterminate[value]::-webkit-progress-value{background-color:transparent}progress:indeterminate::-moz-progress-bar{background-color:transparent}}@media (prefers-reduced-motion:no-preference){[dir=rtl] progress:indeterminate{animation-direction:reverse}}@keyframes progress-indeterminate{0%{background-position:200% 0}100%{background-position:-200% 0}}[data-tooltip]{position:relative}[data-tooltip]:not(a,button,input,[role=button]){border-bottom:1px dotted;text-decoration:none;cursor:help}[data-tooltip]::after,[data-tooltip]::before,[data-tooltip][data-placement=top]::after,[data-tooltip][data-placement=top]::before{display:block;z-index:99;position:absolute;bottom:100%;left:50%;padding:.25rem .5rem;overflow:hidden;transform:translate(-50%,-.25rem);border-radius:var(--pico-border-radius);background:var(--pico-tooltip-background-color);content:attr(data-tooltip);color:var(--pico-tooltip-color);font-style:normal;font-weight:var(--pico-font-weight);font-size:.875rem;text-decoration:none;text-overflow:ellipsis;white-space:nowrap;opacity:0;pointer-events:none}[data-tooltip]::after,[data-tooltip][data-placement=top]::after{padding:0;transform:translate(-50%,0);border-top:.3rem solid;border-right:.3rem solid transparent;border-left:.3rem solid transparent;border-radius:0;background-color:transparent;content:"";color:var(--pico-tooltip-background-color)}[data-tooltip][data-placement=bottom]::after,[data-tooltip][data-placement=bottom]::before{top:100%;bottom:auto;transform:translate(-50%,.25rem)}[data-tooltip][data-placement=bottom]:after{transform:translate(-50%,-.3rem);border:.3rem solid transparent;border-bottom:.3rem 
solid}[data-tooltip][data-placement=left]::after,[data-tooltip][data-placement=left]::before{top:50%;right:100%;bottom:auto;left:auto;transform:translate(-.25rem,-50%)}[data-tooltip][data-placement=left]:after{transform:translate(.3rem,-50%);border:.3rem solid transparent;border-left:.3rem solid}[data-tooltip][data-placement=right]::after,[data-tooltip][data-placement=right]::before{top:50%;right:auto;bottom:auto;left:100%;transform:translate(.25rem,-50%)}[data-tooltip][data-placement=right]:after{transform:translate(-.3rem,-50%);border:.3rem solid transparent;border-right:.3rem solid}[data-tooltip]:focus::after,[data-tooltip]:focus::before,[data-tooltip]:hover::after,[data-tooltip]:hover::before{opacity:1}@media (hover:hover) and (pointer:fine){[data-tooltip]:focus::after,[data-tooltip]:focus::before,[data-tooltip]:hover::after,[data-tooltip]:hover::before{--pico-tooltip-slide-to:translate(-50%, -0.25rem);transform:translate(-50%,.75rem);animation-duration:.2s;animation-fill-mode:forwards;animation-name:tooltip-slide;opacity:0}[data-tooltip]:focus::after,[data-tooltip]:hover::after{--pico-tooltip-caret-slide-to:translate(-50%, 0rem);transform:translate(-50%,-.25rem);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=bottom]:focus::after,[data-tooltip][data-placement=bottom]:focus::before,[data-tooltip][data-placement=bottom]:hover::after,[data-tooltip][data-placement=bottom]:hover::before{--pico-tooltip-slide-to:translate(-50%, 0.25rem);transform:translate(-50%,-.75rem);animation-name:tooltip-slide}[data-tooltip][data-placement=bottom]:focus::after,[data-tooltip][data-placement=bottom]:hover::after{--pico-tooltip-caret-slide-to:translate(-50%, -0.3rem);transform:translate(-50%,-.5rem);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=left]:focus::after,[data-tooltip][data-placement=left]:focus::before,[data-tooltip][data-placement=left]:hover::after,[data-tooltip][data-placement=left]:hover::before{--pico-tooltip-slide-to:translate(-0.25rem, -50%);transform:translate(.75rem,-50%);animation-name:tooltip-slide}[data-tooltip][data-placement=left]:focus::after,[data-tooltip][data-placement=left]:hover::after{--pico-tooltip-caret-slide-to:translate(0.3rem, -50%);transform:translate(.05rem,-50%);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=right]:focus::after,[data-tooltip][data-placement=right]:focus::before,[data-tooltip][data-placement=right]:hover::after,[data-tooltip][data-placement=right]:hover::before{--pico-tooltip-slide-to:translate(0.25rem, -50%);transform:translate(-.75rem,-50%);animation-name:tooltip-slide}[data-tooltip][data-placement=right]:focus::after,[data-tooltip][data-placement=right]:hover::after{--pico-tooltip-caret-slide-to:translate(-0.3rem, -50%);transform:translate(-.05rem,-50%);animation-name:tooltip-caret-slide}}@keyframes tooltip-slide{to{transform:var(--pico-tooltip-slide-to);opacity:1}}@keyframes tooltip-caret-slide{50%{opacity:0}to{transform:var(--pico-tooltip-caret-slide-to);opacity:1}}[aria-controls]{cursor:pointer}[aria-disabled=true],[disabled]{cursor:not-allowed}[aria-hidden=false][hidden]{display:initial}[aria-hidden=false][hidden]:not(:focus){clip:rect(0,0,0,0);position:absolute}[tabindex],a,area,button,input,label,select,summary,textarea{-ms-touch-action:manipulation}[dir=rtl]{direction:rtl}@media 
(prefers-reduced-motion:reduce){:not([aria-busy=true]),:not([aria-busy=true])::after,:not([aria-busy=true])::before{background-attachment:initial!important;animation-duration:1ms!important;animation-delay:-1ms!important;animation-iteration-count:1!important;scroll-behavior:auto!important;transition-delay:0s!important;transition-duration:0s!important}}
+224
www/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
4
+
<head>
5
+
<meta charset="utf-8">
6
+
<meta name="viewport" content="width=device-width, initial-scale=1">
7
+
<meta name="color-scheme" content="light dark">
8
+
9
+
<!-- Primary Meta Tags -->
10
+
<title>QuickDID - AT Protocol Identity Resolution Service</title>
11
+
<meta name="title" content="QuickDID - AT Protocol Identity Resolution Service">
12
+
<meta name="description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
13
+
<meta name="keywords" content="ATProtocol, Bluesky, DID, handle resolution, decentralized identity, atproto">
14
+
<meta name="author" content="Nick Gerakines">
15
+
16
+
<!-- Open Graph / Facebook -->
17
+
<meta property="og:type" content="website">
18
+
<meta property="og:url" content="https://quickdid.smokesignal.tools/">
19
+
<meta property="og:title" content="QuickDID - AT Protocol Identity Resolution Service">
20
+
<meta property="og:description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
21
+
<meta property="og:site_name" content="QuickDID">
22
+
23
+
<!-- Twitter -->
24
+
<meta property="twitter:card" content="summary_large_image">
25
+
<meta property="twitter:url" content="https://quickdid.smokesignal.tools/">
26
+
<meta property="twitter:title" content="QuickDID - AT Protocol Identity Resolution Service">
27
+
<meta property="twitter:description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
28
+
29
+
<!-- Additional Meta Tags -->
30
+
<meta name="robots" content="index, follow">
31
+
<meta name="language" content="English">
32
+
<meta name="theme-color" content="#1976d2">
33
+
<link rel="canonical" href="https://quickdid.smokesignal.tools/">
34
+
35
+
<!-- Stylesheet -->
36
+
<link rel="stylesheet" href="/css/pico.classless.green.min.css">
37
+
<style>
38
+
.resolver-form {
39
+
margin: 2rem 0;
40
+
padding: 1.5rem;
41
+
background: var(--pico-card-background-color);
42
+
border-radius: var(--pico-border-radius);
43
+
border: 1px solid var(--pico-muted-border-color);
44
+
}
45
+
46
+
.resolver-result {
47
+
margin-top: 1rem;
48
+
padding: 1.5rem;
49
+
background: var(--pico-code-background-color);
50
+
border-radius: var(--pico-border-radius);
51
+
border: 1px solid var(--pico-muted-border-color);
52
+
}
53
+
54
+
.result-content {
55
+
background: transparent;
56
+
padding: 1rem;
57
+
overflow-x: auto;
58
+
white-space: pre-wrap;
59
+
word-break: break-word;
60
+
}
61
+
62
+
code {
63
+
padding: 0.25rem 0.5rem;
64
+
background: var(--pico-code-background-color);
65
+
border-radius: var(--pico-border-radius);
66
+
}
67
+
68
+
span {
69
+
display: inline-block;
70
+
padding: 0.25rem 0.5rem;
71
+
background: var(--pico-primary);
72
+
color: var(--pico-primary-inverse);
73
+
border-radius: var(--pico-border-radius);
74
+
font-size: 0.875rem;
75
+
font-weight: bold;
76
+
margin-right: 0.5rem;
77
+
}
78
+
79
+
.endpoint-section {
80
+
margin-bottom: 3rem;
81
+
}
82
+
</style>
83
+
</head>
84
+
85
+
<body>
86
+
<header>
87
+
<hgroup>
88
+
<h1>QuickDID</h1>
89
+
<p>AT Protocol Identity Resolution Service</p>
90
+
</hgroup>
91
+
</header>
92
+
<main>
93
+
<p>QuickDID provides high-performance resolution services for the AT Protocol ecosystem.</p>
94
+
95
+
<h2>Available Endpoints</h2>
96
+
97
+
<section class="endpoint-section">
98
+
<h3>GET /xrpc/com.atproto.identity.resolveHandle</h3>
99
+
<p>Resolve an AT Protocol handle to its DID</p>
100
+
<p>Parameters: <code>?handle={handle}</code></p>
101
+
102
+
<h4>Try It Out</h4>
103
+
<form id="handleResolveForm" class="resolver-form">
104
+
<label for="handleInput">
105
+
Enter an AT Protocol handle to resolve:
106
+
<input type="text" id="handleInput" name="handle" placeholder="e.g., alice.bsky.social" required>
107
+
</label>
108
+
<button type="submit">Resolve Handle</button>
109
+
</form>
110
+
111
+
<div id="handleResult" class="resolver-result" style="display: none;">
112
+
<h4>Result</h4>
113
+
<pre id="handleResultContent" class="result-content"></pre>
114
+
</div>
115
+
116
+
<h4>Example Usage</h4>
117
+
<code>curl "https://quickdid.smokesignal.tools/xrpc/com.atproto.identity.resolveHandle?handle=ngerakines.me"</code>
118
+
</section>
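<!-- Illustrative response shapes, added for clarity; these follow the standard
     AT Protocol XRPC conventions and are not emitted by this page itself.
     A successful resolveHandle call returns a JSON body with a single "did"
     field, and XRPC errors carry "error" and "message" fields:

       {"did": "did:plc:..."}
       {"error": "InvalidRequest", "message": "Unable to resolve handle"}
-->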
119
+
120
+
<section class="endpoint-section">
121
+
<h3>GET /xrpc/com.atproto.lexicon.resolveLexicon</h3>
122
+
<p>Resolve an AT Protocol lexicon (NSID) to its schema</p>
123
+
<p>Parameters: <code>?nsid={nsid}</code></p>
124
+
125
+
<h4>Try It Out</h4>
126
+
<form id="lexiconResolveForm" class="resolver-form">
127
+
<label for="nsidInput">
128
+
Enter an AT Protocol NSID to resolve:
129
+
<input type="text" id="nsidInput" name="nsid" placeholder="e.g., app.bsky.feed.post" required>
130
+
</label>
131
+
<button type="submit">Resolve Lexicon</button>
132
+
</form>
133
+
134
+
<div id="lexiconResult" class="resolver-result" style="display: none;">
135
+
<h4>Result</h4>
136
+
<pre id="lexiconResultContent" class="result-content"></pre>
137
+
</div>
138
+
139
+
<h4>Example Usage</h4>
140
+
<code>curl "https://quickdid.smokesignal.tools/xrpc/com.atproto.lexicon.resolveLexicon?nsid=app.bsky.feed.post"</code>
141
+
</section>
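<!-- Illustrative only: per the description above, resolveLexicon returns the
     published lexicon schema as JSON. Lexicon documents carry "lexicon": 1 and
     an "id" matching the requested NSID; the exact response envelope used by
     this service is an assumption here:

       {"lexicon": 1, "id": "app.bsky.feed.post", "defs": { ... }}
-->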
142
+
143
+
<h2>Documentation</h2>
144
+
<p>
145
+
For more information, visit the
146
+
<a href="https://tangled.sh/@smokesignal.events/quickdid" target="_blank">
147
+
QuickDID repository
148
+
</a>.
149
+
150
+
</p>
151
+
</main>
152
+
153
+
<script>
154
+
// Handle form submission for handle resolution
155
+
document.getElementById('handleResolveForm').addEventListener('submit', async (e) => {
156
+
e.preventDefault();
157
+
158
+
const handle = document.getElementById('handleInput').value.trim();
159
+
const resultDiv = document.getElementById('handleResult');
160
+
const resultContent = document.getElementById('handleResultContent');
161
+
162
+
// Show loading state
163
+
resultDiv.style.display = 'block';
164
+
resultContent.textContent = 'Loading...';
165
+
166
+
try {
167
+
// Build the request URL
168
+
const url = `/xrpc/com.atproto.identity.resolveHandle?handle=${encodeURIComponent(handle)}`;
169
+
170
+
// Make the GET request
171
+
const response = await fetch(url);
172
+
const data = await response.json();
173
+
174
+
// Display the result
175
+
if (response.ok) {
176
+
resultContent.textContent = JSON.stringify(data, null, 2);
177
+
resultContent.style.color = '';
178
+
} else {
179
+
resultContent.textContent = `Error: ${JSON.stringify(data, null, 2)}`;
180
+
resultContent.style.color = '#d32f2f';
181
+
}
182
+
} catch (error) {
183
+
resultContent.textContent = `Network Error: ${error.message}`;
184
+
resultContent.style.color = '#d32f2f';
185
+
}
186
+
});
187
+
188
+
// Handle form submission for lexicon resolution
189
+
document.getElementById('lexiconResolveForm').addEventListener('submit', async (e) => {
190
+
e.preventDefault();
191
+
192
+
const nsid = document.getElementById('nsidInput').value.trim();
193
+
const resultDiv = document.getElementById('lexiconResult');
194
+
const resultContent = document.getElementById('lexiconResultContent');
195
+
196
+
// Show loading state
197
+
resultDiv.style.display = 'block';
198
+
resultContent.textContent = 'Loading...';
199
+
200
+
try {
201
+
// Build the request URL
202
+
const url = `/xrpc/com.atproto.lexicon.resolveLexicon?nsid=${encodeURIComponent(nsid)}`;
203
+
204
+
// Make the GET request
205
+
const response = await fetch(url);
206
+
const data = await response.json();
207
+
208
+
// Display the result
209
+
if (response.ok) {
210
+
resultContent.textContent = JSON.stringify(data, null, 2);
211
+
resultContent.style.color = '';
212
+
} else {
213
+
resultContent.textContent = `Error: ${JSON.stringify(data, null, 2)}`;
214
+
resultContent.style.color = '#d32f2f';
215
+
}
216
+
} catch (error) {
217
+
resultContent.textContent = `Network Error: ${error.message}`;
218
+
resultContent.style.color = '#d32f2f';
219
+
}
220
+
});
221
+
</script>
222
+
</body>
223
+
224
+
</html>