.dockerignore  (new file, +37)
···
+# Git
+.git
+.gitignore
+
+# Documentation
+*.md
+docs/
+LICENSE
+
+# Development files
+.vscode/
+.env
+.env.local
+*.log
+
+# Build artifacts
+target/
+Dockerfile
+.dockerignore
+
+# Test files
+tests/
+benches/
+
+# Scripts (except the ones we need)
+*.sh
+
+# SQLite databases
+*.db
+*.db-*
+
+# OS files
+.DS_Store
+Thumbs.db
+
+# Keep the www directory for static files
+!www/
.env.example  (+12, -8)
···
 # QuickDID Environment Configuration Template
 # Copy this file to .env and customize for your deployment
-#
-# IMPORTANT: Never commit .env files with real SERVICE_KEY values

 # ============================================================================
 # REQUIRED CONFIGURATION
···
 # - quickdid.example.com:8080
 # - localhost:3007
 HTTP_EXTERNAL=quickdid.example.com
-
-# Private key for service identity (REQUIRED)
-# SECURITY: Generate a new key for each environment
-# NEVER commit real keys to version control
-SERVICE_KEY=did:key:YOUR_PRIVATE_KEY_HERE

 # ============================================================================
 # NETWORK CONFIGURATION
···
 QUEUE_BUFFER_SIZE=1000

 # ============================================================================
+# STATIC FILES CONFIGURATION
+# ============================================================================
+
+# Directory for serving static files (default: www)
+# This should contain:
+# - index.html (landing page)
+# - .well-known/atproto-did (service DID)
+# - .well-known/did.json (DID document)
+# Docker default: /app/www
+STATIC_FILES_DIR=www
+
+# ============================================================================
 # LOGGING
 # ============================================================================

···
 # ============================================================================

 # HTTP_EXTERNAL=localhost:3007
-# SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
 # RUST_LOG=debug
 # CACHE_TTL_MEMORY=60
 # CACHE_TTL_REDIS=300
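The template splits one required setting (`HTTP_EXTERNAL`) from optional ones that fall back to defaults. A minimal sketch of loading it in Rust, assuming a hypothetical `Config` type rather than QuickDID's actual startup code:

```rust
use std::env;

/// Hypothetical config loader mirroring the template above;
/// QuickDID's real startup code may differ.
struct Config {
    http_external: String,    // REQUIRED: external hostname
    static_files_dir: String, // default: www
    cache_ttl_redis: u64,     // default: 7776000 (90 days)
}

impl Config {
    fn from_env() -> Result<Self, env::VarError> {
        Ok(Config {
            // Fail fast if the required variable is missing.
            http_external: env::var("HTTP_EXTERNAL")?,
            static_files_dir: env::var("STATIC_FILES_DIR")
                .unwrap_or_else(|_| "www".to_string()),
            cache_ttl_redis: env::var("CACHE_TTL_REDIS")
                .ok()
                .and_then(|v| v.parse().ok())
                .unwrap_or(7_776_000),
        })
    }
}
```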
.gitignore  (+3)
CHANGELOG.md  (+47)
···

 ## [Unreleased]

+## [1.0.0-rc.5] - 2025-09-10
+
+### Added
+- Bidirectional caching support for handle-to-DID and DID-to-handle lookups in Redis resolver
+- `purge` method to HandleResolver trait for removing entries by handle or DID
+- `set` method to HandleResolver trait for manual cache updates
+- Jetstream consumer integration for real-time cache updates from AT Protocol firehose
+- QuickDidEventHandler module for processing Account and Identity events
+- Static file serving with www directory support for landing page and well-known files
+- Comprehensive test coverage for new bidirectional cache operations
+
+### Changed
+- Handle normalization to lowercase throughout the system for consistency
+- Updated all resolver implementations to chain `purge` and `set` calls through the stack
+- Enhanced documentation to reflect Jetstream configuration and bidirectional caching
+- Improved production deployment guide with real-time sync recommendations
+
+### Fixed
+- Handle case sensitivity issues - all handles now normalized to lowercase
+- Cache consistency between handle and DID lookups
+- Event processing error handling in Jetstream consumer
+
+## [1.0.0-rc.4] - 2025-09-08
+
+### Added
+- Metrics system with pluggable adapters (StatsD support) for monitoring and observability
+- Proactive refresh resolver for keeping cached entries fresh before expiration
+- Redis queue deduplication to prevent duplicate handle resolution work items
+- Configurable bind address for StatsD UDP socket supporting both IPv4 and IPv6
+- CORS headers support for cross-origin requests
+- OPTIONS method handling for preflight requests
+- Resolution timing measurements for performance monitoring
+- Comprehensive metrics tracking including counters, gauges, and timings
+- Telegraf and TimescaleDB integration guide for metrics aggregation
+- Railway deployment resources for production environments
+
+### Changed
+- Replaced chrono with httpdate for more efficient HTTP date formatting
+- Refactored handle resolver to include resolution time measurements
+- Improved handle resolution view architecture
+- Enhanced documentation with metrics configuration and deployment guides
+
+### Fixed
+- Minor typo in feature commit message ("fesature" corrected to "feature")
+
 ## [1.0.0-rc.3] - 2025-09-06

 ### Added
···
 - Unnecessary feature flags (axum macros, deadpool-redis script)
 - 4 unused dependencies reducing compilation time

+[1.0.0-rc.5]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.5
+[1.0.0-rc.4]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.4
 [1.0.0-rc.3]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.3
 [1.0.0-rc.2]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.2
 [1.0.0-rc.1]: https://tangled.sh/@smokesignal.events/quickdid/tree/v1.0.0-rc.1
CLAUDE.md  (+109, -18)
···
 # QuickDID - Development Guide for Claude

 ## Overview
-QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides handle-to-DID resolution with Redis-backed caching and queue processing.
+QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides bidirectional handle-to-DID and DID-to-handle resolution with multi-layer caching (Redis, SQLite, in-memory), queue processing, metrics support, proactive cache refreshing, and real-time cache updates via Jetstream consumer.

 ## Configuration

···
 cargo build

 # Run in debug mode (requires environment variables)
-HTTP_EXTERNAL=localhost:3007 SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK cargo run
+HTTP_EXTERNAL=localhost:3007 cargo run

 # Run tests
 cargo test
···
 1. **Handle Resolution** (`src/handle_resolver/`)
    - `BaseHandleResolver`: Core resolution using DNS and HTTP
    - `RateLimitedHandleResolver`: Semaphore-based rate limiting with optional timeout
-   - `CachingHandleResolver`: In-memory caching layer
-   - `RedisHandleResolver`: Redis-backed persistent caching
-   - `SqliteHandleResolver`: SQLite-backed persistent caching
+   - `CachingHandleResolver`: In-memory caching layer with bidirectional support
+   - `RedisHandleResolver`: Redis-backed persistent caching with bidirectional lookups
+   - `SqliteHandleResolver`: SQLite-backed persistent caching with bidirectional support
+   - `ProactiveRefreshResolver`: Automatically refreshes cache entries before expiration
+   - All resolvers implement `HandleResolver` trait with:
+     - `resolve`: Handle-to-DID resolution
+     - `purge`: Remove entries by handle or DID
+     - `set`: Manually update handle-to-DID mappings
    - Uses binary serialization via `HandleResolutionResult` for space efficiency
-   - Resolution stack: Cache → RateLimited (optional) → Base → DNS/HTTP
+   - Resolution stack: Cache → ProactiveRefresh (optional) → RateLimited (optional) → Base → DNS/HTTP
+   - Includes resolution timing measurements for metrics
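The trait surface described above implies roughly the following shape; this is a sketch with assumed signatures, not the definitions from `src/handle_resolver/`:

```rust
use async_trait::async_trait;

/// Assumed shape of the HandleResolver trait described above;
/// signatures are illustrative, not copied from the crate.
#[async_trait]
pub trait HandleResolver: Send + Sync {
    /// Handle-to-DID resolution.
    async fn resolve(&self, handle: &str) -> anyhow::Result<String>;
    /// Remove cached entries by handle or DID.
    async fn purge(&self, identifier: &str) -> anyhow::Result<()>;
    /// Manually update a handle-to-DID mapping (handle normalized to lowercase).
    async fn set(&self, handle: &str, did: &str) -> anyhow::Result<()>;
}
```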

 2. **Binary Serialization** (`src/handle_resolution_result.rs`)
    - Compact storage format using bincode
···
 4. **HTTP Server** (`src/http/`)
    - XRPC endpoints for AT Protocol compatibility
    - Health check endpoint
-   - DID document serving via .well-known
+   - Static file serving from configurable directory (default: www)
+   - Serves .well-known files as static content
+   - CORS headers support for cross-origin requests
+   - Cache-Control headers with configurable max-age and stale directives
+   - ETag support with configurable seed for cache invalidation
+
+5. **Metrics System** (`src/metrics.rs`)
+   - Pluggable metrics publishing with StatsD support
+   - Tracks counters, gauges, and timings
+   - Configurable tags for environment/service identification
+   - No-op adapter for development environments
+   - Metrics for Jetstream event processing
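A sketch of wiring the StatsD adapter with the `cadence` crate from the dependency list; the metric names and the helper structure here are assumptions:

```rust
use std::net::UdpSocket;

use cadence::prelude::*;
use cadence::{StatsdClient, UdpMetricSink};

fn statsd_client() -> anyhow::Result<StatsdClient> {
    // METRICS_STATSD_BIND: local UDP bind address ([::]:0 by default).
    let socket = UdpSocket::bind("[::]:0")?;
    // METRICS_STATSD_HOST: destination StatsD agent.
    let sink = UdpMetricSink::from(("localhost", 8125), socket)?;
    // METRICS_PREFIX: prepended to every metric name (default: quickdid).
    Ok(StatsdClient::from_sink("quickdid", sink))
}

fn record_examples(client: &StatsdClient) {
    let _ = client.incr("resolver.cache_hit");            // counter
    let _ = client.time("resolver.resolution_ms", 42u64); // timing
}
```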
+
+6. **Jetstream Consumer** (`src/jetstream_handler.rs`)
+   - Consumes AT Protocol firehose events via WebSocket
+   - Processes Account events (purges deleted/deactivated accounts)
+   - Processes Identity events (updates handle-to-DID mappings)
+   - Automatic reconnection with exponential backoff
+   - Comprehensive metrics for event processing
+   - Spawned as cancellable task using task manager
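The reconnection behavior can be pictured as a plain tokio loop. This is an illustrative sketch using only tokio primitives, not the actual `atproto-jetstream` API; `connect_and_consume` is a hypothetical placeholder:

```rust
use std::time::Duration;
use tokio_util::sync::CancellationToken;

/// Illustrative reconnect loop with exponential backoff; the real consumer
/// in src/jetstream_handler.rs wraps atproto-jetstream, whose API is not shown here.
async fn run_consumer(cancel: CancellationToken) {
    let mut backoff = Duration::from_secs(1);
    loop {
        tokio::select! {
            // Task manager requested shutdown: exit cleanly.
            _ = cancel.cancelled() => break,
            result = connect_and_consume() => {
                if result.is_ok() {
                    backoff = Duration::from_secs(1); // reset after a clean run
                } else {
                    tracing::warn!("Jetstream consumer disconnected; retrying in {backoff:?}");
                }
            }
        }
        tokio::time::sleep(backoff).await;
        backoff = (backoff * 2).min(Duration::from_secs(60)); // cap the backoff
    }
}

async fn connect_and_consume() -> anyhow::Result<()> {
    // Placeholder for the WebSocket session that processes Account and
    // Identity events until the connection drops.
    Ok(())
}
```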

 ## Key Technical Details

···
 - Other DID methods stored with full identifier

 ### Redis Integration
-- **Caching**: Uses MetroHash64 for key generation, stores binary data
+- **Bidirectional Caching**:
+  - Stores both handle→DID and DID→handle mappings
+  - Uses MetroHash64 for key generation
+  - Binary data storage for efficiency
+  - Automatic synchronization of both directions
 - **Queuing**: Reliable queue with processing/dead letter queues
 - **Key Prefixes**: Configurable via `QUEUE_REDIS_PREFIX` environment variable

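A sketch of how such bidirectional keys could be derived with `metrohash`; the `handle:` prefix follows the `redis-cli` examples later in this guide, but the exact key layout is an assumption:

```rust
use std::hash::Hasher;

use metrohash::MetroHash64;

/// Illustrative key derivation: both directions hash the identifier
/// with MetroHash64 under a common prefix.
fn cache_key(identifier: &str) -> String {
    let mut hasher = MetroHash64::default();
    hasher.write(identifier.as_bytes());
    format!("handle:{:x}", hasher.finish())
}

/// Writing a resolution caches both directions so either lookup hits.
fn keys_for(handle: &str, did: &str) -> (String, String) {
    (cache_key(&handle.to_lowercase()), cache_key(did))
}
```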
···
    - Acquire semaphore permit (with optional timeout)
    - If timeout configured and exceeded, return error
 3. Perform DNS TXT lookup or HTTP well-known query
-4. Cache result with appropriate TTL
+4. Cache result with appropriate TTL in both directions (handle→DID and DID→handle)
 5. Return DID or error

+### Cache Management Operations
+- **Purge**: Removes entries by either handle or DID
+  - Uses `atproto_identity::resolve::parse_input` for identifier detection
+  - Removes both handle→DID and DID→handle mappings
+  - Chains through all resolver layers
+- **Set**: Manually updates handle-to-DID mappings
+  - Updates both directions in cache
+  - Normalizes handles to lowercase
+  - Chains through all resolver layers
+
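The chaining described above is a decorator pattern: each layer updates itself, then forwards the call to the layer it wraps. An illustrative sketch using the trait shape assumed earlier (`CachingLayer` is a hypothetical name):

```rust
use async_trait::async_trait;

// Assumed minimal trait, as sketched in the Handle Resolution section above.
#[async_trait]
pub trait HandleResolver: Send + Sync {
    async fn purge(&self, identifier: &str) -> anyhow::Result<()>;
    async fn set(&self, handle: &str, did: &str) -> anyhow::Result<()>;
}

/// Hypothetical caching layer wrapping an inner resolver.
pub struct CachingLayer<R> {
    inner: R,
    // in-memory maps for handle→DID and DID→handle would live here
}

#[async_trait]
impl<R: HandleResolver> HandleResolver for CachingLayer<R> {
    async fn purge(&self, identifier: &str) -> anyhow::Result<()> {
        // 1. Drop the entry from this layer's maps (both directions).
        // 2. Chain the call so the wrapped Redis/SQLite layers purge too.
        self.inner.purge(identifier).await
    }

    async fn set(&self, handle: &str, did: &str) -> anyhow::Result<()> {
        let handle = handle.to_lowercase(); // handles are normalized on write
        // Update this layer's maps, then chain downward.
        self.inner.set(&handle, did).await
    }
}
```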
 ## Environment Variables

 ### Required
 - `HTTP_EXTERNAL`: External hostname for service endpoints (e.g., `localhost:3007`)
-- `SERVICE_KEY`: Private key for service identity (DID format)

-### Optional
+### Optional - Core Configuration
 - `HTTP_PORT`: Server port (default: 8080)
 - `PLC_HOSTNAME`: PLC directory hostname (default: plc.directory)
+- `RUST_LOG`: Logging level (e.g., debug, info)
+- `STATIC_FILES_DIR`: Directory for serving static files (default: www)
+
+### Optional - Caching
 - `REDIS_URL`: Redis connection URL for caching
 - `SQLITE_URL`: SQLite database URL for caching (e.g., `sqlite:./quickdid.db`)
+- `CACHE_TTL_MEMORY`: TTL for in-memory cache in seconds (default: 600)
+- `CACHE_TTL_REDIS`: TTL for Redis cache in seconds (default: 7776000)
+- `CACHE_TTL_SQLITE`: TTL for SQLite cache in seconds (default: 7776000)
+
+### Optional - Queue Configuration
 - `QUEUE_ADAPTER`: Queue type - 'mpsc', 'redis', 'sqlite', 'noop', or 'none' (default: mpsc)
 - `QUEUE_REDIS_PREFIX`: Redis key prefix for queues (default: queue:handleresolver:)
 - `QUEUE_WORKER_ID`: Worker ID for queue operations (default: worker1)
 - `QUEUE_BUFFER_SIZE`: Buffer size for MPSC queue (default: 1000)
 - `QUEUE_SQLITE_MAX_SIZE`: Max queue size for SQLite work shedding (default: 10000)
-- `CACHE_TTL_MEMORY`: TTL for in-memory cache in seconds (default: 600)
-- `CACHE_TTL_REDIS`: TTL for Redis cache in seconds (default: 7776000)
-- `CACHE_TTL_SQLITE`: TTL for SQLite cache in seconds (default: 7776000)
 - `QUEUE_REDIS_TIMEOUT`: Redis blocking timeout in seconds (default: 5)
+- `QUEUE_REDIS_DEDUP_ENABLED`: Enable queue deduplication to prevent duplicate handles (default: false)
+- `QUEUE_REDIS_DEDUP_TTL`: TTL for deduplication keys in seconds (default: 60)
+
+### Optional - Rate Limiting
 - `RESOLVER_MAX_CONCURRENT`: Maximum concurrent handle resolutions (default: 0 = disabled)
 - `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`: Timeout for acquiring rate limit permit in ms (default: 0 = no timeout)
-- `RUST_LOG`: Logging level (e.g., debug, info)
+
+### Optional - HTTP Cache Control
+- `CACHE_MAX_AGE`: Max-age for Cache-Control header in seconds (default: 86400)
+- `CACHE_STALE_IF_ERROR`: Stale-if-error directive in seconds (default: 172800)
+- `CACHE_STALE_WHILE_REVALIDATE`: Stale-while-revalidate directive in seconds (default: 86400)
+- `CACHE_MAX_STALE`: Max-stale directive in seconds (default: 86400)
+- `ETAG_SEED`: Seed value for ETag generation (default: application version)
+
+### Optional - Metrics
+- `METRICS_ADAPTER`: Metrics adapter type - 'noop' or 'statsd' (default: noop)
+- `METRICS_STATSD_HOST`: StatsD host and port (required when METRICS_ADAPTER=statsd, e.g., localhost:8125)
+- `METRICS_STATSD_BIND`: Bind address for StatsD UDP socket (default: [::]:0 for IPv6, can use 0.0.0.0:0 for IPv4)
+- `METRICS_PREFIX`: Prefix for all metrics (default: quickdid)
+- `METRICS_TAGS`: Comma-separated tags (e.g., env:prod,service:quickdid)
+
+### Optional - Proactive Refresh
+- `PROACTIVE_REFRESH_ENABLED`: Enable proactive cache refreshing (default: false)
+- `PROACTIVE_REFRESH_THRESHOLD`: Refresh when TTL remaining is below this threshold (0.0-1.0, default: 0.8)
+
+### Optional - Jetstream Consumer
+- `JETSTREAM_ENABLED`: Enable Jetstream consumer for real-time cache updates (default: false)
+- `JETSTREAM_HOSTNAME`: Jetstream WebSocket hostname (default: jetstream.atproto.tools)

 ## Error Handling

···
 ### Test Coverage Areas
 - Handle resolution with various DID methods
 - Binary serialization/deserialization
-- Redis caching and expiration
+- Redis caching and expiration with bidirectional lookups
 - Queue processing logic
 - HTTP endpoint responses
+- Jetstream event handler processing
+- Purge and set operations across resolver layers

 ## Development Patterns

···
 - Connection pooling for Redis
 - Configurable TTLs for cache entries
 - Rate limiting via semaphore-based concurrency control
+- HTTP caching with ETag and Cache-Control headers
+- Resolution timing metrics for performance monitoring

 ### Code Style
 - Follow existing Rust idioms and patterns
 - Use `tracing` for logging, not `println!`
 - Prefer `Arc` for shared state across async tasks
 - Handle errors explicitly, avoid `.unwrap()` in production code
+- Use `httpdate` crate for HTTP date formatting (not `chrono`)

 ## Common Tasks

···
 - For in-memory: Set `CACHE_TTL_MEMORY` environment variable
 - For Redis: Set `CACHE_TTL_REDIS` environment variable
 - For SQLite: Set `CACHE_TTL_SQLITE` environment variable
+
+### Configuring Metrics
+1. Set `METRICS_ADAPTER=statsd` and `METRICS_STATSD_HOST=localhost:8125`
+2. Configure tags with `METRICS_TAGS=env:prod,service:quickdid`
+3. Use Telegraf + TimescaleDB for aggregation (see `docs/telegraf-timescaledb-metrics-guide.md`)
+4. Railway deployment resources available in `railway-resources/telegraf/`

 ### Debugging Resolution Issues
 1. Enable debug logging: `RUST_LOG=debug`
-2. Check Redis cache: `redis-cli GET "handle:<hash>"`
+2. Check Redis cache:
+   - Handle lookup: `redis-cli GET "handle:<hash>"`
+   - DID lookup: `redis-cli GET "handle:<hash>"` (same key format)
 3. Check SQLite cache: `sqlite3 quickdid.db "SELECT * FROM handle_resolution_cache;"`
 4. Monitor queue processing in logs
 5. Check rate limiting: Look for "Rate limit permit acquisition timed out" errors
 6. Verify DNS/HTTP connectivity to AT Protocol infrastructure
+7. Monitor metrics for resolution timing and cache hit rates
+8. Check Jetstream consumer status:
+   - Look for "Jetstream consumer" log entries
+   - Monitor `jetstream.*` metrics
+   - Check reconnection attempts in logs

 ## Dependencies
 - `atproto-identity`: Core AT Protocol identity resolution
+- `atproto-jetstream`: AT Protocol Jetstream event consumer
 - `bincode`: Binary serialization
 - `deadpool-redis`: Redis connection pooling
 - `metrohash`: Fast non-cryptographic hashing
 - `tokio`: Async runtime
-- `axum`: Web framework
+- `axum`: Web framework
+- `httpdate`: HTTP date formatting (replacing chrono)
+- `cadence`: StatsD metrics client
+- `thiserror`: Error handling
Cargo.lock  (+647, -553)
···
 version = 4

 [[package]]
-name = "addr2line"
-version = "0.24.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1"
-dependencies = [
- "gimli",
-]
-
-[[package]]
-name = "adler2"
-version = "2.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"
-
-[[package]]
 name = "aho-corasick"
-version = "1.1.3"
+version = "1.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
 dependencies = [
  "memchr",
 ]
···
 checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"

 [[package]]
-name = "android-tzdata"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
-
-[[package]]
-name = "android_system_properties"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311"
-dependencies = [
- "libc",
-]
-
-[[package]]
 name = "anyhow"
-version = "1.0.99"
+version = "1.0.100"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100"
+checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61"

 [[package]]
 name = "arc-swap"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
 ]

 [[package]]
···
 checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"

 [[package]]
+name = "atproto-client"
+version = "0.13.0"
+source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-identity",
+ "atproto-oauth",
+ "atproto-record",
+ "bytes",
+ "reqwest",
+ "reqwest-chain",
+ "reqwest-middleware",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tracing",
+ "urlencoding",
+]
+
+[[package]]
 name = "atproto-identity"
-version = "0.11.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aaac8751c7e4329a95714c01d9e47d22d94bc8c96e78079098312235128acb9f"
+version = "0.13.0"
+source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
 dependencies = [
  "anyhow",
  "async-trait",
···
  "serde",
  "serde_ipld_dagcbor",
  "serde_json",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
+ "url",
+ "urlencoding",
+]
+
+[[package]]
+name = "atproto-jetstream"
+version = "0.13.0"
+source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-identity",
+ "futures",
+ "http",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tokio-util",
+ "tokio-websockets",
+ "tracing",
+ "tracing-subscriber",
+ "urlencoding",
+ "zstd",
+]
+
+[[package]]
+name = "atproto-lexicon"
+version = "0.13.0"
+source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-client",
+ "atproto-identity",
+ "hickory-resolver",
+ "reqwest",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.17",
+ "tokio",
+ "tracing",
+]
+
+[[package]]
+name = "atproto-oauth"
+version = "0.13.0"
+source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "atproto-identity",
+ "base64",
+ "chrono",
+ "ecdsa",
+ "elliptic-curve",
+ "k256",
+ "lru",
+ "multibase",
+ "p256",
+ "p384",
+ "rand 0.8.5",
+ "reqwest",
+ "reqwest-chain",
+ "reqwest-middleware",
+ "serde",
+ "serde_ipld_dagcbor",
+ "serde_json",
+ "sha2",
+ "thiserror 2.0.17",
+ "tokio",
+ "tracing",
+ "ulid",
+]
+
+[[package]]
+name = "atproto-record"
+version = "0.13.0"
+source = "git+https://tangled.org/@smokesignal.events/atproto-identity-rs#8a38edecc8ebebd74d511ae7863c7eecd0b877ad"
+dependencies = [
+ "anyhow",
+ "atproto-identity",
+ "base64",
+ "chrono",
+ "cid",
+ "multihash",
+ "rand 0.8.5",
+ "serde",
+ "serde_ipld_dagcbor",
+ "serde_json",
+ "sha2",
+ "thiserror 2.0.17",
 ]

 [[package]]
···

 [[package]]
 name = "axum"
-version = "0.8.4"
+version = "0.8.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5"
+checksum = "8a18ed336352031311f4e0b4dd2ff392d4fbb370777c9d18d7fc9d7359f73871"
 dependencies = [
  "axum-core",
  "bytes",
···
  "mime",
  "percent-encoding",
  "pin-project-lite",
- "rustversion",
- "serde",
+ "serde_core",
  "serde_json",
  "serde_path_to_error",
  "serde_urlencoded",
···

 [[package]]
 name = "axum-core"
-version = "0.5.2"
+version = "0.5.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6"
+checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
 dependencies = [
  "bytes",
  "futures-core",
···
  "http-body-util",
  "mime",
  "pin-project-lite",
- "rustversion",
  "sync_wrapper",
  "tower-layer",
  "tower-service",
···

 [[package]]
 name = "backon"
-version = "1.5.2"
+version = "1.6.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "592277618714fbcecda9a02ba7a8781f319d26532a88553bbacc77ba5d2b3a8d"
+checksum = "cffb0e931875b666fc4fcb20fee52e9bbd1ef836fd9e9e04ec21555f9f85f7ef"
 dependencies = [
  "fastrand",
 ]

 [[package]]
-name = "backtrace"
-version = "0.3.75"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6806a6321ec58106fea15becdad98371e28d92ccbc7c8f1b3b6dd724fe8f1002"
-dependencies = [
- "addr2line",
- "cfg-if",
- "libc",
- "miniz_oxide",
- "object",
- "rustc-demangle",
- "windows-targets 0.52.6",
-]
-
-[[package]]
 name = "base-x"
 version = "0.2.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf"
+
+[[package]]
+name = "base256emoji"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5e9430d9a245a77c92176e649af6e275f20839a48389859d1661e9a128d077c"
+dependencies = [
+ "const-str",
+ "match-lookup",
+]

 [[package]]
 name = "base64"
···

 [[package]]
 name = "bitflags"
-version = "2.9.4"
+version = "2.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
+checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3"
 dependencies = [
- "serde",
+ "serde_core",
 ]

 [[package]]
···
 checksum = "d71b6127be86fdcfddb610f7182ac57211d4b18a3e9c82eb2d17662f2227ad6a"

 [[package]]
+name = "cadence"
+version = "1.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3075f133bee430b7644c54fb629b9b4420346ffa275a45c81a6babe8b09b4f51"
+dependencies = [
+ "crossbeam-channel",
+]
+
+[[package]]
 name = "cbor4ii"
 version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "cc"
-version = "1.2.36"
+version = "1.2.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54"
+checksum = "37521ac7aabe3d13122dc382493e20c9416f299d2ccd5b3a5340a2570cdeb0f3"
 dependencies = [
  "find-msvc-tools",
+ "jobserver",
+ "libc",
  "shlex",
 ]

 [[package]]
 name = "cfg-if"
-version = "1.0.3"
+version = "1.0.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9"
+checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"

 [[package]]
 name = "cfg_aliases"
···

 [[package]]
 name = "chrono"
-version = "0.4.41"
+version = "0.4.42"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c469d952047f47f91b68d1cba3f10d63c11d73e4636f24f08daf0278abf01c4d"
+checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2"
 dependencies = [
- "android-tzdata",
- "iana-time-zone",
  "num-traits",
- "windows-link",
+ "serde",
 ]

 [[package]]
···
 version = "0.9.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8"
+
+[[package]]
+name = "const-str"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2f421161cb492475f1661ddc9815a745a1c894592070661180fdec3d4872e9c3"

 [[package]]
 name = "core-foundation"
···
 checksum = "8d162beedaa69905488a8da94f5ac3edb4dd4788b732fadb7bd120b2625c1976"
 dependencies = [
  "data-encoding",
- "syn",
+ "syn 2.0.108",
 ]

 [[package]]
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
 ]

 [[package]]
···
  "digest",
  "elliptic-curve",
  "rfc6979",
+ "serdect",
  "signature",
  "spki",
 ]
···
  "heck",
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
 ]

 [[package]]
···

 [[package]]
 name = "errno"
-version = "0.3.13"
+version = "0.3.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad"
+checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
 dependencies = [
  "libc",
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
 ]

 [[package]]
···

 [[package]]
 name = "find-msvc-tools"
-version = "0.1.1"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d"
+checksum = "52051878f80a721bb68ebfbc930e07b65ba72f2da88968ea5c06fd6ca3d3a127"

 [[package]]
 name = "flume"
···
 ]

 [[package]]
+name = "futures"
+version = "0.3.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
+dependencies = [
+ "futures-channel",
+ "futures-core",
+ "futures-executor",
+ "futures-io",
+ "futures-sink",
+ "futures-task",
+ "futures-util",
+]
+
+[[package]]
 name = "futures-channel"
 version = "0.3.31"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
 ]

 [[package]]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
 dependencies = [
+ "futures-channel",
  "futures-core",
  "futures-io",
  "futures-macro",
···
 ]

 [[package]]
-name = "generator"
-version = "0.8.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "605183a538e3e2a9c1038635cc5c2d194e2ee8fd0d1b66b8349fad7dbacce5a2"
-dependencies = [
- "cc",
- "cfg-if",
- "libc",
- "log",
- "rustversion",
- "windows",
-]
-
-[[package]]
 name = "generic-array"
-version = "0.14.7"
+version = "0.14.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
+checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2"
 dependencies = [
  "typenum",
  "version_check",
···
  "cfg-if",
  "js-sys",
  "libc",
- "wasi 0.11.1+wasi-snapshot-preview1",
+ "wasi",
  "wasm-bindgen",
 ]

 [[package]]
 name = "getrandom"
-version = "0.3.3"
+version = "0.3.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4"
+checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
 dependencies = [
  "cfg-if",
  "js-sys",
  "libc",
  "r-efi",
- "wasi 0.14.3+wasi-0.2.4",
+ "wasip2",
  "wasm-bindgen",
 ]
-
-[[package]]
-name = "gimli"
-version = "0.31.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"

 [[package]]
 name = "group"
···
 ]

 [[package]]
+name = "hashbrown"
+version = "0.16.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d"
+
+[[package]]
 name = "hashlink"
 version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7382cf6263419f2d8df38c55d7da83da5c18aef87fc7a7fc1fb1e344edfe14c1"
 dependencies = [
- "hashbrown",
+ "hashbrown 0.15.5",
 ]

 [[package]]
···
  "once_cell",
  "rand 0.9.2",
  "ring",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
  "tinyvec",
  "tokio",
  "tracing",
···
  "rand 0.9.2",
  "resolv-conf",
  "smallvec",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
 ]
···

 [[package]]
 name = "home"
-version = "0.5.11"
+version = "0.5.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf"
+checksum = "cc627f471c528ff0c4a49e1d5e60450c8f6461dd6d10ba9dcd3a61d3dff7728d"
 dependencies = [
- "windows-sys 0.59.0",
+ "windows-sys 0.61.2",
 ]

 [[package]]
···
 ]

 [[package]]
+name = "http-range-header"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9171a2ea8a68358193d15dd5d70c1c10a2afc3e7e4c5bc92bc9f025cebd7359c"
+
+[[package]]
 name = "httparse"
 version = "1.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "hyper-util"
-version = "0.1.16"
+version = "0.1.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
+checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8"
 dependencies = [
  "base64",
  "bytes",
···
  "libc",
  "percent-encoding",
  "pin-project-lite",
- "socket2 0.6.0",
+ "socket2 0.6.1",
  "system-configuration",
  "tokio",
  "tower-service",
···
 ]

 [[package]]
-name = "iana-time-zone"
-version = "0.1.63"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
-dependencies = [
- "android_system_properties",
- "core-foundation-sys",
- "iana-time-zone-haiku",
- "js-sys",
- "log",
- "wasm-bindgen",
- "windows-core",
-]
-
-[[package]]
-name = "iana-time-zone-haiku"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f"
-dependencies = [
- "cc",
-]
-
-[[package]]
 name = "icu_collections"
-version = "2.0.0"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47"
+checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
 dependencies = [
  "displaydoc",
  "potential_utf",
···
 ]

 [[package]]
 name = "icu_locale_core"
-version = "2.0.0"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a"
+checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
 dependencies = [
  "displaydoc",
  "litemap",
···

 [[package]]
 name = "icu_normalizer"
-version = "2.0.0"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979"
+checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
 dependencies = [
- "displaydoc",
  "icu_collections",
  "icu_normalizer_data",
  "icu_properties",
···

 [[package]]
 name = "icu_normalizer_data"
-version = "2.0.0"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3"
+checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"

 [[package]]
 name = "icu_properties"
-version = "2.0.1"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b"
+checksum = "e93fcd3157766c0c8da2f8cff6ce651a31f0810eaa1c51ec363ef790bbb5fb99"
 dependencies = [
- "displaydoc",
  "icu_collections",
  "icu_locale_core",
  "icu_properties_data",
  "icu_provider",
- "potential_utf",
  "zerotrie",
  "zerovec",
 ]

 [[package]]
 name = "icu_properties_data"
-version = "2.0.1"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632"
+checksum = "02845b3647bb045f1100ecd6480ff52f34c35f82d9880e029d329c21d1054899"

 [[package]]
 name = "icu_provider"
-version = "2.0.0"
+version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af"
+checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
 dependencies = [
  "displaydoc",
  "icu_locale_core",
- "stable_deref_trait",
- "tinystr",
  "writeable",
  "yoke",
  "zerofrom",
···

 [[package]]
 name = "indexmap"
-version = "2.11.0"
+version = "2.12.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9"
+checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f"
 dependencies = [
  "equivalent",
- "hashbrown",
-]
-
-[[package]]
-name = "io-uring"
-version = "0.7.10"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b"
-dependencies = [
- "bitflags",
- "cfg-if",
- "libc",
+ "hashbrown 0.16.0",
 ]

 [[package]]
···
 checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"

 [[package]]
+name = "jobserver"
+version = "0.1.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33"
+dependencies = [
+ "getrandom 0.3.4",
+ "libc",
+]
+
+[[package]]
 name = "js-sys"
-version = "0.3.78"
+version = "0.3.82"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c0b063578492ceec17683ef2f8c5e89121fbd0b172cbc280635ab7567db2738"
+checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65"
 dependencies = [
  "once_cell",
  "wasm-bindgen",
···

 [[package]]
 name = "libc"
-version = "0.2.175"
+version = "0.2.177"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543"
+checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976"

 [[package]]
 name = "libm"
···

 [[package]]
 name = "libredox"
-version = "0.1.9"
+version = "0.1.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3"
+checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb"
 dependencies = [
  "bitflags",
  "libc",
···

 [[package]]
 name = "linux-raw-sys"
-version = "0.9.4"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12"
+checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039"

 [[package]]
 name = "litemap"
-version = "0.8.0"
+version = "0.8.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
+checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"

 [[package]]
 name = "lock_api"
-version = "0.4.13"
+version = "0.4.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765"
+checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965"
 dependencies = [
- "autocfg",
  "scopeguard",
 ]

···
 checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432"

 [[package]]
-name = "loom"
-version = "0.7.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca"
-dependencies = [
- "cfg-if",
- "generator",
- "scoped-tls",
- "tracing",
- "tracing-subscriber",
-]
-
-[[package]]
 name = "lru"
 version = "0.12.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38"
 dependencies = [
- "hashbrown",
+ "hashbrown 0.15.5",
 ]

 [[package]]
···
 checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"

 [[package]]
+name = "match-lookup"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1265724d8cb29dbbc2b0f06fffb8bf1a8c0cf73a78eede9ba73a4a66c52a981e"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
+[[package]]
 name = "matchers"
 version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "memchr"
-version = "2.7.5"
+version = "2.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0"
+checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273"

 [[package]]
 name = "metrohash"
···
 checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"

 [[package]]
-name = "miniz_oxide"
-version = "0.8.9"
+name = "mime_guess"
+version = "2.0.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316"
+checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e"
 dependencies = [
- "adler2",
+ "mime",
+ "unicase",
 ]

 [[package]]
 name = "mio"
-version = "1.0.4"
+version = "1.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c"
+checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873"
 dependencies = [
  "libc",
- "wasi 0.11.1+wasi-snapshot-preview1",
- "windows-sys 0.59.0",
+ "wasi",
+ "windows-sys 0.61.2",
 ]

 [[package]]
 name = "moka"
-version = "0.12.10"
+version = "0.12.11"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a9321642ca94a4282428e6ea4af8cc2ca4eac48ac7a6a4ea8f33f76d0ce70926"
+checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077"
 dependencies = [
  "crossbeam-channel",
  "crossbeam-epoch",
  "crossbeam-utils",
- "loom",
+ "equivalent",
  "parking_lot",
  "portable-atomic",
  "rustc_version",
  "smallvec",
  "tagptr",
- "thiserror 1.0.69",
  "uuid",
 ]

 [[package]]
 name = "multibase"
-version = "0.9.1"
+version = "0.9.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9b3539ec3c1f04ac9748a260728e855f261b4977f5c3406612c884564f329404"
+checksum = "8694bb4835f452b0e3bb06dbebb1d6fc5385b6ca1caf2e55fd165c042390ec77"
 dependencies = [
  "base-x",
+ "base256emoji",
  "data-encoding",
  "data-encoding-macro",
 ]
···

 [[package]]
 name = "nu-ansi-term"
-version = "0.50.1"
+version = "0.50.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399"
+checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
 dependencies = [
- "windows-sys 0.52.0",
+ "windows-sys 0.61.2",
 ]

 [[package]]
···

 [[package]]
 name = "num-bigint-dig"
-version = "0.8.4"
+version = "0.8.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc84195820f291c7697304f3cbdadd1cb7199c0efc917ff5eafd71225c136151"
+checksum = "82c79c15c05d4bf82b6f5ef163104cc81a760d8e874d38ac50ab67c8877b647b"
 dependencies = [
- "byteorder",
  "lazy_static",
  "libm",
  "num-integer",
···
 ]

 [[package]]
-name = "object"
-version = "0.36.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87"
-dependencies = [
- "memchr",
-]
-
-[[package]]
 name = "once_cell"
 version = "1.21.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "openssl"
-version = "0.10.73"
+version = "0.10.74"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8"
+checksum = "24ad14dd45412269e1a30f52ad8f0664f0f4f4a89ee8fe28c3b3527021ebb654"
 dependencies = [
  "bitflags",
  "cfg-if",
···
 dependencies = [
  "proc-macro2",
  "quote",
- "syn",
+ "syn 2.0.108",
 ]

 [[package]]
···

 [[package]]
 name = "openssl-sys"
-version = "0.9.109"
+version = "0.9.110"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571"
+checksum = "0a9f0075ba3c21b09f8e8b2026584b1d18d49388648f2fbbf3c97ea8deced8e2"
 dependencies = [
  "cc",
  "libc",
···
  "ecdsa",
  "elliptic-curve",
  "primeorder",
+ "serdect",
  "sha2",
 ]

···
  "ecdsa",
  "elliptic-curve",
  "primeorder",
+ "serdect",
  "sha2",
 ]

···

 [[package]]
 name = "parking_lot"
-version = "0.12.4"
+version = "0.12.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13"
+checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a"
 dependencies = [
  "lock_api",
  "parking_lot_core",
···

 [[package]]
 name = "parking_lot_core"
-version = "0.9.11"
+version = "0.9.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5"
+checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1"
 dependencies = [
  "cfg-if",
  "libc",
  "redox_syscall",
  "smallvec",
- "windows-targets 0.52.6",
+ "windows-link 0.2.1",
 ]

 [[package]]
···

 [[package]]
 name = "potential_utf"
-version = "0.1.3"
+version = "0.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a"
+checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
 dependencies = [
  "zerovec",
 ]
···
 checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6"
 dependencies = [
  "elliptic-curve",
+ "serdect",
 ]

 [[package]]
 name = "proc-macro2"
-version = "1.0.101"
+version = "1.0.103"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de"
+checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
 dependencies = [
  "unicode-ident",
 ]

 [[package]]
 name = "quickdid"
-version = "1.0.0-rc.3"
+version = "1.0.0-rc.5"
 dependencies = [
  "anyhow",
  "async-trait",
  "atproto-identity",
+ "atproto-jetstream",
+ "atproto-lexicon",
  "axum",
  "bincode",
+ "cadence",
  "deadpool-redis",
+ "httpdate",
  "metrohash",
+ "once_cell",
  "reqwest",
  "serde",
  "serde_json",
  "sqlx",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
  "tokio",
  "tokio-util",
+ "tower-http",
  "tracing",
  "tracing-subscriber",
 ]
···
  "quinn-udp",
  "rustc-hash",
  "rustls",
- "socket2 0.6.0",
- "thiserror 2.0.16",
+ "socket2 0.6.1",
+ "thiserror 2.0.17",
  "tokio",
  "tracing",
  "web-time",
···
 checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
 dependencies = [
  "bytes",
- "getrandom 0.3.3",
+ "getrandom 0.3.4",
  "lru-slab",
  "rand 0.9.2",
  "ring",
···
  "rustls",
  "rustls-pki-types",
  "slab",
- "thiserror 2.0.16",
+ "thiserror 2.0.17",
  "tinyvec",
  "tracing",
  "web-time",
···
  "cfg_aliases",
  "libc",
  "once_cell",
- "socket2 0.6.0",
+ "socket2 0.6.1",
  "tracing",
  "windows-sys 0.60.2",
 ]

 [[package]]
 name = "quote"
-version = "1.0.40"
+version = "1.0.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d"
+checksum = "ce25767e7b499d1b604768e7cde645d14cc8584231ea6b295e9c9eb22c02e1d1"
 dependencies = [
  "proc-macro2",
 ]
···
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
 dependencies = [
- "getrandom 0.3.3",
+ "getrandom 0.3.4",
 ]

 [[package]]
 name = "redis"
-version = "0.32.5"
+version = "0.32.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7cd3650deebc68526b304898b192fa4102a4ef0b9ada24da096559cb60e0eef8"
+checksum = "014cc767fefab6a3e798ca45112bccad9c6e0e218fbd49720042716c73cfef44"
 dependencies = [
  "arc-swap",
  "backon",
···
  "rustls",
  "rustls-native-certs",
  "ryu",
- "socket2 0.6.0",
+ "socket2 0.6.1",
  "tokio",
  "tokio-rustls",
  "tokio-util",
···

 [[package]]
 name = "redox_syscall"
-version = "0.5.17"
+version = "0.5.18"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77"
+checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
 dependencies = [
  "bitflags",
 ]

 [[package]]
 name = "regex-automata"
-version = "0.4.10"
+version = "0.4.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6"
+checksum = "5276caf25ac86c8d810222b3dbb938e512c55c6831a10f3e6ed1c93b84041f1c"
 dependencies = [
  "aho-corasick",
  "memchr",
···

 [[package]]
 name = "regex-syntax"
-version = "0.8.6"
+version = "0.8.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
+checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"

 [[package]]
 name = "reqwest"
-version = "0.12.23"
+version = "0.12.24"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d429f34c8092b2d42c7c93cec323bb4adeb7c67698f70839adec842ec10c7ceb"
+checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
 dependencies = [
  "base64",
  "bytes",
  "encoding_rs",
  "futures-core",
+ "futures-util",
  "h2",
  "http",
  "http-body",
···
  "js-sys",
  "log",
  "mime",
+ "mime_guess",
  "native-tls",
  "percent-encoding",
  "pin-project-lite",
···
 ]

 [[package]]
+name = "reqwest-chain"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "da5c014fb79a8227db44a0433d748107750d2550b7fca55c59a3d7ee7d2ee2b2"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "http",
+ "reqwest-middleware",
+]
+
+[[package]]
+name = "reqwest-middleware"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "57f17d28a6e6acfe1733fe24bcd30774d13bffa4b8a22535b4c8c98423088d4e"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "http",
+ "reqwest",
+ "serde",
+ "thiserror 1.0.69",
+ "tower-service",
+]
+
+[[package]]
 name = "resolv-conf"
-version = "0.7.4"
+version = "0.7.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3"
+checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799"

 [[package]]
 name = "rfc6979"
···
 ]

 [[package]]
-name = "rustc-demangle"
-version = "0.1.26"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace"
-
-[[package]]
 name = "rustc-hash"
 version = "2.1.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
···

 [[package]]
 name = "rustix"
-version = "1.0.8"
+version = "1.1.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
+checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e"
 dependencies = [
  "bitflags",
  "errno",
  "libc",
  "linux-raw-sys",
- "windows-sys 0.60.2",
+ "windows-sys 0.61.2",
 ]

 [[package]]
 name = "rustls"
-version = "0.23.31"
+version = "0.23.34"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c0ebcbd2f03de0fc1122ad9bb24b127a5a6cd51d72604a3f3c50ac459762b6cc"
+checksum = "6a9586e9ee2b4f8fab52a0048ca7334d7024eef48e2cb9407e3497bb7cab7fa7"
 dependencies = [
  "once_cell",
  "ring",
···
 ]

 [[package]]
 name = "rustls-native-certs"
-version = "0.8.1"
+version = "0.8.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7fcff2dd52b58a8d98a70243663a0d234c4e2b79235637849d15913394a247d3"
+checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923"
 dependencies = [
  "openssl-probe",
  "rustls-pki-types",
  "schannel",
- "security-framework 3.3.0",
+ "security-framework 3.5.1",
 ]

 [[package]]
 name = "rustls-pki-types"
-version = "1.12.0"
+version = "1.13.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "229a4a4c221013e7e1f1a043678c5cc39fe5171437c88fb47151a21e6f5b5c79"
+checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a"
 dependencies = [
  "web-time",
  "zeroize",
···

 [[package]]
 name = "rustls-webpki"
-version = "0.103.4"
+version = "0.103.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc"
+checksum = "2ffdfa2f5286e2247234e03f680868ac2815974dc39e00ea15adc445d0aafe52"
 dependencies = [
  "ring",
  "rustls-pki-types",
···

 [[package]]
 name = "schannel"
-
version = "0.1.27"
2379
+
version = "0.1.28"
2290
2380
source = "registry+https://github.com/rust-lang/crates.io-index"
2291
-
checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d"
2381
+
checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1"
2292
2382
dependencies = [
2293
-
"windows-sys 0.59.0",
2383
+
"windows-sys 0.61.2",
2294
2384
]
2295
2385
2296
2386
[[package]]
2297
-
name = "scoped-tls"
2298
-
version = "1.0.1"
2299
-
source = "registry+https://github.com/rust-lang/crates.io-index"
2300
-
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
2301
-
2302
-
[[package]]
2303
2387
name = "scopeguard"
2304
2388
version = "1.2.0"
2305
2389
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2335
2419
2336
2420
[[package]]
2337
2421
name = "security-framework"
2338
-
version = "3.3.0"
2422
+
version = "3.5.1"
2339
2423
source = "registry+https://github.com/rust-lang/crates.io-index"
2340
-
checksum = "80fb1d92c5028aa318b4b8bd7302a5bfcf48be96a37fc6fc790f806b0004ee0c"
2424
+
checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef"
2341
2425
dependencies = [
2342
2426
"bitflags",
2343
2427
"core-foundation 0.10.1",
···
2348
2432
2349
2433
[[package]]
2350
2434
name = "security-framework-sys"
2351
-
version = "2.14.0"
2435
+
version = "2.15.0"
2352
2436
source = "registry+https://github.com/rust-lang/crates.io-index"
2353
-
checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32"
2437
+
checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0"
2354
2438
dependencies = [
2355
2439
"core-foundation-sys",
2356
2440
"libc",
···
2358
2442
2359
2443
[[package]]
2360
2444
name = "semver"
2361
-
version = "1.0.26"
2445
+
version = "1.0.27"
2362
2446
source = "registry+https://github.com/rust-lang/crates.io-index"
2363
-
checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0"
2447
+
checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
2364
2448
2365
2449
[[package]]
2366
2450
name = "serde"
2367
-
version = "1.0.219"
2451
+
version = "1.0.228"
2368
2452
source = "registry+https://github.com/rust-lang/crates.io-index"
2369
-
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
2453
+
checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
2370
2454
dependencies = [
2455
+
"serde_core",
2371
2456
"serde_derive",
2372
2457
]
2373
2458
2374
2459
[[package]]
2375
2460
name = "serde_bytes"
2376
-
version = "0.11.17"
2461
+
version = "0.11.19"
2377
2462
source = "registry+https://github.com/rust-lang/crates.io-index"
2378
-
checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96"
2463
+
checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
2379
2464
dependencies = [
2380
2465
"serde",
2466
+
"serde_core",
2467
+
]
2468
+
2469
+
[[package]]
2470
+
name = "serde_core"
2471
+
version = "1.0.228"
2472
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2473
+
checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad"
2474
+
dependencies = [
2475
+
"serde_derive",
2381
2476
]
2382
2477
2383
2478
[[package]]
2384
2479
name = "serde_derive"
2385
-
version = "1.0.219"
2480
+
version = "1.0.228"
2386
2481
source = "registry+https://github.com/rust-lang/crates.io-index"
2387
-
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
2482
+
checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
2388
2483
dependencies = [
2389
2484
"proc-macro2",
2390
2485
"quote",
2391
-
"syn",
2486
+
"syn 2.0.108",
2392
2487
]
2393
2488
2394
2489
[[package]]
2395
2490
name = "serde_ipld_dagcbor"
2396
-
version = "0.6.3"
2491
+
version = "0.6.4"
2397
2492
source = "registry+https://github.com/rust-lang/crates.io-index"
2398
-
checksum = "99600723cf53fb000a66175555098db7e75217c415bdd9a16a65d52a19dcc4fc"
2493
+
checksum = "46182f4f08349a02b45c998ba3215d3f9de826246ba02bb9dddfe9a2a2100778"
2399
2494
dependencies = [
2400
2495
"cbor4ii",
2401
2496
"ipld-core",
···
2405
2500
2406
2501
[[package]]
2407
2502
name = "serde_json"
2408
-
version = "1.0.143"
2503
+
version = "1.0.145"
2409
2504
source = "registry+https://github.com/rust-lang/crates.io-index"
2410
-
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
2505
+
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
2411
2506
dependencies = [
2412
2507
"itoa",
2413
2508
"memchr",
2414
2509
"ryu",
2415
2510
"serde",
2511
+
"serde_core",
2416
2512
]
2417
2513
2418
2514
[[package]]
2419
2515
name = "serde_path_to_error"
2420
-
version = "0.1.17"
2516
+
version = "0.1.20"
2421
2517
source = "registry+https://github.com/rust-lang/crates.io-index"
2422
-
checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a"
2518
+
checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457"
2423
2519
dependencies = [
2424
2520
"itoa",
2425
2521
"serde",
2522
+
"serde_core",
2426
2523
]
2427
2524
2428
2525
[[package]]
···
2504
2601
]
2505
2602
2506
2603
[[package]]
2604
+
name = "simdutf8"
2605
+
version = "0.1.5"
2606
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2607
+
checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
2608
+
2609
+
[[package]]
2507
2610
name = "slab"
2508
2611
version = "0.4.11"
2509
2612
source = "registry+https://github.com/rust-lang/crates.io-index"
···
2530
2633
2531
2634
[[package]]
2532
2635
name = "socket2"
2533
-
version = "0.6.0"
2636
+
version = "0.6.1"
2534
2637
source = "registry+https://github.com/rust-lang/crates.io-index"
2535
-
checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807"
2638
+
checksum = "17129e116933cf371d018bb80ae557e889637989d8638274fb25622827b03881"
2536
2639
dependencies = [
2537
2640
"libc",
2538
-
"windows-sys 0.59.0",
2641
+
"windows-sys 0.60.2",
2539
2642
]
2540
2643
2541
2644
[[package]]
···
2578
2681
dependencies = [
2579
2682
"base64",
2580
2683
"bytes",
2581
-
"chrono",
2582
2684
"crc",
2583
2685
"crossbeam-queue",
2584
2686
"either",
···
2587
2689
"futures-intrusive",
2588
2690
"futures-io",
2589
2691
"futures-util",
2590
-
"hashbrown",
2692
+
"hashbrown 0.15.5",
2591
2693
"hashlink",
2592
2694
"indexmap",
2593
2695
"log",
···
2598
2700
"serde_json",
2599
2701
"sha2",
2600
2702
"smallvec",
2601
-
"thiserror 2.0.16",
2703
+
"thiserror 2.0.17",
2602
2704
"tokio",
2603
2705
"tokio-stream",
2604
2706
"tracing",
···
2615
2717
"quote",
2616
2718
"sqlx-core",
2617
2719
"sqlx-macros-core",
2618
-
"syn",
2720
+
"syn 2.0.108",
2619
2721
]
2620
2722
2621
2723
[[package]]
···
2638
2740
"sqlx-mysql",
2639
2741
"sqlx-postgres",
2640
2742
"sqlx-sqlite",
2641
-
"syn",
2743
+
"syn 2.0.108",
2642
2744
"tokio",
2643
2745
"url",
2644
2746
]
···
2654
2756
"bitflags",
2655
2757
"byteorder",
2656
2758
"bytes",
2657
-
"chrono",
2658
2759
"crc",
2659
2760
"digest",
2660
2761
"dotenvy",
···
2681
2782
"smallvec",
2682
2783
"sqlx-core",
2683
2784
"stringprep",
2684
-
"thiserror 2.0.16",
2785
+
"thiserror 2.0.17",
2685
2786
"tracing",
2686
2787
"whoami",
2687
2788
]
···
2696
2797
"base64",
2697
2798
"bitflags",
2698
2799
"byteorder",
2699
-
"chrono",
2700
2800
"crc",
2701
2801
"dotenvy",
2702
2802
"etcetera",
···
2719
2819
"smallvec",
2720
2820
"sqlx-core",
2721
2821
"stringprep",
2722
-
"thiserror 2.0.16",
2822
+
"thiserror 2.0.17",
2723
2823
"tracing",
2724
2824
"whoami",
2725
2825
]
···
2731
2831
checksum = "c2d12fe70b2c1b4401038055f90f151b78208de1f9f89a7dbfd41587a10c3eea"
2732
2832
dependencies = [
2733
2833
"atoi",
2734
-
"chrono",
2735
2834
"flume",
2736
2835
"futures-channel",
2737
2836
"futures-core",
···
2744
2843
"serde",
2745
2844
"serde_urlencoded",
2746
2845
"sqlx-core",
2747
-
"thiserror 2.0.16",
2846
+
"thiserror 2.0.17",
2748
2847
"tracing",
2749
2848
"url",
2750
2849
]
2751
2850
2752
2851
[[package]]
2753
2852
name = "stable_deref_trait"
2754
-
version = "1.2.0"
2853
+
version = "1.2.1"
2755
2854
source = "registry+https://github.com/rust-lang/crates.io-index"
2756
-
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
2855
+
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
2757
2856
2758
2857
[[package]]
2759
2858
name = "stringprep"
···
2774
2873
2775
2874
[[package]]
2776
2875
name = "syn"
2777
-
version = "2.0.106"
2876
+
version = "1.0.109"
2778
2877
source = "registry+https://github.com/rust-lang/crates.io-index"
2779
-
checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6"
2878
+
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
2879
+
dependencies = [
2880
+
"proc-macro2",
2881
+
"quote",
2882
+
"unicode-ident",
2883
+
]
2884
+
2885
+
[[package]]
2886
+
name = "syn"
2887
+
version = "2.0.108"
2888
+
source = "registry+https://github.com/rust-lang/crates.io-index"
2889
+
checksum = "da58917d35242480a05c2897064da0a80589a2a0476c9a3f2fdc83b53502e917"
2780
2890
dependencies = [
2781
2891
"proc-macro2",
2782
2892
"quote",
···
2800
2910
dependencies = [
2801
2911
"proc-macro2",
2802
2912
"quote",
2803
-
"syn",
2913
+
"syn 2.0.108",
2804
2914
]
2805
2915
2806
2916
[[package]]
···
2832
2942
2833
2943
[[package]]
2834
2944
name = "tempfile"
2835
-
version = "3.21.0"
2945
+
version = "3.23.0"
2836
2946
source = "registry+https://github.com/rust-lang/crates.io-index"
2837
-
checksum = "15b61f8f20e3a6f7e0649d825294eaf317edce30f82cf6026e7e4cb9222a7d1e"
2947
+
checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16"
2838
2948
dependencies = [
2839
2949
"fastrand",
2840
-
"getrandom 0.3.3",
2950
+
"getrandom 0.3.4",
2841
2951
"once_cell",
2842
2952
"rustix",
2843
-
"windows-sys 0.60.2",
2953
+
"windows-sys 0.61.2",
2844
2954
]
2845
2955
2846
2956
[[package]]
···
2854
2964
2855
2965
[[package]]
2856
2966
name = "thiserror"
2857
-
version = "2.0.16"
2967
+
version = "2.0.17"
2858
2968
source = "registry+https://github.com/rust-lang/crates.io-index"
2859
-
checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0"
2969
+
checksum = "f63587ca0f12b72a0600bcba1d40081f830876000bb46dd2337a3051618f4fc8"
2860
2970
dependencies = [
2861
-
"thiserror-impl 2.0.16",
2971
+
"thiserror-impl 2.0.17",
2862
2972
]
2863
2973
2864
2974
[[package]]
···
2869
2979
dependencies = [
2870
2980
"proc-macro2",
2871
2981
"quote",
2872
-
"syn",
2982
+
"syn 2.0.108",
2873
2983
]
2874
2984
2875
2985
[[package]]
2876
2986
name = "thiserror-impl"
2877
-
version = "2.0.16"
2987
+
version = "2.0.17"
2878
2988
source = "registry+https://github.com/rust-lang/crates.io-index"
2879
-
checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960"
2989
+
checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
2880
2990
dependencies = [
2881
2991
"proc-macro2",
2882
2992
"quote",
2883
-
"syn",
2993
+
"syn 2.0.108",
2884
2994
]
2885
2995
2886
2996
[[package]]
···
2894
3004
2895
3005
[[package]]
2896
3006
name = "tinystr"
2897
-
version = "0.8.1"
3007
+
version = "0.8.2"
2898
3008
source = "registry+https://github.com/rust-lang/crates.io-index"
2899
-
checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b"
3009
+
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
2900
3010
dependencies = [
2901
3011
"displaydoc",
2902
3012
"zerovec",
···
2919
3029
2920
3030
[[package]]
2921
3031
name = "tokio"
2922
-
version = "1.47.1"
3032
+
version = "1.48.0"
2923
3033
source = "registry+https://github.com/rust-lang/crates.io-index"
2924
-
checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038"
3034
+
checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408"
2925
3035
dependencies = [
2926
-
"backtrace",
2927
3036
"bytes",
2928
-
"io-uring",
2929
3037
"libc",
2930
3038
"mio",
3039
+
"parking_lot",
2931
3040
"pin-project-lite",
2932
3041
"signal-hook-registry",
2933
-
"slab",
2934
-
"socket2 0.6.0",
3042
+
"socket2 0.6.1",
2935
3043
"tokio-macros",
2936
-
"windows-sys 0.59.0",
3044
+
"windows-sys 0.61.2",
2937
3045
]
2938
3046
2939
3047
[[package]]
2940
3048
name = "tokio-macros"
2941
-
version = "2.5.0"
3049
+
version = "2.6.0"
2942
3050
source = "registry+https://github.com/rust-lang/crates.io-index"
2943
-
checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8"
3051
+
checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
2944
3052
dependencies = [
2945
3053
"proc-macro2",
2946
3054
"quote",
2947
-
"syn",
3055
+
"syn 2.0.108",
2948
3056
]
2949
3057
2950
3058
[[package]]
···
2959
3067
2960
3068
[[package]]
2961
3069
name = "tokio-rustls"
2962
-
version = "0.26.2"
3070
+
version = "0.26.4"
2963
3071
source = "registry+https://github.com/rust-lang/crates.io-index"
2964
-
checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b"
3072
+
checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
2965
3073
dependencies = [
2966
3074
"rustls",
2967
3075
"tokio",
···
2980
3088
2981
3089
[[package]]
2982
3090
name = "tokio-util"
2983
-
version = "0.7.16"
3091
+
version = "0.7.17"
2984
3092
source = "registry+https://github.com/rust-lang/crates.io-index"
2985
-
checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5"
3093
+
checksum = "2efa149fe76073d6e8fd97ef4f4eca7b67f599660115591483572e406e165594"
2986
3094
dependencies = [
2987
3095
"bytes",
2988
3096
"futures-core",
···
2993
3101
]
2994
3102
2995
3103
[[package]]
3104
+
name = "tokio-websockets"
3105
+
version = "0.11.4"
3106
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3107
+
checksum = "9fcaf159b4e7a376b05b5bfd77bfd38f3324f5fce751b4213bfc7eaa47affb4e"
3108
+
dependencies = [
3109
+
"base64",
3110
+
"bytes",
3111
+
"futures-core",
3112
+
"futures-sink",
3113
+
"http",
3114
+
"httparse",
3115
+
"rand 0.9.2",
3116
+
"ring",
3117
+
"rustls-native-certs",
3118
+
"rustls-pki-types",
3119
+
"simdutf8",
3120
+
"tokio",
3121
+
"tokio-rustls",
3122
+
"tokio-util",
3123
+
]
3124
+
3125
+
[[package]]
2996
3126
name = "tower"
2997
3127
version = "0.5.2"
2998
3128
source = "registry+https://github.com/rust-lang/crates.io-index"
···
3016
3146
dependencies = [
3017
3147
"bitflags",
3018
3148
"bytes",
3149
+
"futures-core",
3019
3150
"futures-util",
3020
3151
"http",
3021
3152
"http-body",
3153
+
"http-body-util",
3154
+
"http-range-header",
3155
+
"httpdate",
3022
3156
"iri-string",
3157
+
"mime",
3158
+
"mime_guess",
3159
+
"percent-encoding",
3023
3160
"pin-project-lite",
3161
+
"tokio",
3162
+
"tokio-util",
3024
3163
"tower",
3025
3164
"tower-layer",
3026
3165
"tower-service",
3166
+
"tracing",
3027
3167
]
3028
3168
3029
3169
[[package]]
···
3058
3198
dependencies = [
3059
3199
"proc-macro2",
3060
3200
"quote",
3061
-
"syn",
3201
+
"syn 2.0.108",
3062
3202
]
3063
3203
3064
3204
[[package]]
···
3108
3248
3109
3249
[[package]]
3110
3250
name = "typenum"
3111
-
version = "1.18.0"
3251
+
version = "1.19.0"
3252
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3253
+
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
3254
+
3255
+
[[package]]
3256
+
name = "ulid"
3257
+
version = "1.2.1"
3258
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3259
+
checksum = "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe"
3260
+
dependencies = [
3261
+
"rand 0.9.2",
3262
+
"web-time",
3263
+
]
3264
+
3265
+
[[package]]
3266
+
name = "unicase"
3267
+
version = "2.8.1"
3112
3268
source = "registry+https://github.com/rust-lang/crates.io-index"
3113
-
checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f"
3269
+
checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539"
3114
3270
3115
3271
[[package]]
3116
3272
name = "unicode-bidi"
···
3120
3276
3121
3277
[[package]]
3122
3278
name = "unicode-ident"
3123
-
version = "1.0.18"
3279
+
version = "1.0.22"
3124
3280
source = "registry+https://github.com/rust-lang/crates.io-index"
3125
-
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
3281
+
checksum = "9312f7c4f6ff9069b165498234ce8be658059c6728633667c526e27dc2cf1df5"
3126
3282
3127
3283
[[package]]
3128
3284
name = "unicode-normalization"
3129
-
version = "0.1.24"
3285
+
version = "0.1.25"
3130
3286
source = "registry+https://github.com/rust-lang/crates.io-index"
3131
-
checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956"
3287
+
checksum = "5fd4f6878c9cb28d874b009da9e8d183b5abc80117c40bbd187a1fde336be6e8"
3132
3288
dependencies = [
3133
3289
"tinyvec",
3134
3290
]
3135
3291
3136
3292
[[package]]
3137
3293
name = "unicode-properties"
3138
-
version = "0.1.3"
3294
+
version = "0.1.4"
3139
3295
source = "registry+https://github.com/rust-lang/crates.io-index"
3140
-
checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0"
3296
+
checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d"
3141
3297
3142
3298
[[package]]
3143
3299
name = "unsigned-varint"
···
3170
3326
]
3171
3327
3172
3328
[[package]]
3329
+
name = "urlencoding"
3330
+
version = "2.1.3"
3331
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3332
+
checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da"
3333
+
3334
+
[[package]]
3173
3335
name = "utf8_iter"
3174
3336
version = "1.0.4"
3175
3337
source = "registry+https://github.com/rust-lang/crates.io-index"
···
3181
3343
source = "registry+https://github.com/rust-lang/crates.io-index"
3182
3344
checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2"
3183
3345
dependencies = [
3184
-
"getrandom 0.3.3",
3346
+
"getrandom 0.3.4",
3185
3347
"js-sys",
3186
3348
"wasm-bindgen",
3187
3349
]
···
3226
3388
checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
3227
3389
3228
3390
[[package]]
3229
-
name = "wasi"
3230
-
version = "0.14.3+wasi-0.2.4"
3391
+
name = "wasip2"
3392
+
version = "1.0.1+wasi-0.2.4"
3231
3393
source = "registry+https://github.com/rust-lang/crates.io-index"
3232
-
checksum = "6a51ae83037bdd272a9e28ce236db8c07016dd0d50c27038b3f407533c030c95"
3394
+
checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
3233
3395
dependencies = [
3234
3396
"wit-bindgen",
3235
3397
]
···
3242
3404
3243
3405
[[package]]
3244
3406
name = "wasm-bindgen"
3245
-
version = "0.2.101"
3407
+
version = "0.2.105"
3246
3408
source = "registry+https://github.com/rust-lang/crates.io-index"
3247
-
checksum = "7e14915cadd45b529bb8d1f343c4ed0ac1de926144b746e2710f9cd05df6603b"
3409
+
checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60"
3248
3410
dependencies = [
3249
3411
"cfg-if",
3250
3412
"once_cell",
···
3254
3416
]
3255
3417
3256
3418
[[package]]
3257
-
name = "wasm-bindgen-backend"
3258
-
version = "0.2.101"
3259
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3260
-
checksum = "e28d1ba982ca7923fd01448d5c30c6864d0a14109560296a162f80f305fb93bb"
3261
-
dependencies = [
3262
-
"bumpalo",
3263
-
"log",
3264
-
"proc-macro2",
3265
-
"quote",
3266
-
"syn",
3267
-
"wasm-bindgen-shared",
3268
-
]
3269
-
3270
-
[[package]]
3271
3419
name = "wasm-bindgen-futures"
3272
-
version = "0.4.51"
3420
+
version = "0.4.55"
3273
3421
source = "registry+https://github.com/rust-lang/crates.io-index"
3274
-
checksum = "0ca85039a9b469b38336411d6d6ced91f3fc87109a2a27b0c197663f5144dffe"
3422
+
checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0"
3275
3423
dependencies = [
3276
3424
"cfg-if",
3277
3425
"js-sys",
···
3282
3430
3283
3431
[[package]]
3284
3432
name = "wasm-bindgen-macro"
3285
-
version = "0.2.101"
3433
+
version = "0.2.105"
3286
3434
source = "registry+https://github.com/rust-lang/crates.io-index"
3287
-
checksum = "7c3d463ae3eff775b0c45df9da45d68837702ac35af998361e2c84e7c5ec1b0d"
3435
+
checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2"
3288
3436
dependencies = [
3289
3437
"quote",
3290
3438
"wasm-bindgen-macro-support",
···
3292
3440
3293
3441
[[package]]
3294
3442
name = "wasm-bindgen-macro-support"
3295
-
version = "0.2.101"
3443
+
version = "0.2.105"
3296
3444
source = "registry+https://github.com/rust-lang/crates.io-index"
3297
-
checksum = "7bb4ce89b08211f923caf51d527662b75bdc9c9c7aab40f86dcb9fb85ac552aa"
3445
+
checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc"
3298
3446
dependencies = [
3447
+
"bumpalo",
3299
3448
"proc-macro2",
3300
3449
"quote",
3301
-
"syn",
3302
-
"wasm-bindgen-backend",
3450
+
"syn 2.0.108",
3303
3451
"wasm-bindgen-shared",
3304
3452
]
3305
3453
3306
3454
[[package]]
3307
3455
name = "wasm-bindgen-shared"
3308
-
version = "0.2.101"
3456
+
version = "0.2.105"
3309
3457
source = "registry+https://github.com/rust-lang/crates.io-index"
3310
-
checksum = "f143854a3b13752c6950862c906306adb27c7e839f7414cec8fea35beab624c1"
3458
+
checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76"
3311
3459
dependencies = [
3312
3460
"unicode-ident",
3313
3461
]
3314
3462
3315
3463
[[package]]
3316
3464
name = "web-sys"
3317
-
version = "0.3.78"
3465
+
version = "0.3.82"
3318
3466
source = "registry+https://github.com/rust-lang/crates.io-index"
3319
-
checksum = "77e4b637749ff0d92b8fad63aa1f7cff3cbe125fd49c175cd6345e7272638b12"
3467
+
checksum = "3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1"
3320
3468
dependencies = [
3321
3469
"js-sys",
3322
3470
"wasm-bindgen",
···
3334
3482
3335
3483
[[package]]
3336
3484
name = "webpki-roots"
3337
-
version = "1.0.2"
3485
+
version = "1.0.4"
3338
3486
source = "registry+https://github.com/rust-lang/crates.io-index"
3339
-
checksum = "7e8983c3ab33d6fb807cfcdad2491c4ea8cbc8ed839181c7dfd9c67c83e261b2"
3487
+
checksum = "b2878ef029c47c6e8cf779119f20fcf52bde7ad42a731b2a304bc221df17571e"
3340
3488
dependencies = [
3341
3489
"rustls-pki-types",
3342
3490
]
···
3353
3501
3354
3502
[[package]]
3355
3503
name = "widestring"
3356
-
version = "1.2.0"
3357
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3358
-
checksum = "dd7cf3379ca1aac9eea11fba24fd7e315d621f8dfe35c8d7d2be8b793726e07d"
3359
-
3360
-
[[package]]
3361
-
name = "windows"
3362
-
version = "0.61.3"
3363
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3364
-
checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893"
3365
-
dependencies = [
3366
-
"windows-collections",
3367
-
"windows-core",
3368
-
"windows-future",
3369
-
"windows-link",
3370
-
"windows-numerics",
3371
-
]
3372
-
3373
-
[[package]]
3374
-
name = "windows-collections"
3375
-
version = "0.2.0"
3376
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3377
-
checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8"
3378
-
dependencies = [
3379
-
"windows-core",
3380
-
]
3381
-
3382
-
[[package]]
3383
-
name = "windows-core"
3384
-
version = "0.61.2"
3385
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3386
-
checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3"
3387
-
dependencies = [
3388
-
"windows-implement",
3389
-
"windows-interface",
3390
-
"windows-link",
3391
-
"windows-result",
3392
-
"windows-strings",
3393
-
]
3394
-
3395
-
[[package]]
3396
-
name = "windows-future"
3397
-
version = "0.2.1"
3504
+
version = "1.2.1"
3398
3505
source = "registry+https://github.com/rust-lang/crates.io-index"
3399
-
checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e"
3400
-
dependencies = [
3401
-
"windows-core",
3402
-
"windows-link",
3403
-
"windows-threading",
3404
-
]
3405
-
3406
-
[[package]]
3407
-
name = "windows-implement"
3408
-
version = "0.60.0"
3409
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3410
-
checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836"
3411
-
dependencies = [
3412
-
"proc-macro2",
3413
-
"quote",
3414
-
"syn",
3415
-
]
3416
-
3417
-
[[package]]
3418
-
name = "windows-interface"
3419
-
version = "0.59.1"
3420
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3421
-
checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8"
3422
-
dependencies = [
3423
-
"proc-macro2",
3424
-
"quote",
3425
-
"syn",
3426
-
]
3506
+
checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471"
3427
3507
3428
3508
[[package]]
3429
3509
name = "windows-link"
···
3432
3512
checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a"
3433
3513
3434
3514
[[package]]
3435
-
name = "windows-numerics"
3436
-
version = "0.2.0"
3515
+
name = "windows-link"
3516
+
version = "0.2.1"
3437
3517
source = "registry+https://github.com/rust-lang/crates.io-index"
3438
-
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
3439
-
dependencies = [
3440
-
"windows-core",
3441
-
"windows-link",
3442
-
]
3518
+
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
3443
3519
3444
3520
[[package]]
3445
3521
name = "windows-registry"
···
3447
3523
source = "registry+https://github.com/rust-lang/crates.io-index"
3448
3524
checksum = "5b8a9ed28765efc97bbc954883f4e6796c33a06546ebafacbabee9696967499e"
3449
3525
dependencies = [
3450
-
"windows-link",
3526
+
"windows-link 0.1.3",
3451
3527
"windows-result",
3452
3528
"windows-strings",
3453
3529
]
···
3458
3534
source = "registry+https://github.com/rust-lang/crates.io-index"
3459
3535
checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6"
3460
3536
dependencies = [
3461
-
"windows-link",
3537
+
"windows-link 0.1.3",
3462
3538
]
3463
3539
3464
3540
[[package]]
···
3467
3543
source = "registry+https://github.com/rust-lang/crates.io-index"
3468
3544
checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57"
3469
3545
dependencies = [
3470
-
"windows-link",
3546
+
"windows-link 0.1.3",
3471
3547
]
3472
3548
3473
3549
[[package]]
···
3490
3566
3491
3567
[[package]]
3492
3568
name = "windows-sys"
3493
-
version = "0.59.0"
3569
+
version = "0.60.2"
3494
3570
source = "registry+https://github.com/rust-lang/crates.io-index"
3495
-
checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b"
3571
+
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
3496
3572
dependencies = [
3497
-
"windows-targets 0.52.6",
3573
+
"windows-targets 0.53.5",
3498
3574
]
3499
3575
3500
3576
[[package]]
3501
3577
name = "windows-sys"
3502
-
version = "0.60.2"
3578
+
version = "0.61.2"
3503
3579
source = "registry+https://github.com/rust-lang/crates.io-index"
3504
-
checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb"
3580
+
checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc"
3505
3581
dependencies = [
3506
-
"windows-targets 0.53.3",
3582
+
"windows-link 0.2.1",
3507
3583
]
3508
3584
3509
3585
[[package]]
···
3539
3615
3540
3616
[[package]]
3541
3617
name = "windows-targets"
3542
-
version = "0.53.3"
3618
+
version = "0.53.5"
3543
3619
source = "registry+https://github.com/rust-lang/crates.io-index"
3544
-
checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91"
3620
+
checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3"
3545
3621
dependencies = [
3546
-
"windows-link",
3547
-
"windows_aarch64_gnullvm 0.53.0",
3548
-
"windows_aarch64_msvc 0.53.0",
3549
-
"windows_i686_gnu 0.53.0",
3550
-
"windows_i686_gnullvm 0.53.0",
3551
-
"windows_i686_msvc 0.53.0",
3552
-
"windows_x86_64_gnu 0.53.0",
3553
-
"windows_x86_64_gnullvm 0.53.0",
3554
-
"windows_x86_64_msvc 0.53.0",
3555
-
]
3556
-
3557
-
[[package]]
3558
-
name = "windows-threading"
3559
-
version = "0.1.0"
3560
-
source = "registry+https://github.com/rust-lang/crates.io-index"
3561
-
checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6"
3562
-
dependencies = [
3563
-
"windows-link",
3622
+
"windows-link 0.2.1",
3623
+
"windows_aarch64_gnullvm 0.53.1",
3624
+
"windows_aarch64_msvc 0.53.1",
3625
+
"windows_i686_gnu 0.53.1",
3626
+
"windows_i686_gnullvm 0.53.1",
3627
+
"windows_i686_msvc 0.53.1",
3628
+
"windows_x86_64_gnu 0.53.1",
3629
+
"windows_x86_64_gnullvm 0.53.1",
3630
+
"windows_x86_64_msvc 0.53.1",
3564
3631
]
3565
3632
3566
3633
[[package]]
···
3577
3644
3578
3645
[[package]]
3579
3646
name = "windows_aarch64_gnullvm"
3580
-
version = "0.53.0"
3647
+
version = "0.53.1"
3581
3648
source = "registry+https://github.com/rust-lang/crates.io-index"
3582
-
checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
3649
+
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
3583
3650
3584
3651
[[package]]
3585
3652
name = "windows_aarch64_msvc"
···
3595
3662
3596
3663
[[package]]
3597
3664
name = "windows_aarch64_msvc"
3598
-
version = "0.53.0"
3665
+
version = "0.53.1"
3599
3666
source = "registry+https://github.com/rust-lang/crates.io-index"
3600
-
checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
3667
+
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
3601
3668
3602
3669
[[package]]
3603
3670
name = "windows_i686_gnu"
···
3613
3680
3614
3681
[[package]]
3615
3682
name = "windows_i686_gnu"
3616
-
version = "0.53.0"
3683
+
version = "0.53.1"
3617
3684
source = "registry+https://github.com/rust-lang/crates.io-index"
3618
-
checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
3685
+
checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3"
3619
3686
3620
3687
[[package]]
3621
3688
name = "windows_i686_gnullvm"
···
3625
3692
3626
3693
[[package]]
3627
3694
name = "windows_i686_gnullvm"
3628
-
version = "0.53.0"
3695
+
version = "0.53.1"
3629
3696
source = "registry+https://github.com/rust-lang/crates.io-index"
3630
-
checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
3697
+
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
3631
3698
3632
3699
[[package]]
3633
3700
name = "windows_i686_msvc"
···
3643
3710
3644
3711
[[package]]
3645
3712
name = "windows_i686_msvc"
3646
-
version = "0.53.0"
3713
+
version = "0.53.1"
3647
3714
source = "registry+https://github.com/rust-lang/crates.io-index"
3648
-
checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
3715
+
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
3649
3716
3650
3717
[[package]]
3651
3718
name = "windows_x86_64_gnu"
···
3661
3728
3662
3729
[[package]]
3663
3730
name = "windows_x86_64_gnu"
3664
-
version = "0.53.0"
3731
+
version = "0.53.1"
3665
3732
source = "registry+https://github.com/rust-lang/crates.io-index"
3666
-
checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
3733
+
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
3667
3734
3668
3735
[[package]]
3669
3736
name = "windows_x86_64_gnullvm"
···
3679
3746
3680
3747
[[package]]
3681
3748
name = "windows_x86_64_gnullvm"
3682
-
version = "0.53.0"
3749
+
version = "0.53.1"
3683
3750
source = "registry+https://github.com/rust-lang/crates.io-index"
3684
-
checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
3751
+
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
3685
3752
3686
3753
[[package]]
3687
3754
name = "windows_x86_64_msvc"
···
3697
3764
3698
3765
[[package]]
3699
3766
name = "windows_x86_64_msvc"
3700
-
version = "0.53.0"
3767
+
version = "0.53.1"
3701
3768
source = "registry+https://github.com/rust-lang/crates.io-index"
3702
-
checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
3769
+
checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
3703
3770
3704
3771
[[package]]
3705
3772
name = "winreg"
···
3713
3780
3714
3781
[[package]]
3715
3782
name = "wit-bindgen"
3716
-
version = "0.45.1"
3783
+
version = "0.46.0"
3717
3784
source = "registry+https://github.com/rust-lang/crates.io-index"
3718
-
checksum = "5c573471f125075647d03df72e026074b7203790d41351cd6edc96f46bcccd36"
3785
+
checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
3719
3786
3720
3787
[[package]]
3721
3788
name = "writeable"
3722
-
version = "0.6.1"
3789
+
version = "0.6.2"
3723
3790
source = "registry+https://github.com/rust-lang/crates.io-index"
3724
-
checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb"
3791
+
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
3725
3792
3726
3793
[[package]]
3727
3794
name = "yoke"
3728
-
version = "0.8.0"
3795
+
version = "0.8.1"
3729
3796
source = "registry+https://github.com/rust-lang/crates.io-index"
3730
-
checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc"
3797
+
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
3731
3798
dependencies = [
3732
-
"serde",
3733
3799
"stable_deref_trait",
3734
3800
"yoke-derive",
3735
3801
"zerofrom",
···
3737
3803
3738
3804
[[package]]
3739
3805
name = "yoke-derive"
3740
-
version = "0.8.0"
3806
+
version = "0.8.1"
3741
3807
source = "registry+https://github.com/rust-lang/crates.io-index"
3742
-
checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6"
3808
+
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
3743
3809
dependencies = [
3744
3810
"proc-macro2",
3745
3811
"quote",
3746
-
"syn",
3812
+
"syn 2.0.108",
3747
3813
"synstructure",
3748
3814
]
3749
3815
3750
3816
[[package]]
3751
3817
name = "zerocopy"
3752
-
version = "0.8.26"
3818
+
version = "0.8.27"
3753
3819
source = "registry+https://github.com/rust-lang/crates.io-index"
3754
-
checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f"
3820
+
checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c"
3755
3821
dependencies = [
3756
3822
"zerocopy-derive",
3757
3823
]
3758
3824
3759
3825
[[package]]
3760
3826
name = "zerocopy-derive"
3761
-
version = "0.8.26"
3827
+
version = "0.8.27"
3762
3828
source = "registry+https://github.com/rust-lang/crates.io-index"
3763
-
checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181"
3829
+
checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831"
3764
3830
dependencies = [
3765
3831
"proc-macro2",
3766
3832
"quote",
3767
-
"syn",
3833
+
"syn 2.0.108",
3768
3834
]
3769
3835
3770
3836
[[package]]
···
3784
3850
dependencies = [
3785
3851
"proc-macro2",
3786
3852
"quote",
3787
-
"syn",
3853
+
"syn 2.0.108",
3788
3854
"synstructure",
3789
3855
]
3790
3856
3791
3857
[[package]]
3792
3858
name = "zeroize"
3793
-
version = "1.8.1"
3859
+
version = "1.8.2"
3794
3860
source = "registry+https://github.com/rust-lang/crates.io-index"
3795
-
checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde"
3861
+
checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0"
3796
3862
3797
3863
[[package]]
3798
3864
name = "zerotrie"
3799
-
version = "0.2.2"
3865
+
version = "0.2.3"
3800
3866
source = "registry+https://github.com/rust-lang/crates.io-index"
3801
-
checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595"
3867
+
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
3802
3868
dependencies = [
3803
3869
"displaydoc",
3804
3870
"yoke",
···
3807
3873
3808
3874
[[package]]
3809
3875
name = "zerovec"
3810
-
version = "0.11.4"
3876
+
version = "0.11.5"
3811
3877
source = "registry+https://github.com/rust-lang/crates.io-index"
3812
-
checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b"
3878
+
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
3813
3879
dependencies = [
3814
3880
"yoke",
3815
3881
"zerofrom",
···
3818
3884
3819
3885
[[package]]
3820
3886
name = "zerovec-derive"
3821
-
version = "0.11.1"
3887
+
version = "0.11.2"
3822
3888
source = "registry+https://github.com/rust-lang/crates.io-index"
3823
-
checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f"
3889
+
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
3824
3890
dependencies = [
3825
3891
"proc-macro2",
3826
3892
"quote",
3827
-
"syn",
3893
+
"syn 2.0.108",
3894
+
]
3895
+
3896
+
[[package]]
3897
+
name = "zstd"
3898
+
version = "0.13.3"
3899
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3900
+
checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a"
3901
+
dependencies = [
3902
+
"zstd-safe",
3903
+
]
3904
+
3905
+
[[package]]
3906
+
name = "zstd-safe"
3907
+
version = "7.2.4"
3908
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3909
+
checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d"
3910
+
dependencies = [
3911
+
"zstd-sys",
3912
+
]
3913
+
3914
+
[[package]]
3915
+
name = "zstd-sys"
3916
+
version = "2.0.16+zstd.1.5.7"
3917
+
source = "registry+https://github.com/rust-lang/crates.io-index"
3918
+
checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748"
3919
+
dependencies = [
3920
+
"cc",
3921
+
"pkg-config",
3828
3922
]
+13
-5
Cargo.toml
···
1
1
[package]
2
2
name = "quickdid"
3
-
version = "1.0.0-rc.3"
3
+
version = "1.0.0-rc.5"
4
4
edition = "2024"
5
5
authors = ["Nick Gerakines <nick.gerakines@gmail.com>"]
6
6
description = "A fast and scalable com.atproto.identity.resolveHandle service"
···
16
16
[dependencies]
17
17
anyhow = "1.0"
18
18
async-trait = "0.1"
19
-
atproto-identity = { version = "0.11.3" }
19
+
atproto-identity = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
20
+
atproto-jetstream = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
21
+
atproto-lexicon = { git = "https://tangled.org/@smokesignal.events/atproto-identity-rs" }
20
22
axum = { version = "0.8" }
21
-
bincode = { version = "2.0.1", features = ["serde"] }
23
+
bincode = { version = "2.0", features = ["serde"] }
24
+
cadence = "1.6"
22
25
deadpool-redis = { version = "0.22", features = ["connection-manager", "tokio-comp", "tokio-rustls-comp"] }
23
-
metrohash = "1.0.7"
26
+
httpdate = "1.0"
27
+
metrohash = "1.0"
24
28
reqwest = { version = "0.12", features = ["json"] }
25
29
serde = { version = "1.0", features = ["derive"] }
26
30
serde_json = "1.0"
27
-
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite", "chrono"] }
31
+
sqlx = { version = "0.8", features = ["runtime-tokio", "sqlite"] }
28
32
thiserror = "2.0"
29
33
tokio = { version = "1.35", features = ["rt-multi-thread", "macros", "signal", "sync", "time", "net", "fs"] }
30
34
tokio-util = { version = "0.7", features = ["rt"] }
35
+
tower-http = { version = "0.6", features = ["fs"] }
31
36
tracing = "0.1"
32
37
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
38
+
39
+
[dev-dependencies]
40
+
once_cell = "1.20"
+6
-2
Dockerfile
···
1
1
# syntax=docker/dockerfile:1.4
2
-
FROM rust:1.89-slim AS builder
2
+
FROM rust:1.90-slim-bookworm AS builder
3
3
4
4
RUN apt-get update && apt-get install -y \
5
5
pkg-config \
···
19
19
LABEL org.opencontainers.image.licenses="MIT"
20
20
LABEL org.opencontainers.image.authors="Nick Gerakines <nick.gerakines@gmail.com>"
21
21
LABEL org.opencontainers.image.source="https://tangled.sh/@smokesignal.events/quickdid"
22
-
LABEL org.opencontainers.image.version="1.0.0-rc.3"
22
+
LABEL org.opencontainers.image.version="1.0.0-rc.5"
23
23
24
24
WORKDIR /app
25
25
COPY --from=builder /app/target/release/quickdid /app/quickdid
26
26
27
+
# Copy static files for serving
28
+
COPY www /app/www
29
+
27
30
ENV HTTP_PORT=8080
31
+
ENV STATIC_FILES_DIR=/app/www
28
32
ENV RUST_LOG=info
29
33
ENV RUST_BACKTRACE=full
30
34
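To sanity-check the image above, a local build and run along these lines should work; the tag and port mapping are illustrative (3007:8080 mirrors the compose file later in this change):

```bash
# Build the image; the container listens on HTTP_PORT=8080 per the Dockerfile.
docker build -t quickdid:1.0.0-rc.5 .
# HTTP_EXTERNAL is the only required variable.
docker run --rm -p 3007:8080 -e HTTP_EXTERNAL=localhost:3007 quickdid:1.0.0-rc.5
```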
+101
-13
README.md
···
1
1
# QuickDID
2
2
3
-
QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides blazing-fast handle-to-DID resolution with intelligent caching strategies, supporting in-memory, Redis-backed, and SQLite-backed persistent caching with binary serialization for optimal storage efficiency.
3
+
QuickDID is a high-performance AT Protocol identity resolution service written in Rust. It provides blazing-fast handle-to-DID resolution with intelligent caching strategies, supporting in-memory, Redis-backed, and SQLite-backed persistent caching with binary serialization for compact storage. The service includes proactive cache refreshing to keep frequently requested handles warm and comprehensive metrics support for production monitoring.
4
4
5
5
Built following the 12-factor app methodology with minimal dependencies and optimized for production use, QuickDID delivers exceptional performance while maintaining a lean footprint. Configuration is handled exclusively through environment variables, with only `--version` and `--help` command-line arguments supported.
6
6
···
21
21
## Features
22
22
23
23
- **Fast Handle Resolution**: Resolves AT Protocol handles to DIDs using DNS TXT records and HTTP well-known endpoints
24
+
- **Bidirectional Caching**: Supports both handle-to-DID and DID-to-handle lookups with automatic cache synchronization
24
25
- **Multi-Layer Caching**: Flexible caching with three tiers:
25
26
- In-memory caching with configurable TTL (default: 600 seconds)
26
27
- Redis-backed persistent caching (default: 90-day TTL)
27
28
- SQLite-backed persistent caching (default: 90-day TTL)
29
+
- **Jetstream Consumer**: Real-time cache updates from AT Protocol firehose:
30
+
- Processes Account and Identity events
31
+
- Automatically purges deleted/deactivated accounts
32
+
- Updates handle-to-DID mappings in real-time
33
+
- Comprehensive metrics for event processing
34
+
- Automatic reconnection with backoff
35
+
- **HTTP Caching**: Client-side caching support with:
36
+
- ETag generation with configurable seed for cache invalidation
37
+
- Cache-Control headers with max-age, stale-while-revalidate, and stale-if-error directives
38
+
- CORS headers for cross-origin requests
28
39
- **Rate Limiting**: Semaphore-based concurrency control with optional timeout to protect upstream services
29
40
- **Binary Serialization**: Compact storage format reduces cache size by ~40% compared to JSON
30
41
- **Queue Processing**: Asynchronous handle resolution with multiple adapters:
···
32
43
- Redis (distributed)
33
44
- SQLite (persistent with work shedding)
34
45
- No-op (testing)
46
+
- **Metrics & Monitoring**:
47
+
- StatsD metrics support for counters, gauges, and timings
48
+
- Resolution timing measurements
49
+
- Jetstream event processing metrics
50
+
- Configurable tags for environment/service identification
51
+
- Integration guides for Telegraf and TimescaleDB
52
+
- Configurable bind address for StatsD UDP socket (IPv4/IPv6)
53
+
- **Proactive Cache Refresh**:
54
+
- Automatically refreshes cache entries before expiration
55
+
- Configurable refresh threshold
56
+
- Prevents cache misses for frequently accessed handles
57
+
- Metrics tracking for refresh operations
58
+
- **Queue Deduplication**:
59
+
- Redis-based deduplication for queue items
60
+
- Prevents duplicate handle resolution work
61
+
- Configurable TTL for deduplication keys
62
+
- **Cache Management APIs**:
63
+
- `purge` method for removing entries by handle or DID
64
+
- `set` method for manually updating handle-to-DID mappings
65
+
- Chainable operations across resolver layers
35
66
- **AT Protocol Compatible**: Implements XRPC endpoints for seamless integration with AT Protocol infrastructure
36
67
- **Comprehensive Error Handling**: Structured errors with unique identifiers (e.g., `error-quickdid-config-1`), health checks, and graceful shutdown
37
68
- **12-Factor App**: Environment-based configuration following cloud-native best practices
···
64
95
65
96
## Minimum Configuration
66
97
67
-
QuickDID requires the following environment variables to run. Configuration is validated at startup, and the service will exit with specific error codes if validation fails.
98
+
QuickDID requires minimal configuration to run. Configuration is validated at startup, and the service will exit with specific error codes if validation fails.
68
99
69
100
### Required
70
101
71
102
- `HTTP_EXTERNAL`: External hostname for service endpoints (e.g., `localhost:3007`)
72
-
- `SERVICE_KEY`: Private key for service identity in DID format (e.g., `did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK`)
73
103
74
104
### Example Minimal Setup
75
105
76
106
```bash
77
-
HTTP_EXTERNAL=localhost:3007 \
78
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
79
-
cargo run
107
+
HTTP_EXTERNAL=localhost:3007 cargo run
108
+
```
109
+
110
+
### Static Files
111
+
112
+
QuickDID serves static files from the `www` directory by default. This includes:
113
+
- Landing page (`index.html`)
114
+
- AT Protocol well-known files (`.well-known/atproto-did` and `.well-known/did.json`)
115
+
116
+
Generate the `.well-known` files for your deployment:
117
+
118
+
```bash
119
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
80
120
```
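For illustration, with `HTTP_EXTERNAL=your-domain.com` the generated files would be expected to look roughly like this; the exact JSON emitted by `generate-wellknown.sh` may include additional fields:

```bash
cat www/.well-known/atproto-did
# did:web:your-domain.com
cat www/.well-known/did.json
# {"@context":["https://www.w3.org/ns/did/v1"],"id":"did:web:your-domain.com",...}
```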
81
121
82
122
The minimal setup shown earlier starts QuickDID with:
···
110
150
- `QUEUE_BUFFER_SIZE`: MPSC queue buffer size (default: 1000)
111
151
- `QUEUE_REDIS_PREFIX`: Redis key prefix for queues (default: queue:handleresolver:)
112
152
- `QUEUE_REDIS_TIMEOUT`: Redis blocking timeout in seconds (default: 5)
153
+
- `QUEUE_REDIS_DEDUP_ENABLED`: Enable queue deduplication (default: false)
154
+
- `QUEUE_REDIS_DEDUP_TTL`: TTL for deduplication keys in seconds (default: 60)
113
155
- `QUEUE_SQLITE_MAX_SIZE`: Max SQLite queue size for work shedding (default: 10000)
114
156
115
157
#### Rate Limiting
116
158
- `RESOLVER_MAX_CONCURRENT`: Maximum concurrent handle resolutions (default: 0 = disabled)
117
159
- `RESOLVER_MAX_CONCURRENT_TIMEOUT_MS`: Timeout for acquiring rate limit permit in ms (default: 0 = no timeout)
118
160
161
+
#### HTTP Cache Control
162
+
- `CACHE_MAX_AGE`: Max-age for Cache-Control header in seconds (default: 86400)
163
+
- `CACHE_STALE_IF_ERROR`: Stale-if-error directive in seconds (default: 172800)
164
+
- `CACHE_STALE_WHILE_REVALIDATE`: Stale-while-revalidate in seconds (default: 86400)
165
+
- `CACHE_MAX_STALE`: Max-stale directive in seconds (default: 86400)
166
+
- `ETAG_SEED`: Seed value for ETag generation (default: application version)
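As a quick check of the settings above, the response headers can be inspected directly; the header values shown are illustrative and follow whatever `CACHE_*` values are configured:

```bash
# -D - dumps the received headers to stdout while discarding the body.
curl -s -D - -o /dev/null \
  "http://localhost:3007/xrpc/com.atproto.identity.resolveHandle?handle=alice.example.com"
# HTTP/1.1 200 OK
# etag: "..."
# cache-control: public, max-age=86400, stale-while-revalidate=86400, stale-if-error=172800
```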
167
+
168
+
#### Metrics
169
+
- `METRICS_ADAPTER`: Metrics adapter type - 'noop' or 'statsd' (default: noop)
170
+
- `METRICS_STATSD_HOST`: StatsD host and port (required when METRICS_ADAPTER=statsd)
171
+
- `METRICS_STATSD_BIND`: Bind address for StatsD UDP socket (default: [::]:0 for IPv6, can use 0.0.0.0:0 for IPv4)
172
+
- `METRICS_PREFIX`: Prefix for all metrics (default: quickdid)
173
+
- `METRICS_TAGS`: Comma-separated tags (e.g., env:prod,service:quickdid)
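When wiring metrics up for the first time, it can help to watch the raw StatsD datagrams before pointing the service at a real collector; the metric name and line format below are hypothetical:

```bash
# Listen for StatsD UDP packets locally (flag syntax varies between
# netcat variants; this is the BSD/OpenBSD form).
nc -u -l 8125
# quickdid.resolve.timing:12|ms|#env:prod,service:quickdid
```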
174
+
175
+
#### Proactive Refresh
176
+
- `PROACTIVE_REFRESH_ENABLED`: Enable proactive cache refreshing (default: false)
177
+
- `PROACTIVE_REFRESH_THRESHOLD`: Refresh when TTL remaining is below this threshold (0.0-1.0, default: 0.8)
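For example, as a sketch assuming the threshold is interpreted as a fraction of the configured TTL: with a 600-second memory TTL and the default threshold of 0.8, an entry becomes eligible for background refresh once less than 600 × 0.8 = 480 seconds of its TTL remain:

```bash
HTTP_EXTERNAL=localhost:3007 \
PROACTIVE_REFRESH_ENABLED=true \
PROACTIVE_REFRESH_THRESHOLD=0.8 \
CACHE_TTL_MEMORY=600 \
cargo run
```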
178
+
179
+
#### Jetstream Consumer
180
+
- `JETSTREAM_ENABLED`: Enable Jetstream consumer for real-time cache updates (default: false)
181
+
- `JETSTREAM_HOSTNAME`: Jetstream WebSocket hostname (default: jetstream.atproto.tools)
182
+
183
+
#### Static Files
184
+
- `STATIC_FILES_DIR`: Directory for serving static files (default: www)
185
+
119
186
#### Logging
120
187
- `RUST_LOG`: Logging level (e.g., debug, info, warn, error)
121
188
122
189
### Production Examples
123
190
124
-
#### Redis-based (Multi-instance/HA)
191
+
#### Redis-based with Metrics and Jetstream (Multi-instance/HA)
125
192
```bash
126
193
HTTP_EXTERNAL=quickdid.example.com \
127
-
SERVICE_KEY=did:key:yourkeyhere \
128
194
HTTP_PORT=3000 \
129
195
REDIS_URL=redis://localhost:6379 \
130
196
CACHE_TTL_REDIS=86400 \
···
132
198
QUEUE_WORKER_ID=prod-worker-1 \
133
199
RESOLVER_MAX_CONCURRENT=100 \
134
200
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 \
201
+
METRICS_ADAPTER=statsd \
202
+
METRICS_STATSD_HOST=localhost:8125 \
203
+
METRICS_PREFIX=quickdid \
204
+
METRICS_TAGS=env:prod,service:quickdid \
205
+
CACHE_MAX_AGE=86400 \
206
+
JETSTREAM_ENABLED=true \
207
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools \
135
208
RUST_LOG=info \
136
209
./target/release/quickdid
137
210
```
···
139
212
#### SQLite-based (Single-instance)
140
213
```bash
141
214
HTTP_EXTERNAL=quickdid.example.com \
142
-
SERVICE_KEY=did:key:yourkeyhere \
143
215
HTTP_PORT=3000 \
144
216
SQLITE_URL=sqlite:./quickdid.db \
145
217
CACHE_TTL_SQLITE=86400 \
···
155
227
QuickDID uses a layered architecture for optimal performance:
156
228
157
229
```
158
-
Request → Cache Layer → Rate Limiter → Base Resolver → DNS/HTTP
159
-
             ↓              ↓               ↓
160
-
Memory/Redis/ Semaphore AT Protocol
161
-
SQLite (optional) Infrastructure
230
+
Request → Cache Layer → Proactive Refresh → Rate Limiter → Base Resolver → DNS/HTTP
231
+
             ↓                  ↓                ↓               ↓
232
+
Memory/Redis/ Background Semaphore AT Protocol
233
+
SQLite Refresher (optional) Infrastructure
234
+
             ↓
235
+
Jetstream Consumer ← Real-time Updates from AT Protocol Firehose
162
236
```
163
237
164
238
### Cache Priority
···
167
241
2. SQLite (if configured) - Best for single-instance with persistence
168
242
3. In-memory (fallback) - Always available
169
243
244
+
### Real-time Cache Updates
245
+
When Jetstream is enabled, QuickDID maintains cache consistency by:
246
+
- Processing Account events to purge deleted/deactivated accounts
247
+
- Processing Identity events to update handle-to-DID mappings
248
+
- Automatically reconnecting with exponential backoff on connection failures
249
+
- Tracking metrics for successful and failed event processing
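To peek at the same event stream QuickDID consumes, a WebSocket client such as `websocat` can be pointed at the Jetstream endpoint; the subscribe path and event shape below are typical of Jetstream deployments but should be treated as illustrative:

```bash
websocat "wss://jetstream.atproto.tools/subscribe"
# {"did":"did:plc:...","kind":"identity","identity":{"did":"did:plc:...","handle":"alice.example.com",...}}
```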
250
+
170
251
### Deployment Strategies
171
252
172
253
- **Single-instance**: Use SQLite for both caching and queuing
173
254
- **Multi-instance/HA**: Use Redis for distributed caching and queuing
174
255
- **Development**: Use in-memory caching with MPSC queuing
256
+
- **Real-time sync**: Enable Jetstream consumer for live cache updates
175
257
176
258
## API Endpoints
177
259
178
260
- `GET /_health` - Health check endpoint
179
261
- `GET /xrpc/com.atproto.identity.resolveHandle` - Resolve handle to DID
180
262
- `GET /.well-known/atproto-did` - Serves the service DID
263
+
- `OPTIONS /*` - CORS preflight support for all endpoints
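A minimal resolution call looks like this; the host and handle are placeholders, and the response shape follows the `com.atproto.identity.resolveHandle` lexicon:

```bash
curl -s "http://localhost:3007/xrpc/com.atproto.identity.resolveHandle?handle=alice.example.com"
# {"did":"did:plc:..."}
```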
181
264
182
265
## Docker Deployment
183
266
···
201
284
202
285
- [Configuration Reference](docs/configuration-reference.md) - Complete list of all configuration options
203
286
- [Production Deployment Guide](docs/production-deployment.md) - Docker, monitoring, and production best practices
287
+
- [Metrics Guide](docs/telegraf-timescaledb-metrics-guide.md) - Setting up metrics with Telegraf and TimescaleDB
204
288
- [Development Guide](CLAUDE.md) - Architecture details and development patterns
289
+
290
+
## Railway Deployment
291
+
292
+
QuickDID includes Railway deployment resources in the `railway-resources/` directory for easy deployment with metrics support via Telegraf. See the deployment configurations for one-click deployment options.
205
293
206
294
## License
207
295
+41
docker-compose.yml
···
1
+
version: '3.8'
2
+
3
+
services:
4
+
quickdid:
5
+
image: quickdid:latest
6
+
build: .
7
+
ports:
8
+
- "3007:8080"
9
+
environment:
10
+
- HTTP_EXTERNAL=localhost:3007
11
+
- HTTP_PORT=8080
12
+
- RUST_LOG=info
13
+
# Optional: Override the static files directory
14
+
# - STATIC_FILES_DIR=/app/custom-www
15
+
volumes:
16
+
# Optional: Mount custom static files from host
17
+
# - ./custom-www:/app/custom-www:ro
18
+
# Optional: Mount custom .well-known files
19
+
# - ./www/.well-known:/app/www/.well-known:ro
20
+
# Optional: Use SQLite for caching
21
+
# - ./data:/app/data
22
+
# environment:
23
+
# SQLite cache configuration
24
+
# - SQLITE_URL=sqlite:/app/data/quickdid.db
25
+
# - CACHE_TTL_SQLITE=86400
26
+
27
+
# Redis cache configuration (if using external Redis)
28
+
# - REDIS_URL=redis://redis:6379
29
+
# - CACHE_TTL_REDIS=86400
30
+
# - QUEUE_ADAPTER=redis
31
+
32
+
# Optional: Redis service for caching
33
+
# redis:
34
+
# image: redis:7-alpine
35
+
# ports:
36
+
# - "6379:6379"
37
+
# volumes:
38
+
# - redis-data:/data
39
+
40
+
volumes:
41
+
redis-data:
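Assuming the compose file above, bringing the service up and hitting the health endpoint makes a reasonable smoke test (`docker compose` vs. `docker-compose` depends on your Docker version):

```bash
docker compose up -d quickdid
curl -s http://localhost:3007/_health
```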
+439
-41
docs/configuration-reference.md
···
10
10
- [Queue Configuration](#queue-configuration)
11
11
- [Rate Limiting Configuration](#rate-limiting-configuration)
12
12
- [HTTP Caching Configuration](#http-caching-configuration)
13
+
- [Metrics Configuration](#metrics-configuration)
14
+
- [Proactive Refresh Configuration](#proactive-refresh-configuration)
15
+
- [Jetstream Consumer Configuration](#jetstream-consumer-configuration)
16
+
- [Static Files Configuration](#static-files-configuration)
13
17
- [Configuration Examples](#configuration-examples)
14
18
- [Validation Rules](#validation-rules)
15
19
···
40
44
**Constraints**:
41
45
- Must be a valid hostname or hostname:port combination
42
46
- Port (if specified) must be between 1-65535
43
-
- Used to generate service DID (did:web:{HTTP_EXTERNAL})
44
-
45
-
### `SERVICE_KEY`
46
-
47
-
**Required**: Yes
48
-
**Type**: String
49
-
**Format**: DID private key
50
-
**Security**: SENSITIVE - Never commit to version control
51
-
52
-
The private key for the service's AT Protocol identity. This key is used to sign responses and authenticate the service.
53
-
54
-
**Examples**:
55
-
```bash
56
-
# did:key format (Ed25519)
57
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
58
-
59
-
# did:plc format
60
-
SERVICE_KEY=did:plc:xyz123abc456def789
61
-
```
62
-
63
-
**Constraints**:
64
-
- Must be a valid DID format
65
-
- Must include the private key component
66
-
- Should be stored securely (e.g., secrets manager, encrypted storage)
67
47
68
48
## Network Configuration
69
49
···
378
358
QUEUE_REDIS_TIMEOUT=30 # Minimal polling, slow shutdown
379
359
```
380
360
361
+
### `QUEUE_REDIS_DEDUP_ENABLED`
362
+
363
+
**Required**: No
364
+
**Type**: Boolean
365
+
**Default**: `false`
366
+
367
+
Enable deduplication for the Redis queue so the same handle is not queued multiple times within the TTL window. When enabled, uses a Redis SET with TTL to track handles currently being processed.
368
+
369
+
**Examples**:
370
+
```bash
371
+
# Enable deduplication (recommended for production)
372
+
QUEUE_REDIS_DEDUP_ENABLED=true
373
+
374
+
# Disable deduplication (default)
375
+
QUEUE_REDIS_DEDUP_ENABLED=false
376
+
```
377
+
378
+
**Use cases**:
379
+
- **Production**: Enable to prevent duplicate work and reduce load
380
+
- **High-traffic**: Essential to avoid processing the same handle multiple times
381
+
- **Development**: Can be disabled for simpler debugging
382
+
383
+
### `QUEUE_REDIS_DEDUP_TTL`
384
+
385
+
**Required**: No
386
+
**Type**: Integer (seconds)
387
+
**Default**: `60`
388
+
**Range**: 10-300 (recommended)
389
+
**Constraints**: Must be > 0 when deduplication is enabled
390
+
391
+
TTL for Redis queue deduplication keys in seconds. Determines how long to prevent duplicate handle resolution requests.
392
+
393
+
**Examples**:
394
+
```bash
395
+
# Quick deduplication window (10 seconds)
396
+
QUEUE_REDIS_DEDUP_TTL=10
397
+
398
+
# Default (1 minute)
399
+
QUEUE_REDIS_DEDUP_TTL=60
400
+
401
+
# Extended deduplication (5 minutes)
402
+
QUEUE_REDIS_DEDUP_TTL=300
403
+
```
404
+
405
+
**Recommendations**:
406
+
- **Fast processing**: 10-30 seconds
407
+
- **Normal processing**: 60 seconds (default)
408
+
- **Slow processing or high load**: 120-300 seconds
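The deduplication behavior is equivalent to a Redis `SET ... NX EX` call; a sketch with redis-cli (the key name here is hypothetical — QuickDID's actual key layout may differ):

```bash
# First enqueue within the window succeeds; repeats are suppressed until the TTL expires
redis-cli SET "dedup:alice.example.com" 1 NX EX 60   # -> OK    (handle queued)
redis-cli SET "dedup:alice.example.com" 1 NX EX 60   # -> (nil) (duplicate, skipped)
```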
409
+
381
410
### `QUEUE_WORKER_ID`
382
411
383
412
**Required**: No
···
548
577
Rate limit permit acquisition timed out after {timeout}ms
549
578
```
550
579
580
+
## Metrics Configuration
581
+
582
+
### `METRICS_ADAPTER`
583
+
584
+
**Required**: No
585
+
**Type**: String
586
+
**Default**: `noop`
587
+
**Values**: `noop`, `statsd`
588
+
589
+
Metrics adapter type for collecting and publishing metrics.
590
+
591
+
**Options**:
592
+
- `noop`: No metrics collection (default)
593
+
- `statsd`: Send metrics to StatsD server
594
+
595
+
**Examples**:
596
+
```bash
597
+
# No metrics (default)
598
+
METRICS_ADAPTER=noop
599
+
600
+
# Enable StatsD metrics
601
+
METRICS_ADAPTER=statsd
602
+
```
603
+
604
+
### `METRICS_STATSD_HOST`
605
+
606
+
**Required**: Yes (when METRICS_ADAPTER=statsd)
607
+
**Type**: String
608
+
**Format**: hostname:port
609
+
610
+
StatsD server host and port for metrics collection.
611
+
612
+
**Examples**:
613
+
```bash
614
+
# Local StatsD
615
+
METRICS_STATSD_HOST=localhost:8125
616
+
617
+
# Remote StatsD
618
+
METRICS_STATSD_HOST=statsd.example.com:8125
619
+
620
+
# Docker network
621
+
METRICS_STATSD_HOST=statsd:8125
622
+
```
623
+
624
+
### `METRICS_STATSD_BIND`
625
+
626
+
**Required**: No
627
+
**Type**: String
628
+
**Default**: `[::]:0`
629
+
630
+
Bind address for StatsD UDP socket. Controls which local address to bind for sending UDP packets.
631
+
632
+
**Examples**:
633
+
```bash
634
+
# IPv6 any address, random port (default)
635
+
METRICS_STATSD_BIND=[::]:0
636
+
637
+
# IPv4 any address, random port
638
+
METRICS_STATSD_BIND=0.0.0.0:0
639
+
640
+
# Specific interface
641
+
METRICS_STATSD_BIND=192.168.1.100:0
642
+
643
+
# Specific port
644
+
METRICS_STATSD_BIND=[::]:8126
645
+
```
646
+
647
+
### `METRICS_PREFIX`
648
+
649
+
**Required**: No
650
+
**Type**: String
651
+
**Default**: `quickdid`
652
+
653
+
Prefix for all metrics. Used to namespace metrics in your monitoring system.
654
+
655
+
**Examples**:
656
+
```bash
657
+
# Default
658
+
METRICS_PREFIX=quickdid
659
+
660
+
# Environment-specific
661
+
METRICS_PREFIX=prod.quickdid
662
+
METRICS_PREFIX=staging.quickdid
663
+
664
+
# Region-specific
665
+
METRICS_PREFIX=us-east-1.quickdid
666
+
METRICS_PREFIX=eu-west-1.quickdid
667
+
668
+
# Service-specific
669
+
METRICS_PREFIX=api.quickdid
670
+
```
671
+
672
+
### `METRICS_TAGS`
673
+
674
+
**Required**: No
675
+
**Type**: String (comma-separated key:value pairs)
676
+
**Default**: None
677
+
678
+
Default tags for all metrics. Added to all metrics for filtering and grouping.
679
+
680
+
**Examples**:
681
+
```bash
682
+
# Basic tags
683
+
METRICS_TAGS=env:production,service:quickdid
684
+
685
+
# Detailed tags
686
+
METRICS_TAGS=env:production,service:quickdid,region:us-east-1,version:1.0.0
687
+
688
+
# Deployment-specific
689
+
METRICS_TAGS=env:staging,cluster:k8s-staging,namespace:quickdid
690
+
```
691
+
692
+
**Common tag patterns**:
693
+
- `env`: Environment (production, staging, development)
694
+
- `service`: Service name
695
+
- `region`: Geographic region
696
+
- `version`: Application version
697
+
- `cluster`: Kubernetes cluster name
698
+
- `instance`: Instance identifier
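To verify the StatsD path end to end, you can emit a test metric by hand (a sketch using the StatsD line protocol with DogStatsD-style tags; the metric name is hypothetical):

```bash
# Send one tagged counter to the configured StatsD host over UDP
echo "quickdid.test.count:1|c|#env:production,service:quickdid" | nc -u -w0 localhost 8125
```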
699
+
700
+
## Proactive Refresh Configuration
701
+
702
+
### `PROACTIVE_REFRESH_ENABLED`
703
+
704
+
**Required**: No
705
+
**Type**: Boolean
706
+
**Default**: `false`
707
+
708
+
Enable proactive cache refresh for frequently accessed handles. When enabled, cache entries that have reached the refresh threshold will be queued for background refresh to keep the cache warm.
709
+
710
+
**Examples**:
711
+
```bash
712
+
# Enable proactive refresh (recommended for production)
713
+
PROACTIVE_REFRESH_ENABLED=true
714
+
715
+
# Disable proactive refresh (default)
716
+
PROACTIVE_REFRESH_ENABLED=false
717
+
```
718
+
719
+
**Benefits**:
720
+
- Prevents cache misses for popular handles
721
+
- Maintains consistent response times
722
+
- Reduces latency spikes during cache expiration
723
+
724
+
**Considerations**:
725
+
- Increases background processing load
726
+
- More DNS/HTTP requests to upstream services
727
+
- Best for high-traffic services with predictable access patterns
728
+
729
+
### `PROACTIVE_REFRESH_THRESHOLD`
730
+
731
+
**Required**: No
732
+
**Type**: Float
733
+
**Default**: `0.8`
734
+
**Range**: 0.0-1.0
735
+
**Constraints**: Must be between 0.0 and 1.0
736
+
737
+
Threshold, as a fraction (0.0-1.0) of the cache TTL, at which to trigger proactive refresh. For example, 0.8 means refresh when an entry has lived for 80% of its TTL.
738
+
739
+
**Examples**:
740
+
```bash
741
+
# Very aggressive (refresh at 50% of TTL)
742
+
PROACTIVE_REFRESH_THRESHOLD=0.5
743
+
744
+
# Moderate (refresh at 70% of TTL)
745
+
PROACTIVE_REFRESH_THRESHOLD=0.7
746
+
747
+
# Default (refresh at 80% of TTL)
748
+
PROACTIVE_REFRESH_THRESHOLD=0.8
749
+
750
+
# Conservative (refresh at 90% of TTL)
751
+
PROACTIVE_REFRESH_THRESHOLD=0.9
752
+
753
+
# Very conservative (refresh at 95% of TTL)
754
+
PROACTIVE_REFRESH_THRESHOLD=0.95
755
+
```
756
+
757
+
**Recommendations**:
758
+
- **High-traffic services**: 0.5-0.7 (aggressive refresh)
759
+
- **Normal traffic**: 0.8 (default, balanced)
760
+
- **Low traffic**: 0.9-0.95 (conservative)
761
+
- **Development**: 0.5 (test refresh behavior)
762
+
763
+
**Impact on different cache TTLs**:
764
+
- TTL=600s (10 min), threshold=0.8: Refresh after 8 minutes
765
+
- TTL=3600s (1 hour), threshold=0.8: Refresh after 48 minutes
766
+
- TTL=86400s (1 day), threshold=0.8: Refresh after 19.2 hours
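The refresh point is simply threshold × TTL; a quick shell check (illustrative only):

```bash
# 0.8 of a 3600s TTL -> refresh after 2880 seconds (48 minutes)
awk -v ttl=3600 -v thr=0.8 'BEGIN { printf "%d\n", ttl * thr }'
```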
767
+
768
+
## Jetstream Consumer Configuration
769
+
770
+
### `JETSTREAM_ENABLED`
771
+
772
+
**Required**: No
773
+
**Type**: Boolean
774
+
**Default**: `false`
775
+
776
+
Enable Jetstream consumer for real-time cache updates from the AT Protocol firehose. When enabled, QuickDID connects to the Jetstream WebSocket service to receive live updates about account and identity changes.
777
+
778
+
**How it works**:
779
+
- Subscribes to Account and Identity events from the firehose
780
+
- Processes Account events to purge deleted/deactivated accounts
781
+
- Processes Identity events to update handle-to-DID mappings
782
+
- Automatically reconnects with exponential backoff on connection failures
783
+
- Tracks metrics for successful and failed event processing
784
+
785
+
**Examples**:
786
+
```bash
787
+
# Enable Jetstream consumer (recommended for production)
788
+
JETSTREAM_ENABLED=true
789
+
790
+
# Disable Jetstream consumer (default)
791
+
JETSTREAM_ENABLED=false
792
+
```
793
+
794
+
**Benefits**:
795
+
- Real-time cache synchronization with AT Protocol network
796
+
- Automatic removal of deleted/deactivated accounts
797
+
- Immediate handle change updates
798
+
- Reduces stale data in cache
799
+
800
+
**Considerations**:
801
+
- Requires stable WebSocket connection
802
+
- Increases network traffic (incoming events)
803
+
- Best for services requiring up-to-date handle mappings
804
+
- Automatically handles reconnection on failures
805
+
806
+
### `JETSTREAM_HOSTNAME`
807
+
808
+
**Required**: No
809
+
**Type**: String
810
+
**Default**: `jetstream.atproto.tools`
811
+
812
+
The hostname of the Jetstream WebSocket service to connect to for real-time AT Protocol events. Only used when `JETSTREAM_ENABLED=true`.
813
+
814
+
**Examples**:
815
+
```bash
816
+
# Production firehose (default)
817
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
818
+
819
+
# Staging environment
820
+
JETSTREAM_HOSTNAME=jetstream-staging.atproto.tools
821
+
822
+
# Local development firehose
823
+
JETSTREAM_HOSTNAME=localhost:6008
824
+
825
+
# Custom deployment
826
+
JETSTREAM_HOSTNAME=jetstream.example.com
827
+
```
828
+
829
+
**Event Processing**:
830
+
- **Account events**:
831
+
- `status: deleted` → Purges handle and DID from all caches
832
+
- `status: deactivated` → Purges handle and DID from all caches
833
+
- Other statuses → Ignored
834
+
835
+
- **Identity events**:
836
+
- Updates handle-to-DID mapping in cache
837
+
- Removes old handle mapping if changed
838
+
- Maintains bidirectional cache consistency
839
+
840
+
**Metrics Tracked** (when metrics are enabled):
841
+
- `jetstream.events.received`: Total events received
842
+
- `jetstream.events.processed`: Successfully processed events
843
+
- `jetstream.events.failed`: Failed event processing
844
+
- `jetstream.connections.established`: Successful connections
845
+
- `jetstream.connections.failed`: Failed connection attempts
846
+
847
+
**Reconnection Behavior**:
848
+
- Initial retry delay: 1 second
849
+
- Maximum retry delay: 60 seconds
850
+
- Exponential backoff with jitter
851
+
- Automatic recovery on transient failures
852
+
853
+
**Recommendations**:
854
+
- **Production**: Use default `jetstream.atproto.tools`
855
+
- **Development**: Consider local firehose for testing
856
+
- **High availability**: Monitor connection metrics
857
+
- **Network issues**: Check WebSocket connectivity
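To check WebSocket connectivity by hand, a client such as websocat can be pointed at the configured hostname (the `/subscribe` path follows the public Jetstream convention; treat it as an assumption for custom deployments):

```bash
# Stream a few raw events from the configured Jetstream host, then exit
websocat "wss://jetstream.atproto.tools/subscribe" | head -n 3
```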
858
+
859
+
## Static Files Configuration
860
+
861
+
### `STATIC_FILES_DIR`
862
+
863
+
**Required**: No
864
+
**Type**: String (directory path)
865
+
**Default**: `www`
866
+
867
+
Directory path for serving static files. This directory should contain the landing page and AT Protocol well-known files.
868
+
869
+
**Directory Structure**:
870
+
```
871
+
www/
872
+
├── index.html          # Landing page
873
+
├── .well-known/
874
+
│   ├── atproto-did     # Service DID identifier
875
+
│   └── did.json        # DID document
876
+
└── (other static assets)
877
+
```
878
+
879
+
**Examples**:
880
+
```bash
881
+
# Default (relative to working directory)
882
+
STATIC_FILES_DIR=www
883
+
884
+
# Absolute path
885
+
STATIC_FILES_DIR=/var/www/quickdid
886
+
887
+
# Docker container path
888
+
STATIC_FILES_DIR=/app/www
889
+
890
+
# Custom directory
891
+
STATIC_FILES_DIR=./public
892
+
```
893
+
894
+
**Docker Volume Mounting**:
895
+
```yaml
896
+
volumes:
897
+
# Mount entire custom directory
898
+
- ./custom-www:/app/www:ro
899
+
900
+
# Mount specific files
901
+
- ./custom-index.html:/app/www/index.html:ro
902
+
- ./well-known:/app/www/.well-known:ro
903
+
```
904
+
905
+
**Generating Well-Known Files**:
906
+
```bash
907
+
# Generate .well-known files for your domain
908
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
909
+
```
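Once deployed, the generated files can be verified over HTTP (sketch, using the same placeholder domain):

```bash
curl https://your-domain.com/.well-known/atproto-did   # should print did:web:your-domain.com
curl https://your-domain.com/.well-known/did.json
```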
910
+
551
911
## HTTP Caching Configuration
552
912
553
913
### `CACHE_MAX_AGE`
···
721
1081
```bash
722
1082
# .env.development
723
1083
HTTP_EXTERNAL=localhost:3007
724
-
SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
725
1084
RUST_LOG=debug
726
1085
```
727
1086
···
731
1090
# .env.production.redis
732
1091
# Required
733
1092
HTTP_EXTERNAL=quickdid.example.com
734
-
SERVICE_KEY=${SECRET_SERVICE_KEY} # From secrets manager
735
1093
736
1094
# Network
737
1095
HTTP_PORT=8080
···
746
1104
QUEUE_ADAPTER=redis
747
1105
QUEUE_REDIS_TIMEOUT=5
748
1106
QUEUE_BUFFER_SIZE=5000
1107
+
QUEUE_REDIS_DEDUP_ENABLED=true # Prevent duplicate work
1108
+
QUEUE_REDIS_DEDUP_TTL=60
749
1109
750
1110
# Rate Limiting (optional, recommended for production)
751
1111
RESOLVER_MAX_CONCURRENT=100
752
1112
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 # 5 second timeout
753
1113
1114
+
# Metrics (optional, recommended for production)
1115
+
METRICS_ADAPTER=statsd
1116
+
METRICS_STATSD_HOST=localhost:8125
1117
+
METRICS_PREFIX=quickdid
1118
+
METRICS_TAGS=env:prod,service:quickdid
1119
+
1120
+
# Proactive Refresh (optional, recommended for high-traffic)
1121
+
PROACTIVE_REFRESH_ENABLED=true
1122
+
PROACTIVE_REFRESH_THRESHOLD=0.8
1123
+
1124
+
# Jetstream Consumer (optional, recommended for real-time sync)
1125
+
JETSTREAM_ENABLED=true
1126
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1127
+
754
1128
# HTTP Caching (Cache-Control headers)
755
1129
CACHE_MAX_AGE=86400 # 24 hours
756
1130
CACHE_STALE_IF_ERROR=172800 # 48 hours
···
766
1140
# .env.production.sqlite
767
1141
# Required
768
1142
HTTP_EXTERNAL=quickdid.example.com
769
-
SERVICE_KEY=${SECRET_SERVICE_KEY} # From secrets manager
770
1143
771
1144
# Network
772
1145
HTTP_PORT=8080
···
786
1159
RESOLVER_MAX_CONCURRENT=100
787
1160
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=5000 # 5 second timeout
788
1161
1162
+
# Jetstream Consumer (optional, recommended for real-time sync)
1163
+
JETSTREAM_ENABLED=true
1164
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1165
+
789
1166
# HTTP Caching (Cache-Control headers)
790
1167
CACHE_MAX_AGE=86400 # 24 hours
791
1168
CACHE_STALE_IF_ERROR=172800 # 48 hours
···
801
1178
# .env.ha.redis
802
1179
# Required
803
1180
HTTP_EXTERNAL=quickdid.example.com
804
-
SERVICE_KEY=${SECRET_SERVICE_KEY}
805
1181
806
1182
# Network
807
1183
HTTP_PORT=8080
···
818
1194
QUEUE_REDIS_PREFIX=prod:queue:
819
1195
QUEUE_WORKER_ID=${HOSTNAME:-worker1}
820
1196
QUEUE_REDIS_TIMEOUT=10
1197
+
QUEUE_REDIS_DEDUP_ENABLED=true # Essential for multi-instance
1198
+
QUEUE_REDIS_DEDUP_TTL=120 # Longer TTL for HA
821
1199
822
1200
# Performance
823
1201
QUEUE_BUFFER_SIZE=10000
···
826
1204
RESOLVER_MAX_CONCURRENT=500
827
1205
RESOLVER_MAX_CONCURRENT_TIMEOUT_MS=10000 # 10 second timeout for HA
828
1206
1207
+
# Metrics (recommended for HA monitoring)
1208
+
METRICS_ADAPTER=statsd
1209
+
METRICS_STATSD_HOST=statsd:8125
1210
+
METRICS_PREFIX=quickdid.prod
1211
+
METRICS_TAGS=env:prod,service:quickdid,cluster:ha
1212
+
1213
+
# Proactive Refresh (recommended for HA)
1214
+
PROACTIVE_REFRESH_ENABLED=true
1215
+
PROACTIVE_REFRESH_THRESHOLD=0.7 # More aggressive for HA
1216
+
1217
+
# Jetstream Consumer (recommended for real-time sync in HA)
1218
+
JETSTREAM_ENABLED=true
1219
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
1220
+
829
1221
# Logging
830
1222
RUST_LOG=warn
831
1223
```
···
836
1228
# .env.hybrid
837
1229
# Required
838
1230
HTTP_EXTERNAL=quickdid.example.com
839
-
SERVICE_KEY=${SECRET_SERVICE_KEY}
840
1231
841
1232
# Network
842
1233
HTTP_PORT=8080
···
867
1258
image: quickdid:latest
868
1259
environment:
869
1260
HTTP_EXTERNAL: quickdid.example.com
870
-
SERVICE_KEY: ${SERVICE_KEY}
871
1261
HTTP_PORT: 8080
872
1262
REDIS_URL: redis://redis:6379/0
873
1263
CACHE_TTL_MEMORY: 600
874
1264
CACHE_TTL_REDIS: 86400
875
1265
QUEUE_ADAPTER: redis
876
1266
QUEUE_REDIS_TIMEOUT: 5
1267
+
JETSTREAM_ENABLED: true
1268
+
JETSTREAM_HOSTNAME: jetstream.atproto.tools
877
1269
RUST_LOG: info
878
1270
ports:
879
1271
- "8080:8080"
···
896
1288
image: quickdid:latest
897
1289
environment:
898
1290
HTTP_EXTERNAL: quickdid.example.com
899
-
SERVICE_KEY: ${SERVICE_KEY}
900
1291
HTTP_PORT: 8080
901
1292
SQLITE_URL: sqlite:/data/quickdid.db
902
1293
CACHE_TTL_MEMORY: 600
···
904
1295
QUEUE_ADAPTER: sqlite
905
1296
QUEUE_BUFFER_SIZE: 5000
906
1297
QUEUE_SQLITE_MAX_SIZE: 10000
1298
+
JETSTREAM_ENABLED: true
1299
+
JETSTREAM_HOSTNAME: jetstream.atproto.tools
907
1300
RUST_LOG: info
908
1301
ports:
909
1302
- "8080:8080"
···
922
1315
### Required Fields
923
1316
924
1317
1. **HTTP_EXTERNAL**: Must be provided
925
-
2. **SERVICE_KEY**: Must be provided
1318
+
2. **SERVICE_KEY**: No longer required (removed)
926
1319
927
1320
### Value Constraints
928
1321
···
967
1360
968
1361
```bash
969
1362
# Validate configuration
970
-
HTTP_EXTERNAL=test SERVICE_KEY=test quickdid --help
1363
+
HTTP_EXTERNAL=test quickdid --help
971
1364
972
1365
# Test with specific values
973
1366
CACHE_TTL_MEMORY=0 quickdid --help # Will fail validation
974
1367
975
1368
# Check parsed configuration (with debug logging)
976
-
RUST_LOG=debug HTTP_EXTERNAL=test SERVICE_KEY=test quickdid
1369
+
RUST_LOG=debug HTTP_EXTERNAL=test quickdid
977
1370
```
978
1371
979
1372
## Best Practices
980
1373
981
1374
### Security
982
1375
983
-
1. **Never commit SERVICE_KEY** to version control
984
-
2. Use environment-specific key management (Vault, AWS Secrets, etc.)
985
-
3. Rotate SERVICE_KEY regularly
986
-
4. Use TLS for Redis connections in production (`rediss://`)
1376
+
1. Use environment-specific configuration management
1377
+
2. Use TLS for Redis connections in production (`rediss://`)
1378
+
3. Never commit sensitive configuration to version control
987
1379
4. Implement network segmentation for Redis access
988
1380
989
1381
### Performance
···
1000
1392
2. **Single-instance deployments**: Use SQLite for persistent caching and queuing
1001
1393
3. **Development/testing**: Use memory-only caching with MPSC queuing
1002
1394
4. **Hybrid setups**: Configure both Redis and SQLite for redundancy
1003
-
5. **Queue adapter guidelines**:
1395
+
5. **Real-time sync**: Enable Jetstream consumer for live cache updates
1396
+
6. **Queue adapter guidelines**:
1004
1397
- Redis: Best for multi-instance deployments with distributed processing
1005
1398
- SQLite: Best for single-instance deployments needing persistence
1006
1399
- MPSC: Best for single-instance deployments without persistence needs
1007
-
6. **Cache TTL guidelines**:
1400
+
7. **Cache TTL guidelines**:
1008
1401
- Redis: Shorter TTLs (1-7 days) for frequently updated handles
1009
1402
- SQLite: Longer TTLs (7-90 days) for stable single-instance caching
1010
1403
- Memory: Short TTLs (5-30 minutes) as fallback
1404
+
8. **Jetstream guidelines**:
1405
+
- Production: Enable for real-time cache synchronization
1406
+
- High-traffic: Essential for reducing stale data
1407
+
- Development: Can be disabled for simpler testing
1408
+
- Monitor WebSocket connection health in production
1011
1409
1012
1410
### Monitoring
1013
1411
···
1019
1417
### Deployment
1020
1418
1021
1419
1. Use `.env` files for local development
1022
-
2. Use secrets management for production SERVICE_KEY
1420
+
2. Use secrets management for production configurations
1023
1421
3. Set resource limits in container orchestration
1024
1422
4. Use health checks to monitor service availability
1025
1423
5. Implement gradual rollouts with feature flags
+117
-19
docs/production-deployment.md
+117
-19
docs/production-deployment.md
···
42
42
# - localhost:3007 (for testing only)
43
43
HTTP_EXTERNAL=quickdid.example.com
44
44
45
-
# Private key for service identity (DID format)
46
-
# Generate a new key for production using atproto-identity tools
47
-
# SECURITY: Keep this key secure and never commit to version control
48
-
# Example formats:
49
-
# - did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK
50
-
# - did:plc:xyz123abc456
51
-
SERVICE_KEY=did:key:YOUR_PRODUCTION_KEY_HERE
52
-
53
45
# ----------------------------------------------------------------------------
54
46
# NETWORK CONFIGURATION
55
47
# ----------------------------------------------------------------------------
···
133
125
# Higher = less polling overhead, slower shutdown
134
126
QUEUE_REDIS_TIMEOUT=5
135
127
128
+
# Enable deduplication for Redis queue to prevent duplicate handles (default: false)
129
+
# When enabled, uses Redis SET with TTL to track handles being processed
130
+
# Prevents the same handle from being queued multiple times within the TTL window
131
+
QUEUE_REDIS_DEDUP_ENABLED=false
132
+
133
+
# TTL for Redis queue deduplication keys in seconds (default: 60)
134
+
# Range: 10-300 recommended
135
+
# Determines how long to prevent duplicate handle resolution requests
136
+
QUEUE_REDIS_DEDUP_TTL=60
137
+
136
138
# Worker ID for Redis queue (defaults to "worker1")
137
139
# Set this for predictable worker identification in multi-instance deployments
138
140
# Examples: worker-001, prod-us-east-1, $(hostname)
···
158
160
# Identifies your service to other AT Protocol services
159
161
# Default: Auto-generated with current version from Cargo.toml
160
162
# Format: quickdid/{version} (+https://github.com/smokesignal.events/quickdid)
161
-
USER_AGENT=quickdid/1.0.0-rc.3 (+https://quickdid.example.com)
163
+
USER_AGENT=quickdid/1.0.0-rc.5 (+https://quickdid.example.com)
162
164
163
165
# Custom DNS nameservers (comma-separated)
164
166
# Use for custom DNS resolution or to bypass local DNS
···
239
241
CACHE_MIN_FRESH=3600
240
242
241
243
# ----------------------------------------------------------------------------
244
+
# METRICS CONFIGURATION
245
+
# ----------------------------------------------------------------------------
246
+
247
+
# Metrics adapter type: 'noop' or 'statsd' (default: noop)
248
+
# - 'noop': No metrics collection (default)
249
+
# - 'statsd': Send metrics to StatsD server
250
+
METRICS_ADAPTER=statsd
251
+
252
+
# StatsD host and port (required when METRICS_ADAPTER=statsd)
253
+
# Format: hostname:port
254
+
# Examples:
255
+
# - localhost:8125 (local StatsD)
256
+
# - statsd.example.com:8125 (remote StatsD)
257
+
METRICS_STATSD_HOST=localhost:8125
258
+
259
+
# Bind address for StatsD UDP socket (default: [::]:0)
260
+
# Controls which local address to bind for sending UDP packets
261
+
# Examples:
262
+
# - [::]:0 (IPv6 any address, random port - default)
263
+
# - 0.0.0.0:0 (IPv4 any address, random port)
264
+
# - 192.168.1.100:0 (specific interface)
265
+
METRICS_STATSD_BIND=[::]:0
266
+
267
+
# Prefix for all metrics (default: quickdid)
268
+
# Used to namespace metrics in your monitoring system
269
+
# Examples:
270
+
# - quickdid (default)
271
+
# - prod.quickdid
272
+
# - us-east-1.quickdid
273
+
METRICS_PREFIX=quickdid
274
+
275
+
# Tags for all metrics (comma-separated key:value pairs)
276
+
# Added to all metrics for filtering and grouping
277
+
# Examples:
278
+
# - env:production,service:quickdid
279
+
# - env:staging,region:us-east-1,version:1.0.0
280
+
METRICS_TAGS=env:production,service:quickdid
281
+
282
+
# ----------------------------------------------------------------------------
283
+
# PROACTIVE REFRESH CONFIGURATION
284
+
# ----------------------------------------------------------------------------
285
+
286
+
# Enable proactive cache refresh (default: false)
287
+
# When enabled, cache entries nearing expiration are automatically refreshed
288
+
# in the background to prevent cache misses for frequently accessed handles
289
+
PROACTIVE_REFRESH_ENABLED=false
290
+
291
+
# Threshold for proactive refresh as percentage of TTL (default: 0.8)
292
+
# Range: 0.0-1.0 (0% to 100% of TTL)
293
+
# Example: 0.8 means refresh when 80% of TTL has elapsed
294
+
# Lower values = more aggressive refreshing, higher load
295
+
# Higher values = less aggressive refreshing, more cache misses
296
+
PROACTIVE_REFRESH_THRESHOLD=0.8
297
+
298
+
# ----------------------------------------------------------------------------
299
+
# JETSTREAM CONSUMER CONFIGURATION
300
+
# ----------------------------------------------------------------------------
301
+
302
+
# Enable Jetstream consumer for real-time cache updates (default: false)
303
+
# When enabled, connects to AT Protocol firehose for live updates
304
+
# Processes Account events (deleted/deactivated) and Identity events (handle changes)
305
+
# Automatically reconnects with exponential backoff on connection failures
306
+
JETSTREAM_ENABLED=false
307
+
308
+
# Jetstream WebSocket hostname (default: jetstream.atproto.tools)
309
+
# The firehose service to connect to for real-time AT Protocol events
310
+
# Examples:
311
+
# - jetstream.atproto.tools (production firehose)
312
+
# - jetstream-staging.atproto.tools (staging environment)
313
+
# - localhost:6008 (local development)
314
+
JETSTREAM_HOSTNAME=jetstream.atproto.tools
315
+
316
+
# ----------------------------------------------------------------------------
317
+
# STATIC FILES CONFIGURATION
318
+
# ----------------------------------------------------------------------------
319
+
320
+
# Directory path for serving static files (default: www)
321
+
# This directory should contain:
322
+
# - index.html (landing page)
323
+
# - .well-known/atproto-did (service DID identifier)
324
+
# - .well-known/did.json (DID document)
325
+
# In Docker, this defaults to /app/www
326
+
# You can mount custom files via Docker volumes
327
+
STATIC_FILES_DIR=/app/www
328
+
329
+
# ----------------------------------------------------------------------------
242
330
# PERFORMANCE TUNING
243
331
# ----------------------------------------------------------------------------
244
332
···
343
431
344
432
## Docker Compose Setup
345
433
346
-
### Redis-based Production Setup
434
+
### Redis-based Production Setup with Jetstream
347
435
348
-
Create a `docker-compose.yml` file for a complete production setup with Redis:
436
+
Create a `docker-compose.yml` file for a complete production setup with Redis and optional Jetstream consumer:
349
437
350
438
```yaml
351
439
version: '3.8'
···
434
522
driver: local
435
523
```
436
524
437
-
### SQLite-based Single-Instance Setup
525
+
### SQLite-based Single-Instance Setup with Jetstream
438
526
439
-
For single-instance deployments without Redis, create a simpler `docker-compose.sqlite.yml`:
527
+
For single-instance deployments without Redis, create a simpler `docker-compose.sqlite.yml` with optional Jetstream consumer:
440
528
441
529
```yaml
442
530
version: '3.8'
···
447
535
container_name: quickdid-sqlite
448
536
environment:
449
537
HTTP_EXTERNAL: quickdid.example.com
450
-
SERVICE_KEY: ${SERVICE_KEY}
451
538
HTTP_PORT: 8080
452
539
SQLITE_URL: sqlite:/data/quickdid.db
453
540
CACHE_TTL_MEMORY: 600
···
455
542
QUEUE_ADAPTER: sqlite
456
543
QUEUE_BUFFER_SIZE: 5000
457
544
QUEUE_SQLITE_MAX_SIZE: 10000
545
+
# Optional: Enable Jetstream for real-time cache updates
546
+
# JETSTREAM_ENABLED: true
547
+
# JETSTREAM_HOSTNAME: jetstream.atproto.tools
458
548
RUST_LOG: info
459
549
ports:
460
550
- "8080:8080"
···
648
738
649
739
### 1. Sensitive Configuration Protection
650
740
651
-
- **Never commit** the `SERVICE_KEY` to version control
741
+
- **Never commit** sensitive configuration to version control
652
742
- Store keys in a secure secret management system (e.g., HashiCorp Vault, AWS Secrets Manager)
653
743
- Rotate keys regularly
654
744
- Use different keys for different environments
···
693
783
docker logs quickdid
694
784
695
785
# Verify environment variables
696
-
docker exec quickdid env | grep -E "HTTP_EXTERNAL|SERVICE_KEY"
786
+
docker exec quickdid env | grep -E "HTTP_EXTERNAL|HTTP_PORT"
697
787
698
788
# Test Redis connectivity
699
789
docker exec quickdid redis-cli -h redis ping
···
836
926
2. **SQLite** (persistent, best for single-instance)
837
927
3. **Memory** (fast, but lost on restart)
838
928
929
+
**Real-time Updates with Jetstream**: When `JETSTREAM_ENABLED=true`, QuickDID:
930
+
- Connects to AT Protocol firehose for live cache updates
931
+
- Processes Account events to purge deleted/deactivated accounts
932
+
- Processes Identity events to update handle-to-DID mappings
933
+
- Automatically reconnects with exponential backoff on failures
934
+
- Tracks metrics for successful and failed event processing
935
+
839
936
**Recommendations by Deployment Type**:
840
937
- **Single instance, persistent**: Use SQLite for both caching and queuing (`SQLITE_URL=sqlite:./quickdid.db`, `QUEUE_ADAPTER=sqlite`)
841
938
- **Multi-instance, HA**: Use Redis for both caching and queuing (`REDIS_URL=redis://redis:6379/0`, `QUEUE_ADAPTER=redis`)
939
+
- **Real-time sync**: Enable Jetstream consumer (`JETSTREAM_ENABLED=true`) for live cache updates
842
940
- **Testing/development**: Use memory-only caching with MPSC queuing (`QUEUE_ADAPTER=mpsc`)
843
941
- **Hybrid**: Configure both Redis and SQLite for redundancy
844
942
···
878
976
### Required Fields
879
977
880
978
- **HTTP_EXTERNAL**: Must be provided
881
-
- **SERVICE_KEY**: Must be provided
979
+
- **SERVICE_KEY**: No longer required (removed)
882
980
883
981
### Value Constraints
884
982
···
917
1015
918
1016
```bash
919
1017
# Validate configuration without starting service
920
-
HTTP_EXTERNAL=test SERVICE_KEY=test quickdid --help
1018
+
HTTP_EXTERNAL=test quickdid --help
921
1019
922
1020
# Test with specific values (will fail validation)
923
1021
CACHE_TTL_MEMORY=0 quickdid --help
924
1022
925
1023
# Debug configuration parsing
926
-
RUST_LOG=debug HTTP_EXTERNAL=test SERVICE_KEY=test quickdid
1024
+
RUST_LOG=debug HTTP_EXTERNAL=test quickdid
927
1025
```
928
1026
929
1027
## Support and Resources
+714
docs/telegraf-timescaledb-metrics-guide.md
+714
docs/telegraf-timescaledb-metrics-guide.md
···
1
+
# Telegraf and TimescaleDB Metrics Collection Guide
2
+
3
+
This guide demonstrates how to set up a metrics collection pipeline using Telegraf to collect StatsD metrics and store them in PostgreSQL with TimescaleDB using Docker Compose.
4
+
5
+
## Overview
6
+
7
+
This setup creates a metrics pipeline that:
8
+
- Collects StatsD metrics via Telegraf on UDP port 8125
9
+
- Creates individual PostgreSQL tables for each metric type
10
+
- Stores metric tags as JSONB for flexible querying
11
+
- Automatically creates hypertables for time-series optimization
12
+
- Provides a complete Docker Compose configuration for easy deployment
13
+
14
+
## Important Note on Table Structure
15
+
16
+
The Telegraf PostgreSQL output plugin with the configuration in this guide creates **individual tables for each metric name**. For example:
17
+
- `quickdid.http.request.count` becomes table `"quickdid.http.request.count"`
18
+
- `quickdid.resolver.rate_limit.available_permits` becomes table `"quickdid.resolver.rate_limit.available_permits"`
19
+
20
+
Each table has the following structure:
21
+
- `time` (timestamptz) - The timestamp of the metric
22
+
- `tags` (jsonb) - All tags stored as a JSON object
23
+
- Metric-specific columns for values (e.g., `value`, `mean`, `p99`, etc.)
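To inspect the actual columns Telegraf created for a given metric, describe the table from psql (one-liner sketch; container and database names match the compose file below):

```bash
docker exec -it timescaledb psql -U postgres -d metrics -c '\d "quickdid.http.request.count"'
```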
24
+
25
+
## Prerequisites
26
+
27
+
- Docker and Docker Compose installed
28
+
- Basic understanding of StatsD metrics format
29
+
- Familiarity with PostgreSQL/TimescaleDB concepts
30
+
31
+
## Project Structure
32
+
33
+
Create the following directory structure:
34
+
35
+
```
36
+
metrics-stack/
37
+
├── docker-compose.yml
38
+
├── telegraf/
39
+
│   └── telegraf.conf
40
+
├── test-scripts/
41
+
│   ├── send-metrics.sh
42
+
│   └── verify-queries.sql
43
+
└── .env
44
+
```
45
+
46
+
## Configuration Files
47
+
48
+
### 1. Environment Variables (.env)
49
+
50
+
Create a `.env` file to store sensitive configuration:
51
+
52
+
```env
53
+
# PostgreSQL/TimescaleDB Configuration
54
+
POSTGRES_DB=metrics
55
+
POSTGRES_USER=postgres
56
+
POSTGRES_PASSWORD=secretpassword
57
+
58
+
# Telegraf Database User
59
+
TELEGRAF_DB_USER=postgres
60
+
TELEGRAF_DB_PASSWORD=secretpassword
61
+
62
+
# TimescaleDB Settings
63
+
TIMESCALE_TELEMETRY=off
64
+
```
65
+
66
+
### 2. Telegraf Configuration (telegraf/telegraf.conf)
67
+
68
+
Create the Telegraf configuration file:
69
+
70
+
```toml
71
+
# Global Telegraf Agent Configuration
72
+
[agent]
73
+
interval = "10s"
74
+
round_interval = true
75
+
metric_batch_size = 1000
76
+
metric_buffer_limit = 10000
77
+
collection_jitter = "0s"
78
+
flush_interval = "10s"
79
+
flush_jitter = "0s"
80
+
precision = ""
81
+
debug = false
82
+
quiet = false
83
+
hostname = "telegraf-agent"
84
+
omit_hostname = false
85
+
86
+
# StatsD Input Plugin
87
+
[[inputs.statsd]]
88
+
service_address = ":8125" # Listen on UDP port 8125 for StatsD metrics
89
+
protocol = "udp"
90
+
delete_gauges = true
91
+
delete_counters = true
92
+
delete_sets = true
93
+
delete_timings = true
94
+
percentiles = [50, 90, 95, 99]
95
+
metric_separator = "."
96
+
allowed_pending_messages = 10000
97
+
datadog_extensions = true
98
+
datadog_distributions = true
99
+
100
+
# PostgreSQL (TimescaleDB) Output Plugin
101
+
[[outputs.postgresql]]
102
+
connection = "host=timescaledb user=${TELEGRAF_DB_USER} password=${TELEGRAF_DB_PASSWORD} dbname=${POSTGRES_DB} sslmode=disable"
103
+
schema = "public"
104
+
105
+
# Create individual tables for each metric with hypertable support
106
+
create_templates = [
107
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
108
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
109
+
]
110
+
111
+
# Store all tags as JSONB for flexible querying
112
+
tags_as_jsonb = true
113
+
114
+
# Keep fields as separate columns for better performance on aggregations
115
+
fields_as_jsonb = false
116
+
```
117
+
118
+
### 3. Docker Compose Configuration (docker-compose.yml)
119
+
120
+
Create the Docker Compose file:
121
+
122
+
```yaml
123
+
version: '3.8'
124
+
125
+
services:
126
+
timescaledb:
127
+
image: timescale/timescaledb:latest-pg17
128
+
container_name: timescaledb
129
+
restart: unless-stopped
130
+
environment:
131
+
POSTGRES_DB: ${POSTGRES_DB}
132
+
POSTGRES_USER: ${POSTGRES_USER}
133
+
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
134
+
TIMESCALE_TELEMETRY: ${TIMESCALE_TELEMETRY}
135
+
ports:
136
+
- "5442:5432"
137
+
volumes:
138
+
- timescale_data:/home/postgres/pgdata/data
139
+
- ./init-scripts:/docker-entrypoint-initdb.d:ro
140
+
command:
141
+
- postgres
142
+
- -c
143
+
- shared_buffers=1GB
144
+
- -c
145
+
- effective_cache_size=3GB
146
+
- -c
147
+
- maintenance_work_mem=512MB
148
+
- -c
149
+
- work_mem=32MB
150
+
- -c
151
+
- timescaledb.max_background_workers=8
152
+
- -c
153
+
- max_parallel_workers_per_gather=2
154
+
- -c
155
+
- max_parallel_workers=8
156
+
healthcheck:
157
+
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
158
+
interval: 10s
159
+
timeout: 5s
160
+
retries: 5
161
+
networks:
162
+
- metrics_network
163
+
164
+
telegraf:
165
+
image: telegraf:1.35
166
+
container_name: telegraf
167
+
restart: unless-stopped
168
+
environment:
169
+
TELEGRAF_DB_USER: ${TELEGRAF_DB_USER}
170
+
TELEGRAF_DB_PASSWORD: ${TELEGRAF_DB_PASSWORD}
171
+
POSTGRES_DB: ${POSTGRES_DB}
172
+
ports:
173
+
- "8125:8125/udp" # StatsD UDP port
174
+
volumes:
175
+
- ./telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro
176
+
depends_on:
177
+
timescaledb:
178
+
condition: service_healthy
179
+
networks:
180
+
- metrics_network
181
+
command: ["telegraf", "--config", "/etc/telegraf/telegraf.conf"]
182
+
183
+
redis:
184
+
image: redis:7-alpine
185
+
container_name: redis
186
+
restart: unless-stopped
187
+
ports:
188
+
- "6379:6379"
189
+
volumes:
190
+
- redis_data:/data
191
+
command: redis-server --appendonly yes --appendfsync everysec
192
+
healthcheck:
193
+
test: ["CMD", "redis-cli", "ping"]
194
+
interval: 10s
195
+
timeout: 5s
196
+
retries: 5
197
+
networks:
198
+
- metrics_network
199
+
200
+
networks:
201
+
metrics_network:
202
+
driver: bridge
203
+
204
+
volumes:
205
+
timescale_data:
206
+
redis_data:
207
+
```
208
+
209
+
### 4. Database Initialization Script (optional)
210
+
211
+
Create `init-scripts/01-init.sql` to set up the TimescaleDB extension:
212
+
213
+
```sql
214
+
-- Enable TimescaleDB extension
215
+
CREATE EXTENSION IF NOT EXISTS timescaledb;
216
+
217
+
-- Enable additional useful extensions
218
+
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
219
+
```
220
+
221
+
## Test Scripts
222
+
223
+
### 1. Send Test Metrics Script (test-scripts/send-metrics.sh)
224
+
225
+
Create a script to send various types of metrics:
226
+
227
+
```bash
228
+
#!/bin/bash
229
+
230
+
# Send test metrics to StatsD/Telegraf
231
+
232
+
echo "Sending test metrics to StatsD on localhost:8125..."
233
+
234
+
# Counter metrics
235
+
for i in {1..10}; do
236
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
237
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
238
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:404" | nc -u -w0 localhost 8125
239
+
done
240
+
241
+
# Gauge metrics
242
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
243
+
echo "quickdid.resolver.rate_limit.available_permits:8|g" | nc -u -w0 localhost 8125
244
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
245
+
246
+
# Timing metrics (in milliseconds)
247
+
for i in {1..20}; do
248
+
duration=$((RANDOM % 100 + 10))
249
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
250
+
done
251
+
252
+
for i in {1..10}; do
253
+
duration=$((RANDOM % 200 + 50))
254
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
255
+
done
256
+
257
+
# Histogram metrics
258
+
for i in {1..15}; do
259
+
resolution_time=$((RANDOM % 500 + 50))
260
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
261
+
echo "quickdid.resolver.resolution_time:$((resolution_time * 2))|h|#resolver:base" | nc -u -w0 localhost 8125
262
+
done
263
+
264
+
# Cache metrics
265
+
echo "quickdid.cache.hit.count:45|c|#cache_type:redis" | nc -u -w0 localhost 8125
266
+
echo "quickdid.cache.miss.count:5|c|#cache_type:redis" | nc -u -w0 localhost 8125
267
+
echo "quickdid.cache.size:1024|g|#cache_type:memory" | nc -u -w0 localhost 8125
268
+
269
+
echo "Metrics sent! Wait 15 seconds for Telegraf to flush..."
270
+
sleep 15
271
+
echo "Done!"
272
+
```
273
+
274
+
### 2. Verify Queries Script (test-scripts/verify-queries.sql)
275
+
276
+
Create a SQL script to verify all queries work correctly:
277
+
278
+
```sql
279
+
-- Test script to verify all metrics queries work correctly
280
+
-- Run this after sending test metrics with send-metrics.sh
281
+
282
+
\echo '===== CHECKING AVAILABLE TABLES ====='
283
+
SELECT table_name
284
+
FROM information_schema.tables
285
+
WHERE table_schema = 'public'
286
+
AND table_name LIKE 'quickdid%'
287
+
ORDER BY table_name;
288
+
289
+
\echo ''
290
+
\echo '===== CHECKING TABLE STRUCTURES ====='
291
+
\echo 'Structure of quickdid.http.request.count table:'
292
+
\d "quickdid.http.request.count"
293
+
294
+
\echo ''
295
+
\echo 'Structure of quickdid.http.request.duration_ms table:'
296
+
\d "quickdid.http.request.duration_ms"
297
+
298
+
\echo ''
299
+
\echo '===== QUERY 1: Recent HTTP Request Counts ====='
300
+
SELECT
301
+
time,
302
+
tags,
303
+
tags->>'method' as method,
304
+
tags->>'path' as path,
305
+
tags->>'status' as status,
306
+
value
307
+
FROM "quickdid.http.request.count"
308
+
WHERE time > NOW() - INTERVAL '1 hour'
309
+
ORDER BY time DESC
310
+
LIMIT 10;
311
+
312
+
\echo ''
313
+
\echo '===== QUERY 2: HTTP Request Duration Statistics by Endpoint ====='
314
+
SELECT
315
+
time_bucket('1 minute', time) AS minute,
316
+
tags->>'method' as method,
317
+
tags->>'path' as path,
318
+
tags->>'status' as status,
319
+
COUNT(*) as request_count,
320
+
AVG(mean) as avg_duration_ms,
321
+
MAX(p99) as p99_duration_ms,
322
+
MIN(mean) as min_duration_ms
323
+
FROM "quickdid.http.request.duration_ms"
324
+
WHERE time > NOW() - INTERVAL '1 hour'
325
+
AND tags IS NOT NULL
326
+
GROUP BY minute, tags->>'method', tags->>'path', tags->>'status'
327
+
ORDER BY minute DESC
328
+
LIMIT 10;
329
+
330
+
\echo ''
331
+
\echo '===== QUERY 3: Rate Limiter Status Over Time ====='
332
+
SELECT
333
+
time,
334
+
value as available_permits
335
+
FROM "quickdid.resolver.rate_limit.available_permits"
336
+
WHERE time > NOW() - INTERVAL '1 hour'
337
+
ORDER BY time DESC
338
+
LIMIT 10;
339
+
340
+
\echo ''
341
+
\echo '===== QUERY 4: Resolver Performance Comparison ====='
342
+
SELECT
343
+
tags->>'resolver' as resolver_type,
344
+
COUNT(*) as sample_count,
345
+
AVG(mean) as avg_resolution_time_ms,
346
+
MAX(p99) as p99_resolution_time_ms,
347
+
MIN(mean) as min_resolution_time_ms
348
+
FROM "quickdid.resolver.resolution_time"
349
+
WHERE time > NOW() - INTERVAL '1 hour'
350
+
AND tags->>'resolver' IS NOT NULL
351
+
GROUP BY tags->>'resolver'
352
+
ORDER BY avg_resolution_time_ms;
353
+
354
+
\echo ''
355
+
\echo '===== QUERY 5: Cache Hit Rate Analysis ====='
356
+
WITH cache_stats AS (
357
+
SELECT
358
+
'hits' as metric_type,
359
+
SUM(value) as total_count
360
+
FROM "quickdid.cache.hit.count"
361
+
WHERE time > NOW() - INTERVAL '1 hour'
362
+
UNION ALL
363
+
SELECT
364
+
'misses' as metric_type,
365
+
SUM(value) as total_count
366
+
FROM "quickdid.cache.miss.count"
367
+
WHERE time > NOW() - INTERVAL '1 hour'
368
+
)
369
+
SELECT
370
+
SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) as total_hits,
371
+
SUM(CASE WHEN metric_type = 'misses' THEN total_count ELSE 0 END) as total_misses,
372
+
CASE
373
+
WHEN SUM(total_count) > 0 THEN
374
+
ROUND(100.0 * SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) / SUM(total_count), 2)
375
+
ELSE 0
376
+
END as hit_rate_percentage
377
+
FROM cache_stats;
378
+
379
+
\echo ''
380
+
\echo '===== QUERY 6: Hypertable Information ====='
381
+
SELECT
382
+
hypertable_schema,
383
+
hypertable_name,
384
+
owner,
385
+
num_dimensions,
386
+
num_chunks,
387
+
compression_enabled
388
+
FROM timescaledb_information.hypertables
389
+
WHERE hypertable_name LIKE 'quickdid%'
390
+
ORDER BY hypertable_name;
391
+
392
+
\echo ''
393
+
\echo '===== QUERY 7: HTTP Error Rate by Endpoint ====='
394
+
WITH status_counts AS (
395
+
SELECT
396
+
time_bucket('5 minutes', time) as period,
397
+
tags->>'path' as path,
398
+
CASE
399
+
WHEN (tags->>'status')::int >= 400 THEN 'error'
400
+
ELSE 'success'
401
+
END as status_category,
402
+
SUM(value) as request_count
403
+
FROM "quickdid.http.request.count"
404
+
WHERE time > NOW() - INTERVAL '1 hour'
405
+
GROUP BY period, path, status_category
406
+
)
407
+
SELECT
408
+
period,
409
+
path,
410
+
SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) as error_count,
411
+
SUM(CASE WHEN status_category = 'success' THEN request_count ELSE 0 END) as success_count,
412
+
CASE
413
+
WHEN SUM(request_count) > 0 THEN
414
+
ROUND(100.0 * SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) / SUM(request_count), 2)
415
+
ELSE 0
416
+
END as error_rate_percentage
417
+
FROM status_counts
418
+
GROUP BY period, path
419
+
HAVING SUM(request_count) > 0
420
+
ORDER BY period DESC, error_rate_percentage DESC;
421
+
422
+
\echo ''
423
+
\echo '===== TEST COMPLETED ====='
424
+
```
425
+
426
+
## Usage
427
+
428
+
### Starting the Stack
429
+
430
+
1. Navigate to your project directory:
431
+
```bash
432
+
cd metrics-stack
433
+
```
434
+
435
+
2. Make the test scripts executable:
436
+
```bash
437
+
chmod +x test-scripts/send-metrics.sh
438
+
```
439
+
440
+
3. Start the services:
441
+
```bash
442
+
docker-compose up -d
443
+
```
444
+
445
+
4. Check the logs to ensure everything is running:
446
+
```bash
447
+
docker-compose logs -f
448
+
```
449
+
450
+
5. Wait for services to be fully ready (about 30 seconds)
451
+
452
+
### Running the Test Suite
453
+
454
+
1. Send test metrics:
455
+
```bash
456
+
./test-scripts/send-metrics.sh
457
+
```
458
+
459
+
2. Verify all queries work:
460
+
```bash
461
+
docker exec -i timescaledb psql -U postgres -d metrics < test-scripts/verify-queries.sql
462
+
```
463
+
464
+
### Manual Querying
465
+
466
+
Connect to TimescaleDB to run queries manually:
467
+
468
+
```bash
469
+
# Connect to the database
470
+
docker exec -it timescaledb psql -U postgres -d metrics
471
+
472
+
# List all metric tables
473
+
\dt "quickdid*"
474
+
475
+
# Describe a specific table structure
476
+
\d "quickdid.http.request.duration_ms"
477
+
478
+
# Query with JSONB tag filtering
479
+
SELECT
480
+
time,
481
+
tags->>'method' as method,
482
+
mean as avg_ms,
483
+
"99_percentile" as p99_ms
484
+
FROM "quickdid.http.request.duration_ms"
485
+
WHERE tags @> '{"method": "GET"}'::jsonb
486
+
AND time > NOW() - INTERVAL '1 hour'
487
+
ORDER BY time DESC
488
+
LIMIT 10;
489
+
```
490
+
491
+
## Advanced Configuration
492
+
493
+
### Continuous Aggregates for Performance
494
+
495
+
Create continuous aggregates for frequently queried data:
496
+
497
+
```sql
498
+
-- Create hourly aggregates for HTTP metrics
499
+
CREATE MATERIALIZED VIEW http_metrics_hourly
500
+
WITH (timescaledb.continuous) AS
501
+
SELECT
502
+
time_bucket('1 hour', time) AS hour,
503
+
tags->>'method' as method,
504
+
tags->>'path' as path,
505
+
tags->>'status' as status,
506
+
COUNT(*) as request_count,
507
+
AVG(mean) as avg_duration_ms,
508
+
MAX("99_percentile") as p99_duration_ms,
509
+
MIN(mean) as min_duration_ms
510
+
FROM "quickdid.http.request.duration_ms"
511
+
WHERE tags IS NOT NULL
512
+
GROUP BY hour, method, path, status
513
+
WITH NO DATA;
514
+
515
+
-- Add refresh policy
516
+
SELECT add_continuous_aggregate_policy('http_metrics_hourly',
517
+
start_offset => INTERVAL '3 hours',
518
+
end_offset => INTERVAL '1 hour',
519
+
schedule_interval => INTERVAL '1 hour');
520
+
521
+
-- Manually refresh to populate initial data
522
+
CALL refresh_continuous_aggregate('http_metrics_hourly', NULL, NULL);
523
+
524
+
-- Query the aggregate
525
+
SELECT * FROM http_metrics_hourly
526
+
ORDER BY hour DESC, request_count DESC
527
+
LIMIT 20;
528
+
```
529
+
530
+
### Data Retention Policies
531
+
532
+
Set up automatic data retention:
533
+
534
+
```sql
535
+
-- Add retention policy to drop data older than 30 days
536
+
SELECT add_retention_policy('"quickdid.http.request.count"', INTERVAL '30 days');
537
+
SELECT add_retention_policy('"quickdid.http.request.duration_ms"', INTERVAL '30 days');
538
+
539
+
-- View retention policies
540
+
SELECT js.* FROM timescaledb_information.job_stats js
541
+
JOIN timescaledb_information.jobs j ON js.job_id = j.job_id
542
+
WHERE j.proc_name LIKE '%retention%';
543
+
```
544
+
545
+
### Compression for Storage Optimization
546
+
547
+
Enable compression for older data:
548
+
549
+
```sql
550
+
-- Enable compression on a hypertable
551
+
ALTER TABLE "quickdid.http.request.duration_ms" SET (
552
+
timescaledb.compress,
553
+
timescaledb.compress_segmentby = 'tags'
554
+
);
555
+
556
+
-- Add compression policy (compress chunks older than 7 days)
557
+
SELECT add_compression_policy('"quickdid.http.request.duration_ms"', INTERVAL '7 days');
558
+
559
+
-- Manually compress old chunks
560
+
SELECT compress_chunk(format('%I.%I', c.chunk_schema, c.chunk_name)::regclass)
561
+
FROM timescaledb_information.chunks c
562
+
WHERE c.hypertable_name = 'quickdid.http.request.duration_ms'
563
+
AND c.range_end < NOW() - INTERVAL '7 days'
564
+
AND NOT c.is_compressed;
565
+
566
+
-- Check compression status
567
+
SELECT
568
+
    total_chunks,
    number_compressed_chunks,
    pg_size_pretty(before_compression_total_bytes) AS before_total,
    pg_size_pretty(after_compression_total_bytes) AS after_total
FROM hypertable_compression_stats('"quickdid.http.request.duration_ms"');
574
+
```
575
+
576
+
## Monitoring and Maintenance
577
+
578
+
### Health Checks
579
+
580
+
```sql
581
+
-- Check chunk distribution
582
+
SELECT
583
+
hypertable_name,
584
+
chunk_name,
585
+
range_start,
586
+
range_end,
587
+
is_compressed,
588
+
    pg_size_pretty(pg_total_relation_size(format('%I.%I', chunk_schema, chunk_name)::regclass)) as size
589
+
FROM timescaledb_information.chunks
590
+
WHERE hypertable_name LIKE 'quickdid%'
591
+
ORDER BY hypertable_name, range_start DESC
592
+
LIMIT 20;
593
+
594
+
-- Check background jobs
595
+
SELECT
596
+
    j.job_id,
    j.application_name,
    j.proc_name,
    j.schedule_interval,
    js.last_run_started_at,
    js.last_successful_finish,
    js.next_start
FROM timescaledb_information.jobs j
JOIN timescaledb_information.job_stats js ON js.job_id = j.job_id
ORDER BY j.job_id;
605
+
606
+
-- Check table sizes
607
+
SELECT
608
+
    hypertable_name,
    pg_size_pretty(hypertable_size(format('%I.%I', hypertable_schema, hypertable_name)::regclass)) AS total_size
612
+
FROM timescaledb_information.hypertables
613
+
WHERE hypertable_name LIKE 'quickdid%';
614
+
```
615
+
616
+
### Troubleshooting
617
+
618
+
1. **Tables not being created:**
619
+
- Check Telegraf logs: `docker-compose logs telegraf | grep -i error`
620
+
- Verify PostgreSQL connectivity: `docker exec telegraf telegraf --test`
621
+
- Ensure metrics are being received: `docker-compose logs telegraf | grep statsd`
622
+
623
+
2. **Queries returning no data:**
624
+
- Verify tables exist: `\dt "quickdid*"` in psql
625
+
- Check table contents: `SELECT COUNT(*) FROM "quickdid.http.request.count";`
626
+
- Verify time ranges in WHERE clauses
627
+
628
+
3. **Performance issues:**
629
+
- Check if hypertables are created: Query `timescaledb_information.hypertables`
630
+
- Verify compression is working if enabled
631
+
- Consider creating appropriate indexes on JSONB paths:
632
+
```sql
633
+
CREATE INDEX idx_http_method ON "quickdid.http.request.duration_ms" ((tags->>'method'));
634
+
CREATE INDEX idx_http_path ON "quickdid.http.request.duration_ms" ((tags->>'path'));
635
+
```
636
+
637
+
## Integration with QuickDID
638
+
639
+
To integrate with QuickDID, configure it to send metrics to the Telegraf StatsD endpoint:
640
+
641
+
```bash
642
+
# Set environment variables for QuickDID
643
+
export METRICS_ADAPTER=statsd
644
+
export METRICS_STATSD_HOST=localhost:8125
645
+
export METRICS_PREFIX=quickdid
646
+
export METRICS_TAGS=env:production,service:quickdid
647
+
648
+
# Start QuickDID
649
+
cargo run
650
+
```
651
+
652
+
QuickDID will automatically send metrics to Telegraf, which will store them in TimescaleDB for analysis.
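As a quick end-to-end check, you can emit one metric by hand and confirm it lands in TimescaleDB (a sketch reusing commands from the test scripts above):

```bash
# Emit a counter, wait for a Telegraf flush, then count rows
echo "quickdid.cache.hit.count:1|c|#cache_type:redis" | nc -u -w0 localhost 8125
sleep 15
docker exec -it timescaledb psql -U postgres -d metrics \
  -c 'SELECT COUNT(*) FROM "quickdid.cache.hit.count";'
```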
653
+
654
+
## Key Differences from Generic Metrics Table Approach
655
+
656
+
This configuration creates **individual tables per metric** instead of a single generic metrics table. Benefits include:
657
+
658
+
1. **Better performance**: Each metric has its own optimized schema
659
+
2. **Clearer data model**: Tables directly represent metrics
660
+
3. **Easier querying**: No need to filter by metric name
661
+
4. **Type safety**: Each metric's fields have appropriate types
662
+
5. **Efficient compression**: Per-metric compression strategies
663
+
664
+
Trade-offs:
665
+
- More tables to manage (mitigated by TimescaleDB automation)
666
+
- Need to know metric names upfront for queries
667
+
- Schema changes require table alterations
668
+
669
+
## Security Considerations
670
+
671
+
1. **Use strong passwords:** Update the default passwords in `.env`
672
+
2. **Enable SSL:** Configure `sslmode=require` in production
673
+
3. **Network isolation:** Use Docker networks to isolate services
674
+
4. **Access control:** Create separate database users with minimal permissions:
675
+
```sql
676
+
CREATE USER metrics_reader WITH PASSWORD 'readonly_password';
677
+
GRANT CONNECT ON DATABASE metrics TO metrics_reader;
678
+
GRANT USAGE ON SCHEMA public TO metrics_reader;
679
+
GRANT SELECT ON ALL TABLES IN SCHEMA public TO metrics_reader;
680
+
```
681
+
5. **Regular updates:** Keep Docker images updated for security patches
682
+
683
+
## Performance Tuning
684
+
685
+
### PostgreSQL/TimescaleDB Settings
686
+
687
+
The docker-compose.yml includes optimized settings. Adjust based on your hardware:
688
+
689
+
- `shared_buffers`: 25% of system RAM
690
+
- `effective_cache_size`: 75% of system RAM
691
+
- `maintenance_work_mem`: 5% of system RAM
692
+
- `work_mem`: RAM / max_connections / 2
693
+
694
+
### Telegraf Buffer Settings
695
+
696
+
For high-volume metrics, adjust in telegraf.conf:
697
+
698
+
```toml
699
+
[agent]
700
+
metric_batch_size = 5000 # Increase for high volume
701
+
metric_buffer_limit = 100000 # Increase buffer size
702
+
flush_interval = "5s" # Decrease for more frequent writes
703
+
```
704
+
705
+
## Conclusion
706
+
707
+
This setup provides a robust metrics collection and storage solution with:
708
+
- **Individual metric tables** for optimal performance and clarity
709
+
- **JSONB tag storage** for flexible querying
710
+
- **TimescaleDB hypertables** for efficient time-series storage
711
+
- **Comprehensive test suite** to verify functionality
712
+
- **Production-ready configuration** with compression and retention policies
713
+
714
+
The system correctly handles StatsD metrics from QuickDID and provides powerful querying capabilities through PostgreSQL's JSONB support and TimescaleDB's time-series functions.
+59
generate-wellknown.sh
+59
generate-wellknown.sh
···
1
+
#!/bin/bash
2
+
3
+
# Script to generate .well-known static files based on QuickDID configuration
4
+
# Usage: HTTP_EXTERNAL=quickdid.smokesignal.tools ./generate-wellknown.sh
5
+
#
6
+
# Note: Since we no longer process SERVICE_KEY, you'll need to manually
7
+
# add the public key to the did.json file if you need DID document support.
8
+
9
+
set -e
10
+
11
+
# Check required environment variables
12
+
if [ -z "$HTTP_EXTERNAL" ]; then
13
+
echo "Error: HTTP_EXTERNAL environment variable is required"
14
+
echo "Usage: HTTP_EXTERNAL=example.com ./generate-wellknown.sh"
15
+
exit 1
16
+
fi
17
+
18
+
# Ensure www/.well-known directory exists
19
+
mkdir -p www/.well-known
20
+
21
+
# Generate service DID from HTTP_EXTERNAL
22
+
if [[ "$HTTP_EXTERNAL" == *":"* ]]; then
23
+
# Contains port - URL encode the colon
24
+
SERVICE_DID="did:web:${HTTP_EXTERNAL//:/%3A}"
25
+
else
26
+
SERVICE_DID="did:web:$HTTP_EXTERNAL"
27
+
fi
28
+
29
+
echo "Generating .well-known files for $SERVICE_DID"
30
+
31
+
# Write atproto-did file
32
+
echo "$SERVICE_DID" > www/.well-known/atproto-did
33
+
echo "Created: www/.well-known/atproto-did"
34
+
35
+
# Create a basic did.json template
36
+
# Note: You'll need to manually add the publicKeyMultibase if you need DID document support
37
+
38
+
cat > www/.well-known/did.json <<EOF
39
+
{
40
+
"@context": [
41
+
"https://www.w3.org/ns/did/v1",
42
+
"https://w3id.org/security/multikey/v1"
43
+
],
44
+
"id": "$SERVICE_DID",
45
+
"verificationMethod": [],
46
+
"service": [
47
+
{
48
+
"id": "${SERVICE_DID}#quickdid",
49
+
"type": "QuickDIDService",
50
+
"serviceEndpoint": "https://${HTTP_EXTERNAL}"
51
+
}
52
+
]
53
+
}
54
+
EOF
55
+
56
+
echo "Created: www/.well-known/did.json"
57
+
echo ""
58
+
echo "Note: The did.json file is a basic template. If you need DID document support,"
59
+
echo "you'll need to manually add the verificationMethod with your public key."
+18
railway-resources/telegraf/Dockerfile
+18
railway-resources/telegraf/Dockerfile
···
1
+
# Telegraf Dockerfile for Railway Deployment
2
+
FROM telegraf:1.33-alpine
3
+
4
+
# Install additional packages for health checks
5
+
RUN apk add --no-cache curl postgresql-client
6
+
7
+
# Create directories for custom configs
8
+
RUN mkdir -p /etc/telegraf/telegraf.d
9
+
10
+
# Copy main configuration
11
+
COPY railway-resources/telegraf/telegraf.conf /etc/telegraf/telegraf.conf
12
+
13
+
# Health check - test configuration validity
14
+
HEALTHCHECK --interval=30s --timeout=5s --start-period=10s --retries=3 \
15
+
CMD telegraf --config /etc/telegraf/telegraf.conf --test || exit 1
16
+
17
+
# Run telegraf with custom config
18
+
CMD ["telegraf", "--config", "/etc/telegraf/telegraf.conf", "--config-directory", "/etc/telegraf/telegraf.d"]
+48
railway-resources/telegraf/railway.toml
···
1
+
# Railway configuration for Telegraf service
2
+
# This file configures how Railway builds and deploys the Telegraf metrics collector
3
+
4
+
[build]
5
+
# Use Dockerfile for building
6
+
builder = "DOCKERFILE"
7
+
dockerfilePath = "railway-resources/telegraf/Dockerfile"
8
+
9
+
[deploy]
10
+
# Start command (overrides the Dockerfile CMD)
11
+
startCommand = "telegraf --config /etc/telegraf/telegraf.conf"
12
+
13
+
# No health check path for Telegraf (uses container health check)
14
+
# healthcheckPath = ""
15
+
16
+
# Restart policy
17
+
restartPolicyType = "ALWAYS"
18
+
restartPolicyMaxRetries = 10
19
+
20
+
# Resource limits
21
+
memoryLimitMB = 1024
22
+
cpuLimitCores = 1
23
+
24
+
# Scaling (Telegraf should be a singleton)
25
+
minReplicas = 1
26
+
maxReplicas = 1
27
+
28
+
# Graceful shutdown
29
+
stopTimeout = 10
30
+
31
+
# Service configuration for StatsD UDP endpoint
32
+
[[services]]
33
+
name = "telegraf-statsd"
34
+
port = 8125
35
+
protocol = "UDP"
36
+
internalPort = 8125
37
+
38
+
# Service configuration for Telegraf HTTP API (optional)
39
+
[[services]]
40
+
name = "telegraf-http"
41
+
port = 8086
42
+
protocol = "HTTP"
43
+
internalPort = 8086
44
+
45
+
# Environment-specific settings
46
+
[environments.production]
47
+
memoryLimitMB = 512
48
+
cpuLimitCores = 1
+77
railway-resources/telegraf/telegraf.conf
···
1
+
# Telegraf Configuration for QuickDID Metrics Collection
2
+
# Optimized for Railway deployment with TimescaleDB
3
+
4
+
# Global tags applied to all metrics
5
+
[global_tags]
6
+
environment = "${ENVIRONMENT:-production}"
7
+
service = "quickdid"
8
+
region = "${RAILWAY_REGION:-us-west1}"
9
+
deployment_id = "${RAILWAY_DEPLOYMENT_ID:-unknown}"
10
+
11
+
# Agent configuration
12
+
[agent]
13
+
## Default data collection interval
14
+
interval = "10s"
15
+
16
+
## Rounds collection interval to interval
17
+
round_interval = true
18
+
19
+
## Telegraf will send metrics to outputs in batches of at most metric_batch_size metrics.
20
+
metric_batch_size = 1000
21
+
22
+
## Maximum number of unwritten metrics per output
23
+
metric_buffer_limit = 10000
24
+
25
+
## Collection jitter is used to jitter the collection by a random amount
26
+
collection_jitter = "0s"
27
+
28
+
## Default flushing interval for all outputs
29
+
flush_interval = "10s"
30
+
31
+
## Jitter the flush interval by a random amount
32
+
flush_jitter = "0s"
33
+
34
+
## Precision of timestamps
35
+
precision = "1ms"
36
+
37
+
## Log level
38
+
debug = ${TELEGRAF_DEBUG:-false}
39
+
quiet = ${TELEGRAF_QUIET:-false}
40
+
41
+
## Override default hostname
42
+
hostname = "${HOSTNAME:-telegraf}"
43
+
44
+
## If true, do not set the "host" tag in the telegraf agent
45
+
omit_hostname = false
46
+
47
+
###############################################################################
48
+
# INPUT PLUGINS #
49
+
###############################################################################
50
+
51
+
# StatsD Server - receives metrics from QuickDID
52
+
[[inputs.statsd]]
53
+
service_address = ":8125" # Listen on UDP port 8125 for StatsD metrics
54
+
protocol = "udp"
55
+
delete_gauges = true
56
+
delete_counters = true
57
+
delete_sets = true
58
+
delete_timings = true
59
+
percentiles = [50, 90, 95, 99]
60
+
metric_separator = "."
61
+
allowed_pending_messages = 100
62
+
datadog_extensions = true
63
+
datadog_distributions = true
64
+
65
+
[[outputs.postgresql]]
66
+
connection = "${DATABASE_URL}"
67
+
68
+
schema = "public"
69
+
70
+
create_templates = [
71
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
72
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
73
+
]
74
+
75
+
tags_as_jsonb = true
76
+
77
+
fields_as_jsonb = false
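The statsd input above accepts plain UDP datagrams, which makes smoke-testing easy. A hypothetical sketch that hand-rolls two Datadog-style datagrams; the metric names mirror counters and timers emitted in src/handle_resolver (with the default quickdid METRICS_PREFIX), while the env tag is illustrative:

```rust
// Hypothetical smoke test: send two StatsD datagrams to the listener above.
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    let socket = UdpSocket::bind("0.0.0.0:0")?; // ephemeral local port

    // Counter increment: <name>:<value>|c|#<tag>:<value>
    socket.send_to(
        b"quickdid.resolver.memory.cache_hit:1|c|#env:production",
        "127.0.0.1:8125",
    )?;

    // Timing sample in milliseconds: <name>:<value>|ms|#<tags>
    socket.send_to(
        b"quickdid.resolver.base.duration_ms:42|ms|#success:1",
        "127.0.0.1:8125",
    )?;
    Ok(())
}
```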
+285
-65
src/bin/quickdid.rs
···
1
1
use anyhow::Result;
2
2
use atproto_identity::{
3
3
config::{CertificateBundles, DnsNameservers},
4
-
key::{identify_key, to_public},
5
4
resolve::HickoryDnsResolver,
6
5
};
6
+
use atproto_jetstream::{Consumer as JetstreamConsumer, ConsumerTaskConfig};
7
+
use atproto_lexicon::resolve::{DefaultLexiconResolver, LexiconResolver};
7
8
use quickdid::{
8
9
cache::create_redis_pool,
9
10
config::Config,
10
11
handle_resolver::{
11
-
create_base_resolver, create_caching_resolver, create_rate_limited_resolver_with_timeout,
12
+
create_base_resolver, create_caching_resolver,
13
+
create_proactive_refresh_resolver_with_metrics, create_rate_limited_resolver_with_timeout,
12
14
create_redis_resolver_with_ttl, create_sqlite_resolver_with_ttl,
13
15
},
14
16
handle_resolver_task::{HandleResolverTaskConfig, create_handle_resolver_task_with_config},
15
17
http::{AppContext, create_router},
18
+
jetstream_handler::QuickDidEventHandler,
19
+
lexicon_resolver::create_redis_lexicon_resolver_with_ttl,
20
+
metrics::create_metrics_publisher,
16
21
queue::{
17
22
HandleResolutionWork, QueueAdapter, create_mpsc_queue_from_channel, create_noop_queue,
18
-
create_redis_queue, create_sqlite_queue, create_sqlite_queue_with_max_size,
23
+
create_redis_queue, create_redis_queue_with_dedup, create_sqlite_queue,
24
+
create_sqlite_queue_with_max_size,
19
25
},
20
26
sqlite_schema::create_sqlite_pool,
21
27
task_manager::spawn_cancellable_task,
22
28
};
23
-
use serde_json::json;
24
29
use std::sync::Arc;
25
30
use tokio::signal;
26
31
use tokio_util::{sync::CancellationToken, task::TaskTracker};
···
76
81
println!(" -V, --version Print version information");
77
82
println!();
78
83
println!("ENVIRONMENT VARIABLES:");
79
-
println!(" SERVICE_KEY Private key for service identity (required)");
80
84
println!(
81
85
" HTTP_EXTERNAL External hostname for service endpoints (required)"
82
86
);
···
114
118
" QUEUE_REDIS_PREFIX Redis key prefix for queues (default: queue:handleresolver:)"
115
119
);
116
120
println!(" QUEUE_REDIS_TIMEOUT Queue blocking timeout in seconds (default: 5)");
121
+
println!(
122
+
" QUEUE_REDIS_DEDUP_ENABLED Enable queue deduplication (default: false)"
123
+
);
124
+
println!(" QUEUE_REDIS_DEDUP_TTL TTL for dedup keys in seconds (default: 60)");
117
125
println!(" QUEUE_WORKER_ID Worker ID for Redis queue (default: worker1)");
118
126
println!(" QUEUE_BUFFER_SIZE Buffer size for MPSC queue (default: 1000)");
119
127
println!(" QUEUE_SQLITE_MAX_SIZE Maximum SQLite queue size (default: 10000)");
···
126
134
" RESOLVER_MAX_CONCURRENT_TIMEOUT_MS Timeout for acquiring permits in ms (default: 0 = no timeout)"
127
135
);
128
136
println!();
137
+
println!(" METRICS:");
138
+
println!(
139
+
" METRICS_ADAPTER Metrics adapter: 'noop' or 'statsd' (default: noop)"
140
+
);
141
+
println!(
142
+
" METRICS_STATSD_HOST StatsD host when using statsd adapter (e.g., localhost:8125)"
143
+
);
144
+
println!(
145
+
" METRICS_STATSD_BIND Bind address for StatsD UDP socket (default: [::]:0)"
146
+
);
147
+
println!(" METRICS_PREFIX Prefix for all metrics (default: quickdid)");
148
+
println!(
149
+
" METRICS_TAGS Default tags for metrics (comma-separated key:value pairs)"
150
+
);
151
+
println!();
152
+
println!(" PROACTIVE CACHE REFRESH:");
153
+
println!(
154
+
" PROACTIVE_REFRESH_ENABLED Enable proactive cache refresh (default: false)"
155
+
);
156
+
println!(
157
+
" PROACTIVE_REFRESH_THRESHOLD Threshold as percentage of TTL (0.0-1.0, default: 0.8)"
158
+
);
159
+
println!();
160
+
println!(" JETSTREAM:");
161
+
println!(" JETSTREAM_ENABLED Enable Jetstream consumer (default: false)");
162
+
println!(
163
+
" JETSTREAM_HOSTNAME Jetstream hostname (default: jetstream.atproto.tools)"
164
+
);
165
+
println!();
129
166
println!(
130
167
"For more information, visit: https://github.com/smokesignal.events/quickdid"
131
168
);
···
161
198
config.validate()?;
162
199
163
200
tracing::info!("Starting QuickDID service on port {}", config.http_port);
164
-
tracing::info!("Service DID: {}", config.service_did);
165
201
tracing::info!(
166
202
"Cache TTL - Memory: {}s, Redis: {}s, SQLite: {}s",
167
203
config.cache_ttl_memory,
···
196
232
// Create DNS resolver
197
233
let dns_resolver = HickoryDnsResolver::create_resolver(dns_nameservers.as_ref());
198
234
199
-
// Process service key
200
-
let private_service_key_data = identify_key(&config.service_key)?;
201
-
let public_service_key_data = to_public(&private_service_key_data)?;
202
-
let public_service_key = public_service_key_data.to_string();
203
-
204
-
// Create service DID document
205
-
let service_document = json!({
206
-
"@context": vec!["https://www.w3.org/ns/did/v1", "https://w3id.org/security/multikey/v1"],
207
-
"id": config.service_did.clone(),
208
-
"verificationMethod": [{
209
-
"id": format!("{}#atproto", config.service_did),
210
-
"type": "Multikey",
211
-
"controller": config.service_did.clone(),
212
-
"publicKeyMultibase": public_service_key
213
-
}],
214
-
"service": []
215
-
});
235
+
// Clone DNS resolver for lexicon resolution before wrapping in Arc
236
+
let lexicon_dns_resolver = dns_resolver.clone();
216
237
217
-
// Create DNS resolver Arc for sharing
238
+
// Wrap DNS resolver in Arc for handle resolution
218
239
let dns_resolver_arc = Arc::new(dns_resolver);
219
240
241
+
// Create metrics publisher based on configuration
242
+
let metrics_publisher = create_metrics_publisher(&config).map_err(|e| {
243
+
tracing::error!("Failed to create metrics publisher: {}", e);
244
+
anyhow::anyhow!("Failed to create metrics publisher: {}", e)
245
+
})?;
246
+
247
+
tracing::info!(
248
+
"Metrics publisher created with {} adapter",
249
+
config.metrics_adapter
250
+
);
251
+
252
+
metrics_publisher.gauge("server", 1).await;
253
+
220
254
// Create base handle resolver using factory function
221
-
let mut base_handle_resolver =
222
-
create_base_resolver(dns_resolver_arc.clone(), http_client.clone());
255
+
let mut base_handle_resolver = create_base_resolver(
256
+
dns_resolver_arc.clone(),
257
+
http_client.clone(),
258
+
metrics_publisher.clone(),
259
+
);
223
260
224
261
// Apply rate limiting if configured
225
262
if config.resolver_max_concurrent > 0 {
···
237
274
base_handle_resolver,
238
275
config.resolver_max_concurrent,
239
276
config.resolver_max_concurrent_timeout_ms,
277
+
metrics_publisher.clone(),
240
278
);
241
279
}
242
280
···
253
291
None
254
292
};
255
293
256
-
// Create handle resolver with cache priority: Redis > SQLite > In-memory
257
-
let handle_resolver: Arc<dyn quickdid::handle_resolver::HandleResolver> =
258
-
if let Some(pool) = redis_pool {
259
-
tracing::info!(
260
-
"Using Redis-backed handle resolver with {}-second cache TTL",
261
-
config.cache_ttl_redis
262
-
);
263
-
create_redis_resolver_with_ttl(base_handle_resolver, pool, config.cache_ttl_redis)
264
-
} else if let Some(pool) = sqlite_pool {
265
-
tracing::info!(
266
-
"Using SQLite-backed handle resolver with {}-second cache TTL",
267
-
config.cache_ttl_sqlite
268
-
);
269
-
create_sqlite_resolver_with_ttl(base_handle_resolver, pool, config.cache_ttl_sqlite)
270
-
} else {
271
-
tracing::info!(
272
-
"Using in-memory handle resolver with {}-second cache TTL",
273
-
config.cache_ttl_memory
274
-
);
275
-
create_caching_resolver(base_handle_resolver, config.cache_ttl_memory)
276
-
};
277
-
278
294
// Create task tracker and cancellation token
279
295
let tracker = TaskTracker::new();
280
296
let token = CancellationToken::new();
281
297
282
-
// Setup background handle resolution task and get the queue adapter
298
+
// Create the queue adapter first (needed for proactive refresh)
283
299
let handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>> = {
284
300
// Create queue adapter based on configuration
285
301
let adapter: Arc<dyn QueueAdapter<HandleResolutionWork>> = match config
···
296
312
if let Some(url) = queue_redis_url {
297
313
if let Some(pool) = try_create_redis_pool(url, "queue adapter") {
298
314
tracing::info!(
299
-
"Creating Redis queue adapter with prefix: {}",
300
-
config.queue_redis_prefix
315
+
"Creating Redis queue adapter with prefix: {}, dedup: {}, dedup_ttl: {}s",
316
+
config.queue_redis_prefix,
317
+
config.queue_redis_dedup_enabled,
318
+
config.queue_redis_dedup_ttl
301
319
);
302
-
create_redis_queue::<HandleResolutionWork>(
303
-
pool,
304
-
config.queue_worker_id.clone(),
305
-
config.queue_redis_prefix.clone(),
306
-
config.queue_redis_timeout,
307
-
)
320
+
if config.queue_redis_dedup_enabled {
321
+
create_redis_queue_with_dedup::<HandleResolutionWork>(
322
+
pool,
323
+
config.queue_worker_id.clone(),
324
+
config.queue_redis_prefix.clone(),
325
+
config.queue_redis_timeout,
326
+
true,
327
+
config.queue_redis_dedup_ttl,
328
+
)
329
+
} else {
330
+
create_redis_queue::<HandleResolutionWork>(
331
+
pool,
332
+
config.queue_worker_id.clone(),
333
+
config.queue_redis_prefix.clone(),
334
+
config.queue_redis_timeout,
335
+
)
336
+
}
308
337
} else {
309
338
tracing::warn!("Falling back to MPSC queue adapter");
310
339
// Fall back to MPSC if Redis fails
···
381
410
}
382
411
};
383
412
384
-
// Keep a reference to the adapter for the AppContext
385
-
let adapter_for_context = adapter.clone();
413
+
adapter
414
+
};
415
+
416
+
// Create handle resolver with cache priority: Redis > SQLite > In-memory
417
+
let (mut handle_resolver, cache_ttl): (
418
+
Arc<dyn quickdid::handle_resolver::HandleResolver>,
419
+
u64,
420
+
) = if let Some(ref pool) = redis_pool {
421
+
tracing::info!(
422
+
"Using Redis-backed handle resolver with {}-second cache TTL",
423
+
config.cache_ttl_redis
424
+
);
425
+
(
426
+
create_redis_resolver_with_ttl(
427
+
base_handle_resolver,
428
+
pool.clone(),
429
+
config.cache_ttl_redis,
430
+
metrics_publisher.clone(),
431
+
),
432
+
config.cache_ttl_redis,
433
+
)
434
+
} else if let Some(pool) = sqlite_pool {
435
+
tracing::info!(
436
+
"Using SQLite-backed handle resolver with {}-second cache TTL",
437
+
config.cache_ttl_sqlite
438
+
);
439
+
(
440
+
create_sqlite_resolver_with_ttl(
441
+
base_handle_resolver,
442
+
pool,
443
+
config.cache_ttl_sqlite,
444
+
metrics_publisher.clone(),
445
+
),
446
+
config.cache_ttl_sqlite,
447
+
)
448
+
} else {
449
+
tracing::info!(
450
+
"Using in-memory handle resolver with {}-second cache TTL",
451
+
config.cache_ttl_memory
452
+
);
453
+
(
454
+
create_caching_resolver(
455
+
base_handle_resolver,
456
+
config.cache_ttl_memory,
457
+
metrics_publisher.clone(),
458
+
),
459
+
config.cache_ttl_memory,
460
+
)
461
+
};
462
+
463
+
// Apply proactive refresh if enabled
464
+
if config.proactive_refresh_enabled && !matches!(config.queue_adapter.as_str(), "noop" | "none")
465
+
{
466
+
tracing::info!(
467
+
"Enabling proactive cache refresh with {}% threshold",
468
+
(config.proactive_refresh_threshold * 100.0) as u32
469
+
);
470
+
handle_resolver = create_proactive_refresh_resolver_with_metrics(
471
+
handle_resolver,
472
+
handle_queue.clone(),
473
+
metrics_publisher.clone(),
474
+
cache_ttl,
475
+
config.proactive_refresh_threshold,
476
+
);
477
+
} else if config.proactive_refresh_enabled {
478
+
tracing::warn!(
479
+
"Proactive refresh enabled but queue adapter is no-op, skipping proactive refresh"
480
+
);
481
+
}
482
+
483
+
// Create lexicon resolver with Redis caching if available
484
+
let lexicon_resolver: Arc<dyn LexiconResolver> = {
485
+
let base_lexicon_resolver: Arc<dyn LexiconResolver> = Arc::new(
486
+
DefaultLexiconResolver::new(http_client.clone(), lexicon_dns_resolver),
487
+
);
488
+
489
+
if let Some(ref pool) = redis_pool {
490
+
tracing::info!(
491
+
"Using Redis-backed lexicon resolver with {}-second cache TTL",
492
+
config.cache_ttl_redis
493
+
);
494
+
create_redis_lexicon_resolver_with_ttl(
495
+
base_lexicon_resolver,
496
+
pool.clone(),
497
+
config.cache_ttl_redis,
498
+
metrics_publisher.clone(),
499
+
)
500
+
} else {
501
+
tracing::info!("Using base lexicon resolver without caching");
502
+
base_lexicon_resolver
503
+
}
504
+
};
505
+
506
+
// Setup background handle resolution task
507
+
{
508
+
let adapter_for_task = handle_queue.clone();
386
509
387
510
// Only spawn handle resolver task if not using noop adapter
388
511
if !matches!(config.queue_adapter.as_str(), "noop" | "none") {
···
393
516
394
517
// Create and start handle resolver task
395
518
let handle_task = create_handle_resolver_task_with_config(
396
-
adapter,
519
+
adapter_for_task,
397
520
handle_resolver.clone(),
398
521
token.clone(),
399
522
handle_task_config,
523
+
metrics_publisher.clone(),
400
524
);
401
525
402
526
// Spawn the handle resolver task
···
429
553
} else {
430
554
tracing::info!("Background handle resolution task disabled (using no-op adapter)");
431
555
}
432
-
433
-
// Return the adapter to be used in AppContext
434
-
adapter_for_context
435
556
};
436
557
437
558
// Create app context with the queue adapter
438
559
let app_context = AppContext::new(
439
-
service_document,
440
-
config.service_did.clone(),
441
560
handle_resolver.clone(),
442
561
handle_queue,
562
+
lexicon_resolver,
563
+
metrics_publisher.clone(),
443
564
config.etag_seed.clone(),
444
565
config.cache_control_header.clone(),
566
+
config.static_files_dir.clone(),
445
567
);
446
568
447
569
// Create router
···
488
610
signal_token.cancel();
489
611
tracing::info!("Signal handler task completed");
490
612
});
613
+
}
614
+
615
+
// Start Jetstream consumer if enabled
616
+
if config.jetstream_enabled {
617
+
let jetstream_resolver = handle_resolver.clone();
618
+
let jetstream_metrics = metrics_publisher.clone();
619
+
let jetstream_hostname = config.jetstream_hostname.clone();
620
+
let jetstream_user_agent = config.user_agent.clone();
621
+
622
+
spawn_cancellable_task(
623
+
&tracker,
624
+
token.clone(),
625
+
"jetstream_consumer",
626
+
move |cancel_token| async move {
627
+
tracing::info!(hostname = %jetstream_hostname, "Starting Jetstream consumer");
628
+
629
+
// Create event handler
630
+
let event_handler = Arc::new(QuickDidEventHandler::new(
631
+
jetstream_resolver,
632
+
jetstream_metrics.clone(),
633
+
));
634
+
635
+
// Reconnection loop
636
+
let mut reconnect_count = 0u32;
637
+
let max_reconnects_per_minute = 5;
638
+
let reconnect_window = std::time::Duration::from_secs(60);
639
+
let mut last_disconnect = std::time::Instant::now() - reconnect_window;
640
+
641
+
while !cancel_token.is_cancelled() {
642
+
let now = std::time::Instant::now();
643
+
if now.duration_since(last_disconnect) < reconnect_window {
644
+
reconnect_count += 1;
645
+
if reconnect_count > max_reconnects_per_minute {
646
+
tracing::warn!(
647
+
count = reconnect_count,
648
+
"Too many Jetstream reconnects, waiting 60 seconds"
649
+
);
650
+
tokio::time::sleep(reconnect_window).await;
651
+
reconnect_count = 0;
652
+
last_disconnect = now;
653
+
continue;
654
+
}
655
+
} else {
656
+
reconnect_count = 0;
657
+
}
658
+
659
+
// Create consumer configuration
660
+
let consumer_config = ConsumerTaskConfig {
661
+
user_agent: jetstream_user_agent.clone(),
662
+
compression: false,
663
+
zstd_dictionary_location: String::new(),
664
+
jetstream_hostname: jetstream_hostname.clone(),
665
+
// Listen to the "community.lexicon.collection.fake" collection
666
+
// so that we keep an active connection open while we only process
667
+
// account and identity events.
668
+
collections: vec!["community.lexicon.collection.fake".to_string()],
669
+
dids: vec![],
670
+
max_message_size_bytes: None,
671
+
cursor: None,
672
+
require_hello: true,
673
+
};
674
+
675
+
let consumer = JetstreamConsumer::new(consumer_config);
676
+
677
+
// Register event handler
678
+
if let Err(e) = consumer.register_handler(event_handler.clone()).await {
679
+
tracing::error!(error = ?e, "Failed to register Jetstream event handler");
680
+
continue;
681
+
}
682
+
683
+
// Run consumer with cancellation support
684
+
match consumer.run_background(cancel_token.clone()).await {
685
+
Ok(()) => {
686
+
tracing::info!("Jetstream consumer stopped normally");
687
+
if cancel_token.is_cancelled() {
688
+
break;
689
+
}
690
+
last_disconnect = std::time::Instant::now();
691
+
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
692
+
}
693
+
Err(e) => {
694
+
tracing::error!(error = ?e, "Jetstream consumer connection failed, will reconnect");
695
+
jetstream_metrics.incr("jetstream.connection.error").await;
696
+
last_disconnect = std::time::Instant::now();
697
+
698
+
if !cancel_token.is_cancelled() {
699
+
tokio::time::sleep(std::time::Duration::from_secs(5)).await;
700
+
}
701
+
}
702
+
}
703
+
}
704
+
705
+
tracing::info!("Jetstream consumer task shutting down");
706
+
Ok(())
707
+
},
708
+
);
709
+
} else {
710
+
tracing::info!("Jetstream consumer disabled");
491
711
}
492
712
493
713
// Start HTTP server with cancellation support
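The Jetstream task above relies on the same cancellation plumbing as the other background tasks. A reduced sketch of that pattern, assuming only the tokio and tokio-util crates (this shows the underlying mechanics, not the actual spawn_cancellable_task helper):

```rust
// Sketch of the shutdown pattern: a TaskTracker owns background tasks and a
// shared CancellationToken tells them to stop. The periodic tick stands in
// for real work such as the Jetstream reconnect loop.
use std::time::Duration;
use tokio_util::{sync::CancellationToken, task::TaskTracker};

#[tokio::main]
async fn main() {
    let tracker = TaskTracker::new();
    let token = CancellationToken::new();

    let child = token.clone();
    tracker.spawn(async move {
        loop {
            tokio::select! {
                _ = child.cancelled() => break, // graceful shutdown requested
                _ = tokio::time::sleep(Duration::from_secs(5)) => {
                    // periodic work would run here
                }
            }
        }
    });

    // Elsewhere (e.g., the signal handler) the token gets cancelled:
    token.cancel();
    tracker.close();
    tracker.wait().await; // all tracked tasks have exited
}
```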
+106
-32
src/config.rs
···
13
13
//! ```bash
14
14
//! # Minimal configuration
15
15
//! HTTP_EXTERNAL=quickdid.example.com \
16
-
//! SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
17
16
//! quickdid
18
17
//!
19
18
//! # Full configuration with Redis and custom settings
20
19
//! HTTP_EXTERNAL=quickdid.example.com \
21
-
//! SERVICE_KEY=did:key:z42tmZxD2mi1TfMKSFrsRfednwdaaPNZiiWHP4MPgcvXkDWK \
22
20
//! HTTP_PORT=3000 \
23
21
//! REDIS_URL=redis://localhost:6379 \
24
22
//! CACHE_TTL_MEMORY=300 \
···
38
36
pub enum ConfigError {
39
37
/// Missing required environment variable or command-line argument
40
38
///
41
-
/// Example: When SERVICE_KEY or HTTP_EXTERNAL are not provided
39
+
/// Example: When HTTP_EXTERNAL is not provided
42
40
#[error("error-quickdid-config-1 Missing required environment variable: {0}")]
43
41
MissingRequired(String),
44
42
···
97
95
/// config.validate()?;
98
96
///
99
97
/// println!("Service running at: {}", config.http_external);
100
-
/// println!("Service DID: {}", config.service_did);
101
98
/// # Ok(())
102
99
/// # }
103
100
/// ```
···
112
109
/// External hostname for service endpoints (e.g., "quickdid.example.com")
113
110
pub http_external: String,
114
111
115
-
/// Private key for service identity (e.g., "did:key:z42tm...")
116
-
pub service_key: String,
117
-
118
112
/// HTTP User-Agent for outgoing requests (e.g., "quickdid/1.0.0 (+https://...)")
119
113
pub user_agent: String,
120
114
121
-
/// Derived service DID (e.g., "did:web:quickdid.example.com")
122
-
/// Automatically generated from http_external with proper encoding
123
-
pub service_did: String,
124
-
125
115
/// Custom DNS nameservers, comma-separated (e.g., "8.8.8.8,8.8.4.4")
126
116
pub dns_nameservers: Option<String>,
127
117
···
161
151
/// Redis blocking timeout for queue operations in seconds (e.g., 5)
162
152
pub queue_redis_timeout: u64,
163
153
154
+
/// Enable deduplication for Redis queue to prevent duplicate handles
155
+
/// Default: false
156
+
pub queue_redis_dedup_enabled: bool,
157
+
158
+
/// TTL for Redis queue deduplication keys in seconds
159
+
/// Default: 60 (1 minute)
160
+
pub queue_redis_dedup_ttl: u64,
161
+
164
162
/// Maximum queue size for SQLite adapter work shedding (e.g., 10000)
165
163
/// When exceeded, oldest entries are deleted to maintain this limit.
166
164
/// Set to 0 to disable work shedding (unlimited queue size).
···
211
209
/// Calculated at startup for efficiency.
212
210
/// None if cache_max_age is 0 (disabled).
213
211
pub cache_control_header: Option<String>,
212
+
213
+
/// Metrics adapter type: "noop" or "statsd"
214
+
/// Default: "noop" (no metrics collection)
215
+
pub metrics_adapter: String,
216
+
217
+
/// StatsD host for metrics collection (e.g., "localhost:8125")
218
+
/// Required when metrics_adapter is "statsd"
219
+
pub metrics_statsd_host: Option<String>,
220
+
221
+
/// Bind address for StatsD UDP socket (e.g., "0.0.0.0:0" for IPv4 or "[::]:0" for IPv6)
222
+
/// Default: "[::]:0" (IPv6 any address, random port)
223
+
pub metrics_statsd_bind: String,
224
+
225
+
/// Metrics prefix for all metrics (e.g., "quickdid")
226
+
/// Default: "quickdid"
227
+
pub metrics_prefix: String,
228
+
229
+
/// Default tags for all metrics (comma-separated key:value pairs)
230
+
/// Example: "env:production,service:quickdid"
231
+
pub metrics_tags: Option<String>,
232
+
233
+
/// Enable proactive cache refresh for frequently accessed handles.
234
+
/// When enabled, cache entries that have reached the refresh threshold
235
+
/// will be queued for background refresh to keep the cache warm.
236
+
/// Default: false
237
+
pub proactive_refresh_enabled: bool,
238
+
239
+
/// Threshold as a percentage (0.0-1.0) of cache TTL when to trigger proactive refresh.
240
+
/// For example, 0.8 means refresh when an entry has lived for 80% of its TTL.
241
+
/// Default: 0.8 (80%)
242
+
pub proactive_refresh_threshold: f64,
243
+
244
+
/// Directory path for serving static files.
245
+
/// When set, the root handler will serve files from this directory.
246
+
/// Default: "www" (relative to working directory)
247
+
pub static_files_dir: String,
248
+
249
+
/// Enable Jetstream consumer for AT Protocol events.
250
+
/// When enabled, the service will consume Account and Identity events
251
+
/// to maintain cache consistency.
252
+
/// Default: false
253
+
pub jetstream_enabled: bool,
254
+
255
+
/// Jetstream WebSocket hostname for consuming AT Protocol events.
256
+
/// Example: "jetstream.atproto.tools" or "jetstream1.us-west.bsky.network"
257
+
/// Default: "jetstream.atproto.tools"
258
+
pub jetstream_hostname: String,
214
259
}
215
260
216
261
impl Config {
···
218
263
///
219
264
/// This method:
220
265
/// 1. Reads configuration from environment variables
221
-
/// 2. Validates required fields (HTTP_EXTERNAL and SERVICE_KEY)
222
-
/// 3. Generates derived values (service_did from http_external)
223
-
/// 4. Applies defaults where appropriate
266
+
/// 2. Validates required fields (HTTP_EXTERNAL)
267
+
/// 3. Applies defaults where appropriate
224
268
///
225
269
/// ## Example
226
270
///
···
231
275
/// // Parse from environment variables
232
276
/// let config = Config::from_env()?;
233
277
///
234
-
/// // The service DID is automatically generated from HTTP_EXTERNAL
235
-
/// assert!(config.service_did.starts_with("did:web:"));
236
278
/// # Ok(())
237
279
/// # }
238
280
/// ```
···
241
283
///
242
284
/// Returns `ConfigError::MissingRequired` if:
243
285
/// - HTTP_EXTERNAL is not provided
244
-
/// - SERVICE_KEY is not provided
245
286
pub fn from_env() -> Result<Self, ConfigError> {
246
287
// Required fields
247
288
let http_external = env::var("HTTP_EXTERNAL")
···
249
290
.filter(|s| !s.is_empty())
250
291
.ok_or_else(|| ConfigError::MissingRequired("HTTP_EXTERNAL".to_string()))?;
251
292
252
-
let service_key = env::var("SERVICE_KEY")
253
-
.ok()
254
-
.filter(|s| !s.is_empty())
255
-
.ok_or_else(|| ConfigError::MissingRequired("SERVICE_KEY".to_string()))?;
256
-
257
293
// Generate default user agent
258
294
let default_user_agent = format!(
259
295
"quickdid/{} (+https://github.com/smokesignal.events/quickdid)",
260
296
env!("CARGO_PKG_VERSION")
261
297
);
262
298
263
-
// Generate service DID from http_external
264
-
let service_did = if http_external.contains(':') {
265
-
let encoded_external = http_external.replace(':', "%3A");
266
-
format!("did:web:{}", encoded_external)
267
-
} else {
268
-
format!("did:web:{}", http_external)
269
-
};
270
-
271
299
let mut config = Config {
272
300
http_port: get_env_or_default("HTTP_PORT", Some("8080")).unwrap(),
273
301
plc_hostname: get_env_or_default("PLC_HOSTNAME", Some("plc.directory")).unwrap(),
274
302
http_external,
275
-
service_key,
276
303
user_agent: get_env_or_default("USER_AGENT", None).unwrap_or(default_user_agent),
277
-
service_did,
278
304
dns_nameservers: get_env_or_default("DNS_NAMESERVERS", None),
279
305
certificate_bundles: get_env_or_default("CERTIFICATE_BUNDLES", None),
280
306
redis_url: get_env_or_default("REDIS_URL", None),
···
292
318
cache_ttl_redis: parse_env("CACHE_TTL_REDIS", 7776000)?,
293
319
cache_ttl_sqlite: parse_env("CACHE_TTL_SQLITE", 7776000)?,
294
320
queue_redis_timeout: parse_env("QUEUE_REDIS_TIMEOUT", 5)?,
321
+
queue_redis_dedup_enabled: parse_env("QUEUE_REDIS_DEDUP_ENABLED", false)?,
322
+
queue_redis_dedup_ttl: parse_env("QUEUE_REDIS_DEDUP_TTL", 60)?,
295
323
queue_sqlite_max_size: parse_env("QUEUE_SQLITE_MAX_SIZE", 10000)?,
296
324
resolver_max_concurrent: parse_env("RESOLVER_MAX_CONCURRENT", 0)?,
297
325
resolver_max_concurrent_timeout_ms: parse_env("RESOLVER_MAX_CONCURRENT_TIMEOUT_MS", 0)?,
···
302
330
cache_max_stale: parse_env("CACHE_MAX_STALE", 172800)?, // 48 hours
303
331
cache_min_fresh: parse_env("CACHE_MIN_FRESH", 3600)?, // 1 hour
304
332
cache_control_header: None, // Will be calculated below
333
+
metrics_adapter: get_env_or_default("METRICS_ADAPTER", Some("noop")).unwrap(),
334
+
metrics_statsd_host: get_env_or_default("METRICS_STATSD_HOST", None),
335
+
metrics_statsd_bind: get_env_or_default("METRICS_STATSD_BIND", Some("[::]:0")).unwrap(),
336
+
metrics_prefix: get_env_or_default("METRICS_PREFIX", Some("quickdid")).unwrap(),
337
+
metrics_tags: get_env_or_default("METRICS_TAGS", None),
338
+
proactive_refresh_enabled: parse_env("PROACTIVE_REFRESH_ENABLED", false)?,
339
+
proactive_refresh_threshold: parse_env("PROACTIVE_REFRESH_THRESHOLD", 0.8)?,
340
+
static_files_dir: get_env_or_default("STATIC_FILES_DIR", Some("www")).unwrap(),
341
+
jetstream_enabled: parse_env("JETSTREAM_ENABLED", false)?,
342
+
jetstream_hostname: get_env_or_default(
343
+
"JETSTREAM_HOSTNAME",
344
+
Some("jetstream.atproto.tools"),
345
+
)
346
+
.unwrap(),
305
347
};
306
348
307
349
// Calculate the Cache-Control header value if enabled
···
371
413
"QUEUE_REDIS_TIMEOUT must be > 0".to_string(),
372
414
));
373
415
}
416
+
if self.queue_redis_dedup_enabled && self.queue_redis_dedup_ttl == 0 {
417
+
return Err(ConfigError::InvalidTtl(
418
+
"QUEUE_REDIS_DEDUP_TTL must be > 0 when deduplication is enabled".to_string(),
419
+
));
420
+
}
374
421
match self.queue_adapter.as_str() {
375
422
"mpsc" | "redis" | "sqlite" | "noop" | "none" => {}
376
423
_ => {
···
390
437
"RESOLVER_MAX_CONCURRENT_TIMEOUT_MS must be <= 60000 (60 seconds)".to_string(),
391
438
));
392
439
}
440
+
441
+
// Validate metrics configuration
442
+
match self.metrics_adapter.as_str() {
443
+
"noop" | "statsd" => {}
444
+
_ => {
445
+
return Err(ConfigError::InvalidValue(format!(
446
+
"Invalid METRICS_ADAPTER '{}', must be 'noop' or 'statsd'",
447
+
self.metrics_adapter
448
+
)));
449
+
}
450
+
}
451
+
452
+
// If statsd is configured, ensure host is provided
453
+
if self.metrics_adapter == "statsd" && self.metrics_statsd_host.is_none() {
454
+
return Err(ConfigError::MissingRequired(
455
+
"METRICS_STATSD_HOST is required when METRICS_ADAPTER is 'statsd'".to_string(),
456
+
));
457
+
}
458
+
459
+
// Validate proactive refresh threshold
460
+
if self.proactive_refresh_threshold < 0.0 || self.proactive_refresh_threshold > 1.0 {
461
+
return Err(ConfigError::InvalidValue(format!(
462
+
"PROACTIVE_REFRESH_THRESHOLD must be between 0.0 and 1.0, got {}",
463
+
self.proactive_refresh_threshold
464
+
)));
465
+
}
466
+
393
467
Ok(())
394
468
}
395
469
}
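As a standalone illustration of the PROACTIVE_REFRESH_THRESHOLD rule validated above (the helper name is hypothetical, not part of Config):

```rust
// The threshold is a fraction of the cache TTL and must land in [0.0, 1.0].
fn validate_threshold(t: f64) -> Result<(), String> {
    if !(0.0..=1.0).contains(&t) {
        return Err(format!(
            "PROACTIVE_REFRESH_THRESHOLD must be between 0.0 and 1.0, got {t}"
        ));
    }
    Ok(())
}

fn main() {
    assert!(validate_threshold(0.8).is_ok()); // the default
    assert!(validate_threshold(1.5).is_err());
}
```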
+50
-9
src/handle_resolver/base.rs
···
5
5
6
6
use super::errors::HandleResolverError;
7
7
use super::traits::HandleResolver;
8
+
use crate::metrics::SharedMetricsPublisher;
8
9
use async_trait::async_trait;
9
10
use atproto_identity::resolve::{DnsResolver, resolve_subject};
10
11
use reqwest::Client;
···
25
26
/// use reqwest::Client;
26
27
/// use atproto_identity::resolve::HickoryDnsResolver;
27
28
/// use quickdid::handle_resolver::{create_base_resolver, HandleResolver};
29
+
/// use quickdid::metrics::NoOpMetricsPublisher;
28
30
///
29
31
/// # async fn example() {
30
32
/// let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
31
33
/// let http_client = Client::new();
34
+
/// let metrics = Arc::new(NoOpMetricsPublisher);
32
35
///
33
36
/// let resolver = create_base_resolver(
34
37
/// dns_resolver,
35
38
/// http_client,
39
+
/// metrics,
36
40
/// );
37
41
///
38
42
/// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
···
45
49
46
50
/// HTTP client for DID document retrieval and well-known endpoint queries.
47
51
http_client: Client,
52
+
53
+
/// Metrics publisher for telemetry.
54
+
metrics: SharedMetricsPublisher,
48
55
}
49
56
50
57
#[async_trait]
51
58
impl HandleResolver for BaseHandleResolver {
52
59
async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
53
-
let did = resolve_subject(&self.http_client, &*self.dns_resolver, s)
60
+
let start_time = std::time::Instant::now();
61
+
62
+
// Perform DNS/HTTP resolution
63
+
let result = resolve_subject(&self.http_client, &*self.dns_resolver, s)
54
64
.await
55
-
.map_err(|e| HandleResolverError::ResolutionFailed(e.to_string()))?;
56
-
57
-
let timestamp = SystemTime::now()
58
-
.duration_since(UNIX_EPOCH)
59
-
.map_err(|e| HandleResolverError::ResolutionFailed(format!("System time error: {}", e)))?
60
-
.as_secs();
61
-
62
-
Ok((did, timestamp))
65
+
.map_err(|e| HandleResolverError::ResolutionFailed(e.to_string()));
66
+
67
+
let duration_ms = start_time.elapsed().as_millis() as u64;
68
+
69
+
// Publish metrics
70
+
71
+
match result {
72
+
Ok(did) => {
73
+
self.metrics
74
+
.time_with_tags(
75
+
"resolver.base.duration_ms",
76
+
duration_ms,
77
+
&[("success", "1")],
78
+
)
79
+
.await;
80
+
81
+
let timestamp = SystemTime::now()
82
+
.duration_since(UNIX_EPOCH)
83
+
.map_err(|e| {
84
+
HandleResolverError::ResolutionFailed(format!("System time error: {}", e))
85
+
})?
86
+
.as_secs();
87
+
88
+
Ok((did, timestamp))
89
+
}
90
+
Err(e) => {
91
+
self.metrics
92
+
.time_with_tags(
93
+
"resolver.base.duration_ms",
94
+
duration_ms,
95
+
&[("success", "0")],
96
+
)
97
+
.await;
98
+
Err(e)
99
+
}
100
+
}
63
101
}
64
102
}
65
103
···
72
110
///
73
111
/// * `dns_resolver` - DNS resolver for TXT record lookups
74
112
/// * `http_client` - HTTP client for well-known endpoint queries
113
+
/// * `metrics` - Metrics publisher for telemetry
75
114
pub fn create_base_resolver(
76
115
dns_resolver: Arc<dyn DnsResolver>,
77
116
http_client: Client,
117
+
metrics: SharedMetricsPublisher,
78
118
) -> Arc<dyn HandleResolver> {
79
119
Arc::new(BaseHandleResolver {
80
120
dns_resolver,
81
121
http_client,
122
+
metrics,
82
123
})
83
124
}
+3
src/handle_resolver/errors.rs
+62
-5
src/handle_resolver/memory.rs
···
6
6
7
7
use super::errors::HandleResolverError;
8
8
use super::traits::HandleResolver;
9
+
use crate::metrics::SharedMetricsPublisher;
9
10
use async_trait::async_trait;
10
11
use std::collections::HashMap;
11
12
use std::sync::Arc;
···
32
33
/// ```no_run
33
34
/// use std::sync::Arc;
34
35
/// use quickdid::handle_resolver::{create_caching_resolver, create_base_resolver, HandleResolver};
36
+
/// use quickdid::metrics::NoOpMetricsPublisher;
35
37
///
36
38
/// # async fn example() {
37
39
/// # use atproto_identity::resolve::HickoryDnsResolver;
38
40
/// # use reqwest::Client;
39
41
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
40
42
/// # let http_client = Client::new();
41
-
/// let base_resolver = create_base_resolver(dns_resolver, http_client);
43
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
44
+
/// let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
42
45
/// let caching_resolver = create_caching_resolver(
43
46
/// base_resolver,
44
-
/// 300 // 5 minute TTL
47
+
/// 300, // 5 minute TTL
48
+
/// metrics
45
49
/// );
46
50
///
47
51
/// // First call hits the underlying resolver
···
55
59
inner: Arc<dyn HandleResolver>,
56
60
cache: Arc<RwLock<HashMap<String, ResolveHandleResult>>>,
57
61
ttl_seconds: u64,
62
+
metrics: SharedMetricsPublisher,
58
63
}
59
64
60
65
impl CachingHandleResolver {
···
64
69
///
65
70
/// * `inner` - The underlying resolver to use for actual resolution
66
71
/// * `ttl_seconds` - How long to cache results in seconds
67
-
pub fn new(inner: Arc<dyn HandleResolver>, ttl_seconds: u64) -> Self {
72
+
/// * `metrics` - Metrics publisher for telemetry
73
+
pub fn new(
74
+
inner: Arc<dyn HandleResolver>,
75
+
ttl_seconds: u64,
76
+
metrics: SharedMetricsPublisher,
77
+
) -> Self {
68
78
Self {
69
79
inner,
70
80
cache: Arc::new(RwLock::new(HashMap::new())),
71
81
ttl_seconds,
82
+
metrics,
72
83
}
73
84
}
74
85
···
98
109
ResolveHandleResult::Found(timestamp, did) => {
99
110
if !self.is_expired(*timestamp) {
100
111
tracing::debug!("Cache hit for handle {}: {}", handle, did);
112
+
self.metrics.incr("resolver.memory.cache_hit").await;
101
113
return Ok((did.clone(), *timestamp));
102
114
}
103
115
tracing::debug!("Cache entry expired for handle {}", handle);
116
+
self.metrics.incr("resolver.memory.cache_expired").await;
104
117
}
105
118
ResolveHandleResult::NotFound(timestamp, error) => {
106
119
if !self.is_expired(*timestamp) {
···
109
122
handle,
110
123
error
111
124
);
125
+
self.metrics
126
+
.incr("resolver.memory.cache_hit_not_resolved")
127
+
.await;
112
128
return Err(HandleResolverError::HandleNotFoundCached(error.clone()));
113
129
}
114
130
tracing::debug!("Cache entry expired for handle {}", handle);
131
+
self.metrics.incr("resolver.memory.cache_expired").await;
115
132
}
116
133
}
117
134
}
···
119
136
120
137
// Not in cache or expired, resolve through inner resolver
121
138
tracing::debug!("Cache miss for handle {}, resolving...", handle);
139
+
self.metrics.incr("resolver.memory.cache_miss").await;
122
140
let result = self.inner.resolve(s).await;
123
141
124
142
// Store in cache
···
130
148
handle.clone(),
131
149
ResolveHandleResult::Found(*timestamp, did.clone()),
132
150
);
151
+
self.metrics.incr("resolver.memory.cache_set").await;
133
152
tracing::debug!(
134
153
"Cached successful resolution for handle {}: {}",
135
154
handle,
···
142
161
handle.clone(),
143
162
ResolveHandleResult::NotFound(timestamp, e.to_string()),
144
163
);
164
+
self.metrics.incr("resolver.memory.cache_set_error").await;
145
165
tracing::debug!("Cached failed resolution for handle {}: {}", handle, e);
146
166
}
147
167
}
168
+
169
+
// Track cache size
170
+
let cache_size = cache.len() as u64;
171
+
self.metrics
172
+
.gauge("resolver.memory.cache_entries", cache_size)
173
+
.await;
148
174
}
149
175
150
176
result
151
177
}
178
+
179
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
180
+
// Normalize the handle to lowercase
181
+
let handle = handle.to_lowercase();
182
+
183
+
// Update the in-memory cache
184
+
{
185
+
let mut cache = self.cache.write().await;
186
+
let timestamp = Self::current_timestamp();
187
+
cache.insert(
188
+
handle.clone(),
189
+
ResolveHandleResult::Found(timestamp, did.to_string()),
190
+
);
191
+
self.metrics.incr("resolver.memory.set").await;
192
+
tracing::debug!("Set handle {} -> DID {} in memory cache", handle, did);
193
+
194
+
// Track cache size
195
+
let cache_size = cache.len() as u64;
196
+
self.metrics
197
+
.gauge("resolver.memory.cache_entries", cache_size)
198
+
.await;
199
+
}
200
+
201
+
// Chain to inner resolver
202
+
self.inner.set(&handle, did).await
203
+
}
152
204
}
153
205
154
206
/// Create a new in-memory caching handle resolver.
···
160
212
///
161
213
/// * `inner` - The underlying resolver to use for actual resolution
162
214
/// * `ttl_seconds` - How long to cache results in seconds
215
+
/// * `metrics` - Metrics publisher for telemetry
163
216
///
164
217
/// # Example
165
218
///
166
219
/// ```no_run
167
220
/// use std::sync::Arc;
168
221
/// use quickdid::handle_resolver::{create_base_resolver, create_caching_resolver, HandleResolver};
222
+
/// use quickdid::metrics::NoOpMetricsPublisher;
169
223
///
170
224
/// # async fn example() {
171
225
/// # use atproto_identity::resolve::HickoryDnsResolver;
172
226
/// # use reqwest::Client;
173
227
/// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
174
228
/// # let http_client = Client::new();
229
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
175
230
/// let base = create_base_resolver(
176
231
/// dns_resolver,
177
232
/// http_client,
233
+
/// metrics.clone(),
178
234
/// );
179
235
///
180
-
/// let resolver = create_caching_resolver(base, 300); // 5 minute TTL
236
+
/// let resolver = create_caching_resolver(base, 300, metrics); // 5 minute TTL
181
237
/// let did = resolver.resolve("alice.bsky.social").await.unwrap();
182
238
/// # }
183
239
/// ```
184
240
pub fn create_caching_resolver(
185
241
inner: Arc<dyn HandleResolver>,
186
242
ttl_seconds: u64,
243
+
metrics: SharedMetricsPublisher,
187
244
) -> Arc<dyn HandleResolver> {
188
-
Arc::new(CachingHandleResolver::new(inner, ttl_seconds))
245
+
Arc::new(CachingHandleResolver::new(inner, ttl_seconds, metrics))
189
246
}
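A hedged usage sketch of the write-through `set` path shown above: each caching layer records the mapping (lowercasing the handle) and then delegates inward. The handle, DID, and the `seed_cache` helper are illustrative, not part of the crate:

```rust
use std::sync::Arc;
use quickdid::handle_resolver::HandleResolver;

// Manually seed the resolver stack; `set` writes through every caching layer.
async fn seed_cache(resolver: Arc<dyn HandleResolver>) {
    // Mixed case is fine; `set` normalizes the handle to lowercase.
    resolver
        .set("Alice.BSKY.social", "did:plc:abc123")
        .await
        .expect("cache write failed");

    // Until the TTL expires, this resolve is served from the cache.
    let (did, _timestamp) = resolver
        .resolve("alice.bsky.social")
        .await
        .expect("resolve failed");
    assert_eq!(did, "did:plc:abc123");
}
```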
+10
-1
src/handle_resolver/mod.rs
···
19
19
//! ```no_run
20
20
//! use std::sync::Arc;
21
21
//! use quickdid::handle_resolver::{create_base_resolver, create_caching_resolver, HandleResolver};
22
+
//! use quickdid::metrics::NoOpMetricsPublisher;
22
23
//!
23
24
//! # async fn example() -> Result<(), Box<dyn std::error::Error>> {
24
25
//! # use atproto_identity::resolve::HickoryDnsResolver;
25
26
//! # use reqwest::Client;
26
27
//! # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
27
28
//! # let http_client = Client::new();
29
+
//! # let metrics = Arc::new(NoOpMetricsPublisher);
28
30
//! // Create base resolver using factory function
29
31
//! let base = create_base_resolver(
30
32
//! dns_resolver,
31
33
//! http_client,
34
+
//! metrics.clone(),
32
35
//! );
33
36
//!
34
37
//! // Wrap with in-memory caching
35
-
//! let resolver = create_caching_resolver(base, 300);
38
+
//! let resolver = create_caching_resolver(base, 300, metrics);
36
39
//!
37
40
//! // Resolve a handle
38
41
//! let did = resolver.resolve("alice.bsky.social").await?;
···
44
47
mod base;
45
48
mod errors;
46
49
mod memory;
50
+
mod proactive_refresh;
47
51
mod rate_limited;
48
52
mod redis;
49
53
mod sqlite;
···
56
60
// Factory functions for creating resolvers
57
61
pub use base::create_base_resolver;
58
62
pub use memory::create_caching_resolver;
63
+
pub use proactive_refresh::{
64
+
ProactiveRefreshResolver, create_proactive_refresh_resolver,
65
+
create_proactive_refresh_resolver_dyn, create_proactive_refresh_resolver_with_metrics,
66
+
create_proactive_refresh_resolver_with_threshold,
67
+
};
59
68
pub use rate_limited::{create_rate_limited_resolver, create_rate_limited_resolver_with_timeout};
60
69
pub use redis::{create_redis_resolver, create_redis_resolver_with_ttl};
61
70
pub use sqlite::{create_sqlite_resolver, create_sqlite_resolver_with_ttl};
+438
src/handle_resolver/proactive_refresh.rs
···
1
+
use crate::handle_resolution_result::HandleResolutionResult;
2
+
use crate::handle_resolver::{HandleResolver, HandleResolverError};
3
+
use crate::metrics::MetricsPublisher;
4
+
use crate::queue::{HandleResolutionWork, QueueAdapter};
5
+
use async_trait::async_trait;
6
+
use std::sync::Arc;
7
+
use std::time::{SystemTime, UNIX_EPOCH};
8
+
use tracing::{debug, trace};
9
+
10
+
/// Create a ProactiveRefreshResolver with default 80% threshold
11
+
///
12
+
/// # Arguments
13
+
/// * `inner` - The inner resolver to wrap
14
+
/// * `queue` - The queue adapter for background refresh tasks
15
+
/// * `cache_ttl` - The TTL in seconds for cache entries
16
+
pub fn create_proactive_refresh_resolver<R, Q>(
17
+
inner: Arc<R>,
18
+
queue: Arc<Q>,
19
+
cache_ttl: u64,
20
+
) -> Arc<ProactiveRefreshResolver<R, Q>>
21
+
where
22
+
R: HandleResolver + Send + Sync + 'static,
23
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync + 'static,
24
+
{
25
+
Arc::new(ProactiveRefreshResolver::new(inner, queue, cache_ttl))
26
+
}
27
+
28
+
/// Create a ProactiveRefreshResolver with custom threshold
29
+
///
30
+
/// # Arguments
31
+
/// * `inner` - The inner resolver to wrap
32
+
/// * `queue` - The queue adapter for background refresh tasks
33
+
/// * `cache_ttl` - The TTL in seconds for cache entries
34
+
/// * `threshold` - The threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
35
+
pub fn create_proactive_refresh_resolver_with_threshold<R, Q>(
36
+
inner: Arc<R>,
37
+
queue: Arc<Q>,
38
+
cache_ttl: u64,
39
+
threshold: f64,
40
+
) -> Arc<ProactiveRefreshResolver<R, Q>>
41
+
where
42
+
R: HandleResolver + Send + Sync + 'static,
43
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync + 'static,
44
+
{
45
+
Arc::new(ProactiveRefreshResolver::with_threshold(
46
+
inner, queue, cache_ttl, threshold,
47
+
))
48
+
}
49
+
50
+
/// Wrapper struct for dynamic dispatch with proactive refresh
51
+
/// This works with trait objects instead of concrete types
52
+
pub struct DynProactiveRefreshResolver {
53
+
inner: Arc<dyn HandleResolver>,
54
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
55
+
metrics: Option<Arc<dyn MetricsPublisher>>,
56
+
#[allow(dead_code)]
57
+
cache_ttl: u64,
58
+
#[allow(dead_code)]
59
+
refresh_threshold: f64,
60
+
}
61
+
62
+
impl DynProactiveRefreshResolver {
63
+
pub fn new(
64
+
inner: Arc<dyn HandleResolver>,
65
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
66
+
cache_ttl: u64,
67
+
refresh_threshold: f64,
68
+
) -> Self {
69
+
Self::with_metrics(inner, queue, None, cache_ttl, refresh_threshold)
70
+
}
71
+
72
+
pub fn with_metrics(
73
+
inner: Arc<dyn HandleResolver>,
74
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
75
+
metrics: Option<Arc<dyn MetricsPublisher>>,
76
+
cache_ttl: u64,
77
+
refresh_threshold: f64,
78
+
) -> Self {
79
+
Self {
80
+
inner,
81
+
queue,
82
+
metrics,
83
+
cache_ttl,
84
+
refresh_threshold: refresh_threshold.clamp(0.0, 1.0),
85
+
}
86
+
}
87
+
88
+
async fn maybe_queue_for_refresh(&self, handle: &str, resolve_time: u64) {
89
+
// If resolution took less than 5ms, it was probably a cache hit
90
+
if resolve_time < 5000 {
91
+
trace!(
92
+
handle = handle,
93
+
resolve_time_us = resolve_time,
94
+
"Fast resolution detected, considering proactive refresh"
95
+
);
96
+
97
+
if let Some(metrics) = &self.metrics {
98
+
metrics.incr("proactive_refresh.cache_hit_detected").await;
99
+
}
100
+
101
+
// Simple heuristic: rate-limit refreshes instead of queueing on every cache hit
102
+
let now = SystemTime::now()
103
+
.duration_since(UNIX_EPOCH)
104
+
.unwrap_or_default()
105
+
.as_secs();
106
+
107
+
// Only queue when the current second falls on a minute boundary
108
+
if now % 60 == 0 {
109
+
let work = HandleResolutionWork {
110
+
handle: handle.to_string(),
111
+
};
112
+
113
+
if let Err(e) = self.queue.push(work).await {
114
+
debug!(
115
+
handle = handle,
116
+
error = %e,
117
+
"Failed to queue handle for proactive refresh"
118
+
);
119
+
if let Some(metrics) = &self.metrics {
120
+
metrics.incr("proactive_refresh.queue_error").await;
121
+
}
122
+
} else {
123
+
debug!(handle = handle, "Queued handle for proactive refresh");
124
+
if let Some(metrics) = &self.metrics {
125
+
metrics.incr("proactive_refresh.queued").await;
126
+
}
127
+
}
128
+
}
129
+
}
130
+
}
131
+
}
132
+
133
+
#[async_trait]
134
+
impl HandleResolver for DynProactiveRefreshResolver {
135
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
// Resolve through the inner resolver, timing the call locally so that
// sub-millisecond cache hits stand out from network resolutions
let start = std::time::Instant::now();
let (did, timestamp) = self.inner.resolve(handle).await?;
let resolve_time_us = start.elapsed().as_micros() as u64;
// Check if we should queue for refresh based on resolution time
self.maybe_queue_for_refresh(handle, resolve_time_us).await;
Ok((did, timestamp))
}
144
+
145
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
146
+
// Simply chain to inner resolver - no proactive refresh needed for manual sets
147
+
self.inner.set(handle, did).await
148
+
}
149
+
}
150
+
151
+
/// Create a ProactiveRefreshResolver with custom threshold using trait objects
152
+
/// This version works with dyn HandleResolver and dyn QueueAdapter
153
+
///
154
+
/// # Arguments
155
+
/// * `inner` - The inner resolver to wrap
156
+
/// * `queue` - The queue adapter for background refresh tasks
157
+
/// * `cache_ttl` - The TTL in seconds for cache entries
158
+
/// * `threshold` - The threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
159
+
pub fn create_proactive_refresh_resolver_dyn(
160
+
inner: Arc<dyn HandleResolver>,
161
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
162
+
cache_ttl: u64,
163
+
threshold: f64,
164
+
) -> Arc<dyn HandleResolver> {
165
+
Arc::new(DynProactiveRefreshResolver::new(
166
+
inner, queue, cache_ttl, threshold,
167
+
))
168
+
}
169
+
170
+
/// Create a ProactiveRefreshResolver with metrics support
171
+
pub fn create_proactive_refresh_resolver_with_metrics(
172
+
inner: Arc<dyn HandleResolver>,
173
+
queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
174
+
metrics: Arc<dyn MetricsPublisher>,
175
+
cache_ttl: u64,
176
+
threshold: f64,
177
+
) -> Arc<dyn HandleResolver> {
178
+
Arc::new(DynProactiveRefreshResolver::with_metrics(
179
+
inner,
180
+
queue,
181
+
Some(metrics),
182
+
cache_ttl,
183
+
threshold,
184
+
))
185
+
}
186
+
187
+
/// A handle resolver that proactively refreshes cache entries when they reach
188
+
/// a certain staleness threshold (default 80% of TTL).
189
+
///
190
+
/// This resolver wraps another resolver and checks successful resolutions from cache.
191
+
/// When a cached entry has lived for more than the threshold percentage of its TTL,
192
+
/// it queues the handle for background refresh to keep the cache warm.
193
+
///
194
+
/// Note: Due to the current trait design, this implementation times each resolution
195
+
/// locally as a heuristic; a very fast call is treated as a cache hit and may be queued for refresh.
196
+
/// For full functionality, the trait would need to expose cache timestamps.
197
+
pub struct ProactiveRefreshResolver<R: HandleResolver, Q: QueueAdapter<HandleResolutionWork>> {
198
+
inner: Arc<R>,
199
+
queue: Arc<Q>,
200
+
/// TTL in seconds for cache entries
201
+
cache_ttl: u64,
202
+
/// Threshold as a percentage (0.0 to 1.0) of TTL when to trigger refresh
203
+
/// Default is 0.8 (80%)
204
+
refresh_threshold: f64,
205
+
}
206
+
207
+
impl<R: HandleResolver, Q: QueueAdapter<HandleResolutionWork>> ProactiveRefreshResolver<R, Q> {
208
+
pub fn new(inner: Arc<R>, queue: Arc<Q>, cache_ttl: u64) -> Self {
209
+
Self::with_threshold(inner, queue, cache_ttl, 0.8)
210
+
}
211
+
212
+
pub fn with_threshold(
213
+
inner: Arc<R>,
214
+
queue: Arc<Q>,
215
+
cache_ttl: u64,
216
+
refresh_threshold: f64,
217
+
) -> Self {
218
+
Self {
219
+
inner,
220
+
queue,
221
+
cache_ttl,
222
+
refresh_threshold: refresh_threshold.clamp(0.0, 1.0),
223
+
}
224
+
}
225
+
226
+
/// Check if a cached entry needs proactive refresh based on its age
227
+
#[allow(dead_code)]
228
+
fn needs_refresh(&self, result: &HandleResolutionResult) -> bool {
229
+
let now = SystemTime::now()
230
+
.duration_since(UNIX_EPOCH)
231
+
.unwrap_or_default()
232
+
.as_secs();
233
+
234
+
let age = now.saturating_sub(result.timestamp);
235
+
let threshold = (self.cache_ttl as f64 * self.refresh_threshold) as u64;
236
+
237
+
let needs_refresh = age >= threshold;
238
+
239
+
if needs_refresh {
240
+
debug!(
241
+
handle = ?result.to_did(),
242
+
age_seconds = age,
243
+
threshold_seconds = threshold,
244
+
cache_ttl = self.cache_ttl,
245
+
"Cache entry needs proactive refresh"
246
+
);
247
+
} else {
248
+
trace!(
249
+
handle = ?result.to_did(),
250
+
age_seconds = age,
251
+
threshold_seconds = threshold,
252
+
"Cache entry still fresh"
253
+
);
254
+
}
255
+
256
+
needs_refresh
257
+
}
258
+
259
+
/// Queue a handle for background refresh
260
+
async fn queue_for_refresh(&self, handle: &str) {
261
+
let work = HandleResolutionWork {
262
+
handle: handle.to_string(),
263
+
};
264
+
265
+
match self.queue.push(work).await {
266
+
Ok(_) => {
267
+
debug!(handle = handle, "Queued handle for proactive refresh");
268
+
}
269
+
Err(e) => {
270
+
// Don't fail the request if we can't queue for refresh
271
+
debug!(
272
+
handle = handle,
273
+
error = %e,
274
+
"Failed to queue handle for proactive refresh"
275
+
);
276
+
}
277
+
}
278
+
}
279
+
280
+
/// Check if we should queue for refresh based on resolution time
281
+
///
282
+
/// This is a heuristic approach:
283
+
/// - If resolve_time is very low (< 5ms), it was likely a cache hit
284
+
/// - We probabilistically queue for refresh based on time since service start
285
+
///
286
+
/// For proper implementation, the HandleResolver trait would need to expose
287
+
/// cache metadata or return HandleResolutionResult directly.
288
+
async fn maybe_queue_for_refresh(&self, handle: &str, resolve_time: u64) {
289
+
// If resolution took less than 5ms, it was probably a cache hit
290
+
if resolve_time < 5000 {
291
+
// Use a simple probabilistic approach for demonstration
292
+
// In production, you'd want access to the actual cache timestamp
293
+
trace!(
294
+
handle = handle,
295
+
resolve_time_us = resolve_time,
296
+
"Fast resolution detected, considering proactive refresh"
297
+
);
298
+
299
+
// Queue for refresh with some probability to avoid overwhelming the queue
300
+
// This is a simplified approach - ideally we'd have access to cache metadata
301
+
let now = SystemTime::now()
302
+
.duration_since(UNIX_EPOCH)
303
+
.unwrap_or_default()
304
+
.as_secs();
305
+
306
+
// Simple heuristic: only queue when the current second falls on a minute boundary
307
+
if now % 60 == 0 {
308
+
self.queue_for_refresh(handle).await;
309
+
}
310
+
}
311
+
}
312
+
}
313
+
314
+
#[async_trait]
315
+
impl<R, Q> HandleResolver for ProactiveRefreshResolver<R, Q>
316
+
where
317
+
R: HandleResolver + Send + Sync,
318
+
Q: QueueAdapter<HandleResolutionWork> + Send + Sync,
319
+
{
320
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
// Resolve through the inner resolver, timing the call locally so that
// sub-millisecond cache hits stand out from network resolutions
let start = std::time::Instant::now();
let (did, timestamp) = self.inner.resolve(handle).await?;
let resolve_time_us = start.elapsed().as_micros() as u64;
// Check if we should queue for refresh based on resolution time
self.maybe_queue_for_refresh(handle, resolve_time_us).await;
Ok((did, timestamp))
}
329
+
}
330
+
331
+
#[cfg(test)]
332
+
mod tests {
333
+
use super::*;
334
+
use crate::handle_resolution_result::DidMethodType;
335
+
336
+
#[test]
337
+
fn test_needs_refresh_calculation() {
338
+
// Create a resolver with 100 second TTL and 80% threshold
339
+
let inner = Arc::new(MockResolver);
340
+
let queue = Arc::new(MockQueueAdapter);
341
+
let resolver = ProactiveRefreshResolver::new(inner, queue, 100);
342
+
343
+
let now = SystemTime::now()
344
+
.duration_since(UNIX_EPOCH)
345
+
.unwrap()
346
+
.as_secs();
347
+
348
+
// Test entry that's 50% through TTL (should not refresh)
349
+
let fresh_result = HandleResolutionResult {
350
+
timestamp: now - 50,
351
+
method_type: DidMethodType::Plc,
352
+
payload: "alice123".to_string(),
353
+
};
354
+
assert!(!resolver.needs_refresh(&fresh_result));
355
+
356
+
// Test entry that's 80% through TTL (should refresh)
357
+
let stale_result = HandleResolutionResult {
358
+
timestamp: now - 80,
359
+
method_type: DidMethodType::Plc,
360
+
payload: "alice123".to_string(),
361
+
};
362
+
assert!(resolver.needs_refresh(&stale_result));
363
+
364
+
// Test entry that's 90% through TTL (should definitely refresh)
365
+
let very_stale_result = HandleResolutionResult {
366
+
timestamp: now - 90,
367
+
method_type: DidMethodType::Plc,
368
+
payload: "alice123".to_string(),
369
+
};
370
+
assert!(resolver.needs_refresh(&very_stale_result));
371
+
}
372
+
373
+
#[test]
374
+
fn test_custom_threshold() {
375
+
let inner = Arc::new(MockResolver);
376
+
let queue = Arc::new(MockQueueAdapter);
377
+
378
+
// Create resolver with 50% threshold
379
+
let resolver = ProactiveRefreshResolver::with_threshold(inner, queue, 100, 0.5);
380
+
381
+
let now = SystemTime::now()
382
+
.duration_since(UNIX_EPOCH)
383
+
.unwrap()
384
+
.as_secs();
385
+
386
+
// Test entry that's 40% through TTL (should not refresh with 50% threshold)
387
+
let result_40 = HandleResolutionResult {
388
+
timestamp: now - 40,
389
+
method_type: DidMethodType::Plc,
390
+
payload: "alice123".to_string(),
391
+
};
392
+
assert!(!resolver.needs_refresh(&result_40));
393
+
394
+
// Test entry that's 60% through TTL (should refresh with 50% threshold)
395
+
let result_60 = HandleResolutionResult {
396
+
timestamp: now - 60,
397
+
method_type: DidMethodType::Plc,
398
+
payload: "alice123".to_string(),
399
+
};
400
+
assert!(resolver.needs_refresh(&result_60));
401
+
}
402
+
403
+
// Mock resolver for testing
404
+
struct MockResolver;
405
+
406
+
#[async_trait]
407
+
impl HandleResolver for MockResolver {
408
+
async fn resolve(&self, handle: &str) -> Result<(String, u64), HandleResolverError> {
409
+
Ok((format!("did:plc:{}", handle), 1000))
410
+
}
411
+
}
412
+
413
+
// Mock queue adapter for testing
414
+
struct MockQueueAdapter;
415
+
416
+
#[async_trait]
417
+
impl QueueAdapter<HandleResolutionWork> for MockQueueAdapter {
418
+
async fn pull(&self) -> Option<HandleResolutionWork> {
419
+
None
420
+
}
421
+
422
+
async fn push(&self, _work: HandleResolutionWork) -> crate::queue::Result<()> {
423
+
Ok(())
424
+
}
425
+
426
+
async fn ack(&self, _item: &HandleResolutionWork) -> crate::queue::Result<()> {
427
+
Ok(())
428
+
}
429
+
430
+
async fn try_push(&self, _work: HandleResolutionWork) -> crate::queue::Result<()> {
431
+
Ok(())
432
+
}
433
+
434
+
async fn is_healthy(&self) -> bool {
435
+
true
436
+
}
437
+
}
438
+
}
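
Note on the threshold math these tests exercise: the proactive-refresh decision reduces to comparing an entry's age against a fraction of its TTL. A minimal sketch, with illustrative names rather than the crate's actual internals:

fn needs_refresh(entry_timestamp: u64, ttl_seconds: u64, threshold: f64) -> bool {
    // Age of the cache entry in seconds since it was written.
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs();
    let age = now.saturating_sub(entry_timestamp);
    // Refresh once the entry has consumed `threshold` of its TTL,
    // e.g. 0.8 means refresh at 80% of the 100-second TTL above.
    (age as f64) >= (ttl_seconds as f64) * threshold
}

With ttl_seconds = 100 and threshold = 0.8 this matches the assertions above: an age of 50 stays fresh, while ages of 80 and 90 trigger a refresh.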
+80 -15 src/handle_resolver/rate_limited.rs
···
 
 use super::errors::HandleResolverError;
 use super::traits::HandleResolver;
+use crate::metrics::SharedMetricsPublisher;
 use async_trait::async_trait;
 use std::sync::Arc;
 use std::time::Duration;
···
 /// create_rate_limited_resolver,
 /// HandleResolver,
 /// };
+/// use quickdid::metrics::NoOpMetricsPublisher;
 ///
 /// # async fn example() {
 /// # use atproto_identity::resolve::HickoryDnsResolver;
 /// # use reqwest::Client;
 /// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
 /// # let http_client = Client::new();
+/// # let metrics = Arc::new(NoOpMetricsPublisher);
 /// // Create base resolver
-/// let base = create_base_resolver(dns_resolver, http_client);
+/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
 ///
 /// // Wrap with rate limiting (max 10 concurrent resolutions)
-/// let rate_limited = create_rate_limited_resolver(base, 10);
+/// let rate_limited = create_rate_limited_resolver(base, 10, metrics);
 ///
 /// // Use the rate-limited resolver
 /// let (did, timestamp) = rate_limited.resolve("alice.bsky.social").await.unwrap();
···
     /// Optional timeout for acquiring permits (in milliseconds).
     /// When None or 0, no timeout is applied.
     timeout_ms: Option<u64>,
+
+    /// Metrics publisher for telemetry.
+    metrics: SharedMetricsPublisher,
 }
 
 impl RateLimitedHandleResolver {
···
     ///
     /// * `inner` - The inner resolver to wrap
     /// * `max_concurrent` - Maximum number of concurrent resolutions allowed
-    pub fn new(inner: Arc<dyn HandleResolver>, max_concurrent: usize) -> Self {
+    /// * `metrics` - Metrics publisher for telemetry
+    pub fn new(
+        inner: Arc<dyn HandleResolver>,
+        max_concurrent: usize,
+        metrics: SharedMetricsPublisher,
+    ) -> Self {
         Self {
             inner,
             semaphore: Arc::new(Semaphore::new(max_concurrent)),
             timeout_ms: None,
+            metrics,
         }
     }
 
···
     /// * `inner` - The inner resolver to wrap
     /// * `max_concurrent` - Maximum number of concurrent resolutions allowed
     /// * `timeout_ms` - Timeout in milliseconds for acquiring permits (0 = no timeout)
+    /// * `metrics` - Metrics publisher for telemetry
     pub fn new_with_timeout(
         inner: Arc<dyn HandleResolver>,
         max_concurrent: usize,
         timeout_ms: u64,
+        metrics: SharedMetricsPublisher,
     ) -> Self {
         Self {
             inner,
···
             } else {
                 None
             },
+            metrics,
         }
     }
 }
···
 #[async_trait]
 impl HandleResolver for RateLimitedHandleResolver {
     async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
+        let permit_start = std::time::Instant::now();
+
+        // Track rate limiter queue depth
+        let available_permits = self.semaphore.available_permits();
+        self.metrics
+            .gauge(
+                "resolver.rate_limit.available_permits",
+                available_permits as u64,
+            )
+            .await;
+
         // Acquire a permit from the semaphore, with optional timeout
         let _permit = match self.timeout_ms {
             Some(timeout_ms) if timeout_ms > 0 => {
                 // Apply timeout when acquiring permit
                 let duration = Duration::from_millis(timeout_ms);
                 match timeout(duration, self.semaphore.acquire()).await {
-                    Ok(Ok(permit)) => permit,
+                    Ok(Ok(permit)) => {
+                        let wait_ms = permit_start.elapsed().as_millis() as u64;
+                        self.metrics
+                            .time("resolver.rate_limit.permit_acquired", wait_ms)
+                            .await;
+                        permit
+                    }
                     Ok(Err(e)) => {
                         // Semaphore error (e.g., closed)
+                        self.metrics.incr("resolver.rate_limit.permit_error").await;
                         return Err(HandleResolverError::ResolutionFailed(format!(
                             "Failed to acquire rate limit permit: {}",
                             e
···
                     }
                     Err(_) => {
                         // Timeout occurred
+                        self.metrics
+                            .incr("resolver.rate_limit.permit_timeout")
+                            .await;
                         return Err(HandleResolverError::ResolutionFailed(format!(
                             "Rate limit permit acquisition timed out after {}ms",
                             timeout_ms
···
             }
             _ => {
                 // No timeout configured, wait indefinitely
-                self.semaphore.acquire().await.map_err(|e| {
-                    HandleResolverError::ResolutionFailed(format!(
-                        "Failed to acquire rate limit permit: {}",
-                        e
-                    ))
-                })?
+                match self.semaphore.acquire().await {
+                    Ok(permit) => {
+                        let wait_ms = permit_start.elapsed().as_millis() as u64;
+                        self.metrics
+                            .time("resolver.rate_limit.permit_acquired", wait_ms)
+                            .await;
+                        permit
+                    }
+                    Err(e) => {
+                        self.metrics.incr("resolver.rate_limit.permit_error").await;
+                        return Err(HandleResolverError::ResolutionFailed(format!(
+                            "Failed to acquire rate limit permit: {}",
+                            e
+                        )));
+                    }
+                }
             }
         };
 
         // With permit acquired, forward to inner resolver
         self.inner.resolve(s).await
+    }
+
+    async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
+        // Set operations don't need rate limiting since they're typically administrative
+        // and don't involve network calls to external services
+        self.inner.set(handle, did).await
     }
 }
 
···
 ///
 /// * `inner` - The resolver to wrap with rate limiting
 /// * `max_concurrent` - Maximum number of concurrent resolutions allowed
+/// * `metrics` - Metrics publisher for telemetry
 ///
 /// # Returns
 ///
···
 /// # async fn example() {
 /// # use atproto_identity::resolve::HickoryDnsResolver;
 /// # use reqwest::Client;
+/// # use quickdid::metrics::NoOpMetricsPublisher;
 /// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
 /// # let http_client = Client::new();
-/// let base = create_base_resolver(dns_resolver, http_client);
-/// let rate_limited = create_rate_limited_resolver(base, 10);
+/// # let metrics = Arc::new(NoOpMetricsPublisher);
+/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
+/// let rate_limited = create_rate_limited_resolver(base, 10, metrics);
 /// # }
 /// ```
 pub fn create_rate_limited_resolver(
     inner: Arc<dyn HandleResolver>,
     max_concurrent: usize,
+    metrics: SharedMetricsPublisher,
 ) -> Arc<dyn HandleResolver> {
-    Arc::new(RateLimitedHandleResolver::new(inner, max_concurrent))
+    Arc::new(RateLimitedHandleResolver::new(
+        inner,
+        max_concurrent,
+        metrics,
+    ))
 }
 
 /// Create a rate-limited handle resolver with timeout.
···
 /// * `inner` - The resolver to wrap with rate limiting
 /// * `max_concurrent` - Maximum number of concurrent resolutions allowed
 /// * `timeout_ms` - Timeout in milliseconds for acquiring permits (0 = no timeout)
+/// * `metrics` - Metrics publisher for telemetry
 ///
 /// # Returns
 ///
···
 /// # async fn example() {
 /// # use atproto_identity::resolve::HickoryDnsResolver;
 /// # use reqwest::Client;
+/// # use quickdid::metrics::NoOpMetricsPublisher;
 /// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
 /// # let http_client = Client::new();
-/// let base = create_base_resolver(dns_resolver, http_client);
+/// # let metrics = Arc::new(NoOpMetricsPublisher);
+/// let base = create_base_resolver(dns_resolver, http_client, metrics.clone());
 /// // Rate limit with 10 concurrent resolutions and 5 second timeout
-/// let rate_limited = create_rate_limited_resolver_with_timeout(base, 10, 5000);
+/// let rate_limited = create_rate_limited_resolver_with_timeout(base, 10, 5000, metrics);
 /// # }
 /// ```
 pub fn create_rate_limited_resolver_with_timeout(
     inner: Arc<dyn HandleResolver>,
     max_concurrent: usize,
     timeout_ms: u64,
+    metrics: SharedMetricsPublisher,
 ) -> Arc<dyn HandleResolver> {
     Arc::new(RateLimitedHandleResolver::new_with_timeout(
         inner,
         max_concurrent,
         timeout_ms,
+        metrics,
     ))
 }
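
For context, the `_permit` binding above relies on RAII: the permit is held across the inner resolve and released when it goes out of scope. A minimal sketch of that pattern, assuming tokio (`do_work` is an illustrative stand-in for the wrapped resolver):

use std::sync::Arc;
use tokio::sync::Semaphore;

async fn limited_call(sem: Arc<Semaphore>) -> Result<(), tokio::sync::AcquireError> {
    let _permit = sem.acquire().await?; // waits once max_concurrent permits are taken
    do_work().await;                    // at most N of these run concurrently
    Ok(())                              // permit released here when `_permit` drops
}

async fn do_work() {}

This is why `set` can skip the semaphore entirely: it never performs the expensive network resolution that the permit is guarding.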
+554 -9 src/handle_resolver/redis.rs
···
 use super::errors::HandleResolverError;
 use super::traits::HandleResolver;
 use crate::handle_resolution_result::HandleResolutionResult;
+use crate::metrics::SharedMetricsPublisher;
 use async_trait::async_trait;
+use atproto_identity::resolve::{InputType, parse_input};
 use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
 use metrohash::MetroHash64;
 use std::hash::Hasher as _;
···
 /// use std::sync::Arc;
 /// use deadpool_redis::Pool;
 /// use quickdid::handle_resolver::{create_base_resolver, create_redis_resolver, HandleResolver};
+/// use quickdid::metrics::NoOpMetricsPublisher;
 ///
 /// # async fn example() {
 /// # use atproto_identity::resolve::HickoryDnsResolver;
 /// # use reqwest::Client;
 /// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
 /// # let http_client = Client::new();
-/// # let base_resolver = create_base_resolver(dns_resolver, http_client);
+/// # let metrics = Arc::new(NoOpMetricsPublisher);
+/// # let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
 /// # let redis_pool: Pool = todo!();
 /// // Create with default 90-day TTL
 /// let resolver = create_redis_resolver(
 ///     base_resolver,
-///     redis_pool
+///     redis_pool,
+///     metrics
 /// );
 /// # }
 /// ```
···
     key_prefix: String,
     /// TTL for cache entries in seconds
     ttl_seconds: u64,
+    /// Metrics publisher for telemetry
+    metrics: SharedMetricsPublisher,
 }
 
 impl RedisHandleResolver {
     /// Create a new Redis-backed handle resolver with default 90-day TTL.
-    fn new(inner: Arc<dyn HandleResolver>, pool: RedisPool) -> Self {
-        Self::with_ttl(inner, pool, 90 * 24 * 60 * 60) // 90 days default
+    fn new(
+        inner: Arc<dyn HandleResolver>,
+        pool: RedisPool,
+        metrics: SharedMetricsPublisher,
+    ) -> Self {
+        Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
     }
 
     /// Create a new Redis-backed handle resolver with custom TTL.
-    fn with_ttl(inner: Arc<dyn HandleResolver>, pool: RedisPool, ttl_seconds: u64) -> Self {
-        Self::with_full_config(inner, pool, "handle:".to_string(), ttl_seconds)
+    fn with_ttl(
+        inner: Arc<dyn HandleResolver>,
+        pool: RedisPool,
+        ttl_seconds: u64,
+        metrics: SharedMetricsPublisher,
+    ) -> Self {
+        Self::with_full_config(inner, pool, "handle:".to_string(), ttl_seconds, metrics)
     }
 
     /// Create a new Redis-backed handle resolver with full configuration.
···
         pool: RedisPool,
         key_prefix: String,
         ttl_seconds: u64,
+        metrics: SharedMetricsPublisher,
     ) -> Self {
         Self {
             inner,
             pool,
             key_prefix,
             ttl_seconds,
+            metrics,
         }
     }
 
···
     fn ttl_seconds(&self) -> u64 {
         self.ttl_seconds
     }
+
+    /// Purge a handle and its associated DID from the cache.
+    ///
+    /// This method removes both the handle->DID mapping and the reverse DID->handle mapping.
+    async fn purge_handle(&self, handle: &str) -> Result<(), HandleResolverError> {
+        let handle_key = self.make_key(handle);
+
+        match self.pool.get().await {
+            Ok(mut conn) => {
+                // First, try to get the cached result to find the associated DID
+                let cached: Option<Vec<u8>> = match conn.get(&handle_key).await {
+                    Ok(value) => value,
+                    Err(e) => {
+                        tracing::warn!("Failed to get handle from Redis for purging: {}", e);
+                        self.metrics.incr("resolver.redis.purge_get_error").await;
+                        None
+                    }
+                };
+
+                // If we found a cached result, extract the DID and delete both keys
+                if let Some(cached_bytes) = cached {
+                    if let Ok(cached_result) = HandleResolutionResult::from_bytes(&cached_bytes) {
+                        if let Some(did) = cached_result.to_did() {
+                            let did_key = self.make_key(&did);
+
+                            // Delete both the handle key and the DID key
+                            let _: Result<(), _> = conn.del(&[&handle_key, &did_key]).await;
+
+                            tracing::debug!("Purged handle {} and associated DID {}", handle, did);
+                            self.metrics
+                                .incr("resolver.redis.purge_handle_success")
+                                .await;
+                        } else {
+                            // Just delete the handle key if no DID was resolved
+                            let _: Result<(), _> = conn.del(&handle_key).await;
+                            tracing::debug!("Purged unresolved handle {}", handle);
+                            self.metrics
+                                .incr("resolver.redis.purge_handle_unresolved")
+                                .await;
+                        }
+                    } else {
+                        // If we can't deserialize, just delete the handle key
+                        let _: Result<(), _> = conn.del(&handle_key).await;
+                        tracing::warn!("Purged handle {} with undeserializable data", handle);
+                        self.metrics
+                            .incr("resolver.redis.purge_handle_corrupt")
+                            .await;
+                    }
+                } else {
+                    tracing::debug!("Handle {} not found in cache for purging", handle);
+                    self.metrics
+                        .incr("resolver.redis.purge_handle_not_found")
+                        .await;
+                }
+
+                Ok(())
+            }
+            Err(e) => {
+                tracing::warn!("Failed to get Redis connection for purging: {}", e);
+                self.metrics
+                    .incr("resolver.redis.purge_connection_error")
+                    .await;
+                Err(HandleResolverError::ResolutionFailed(format!(
+                    "Redis connection error: {}",
+                    e
+                )))
+            }
+        }
+    }
+
+    /// Purge a DID and its associated handle from the cache.
+    ///
+    /// This method removes both the DID->handle mapping and the handle->DID mapping.
+    async fn purge_did(&self, did: &str) -> Result<(), HandleResolverError> {
+        let did_key = self.make_key(did);
+
+        match self.pool.get().await {
+            Ok(mut conn) => {
+                // First, try to get the associated handle from the reverse mapping
+                let handle_bytes: Option<Vec<u8>> = match conn.get(&did_key).await {
+                    Ok(value) => value,
+                    Err(e) => {
+                        tracing::warn!("Failed to get DID from Redis for purging: {}", e);
+                        self.metrics.incr("resolver.redis.purge_get_error").await;
+                        None
+                    }
+                };
+
+                // If we found a handle, delete both keys
+                if let Some(handle_bytes) = handle_bytes {
+                    if let Ok(handle) = String::from_utf8(handle_bytes) {
+                        let handle_key = self.make_key(&handle);
+
+                        // Delete both the DID key and the handle key
+                        let _: Result<(), _> = conn.del(&[&did_key, &handle_key]).await;
+
+                        tracing::debug!("Purged DID {} and associated handle {}", did, handle);
+                        self.metrics.incr("resolver.redis.purge_did_success").await;
+                    } else {
+                        // If we can't parse the handle, just delete the DID key
+                        let _: Result<(), _> = conn.del(&did_key).await;
+                        tracing::warn!("Purged DID {} with unparseable handle data", did);
+                        self.metrics.incr("resolver.redis.purge_did_corrupt").await;
+                    }
+                } else {
+                    tracing::debug!("DID {} not found in cache for purging", did);
+                    self.metrics
+                        .incr("resolver.redis.purge_did_not_found")
+                        .await;
+                }
+
+                Ok(())
+            }
+            Err(e) => {
+                tracing::warn!("Failed to get Redis connection for purging: {}", e);
+                self.metrics
+                    .incr("resolver.redis.purge_connection_error")
+                    .await;
+                Err(HandleResolverError::ResolutionFailed(format!(
+                    "Redis connection error: {}",
+                    e
+                )))
+            }
+        }
+    }
 }
 
 #[async_trait]
···
                 let cached: Option<Vec<u8>> = match conn.get(&key).await {
                     Ok(value) => value,
                     Err(e) => {
+                        self.metrics.incr("resolver.redis.get_error").await;
                         tracing::warn!("Failed to get handle from Redis cache: {}", e);
                         None
                     }
···
                         Ok(cached_result) => {
                             if let Some(did) = cached_result.to_did() {
                                 tracing::debug!("Cache hit for handle {}: {}", handle, did);
+                                self.metrics.incr("resolver.redis.cache_hit").await;
                                 return Ok((did, cached_result.timestamp));
                             } else {
                                 tracing::debug!("Cache hit (not resolved) for handle {}", handle);
+                                self.metrics
+                                    .incr("resolver.redis.cache_hit_not_resolved")
+                                    .await;
                                 return Err(HandleResolverError::HandleNotFound);
                             }
                         }
···
                                 handle,
                                 e
                             );
+                            self.metrics.incr("resolver.redis.deserialize_error").await;
                             // Fall through to re-resolve if deserialization fails
                         }
                     }
···
 
                 // Not in cache, resolve through inner resolver
                 tracing::debug!("Cache miss for handle {}, resolving...", handle);
+                self.metrics.incr("resolver.redis.cache_miss").await;
                 let result = self.inner.resolve(s).await;
 
                 // Create and serialize resolution result
···
                         Ok(res) => res,
                         Err(e) => {
                             tracing::warn!("Failed to create resolution result: {}", e);
+                            self.metrics
+                                .incr("resolver.redis.result_create_error")
+                                .await;
                             return result;
                         }
                     }
···
                         Ok(res) => res,
                         Err(err) => {
                             tracing::warn!("Failed to create not_resolved result: {}", err);
+                            self.metrics
+                                .incr("resolver.redis.result_create_error")
+                                .await;
                             return result;
                         }
                     }
···
                             .await
                         {
                             tracing::warn!("Failed to cache handle resolution in Redis: {}", e);
+                            self.metrics.incr("resolver.redis.cache_set_error").await;
+                        } else {
+                            self.metrics.incr("resolver.redis.cache_set").await;
+
+                            // For successful resolutions, also store reverse DID -> handle mapping
+                            if let Ok((did, _)) = &result {
+                                let did_key = self.make_key(did);
+                                if let Err(e) = conn
+                                    .set_ex::<_, _, ()>(
+                                        &did_key,
+                                        handle.as_bytes(),
+                                        self.ttl_seconds(),
+                                    )
+                                    .await
+                                {
+                                    tracing::warn!(
+                                        "Failed to cache reverse DID->handle mapping in Redis: {}",
+                                        e
+                                    );
+                                    self.metrics
+                                        .incr("resolver.redis.reverse_cache_set_error")
+                                        .await;
+                                } else {
+                                    tracing::debug!(
+                                        "Cached reverse mapping for DID {}: {}",
+                                        did,
+                                        handle
+                                    );
+                                    self.metrics.incr("resolver.redis.reverse_cache_set").await;
+                                }
+                            }
                         }
                     }
                     Err(e) => {
···
                             handle,
                             e
                         );
+                        self.metrics.incr("resolver.redis.serialize_error").await;
                     }
                 }
 
···
                     "Failed to get Redis connection, falling back to uncached resolution: {}",
                     e
                 );
+                self.metrics.incr("resolver.redis.connection_error").await;
                 self.inner.resolve(s).await
             }
         }
     }
+
+    async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
+        // Use atproto_identity's parse_input to properly identify the input type
+        let parsed_input = parse_input(subject)
+            .map_err(|_| HandleResolverError::InvalidSubject(subject.to_string()))?;
+        match parsed_input {
+            InputType::Handle(handle) => {
+                // It's a handle, purge using the lowercase version
+                self.purge_handle(&handle.to_lowercase()).await
+            }
+            InputType::Plc(did) | InputType::Web(did) => {
+                // It's a DID, purge the DID
+                self.purge_did(&did).await
+            }
+        }
+    }
+
+    async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
+        // Normalize the handle to lowercase
+        let handle = handle.to_lowercase();
+        let handle_key = self.make_key(&handle);
+        let did_key = self.make_key(did);
+
+        match self.pool.get().await {
+            Ok(mut conn) => {
+                // Create a resolution result for the successful mapping
+                let resolution_result = match HandleResolutionResult::success(did) {
+                    Ok(res) => res,
+                    Err(e) => {
+                        tracing::warn!(
+                            "Failed to create resolution result for set operation: {}",
+                            e
+                        );
+                        self.metrics
+                            .incr("resolver.redis.set_result_create_error")
+                            .await;
+                        return Err(HandleResolverError::InvalidSubject(format!(
+                            "Failed to create resolution result: {}",
+                            e
+                        )));
+                    }
+                };
+
+                // Serialize to bytes
+                match resolution_result.to_bytes() {
+                    Ok(bytes) => {
+                        // Set the handle -> DID mapping with expiration
+                        if let Err(e) = conn
+                            .set_ex::<_, _, ()>(&handle_key, bytes, self.ttl_seconds())
+                            .await
+                        {
+                            tracing::warn!("Failed to set handle->DID mapping in Redis: {}", e);
+                            self.metrics.incr("resolver.redis.set_cache_error").await;
+                            return Err(HandleResolverError::ResolutionFailed(format!(
+                                "Failed to set cache: {}",
+                                e
+                            )));
+                        }
+
+                        // Set the reverse DID -> handle mapping
+                        if let Err(e) = conn
+                            .set_ex::<_, _, ()>(&did_key, handle.as_bytes(), self.ttl_seconds())
+                            .await
+                        {
+                            tracing::warn!("Failed to set DID->handle mapping in Redis: {}", e);
+                            self.metrics
+                                .incr("resolver.redis.set_reverse_cache_error")
+                                .await;
+                            // Don't fail the operation, but log the warning
+                        }
+
+                        tracing::debug!("Set handle {} -> DID {} mapping in cache", handle, did);
+                        self.metrics.incr("resolver.redis.set_success").await;
+                        Ok(())
+                    }
+                    Err(e) => {
+                        tracing::warn!(
+                            "Failed to serialize resolution result for set operation: {}",
+                            e
+                        );
+                        self.metrics
+                            .incr("resolver.redis.set_serialize_error")
+                            .await;
+                        Err(HandleResolverError::InvalidSubject(format!(
+                            "Failed to serialize: {}",
+                            e
+                        )))
+                    }
+                }
+            }
+            Err(e) => {
+                tracing::warn!("Failed to get Redis connection for set operation: {}", e);
+                self.metrics
+                    .incr("resolver.redis.set_connection_error")
+                    .await;
+                Err(HandleResolverError::ResolutionFailed(format!(
+                    "Redis connection error: {}",
+                    e
+                )))
+            }
+        }
+    }
 }
 
 /// Create a new Redis-backed handle resolver with default 90-day TTL.
···
 ///
 /// * `inner` - The underlying resolver to use for actual resolution
 /// * `pool` - Redis connection pool
+/// * `metrics` - Metrics publisher for telemetry
 ///
 /// # Example
 ///
···
 /// use std::sync::Arc;
 /// use quickdid::handle_resolver::{create_base_resolver, create_redis_resolver, HandleResolver};
 /// use quickdid::cache::create_redis_pool;
+/// use quickdid::metrics::NoOpMetricsPublisher;
 ///
 /// # async fn example() -> anyhow::Result<()> {
 /// # use atproto_identity::resolve::HickoryDnsResolver;
 /// # use reqwest::Client;
 /// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
 /// # let http_client = Client::new();
+/// # let metrics = Arc::new(NoOpMetricsPublisher);
 /// let base = create_base_resolver(
 ///     dns_resolver,
 ///     http_client,
+///     metrics.clone(),
 /// );
 ///
 /// let pool = create_redis_pool("redis://localhost:6379")?;
-/// let resolver = create_redis_resolver(base, pool);
+/// let resolver = create_redis_resolver(base, pool, metrics);
 /// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
 /// # Ok(())
 /// # }
···
 pub fn create_redis_resolver(
     inner: Arc<dyn HandleResolver>,
     pool: RedisPool,
+    metrics: SharedMetricsPublisher,
 ) -> Arc<dyn HandleResolver> {
-    Arc::new(RedisHandleResolver::new(inner, pool))
+    Arc::new(RedisHandleResolver::new(inner, pool, metrics))
 }
 
 /// Create a new Redis-backed handle resolver with custom TTL.
···
 /// * `inner` - The underlying resolver to use for actual resolution
 /// * `pool` - Redis connection pool
 /// * `ttl_seconds` - TTL for cache entries in seconds
+/// * `metrics` - Metrics publisher for telemetry
 pub fn create_redis_resolver_with_ttl(
     inner: Arc<dyn HandleResolver>,
     pool: RedisPool,
     ttl_seconds: u64,
+    metrics: SharedMetricsPublisher,
 ) -> Arc<dyn HandleResolver> {
-    Arc::new(RedisHandleResolver::with_ttl(inner, pool, ttl_seconds))
+    Arc::new(RedisHandleResolver::with_ttl(
+        inner,
+        pool,
+        ttl_seconds,
+        metrics,
+    ))
 }
 
 #[cfg(test)]
···
             expected_did: "did:plc:testuser123".to_string(),
         });
 
+        // Create metrics publisher
+        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
         // Create Redis-backed resolver with a unique key prefix for testing
         let test_prefix = format!(
             "test:handle:{}:",
···
             pool.clone(),
             test_prefix.clone(),
             3600,
+            metrics,
         );
 
         let test_handle = "alice.bsky.social";
···
     }
 
     #[tokio::test]
+    async fn test_redis_handle_resolver_bidirectional_purge() {
+        let pool = match crate::test_helpers::get_test_redis_pool() {
+            Some(p) => p,
+            None => return,
+        };
+
+        // Create mock resolver
+        let mock_resolver = Arc::new(MockHandleResolver {
+            should_fail: false,
+            expected_did: "did:plc:testuser456".to_string(),
+        });
+
+        // Create metrics publisher
+        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
+        // Create Redis-backed resolver with a unique key prefix for testing
+        let test_prefix = format!(
+            "test:handle:{}:",
+            std::time::SystemTime::now()
+                .duration_since(std::time::UNIX_EPOCH)
+                .unwrap()
+                .as_nanos()
+        );
+        let redis_resolver = RedisHandleResolver::with_full_config(
+            mock_resolver,
+            pool.clone(),
+            test_prefix.clone(),
+            3600,
+            metrics,
+        );
+
+        let test_handle = "bob.bsky.social";
+        let expected_did = "did:plc:testuser456";
+
+        // First resolution - should call inner resolver and cache both directions
+        let (result1, _) = redis_resolver.resolve(test_handle).await.unwrap();
+        assert_eq!(result1, expected_did);
+
+        // Verify both keys exist in Redis
+        if let Ok(mut conn) = pool.get().await {
+            let mut h = MetroHash64::default();
+            h.write(test_handle.as_bytes());
+            let handle_key = format!("{}{}", test_prefix, h.finish());
+
+            let mut h2 = MetroHash64::default();
+            h2.write(expected_did.as_bytes());
+            let did_key = format!("{}{}", test_prefix, h2.finish());
+
+            // Check handle -> DID mapping exists
+            let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
+            assert!(handle_exists, "Handle key should exist in cache");
+
+            // Check DID -> handle mapping exists
+            let did_exists: bool = conn.exists(&did_key).await.unwrap();
+            assert!(did_exists, "DID key should exist in cache");
+
+            // Test purge by handle using the trait method
+            redis_resolver.purge(test_handle).await.unwrap();
+
+            // Verify both keys were deleted
+            let handle_exists_after: bool = conn.exists(&handle_key).await.unwrap();
+            assert!(
+                !handle_exists_after,
+                "Handle key should be deleted after purge"
+            );
+
+            let did_exists_after: bool = conn.exists(&did_key).await.unwrap();
+            assert!(!did_exists_after, "DID key should be deleted after purge");
+        }
+
+        // Re-resolve to cache again
+        let (result2, _) = redis_resolver.resolve(test_handle).await.unwrap();
+        assert_eq!(result2, expected_did);
+
+        // Test purge by DID using the trait method
+        redis_resolver.purge(expected_did).await.unwrap();
+
+        // Verify both keys were deleted again
+        if let Ok(mut conn) = pool.get().await {
+            let mut h = MetroHash64::default();
+            h.write(test_handle.as_bytes());
+            let handle_key = format!("{}{}", test_prefix, h.finish());
+
+            let mut h2 = MetroHash64::default();
+            h2.write(expected_did.as_bytes());
+            let did_key = format!("{}{}", test_prefix, h2.finish());
+
+            let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
+            assert!(
+                !handle_exists,
+                "Handle key should be deleted after DID purge"
+            );
+
+            let did_exists: bool = conn.exists(&did_key).await.unwrap();
+            assert!(!did_exists, "DID key should be deleted after DID purge");
+        }
+    }
+
+    #[tokio::test]
+    async fn test_redis_handle_resolver_purge_input_types() {
+        let pool = match crate::test_helpers::get_test_redis_pool() {
+            Some(p) => p,
+            None => return,
+        };
+
+        // Create mock resolver
+        let mock_resolver = Arc::new(MockHandleResolver {
+            should_fail: false,
+            expected_did: "did:plc:testuser789".to_string(),
+        });
+
+        // Create metrics publisher
+        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
+        // Create Redis-backed resolver with a unique key prefix for testing
+        let test_prefix = format!(
+            "test:handle:{}:",
+            std::time::SystemTime::now()
+                .duration_since(std::time::UNIX_EPOCH)
+                .unwrap()
+                .as_nanos()
+        );
+        let redis_resolver = RedisHandleResolver::with_full_config(
+            mock_resolver,
+            pool.clone(),
+            test_prefix.clone(),
+            3600,
+            metrics,
+        );
+
+        // Test different input formats
+        let test_cases = vec![
+            ("alice.bsky.social", "alice.bsky.social"), // Handle
+            ("ALICE.BSKY.SOCIAL", "alice.bsky.social"), // Handle (uppercase)
+            ("did:plc:abc123", "did:plc:abc123"),       // PLC DID
+            ("did:web:example.com", "did:web:example.com"), // Web DID
+        ];
+
+        for (input, expected_key) in test_cases {
+            // Resolve first to cache it
+            if !input.starts_with("did:") {
+                let _ = redis_resolver.resolve(input).await;
+            }
+
+            // Test purging with different input formats
+            let result = redis_resolver.purge(input).await;
+            assert!(result.is_ok(), "Failed to purge {}: {:?}", input, result);
+
+            // Verify the key was handled correctly based on type
+            if let Ok(mut conn) = pool.get().await {
+                let mut h = MetroHash64::default();
+                h.write(expected_key.as_bytes());
+                let key = format!("{}{}", test_prefix, h.finish());
+
+                // After purge, key should not exist
+                let exists: bool = conn.exists(&key).await.unwrap_or(false);
+                assert!(!exists, "Key for {} should not exist after purge", input);
+            }
+        }
+    }
+
+    #[tokio::test]
+    async fn test_redis_handle_resolver_set_method() {
+        let pool = match crate::test_helpers::get_test_redis_pool() {
+            Some(p) => p,
+            None => return,
+        };
+
+        // Create mock resolver
+        let mock_resolver = Arc::new(MockHandleResolver {
+            should_fail: false,
+            expected_did: "did:plc:old".to_string(),
+        });
+
+        // Create metrics publisher
+        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
+        // Create Redis-backed resolver with a unique key prefix for testing
+        let test_prefix = format!(
+            "test:handle:{}:",
+            std::time::SystemTime::now()
+                .duration_since(std::time::UNIX_EPOCH)
+                .unwrap()
+                .as_nanos()
+        );
+        let redis_resolver = RedisHandleResolver::with_full_config(
+            mock_resolver,
+            pool.clone(),
+            test_prefix.clone(),
+            3600,
+            metrics,
+        );
+
+        let test_handle = "charlie.bsky.social";
+        let test_did = "did:plc:newuser123";
+
+        // Set the mapping using the trait method
+        redis_resolver.set(test_handle, test_did).await.unwrap();
+
+        // Verify the mapping by resolving the handle
+        let (resolved_did, _) = redis_resolver.resolve(test_handle).await.unwrap();
+        assert_eq!(resolved_did, test_did);
+
+        // Test that uppercase handles are normalized
+        redis_resolver
+            .set("DAVE.BSKY.SOCIAL", "did:plc:dave456")
+            .await
+            .unwrap();
+        let (resolved_did2, _) = redis_resolver.resolve("dave.bsky.social").await.unwrap();
+        assert_eq!(resolved_did2, "did:plc:dave456");
+
+        // Verify both forward and reverse mappings exist
+        if let Ok(mut conn) = pool.get().await {
+            let mut h = MetroHash64::default();
+            h.write(test_handle.as_bytes());
+            let handle_key = format!("{}{}", test_prefix, h.finish());
+
+            let mut h2 = MetroHash64::default();
+            h2.write(test_did.as_bytes());
+            let did_key = format!("{}{}", test_prefix, h2.finish());
+
+            // Check both keys exist
+            let handle_exists: bool = conn.exists(&handle_key).await.unwrap();
+            assert!(handle_exists, "Handle key should exist after set");
+
+            let did_exists: bool = conn.exists(&did_key).await.unwrap();
+            assert!(did_exists, "DID key should exist after set");
+
+            // Clean up test data
+            let _: Result<(), _> = conn.del(&[&handle_key, &did_key]).await;
+        }
+    }
+
+    #[tokio::test]
     async fn test_redis_handle_resolver_cache_error() {
         let pool = match crate::test_helpers::get_test_redis_pool() {
             Some(p) => p,
···
             expected_did: String::new(),
         });
 
+        // Create metrics publisher
+        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
         // Create Redis-backed resolver with a unique key prefix for testing
         let test_prefix = format!(
             "test:handle:{}:",
···
             pool.clone(),
             test_prefix.clone(),
             3600,
+            metrics,
         );
 
         let test_handle = "error.bsky.social";
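
The bidirectional tests above reconstruct cache keys by hand; the scheme is one prefix plus a MetroHash64 digest of the identifier, applied identically to handles and DIDs. A minimal sketch of that keying, mirroring how the tests use `make_key` (not necessarily its exact signature in the crate):

use metrohash::MetroHash64;
use std::hash::Hasher as _;

fn make_key(prefix: &str, identifier: &str) -> String {
    // Hash a handle or a DID alike; the forward entry stores the serialized
    // resolution result, the reverse entry stores the raw handle bytes.
    let mut hasher = MetroHash64::default();
    hasher.write(identifier.as_bytes());
    format!("{}{}", prefix, hasher.finish())
}

Keeping both directions under the same prefix and TTL is what lets purge-by-handle and purge-by-DID each delete the pair in a single `DEL`.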
+140 -10 src/handle_resolver/sqlite.rs
···
 use super::errors::HandleResolverError;
 use super::traits::HandleResolver;
 use crate::handle_resolution_result::HandleResolutionResult;
+use crate::metrics::SharedMetricsPublisher;
 use async_trait::async_trait;
 use metrohash::MetroHash64;
 use sqlx::{Row, SqlitePool};
···
 /// use std::sync::Arc;
 /// use sqlx::SqlitePool;
 /// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
+/// use quickdid::metrics::NoOpMetricsPublisher;
 ///
 /// # async fn example() {
 /// # use atproto_identity::resolve::HickoryDnsResolver;
 /// # use reqwest::Client;
 /// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
 /// # let http_client = Client::new();
-/// # let base_resolver = create_base_resolver(dns_resolver, http_client);
+/// # let metrics = Arc::new(NoOpMetricsPublisher);
+/// # let base_resolver = create_base_resolver(dns_resolver, http_client, metrics.clone());
 /// # let sqlite_pool: SqlitePool = todo!();
 /// // Create with default 90-day TTL
 /// let resolver = create_sqlite_resolver(
 ///     base_resolver,
-///     sqlite_pool
+///     sqlite_pool,
+///     metrics
 /// );
 /// # }
 /// ```
···
     pool: SqlitePool,
     /// TTL for cache entries in seconds
     ttl_seconds: u64,
+    /// Metrics publisher for telemetry
+    metrics: SharedMetricsPublisher,
 }
 
 impl SqliteHandleResolver {
     /// Create a new SQLite-backed handle resolver with default 90-day TTL.
-    fn new(inner: Arc<dyn HandleResolver>, pool: SqlitePool) -> Self {
-        Self::with_ttl(inner, pool, 90 * 24 * 60 * 60) // 90 days default
+    fn new(
+        inner: Arc<dyn HandleResolver>,
+        pool: SqlitePool,
+        metrics: SharedMetricsPublisher,
+    ) -> Self {
+        Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
     }
 
     /// Create a new SQLite-backed handle resolver with custom TTL.
-    fn with_ttl(inner: Arc<dyn HandleResolver>, pool: SqlitePool, ttl_seconds: u64) -> Self {
+    fn with_ttl(
+        inner: Arc<dyn HandleResolver>,
+        pool: SqlitePool,
+        ttl_seconds: u64,
+        metrics: SharedMetricsPublisher,
+    ) -> Self {
         Self {
             inner,
             pool,
             ttl_seconds,
+            metrics,
         }
     }
 
···
                     Ok(cached_result) => {
                         if let Some(did) = cached_result.to_did() {
                             tracing::debug!("Cache hit for handle {}: {}", handle, did);
+                            self.metrics.incr("resolver.sqlite.cache_hit").await;
                             return Ok((did, cached_result.timestamp));
                         } else {
                             tracing::debug!("Cache hit (not resolved) for handle {}", handle);
+                            self.metrics
+                                .incr("resolver.sqlite.cache_hit_not_resolved")
+                                .await;
                             return Err(HandleResolverError::HandleNotFound);
                         }
                     }
···
                             handle,
                             e
                         );
+                        self.metrics.incr("resolver.sqlite.deserialize_error").await;
                         // Fall through to re-resolve if deserialization fails
                     }
                 }
             } else {
                 tracing::debug!("Cache entry expired for handle {}", handle);
+                self.metrics.incr("resolver.sqlite.cache_expired").await;
                 // Entry is expired, we'll re-resolve and update it
             }
         }
         Ok(None) => {
             tracing::debug!("Cache miss for handle {}, resolving...", handle);
+            self.metrics.incr("resolver.sqlite.cache_miss").await;
         }
         Err(e) => {
             tracing::warn!("Failed to query SQLite cache for handle {}: {}", handle, e);
+            self.metrics.incr("resolver.sqlite.query_error").await;
             // Fall through to resolve without caching on database error
         }
     }
···
             Ok(res) => res,
             Err(e) => {
                 tracing::warn!("Failed to create resolution result: {}", e);
+                self.metrics
+                    .incr("resolver.sqlite.result_create_error")
+                    .await;
                 return result;
             }
         }
···
             Ok(res) => res,
             Err(err) => {
                 tracing::warn!("Failed to create not_resolved result: {}", err);
+                self.metrics
+                    .incr("resolver.sqlite.result_create_error")
+                    .await;
                 return result;
             }
         }
···
 
         if let Err(e) = query_result {
             tracing::warn!("Failed to cache handle resolution in SQLite: {}", e);
+            self.metrics.incr("resolver.sqlite.cache_set_error").await;
+        } else {
+            self.metrics.incr("resolver.sqlite.cache_set").await;
         }
     }
     Err(e) => {
···
             handle,
             e
         );
+        self.metrics.incr("resolver.sqlite.serialize_error").await;
     }
 }
 
         result
     }
+
+    async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
+        // Normalize the handle to lowercase
+        let handle = handle.to_lowercase();
+
+        // Update the SQLite cache
+        if let Ok(mut conn) = self.pool.acquire().await {
+            // Create a resolution result for the successful mapping
+            let resolution_result = match HandleResolutionResult::success(did) {
+                Ok(res) => res,
+                Err(e) => {
+                    tracing::warn!(
+                        "Failed to create resolution result for set operation: {}",
+                        e
+                    );
+                    self.metrics
+                        .incr("resolver.sqlite.set_result_create_error")
+                        .await;
+                    // Still chain to inner resolver even if we can't cache
+                    return self.inner.set(&handle, did).await;
+                }
+            };
+
+            // Serialize to bytes
+            match resolution_result.to_bytes() {
+                Ok(bytes) => {
+                    // Insert or update the cache entry
+                    let timestamp = std::time::SystemTime::now()
+                        .duration_since(std::time::UNIX_EPOCH)
+                        .unwrap_or_default()
+                        .as_secs() as i64;
+
+                    let expires_at = timestamp + self.ttl_seconds as i64;
+
+                    match sqlx::query(
+                        "INSERT OR REPLACE INTO handle_resolution_cache (handle, resolved_value, created_at, expires_at) VALUES (?, ?, ?, ?)"
+                    )
+                    .bind(&handle)
+                    .bind(&bytes)
+                    .bind(timestamp)
+                    .bind(expires_at)
+                    .execute(&mut *conn)
+                    .await
+                    {
+                        Ok(_) => {
+                            tracing::debug!("Set handle {} -> DID {} in SQLite cache", handle, did);
+                            self.metrics.incr("resolver.sqlite.set_success").await;
+                        }
+                        Err(e) => {
+                            tracing::warn!("Failed to set handle->DID mapping in SQLite: {}", e);
+                            self.metrics.incr("resolver.sqlite.set_cache_error").await;
+                            // Still chain to inner resolver even if cache update fails
+                        }
+                    }
+                }
+                Err(e) => {
+                    tracing::warn!(
+                        "Failed to serialize resolution result for set operation: {}",
+                        e
+                    );
+                    self.metrics
+                        .incr("resolver.sqlite.set_serialize_error")
+                        .await;
+                    // Still chain to inner resolver even if serialization fails
+                }
+            }
+        } else {
+            tracing::warn!("Failed to get SQLite connection for set operation");
+            self.metrics
+                .incr("resolver.sqlite.set_connection_error")
+                .await;
+        }
+
+        // Chain to inner resolver
+        self.inner.set(&handle, did).await
+    }
 }
 
 /// Create a new SQLite-backed handle resolver with default 90-day TTL.
···
 ///
 /// * `inner` - The underlying resolver to use for actual resolution
 /// * `pool` - SQLite connection pool
+/// * `metrics` - Metrics publisher for telemetry
 ///
 /// # Example
 ///
···
 /// use std::sync::Arc;
 /// use quickdid::handle_resolver::{create_base_resolver, create_sqlite_resolver, HandleResolver};
 /// use quickdid::sqlite_schema::create_sqlite_pool;
+/// use quickdid::metrics::NoOpMetricsPublisher;
 ///
 /// # async fn example() -> anyhow::Result<()> {
 /// # use atproto_identity::resolve::HickoryDnsResolver;
 /// # use reqwest::Client;
 /// # let dns_resolver = Arc::new(HickoryDnsResolver::create_resolver(&[]));
 /// # let http_client = Client::new();
+/// # let metrics = Arc::new(NoOpMetricsPublisher);
 /// let base = create_base_resolver(
 ///     dns_resolver,
 ///     http_client,
+///     metrics.clone(),
 /// );
 ///
 /// let pool = create_sqlite_pool("sqlite:./quickdid.db").await?;
-/// let resolver = create_sqlite_resolver(base, pool);
+/// let resolver = create_sqlite_resolver(base, pool, metrics);
 /// let (did, timestamp) = resolver.resolve("alice.bsky.social").await.unwrap();
 /// # Ok(())
 /// # }
···
 pub fn create_sqlite_resolver(
     inner: Arc<dyn HandleResolver>,
     pool: SqlitePool,
+    metrics: SharedMetricsPublisher,
 ) -> Arc<dyn HandleResolver> {
-    Arc::new(SqliteHandleResolver::new(inner, pool))
+    Arc::new(SqliteHandleResolver::new(inner, pool, metrics))
 }
 
 /// Create a new SQLite-backed handle resolver with custom TTL.
···
 /// * `inner` - The underlying resolver to use for actual resolution
 /// * `pool` - SQLite connection pool
 /// * `ttl_seconds` - TTL for cache entries in seconds
+/// * `metrics` - Metrics publisher for telemetry
 pub fn create_sqlite_resolver_with_ttl(
     inner: Arc<dyn HandleResolver>,
     pool: SqlitePool,
     ttl_seconds: u64,
+    metrics: SharedMetricsPublisher,
 ) -> Arc<dyn HandleResolver> {
-    Arc::new(SqliteHandleResolver::with_ttl(inner, pool, ttl_seconds))
+    Arc::new(SqliteHandleResolver::with_ttl(
+        inner,
+        pool,
+        ttl_seconds,
+        metrics,
+    ))
 }
 
 #[cfg(test)]
···
             expected_did: "did:plc:testuser123".to_string(),
         });
 
+        // Create metrics publisher
+        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
         // Create SQLite-backed resolver
-        let sqlite_resolver = SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600);
+        let sqlite_resolver =
+            SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);
 
         let test_handle = "alice.bsky.social";
         let expected_key = sqlite_resolver.make_key(test_handle) as i64;
···
             expected_did: String::new(),
         });
 
+        // Create metrics publisher
+        let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
         // Create SQLite-backed resolver
-        let sqlite_resolver = SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600);
+        let sqlite_resolver =
+            SqliteHandleResolver::with_ttl(mock_resolver, pool.clone(), 3600, metrics);
 
         let test_handle = "error.bsky.social";
         let expected_key = sqlite_resolver.make_key(test_handle) as i64;
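
One detail worth calling out in the `set` path above: expiry is precomputed at write time rather than derived from a TTL column on read. A minimal sketch of that bookkeeping, assuming Unix-epoch seconds as in the diff (the function name is illustrative):

fn cache_window(ttl_seconds: u64) -> (i64, i64) {
    // Returns (created_at, expires_at); a row is served only while
    // now < expires_at, matching the "Cache entry expired" branch above.
    let created_at = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs() as i64;
    (created_at, created_at + ttl_seconds as i64)
}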
+92 src/handle_resolver/traits.rs
···
     /// - Network errors occur during resolution
     /// - The handle is invalid or doesn't exist
     async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError>;
+
+    /// Purge a handle or DID from the cache.
+    ///
+    /// This method removes cached entries for the given subject, which can be
+    /// either a handle (e.g., "alice.bsky.social") or a DID (e.g., "did:plc:xyz123").
+    /// Implementations should handle bidirectional purging where applicable.
+    ///
+    /// # Arguments
+    ///
+    /// * `subject` - Either a handle or DID to purge from cache
+    ///
+    /// # Returns
+    ///
+    /// Ok(()) if the purge was successful or if the subject wasn't cached.
+    /// Most implementations will simply return Ok(()) as a no-op.
+    ///
+    /// # Default Implementation
+    ///
+    /// The default implementation is a no-op that always returns Ok(()).
+    async fn purge(&self, _subject: &str) -> Result<(), HandleResolverError> {
+        Ok(())
+    }
+
+    /// Set a handle-to-DID mapping in the cache.
+    ///
+    /// This method allows manually setting or updating a cached mapping between
+    /// a handle and its corresponding DID. This is useful for pre-populating
+    /// caches or updating stale entries.
+    ///
+    /// # Arguments
+    ///
+    /// * `handle` - The handle to cache (e.g., "alice.bsky.social")
+    /// * `did` - The DID to associate with the handle (e.g., "did:plc:xyz123")
+    ///
+    /// # Returns
+    ///
+    /// Ok(()) if the mapping was successfully set or if the implementation
+    /// doesn't support manual cache updates. Most implementations will simply
+    /// return Ok(()) as a no-op.
+    ///
+    /// # Default Implementation
+    ///
+    /// The default implementation is a no-op that always returns Ok(()).
+    async fn set(&self, _handle: &str, _did: &str) -> Result<(), HandleResolverError> {
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    // Simple test resolver that doesn't cache anything
+    struct NoOpTestResolver;
+
+    #[async_trait]
+    impl HandleResolver for NoOpTestResolver {
+        async fn resolve(&self, _s: &str) -> Result<(String, u64), HandleResolverError> {
+            Ok(("did:test:123".to_string(), 1234567890))
+        }
+        // Uses default purge implementation
+    }
+
+    #[tokio::test]
+    async fn test_default_purge_implementation() {
+        let resolver = NoOpTestResolver;
+
+        // Default implementation should always return Ok(())
+        assert!(resolver.purge("alice.bsky.social").await.is_ok());
+        assert!(resolver.purge("did:plc:xyz123").await.is_ok());
+        assert!(resolver.purge("").await.is_ok());
+    }
+
+    #[tokio::test]
+    async fn test_default_set_implementation() {
+        let resolver = NoOpTestResolver;
+
+        // Default implementation should always return Ok(())
+        assert!(
+            resolver
+                .set("alice.bsky.social", "did:plc:xyz123")
+                .await
+                .is_ok()
+        );
+        assert!(
+            resolver
+                .set("bob.example.com", "did:web:example.com")
+                .await
+                .is_ok()
+        );
+        assert!(resolver.set("", "").await.is_ok());
+    }
 }
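
Because `purge` and `set` default to no-ops, a caching layer overrides only the operations it supports, while pass-through resolvers compile unchanged. A minimal sketch of an implementor that overrides `set` but keeps the default `purge`; the in-memory map is illustrative and assumes the trait and error type from this module are in scope:

use async_trait::async_trait;
use std::collections::HashMap;
use tokio::sync::Mutex;

struct MapResolver {
    map: Mutex<HashMap<String, String>>,
}

#[async_trait]
impl HandleResolver for MapResolver {
    async fn resolve(&self, s: &str) -> Result<(String, u64), HandleResolverError> {
        match self.map.lock().await.get(&s.to_lowercase()) {
            Some(did) => Ok((did.clone(), 0)),
            None => Err(HandleResolverError::HandleNotFound),
        }
    }

    async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
        // Normalize to lowercase, consistent with the resolvers above.
        self.map
            .lock()
            .await
            .insert(handle.to_lowercase(), did.to_string());
        Ok(())
    }
    // `purge` keeps the default no-op.
}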
+135 -89 src/handle_resolver_task.rs
···
 //! and ensures resolved handles are cached for efficient subsequent lookups.
 
 use crate::handle_resolver::HandleResolver;
+use crate::metrics::SharedMetricsPublisher;
 use crate::queue::{HandleResolutionWork, QueueAdapter};
 use anyhow::Result;
 use std::sync::Arc;
···
     }
 }
 
-/// Metrics for handle resolution processing
-#[derive(Debug, Default)]
-pub(crate) struct HandleResolverMetrics {
-    pub total_processed: std::sync::atomic::AtomicU64,
-    pub total_succeeded: std::sync::atomic::AtomicU64,
-    pub total_failed: std::sync::atomic::AtomicU64,
-    pub total_cached: std::sync::atomic::AtomicU64,
-}
-
 /// Handle resolver task processor
 pub(crate) struct HandleResolverTask {
     adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
     handle_resolver: Arc<dyn HandleResolver>,
     cancel_token: CancellationToken,
     config: HandleResolverTaskConfig,
-    metrics: Arc<HandleResolverMetrics>,
+    metrics_publisher: SharedMetricsPublisher,
 }
 
 impl HandleResolverTask {
···
         adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
         handle_resolver: Arc<dyn HandleResolver>,
         cancel_token: CancellationToken,
+        metrics_publisher: SharedMetricsPublisher,
     ) -> Self {
         let config = HandleResolverTaskConfig::default();
         Self {
···
             handle_resolver,
             cancel_token,
             config,
-            metrics: Arc::new(HandleResolverMetrics::default()),
+            metrics_publisher,
         }
     }
 
···
         handle_resolver: Arc<dyn HandleResolver>,
         cancel_token: CancellationToken,
         config: HandleResolverTaskConfig,
+        metrics_publisher: SharedMetricsPublisher,
     ) -> Self {
         Self {
             adapter,
             handle_resolver,
             cancel_token,
             config,
-            metrics: Arc::new(HandleResolverMetrics::default()),
+            metrics_publisher,
         }
     }
 
···
 
         // All work has been processed
         info!("All handle resolutions completed");
-
-        info!(
-            total_processed = self
-                .metrics
-                .total_processed
-                .load(std::sync::atomic::Ordering::Relaxed),
-            total_succeeded = self
-                .metrics
-                .total_succeeded
-                .load(std::sync::atomic::Ordering::Relaxed),
-            total_failed = self
-                .metrics
-                .total_failed
-                .load(std::sync::atomic::Ordering::Relaxed),
-            total_cached = self
-                .metrics
-                .total_cached
-                .load(std::sync::atomic::Ordering::Relaxed),
-            "Handle resolver task processor stopped"
-        );
+        info!("Handle resolver task processor stopped");
 
         Ok(())
     }
 
+    /// Check if an error represents a soft failure (handle not found)
+    /// rather than a real error condition.
+    ///
+    /// These atproto_identity library errors indicate the handle doesn't support
+    /// the specific resolution method, which is normal and expected:
+    /// - error-atproto-identity-resolve-4: DNS resolution failed (no records)
+    /// - error-atproto-identity-resolve-5: HTTP resolution failed (hostname not found)
+    /// - error-atproto-identity-resolve-6: HTTP resolution returned a non-DID payload
+    fn is_soft_failure(error_str: &str) -> bool {
+        // Check for specific atproto_identity error codes that indicate "not found"
+        // rather than actual failures
+        if error_str.starts_with("error-atproto-identity-resolve-4") {
+            // DNS resolution - check if it's a "no records" scenario
+            error_str.contains("NoRecordsFound")
+        } else if error_str.starts_with("error-atproto-identity-resolve-6") {
+            // HTTP resolution - check if it's a DID format issue
+            error_str.contains("expected DID format")
+        } else if error_str.starts_with("error-atproto-identity-resolve-5") {
+            // HTTP resolution - check if it's a hostname lookup failure
+            error_str.contains("No address associated with hostname")
+                || error_str.contains("failed to lookup address information")
+        } else {
+            false
+        }
+    }
+
     /// Process a single handle resolution work item
     #[instrument(skip(self), fields(
         handle = %work.handle,
···
 
         let duration_ms = start_time.elapsed().as_millis() as u64;
 
-        // Update metrics
-        self.metrics
-            .total_processed
-            .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+        // Publish metrics
+        self.metrics_publisher
+            .incr("task.handle_resolution.processed")
+            .await;
+        self.metrics_publisher
+            .time("task.handle_resolution.duration_ms", duration_ms)
+            .await;
 
         match result {
             Ok(Ok((did, _timestamp))) => {
-                self.metrics
-                    .total_succeeded
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
-                self.metrics
-                    .total_cached
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+                // Publish success metrics
+                self.metrics_publisher
+                    .incr("task.handle_resolution.success")
+                    .await;
+                self.metrics_publisher
+                    .incr("task.handle_resolution.cached")
+                    .await;
 
-                info!(
+                debug!(
                     handle = %work.handle,
                     did = %did,
                     duration_ms = duration_ms,
···
                 );
             }
             Ok(Err(e)) => {
-                self.metrics
-                    .total_failed
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+                let error_str = e.to_string();
 
-                error!(
-                    handle = %work.handle,
-                    error = %e,
-                    duration_ms = duration_ms,
-                    "Handle resolution failed"
-                );
+                if Self::is_soft_failure(&error_str) {
+                    // This is a soft failure - the handle simply doesn't support this
+                    // resolution method. Publish not-found metrics.
+                    self.metrics_publisher
+                        .incr("task.handle_resolution.not_found")
+                        .await;
+
+                    debug!(
+                        handle = %work.handle,
+                        error = %error_str,
+                        duration_ms = duration_ms,
+                        "Handle not found (soft failure)"
+                    );
+                } else {
+                    // This is a real error; publish failure metrics.
+                    self.metrics_publisher
+                        .incr("task.handle_resolution.failed")
+                        .await;
+
+                    error!(
+                        handle = %work.handle,
+                        error = %error_str,
+                        duration_ms = duration_ms,
+                        "Handle resolution failed"
+                    );
+                }
             }
             Err(_) => {
-                self.metrics
-                    .total_failed
-                    .fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+                // Publish timeout metrics
+                self.metrics_publisher
+                    .incr("task.handle_resolution.timeout")
+                    .await;
 
                 error!(
                     handle = %work.handle,
···
 /// * `adapter` - Queue adapter for work items
 /// * `handle_resolver` - Handle resolver implementation
 /// * `cancel_token` - Token for graceful shutdown
+/// * `metrics_publisher` - Metrics publisher for telemetry
 pub fn create_handle_resolver_task(
     adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
     handle_resolver: Arc<dyn HandleResolver>,
     cancel_token: CancellationToken,
+    metrics_publisher: SharedMetricsPublisher,
 ) -> HandleResolverTaskHandle {
     HandleResolverTaskHandle {
-        task: HandleResolverTask::new(adapter, handle_resolver, cancel_token),
+        task: HandleResolverTask::new(adapter, handle_resolver, cancel_token, metrics_publisher),
     }
 }
 
···
 /// * `handle_resolver` - Handle resolver implementation
 /// * `cancel_token` - Token for graceful shutdown
 /// * `config` - Task configuration
+/// * `metrics_publisher` - Metrics publisher for telemetry
 pub fn create_handle_resolver_task_with_config(
     adapter: Arc<dyn QueueAdapter<HandleResolutionWork>>,
     handle_resolver: Arc<dyn HandleResolver>,
     cancel_token: CancellationToken,
     config: HandleResolverTaskConfig,
+    metrics_publisher: SharedMetricsPublisher,
 ) -> HandleResolverTaskHandle {
     HandleResolverTaskHandle {
-        task: HandleResolverTask::with_config(adapter, handle_resolver, cancel_token, config),
+        task: HandleResolverTask::with_config(
+            adapter,
+            handle_resolver,
+            cancel_token,
+            config,
+            metrics_publisher,
+        ),
     }
 }
 
···
         // Create cancellation token
         let cancel_token = CancellationToken::new();
 
+        // Create metrics publisher
+        let metrics_publisher = Arc::new(crate::metrics::NoOpMetricsPublisher);
+
         // Create task with custom config
         let config = HandleResolverTaskConfig {
             default_timeout_ms: 5000,
···
             handle_resolver,
             cancel_token.clone(),
             config,
+            metrics_publisher,
         );
 
         // Create handle resolution work
···
         // Send work to queue
         sender.send(work).await.unwrap();
 
-        // Get metrics reference before moving task
-        let metrics = task.metrics.clone();
-
         // Run task for a short time
         let task_handle = tokio::spawn(async move { task.run().await });
···
         // Wait for task to complete
         let _ = task_handle.await;
 
-        // Verify metrics
-        assert_eq!(
-            metrics
-                .total_processed
-                .load(std::sync::atomic::Ordering::Relaxed),
-            1
-        );
-        assert_eq!(
-            metrics
-                .total_succeeded
-                .load(std::sync::atomic::Ordering::Relaxed),
-            1
-        );
+        // Test passes if task runs without panic
     }
 
-    #[tokio::test]
-    async fn test_handle_resolver_metrics() {
-        use std::sync::atomic::Ordering;
+    #[test]
+    fn test_is_soft_failure() {
+        // Test DNS NoRecordsFound pattern (error-atproto-identity-resolve-4)
+        let dns_no_records = "error-atproto-identity-resolve-4 DNS resolution failed: ResolveError { kind: Proto(ProtoError { kind: NoRecordsFound { query: Query { name: Name(\"_atproto.noahshachtman.bsky.social.railway.internal.\"), query_type: TXT, query_class: IN }, soa: None, ns: None, negative_ttl: None, response_code: NotImp, trusted: true, authorities: None } }) }";
+        assert!(HandleResolverTask::is_soft_failure(dns_no_records));
 
-        let metrics = HandleResolverMetrics::default();
+        // Test HTTP hostname not found pattern (error-atproto-identity-resolve-5)
+        let http_no_hostname = "error-atproto-identity-resolve-5 HTTP resolution failed: reqwest::Error { kind: Request, url: \"https://mattie.thegem.city/.well-known/atproto-did\", source: hyper_util::client::legacy::Error(Connect, ConnectError(\"dns error\", Custom { kind: Uncategorized, error: \"failed to lookup address information: No address associated with hostname\" })) }";
+        assert!(HandleResolverTask::is_soft_failure(http_no_hostname));
 
-        // Test initial values
-        assert_eq!(metrics.total_processed.load(Ordering::Relaxed), 0);
-        assert_eq!(metrics.total_succeeded.load(Ordering::Relaxed), 0);
-        assert_eq!(metrics.total_failed.load(Ordering::Relaxed), 0);
-        assert_eq!(metrics.total_cached.load(Ordering::Relaxed), 0);
+        // Test alternate HTTP hostname failure message
+        let http_lookup_failed = "error-atproto-identity-resolve-5 HTTP resolution failed: reqwest::Error { kind: Request, url: \"https://example.com/.well-known/atproto-did\", source: hyper_util::client::legacy::Error(Connect, ConnectError(\"dns error\", Custom { kind: Uncategorized, error: \"failed to lookup address information\" })) }";
let http_lookup_failed = "error-atproto-identity-resolve-5 HTTP resolution failed: reqwest::Error { kind: Request, url: \"https://example.com/.well-known/atproto-did\", source: hyper_util::client::legacy::Error(Connect, ConnectError(\"dns error\", Custom { kind: Uncategorized, error: \"failed to lookup address information\" })) }";
387
+
assert!(HandleResolverTask::is_soft_failure(http_lookup_failed));
363
388
364
-
// Test incrementing
365
-
metrics.total_processed.fetch_add(1, Ordering::Relaxed);
366
-
metrics.total_succeeded.fetch_add(1, Ordering::Relaxed);
367
-
metrics.total_cached.fetch_add(1, Ordering::Relaxed);
389
+
// Test HTTP invalid DID format (error-atproto-identity-resolve-6) - like reuters.com
390
+
let http_invalid_did = "error-atproto-identity-resolve-6 Invalid HTTP resolution response: expected DID format";
391
+
assert!(HandleResolverTask::is_soft_failure(http_invalid_did));
368
392
369
-
assert_eq!(metrics.total_processed.load(Ordering::Relaxed), 1);
370
-
assert_eq!(metrics.total_succeeded.load(Ordering::Relaxed), 1);
371
-
assert_eq!(metrics.total_cached.load(Ordering::Relaxed), 1);
393
+
// Test weratedogs.com case
394
+
let weratedogs_error = "error-atproto-identity-resolve-6 Invalid HTTP resolution response: expected DID format";
395
+
assert!(HandleResolverTask::is_soft_failure(weratedogs_error));
396
+
397
+
// Test DNS error that is NOT a soft failure (different DNS error)
398
+
let dns_real_error = "error-atproto-identity-resolve-4 DNS resolution failed: timeout";
399
+
assert!(!HandleResolverTask::is_soft_failure(dns_real_error));
400
+
401
+
// Test HTTP error that is NOT a soft failure (connection timeout)
402
+
let http_timeout =
403
+
"error-atproto-identity-resolve-5 HTTP resolution failed: connection timeout";
404
+
assert!(!HandleResolverTask::is_soft_failure(http_timeout));
405
+
406
+
// Test HTTP error that is NOT a soft failure (500 error)
407
+
let http_500 = "error-atproto-identity-resolve-5 HTTP resolution failed: status code 500";
408
+
assert!(!HandleResolverTask::is_soft_failure(http_500));
409
+
410
+
// Test QuickDID errors should never be soft failures
411
+
let quickdid_error =
412
+
"error-quickdid-resolve-1 Failed to resolve subject: internal server error";
413
+
assert!(!HandleResolverTask::is_soft_failure(quickdid_error));
414
+
415
+
// Test other atproto_identity error codes should not be soft failures
416
+
let other_atproto_error = "error-atproto-identity-resolve-1 Some other error";
417
+
assert!(!HandleResolverTask::is_soft_failure(other_atproto_error));
372
418
}
373
419
}
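The hunks above add calls to `Self::is_soft_failure` and a unit test for it, but the function body itself falls outside this excerpt. A minimal sketch consistent with the assertions in `test_is_soft_failure` (an assumption, not necessarily the committed implementation; it is an associated function on `HandleResolverTask` in the source, shown here as a free function for brevity):

```rust
/// Sketch of the soft-failure classifier exercised by `test_is_soft_failure`.
/// The match patterns below are lifted directly from the test strings.
fn is_soft_failure(error: &str) -> bool {
    // DNS resolution reached the resolver but found no _atproto TXT record
    (error.starts_with("error-atproto-identity-resolve-4")
        && error.contains("NoRecordsFound"))
        // HTTP resolution failed because the hostname does not exist
        || (error.starts_with("error-atproto-identity-resolve-5")
            && error.contains("failed to lookup address information"))
        // HTTP resolution answered, but the body was not a DID
        || (error.starts_with("error-atproto-identity-resolve-6")
            && error.contains("expected DID format"))
}
```

Any other error string, including every `error-quickdid-*` code, remains a hard failure and is logged at `error!` level.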
+250
-57
src/http/handle_xrpc_resolve_handle.rs
···
1
1
use std::sync::Arc;
2
+
use std::time::{Duration, SystemTime, UNIX_EPOCH};
2
3
3
4
use crate::{
4
5
handle_resolver::HandleResolver,
5
6
http::AppContext,
7
+
metrics::SharedMetricsPublisher,
6
8
queue::{HandleResolutionWork, QueueAdapter},
7
9
};
8
10
···
34
36
message: String,
35
37
}
36
38
39
+
/// Represents the result of a handle resolution
40
+
enum ResolutionResult {
41
+
Success {
42
+
did: String,
43
+
timestamp: u64,
44
+
etag: String,
45
+
},
46
+
Error {
47
+
error: String,
48
+
message: String,
49
+
timestamp: u64,
50
+
etag: String,
51
+
},
52
+
}
53
+
54
+
struct ResolutionResultView {
55
+
result: ResolutionResult,
56
+
cache_control: Option<String>,
57
+
if_none_match: Option<HeaderValue>,
58
+
if_modified_since: Option<HeaderValue>,
59
+
}
60
+
61
+
impl IntoResponse for ResolutionResultView {
62
+
fn into_response(self) -> Response {
63
+
let (last_modified, etag) = match &self.result {
64
+
ResolutionResult::Success {
65
+
timestamp, etag, ..
66
+
} => (*timestamp, etag),
67
+
ResolutionResult::Error {
68
+
timestamp, etag, ..
69
+
} => (*timestamp, etag),
70
+
};
71
+
72
+
let mut headers = HeaderMap::new();
73
+
74
+
// WARNING: this swallows errors
75
+
if let Ok(etag_value) = HeaderValue::from_str(etag) {
76
+
headers.insert(header::ETAG, etag_value);
77
+
}
78
+
79
+
// Add Last-Modified header
80
+
let last_modified_date = format_http_date(last_modified);
81
+
// WARNING: this swallows errors
82
+
if let Ok(last_modified_value) = HeaderValue::from_str(&last_modified_date) {
83
+
headers.insert(header::LAST_MODIFIED, last_modified_value);
84
+
}
85
+
86
+
// Add Cache-Control header if configured
87
+
if let Some(cache_control) = &self.cache_control {
88
+
// WARNING: this swallows errors
89
+
if let Ok(cache_control_value) = HeaderValue::from_str(cache_control) {
90
+
headers.insert(header::CACHE_CONTROL, cache_control_value);
91
+
}
92
+
}
93
+
94
+
headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
95
+
headers.insert(
96
+
header::ACCESS_CONTROL_ALLOW_HEADERS,
97
+
HeaderValue::from_static("*"),
98
+
);
99
+
headers.insert(
100
+
header::ACCESS_CONTROL_ALLOW_METHODS,
101
+
HeaderValue::from_static("GET, HEAD, OPTIONS"),
102
+
);
103
+
headers.insert(
104
+
header::ACCESS_CONTROL_ALLOW_ORIGIN,
105
+
HeaderValue::from_static("*"),
106
+
);
107
+
headers.insert(
108
+
header::ACCESS_CONTROL_EXPOSE_HEADERS,
109
+
HeaderValue::from_static("*"),
110
+
);
111
+
headers.insert(
112
+
header::ACCESS_CONTROL_MAX_AGE,
113
+
HeaderValue::from_static("86400"),
114
+
);
115
+
headers.insert(
116
+
"Access-Control-Request-Headers",
117
+
HeaderValue::from_static("*"),
118
+
);
119
+
headers.insert(
120
+
"Access-Control-Request-Method",
121
+
HeaderValue::from_static("GET"),
122
+
);
123
+
124
+
if let ResolutionResult::Success { .. } = self.result {
125
+
let fresh = self
126
+
.if_modified_since
127
+
.and_then(|inner_header_value| match inner_header_value.to_str() {
128
+
Ok(value) => Some(value.to_string()),
129
+
Err(_) => None,
130
+
})
131
+
.and_then(|inner_str_value| parse_http_date(&inner_str_value))
132
+
.is_some_and(|inner_if_modified_since| last_modified <= inner_if_modified_since);
133
+
134
+
if fresh {
135
+
return (StatusCode::NOT_MODIFIED, headers).into_response();
136
+
}
137
+
}
138
+
139
+
let fresh = self
140
+
.if_none_match
141
+
.is_some_and(|if_none_match_value| if_none_match_value == etag);
142
+
if fresh {
143
+
return (StatusCode::NOT_MODIFIED, headers).into_response();
144
+
}
145
+
146
+
match &self.result {
147
+
ResolutionResult::Success { did, .. } => (
148
+
StatusCode::OK,
149
+
headers,
150
+
Json(ResolveHandleResponse { did: did.clone() }),
151
+
)
152
+
.into_response(),
153
+
ResolutionResult::Error { error, message, .. } => (
154
+
StatusCode::BAD_REQUEST,
155
+
headers,
156
+
Json(ErrorResponse {
157
+
error: error.clone(),
158
+
message: message.clone(),
159
+
}),
160
+
)
161
+
.into_response(),
162
+
}
163
+
165
+
}
166
+
}
167
+
37
168
/// Calculate a weak ETag for the given content using MetroHash64 with a seed
38
169
fn calculate_etag(content: &str, seed: &str) -> String {
39
170
let mut hasher = MetroHash64::new();
···
43
174
format!("W/\"{:x}\"", hash)
44
175
}
45
176
177
+
/// Format a UNIX timestamp as an HTTP date string (RFC 7231)
178
+
fn format_http_date(timestamp: u64) -> String {
179
+
let system_time = UNIX_EPOCH + Duration::from_secs(timestamp);
180
+
httpdate::fmt_http_date(system_time)
181
+
}
182
+
183
+
/// Parse an HTTP date string (RFC 7231) into a UNIX timestamp
184
+
fn parse_http_date(date_str: &str) -> Option<u64> {
185
+
httpdate::parse_http_date(date_str)
186
+
.ok()
187
+
.and_then(|system_time| system_time.duration_since(UNIX_EPOCH).ok())
188
+
.map(|duration| duration.as_secs())
189
+
}
190
+
46
191
pub(super) async fn handle_xrpc_resolve_handle(
47
192
headers: HeaderMap,
48
193
Query(params): Query<ResolveHandleParams>,
49
194
State(app_context): State<AppContext>,
50
195
State(handle_resolver): State<Arc<dyn HandleResolver>>,
51
196
State(queue): State<Arc<dyn QueueAdapter<HandleResolutionWork>>>,
52
-
) -> Result<Response, Response> {
197
+
State(metrics): State<SharedMetricsPublisher>,
198
+
) -> impl IntoResponse {
53
199
let validating = params.validate.is_some();
54
200
let queueing = params.queue.is_some();
55
201
···
57
203
let handle = match params.handle {
58
204
Some(h) => h,
59
205
None => {
60
-
return Err((
206
+
metrics
207
+
.incr_with_tags(
208
+
"xrpc.com.atproto.identity.resolveHandle.invalid_handle",
209
+
&[("reason", "missing")],
210
+
)
211
+
.await;
212
+
return (
61
213
StatusCode::BAD_REQUEST,
62
214
Json(ErrorResponse {
63
215
error: "InvalidRequest".to_string(),
64
216
message: "Error: Params must have the property \"handle\"".to_string(),
65
217
}),
66
218
)
67
-
.into_response());
219
+
.into_response();
68
220
}
69
221
};
70
222
71
223
// Validate that the input is a handle and not a DID
72
224
let handle = match parse_input(&handle) {
73
-
Ok(InputType::Handle(value)) => value,
225
+
Ok(InputType::Handle(value)) => value.to_lowercase(),
74
226
Ok(InputType::Plc(_)) | Ok(InputType::Web(_)) => {
75
227
// It's a DID, not a handle
76
-
return Err((
228
+
metrics
229
+
.incr_with_tags(
230
+
"xrpc.com.atproto.identity.resolveHandle.invalid_handle",
231
+
&[("reason", "did")],
232
+
)
233
+
.await;
234
+
return (
77
235
StatusCode::BAD_REQUEST,
78
236
Json(ErrorResponse {
79
237
error: "InvalidRequest".to_string(),
80
238
message: "Error: handle must be a valid handle".to_string(),
81
239
}),
82
240
)
83
-
.into_response());
241
+
.into_response();
84
242
}
85
243
Err(_) => {
86
-
return Err((
244
+
metrics
245
+
.incr_with_tags(
246
+
"xrpc.com.atproto.identity.resolveHandle.invalid_handle",
247
+
&[("reason", "error")],
248
+
)
249
+
.await;
250
+
return (
87
251
StatusCode::BAD_REQUEST,
88
252
Json(ErrorResponse {
89
253
error: "InvalidRequest".to_string(),
90
254
message: "Error: handle must be a valid handle".to_string(),
91
255
}),
92
256
)
93
-
.into_response());
257
+
.into_response();
94
258
}
95
259
};
96
260
97
261
if validating {
98
-
return Ok(StatusCode::NO_CONTENT.into_response());
262
+
metrics
263
+
.incr("xrpc.com.atproto.identity.resolveHandle")
264
+
.await;
265
+
return StatusCode::NO_CONTENT.into_response();
99
266
}
100
267
101
268
if queueing {
···
105
272
// Queue the work
106
273
match queue.push(work).await {
107
274
Ok(()) => {
275
+
metrics
276
+
.incr("xrpc.com.atproto.identity.resolveHandle")
277
+
.await;
108
278
tracing::debug!("Queued handle resolution for {}", handle);
109
279
}
110
280
Err(e) => {
281
+
metrics
282
+
.incr("xrpc.com.atproto.identity.resolveHandle.queue_failure")
283
+
.await;
111
284
tracing::error!("Failed to queue handle resolution: {}", e);
112
285
}
113
286
}
114
287
115
-
return Ok(StatusCode::NO_CONTENT.into_response());
288
+
return StatusCode::NO_CONTENT.into_response();
116
289
}
117
290
118
291
tracing::debug!(handle, "Resolving handle");
119
292
120
-
let if_none_match = headers.get(header::IF_NONE_MATCH);
293
+
// Get conditional request headers
294
+
let if_none_match = headers.get(header::IF_NONE_MATCH).cloned();
295
+
let if_modified_since = headers.get(header::IF_MODIFIED_SINCE).cloned();
121
296
122
-
let (mut response, etag) = match handle_resolver.resolve(&handle).await {
123
-
Ok((did, _timestamp)) => {
297
+
// Perform the resolution and build the response
298
+
let result = match handle_resolver.resolve(&handle).await {
299
+
Ok((did, timestamp)) => {
124
300
tracing::debug!(handle, did, "Found cached DID for handle");
125
301
126
-
// Calculate the weak etag for the successful response
127
-
let etag = calculate_etag(&did, app_context.etag_seed());
302
+
metrics
303
+
.incr_with_tags("handle.resolution.request", &[("success", "1")])
304
+
.await;
128
305
129
-
// Check if the client's etag matches our calculated one
130
-
if if_none_match.is_some_and(|value| value == &etag) {
131
-
(StatusCode::NOT_MODIFIED.into_response(), etag)
132
-
} else {
133
-
(Json(ResolveHandleResponse { did }).into_response(), etag)
306
+
let etag = calculate_etag(&did, app_context.etag_seed());
307
+
ResolutionResult::Success {
308
+
did,
309
+
timestamp,
310
+
etag,
134
311
}
135
312
}
136
313
Err(err) => {
137
314
tracing::debug!(error = ?err, handle, "Error resolving handle");
138
-
139
-
// Calculate the weak etag for the error response
140
-
// Use a combination of error message and handle for consistent error etags
315
+
metrics
316
+
.incr_with_tags("handle.resolution.request", &[("success", "0")])
317
+
.await;
141
318
let error_content = format!("error:{}:{}", handle, err);
142
319
let etag = calculate_etag(&error_content, app_context.etag_seed());
143
-
144
-
if if_none_match.is_some_and(|value| value == &etag) {
145
-
(StatusCode::NOT_MODIFIED.into_response(), etag)
146
-
} else {
147
-
(
148
-
(
149
-
StatusCode::BAD_REQUEST,
150
-
Json(ErrorResponse {
151
-
error: "InvalidRequest".to_string(),
152
-
message: "Unable to resolve handle".to_string(),
153
-
}),
154
-
)
155
-
.into_response(),
156
-
etag,
157
-
)
320
+
let timestamp = SystemTime::now()
321
+
.duration_since(UNIX_EPOCH)
322
+
.unwrap_or_default()
323
+
.as_secs();
324
+
ResolutionResult::Error {
325
+
error: "InvalidRequest".to_string(),
326
+
message: "Unable to resolve handle".to_string(),
327
+
timestamp,
328
+
etag,
158
329
}
159
330
}
160
331
};
161
332
162
-
// Add Cache-Control header if configured
163
-
if let Some(cache_control) = app_context.cache_control_header() {
164
-
if let Ok(cache_control_value) = HeaderValue::from_str(cache_control) {
165
-
response
166
-
.headers_mut()
167
-
.insert(header::CACHE_CONTROL, cache_control_value);
168
-
}
333
+
ResolutionResultView {
334
+
result,
335
+
cache_control: app_context.cache_control_header().map(|s| s.to_string()),
336
+
if_none_match,
337
+
if_modified_since,
169
338
}
339
+
.into_response()
340
+
}
170
341
171
-
// Add ETag header
172
-
match HeaderValue::from_str(&etag) {
173
-
Ok(etag_header_value) => {
174
-
response
175
-
.headers_mut()
176
-
.insert(header::ETAG, etag_header_value);
177
-
}
178
-
Err(err) => {
179
-
tracing::error!(error = ?err, "unable to create etag response value");
180
-
}
181
-
}
342
+
pub(super) async fn handle_xrpc_resolve_handle_options() -> impl IntoResponse {
343
+
let mut headers = HeaderMap::new();
182
344
183
-
Ok(response)
345
+
// Add CORS and Allow headers for OPTIONS request
346
+
headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
347
+
headers.insert(
348
+
header::ACCESS_CONTROL_ALLOW_HEADERS,
349
+
HeaderValue::from_static("*"),
350
+
);
351
+
headers.insert(
352
+
header::ACCESS_CONTROL_ALLOW_METHODS,
353
+
HeaderValue::from_static("GET, HEAD, OPTIONS"),
354
+
);
355
+
headers.insert(
356
+
header::ACCESS_CONTROL_ALLOW_ORIGIN,
357
+
HeaderValue::from_static("*"),
358
+
);
359
+
headers.insert(
360
+
header::ACCESS_CONTROL_EXPOSE_HEADERS,
361
+
HeaderValue::from_static("*"),
362
+
);
363
+
headers.insert(
364
+
header::ACCESS_CONTROL_MAX_AGE,
365
+
HeaderValue::from_static("86400"),
366
+
);
367
+
headers.insert(
368
+
"Access-Control-Request-Headers",
369
+
HeaderValue::from_static("*"),
370
+
);
371
+
headers.insert(
372
+
"Access-Control-Request-Method",
373
+
HeaderValue::from_static("GET"),
374
+
);
375
+
376
+
(StatusCode::NO_CONTENT, headers)
184
377
}
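Since `format_http_date` and `parse_http_date` are thin wrappers over the `httpdate` crate, they round-trip exactly at one-second resolution, which is what makes the `If-Modified-Since` comparison in `ResolutionResultView` safe. A quick sanity check (a sketch, not part of the diff):

```rust
// Round-trip check for the helpers defined above. HTTP dates (RFC 7231)
// carry one-second resolution, so parse(format(ts)) returns ts unchanged.
#[test]
fn http_date_round_trip() {
    let ts: u64 = 1_700_000_000; // 2023-11-14T22:13:20Z
    let formatted = format_http_date(ts);
    assert_eq!(parse_http_date(&formatted), Some(ts));
}
```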
+126
src/http/handle_xrpc_resolve_lexicon.rs
···
1
+
use std::sync::Arc;
2
+
3
+
use atproto_lexicon::resolve::LexiconResolver;
4
+
use axum::{
5
+
extract::{Query, State},
6
+
http::{HeaderMap, HeaderValue, StatusCode, header},
7
+
response::{IntoResponse, Json},
8
+
};
9
+
use serde::{Deserialize, Serialize};
10
+
11
+
use crate::metrics::SharedMetricsPublisher;
12
+
13
+
#[derive(Deserialize)]
14
+
pub(super) struct ResolveLexiconParams {
15
+
nsid: Option<String>,
16
+
}
17
+
18
+
#[derive(Serialize)]
19
+
pub(super) struct ErrorResponse {
20
+
error: String,
21
+
message: String,
22
+
}
23
+
24
+
pub(super) async fn handle_xrpc_resolve_lexicon(
25
+
Query(params): Query<ResolveLexiconParams>,
26
+
State(lexicon_resolver): State<Arc<dyn LexiconResolver>>,
27
+
State(metrics): State<SharedMetricsPublisher>,
28
+
) -> impl IntoResponse {
29
+
// Validate that nsid is provided
30
+
let nsid = match params.nsid {
31
+
Some(n) => n,
32
+
None => {
33
+
metrics
34
+
.incr_with_tags(
35
+
"xrpc.com.atproto.lexicon.resolveLexicon.invalid_nsid",
36
+
&[("reason", "missing")],
37
+
)
38
+
.await;
39
+
return (
40
+
StatusCode::BAD_REQUEST,
41
+
Json(ErrorResponse {
42
+
error: "InvalidRequest".to_string(),
43
+
message: "Error: Params must have the property \"nsid\"".to_string(),
44
+
}),
45
+
)
46
+
.into_response();
47
+
}
48
+
};
49
+
50
+
tracing::debug!(nsid, "Resolving lexicon");
51
+
52
+
// Perform the lexicon resolution
53
+
match lexicon_resolver.resolve(&nsid).await {
54
+
Ok(resolved) => {
55
+
tracing::debug!(nsid, "Successfully resolved lexicon");
56
+
57
+
metrics
58
+
.incr_with_tags("lexicon.resolution.request", &[("success", "1")])
59
+
.await;
60
+
61
+
let mut headers = HeaderMap::new();
62
+
add_cors_headers(&mut headers);
63
+
64
+
// The resolved value is already a serde_json::Value, so just return it as JSON
65
+
(StatusCode::OK, headers, Json(resolved)).into_response()
66
+
}
67
+
Err(err) => {
68
+
tracing::debug!(error = ?err, nsid, "Error resolving lexicon");
69
+
70
+
metrics
71
+
.incr_with_tags("lexicon.resolution.request", &[("success", "0")])
72
+
.await;
73
+
74
+
let mut headers = HeaderMap::new();
75
+
add_cors_headers(&mut headers);
76
+
77
+
(
78
+
StatusCode::BAD_REQUEST,
79
+
headers,
80
+
Json(ErrorResponse {
81
+
error: "LexiconNotFound".to_string(),
82
+
message: "No lexicon was resolved for the NSID".to_string(),
83
+
}),
84
+
)
85
+
.into_response()
86
+
}
87
+
}
88
+
}
89
+
90
+
pub(super) async fn handle_xrpc_resolve_lexicon_options() -> impl IntoResponse {
91
+
let mut headers = HeaderMap::new();
92
+
add_cors_headers(&mut headers);
93
+
(StatusCode::NO_CONTENT, headers)
94
+
}
95
+
96
+
fn add_cors_headers(headers: &mut HeaderMap) {
97
+
headers.insert("Allow", HeaderValue::from_static("GET, HEAD, OPTIONS"));
98
+
headers.insert(
99
+
header::ACCESS_CONTROL_ALLOW_HEADERS,
100
+
HeaderValue::from_static("*"),
101
+
);
102
+
headers.insert(
103
+
header::ACCESS_CONTROL_ALLOW_METHODS,
104
+
HeaderValue::from_static("GET, HEAD, OPTIONS"),
105
+
);
106
+
headers.insert(
107
+
header::ACCESS_CONTROL_ALLOW_ORIGIN,
108
+
HeaderValue::from_static("*"),
109
+
);
110
+
headers.insert(
111
+
header::ACCESS_CONTROL_EXPOSE_HEADERS,
112
+
HeaderValue::from_static("*"),
113
+
);
114
+
headers.insert(
115
+
header::ACCESS_CONTROL_MAX_AGE,
116
+
HeaderValue::from_static("86400"),
117
+
);
118
+
headers.insert(
119
+
"Access-Control-Request-Headers",
120
+
HeaderValue::from_static("*"),
121
+
);
122
+
headers.insert(
123
+
"Access-Control-Request-Method",
124
+
HeaderValue::from_static("GET"),
125
+
);
126
+
}
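For reference, a client-side sketch of the new endpoint (host and port are assumptions): the handler above accepts the NSID as a query parameter and returns either the resolved schema JSON or an `ErrorResponse` body with HTTP 400.

```rust
use serde_json::Value;

// Hedged client sketch: fetch a resolved lexicon schema from a local
// QuickDID instance. Host/port and the error strategy are assumptions.
async fn fetch_lexicon(nsid: &str) -> anyhow::Result<Value> {
    let response = reqwest::Client::new()
        .get("http://localhost:3007/xrpc/com.atproto.lexicon.resolveLexicon")
        .query(&[("nsid", nsid)])
        .send()
        .await?;
    Ok(response.error_for_status()?.json().await?)
}
```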
+1
src/http/mod.rs
+74
-49
src/http/server.rs
···
1
1
use crate::handle_resolver::HandleResolver;
2
+
use crate::metrics::SharedMetricsPublisher;
2
3
use crate::queue::{HandleResolutionWork, QueueAdapter};
4
+
use atproto_lexicon::resolve::LexiconResolver;
3
5
use axum::{
4
6
Router,
5
-
extract::State,
6
-
http::StatusCode,
7
-
response::{Html, IntoResponse, Json, Response},
7
+
extract::{MatchedPath, State},
8
+
http::Request,
9
+
middleware::{self, Next},
10
+
response::{Json, Response},
8
11
routing::get,
9
12
};
10
13
use serde_json::json;
11
14
use std::sync::Arc;
15
+
use std::time::Instant;
16
+
use tower_http::services::ServeDir;
12
17
13
18
pub(crate) struct InnerAppContext {
14
-
pub(crate) service_document: serde_json::Value,
15
-
pub(crate) service_did: String,
16
19
pub(crate) handle_resolver: Arc<dyn HandleResolver>,
17
20
pub(crate) handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
21
+
pub(crate) lexicon_resolver: Arc<dyn LexiconResolver>,
22
+
pub(crate) metrics: SharedMetricsPublisher,
18
23
pub(crate) etag_seed: String,
19
24
pub(crate) cache_control_header: Option<String>,
25
+
pub(crate) static_files_dir: String,
20
26
}
21
27
22
28
#[derive(Clone)]
···
25
31
impl AppContext {
26
32
/// Create a new AppContext with the provided configuration.
27
33
pub fn new(
28
-
service_document: serde_json::Value,
29
-
service_did: String,
30
34
handle_resolver: Arc<dyn HandleResolver>,
31
35
handle_queue: Arc<dyn QueueAdapter<HandleResolutionWork>>,
36
+
lexicon_resolver: Arc<dyn LexiconResolver>,
37
+
metrics: SharedMetricsPublisher,
32
38
etag_seed: String,
33
39
cache_control_header: Option<String>,
40
+
static_files_dir: String,
34
41
) -> Self {
35
42
Self(Arc::new(InnerAppContext {
36
-
service_document,
37
-
service_did,
38
43
handle_resolver,
39
44
handle_queue,
45
+
lexicon_resolver,
46
+
metrics,
40
47
etag_seed,
41
48
cache_control_header,
49
+
static_files_dir,
42
50
}))
43
51
}
44
52
45
53
// Internal accessor methods for handlers
46
-
pub(super) fn service_document(&self) -> &serde_json::Value {
47
-
&self.0.service_document
48
-
}
49
-
50
-
pub(super) fn service_did(&self) -> &str {
51
-
&self.0.service_did
52
-
}
53
-
54
54
pub(super) fn etag_seed(&self) -> &str {
55
55
&self.0.etag_seed
56
56
}
57
57
58
58
pub(super) fn cache_control_header(&self) -> Option<&str> {
59
59
self.0.cache_control_header.as_deref()
60
+
}
61
+
62
+
pub(super) fn static_files_dir(&self) -> &str {
63
+
&self.0.static_files_dir
60
64
}
61
65
}
62
66
···
78
82
handle_queue,
79
83
Arc<dyn QueueAdapter<HandleResolutionWork>>
80
84
);
85
+
impl_from_ref!(AppContext, lexicon_resolver, Arc<dyn LexiconResolver>);
86
+
impl_from_ref!(AppContext, metrics, SharedMetricsPublisher);
87
+
88
+
/// Middleware to track HTTP request metrics
89
+
async fn metrics_middleware(
90
+
State(metrics): State<SharedMetricsPublisher>,
91
+
matched_path: Option<MatchedPath>,
92
+
request: Request<axum::body::Body>,
93
+
next: Next,
94
+
) -> Response {
95
+
let start = Instant::now();
96
+
let method = request.method().to_string();
97
+
let path = matched_path
98
+
.as_ref()
99
+
.map(|p| p.as_str().to_string())
100
+
.unwrap_or_else(|| "unknown".to_string());
101
+
102
+
// Process the request
103
+
let response = next.run(request).await;
104
+
105
+
// Calculate duration
106
+
let duration_ms = start.elapsed().as_millis() as u64;
107
+
let status_code = response.status().as_u16().to_string();
108
+
109
+
// Publish metrics with tags
110
+
metrics
111
+
.time_with_tags(
112
+
"http.request.duration_ms",
113
+
duration_ms,
114
+
&[
115
+
("method", &method),
116
+
("path", &path),
117
+
("status", &status_code),
118
+
],
119
+
)
120
+
.await;
121
+
122
+
response
123
+
}
81
124
82
125
pub fn create_router(app_context: AppContext) -> Router {
126
+
let static_dir = app_context.static_files_dir().to_string();
127
+
83
128
Router::new()
84
-
.route("/", get(handle_index))
85
-
.route("/.well-known/did.json", get(handle_wellknown_did_json))
129
+
.route("/xrpc/_health", get(handle_xrpc_health))
86
130
.route(
87
-
"/.well-known/atproto-did",
88
-
get(handle_wellknown_atproto_did),
131
+
"/xrpc/com.atproto.identity.resolveHandle",
132
+
get(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle)
133
+
.options(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle_options),
89
134
)
90
-
.route("/xrpc/_health", get(handle_xrpc_health))
91
135
.route(
92
-
"/xrpc/com.atproto.identity.resolveHandle",
93
-
get(super::handle_xrpc_resolve_handle::handle_xrpc_resolve_handle),
136
+
"/xrpc/com.atproto.lexicon.resolveLexicon",
137
+
get(super::handle_xrpc_resolve_lexicon::handle_xrpc_resolve_lexicon)
138
+
.options(super::handle_xrpc_resolve_lexicon::handle_xrpc_resolve_lexicon_options),
94
139
)
140
+
.fallback_service(ServeDir::new(static_dir))
141
+
.layer(middleware::from_fn_with_state(
142
+
app_context.0.metrics.clone(),
143
+
metrics_middleware,
144
+
))
95
145
.with_state(app_context)
96
-
}
97
-
98
-
pub(super) async fn handle_index() -> Html<&'static str> {
99
-
Html(
100
-
r#"<!DOCTYPE html>
101
-
<html>
102
-
<head>
103
-
<title>QuickDID</title>
104
-
</head>
105
-
<body>
106
-
<h1>QuickDID</h1>
107
-
<p>AT Protocol Identity Resolution Service</p>
108
-
</body>
109
-
</html>"#,
110
-
)
111
-
}
112
-
113
-
pub(super) async fn handle_wellknown_did_json(
114
-
State(context): State<AppContext>,
115
-
) -> Json<serde_json::Value> {
116
-
Json(context.service_document().clone())
117
-
}
118
-
119
-
pub(super) async fn handle_wellknown_atproto_did(State(context): State<AppContext>) -> Response {
120
-
(StatusCode::OK, context.service_did().to_string()).into_response()
121
146
}
122
147
123
148
pub(super) async fn handle_xrpc_health() -> Json<serde_json::Value> {
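One consequence of `metrics_middleware` worth noting: the `.layer(...)` call in `create_router` comes after both the routes and the `ServeDir` fallback, so in axum it wraps all of them, and static-file responses are timed alongside XRPC traffic. Because fallback requests carry no `MatchedPath`, they all collapse into a single `path="unknown"` tag, keeping metric cardinality bounded regardless of what paths clients request. The tag logic, extracted as a pure function (a sketch of the same code shown above):

```rust
use axum::extract::MatchedPath;

// Same logic as in metrics_middleware: matched routes report their route
// template; anything served by the fallback reports "unknown".
fn path_tag(matched_path: Option<&MatchedPath>) -> String {
    matched_path
        .map(|p| p.as_str().to_string())
        .unwrap_or_else(|| "unknown".to_string())
}
```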
+360
src/jetstream_handler.rs
···
1
+
//! Jetstream event handler for QuickDID
2
+
//!
3
+
//! This module provides the event handler for processing AT Protocol Jetstream events,
4
+
//! specifically handling Account and Identity events to maintain cache consistency.
5
+
6
+
use crate::handle_resolver::HandleResolver;
7
+
use crate::metrics::MetricsPublisher;
8
+
use anyhow::Result;
9
+
use atproto_jetstream::{EventHandler, JetstreamEvent};
10
+
use std::sync::Arc;
11
+
use tracing::{debug, info, warn};
12
+
13
+
/// Jetstream event handler for QuickDID
14
+
///
15
+
/// This handler processes AT Protocol events from the Jetstream firehose to keep
16
+
/// the handle resolver cache in sync with the network state.
17
+
///
18
+
/// # Event Processing
19
+
///
20
+
/// ## Account Events
21
+
/// - When an account is marked as "deleted" or "deactivated", the DID is purged from the cache
22
+
/// - Metrics are tracked for successful and failed purge operations
23
+
///
24
+
/// ## Identity Events
25
+
/// - When an identity event contains a handle, the handle-to-DID mapping is updated
26
+
/// - When an identity event lacks a handle (indicating removal), the DID is purged
27
+
/// - Metrics are tracked for successful and failed update/purge operations
28
+
///
29
+
/// # Example
30
+
///
31
+
/// ```no_run
32
+
/// use quickdid::jetstream_handler::QuickDidEventHandler;
33
+
/// use quickdid::handle_resolver::HandleResolver;
34
+
/// use quickdid::metrics::MetricsPublisher;
35
+
/// use std::sync::Arc;
36
+
///
37
+
/// # async fn example(resolver: Arc<dyn HandleResolver>, metrics: Arc<dyn MetricsPublisher>) {
38
+
/// let handler = QuickDidEventHandler::new(resolver, metrics);
39
+
/// // Register with a JetstreamConsumer
40
+
/// # }
41
+
/// ```
42
+
pub struct QuickDidEventHandler {
43
+
resolver: Arc<dyn HandleResolver>,
44
+
metrics: Arc<dyn MetricsPublisher>,
45
+
}
46
+
47
+
impl QuickDidEventHandler {
48
+
/// Create a new Jetstream event handler
49
+
///
50
+
/// # Arguments
51
+
///
52
+
/// * `resolver` - The handle resolver to use for cache operations
53
+
/// * `metrics` - The metrics publisher for tracking event processing
54
+
pub fn new(resolver: Arc<dyn HandleResolver>, metrics: Arc<dyn MetricsPublisher>) -> Self {
55
+
Self { resolver, metrics }
56
+
}
57
+
}
58
+
59
+
#[async_trait::async_trait]
60
+
impl EventHandler for QuickDidEventHandler {
61
+
fn handler_id(&self) -> String {
62
+
"quickdid_handler".to_string()
63
+
}
64
+
65
+
async fn handle_event(&self, event: JetstreamEvent) -> Result<()> {
66
+
match event {
67
+
JetstreamEvent::Account { did, kind, .. } => {
68
+
// If account kind is "deleted" or "deactivated", purge the DID
69
+
if kind == "deleted" || kind == "deactivated" {
70
+
info!(did = %did, kind = %kind, "Purging account");
71
+
match self.resolver.purge(&did).await {
72
+
Ok(()) => {
73
+
self.metrics.incr("jetstream.account.purged").await;
74
+
}
75
+
Err(e) => {
76
+
warn!(did = %did, error = ?e, "Failed to purge DID");
77
+
self.metrics.incr("jetstream.account.purge_error").await;
78
+
}
79
+
}
80
+
}
81
+
self.metrics.incr("jetstream.account.processed").await;
82
+
}
83
+
JetstreamEvent::Identity { did, identity, .. } => {
84
+
// Extract handle from identity JSON if available
85
+
if !identity.is_null() {
86
+
if let Some(handle_value) = identity.get("handle") {
87
+
if let Some(handle) = handle_value.as_str() {
88
+
info!(handle = %handle, did = %did, "Updating identity mapping");
89
+
match self.resolver.set(handle, &did).await {
90
+
Ok(()) => {
91
+
self.metrics.incr("jetstream.identity.updated").await;
92
+
}
93
+
Err(e) => {
94
+
warn!(handle = %handle, did = %did, error = ?e, "Failed to update mapping");
95
+
self.metrics.incr("jetstream.identity.update_error").await;
96
+
}
97
+
}
98
+
} else {
99
+
// Handle value is present but not a string; purge the DID
100
+
info!(did = %did, "Purging identity without valid handle");
101
+
match self.resolver.purge(&did).await {
102
+
Ok(()) => {
103
+
self.metrics.incr("jetstream.identity.purged").await;
104
+
}
105
+
Err(e) => {
106
+
warn!(did = %did, error = ?e, "Failed to purge DID");
107
+
self.metrics.incr("jetstream.identity.purge_error").await;
108
+
}
109
+
}
110
+
}
111
+
} else {
112
+
// No handle field, purge the DID
113
+
info!(did = %did, "Purging identity without handle field");
114
+
match self.resolver.purge(&did).await {
115
+
Ok(()) => {
116
+
self.metrics.incr("jetstream.identity.purged").await;
117
+
}
118
+
Err(e) => {
119
+
warn!(did = %did, error = ?e, "Failed to purge DID");
120
+
self.metrics.incr("jetstream.identity.purge_error").await;
121
+
}
122
+
}
123
+
}
124
+
} else {
125
+
// Null identity payload means the identity was removed; purge the DID
126
+
info!(did = %did, "Purging identity with null info");
127
+
match self.resolver.purge(&did).await {
128
+
Ok(()) => {
129
+
self.metrics.incr("jetstream.identity.purged").await;
130
+
}
131
+
Err(e) => {
132
+
warn!(did = %did, error = ?e, "Failed to purge DID");
133
+
self.metrics.incr("jetstream.identity.purge_error").await;
134
+
}
135
+
}
136
+
}
137
+
self.metrics.incr("jetstream.identity.processed").await;
138
+
}
139
+
_ => {
140
+
// Other event types we don't care about
141
+
debug!("Ignoring unhandled Jetstream event type");
142
+
}
143
+
}
144
+
Ok(())
145
+
}
146
+
}
147
+
148
+
#[cfg(test)]
149
+
mod tests {
150
+
use super::*;
151
+
use crate::handle_resolver::HandleResolverError;
152
+
use crate::metrics::NoOpMetricsPublisher;
153
+
use async_trait::async_trait;
154
+
use serde_json::json;
155
+
156
+
/// Mock resolver for testing
157
+
struct MockResolver {
158
+
purge_called: std::sync::Arc<std::sync::Mutex<Vec<String>>>,
159
+
set_called: std::sync::Arc<std::sync::Mutex<Vec<(String, String)>>>,
160
+
}
161
+
162
+
impl MockResolver {
163
+
fn new() -> Self {
164
+
Self {
165
+
purge_called: std::sync::Arc::new(std::sync::Mutex::new(Vec::new())),
166
+
set_called: std::sync::Arc::new(std::sync::Mutex::new(Vec::new())),
167
+
}
168
+
}
169
+
170
+
fn get_purge_calls(&self) -> Vec<String> {
171
+
self.purge_called.lock().unwrap().clone()
172
+
}
173
+
174
+
fn get_set_calls(&self) -> Vec<(String, String)> {
175
+
self.set_called.lock().unwrap().clone()
176
+
}
177
+
}
178
+
179
+
#[async_trait]
180
+
impl HandleResolver for MockResolver {
181
+
async fn resolve(&self, _handle: &str) -> Result<(String, u64), HandleResolverError> {
182
+
unimplemented!("Not needed for tests")
183
+
}
184
+
185
+
async fn purge(&self, subject: &str) -> Result<(), HandleResolverError> {
186
+
self.purge_called.lock().unwrap().push(subject.to_string());
187
+
Ok(())
188
+
}
189
+
190
+
async fn set(&self, handle: &str, did: &str) -> Result<(), HandleResolverError> {
191
+
self.set_called
192
+
.lock()
193
+
.unwrap()
194
+
.push((handle.to_string(), did.to_string()));
195
+
Ok(())
196
+
}
197
+
}
198
+
199
+
#[tokio::test]
200
+
async fn test_account_deleted_event() {
201
+
let resolver = Arc::new(MockResolver::new());
202
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
203
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
204
+
205
+
// Create a deleted account event
206
+
let event = JetstreamEvent::Account {
207
+
did: "did:plc:test123".to_string(),
208
+
kind: "deleted".to_string(),
209
+
time_us: 0,
210
+
account: json!(null),
211
+
};
212
+
213
+
handler.handle_event(event).await.unwrap();
214
+
215
+
// Verify the DID was purged
216
+
let purge_calls = resolver.get_purge_calls();
217
+
assert_eq!(purge_calls.len(), 1);
218
+
assert_eq!(purge_calls[0], "did:plc:test123");
219
+
}
220
+
221
+
#[tokio::test]
222
+
async fn test_account_deactivated_event() {
223
+
let resolver = Arc::new(MockResolver::new());
224
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
225
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
226
+
227
+
// Create a deactivated account event
228
+
let event = JetstreamEvent::Account {
229
+
did: "did:plc:test456".to_string(),
230
+
kind: "deactivated".to_string(),
231
+
time_us: 0,
232
+
account: json!(null),
233
+
};
234
+
235
+
handler.handle_event(event).await.unwrap();
236
+
237
+
// Verify the DID was purged
238
+
let purge_calls = resolver.get_purge_calls();
239
+
assert_eq!(purge_calls.len(), 1);
240
+
assert_eq!(purge_calls[0], "did:plc:test456");
241
+
}
242
+
243
+
#[tokio::test]
244
+
async fn test_account_active_event() {
245
+
let resolver = Arc::new(MockResolver::new());
246
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
247
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
248
+
249
+
// Create an active account event (should not purge)
250
+
let event = JetstreamEvent::Account {
251
+
did: "did:plc:test789".to_string(),
252
+
kind: "active".to_string(),
253
+
time_us: 0,
254
+
account: json!(null),
255
+
};
256
+
257
+
handler.handle_event(event).await.unwrap();
258
+
259
+
// Verify the DID was NOT purged
260
+
let purge_calls = resolver.get_purge_calls();
261
+
assert_eq!(purge_calls.len(), 0);
262
+
}
263
+
264
+
#[tokio::test]
265
+
async fn test_identity_with_handle_event() {
266
+
let resolver = Arc::new(MockResolver::new());
267
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
268
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
269
+
270
+
// Create an identity event with a handle
271
+
let event = JetstreamEvent::Identity {
272
+
did: "did:plc:testuser".to_string(),
273
+
kind: "update".to_string(),
274
+
time_us: 0,
275
+
identity: json!({
276
+
"handle": "alice.bsky.social"
277
+
}),
278
+
};
279
+
280
+
handler.handle_event(event).await.unwrap();
281
+
282
+
// Verify the set method was called
283
+
let set_calls = resolver.get_set_calls();
284
+
assert_eq!(set_calls.len(), 1);
285
+
assert_eq!(
286
+
set_calls[0],
287
+
(
288
+
"alice.bsky.social".to_string(),
289
+
"did:plc:testuser".to_string()
290
+
)
291
+
);
292
+
293
+
// Verify no purge was called
294
+
let purge_calls = resolver.get_purge_calls();
295
+
assert_eq!(purge_calls.len(), 0);
296
+
}
297
+
298
+
#[tokio::test]
299
+
async fn test_identity_without_handle_event() {
300
+
let resolver = Arc::new(MockResolver::new());
301
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
302
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
303
+
304
+
// Create an identity event without a handle field
305
+
let event = JetstreamEvent::Identity {
306
+
did: "did:plc:nohandle".to_string(),
307
+
kind: "update".to_string(),
308
+
time_us: 0,
309
+
identity: json!({
310
+
"other_field": "value"
311
+
}),
312
+
};
313
+
314
+
handler.handle_event(event).await.unwrap();
315
+
316
+
// Verify the DID was purged
317
+
let purge_calls = resolver.get_purge_calls();
318
+
assert_eq!(purge_calls.len(), 1);
319
+
assert_eq!(purge_calls[0], "did:plc:nohandle");
320
+
321
+
// Verify set was not called
322
+
let set_calls = resolver.get_set_calls();
323
+
assert_eq!(set_calls.len(), 0);
324
+
}
325
+
326
+
#[tokio::test]
327
+
async fn test_identity_with_null_identity() {
328
+
let resolver = Arc::new(MockResolver::new());
329
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
330
+
let handler = QuickDidEventHandler::new(resolver.clone(), metrics);
331
+
332
+
// Create an identity event with null identity
333
+
let event = JetstreamEvent::Identity {
334
+
did: "did:plc:nullidentity".to_string(),
335
+
kind: "delete".to_string(),
336
+
time_us: 0,
337
+
identity: json!(null),
338
+
};
339
+
340
+
handler.handle_event(event).await.unwrap();
341
+
342
+
// Verify the DID was purged
343
+
let purge_calls = resolver.get_purge_calls();
344
+
assert_eq!(purge_calls.len(), 1);
345
+
assert_eq!(purge_calls[0], "did:plc:nullidentity");
346
+
347
+
// Verify set was not called
348
+
let set_calls = resolver.get_set_calls();
349
+
assert_eq!(set_calls.len(), 0);
350
+
}
351
+
352
+
#[tokio::test]
353
+
async fn test_handler_id() {
354
+
let resolver = Arc::new(MockResolver::new());
355
+
let metrics = Arc::new(NoOpMetricsPublisher::new());
356
+
let handler = QuickDidEventHandler::new(resolver, metrics);
357
+
358
+
assert_eq!(handler.handler_id(), "quickdid_handler");
359
+
}
360
+
}
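The three purge branches in `handle_event` are identical except for the metric names. A follow-up refactor could collapse them into a helper along these lines (a sketch, not part of this diff):

```rust
impl QuickDidEventHandler {
    // Hypothetical helper: purge a DID and record the outcome under the
    // given metric names, mirroring the repeated match blocks above.
    async fn purge_and_record(&self, did: &str, ok_metric: &str, err_metric: &str) {
        match self.resolver.purge(did).await {
            Ok(()) => self.metrics.incr(ok_metric).await,
            Err(e) => {
                warn!(did = %did, error = ?e, "Failed to purge DID");
                self.metrics.incr(err_metric).await;
            }
        }
    }
}
```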
+8
src/lexicon_resolver/mod.rs
···
1
+
//! Lexicon resolution with caching support.
2
+
//!
3
+
//! This module provides implementations for resolving AT Protocol lexicons (NSIDs)
4
+
//! to their schemas with various caching strategies.
5
+
6
+
mod redis;
7
+
8
+
pub use redis::{create_redis_lexicon_resolver, create_redis_lexicon_resolver_with_ttl};
+458
src/lexicon_resolver/redis.rs
···
1
+
//! Redis-backed caching lexicon resolver.
2
+
//!
3
+
//! This module provides a lexicon resolver that caches resolution results in Redis
4
+
//! with configurable expiration times. Redis caching provides persistence across
5
+
//! service restarts and allows sharing of cached results across multiple instances.
6
+
7
+
use crate::metrics::SharedMetricsPublisher;
8
+
use async_trait::async_trait;
9
+
use atproto_lexicon::resolve::LexiconResolver;
10
+
use deadpool_redis::{Pool as RedisPool, redis::AsyncCommands};
11
+
use metrohash::MetroHash64;
12
+
use std::hash::Hasher as _;
13
+
use std::sync::Arc;
14
+
15
+
/// Redis-backed caching lexicon resolver.
16
+
///
17
+
/// This resolver caches lexicon resolution results in Redis with a configurable TTL.
18
+
/// Results are stored as JSON bytes to minimize storage overhead while maintaining
19
+
/// the schema structure.
20
+
///
21
+
/// # Features
22
+
///
23
+
/// - Persistent caching across service restarts
24
+
/// - Shared cache across multiple service instances
25
+
/// - Configurable TTL (default: 90 days)
26
+
/// - JSON storage format for lexicon schemas
27
+
/// - Graceful fallback if Redis is unavailable
28
+
///
29
+
/// # Example
30
+
///
31
+
/// ```no_run
32
+
/// use std::sync::Arc;
33
+
/// use deadpool_redis::Pool;
34
+
/// use atproto_lexicon::resolve::LexiconResolver;
35
+
/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
36
+
/// use quickdid::metrics::NoOpMetricsPublisher;
37
+
///
38
+
/// # async fn example() {
39
+
/// # let inner_resolver: Arc<dyn LexiconResolver> = todo!();
40
+
/// # let redis_pool: Pool = todo!();
41
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
42
+
/// // Create with default 90-day TTL
43
+
/// let resolver = create_redis_lexicon_resolver(
44
+
/// inner_resolver,
45
+
/// redis_pool,
46
+
/// metrics
47
+
/// );
48
+
/// # }
49
+
/// ```
50
+
pub(super) struct RedisLexiconResolver {
51
+
/// Base lexicon resolver to perform actual resolution
52
+
inner: Arc<dyn LexiconResolver>,
53
+
/// Redis connection pool
54
+
pool: RedisPool,
55
+
/// Redis key prefix for lexicon resolution cache
56
+
key_prefix: String,
57
+
/// TTL for cache entries in seconds
58
+
ttl_seconds: u64,
59
+
/// Metrics publisher for telemetry
60
+
metrics: SharedMetricsPublisher,
61
+
}
62
+
63
+
impl RedisLexiconResolver {
64
+
/// Create a new Redis-backed lexicon resolver with default 90-day TTL.
65
+
fn new(
66
+
inner: Arc<dyn LexiconResolver>,
67
+
pool: RedisPool,
68
+
metrics: SharedMetricsPublisher,
69
+
) -> Self {
70
+
Self::with_ttl(inner, pool, 90 * 24 * 60 * 60, metrics) // 90 days default
71
+
}
72
+
73
+
/// Create a new Redis-backed lexicon resolver with custom TTL.
74
+
fn with_ttl(
75
+
inner: Arc<dyn LexiconResolver>,
76
+
pool: RedisPool,
77
+
ttl_seconds: u64,
78
+
metrics: SharedMetricsPublisher,
79
+
) -> Self {
80
+
Self::with_full_config(inner, pool, "lexicon:".to_string(), ttl_seconds, metrics)
81
+
}
82
+
83
+
/// Create a new Redis-backed lexicon resolver with full configuration.
84
+
fn with_full_config(
85
+
inner: Arc<dyn LexiconResolver>,
86
+
pool: RedisPool,
87
+
key_prefix: String,
88
+
ttl_seconds: u64,
89
+
metrics: SharedMetricsPublisher,
90
+
) -> Self {
91
+
Self {
92
+
inner,
93
+
pool,
94
+
key_prefix,
95
+
ttl_seconds,
96
+
metrics,
97
+
}
98
+
}
99
+
100
+
/// Generate the Redis key for an NSID.
101
+
///
102
+
/// Uses MetroHash64 to generate a consistent hash of the NSID
103
+
/// for use as the Redis key. This provides better key distribution
104
+
/// and avoids issues with special characters in NSIDs.
105
+
fn make_key(&self, nsid: &str) -> String {
106
+
let mut h = MetroHash64::default();
107
+
h.write(nsid.as_bytes());
108
+
format!("{}{}", self.key_prefix, h.finish())
109
+
}
110
+
111
+
/// Get the TTL in seconds.
112
+
fn ttl_seconds(&self) -> u64 {
113
+
self.ttl_seconds
114
+
}
115
+
}
116
+
117
+
#[async_trait]
118
+
impl LexiconResolver for RedisLexiconResolver {
119
+
async fn resolve(&self, nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
120
+
let key = self.make_key(nsid);
121
+
122
+
// Try to get from Redis cache first
123
+
match self.pool.get().await {
124
+
Ok(mut conn) => {
125
+
// Check if the key exists in Redis (stored as JSON bytes)
126
+
let cached: Option<Vec<u8>> = match conn.get(&key).await {
127
+
Ok(value) => value,
128
+
Err(e) => {
129
+
self.metrics.incr("lexicon_resolver.redis.get_error").await;
130
+
tracing::warn!("Failed to get NSID from Redis cache: {}", e);
131
+
None
132
+
}
133
+
};
134
+
135
+
if let Some(cached_bytes) = cached {
136
+
// Deserialize the cached JSON
137
+
match serde_json::from_slice::<serde_json::Value>(&cached_bytes) {
138
+
Ok(cached_value) => {
139
+
tracing::debug!("Cache hit for NSID {}", nsid);
140
+
self.metrics.incr("lexicon_resolver.redis.cache_hit").await;
141
+
return Ok(cached_value);
142
+
}
143
+
Err(e) => {
144
+
tracing::warn!(
145
+
"Failed to deserialize cached lexicon for NSID {}: {}",
146
+
nsid,
147
+
e
148
+
);
149
+
self.metrics
150
+
.incr("lexicon_resolver.redis.deserialize_error")
151
+
.await;
152
+
// Fall through to re-resolve if deserialization fails
153
+
}
154
+
}
155
+
}
156
+
157
+
// Not in cache, resolve through inner resolver
158
+
tracing::debug!("Cache miss for NSID {}, resolving...", nsid);
159
+
self.metrics.incr("lexicon_resolver.redis.cache_miss").await;
160
+
let result = self.inner.resolve(nsid).await;
161
+
162
+
// Cache successful result
163
+
if let Ok(ref schema) = result {
164
+
// Serialize to JSON bytes
165
+
match serde_json::to_vec(schema) {
166
+
Ok(bytes) => {
167
+
// Set with expiration (ignore errors to not fail the resolution)
168
+
if let Err(e) = conn
169
+
.set_ex::<_, _, ()>(&key, bytes, self.ttl_seconds())
170
+
.await
171
+
{
172
+
tracing::warn!(
173
+
"Failed to cache lexicon resolution in Redis: {}",
174
+
e
175
+
);
176
+
self.metrics
177
+
.incr("lexicon_resolver.redis.cache_set_error")
178
+
.await;
179
+
} else {
180
+
tracing::debug!("Cached lexicon for NSID {}", nsid);
181
+
self.metrics.incr("lexicon_resolver.redis.cache_set").await;
182
+
}
183
+
}
184
+
Err(e) => {
185
+
tracing::warn!(
186
+
"Failed to serialize lexicon result for NSID {}: {}",
187
+
nsid,
188
+
e
189
+
);
190
+
self.metrics
191
+
.incr("lexicon_resolver.redis.serialize_error")
192
+
.await;
193
+
}
194
+
}
195
+
}
196
+
197
+
result
198
+
}
199
+
Err(e) => {
200
+
// Redis connection failed, fall back to inner resolver
201
+
tracing::warn!(
202
+
"Failed to get Redis connection, falling back to uncached resolution: {}",
203
+
e
204
+
);
205
+
self.metrics
206
+
.incr("lexicon_resolver.redis.connection_error")
207
+
.await;
208
+
self.inner.resolve(nsid).await
209
+
}
210
+
}
211
+
}
212
+
}
213
+
214
+
/// Create a new Redis-backed lexicon resolver with default 90-day TTL.
215
+
///
216
+
/// # Arguments
217
+
///
218
+
/// * `inner` - The underlying resolver to use for actual resolution
219
+
/// * `pool` - Redis connection pool
220
+
/// * `metrics` - Metrics publisher for telemetry
221
+
///
222
+
/// # Example
223
+
///
224
+
/// ```no_run
225
+
/// use std::sync::Arc;
226
+
/// use atproto_lexicon::resolve::{DefaultLexiconResolver, LexiconResolver};
227
+
/// use quickdid::lexicon_resolver::create_redis_lexicon_resolver;
228
+
/// use quickdid::cache::create_redis_pool;
229
+
/// use quickdid::metrics::NoOpMetricsPublisher;
230
+
///
231
+
/// # async fn example() -> anyhow::Result<()> {
232
+
/// # use atproto_identity::resolve::HickoryDnsResolver;
233
+
/// # use reqwest::Client;
234
+
/// # let dns_resolver = HickoryDnsResolver::create_resolver(&[]);
235
+
/// # let http_client = Client::new();
236
+
/// # let metrics = Arc::new(NoOpMetricsPublisher);
237
+
/// let base: Arc<dyn LexiconResolver> = Arc::new(
238
+
/// DefaultLexiconResolver::new(http_client, dns_resolver)
239
+
/// );
240
+
///
241
+
/// let pool = create_redis_pool("redis://localhost:6379")?;
242
+
/// let resolver = create_redis_lexicon_resolver(base, pool, metrics);
243
+
/// let schema = resolver.resolve("app.bsky.feed.post").await.unwrap();
244
+
/// # Ok(())
245
+
/// # }
246
+
/// ```
247
+
pub fn create_redis_lexicon_resolver(
248
+
inner: Arc<dyn LexiconResolver>,
249
+
pool: RedisPool,
250
+
metrics: SharedMetricsPublisher,
251
+
) -> Arc<dyn LexiconResolver> {
252
+
Arc::new(RedisLexiconResolver::new(inner, pool, metrics))
253
+
}
254
+
255
+
/// Create a new Redis-backed lexicon resolver with custom TTL.
256
+
///
257
+
/// # Arguments
258
+
///
259
+
/// * `inner` - The underlying resolver to use for actual resolution
260
+
/// * `pool` - Redis connection pool
261
+
/// * `ttl_seconds` - TTL for cache entries in seconds
262
+
/// * `metrics` - Metrics publisher for telemetry
263
+
pub fn create_redis_lexicon_resolver_with_ttl(
264
+
inner: Arc<dyn LexiconResolver>,
265
+
pool: RedisPool,
266
+
ttl_seconds: u64,
267
+
metrics: SharedMetricsPublisher,
268
+
) -> Arc<dyn LexiconResolver> {
269
+
Arc::new(RedisLexiconResolver::with_ttl(
270
+
inner,
271
+
pool,
272
+
ttl_seconds,
273
+
metrics,
274
+
))
275
+
}
276
+
277
+
#[cfg(test)]
278
+
mod tests {
279
+
use super::*;
280
+
281
+
// Mock lexicon resolver for testing
282
+
#[derive(Clone)]
283
+
struct MockLexiconResolver {
284
+
should_fail: bool,
285
+
expected_schema: serde_json::Value,
286
+
}
287
+
288
+
#[async_trait]
289
+
impl LexiconResolver for MockLexiconResolver {
290
+
async fn resolve(&self, _nsid: &str) -> Result<serde_json::Value, anyhow::Error> {
291
+
if self.should_fail {
292
+
Err(anyhow::anyhow!("Mock resolution failure"))
293
+
} else {
294
+
Ok(self.expected_schema.clone())
295
+
}
296
+
}
297
+
}
298
+
299
+
#[tokio::test]
300
+
async fn test_redis_lexicon_resolver_cache_hit() {
301
+
let pool = match crate::test_helpers::get_test_redis_pool() {
302
+
Some(p) => p,
303
+
None => return,
304
+
};
305
+
306
+
// Create mock resolver with sample schema
307
+
let schema = serde_json::json!({
308
+
"lexicon": 1,
309
+
"id": "app.bsky.feed.post",
310
+
"defs": {
311
+
"main": {
312
+
"type": "record",
313
+
"description": "A post record"
314
+
}
315
+
}
316
+
});
317
+
318
+
let mock_resolver = Arc::new(MockLexiconResolver {
319
+
should_fail: false,
320
+
expected_schema: schema.clone(),
321
+
});
322
+
323
+
// Create metrics publisher
324
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
325
+
326
+
// Create Redis-backed resolver with a unique key prefix for testing
327
+
let test_prefix = format!(
328
+
"test:lexicon:{}:",
329
+
std::time::SystemTime::now()
330
+
.duration_since(std::time::UNIX_EPOCH)
331
+
.unwrap()
332
+
.as_nanos()
333
+
);
334
+
let redis_resolver = RedisLexiconResolver::with_full_config(
335
+
mock_resolver,
336
+
pool.clone(),
337
+
test_prefix.clone(),
338
+
3600,
339
+
metrics,
340
+
);
341
+
342
+
let test_nsid = "app.bsky.feed.post";
343
+
344
+
// First resolution - should call inner resolver
345
+
let result1 = redis_resolver.resolve(test_nsid).await.unwrap();
346
+
assert_eq!(result1, schema);
347
+
348
+
// Second resolution - should hit cache
349
+
let result2 = redis_resolver.resolve(test_nsid).await.unwrap();
350
+
assert_eq!(result2, schema);
351
+
352
+
// Clean up test data
353
+
if let Ok(mut conn) = pool.get().await {
354
+
let mut h = MetroHash64::default();
355
+
h.write(test_nsid.as_bytes());
356
+
let key = format!("{}{}", test_prefix, h.finish());
357
+
let _: Result<(), _> = conn.del(key).await;
358
+
}
359
+
}
360
+
361
+
#[tokio::test]
362
+
async fn test_redis_lexicon_resolver_cache_miss() {
363
+
let pool = match crate::test_helpers::get_test_redis_pool() {
364
+
Some(p) => p,
365
+
None => return,
366
+
};
367
+
368
+
let schema = serde_json::json!({
369
+
"lexicon": 1,
370
+
"id": "com.example.test",
371
+
});
372
+
373
+
let mock_resolver = Arc::new(MockLexiconResolver {
374
+
should_fail: false,
375
+
expected_schema: schema.clone(),
376
+
});
377
+
378
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
379
+
380
+
let test_prefix = format!(
381
+
"test:lexicon:{}:",
382
+
std::time::SystemTime::now()
383
+
.duration_since(std::time::UNIX_EPOCH)
384
+
.unwrap()
385
+
.as_nanos()
386
+
);
387
+
let redis_resolver = RedisLexiconResolver::with_full_config(
388
+
mock_resolver,
389
+
pool.clone(),
390
+
test_prefix.clone(),
391
+
3600,
392
+
metrics,
393
+
);
394
+
395
+
let test_nsid = "com.example.test";
396
+
397
+
// Ensure key doesn't exist
398
+
if let Ok(mut conn) = pool.get().await {
399
+
let mut h = MetroHash64::default();
400
+
h.write(test_nsid.as_bytes());
401
+
let key = format!("{}{}", test_prefix, h.finish());
402
+
let _: Result<(), _> = conn.del(&key).await;
403
+
}
404
+
405
+
// Resolution should succeed and cache the result
406
+
let result = redis_resolver.resolve(test_nsid).await.unwrap();
407
+
assert_eq!(result, schema);
408
+
409
+
// Verify the result was cached
410
+
if let Ok(mut conn) = pool.get().await {
411
+
let mut h = MetroHash64::default();
412
+
h.write(test_nsid.as_bytes());
413
+
let key = format!("{}{}", test_prefix, h.finish());
414
+
let exists: bool = conn.exists(&key).await.unwrap();
415
+
assert!(exists, "Result should be cached");
416
+
417
+
// Clean up
418
+
let _: Result<(), _> = conn.del(key).await;
419
+
}
420
+
}
421
+
422
+
#[tokio::test]
423
+
async fn test_redis_lexicon_resolver_error_handling() {
424
+
let pool = match crate::test_helpers::get_test_redis_pool() {
425
+
Some(p) => p,
426
+
None => return,
427
+
};
428
+
429
+
// Create mock resolver that fails
430
+
let mock_resolver = Arc::new(MockLexiconResolver {
431
+
should_fail: true,
432
+
expected_schema: serde_json::Value::Null,
433
+
});
434
+
435
+
let metrics = Arc::new(crate::metrics::NoOpMetricsPublisher);
436
+
437
+
let test_prefix = format!(
438
+
"test:lexicon:{}:",
439
+
std::time::SystemTime::now()
440
+
.duration_since(std::time::UNIX_EPOCH)
441
+
.unwrap()
442
+
.as_nanos()
443
+
);
444
+
let redis_resolver = RedisLexiconResolver::with_full_config(
445
+
mock_resolver,
446
+
pool.clone(),
447
+
test_prefix,
448
+
3600,
449
+
metrics,
450
+
);
451
+
452
+
let test_nsid = "com.example.nonexistent";
453
+
454
+
// Resolution should fail
455
+
let result = redis_resolver.resolve(test_nsid).await;
456
+
assert!(result.is_err());
457
+
}
458
+
}
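The module-level doc example covers the 90-day default; choosing a custom TTL is just the other factory. A usage sketch, with the inputs assumed to exist:

```rust
use std::sync::Arc;
use atproto_lexicon::resolve::LexiconResolver;

// Hedged sketch: same wiring as create_redis_lexicon_resolver, but with a
// 7-day TTL instead of the 90-day default.
fn build_week_ttl_resolver(
    base: Arc<dyn LexiconResolver>,
    pool: deadpool_redis::Pool,
    metrics: crate::metrics::SharedMetricsPublisher,
) -> Arc<dyn LexiconResolver> {
    create_redis_lexicon_resolver_with_ttl(base, pool, 7 * 24 * 60 * 60, metrics)
}
```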
+3
src/lib.rs
···
2
2
pub mod config; // Config and Args needed by binary
3
3
pub mod handle_resolver; // Only traits and factory functions exposed
4
4
pub mod http; // Only create_router exposed
5
+
pub mod jetstream_handler; // Jetstream event handler for AT Protocol events
6
+
pub mod lexicon_resolver; // Lexicon resolution with caching support
5
7
6
8
// Semi-public modules - needed by binary but with limited exposure
7
9
pub mod cache; // Only create_redis_pool exposed
8
10
pub mod handle_resolver_task; // Factory functions and TaskConfig exposed
11
+
pub mod metrics; // Metrics publishing trait and implementations
9
12
pub mod queue; // Queue adapter system with trait and factory functions
10
13
pub mod sqlite_schema; // SQLite schema management functions exposed
11
14
pub mod task_manager; // Only spawn_cancellable_task exposed
+547
src/metrics.rs
···
1
+
use crate::config::Config;
2
+
use async_trait::async_trait;
3
+
use cadence::{
4
+
BufferedUdpMetricSink, Counted, CountedExt, Gauged, Metric, QueuingMetricSink, StatsdClient,
5
+
Timed,
6
+
};
7
+
use std::net::UdpSocket;
8
+
use std::sync::Arc;
9
+
use thiserror::Error;
10
+
use tracing::{debug, error};
11
+
12
+
/// Trait for publishing metrics with counter and gauge support
13
+
/// Designed for minimal compatibility with cadence-style metrics
14
+
#[async_trait]
15
+
pub trait MetricsPublisher: Send + Sync {
16
+
/// Increment a counter by 1
17
+
async fn incr(&self, key: &str);
18
+
19
+
/// Increment a counter by a specific value
20
+
async fn count(&self, key: &str, value: u64);
21
+
22
+
/// Increment a counter with tags
23
+
async fn incr_with_tags(&self, key: &str, tags: &[(&str, &str)]);
24
+
25
+
/// Increment a counter by a specific value with tags
26
+
async fn count_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]);
27
+
28
+
/// Record a gauge value
29
+
async fn gauge(&self, key: &str, value: u64);
30
+
31
+
/// Record a gauge value with tags
32
+
async fn gauge_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]);
33
+
34
+
/// Record a timing in milliseconds
35
+
async fn time(&self, key: &str, millis: u64);
36
+
37
+
/// Record a timing with tags
38
+
async fn time_with_tags(&self, key: &str, millis: u64, tags: &[(&str, &str)]);
39
+
}
40
+
41
+
/// No-op implementation for development and testing
42
+
#[derive(Debug, Clone, Default)]
43
+
pub struct NoOpMetricsPublisher;
44
+
45
+
impl NoOpMetricsPublisher {
46
+
pub fn new() -> Self {
47
+
Self
48
+
}
49
+
}
50
+
51
+
#[async_trait]
52
+
impl MetricsPublisher for NoOpMetricsPublisher {
53
+
async fn incr(&self, _key: &str) {
54
+
// No-op
55
+
}
56
+
57
+
async fn count(&self, _key: &str, _value: u64) {
58
+
// No-op
59
+
}
60
+
61
+
async fn incr_with_tags(&self, _key: &str, _tags: &[(&str, &str)]) {
62
+
// No-op
63
+
}
64
+
65
+
async fn count_with_tags(&self, _key: &str, _value: u64, _tags: &[(&str, &str)]) {
66
+
// No-op
67
+
}
68
+
69
+
async fn gauge(&self, _key: &str, _value: u64) {
70
+
// No-op
71
+
}
72
+
73
+
async fn gauge_with_tags(&self, _key: &str, _value: u64, _tags: &[(&str, &str)]) {
74
+
// No-op
75
+
}
76
+
77
+
async fn time(&self, _key: &str, _millis: u64) {
78
+
// No-op
79
+
}
80
+
81
+
async fn time_with_tags(&self, _key: &str, _millis: u64, _tags: &[(&str, &str)]) {
82
+
// No-op
83
+
}
84
+
}
85
+
86
+
/// Statsd-backed metrics publisher using cadence
87
+
pub struct StatsdMetricsPublisher {
88
+
client: StatsdClient,
89
+
default_tags: Vec<(String, String)>,
90
+
}
91
+
92
+
impl StatsdMetricsPublisher {
93
+
/// Create a new StatsdMetricsPublisher with default configuration
94
+
pub fn new(host: &str, prefix: &str) -> Result<Self, Box<dyn std::error::Error>> {
95
+
Self::new_with_bind(host, prefix, "[::]:0")
96
+
}
97
+
98
+
/// Create a new StatsdMetricsPublisher with custom bind address
99
+
pub fn new_with_bind(
100
+
host: &str,
101
+
prefix: &str,
102
+
bind_addr: &str,
103
+
) -> Result<Self, Box<dyn std::error::Error>> {
104
+
Self::new_with_bind_and_tags(host, prefix, bind_addr, vec![])
105
+
}
106
+
107
+
/// Create a new StatsdMetricsPublisher with default tags
108
+
pub fn new_with_tags(
109
+
host: &str,
110
+
prefix: &str,
111
+
default_tags: Vec<(String, String)>,
112
+
) -> Result<Self, Box<dyn std::error::Error>> {
113
+
Self::new_with_bind_and_tags(host, prefix, "[::]:0", default_tags)
114
+
}
115
+
116
+
/// Create a new StatsdMetricsPublisher with custom bind address and tags
117
+
pub fn new_with_bind_and_tags(
118
+
host: &str,
119
+
prefix: &str,
120
+
bind_addr: &str,
121
+
default_tags: Vec<(String, String)>,
122
+
) -> Result<Self, Box<dyn std::error::Error>> {
123
+
tracing::info!(
124
+
"Creating StatsdMetricsPublisher: host={}, prefix={}, bind={}, tags={:?}",
125
+
host,
126
+
prefix,
127
+
bind_addr,
128
+
default_tags
129
+
);
130
+
131
+
let socket = UdpSocket::bind(bind_addr)?;
132
+
socket.set_nonblocking(true)?;
133
+
134
+
let buffered_sink = BufferedUdpMetricSink::from(host, socket)?;
135
+
let queuing_sink = QueuingMetricSink::builder()
136
+
.with_error_handler(move |error| {
137
+
error!("Failed to send metric via sink: {}", error);
138
+
})
139
+
.build(buffered_sink);
140
+
let client = StatsdClient::from_sink(prefix, queuing_sink);
141
+
142
+
tracing::info!(
143
+
"StatsdMetricsPublisher created successfully with bind address: {}",
144
+
bind_addr
145
+
);
146
+
Ok(Self {
147
+
client,
148
+
default_tags,
149
+
})
150
+
}
151
+
152
+
/// Create from an existing StatsdClient
153
+
pub fn from_client(client: StatsdClient) -> Self {
154
+
Self::from_client_with_tags(client, vec![])
155
+
}
156
+
157
+
/// Create from an existing StatsdClient with default tags
158
+
pub fn from_client_with_tags(
159
+
client: StatsdClient,
160
+
default_tags: Vec<(String, String)>,
161
+
) -> Self {
162
+
Self {
163
+
client,
164
+
default_tags,
165
+
}
166
+
}
167
+
168
+
/// Apply default tags to a builder
169
+
fn apply_default_tags<'a, M>(
170
+
&'a self,
171
+
mut builder: cadence::MetricBuilder<'a, 'a, M>,
172
+
) -> cadence::MetricBuilder<'a, 'a, M>
173
+
where
174
+
M: Metric + From<String>,
175
+
{
176
+
for (k, v) in &self.default_tags {
177
+
builder = builder.with_tag(k.as_str(), v.as_str());
178
+
}
179
+
builder
180
+
}
181
+
}
182
+
183
+
#[async_trait]
184
+
impl MetricsPublisher for StatsdMetricsPublisher {
185
+
async fn incr(&self, key: &str) {
186
+
debug!("Sending metric incr: {}", key);
187
+
if self.default_tags.is_empty() {
188
+
match self.client.incr(key) {
189
+
Ok(_) => debug!("Successfully sent metric: {}", key),
190
+
Err(e) => error!("Failed to send metric {}: {}", key, e),
191
+
}
192
+
} else {
193
+
let builder = self.client.incr_with_tags(key);
194
+
let builder = self.apply_default_tags(builder);
195
+
let _ = builder.send();
196
+
debug!("Sent metric with tags: {}", key);
197
+
}
198
+
}
199
+
200
+
async fn count(&self, key: &str, value: u64) {
201
+
if self.default_tags.is_empty() {
202
+
let _ = self.client.count(key, value);
203
+
} else {
204
+
let builder = self.client.count_with_tags(key, value);
205
+
let builder = self.apply_default_tags(builder);
206
+
let _ = builder.send();
207
+
}
208
+
}
209
+
210
+
async fn incr_with_tags(&self, key: &str, tags: &[(&str, &str)]) {
211
+
let mut builder = self.client.incr_with_tags(key);
212
+
builder = self.apply_default_tags(builder);
213
+
for (k, v) in tags {
214
+
builder = builder.with_tag(k, v);
215
+
}
216
+
let _ = builder.send();
217
+
}
218
+
219
+
async fn count_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
220
+
let mut builder = self.client.count_with_tags(key, value);
221
+
builder = self.apply_default_tags(builder);
222
+
for (k, v) in tags {
223
+
builder = builder.with_tag(k, v);
224
+
}
225
+
let _ = builder.send();
226
+
}
227
+
228
+
async fn gauge(&self, key: &str, value: u64) {
229
+
debug!("Sending metric gauge: {} = {}", key, value);
230
+
if self.default_tags.is_empty() {
231
+
match self.client.gauge(key, value) {
232
+
Ok(_) => debug!("Successfully sent gauge: {} = {}", key, value),
233
+
Err(e) => error!("Failed to send gauge {} = {}: {}", key, value, e),
234
+
}
235
+
} else {
236
+
let builder = self.client.gauge_with_tags(key, value);
237
+
let builder = self.apply_default_tags(builder);
238
+
let _ = builder.send();
239
+
debug!("Sent gauge with tags: {} = {}", key, value);
240
+
}
241
+
}
242
+
243
+
async fn gauge_with_tags(&self, key: &str, value: u64, tags: &[(&str, &str)]) {
244
+
let mut builder = self.client.gauge_with_tags(key, value);
245
+
builder = self.apply_default_tags(builder);
246
+
for (k, v) in tags {
247
+
builder = builder.with_tag(k, v);
248
+
}
249
+
let _ = builder.send();
250
+
}
251
+
252
+
async fn time(&self, key: &str, millis: u64) {
253
+
if self.default_tags.is_empty() {
254
+
let _ = self.client.time(key, millis);
255
+
} else {
256
+
let builder = self.client.time_with_tags(key, millis);
257
+
let builder = self.apply_default_tags(builder);
258
+
let _ = builder.send();
259
+
}
260
+
}
261
+
262
+
async fn time_with_tags(&self, key: &str, millis: u64, tags: &[(&str, &str)]) {
263
+
let mut builder = self.client.time_with_tags(key, millis);
264
+
builder = self.apply_default_tags(builder);
265
+
for (k, v) in tags {
266
+
builder = builder.with_tag(k, v);
267
+
}
268
+
let _ = builder.send();
269
+
}
270
+
}
271
+
272
+
/// Type alias for shared metrics publisher
273
+
pub type SharedMetricsPublisher = Arc<dyn MetricsPublisher>;
274
+
275
+
/// Metrics-specific errors
276
+
#[derive(Debug, Error)]
277
+
pub enum MetricsError {
278
+
/// Failed to create metrics publisher
279
+
#[error("error-quickdid-metrics-1 Failed to create metrics publisher: {0}")]
280
+
CreationFailed(String),
281
+
282
+
/// Invalid configuration for metrics
283
+
#[error("error-quickdid-metrics-2 Invalid metrics configuration: {0}")]
284
+
InvalidConfig(String),
285
+
}
286
+
287
+
/// Create a metrics publisher based on configuration
288
+
///
289
+
/// Returns either a no-op publisher or a StatsD publisher based on the
290
+
/// `metrics_adapter` configuration value.
291
+
///
292
+
/// ## Example
293
+
///
294
+
/// ```rust,no_run
295
+
/// use quickdid::config::Config;
296
+
/// use quickdid::metrics::create_metrics_publisher;
297
+
///
298
+
/// # async fn example() -> Result<(), Box<dyn std::error::Error>> {
299
+
/// let config = Config::from_env()?;
300
+
/// let metrics = create_metrics_publisher(&config)?;
301
+
///
302
+
/// // Use the metrics publisher
303
+
/// metrics.incr("request.count").await;
304
+
/// # Ok(())
305
+
/// # }
306
+
/// ```
307
+
pub fn create_metrics_publisher(config: &Config) -> Result<SharedMetricsPublisher, MetricsError> {
308
+
match config.metrics_adapter.as_str() {
309
+
"noop" => Ok(Arc::new(NoOpMetricsPublisher::new())),
310
+
"statsd" => {
311
+
let host = config.metrics_statsd_host.as_ref().ok_or_else(|| {
312
+
MetricsError::InvalidConfig(
313
+
"METRICS_STATSD_HOST is required when using statsd adapter".to_string(),
314
+
)
315
+
})?;
316
+
317
+
// Parse tags from comma-separated key:value pairs
318
+
let default_tags = if let Some(tags_str) = &config.metrics_tags {
319
+
tags_str
320
+
.split(',')
321
+
.filter_map(|tag| {
322
+
let parts: Vec<&str> = tag.trim().split(':').collect();
323
+
if parts.len() == 2 {
324
+
Some((parts[0].to_string(), parts[1].to_string()))
325
+
} else {
326
+
error!("Invalid tag format: {}", tag);
327
+
None
328
+
}
329
+
})
330
+
.collect()
331
+
} else {
332
+
vec![]
333
+
};
334
+
335
+
let publisher = StatsdMetricsPublisher::new_with_bind_and_tags(
336
+
host,
337
+
&config.metrics_prefix,
338
+
&config.metrics_statsd_bind,
339
+
default_tags,
340
+
)
341
+
.map_err(|e| MetricsError::CreationFailed(e.to_string()))?;
342
+
343
+
Ok(Arc::new(publisher))
344
+
}
345
+
_ => Err(MetricsError::InvalidConfig(format!(
346
+
"Unknown metrics adapter: {}",
347
+
config.metrics_adapter
348
+
))),
349
+
}
350
+
}
351
+
352
+
#[cfg(test)]
353
+
mod tests {
354
+
use super::*;
355
+
use once_cell::sync::Lazy;
356
+
use std::sync::Mutex;
357
+
358
+
// Use a mutex to serialize tests that modify environment variables
359
+
static ENV_MUTEX: Lazy<Mutex<()>> = Lazy::new(|| Mutex::new(()));
360
+
361
+
#[tokio::test]
362
+
async fn test_noop_metrics() {
363
+
let metrics = NoOpMetricsPublisher::new();
364
+
365
+
// These should all be no-ops and not panic
366
+
metrics.incr("test.counter").await;
367
+
metrics.count("test.counter", 5).await;
368
+
metrics
369
+
.incr_with_tags("test.counter", &[("env", "test")])
370
+
.await;
371
+
metrics
372
+
.count_with_tags(
373
+
"test.counter",
374
+
10,
375
+
&[("env", "test"), ("service", "quickdid")],
376
+
)
377
+
.await;
378
+
metrics.gauge("test.gauge", 100).await;
379
+
metrics
380
+
.gauge_with_tags("test.gauge", 200, &[("host", "localhost")])
381
+
.await;
382
+
metrics.time("test.timing", 42).await;
383
+
metrics
384
+
.time_with_tags("test.timing", 84, &[("endpoint", "/resolve")])
385
+
.await;
386
+
}
387
+
388
+
#[tokio::test]
389
+
async fn test_shared_metrics() {
390
+
let metrics: SharedMetricsPublisher = Arc::new(NoOpMetricsPublisher::new());
391
+
392
+
// Verify it can be used as a shared reference
393
+
metrics.incr("shared.counter").await;
394
+
metrics.gauge("shared.gauge", 50).await;
395
+
396
+
// Verify it can be cloned
397
+
let metrics2 = Arc::clone(&metrics);
398
+
metrics2.count("cloned.counter", 3).await;
399
+
}
400
+
401
+
#[test]
402
+
fn test_create_noop_publisher() {
403
+
use std::env;
404
+
405
+
// Lock mutex to prevent concurrent environment variable modification
406
+
let _guard = ENV_MUTEX.lock().unwrap();
407
+
408
+
// Clean up any existing environment variables first
409
+
unsafe {
410
+
env::remove_var("METRICS_ADAPTER");
411
+
env::remove_var("METRICS_STATSD_HOST");
412
+
env::remove_var("METRICS_PREFIX");
413
+
env::remove_var("METRICS_TAGS");
414
+
}
415
+
416
+
// Set up environment for noop adapter
417
+
unsafe {
418
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
419
+
env::set_var("METRICS_ADAPTER", "noop");
420
+
}
421
+
422
+
let config = Config::from_env().unwrap();
423
+
let metrics = create_metrics_publisher(&config).unwrap();
424
+
425
+
// Should create successfully - actual type checking happens at compile time
426
+
assert_eq!(Arc::strong_count(&metrics), 1);
427
+
428
+
// Clean up
429
+
unsafe {
430
+
env::remove_var("METRICS_ADAPTER");
431
+
env::remove_var("HTTP_EXTERNAL");
432
+
}
433
+
}
434
+
435
+
#[test]
436
+
fn test_create_statsd_publisher() {
437
+
use std::env;
438
+
439
+
// Lock mutex to prevent concurrent environment variable modification
440
+
let _guard = ENV_MUTEX.lock().unwrap();
441
+
442
+
// Clean up any existing environment variables first
443
+
unsafe {
444
+
env::remove_var("METRICS_ADAPTER");
445
+
env::remove_var("METRICS_STATSD_HOST");
446
+
env::remove_var("METRICS_PREFIX");
447
+
env::remove_var("METRICS_TAGS");
448
+
}
449
+
450
+
// Set up environment for statsd adapter
451
+
unsafe {
452
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
453
+
env::set_var("METRICS_ADAPTER", "statsd");
454
+
env::set_var("METRICS_STATSD_HOST", "localhost:8125");
455
+
env::set_var("METRICS_PREFIX", "test");
456
+
env::set_var("METRICS_TAGS", "env:test,service:quickdid");
457
+
}
458
+
459
+
let config = Config::from_env().unwrap();
460
+
let metrics = create_metrics_publisher(&config).unwrap();
461
+
462
+
// Should create successfully
463
+
assert_eq!(Arc::strong_count(&metrics), 1);
464
+
465
+
// Clean up
466
+
unsafe {
467
+
env::remove_var("METRICS_ADAPTER");
468
+
env::remove_var("METRICS_STATSD_HOST");
469
+
env::remove_var("METRICS_PREFIX");
470
+
env::remove_var("METRICS_TAGS");
471
+
env::remove_var("HTTP_EXTERNAL");
472
+
}
473
+
}
474
+
475
+
#[test]
476
+
fn test_missing_statsd_host() {
477
+
use std::env;
478
+
479
+
// Lock mutex to prevent concurrent environment variable modification
480
+
let _guard = ENV_MUTEX.lock().unwrap();
481
+
482
+
// Clean up any existing environment variables first
483
+
unsafe {
484
+
env::remove_var("METRICS_ADAPTER");
485
+
env::remove_var("METRICS_STATSD_HOST");
486
+
env::remove_var("METRICS_PREFIX");
487
+
env::remove_var("METRICS_TAGS");
488
+
}
489
+
490
+
// Set up environment for statsd adapter without host
491
+
unsafe {
492
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
493
+
env::set_var("METRICS_ADAPTER", "statsd");
494
+
env::remove_var("METRICS_STATSD_HOST");
495
+
}
496
+
497
+
let config = Config::from_env().unwrap();
498
+
let result = create_metrics_publisher(&config);
499
+
500
+
// Should fail with invalid config error
501
+
assert!(result.is_err());
502
+
if let Err(e) = result {
503
+
assert!(matches!(e, MetricsError::InvalidConfig(_)));
504
+
}
505
+
506
+
// Clean up
507
+
unsafe {
508
+
env::remove_var("METRICS_ADAPTER");
509
+
env::remove_var("HTTP_EXTERNAL");
510
+
}
511
+
}
512
+
513
+
#[test]
514
+
fn test_invalid_adapter() {
515
+
use std::env;
516
+
517
+
// Lock mutex to prevent concurrent environment variable modification
518
+
let _guard = ENV_MUTEX.lock().unwrap();
519
+
520
+
// Clean up any existing environment variables first
521
+
unsafe {
522
+
env::remove_var("METRICS_ADAPTER");
523
+
env::remove_var("METRICS_STATSD_HOST");
524
+
env::remove_var("METRICS_PREFIX");
525
+
env::remove_var("METRICS_TAGS");
526
+
}
527
+
528
+
// Set up environment with invalid adapter
529
+
unsafe {
530
+
env::set_var("HTTP_EXTERNAL", "test.example.com");
531
+
env::set_var("METRICS_ADAPTER", "invalid");
532
+
env::remove_var("METRICS_STATSD_HOST"); // Clean up from other tests
533
+
}
534
+
535
+
let config = Config::from_env().unwrap();
536
+
537
+
// Config validation should catch this
538
+
let validation_result = config.validate();
539
+
assert!(validation_result.is_err());
540
+
541
+
// Clean up
542
+
unsafe {
543
+
env::remove_var("METRICS_ADAPTER");
544
+
env::remove_var("HTTP_EXTERNAL");
545
+
}
546
+
}
547
+
}
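A minimal sketch of the `MetricsPublisher` trait in use, timing a unit of work and tagging the result. The metric names and tags here are illustrative, not keys the service emits; swap `NoOpMetricsPublisher` for the StatsD-backed publisher in production:

```rust
use std::sync::Arc;
use std::time::Instant;

use quickdid::metrics::{NoOpMetricsPublisher, SharedMetricsPublisher};

#[tokio::main]
async fn main() {
    // Behind Arc<dyn MetricsPublisher>, callers never care which backend is wired up.
    let metrics: SharedMetricsPublisher = Arc::new(NoOpMetricsPublisher::new());

    let start = Instant::now();
    // ... perform the work being measured ...
    metrics
        .incr_with_tags("request.count", &[("path", "/resolve")])
        .await;
    metrics
        .time_with_tags(
            "request.duration_ms",
            start.elapsed().as_millis() as u64,
            &[("path", "/resolve")],
        )
        .await;
}
```

Because the publisher is shared via `Arc`, the same handle can be cloned into background tasks, which is how the resolver stack and the queue workers report into one sink.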
+60
-4
src/queue/factory.rs
+60
-4
src/queue/factory.rs
···
10
10
11
11
use super::{
12
12
adapter::QueueAdapter, mpsc::MpscQueueAdapter, noop::NoopQueueAdapter,
13
-
redis::RedisQueueAdapter, sqlite::SqliteQueueAdapter,
13
+
redis::RedisQueueAdapter, sqlite::SqliteQueueAdapter, work::DedupKey,
14
14
};
15
15
16
16
// ========= MPSC Queue Factories =========
···
81
81
/// # Examples
82
82
///
83
83
/// ```no_run
84
-
/// use quickdid::queue::create_redis_queue;
84
+
/// use quickdid::queue::{create_redis_queue, HandleResolutionWork};
85
85
/// use deadpool_redis::Config;
86
86
///
87
87
/// # async fn example() -> anyhow::Result<()> {
88
88
/// let cfg = Config::from_url("redis://localhost:6379");
89
89
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
90
90
///
91
-
/// let queue = create_redis_queue::<String>(
91
+
/// let queue = create_redis_queue::<HandleResolutionWork>(
92
92
/// pool,
93
93
/// "worker-1".to_string(),
94
94
/// "queue:myapp:".to_string(),
···
104
104
timeout_seconds: u64,
105
105
) -> Arc<dyn QueueAdapter<T>>
106
106
where
107
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
107
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
108
108
{
109
109
Arc::new(RedisQueueAdapter::new(
110
110
pool,
111
111
worker_id,
112
112
key_prefix,
113
113
timeout_seconds,
114
+
))
115
+
}
116
+
117
+
/// Create a new Redis-backed queue adapter with deduplication.
118
+
///
119
+
/// This creates a distributed queue with deduplication to prevent duplicate items
120
+
/// from being queued within the specified TTL window.
121
+
///
122
+
/// # Arguments
123
+
///
124
+
/// * `pool` - Redis connection pool
125
+
/// * `worker_id` - Worker identifier for this queue instance
126
+
/// * `key_prefix` - Redis key prefix for queue operations
127
+
/// * `timeout_seconds` - Timeout for blocking operations
128
+
/// * `dedup_enabled` - Whether to enable deduplication
129
+
/// * `dedup_ttl` - TTL for deduplication keys in seconds
130
+
///
131
+
/// # Examples
132
+
///
133
+
/// ```no_run
134
+
/// use quickdid::queue::{create_redis_queue_with_dedup, HandleResolutionWork};
135
+
/// use deadpool_redis::Config;
136
+
///
137
+
/// # async fn example() -> anyhow::Result<()> {
138
+
/// let cfg = Config::from_url("redis://localhost:6379");
139
+
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
140
+
///
141
+
/// let queue = create_redis_queue_with_dedup::<HandleResolutionWork>(
142
+
/// pool,
143
+
/// "worker-1".to_string(),
144
+
/// "queue:myapp:".to_string(),
145
+
/// 5,
146
+
/// true, // Enable deduplication
147
+
/// 60, // 60 second dedup window
148
+
/// );
149
+
/// # Ok(())
150
+
/// # }
151
+
/// ```
152
+
pub fn create_redis_queue_with_dedup<T>(
153
+
pool: RedisPool,
154
+
worker_id: String,
155
+
key_prefix: String,
156
+
timeout_seconds: u64,
157
+
dedup_enabled: bool,
158
+
dedup_ttl: u64,
159
+
) -> Arc<dyn QueueAdapter<T>>
160
+
where
161
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
162
+
{
163
+
Arc::new(RedisQueueAdapter::with_dedup(
164
+
pool,
165
+
worker_id,
166
+
key_prefix,
167
+
timeout_seconds,
168
+
dedup_enabled,
169
+
dedup_ttl,
114
170
))
115
171
}
116
172
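To make the dedup window concrete: two pushes of the same handle inside the TTL collapse to a single queued item. A sketch under the same assumptions as the doc example above (local Redis, placeholder worker id and prefix):

```rust
use deadpool_redis::Config;
use quickdid::queue::{create_redis_queue_with_dedup, HandleResolutionWork};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let pool = Config::from_url("redis://localhost:6379")
        .create_pool(Some(deadpool_redis::Runtime::Tokio1))?;

    let queue = create_redis_queue_with_dedup::<HandleResolutionWork>(
        pool,
        "worker-1".to_string(),
        "queue:myapp:".to_string(),
        5,    // blocking-pull timeout in seconds
        true, // deduplication enabled
        60,   // 60-second dedup window
    );

    let work = HandleResolutionWork::new("alice.example.com".to_string());
    queue.push(work.clone()).await?;
    queue.push(work).await?; // silently collapsed by the dedup marker

    assert_eq!(queue.depth().await, Some(1));
    Ok(())
}
```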
+2
-2
src/queue/mod.rs
+2
-2
src/queue/mod.rs
···
63
63
// Re-export core types
64
64
pub use adapter::QueueAdapter;
65
65
pub use error::{QueueError, Result};
66
-
pub use work::HandleResolutionWork;
66
+
pub use work::{DedupKey, HandleResolutionWork};
67
67
68
68
// Re-export implementations (with limited visibility)
69
69
pub use mpsc::MpscQueueAdapter;
···
74
74
// Re-export factory functions
75
75
pub use factory::{
76
76
create_mpsc_queue, create_mpsc_queue_from_channel, create_noop_queue, create_redis_queue,
77
-
create_sqlite_queue, create_sqlite_queue_with_max_size,
77
+
create_redis_queue_with_dedup, create_sqlite_queue, create_sqlite_queue_with_max_size,
78
78
};
+237
-5
src/queue/redis.rs
+237
-5
src/queue/redis.rs
···
10
10
11
11
use super::adapter::QueueAdapter;
12
12
use super::error::{QueueError, Result};
13
+
use super::work::DedupKey;
13
14
14
15
/// Redis-backed queue adapter implementation.
15
16
///
···
40
41
/// # Examples
41
42
///
42
43
/// ```no_run
43
-
/// use quickdid::queue::RedisQueueAdapter;
44
-
/// use quickdid::queue::QueueAdapter;
44
+
/// use quickdid::queue::{RedisQueueAdapter, QueueAdapter, HandleResolutionWork};
45
45
/// use deadpool_redis::Config;
46
46
///
47
47
/// # async fn example() -> anyhow::Result<()> {
···
50
50
/// let pool = cfg.create_pool(Some(deadpool_redis::Runtime::Tokio1))?;
51
51
///
52
52
/// // Create queue adapter
53
-
/// let queue = RedisQueueAdapter::<String>::new(
53
+
/// let queue = RedisQueueAdapter::<HandleResolutionWork>::new(
54
54
/// pool,
55
55
/// "worker-1".to_string(),
56
56
/// "queue:myapp:".to_string(),
···
58
58
/// );
59
59
///
60
60
/// // Use the queue
61
-
/// queue.push("work-item".to_string()).await?;
61
+
/// let work = HandleResolutionWork::new("alice.bsky.social".to_string());
62
+
/// queue.push(work.clone()).await?;
62
63
/// if let Some(item) = queue.pull().await {
63
64
/// // Process item
64
65
/// queue.ack(&item).await?;
···
78
79
key_prefix: String,
79
80
/// Timeout for blocking RPOPLPUSH operations (in seconds)
80
81
timeout_seconds: u64,
82
+
/// Enable deduplication to prevent duplicate items in queue
83
+
dedup_enabled: bool,
84
+
/// TTL for deduplication keys in seconds
85
+
dedup_ttl: u64,
81
86
/// Type marker for generic parameter
82
87
_phantom: std::marker::PhantomData<T>,
83
88
}
···
120
125
key_prefix: String,
121
126
timeout_seconds: u64,
122
127
) -> Self {
128
+
Self::with_dedup(
129
+
pool,
130
+
worker_id,
131
+
key_prefix,
132
+
timeout_seconds,
133
+
false,
134
+
60, // Default TTL of 60 seconds
135
+
)
136
+
}
137
+
138
+
/// Create a new Redis queue adapter with deduplication settings.
139
+
///
140
+
/// # Arguments
141
+
///
142
+
/// * `pool` - Redis connection pool
143
+
/// * `worker_id` - Unique identifier for this worker instance
144
+
/// * `key_prefix` - Redis key prefix for queue operations
145
+
/// * `timeout_seconds` - Timeout for blocking pull operations
146
+
/// * `dedup_enabled` - Whether to enable deduplication
147
+
/// * `dedup_ttl` - TTL for deduplication keys in seconds
148
+
pub fn with_dedup(
149
+
pool: RedisPool,
150
+
worker_id: String,
151
+
key_prefix: String,
152
+
timeout_seconds: u64,
153
+
dedup_enabled: bool,
154
+
dedup_ttl: u64,
155
+
) -> Self {
123
156
Self {
124
157
pool,
125
158
worker_id,
126
159
key_prefix,
127
160
timeout_seconds,
161
+
dedup_enabled,
162
+
dedup_ttl,
128
163
_phantom: std::marker::PhantomData,
129
164
}
130
165
}
···
138
173
fn worker_queue_key(&self) -> String {
139
174
format!("{}{}", self.key_prefix, self.worker_id)
140
175
}
176
+
177
+
/// Get the deduplication key for an item.
178
+
/// This key is used to track if an item is already queued.
179
+
fn dedup_key(&self, item_id: &str) -> String {
180
+
format!("{}dedup:{}", self.key_prefix, item_id)
181
+
}
182
+
183
+
/// Check and mark an item for deduplication.
184
+
/// Returns true if the item was successfully marked (not duplicate),
185
+
/// false if it was already in the deduplication set (duplicate).
186
+
async fn check_and_mark_dedup(
187
+
&self,
188
+
conn: &mut deadpool_redis::Connection,
189
+
item_id: &str,
190
+
) -> Result<bool> {
191
+
if !self.dedup_enabled {
192
+
return Ok(true); // Always allow if dedup is disabled
193
+
}
194
+
195
+
let dedup_key = self.dedup_key(item_id);
196
+
197
+
// Use SET NX EX to atomically set if not exists with expiry
198
+
// Returns OK if the key was set, Nil if it already existed
199
+
let result: Option<String> = deadpool_redis::redis::cmd("SET")
200
+
.arg(&dedup_key)
201
+
.arg("1")
202
+
.arg("NX") // Only set if not exists
203
+
.arg("EX") // Set expiry
204
+
.arg(self.dedup_ttl)
205
+
.query_async(conn)
206
+
.await
207
+
.map_err(|e| QueueError::RedisOperationFailed {
208
+
operation: "SET NX EX".to_string(),
209
+
details: e.to_string(),
210
+
})?;
211
+
212
+
// If result is Some("OK"), the key was set (not duplicate)
213
+
// If result is None, the key already existed (duplicate)
214
+
Ok(result.is_some())
215
+
}
141
216
}
142
217
143
218
#[async_trait]
144
219
impl<T> QueueAdapter<T> for RedisQueueAdapter<T>
145
220
where
146
-
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + 'static,
221
+
T: Send + Sync + Serialize + for<'de> Deserialize<'de> + DedupKey + 'static,
147
222
{
148
223
async fn pull(&self) -> Option<T> {
149
224
match self.pool.get().await {
···
198
273
.get()
199
274
.await
200
275
.map_err(|e| QueueError::RedisConnectionFailed(e.to_string()))?;
276
+
277
+
// Check for deduplication if enabled
278
+
if self.dedup_enabled {
279
+
let dedup_id = work.dedup_key();
280
+
let is_new = self.check_and_mark_dedup(&mut conn, &dedup_id).await?;
281
+
282
+
if !is_new {
283
+
debug!(
284
+
dedup_key = %dedup_id,
285
+
"Item already queued, skipping duplicate"
286
+
);
287
+
return Ok(()); // Successfully deduplicated
288
+
}
289
+
}
201
290
202
291
let data = serde_json::to_vec(&work)
203
292
.map_err(|e| QueueError::SerializationFailed(e.to_string()))?;
···
429
518
430
519
// Should be healthy if Redis is running
431
520
assert!(adapter.is_healthy().await);
521
+
}
522
+
523
+
#[tokio::test]
524
+
async fn test_redis_queue_deduplication() {
525
+
use crate::queue::HandleResolutionWork;
526
+
527
+
let pool = match crate::test_helpers::get_test_redis_pool() {
528
+
Some(p) => p,
529
+
None => {
530
+
eprintln!("Skipping Redis test - no Redis connection available");
531
+
return;
532
+
}
533
+
};
534
+
535
+
let test_prefix = format!(
536
+
"test:queue:dedup:{}:",
537
+
std::time::SystemTime::now()
538
+
.duration_since(std::time::UNIX_EPOCH)
539
+
.unwrap()
540
+
.as_nanos()
541
+
);
542
+
543
+
// Create adapter with deduplication enabled
544
+
let adapter = RedisQueueAdapter::<HandleResolutionWork>::with_dedup(
545
+
pool.clone(),
546
+
"test-worker-dedup".to_string(),
547
+
test_prefix.clone(),
548
+
1,
549
+
true, // Enable deduplication
550
+
2, // 2 second TTL for quick testing
551
+
);
552
+
553
+
let work = HandleResolutionWork::new("alice.example.com".to_string());
554
+
555
+
// First push should succeed
556
+
adapter
557
+
.push(work.clone())
558
+
.await
559
+
.expect("First push should succeed");
560
+
561
+
// Second push of same item should be deduplicated (but still return Ok)
562
+
adapter
563
+
.push(work.clone())
564
+
.await
565
+
.expect("Second push should succeed (deduplicated)");
566
+
567
+
// Queue should only have one item
568
+
let depth = adapter.depth().await;
569
+
assert_eq!(
570
+
depth,
571
+
Some(1),
572
+
"Queue should only have one item after deduplication"
573
+
);
574
+
575
+
// Pull the item
576
+
let pulled = adapter.pull().await;
577
+
assert_eq!(pulled, Some(work.clone()));
578
+
579
+
// Queue should now be empty
580
+
let depth = adapter.depth().await;
581
+
assert_eq!(depth, Some(0), "Queue should be empty after pulling");
582
+
583
+
// Wait for dedup TTL to expire
584
+
tokio::time::sleep(tokio::time::Duration::from_secs(3)).await;
585
+
586
+
// Should be able to push again after TTL expires
587
+
adapter
588
+
.push(work.clone())
589
+
.await
590
+
.expect("Push after TTL expiry should succeed");
591
+
592
+
let depth = adapter.depth().await;
593
+
assert_eq!(
594
+
depth,
595
+
Some(1),
596
+
"Queue should have one item after TTL expiry"
597
+
);
598
+
}
599
+
600
+
#[tokio::test]
601
+
async fn test_redis_queue_deduplication_disabled() {
602
+
use crate::queue::HandleResolutionWork;
603
+
604
+
let pool = match crate::test_helpers::get_test_redis_pool() {
605
+
Some(p) => p,
606
+
None => {
607
+
eprintln!("Skipping Redis test - no Redis connection available");
608
+
return;
609
+
}
610
+
};
611
+
612
+
let test_prefix = format!(
613
+
"test:queue:nodedup:{}:",
614
+
std::time::SystemTime::now()
615
+
.duration_since(std::time::UNIX_EPOCH)
616
+
.unwrap()
617
+
.as_nanos()
618
+
);
619
+
620
+
// Create adapter with deduplication disabled
621
+
let adapter = RedisQueueAdapter::<HandleResolutionWork>::with_dedup(
622
+
pool.clone(),
623
+
"test-worker-nodedup".to_string(),
624
+
test_prefix.clone(),
625
+
1,
626
+
false, // Disable deduplication
627
+
60,
628
+
);
629
+
630
+
let work = HandleResolutionWork::new("bob.example.com".to_string());
631
+
632
+
// Push same item twice
633
+
adapter
634
+
.push(work.clone())
635
+
.await
636
+
.expect("First push should succeed");
637
+
adapter
638
+
.push(work.clone())
639
+
.await
640
+
.expect("Second push should succeed");
641
+
642
+
// Queue should have two items (no deduplication)
643
+
let depth = adapter.depth().await;
644
+
assert_eq!(
645
+
depth,
646
+
Some(2),
647
+
"Queue should have two items when deduplication is disabled"
648
+
);
649
+
650
+
// Pull both items
651
+
let pulled1 = adapter.pull().await;
652
+
assert_eq!(pulled1, Some(work.clone()));
653
+
654
+
let pulled2 = adapter.pull().await;
655
+
assert_eq!(pulled2, Some(work.clone()));
656
+
657
+
// Queue should now be empty
658
+
let depth = adapter.depth().await;
659
+
assert_eq!(
660
+
depth,
661
+
Some(0),
662
+
"Queue should be empty after pulling all items"
663
+
);
432
664
}
433
665
434
666
#[tokio::test]
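Pulling together `worker_queue_key` and `dedup_key` from above, the adapter's Redis footprint is two kinds of keys per prefix. A sketch of the layout (prefix, worker id, and handle are placeholders):

```rust
/// Redis key layout used by RedisQueueAdapter:
///   {prefix}{worker_id}        -> the work list, consumed via blocking RPOPLPUSH
///   {prefix}dedup:{dedup_key}  -> SET NX EX marker that expires after dedup_ttl
fn queue_keys(prefix: &str, worker_id: &str, handle: &str) -> (String, String) {
    let queue_key = format!("{}{}", prefix, worker_id);
    let dedup_key = format!("{}dedup:{}", prefix, handle);
    (queue_key, dedup_key)
}

fn main() {
    let (queue_key, dedup_key) = queue_keys("queue:myapp:", "worker-1", "alice.example.com");
    assert_eq!(queue_key, "queue:myapp:worker-1");
    assert_eq!(dedup_key, "queue:myapp:dedup:alice.example.com");
}
```

The `SET NX EX` marker is what makes the check-and-mark atomic: a plain `EXISTS` followed by `SET` would leave a window in which two workers could both enqueue the same handle.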
+38
src/queue/work.rs
+38
src/queue/work.rs
···
50
50
}
51
51
}
52
52
53
+
/// Trait for getting a unique deduplication key from a work item.
54
+
/// This is used by the Redis queue adapter to prevent duplicate items.
55
+
pub trait DedupKey {
56
+
/// Get a unique key for deduplication purposes.
57
+
/// This should return a consistent identifier for equivalent work items.
58
+
fn dedup_key(&self) -> String;
59
+
}
60
+
61
+
impl DedupKey for HandleResolutionWork {
62
+
fn dedup_key(&self) -> String {
63
+
// Use the handle itself as the dedup key
64
+
self.handle.clone()
65
+
}
66
+
}
67
+
68
+
// For testing purposes, implement DedupKey for String
69
+
#[cfg(test)]
70
+
impl DedupKey for String {
71
+
fn dedup_key(&self) -> String {
72
+
self.clone()
73
+
}
74
+
}
75
+
53
76
#[cfg(test)]
54
77
mod tests {
55
78
use super::*;
···
91
114
92
115
assert_eq!(work1, work2);
93
116
assert_ne!(work1, work3);
117
+
}
118
+
119
+
#[test]
120
+
fn test_handle_resolution_work_dedup_key() {
121
+
let work1 = HandleResolutionWork::new("alice.example.com".to_string());
122
+
let work2 = HandleResolutionWork::new("alice.example.com".to_string());
123
+
let work3 = HandleResolutionWork::new("bob.example.com".to_string());
124
+
125
+
// Same handle should have same dedup key
126
+
assert_eq!(work1.dedup_key(), work2.dedup_key());
127
+
assert_eq!(work1.dedup_key(), "alice.example.com");
128
+
129
+
// Different handle should have different dedup key
130
+
assert_ne!(work1.dedup_key(), work3.dedup_key());
131
+
assert_eq!(work3.dedup_key(), "bob.example.com");
94
132
}
95
133
}
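Since `HandleResolutionWork` keys its dedup on the handle, any other work type only needs the same one-method treatment. A sketch with a hypothetical `DidRefreshWork` type (illustrative only, not part of the codebase):

```rust
use quickdid::queue::DedupKey;
use serde::{Deserialize, Serialize};

/// Hypothetical work item, shown only to illustrate the trait.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
struct DidRefreshWork {
    did: String,
}

impl DedupKey for DidRefreshWork {
    fn dedup_key(&self) -> String {
        // Equivalent refreshes for the same DID collapse to one queued item.
        self.did.clone()
    }
}
```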
+364
test-scripts/docker-test.sh
+364
test-scripts/docker-test.sh
···
1
+
#!/bin/bash
2
+
3
+
# Comprehensive test script for Telegraf/TimescaleDB metrics setup
4
+
# This script validates the entire metrics pipeline
5
+
6
+
set -e
7
+
8
+
echo "========================================="
9
+
echo "Telegraf/TimescaleDB Metrics Test Suite"
10
+
echo "========================================="
11
+
echo ""
12
+
13
+
# Check if Docker is running
14
+
if ! docker info > /dev/null 2>&1; then
15
+
echo "โ Docker is not running. Please start Docker first."
16
+
exit 1
17
+
fi
18
+
19
+
# Function to wait for a service to be healthy
20
+
wait_for_service() {
21
+
local service=$1
22
+
local max_attempts=30
23
+
local attempt=1
24
+
25
+
echo -n "Waiting for $service to be healthy"
26
+
while [ $attempt -le $max_attempts ]; do
27
+
if docker-compose ps $service | grep -q "healthy"; then
28
+
echo " โ
"
29
+
return 0
30
+
fi
31
+
echo -n "."
32
+
sleep 2
33
+
attempt=$((attempt + 1))
34
+
done
35
+
echo " โ"
36
+
echo "Service $service failed to become healthy after $max_attempts attempts"
37
+
return 1
38
+
}
39
+
40
+
# Function to run SQL query
41
+
run_query() {
42
+
docker exec -i timescaledb psql -U postgres -d metrics -t -c "$1" 2>/dev/null
43
+
}
44
+
45
+
# Function to check table exists
46
+
check_table() {
47
+
local table=$1
48
+
local result=$(run_query "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_name = '$table');")
49
+
if [[ "$result" =~ "t" ]]; then
50
+
echo "โ
Table '$table' exists"
51
+
return 0
52
+
else
53
+
echo "โ Table '$table' does not exist"
54
+
return 1
55
+
fi
56
+
}
57
+
58
+
# Navigate to the metrics-stack directory (create if needed)
59
+
if [ ! -d "metrics-stack" ]; then
60
+
echo "Creating metrics-stack directory..."
61
+
mkdir -p metrics-stack/telegraf
62
+
mkdir -p metrics-stack/test-scripts
63
+
mkdir -p metrics-stack/init-scripts
64
+
fi
65
+
66
+
cd metrics-stack
67
+
68
+
# Create .env file if it doesn't exist
69
+
if [ ! -f ".env" ]; then
70
+
echo "Creating .env file..."
71
+
cat > .env << 'EOF'
72
+
# PostgreSQL/TimescaleDB Configuration
73
+
POSTGRES_DB=metrics
74
+
POSTGRES_USER=postgres
75
+
POSTGRES_PASSWORD=secretpassword
76
+
77
+
# Telegraf Database User
78
+
TELEGRAF_DB_USER=postgres
79
+
TELEGRAF_DB_PASSWORD=secretpassword
80
+
81
+
# TimescaleDB Settings
82
+
TIMESCALE_TELEMETRY=off
83
+
EOF
84
+
fi
85
+
86
+
# Copy configuration files if they don't exist
87
+
if [ ! -f "telegraf/telegraf.conf" ]; then
88
+
echo "Creating telegraf.conf..."
89
+
cat > telegraf/telegraf.conf << 'EOF'
90
+
[agent]
91
+
interval = "10s"
92
+
round_interval = true
93
+
metric_batch_size = 1000
94
+
metric_buffer_limit = 10000
95
+
collection_jitter = "0s"
96
+
flush_interval = "10s"
97
+
flush_jitter = "0s"
98
+
precision = ""
99
+
debug = false
100
+
quiet = false
101
+
hostname = "telegraf-agent"
102
+
omit_hostname = false
103
+
104
+
[[inputs.statsd]]
105
+
service_address = ":8125"
106
+
protocol = "udp"
107
+
delete_gauges = true
108
+
delete_counters = true
109
+
delete_sets = true
110
+
delete_timings = true
111
+
percentiles = [50, 90, 95, 99]
112
+
metric_separator = "."
113
+
allowed_pending_messages = 10000
114
+
datadog_extensions = true
115
+
datadog_distributions = true
116
+
117
+
[[outputs.postgresql]]
118
+
connection = "host=timescaledb user=${TELEGRAF_DB_USER} password=${TELEGRAF_DB_PASSWORD} dbname=${POSTGRES_DB} sslmode=disable"
119
+
schema = "public"
120
+
create_templates = [
121
+
'''CREATE TABLE IF NOT EXISTS {{.table}} ({{.columns}})''',
122
+
'''SELECT create_hypertable({{.table|quoteLiteral}}, 'time', if_not_exists => TRUE)''',
123
+
]
124
+
tags_as_jsonb = true
125
+
fields_as_jsonb = false
126
+
EOF
127
+
fi
128
+
129
+
# Copy docker-compose.yml if it doesn't exist
130
+
if [ ! -f "docker-compose.yml" ]; then
131
+
echo "Creating docker-compose.yml..."
132
+
cat > docker-compose.yml << 'EOF'
133
+
version: '3.8'
134
+
135
+
services:
136
+
timescaledb:
137
+
image: timescale/timescaledb:latest-pg17
138
+
container_name: timescaledb
139
+
restart: unless-stopped
140
+
environment:
141
+
POSTGRES_DB: ${POSTGRES_DB}
142
+
POSTGRES_USER: ${POSTGRES_USER}
143
+
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
144
+
TIMESCALE_TELEMETRY: ${TIMESCALE_TELEMETRY}
145
+
ports:
146
+
- "5442:5432"
147
+
volumes:
148
+
- timescale_data:/home/postgres/pgdata/data
149
+
- ./init-scripts:/docker-entrypoint-initdb.d:ro
150
+
command:
151
+
- postgres
152
+
- -c
153
+
- shared_buffers=256MB
154
+
- -c
155
+
- effective_cache_size=1GB
156
+
- -c
157
+
- maintenance_work_mem=64MB
158
+
- -c
159
+
- work_mem=8MB
160
+
healthcheck:
161
+
test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER} -d ${POSTGRES_DB}"]
162
+
interval: 10s
163
+
timeout: 5s
164
+
retries: 5
165
+
networks:
166
+
- metrics_network
167
+
168
+
telegraf:
169
+
image: telegraf:1.35
170
+
container_name: telegraf
171
+
restart: unless-stopped
172
+
environment:
173
+
TELEGRAF_DB_USER: ${TELEGRAF_DB_USER}
174
+
TELEGRAF_DB_PASSWORD: ${TELEGRAF_DB_PASSWORD}
175
+
POSTGRES_DB: ${POSTGRES_DB}
176
+
ports:
177
+
- "8125:8125/udp"
178
+
volumes:
179
+
- ./telegraf/telegraf.conf:/etc/telegraf/telegraf.conf:ro
180
+
depends_on:
181
+
timescaledb:
182
+
condition: service_healthy
183
+
networks:
184
+
- metrics_network
185
+
command: ["telegraf", "--config", "/etc/telegraf/telegraf.conf"]
186
+
187
+
networks:
188
+
metrics_network:
189
+
driver: bridge
190
+
191
+
volumes:
192
+
timescale_data:
193
+
EOF
194
+
fi
195
+
196
+
# Create init script
197
+
if [ ! -f "init-scripts/01-init.sql" ]; then
198
+
echo "Creating init script..."
199
+
cat > init-scripts/01-init.sql << 'EOF'
200
+
-- Enable TimescaleDB extension
201
+
CREATE EXTENSION IF NOT EXISTS timescaledb;
202
+
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
203
+
EOF
204
+
fi
205
+
206
+
echo ""
207
+
echo "Step 1: Starting Docker services..."
208
+
echo "========================================="
209
+
docker-compose down -v 2>/dev/null || true
210
+
docker-compose up -d
211
+
212
+
echo ""
213
+
echo "Step 2: Waiting for services to be healthy..."
214
+
echo "========================================="
215
+
wait_for_service timescaledb
216
+
sleep 5 # Extra time for Telegraf to connect
217
+
218
+
echo ""
219
+
echo "Step 3: Sending test metrics..."
220
+
echo "========================================="
221
+
222
+
# Send various types of metrics
223
+
echo "Sending counter metrics..."
224
+
for i in {1..5}; do
225
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
226
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
227
+
done
228
+
229
+
echo "Sending gauge metrics..."
230
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
231
+
sleep 1
232
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
233
+
234
+
echo "Sending timing metrics..."
235
+
for i in {1..10}; do
236
+
duration=$((RANDOM % 100 + 10))
237
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
238
+
done
239
+
240
+
echo "Sending histogram metrics..."
241
+
for i in {1..5}; do
242
+
resolution_time=$((RANDOM % 500 + 50))
243
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
244
+
done
245
+
246
+
echo "Waiting 15 seconds for Telegraf to flush metrics..."
247
+
sleep 15
248
+
249
+
echo ""
250
+
echo "Step 4: Verifying table creation..."
251
+
echo "========================================="
252
+
253
+
# Check if tables were created
254
+
check_table "quickdid.http.request.count"
255
+
check_table "quickdid.http.request.duration_ms"
256
+
check_table "quickdid.resolver.rate_limit.available_permits"
257
+
check_table "quickdid.resolver.resolution_time"
258
+
259
+
echo ""
260
+
echo "Step 5: Verifying data insertion..."
261
+
echo "========================================="
262
+
263
+
# Check row counts
264
+
for table in "quickdid.http.request.count" "quickdid.http.request.duration_ms" "quickdid.resolver.rate_limit.available_permits" "quickdid.resolver.resolution_time"; do
265
+
count=$(run_query "SELECT COUNT(*) FROM \"$table\";" | tr -d ' ')
266
+
if [ "$count" -gt 0 ]; then
267
+
echo "โ
Table '$table' has $count rows"
268
+
else
269
+
echo "โ Table '$table' is empty"
270
+
fi
271
+
done
272
+
273
+
echo ""
274
+
echo "Step 6: Testing JSONB tag queries..."
275
+
echo "========================================="
276
+
277
+
# Test JSONB tag filtering
278
+
result=$(run_query "SELECT COUNT(*) FROM \"quickdid.http.request.count\" WHERE tags->>'method' = 'GET';" | tr -d ' ')
279
+
if [ "$result" -gt 0 ]; then
280
+
echo "โ
JSONB tag filtering works (found $result GET requests)"
281
+
else
282
+
echo "โ JSONB tag filtering failed"
283
+
fi
284
+
285
+
echo ""
286
+
echo "Step 7: Testing TimescaleDB functions..."
287
+
echo "========================================="
288
+
289
+
# Test time_bucket function
290
+
result=$(run_query "SELECT COUNT(*) FROM (SELECT time_bucket('1 minute', time) FROM \"quickdid.http.request.count\" GROUP BY 1) t;" | tr -d ' ')
291
+
if [ "$result" -gt 0 ]; then
292
+
echo "โ
time_bucket function works"
293
+
else
294
+
echo "โ time_bucket function failed"
295
+
fi
296
+
297
+
# Check if hypertables were created
298
+
hypertable_count=$(run_query "SELECT COUNT(*) FROM timescaledb_information.hypertables WHERE hypertable_name LIKE 'quickdid%';" | tr -d ' ')
299
+
if [ "$hypertable_count" -gt 0 ]; then
300
+
echo "โ
Found $hypertable_count hypertables"
301
+
else
302
+
echo "โ No hypertables found"
303
+
fi
304
+
305
+
echo ""
306
+
echo "Step 8: Running comprehensive query tests..."
307
+
echo "========================================="
308
+
309
+
# Run the verify-queries.sql script if it exists
310
+
if [ -f "../test-scripts/verify-queries.sql" ]; then
311
+
echo "Running verify-queries.sql..."
312
+
docker exec -i timescaledb psql -U postgres -d metrics < ../test-scripts/verify-queries.sql > query_results.txt 2>&1
313
+
if [ $? -eq 0 ]; then
314
+
echo "โ
All queries executed successfully"
315
+
echo " Results saved to query_results.txt"
316
+
else
317
+
echo "โ Some queries failed. Check query_results.txt for details"
318
+
fi
319
+
else
320
+
echo "โ ๏ธ verify-queries.sql not found, skipping comprehensive query tests"
321
+
fi
322
+
323
+
echo ""
324
+
echo "========================================="
325
+
echo "Test Summary"
326
+
echo "========================================="
327
+
328
+
# Generate summary
329
+
failures=0
330
+
successes=0
331
+
332
+
# Count successes and failures from the output
333
+
if check_table "quickdid.http.request.count" > /dev/null 2>&1; then
334
+
successes=$((successes + 1))
335
+
else
336
+
failures=$((failures + 1))
337
+
fi
338
+
339
+
if [ "$hypertable_count" -gt 0 ]; then
340
+
successes=$((successes + 1))
341
+
else
342
+
failures=$((failures + 1))
343
+
fi
344
+
345
+
echo ""
346
+
if [ $failures -eq 0 ]; then
347
+
echo "โ
All tests passed successfully!"
348
+
echo ""
349
+
echo "You can now:"
350
+
echo "1. Connect to the database: docker exec -it timescaledb psql -U postgres -d metrics"
351
+
echo "2. View logs: docker-compose logs -f"
352
+
echo "3. Send more metrics: echo 'metric.name:value|type|#tag:value' | nc -u -w0 localhost 8125"
353
+
echo "4. Stop services: docker-compose down"
354
+
else
355
+
echo "โ ๏ธ Some tests failed. Please check the output above for details."
356
+
echo ""
357
+
echo "Troubleshooting tips:"
358
+
echo "1. Check Telegraf logs: docker-compose logs telegraf"
359
+
echo "2. Check TimescaleDB logs: docker-compose logs timescaledb"
360
+
echo "3. Verify connectivity: docker exec telegraf telegraf --test"
361
+
fi
362
+
363
+
echo ""
364
+
echo "Test complete!"
+44
test-scripts/send-metrics.sh
+44
test-scripts/send-metrics.sh
···
1
+
#!/bin/bash
2
+
3
+
# Send test metrics to StatsD/Telegraf
4
+
5
+
echo "Sending test metrics to StatsD on localhost:8125..."
6
+
7
+
# Counter metrics
8
+
for i in {1..10}; do
9
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
10
+
echo "quickdid.http.request.count:1|c|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
11
+
echo "quickdid.http.request.count:1|c|#method:GET,path:/resolve,status:404" | nc -u -w0 localhost 8125
12
+
done
13
+
14
+
# Gauge metrics
15
+
echo "quickdid.resolver.rate_limit.available_permits:10|g" | nc -u -w0 localhost 8125
16
+
echo "quickdid.resolver.rate_limit.available_permits:8|g" | nc -u -w0 localhost 8125
17
+
echo "quickdid.resolver.rate_limit.available_permits:5|g" | nc -u -w0 localhost 8125
18
+
19
+
# Timing metrics (in milliseconds)
20
+
for i in {1..20}; do
21
+
duration=$((RANDOM % 100 + 10))
22
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:GET,path:/resolve,status:200" | nc -u -w0 localhost 8125
23
+
done
24
+
25
+
for i in {1..10}; do
26
+
duration=$((RANDOM % 200 + 50))
27
+
echo "quickdid.http.request.duration_ms:${duration}|ms|#method:POST,path:/api,status:201" | nc -u -w0 localhost 8125
28
+
done
29
+
30
+
# Histogram metrics
31
+
for i in {1..15}; do
32
+
resolution_time=$((RANDOM % 500 + 50))
33
+
echo "quickdid.resolver.resolution_time:${resolution_time}|h|#resolver:redis" | nc -u -w0 localhost 8125
34
+
echo "quickdid.resolver.resolution_time:$((resolution_time * 2))|h|#resolver:base" | nc -u -w0 localhost 8125
35
+
done
36
+
37
+
# Cache metrics
38
+
echo "quickdid.cache.hit.count:45|c|#cache_type:redis" | nc -u -w0 localhost 8125
39
+
echo "quickdid.cache.miss.count:5|c|#cache_type:redis" | nc -u -w0 localhost 8125
40
+
echo "quickdid.cache.size:1024|g|#cache_type:memory" | nc -u -w0 localhost 8125
41
+
42
+
echo "Metrics sent! Wait 15 seconds for Telegraf to flush..."
43
+
sleep 15
44
+
echo "Done!"
+145
test-scripts/verify-queries.sql
+145
test-scripts/verify-queries.sql
···
1
+
-- Test script to verify all metrics queries work correctly
2
+
-- Run this after sending test metrics with send-metrics.sh
3
+
4
+
\echo '===== CHECKING AVAILABLE TABLES ====='
5
+
SELECT table_name
6
+
FROM information_schema.tables
7
+
WHERE table_schema = 'public'
8
+
AND table_name LIKE 'quickdid%'
9
+
ORDER BY table_name;
10
+
11
+
\echo ''
12
+
\echo '===== CHECKING TABLE STRUCTURES ====='
13
+
\echo 'Structure of quickdid.http.request.count table:'
14
+
\d "quickdid.http.request.count"
15
+
16
+
\echo ''
17
+
\echo 'Structure of quickdid.http.request.duration_ms table:'
18
+
\d "quickdid.http.request.duration_ms"
19
+
20
+
\echo ''
21
+
\echo '===== QUERY 1: Recent HTTP Request Counts ====='
22
+
SELECT
23
+
time,
24
+
tags,
25
+
tags->>'method' as method,
26
+
tags->>'path' as path,
27
+
tags->>'status' as status,
28
+
value
29
+
FROM "quickdid.http.request.count"
30
+
WHERE time > NOW() - INTERVAL '1 hour'
31
+
ORDER BY time DESC
32
+
LIMIT 10;
33
+
34
+
\echo ''
35
+
\echo '===== QUERY 2: HTTP Request Duration Statistics by Endpoint ====='
36
+
SELECT
37
+
time_bucket('1 minute', time) AS minute,
38
+
tags->>'method' as method,
39
+
tags->>'path' as path,
40
+
tags->>'status' as status,
41
+
COUNT(*) as request_count,
42
+
AVG(mean) as avg_duration_ms,
43
+
MAX(p99) as p99_duration_ms,
44
+
MIN(mean) as min_duration_ms
45
+
FROM "quickdid.http.request.duration_ms"
46
+
WHERE time > NOW() - INTERVAL '1 hour'
47
+
AND tags IS NOT NULL
48
+
GROUP BY minute, tags->>'method', tags->>'path', tags->>'status'
49
+
ORDER BY minute DESC
50
+
LIMIT 10;
51
+
52
+
\echo ''
53
+
\echo '===== QUERY 3: Rate Limiter Status Over Time ====='
54
+
SELECT
55
+
time,
56
+
value as available_permits
57
+
FROM "quickdid.resolver.rate_limit.available_permits"
58
+
WHERE time > NOW() - INTERVAL '1 hour'
59
+
ORDER BY time DESC
60
+
LIMIT 10;
61
+
62
+
\echo ''
63
+
\echo '===== QUERY 4: Resolver Performance Comparison ====='
64
+
SELECT
65
+
tags->>'resolver' as resolver_type,
66
+
COUNT(*) as sample_count,
67
+
AVG(mean) as avg_resolution_time_ms,
68
+
MAX(p99) as p99_resolution_time_ms,
69
+
MIN(mean) as min_resolution_time_ms
70
+
FROM "quickdid.resolver.resolution_time"
71
+
WHERE time > NOW() - INTERVAL '1 hour'
72
+
AND tags->>'resolver' IS NOT NULL
73
+
GROUP BY tags->>'resolver'
74
+
ORDER BY avg_resolution_time_ms;
75
+
76
+
\echo ''
77
+
\echo '===== QUERY 5: Cache Hit Rate Analysis ====='
78
+
WITH cache_stats AS (
79
+
SELECT
80
+
'hits' as metric_type,
81
+
SUM(value) as total_count
82
+
FROM "quickdid.cache.hit.count"
83
+
WHERE time > NOW() - INTERVAL '1 hour'
84
+
UNION ALL
85
+
SELECT
86
+
'misses' as metric_type,
87
+
SUM(value) as total_count
88
+
FROM "quickdid.cache.miss.count"
89
+
WHERE time > NOW() - INTERVAL '1 hour'
90
+
)
91
+
SELECT
92
+
SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) as total_hits,
93
+
SUM(CASE WHEN metric_type = 'misses' THEN total_count ELSE 0 END) as total_misses,
94
+
CASE
95
+
WHEN SUM(total_count) > 0 THEN
96
+
ROUND(100.0 * SUM(CASE WHEN metric_type = 'hits' THEN total_count ELSE 0 END) / SUM(total_count), 2)
97
+
ELSE 0
98
+
END as hit_rate_percentage
99
+
FROM cache_stats;
100
+
101
+
\echo ''
102
+
\echo '===== QUERY 6: Hypertable Information ====='
103
+
SELECT
104
+
hypertable_schema,
105
+
hypertable_name,
106
+
owner,
107
+
num_dimensions,
108
+
num_chunks,
109
+
compression_enabled
110
+
FROM timescaledb_information.hypertables
111
+
WHERE hypertable_name LIKE 'quickdid%'
112
+
ORDER BY hypertable_name;
113
+
114
+
\echo ''
115
+
\echo '===== QUERY 7: HTTP Error Rate by Endpoint ====='
116
+
WITH status_counts AS (
117
+
SELECT
118
+
time_bucket('5 minutes', time) as period,
119
+
tags->>'path' as path,
120
+
CASE
121
+
WHEN (tags->>'status')::int >= 400 THEN 'error'
122
+
ELSE 'success'
123
+
END as status_category,
124
+
SUM(value) as request_count
125
+
FROM "quickdid.http.request.count"
126
+
WHERE time > NOW() - INTERVAL '1 hour'
127
+
GROUP BY period, path, status_category
128
+
)
129
+
SELECT
130
+
period,
131
+
path,
132
+
SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) as error_count,
133
+
SUM(CASE WHEN status_category = 'success' THEN request_count ELSE 0 END) as success_count,
134
+
CASE
135
+
WHEN SUM(request_count) > 0 THEN
136
+
ROUND(100.0 * SUM(CASE WHEN status_category = 'error' THEN request_count ELSE 0 END) / SUM(request_count), 2)
137
+
ELSE 0
138
+
END as error_rate_percentage
139
+
FROM status_counts
140
+
GROUP BY period, path
141
+
HAVING SUM(request_count) > 0
142
+
ORDER BY period DESC, error_rate_percentage DESC;
143
+
144
+
\echo ''
145
+
\echo '===== TEST COMPLETED ====='
+1
www/.well-known/atproto-did
+1
www/.well-known/atproto-did
···
1
+
did:web:quickdid.smokesignal.tools
+15
www/.well-known/did.json
+15
www/.well-known/did.json
···
1
+
{
2
+
"@context": [
3
+
"https://www.w3.org/ns/did/v1",
4
+
"https://w3id.org/security/multikey/v1"
5
+
],
6
+
"id": "did:web:quickdid.smokesignal.tools",
7
+
"verificationMethod": [],
8
+
"service": [
9
+
{
10
+
"id": "#quickdid",
11
+
"type": "QuickDIDService",
12
+
"serviceEndpoint": "https://quickdid.smokesignal.tools"
13
+
}
14
+
]
15
+
}
+74
www/README.md
+74
www/README.md
···
1
+
# QuickDID Static Files Directory
2
+
3
+
This directory contains static files that are served by QuickDID. By default, QuickDID serves files from the `www` directory, but this can be configured using the `STATIC_FILES_DIR` environment variable.
4
+
5
+
## Directory Structure
6
+
7
+
```
8
+
www/
9
+
├── .well-known/
10
+
│   ├── atproto-did    # AT Protocol DID identifier
11
+
│   └── did.json       # DID document
12
+
├── index.html         # Landing page
13
+
└── README.md          # This file
14
+
```
15
+
16
+
## Files
17
+
18
+
### `.well-known/atproto-did`
19
+
Contains the service's DID identifier (e.g., `did:web:example.com`). This file is used by AT Protocol clients to discover the service's DID.
20
+
21
+
### `.well-known/did.json`
22
+
Contains the DID document with verification methods and service endpoints. This is a JSON-LD document following the W3C DID specification.
23
+
24
+
### `index.html`
25
+
The landing page shown when users visit the root URL. This provides information about the service and available endpoints.
26
+
27
+
## Customization
28
+
29
+
### Using the Generation Script
30
+
31
+
You can generate the `.well-known` files for your deployment using the provided script:
32
+
33
+
```bash
34
+
HTTP_EXTERNAL=your-domain.com ./generate-wellknown.sh
35
+
```
36
+
37
+
This will create the appropriate files based on your domain.
38
+
39
+
### Manual Customization
40
+
41
+
1. **Update `.well-known/atproto-did`**: Replace with your service's DID
42
+
2. **Update `.well-known/did.json`**: Add your public key to the `verificationMethod` array if needed
43
+
3. **Customize `index.html`**: Modify the landing page to match your branding
44
+
45
+
### Docker Deployment
46
+
47
+
When using Docker, you can mount custom static files:
48
+
49
+
```yaml
50
+
volumes:
51
+
- ./custom-www:/app/www:ro
52
+
```
53
+
54
+
Or just override specific files:
55
+
56
+
```yaml
57
+
volumes:
58
+
- ./custom-index.html:/app/www/index.html:ro
59
+
- ./custom-wellknown:/app/www/.well-known:ro
60
+
```
61
+
62
+
### Environment Variable
63
+
64
+
You can change the static files directory using:
65
+
66
+
```bash
67
+
STATIC_FILES_DIR=/path/to/custom/www
68
+
```
69
+
70
+
## Security Notes
71
+
72
+
- Static files are served with automatic MIME type detection
73
+
- The `.well-known` files are crucial for AT Protocol compatibility
74
+
- Ensure proper permissions on mounted volumes in production
+4
www/css/pico.classless.green.min.css
+4
www/css/pico.classless.green.min.css
···
1
+
@charset "UTF-8";/*!
2
+
* Pico CSS ✨ v2.1.1 (https://picocss.com)
3
+
* Copyright 2019-2025 - Licensed under MIT
4
+
*/:host,:root{--pico-font-family-emoji:"Apple Color Emoji","Segoe UI Emoji","Segoe UI Symbol","Noto Color Emoji";--pico-font-family-sans-serif:system-ui,"Segoe UI",Roboto,Oxygen,Ubuntu,Cantarell,Helvetica,Arial,"Helvetica Neue",sans-serif,var(--pico-font-family-emoji);--pico-font-family-monospace:ui-monospace,SFMono-Regular,"SF Mono",Menlo,Consolas,"Liberation Mono",monospace,var(--pico-font-family-emoji);--pico-font-family:var(--pico-font-family-sans-serif);--pico-line-height:1.5;--pico-font-weight:400;--pico-font-size:100%;--pico-text-underline-offset:0.1rem;--pico-border-radius:0.25rem;--pico-border-width:0.0625rem;--pico-outline-width:0.125rem;--pico-transition:0.2s ease-in-out;--pico-spacing:1rem;--pico-typography-spacing-vertical:1rem;--pico-block-spacing-vertical:var(--pico-spacing);--pico-block-spacing-horizontal:var(--pico-spacing);--pico-form-element-spacing-vertical:0.75rem;--pico-form-element-spacing-horizontal:1rem;--pico-group-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-group-box-shadow-focus-with-button:0 0 0 var(--pico-outline-width) var(--pico-primary-focus);--pico-group-box-shadow-focus-with-input:0 0 0 0.0625rem var(--pico-form-element-border-color);--pico-modal-overlay-backdrop-filter:blur(0.375rem);--pico-nav-element-spacing-vertical:1rem;--pico-nav-element-spacing-horizontal:0.5rem;--pico-nav-link-spacing-vertical:0.5rem;--pico-nav-link-spacing-horizontal:0.5rem;--pico-nav-breadcrumb-divider:">";--pico-icon-checkbox:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(255, 255, 255)' stroke-width='4' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-minus:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(255, 255, 255)' stroke-width='4' stroke-linecap='round' stroke-linejoin='round'%3E%3Cline x1='5' y1='12' x2='19' y2='12'%3E%3C/line%3E%3C/svg%3E");--pico-icon-chevron:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='6 9 12 15 18 9'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-date:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Crect x='3' y='4' width='18' height='18' rx='2' ry='2'%3E%3C/rect%3E%3Cline x1='16' y1='2' x2='16' y2='6'%3E%3C/line%3E%3Cline x1='8' y1='2' x2='8' y2='6'%3E%3C/line%3E%3Cline x1='3' y1='10' x2='21' y2='10'%3E%3C/line%3E%3C/svg%3E");--pico-icon-time:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cpolyline points='12 6 12 12 16 14'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-search:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='1.5' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='11' cy='11' r='8'%3E%3C/circle%3E%3Cline x1='21' y1='21' x2='16.65' 
y2='16.65'%3E%3C/line%3E%3C/svg%3E");--pico-icon-close:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(136, 145, 164)' stroke-width='3' stroke-linecap='round' stroke-linejoin='round'%3E%3Cline x1='18' y1='6' x2='6' y2='18'%3E%3C/line%3E%3Cline x1='6' y1='6' x2='18' y2='18'%3E%3C/line%3E%3C/svg%3E");--pico-icon-loading:url("data:image/svg+xml,%3Csvg fill='none' height='24' width='24' viewBox='0 0 24 24' xmlns='http://www.w3.org/2000/svg' %3E%3Cstyle%3E g %7B animation: rotate 2s linear infinite; transform-origin: center center; %7D circle %7B stroke-dasharray: 75,100; stroke-dashoffset: -5; animation: dash 1.5s ease-in-out infinite; stroke-linecap: round; %7D @keyframes rotate %7B 0%25 %7B transform: rotate(0deg); %7D 100%25 %7B transform: rotate(360deg); %7D %7D @keyframes dash %7B 0%25 %7B stroke-dasharray: 1,100; stroke-dashoffset: 0; %7D 50%25 %7B stroke-dasharray: 44.5,100; stroke-dashoffset: -17.5; %7D 100%25 %7B stroke-dasharray: 44.5,100; stroke-dashoffset: -62; %7D %7D %3C/style%3E%3Cg%3E%3Ccircle cx='12' cy='12' r='10' fill='none' stroke='rgb(136, 145, 164)' stroke-width='4' /%3E%3C/g%3E%3C/svg%3E")}@media (min-width:576px){:host,:root{--pico-font-size:106.25%}}@media (min-width:768px){:host,:root{--pico-font-size:112.5%}}@media (min-width:1024px){:host,:root{--pico-font-size:118.75%}}@media (min-width:1280px){:host,:root{--pico-font-size:125%}}@media (min-width:1536px){:host,:root{--pico-font-size:131.25%}}a{--pico-text-decoration:underline}small{--pico-font-size:0.875em}h1,h2,h3,h4,h5,h6{--pico-font-weight:700}h1{--pico-font-size:2rem;--pico-line-height:1.125;--pico-typography-spacing-top:3rem}h2{--pico-font-size:1.75rem;--pico-line-height:1.15;--pico-typography-spacing-top:2.625rem}h3{--pico-font-size:1.5rem;--pico-line-height:1.175;--pico-typography-spacing-top:2.25rem}h4{--pico-font-size:1.25rem;--pico-line-height:1.2;--pico-typography-spacing-top:1.874rem}h5{--pico-font-size:1.125rem;--pico-line-height:1.225;--pico-typography-spacing-top:1.6875rem}h6{--pico-font-size:1rem;--pico-line-height:1.25;--pico-typography-spacing-top:1.5rem}tfoot td,tfoot th,thead td,thead th{--pico-font-weight:600;--pico-border-width:0.1875rem}code,kbd,pre,samp{--pico-font-family:var(--pico-font-family-monospace)}kbd{--pico-font-weight:bolder}:where(select,textarea),input:not([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-outline-width:0.0625rem}[type=search]{--pico-border-radius:5rem}[type=checkbox],[type=radio]{--pico-border-width:0.125rem}[type=checkbox][role=switch]{--pico-border-width:0.1875rem}[role=search]{--pico-border-radius:5rem}[role=group] [role=button],[role=group] [type=button],[role=group] [type=submit],[role=group] button,[role=search] [role=button],[role=search] [type=button],[role=search] [type=submit],[role=search] button{--pico-form-element-spacing-horizontal:2rem}details summary[role=button]::after{filter:brightness(0) invert(1)}[aria-busy=true]:not(input,select,textarea):is(button,[type=submit],[type=button],[type=reset],[role=button])::before{filter:brightness(0) invert(1)}:host(:not([data-theme=dark])),:root:not([data-theme=dark]),[data-theme=light]{color-scheme:light;--pico-background-color:#fff;--pico-color:#373c44;--pico-text-selection-color:rgba(71, 164, 23, 0.25);--pico-muted-color:#646b79;--pico-muted-border-color:rgb(231, 234, 
239.5);--pico-primary:#33790f;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(51, 121, 15, 0.5);--pico-primary-hover:#265e09;--pico-primary-hover-background:#33790f;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(71, 164, 23, 0.5);--pico-primary-inverse:#fff;--pico-secondary:#5d6b89;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(93, 107, 137, 0.5);--pico-secondary-hover:#48536b;--pico-secondary-hover-background:#48536b;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(93, 107, 137, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#181c25;--pico-contrast-background:#181c25;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(24, 28, 37, 0.5);--pico-contrast-hover:#000;--pico-contrast-hover-background:#000;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-secondary-hover);--pico-contrast-focus:rgba(93, 107, 137, 0.25);--pico-contrast-inverse:#fff;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(129, 145, 181, 0.01698),0.0335rem 0.067rem 0.402rem rgba(129, 145, 181, 0.024),0.0625rem 0.125rem 0.75rem rgba(129, 145, 181, 0.03),0.1125rem 0.225rem 1.35rem rgba(129, 145, 181, 0.036),0.2085rem 0.417rem 2.502rem rgba(129, 145, 181, 0.04302),0.5rem 1rem 6rem rgba(129, 145, 181, 0.06),0 0 0 0.0625rem rgba(129, 145, 181, 0.015);--pico-h1-color:#2d3138;--pico-h2-color:#373c44;--pico-h3-color:#424751;--pico-h4-color:#4d535e;--pico-h5-color:#5c6370;--pico-h6-color:#646b79;--pico-mark-background-color:rgb(252.5, 230.5, 191.5);--pico-mark-color:#0f1114;--pico-ins-color:rgb(28.5, 105.5, 84);--pico-del-color:rgb(136, 56.5, 53);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(243, 244.5, 246.75);--pico-code-color:#646b79;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(251, 251.5, 252.25);--pico-form-element-selected-background-color:#dfe3eb;--pico-form-element-border-color:#cfd5e2;--pico-form-element-color:#23262c;--pico-form-element-placeholder-color:var(--pico-muted-color);--pico-form-element-active-background-color:#fff;--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(183.5, 105.5, 106.5);--pico-form-element-invalid-active-border-color:rgb(200.25, 79.25, 72.25);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:rgb(76, 154.5, 137.5);--pico-form-element-valid-active-border-color:rgb(39, 152.75, 
118.75);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#bfc7d9;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-range-border-color:#dfe3eb;--pico-range-active-border-color:#bfc7d9;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:var(--pico-background-color);--pico-card-border-color:var(--pico-muted-border-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(251, 251.5, 252.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(232, 234, 237, 0.75);--pico-progress-background-color:#dfe3eb;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(76, 154.5, 137.5)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(200.25, 79.25, 72.25)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}:host(:not([data-theme=dark])) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),:root:not([data-theme=dark]) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),[data-theme=light] input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}@media only screen and (prefers-color-scheme:dark){:host(:not([data-theme])),:root:not([data-theme]){color-scheme:dark;--pico-background-color:rgb(19, 22.5, 30.5);--pico-color:#c2c7d0;--pico-text-selection-color:rgba(78, 179, 27, 0.1875);--pico-muted-color:#7b8495;--pico-muted-border-color:#202632;--pico-primary:#4eb31b;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(78, 179, 27, 0.5);--pico-primary-hover:#5dd121;--pico-primary-hover-background:#409614;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(78, 179, 27, 0.375);--pico-primary-inverse:#fff;--pico-secondary:#969eaf;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(150, 158, 175, 
0.5);--pico-secondary-hover:#b3b9c5;--pico-secondary-hover-background:#5d6b89;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(144, 158, 190, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#dfe3eb;--pico-contrast-background:#eff1f4;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(223, 227, 235, 0.5);--pico-contrast-hover:#fff;--pico-contrast-hover-background:#fff;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-contrast-hover);--pico-contrast-focus:rgba(207, 213, 226, 0.25);--pico-contrast-inverse:#000;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(7, 8.5, 12, 0.01698),0.0335rem 0.067rem 0.402rem rgba(7, 8.5, 12, 0.024),0.0625rem 0.125rem 0.75rem rgba(7, 8.5, 12, 0.03),0.1125rem 0.225rem 1.35rem rgba(7, 8.5, 12, 0.036),0.2085rem 0.417rem 2.502rem rgba(7, 8.5, 12, 0.04302),0.5rem 1rem 6rem rgba(7, 8.5, 12, 0.06),0 0 0 0.0625rem rgba(7, 8.5, 12, 0.015);--pico-h1-color:#f0f1f3;--pico-h2-color:#e0e3e7;--pico-h3-color:#c2c7d0;--pico-h4-color:#b3b9c5;--pico-h5-color:#a4acba;--pico-h6-color:#8891a4;--pico-mark-background-color:#014063;--pico-mark-color:#fff;--pico-ins-color:#62af9a;--pico-del-color:rgb(205.5, 126, 123);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(26, 30.5, 40.25);--pico-code-color:#8891a4;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(28, 33, 43.5);--pico-form-element-selected-background-color:#2a3140;--pico-form-element-border-color:#2a3140;--pico-form-element-color:#e0e3e7;--pico-form-element-placeholder-color:#8891a4;--pico-form-element-active-background-color:rgb(26, 30.5, 40.25);--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(149.5, 74, 80);--pico-form-element-invalid-active-border-color:rgb(183.25, 63.5, 59);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:#2a7b6f;--pico-form-element-valid-active-border-color:rgb(22, 137, 105.5);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#333c4e;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 
0);--pico-range-border-color:#202632;--pico-range-active-border-color:#2a3140;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:#181c25;--pico-card-border-color:var(--pico-card-background-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(26, 30.5, 40.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(7.5, 8.5, 10, 0.75);--pico-progress-background-color:#202632;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(42, 123, 111)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(149.5, 74, 80)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}:host(:not([data-theme])) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]),:root:not([data-theme]) input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}}[data-theme=dark]{color-scheme:dark;--pico-background-color:rgb(19, 22.5, 30.5);--pico-color:#c2c7d0;--pico-text-selection-color:rgba(78, 179, 27, 0.1875);--pico-muted-color:#7b8495;--pico-muted-border-color:#202632;--pico-primary:#4eb31b;--pico-primary-background:#398712;--pico-primary-border:var(--pico-primary-background);--pico-primary-underline:rgba(78, 179, 27, 0.5);--pico-primary-hover:#5dd121;--pico-primary-hover-background:#409614;--pico-primary-hover-border:var(--pico-primary-hover-background);--pico-primary-hover-underline:var(--pico-primary-hover);--pico-primary-focus:rgba(78, 179, 27, 0.375);--pico-primary-inverse:#fff;--pico-secondary:#969eaf;--pico-secondary-background:#525f7a;--pico-secondary-border:var(--pico-secondary-background);--pico-secondary-underline:rgba(150, 158, 175, 0.5);--pico-secondary-hover:#b3b9c5;--pico-secondary-hover-background:#5d6b89;--pico-secondary-hover-border:var(--pico-secondary-hover-background);--pico-secondary-hover-underline:var(--pico-secondary-hover);--pico-secondary-focus:rgba(144, 158, 190, 0.25);--pico-secondary-inverse:#fff;--pico-contrast:#dfe3eb;--pico-contrast-background:#eff1f4;--pico-contrast-border:var(--pico-contrast-background);--pico-contrast-underline:rgba(223, 227, 235, 0.5);--pico-contrast-hover:#fff;--pico-contrast-hover-background:#fff;--pico-contrast-hover-border:var(--pico-contrast-hover-background);--pico-contrast-hover-underline:var(--pico-contrast-hover);--pico-contrast-focus:rgba(207, 213, 226, 
0.25);--pico-contrast-inverse:#000;--pico-box-shadow:0.0145rem 0.029rem 0.174rem rgba(7, 8.5, 12, 0.01698),0.0335rem 0.067rem 0.402rem rgba(7, 8.5, 12, 0.024),0.0625rem 0.125rem 0.75rem rgba(7, 8.5, 12, 0.03),0.1125rem 0.225rem 1.35rem rgba(7, 8.5, 12, 0.036),0.2085rem 0.417rem 2.502rem rgba(7, 8.5, 12, 0.04302),0.5rem 1rem 6rem rgba(7, 8.5, 12, 0.06),0 0 0 0.0625rem rgba(7, 8.5, 12, 0.015);--pico-h1-color:#f0f1f3;--pico-h2-color:#e0e3e7;--pico-h3-color:#c2c7d0;--pico-h4-color:#b3b9c5;--pico-h5-color:#a4acba;--pico-h6-color:#8891a4;--pico-mark-background-color:#014063;--pico-mark-color:#fff;--pico-ins-color:#62af9a;--pico-del-color:rgb(205.5, 126, 123);--pico-blockquote-border-color:var(--pico-muted-border-color);--pico-blockquote-footer-color:var(--pico-muted-color);--pico-button-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-button-hover-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-table-border-color:var(--pico-muted-border-color);--pico-table-row-stripped-background-color:rgba(111, 120, 135, 0.0375);--pico-code-background-color:rgb(26, 30.5, 40.25);--pico-code-color:#8891a4;--pico-code-kbd-background-color:var(--pico-color);--pico-code-kbd-color:var(--pico-background-color);--pico-form-element-background-color:rgb(28, 33, 43.5);--pico-form-element-selected-background-color:#2a3140;--pico-form-element-border-color:#2a3140;--pico-form-element-color:#e0e3e7;--pico-form-element-placeholder-color:#8891a4;--pico-form-element-active-background-color:rgb(26, 30.5, 40.25);--pico-form-element-active-border-color:var(--pico-primary-border);--pico-form-element-focus-color:var(--pico-primary-border);--pico-form-element-disabled-opacity:0.5;--pico-form-element-invalid-border-color:rgb(149.5, 74, 80);--pico-form-element-invalid-active-border-color:rgb(183.25, 63.5, 59);--pico-form-element-invalid-focus-color:var(--pico-form-element-invalid-active-border-color);--pico-form-element-valid-border-color:#2a7b6f;--pico-form-element-valid-active-border-color:rgb(22, 137, 105.5);--pico-form-element-valid-focus-color:var(--pico-form-element-valid-active-border-color);--pico-switch-background-color:#333c4e;--pico-switch-checked-background-color:var(--pico-primary-background);--pico-switch-color:#fff;--pico-switch-thumb-box-shadow:0 0 0 rgba(0, 0, 0, 0);--pico-range-border-color:#202632;--pico-range-active-border-color:#2a3140;--pico-range-thumb-border-color:var(--pico-background-color);--pico-range-thumb-color:var(--pico-secondary-background);--pico-range-thumb-active-color:var(--pico-primary-background);--pico-accordion-border-color:var(--pico-muted-border-color);--pico-accordion-active-summary-color:var(--pico-primary-hover);--pico-accordion-close-summary-color:var(--pico-color);--pico-accordion-open-summary-color:var(--pico-muted-color);--pico-card-background-color:#181c25;--pico-card-border-color:var(--pico-card-background-color);--pico-card-box-shadow:var(--pico-box-shadow);--pico-card-sectioning-background-color:rgb(26, 30.5, 40.25);--pico-loading-spinner-opacity:0.5;--pico-modal-overlay-background-color:rgba(7.5, 8.5, 10, 0.75);--pico-progress-background-color:#202632;--pico-progress-color:var(--pico-primary-background);--pico-tooltip-background-color:var(--pico-contrast-background);--pico-tooltip-color:var(--pico-contrast-inverse);--pico-icon-valid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(42, 123, 111)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Cpolyline points='20 6 9 17 4 12'%3E%3C/polyline%3E%3C/svg%3E");
--pico-icon-invalid:url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='24' height='24' viewBox='0 0 24 24' fill='none' stroke='rgb(149.5, 74, 80)' stroke-width='2' stroke-linecap='round' stroke-linejoin='round'%3E%3Ccircle cx='12' cy='12' r='10'%3E%3C/circle%3E%3Cline x1='12' y1='8' x2='12' y2='12'%3E%3C/line%3E%3Cline x1='12' y1='16' x2='12.01' y2='16'%3E%3C/line%3E%3C/svg%3E")}[data-theme=dark] input:is([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[type=file]){--pico-form-element-focus-color:var(--pico-primary-focus)}[type=checkbox],[type=radio],[type=range],progress{accent-color:var(--pico-primary)}*,::after,::before{box-sizing:border-box;background-repeat:no-repeat}::after,::before{text-decoration:inherit;vertical-align:inherit}:where(:host),:where(:root){-webkit-tap-highlight-color:transparent;-webkit-text-size-adjust:100%;-moz-text-size-adjust:100%;text-size-adjust:100%;background-color:var(--pico-background-color);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:var(--pico-font-size);line-height:var(--pico-line-height);font-family:var(--pico-font-family);text-underline-offset:var(--pico-text-underline-offset);text-rendering:optimizeLegibility;overflow-wrap:break-word;-moz-tab-size:4;-o-tab-size:4;tab-size:4}body{width:100%;margin:0}main{display:block}body>footer,body>header,body>main{width:100%;margin-right:auto;margin-left:auto;padding:var(--pico-block-spacing-vertical) var(--pico-block-spacing-horizontal)}@media (min-width:576px){body>footer,body>header,body>main{max-width:510px;padding-right:0;padding-left:0}}@media (min-width:768px){body>footer,body>header,body>main{max-width:700px}}@media (min-width:1024px){body>footer,body>header,body>main{max-width:950px}}@media (min-width:1280px){body>footer,body>header,body>main{max-width:1200px}}@media (min-width:1536px){body>footer,body>header,body>main{max-width:1450px}}section{margin-bottom:var(--pico-block-spacing-vertical)}b,strong{font-weight:bolder}sub,sup{position:relative;font-size:.75em;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}address,blockquote,dl,ol,p,pre,table,ul{margin-top:0;margin-bottom:var(--pico-typography-spacing-vertical);color:var(--pico-color);font-style:normal;font-weight:var(--pico-font-weight)}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:var(--pico-typography-spacing-vertical);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:var(--pico-font-size);line-height:var(--pico-line-height);font-family:var(--pico-font-family)}h1{--pico-color:var(--pico-h1-color)}h2{--pico-color:var(--pico-h2-color)}h3{--pico-color:var(--pico-h3-color)}h4{--pico-color:var(--pico-h4-color)}h5{--pico-color:var(--pico-h5-color)}h6{--pico-color:var(--pico-h6-color)}:where(article,address,blockquote,dl,figure,form,ol,p,pre,table,ul)~:is(h1,h2,h3,h4,h5,h6){margin-top:var(--pico-typography-spacing-top)}p{margin-bottom:var(--pico-typography-spacing-vertical)}hgroup{margin-bottom:var(--pico-typography-spacing-vertical)}hgroup>*{margin-top:0;margin-bottom:0}hgroup>:not(:first-child):last-child{--pico-color:var(--pico-muted-color);--pico-font-weight:unset;font-size:1rem}:where(ol,ul) li{margin-bottom:calc(var(--pico-typography-spacing-vertical) * .25)}:where(dl,ol,ul) :where(dl,ol,ul){margin:0;margin-top:calc(var(--pico-typography-spacing-vertical) * .25)}ul li{list-style:square}mark{padding:.125rem 
.25rem;background-color:var(--pico-mark-background-color);color:var(--pico-mark-color);vertical-align:baseline}blockquote{display:block;margin:var(--pico-typography-spacing-vertical) 0;padding:var(--pico-spacing);border-right:none;border-left:.25rem solid var(--pico-blockquote-border-color);border-inline-start:0.25rem solid var(--pico-blockquote-border-color);border-inline-end:none}blockquote footer{margin-top:calc(var(--pico-typography-spacing-vertical) * .5);color:var(--pico-blockquote-footer-color)}abbr[title]{border-bottom:1px dotted;text-decoration:none;cursor:help}ins{color:var(--pico-ins-color);text-decoration:none}del{color:var(--pico-del-color)}::-moz-selection{background-color:var(--pico-text-selection-color)}::selection{background-color:var(--pico-text-selection-color)}:where(a:not([role=button])),[role=link]{--pico-color:var(--pico-primary);--pico-background-color:transparent;--pico-underline:var(--pico-primary-underline);outline:0;background-color:var(--pico-background-color);color:var(--pico-color);-webkit-text-decoration:var(--pico-text-decoration);text-decoration:var(--pico-text-decoration);text-decoration-color:var(--pico-underline);text-underline-offset:0.125em;transition:background-color var(--pico-transition),color var(--pico-transition),box-shadow var(--pico-transition),-webkit-text-decoration var(--pico-transition);transition:background-color var(--pico-transition),color var(--pico-transition),text-decoration var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),color var(--pico-transition),text-decoration var(--pico-transition),box-shadow var(--pico-transition),-webkit-text-decoration var(--pico-transition)}:where(a:not([role=button])):is([aria-current]:not([aria-current=false]),:hover,:active,:focus),[role=link]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){--pico-color:var(--pico-primary-hover);--pico-underline:var(--pico-primary-hover-underline);--pico-text-decoration:underline}:where(a:not([role=button])):focus-visible,[role=link]:focus-visible{box-shadow:0 0 0 var(--pico-outline-width) var(--pico-primary-focus)}a[role=button]{display:inline-block}button{margin:0;overflow:visible;font-family:inherit;text-transform:none}[type=button],[type=reset],[type=submit],button{-webkit-appearance:button}[role=button],[type=button],[type=file]::file-selector-button,[type=reset],[type=submit],button{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);--pico-color:var(--pico-primary-inverse);--pico-box-shadow:var(--pico-button-box-shadow, 0 0 0 rgba(0, 0, 0, 0));padding:var(--pico-form-element-spacing-vertical) var(--pico-form-element-spacing-horizontal);border:var(--pico-border-width) solid var(--pico-border-color);border-radius:var(--pico-border-radius);outline:0;background-color:var(--pico-background-color);box-shadow:var(--pico-box-shadow);color:var(--pico-color);font-weight:var(--pico-font-weight);font-size:1rem;line-height:var(--pico-line-height);text-align:center;text-decoration:none;cursor:pointer;-webkit-user-select:none;-moz-user-select:none;user-select:none;transition:background-color var(--pico-transition),border-color var(--pico-transition),color var(--pico-transition),box-shadow 
var(--pico-transition)}[role=button]:is(:hover,:active,:focus),[role=button]:is([aria-current]:not([aria-current=false])),[type=button]:is(:hover,:active,:focus),[type=button]:is([aria-current]:not([aria-current=false])),[type=file]::file-selector-button:is(:hover,:active,:focus),[type=file]::file-selector-button:is([aria-current]:not([aria-current=false])),[type=reset]:is(:hover,:active,:focus),[type=reset]:is([aria-current]:not([aria-current=false])),[type=submit]:is(:hover,:active,:focus),[type=submit]:is([aria-current]:not([aria-current=false])),button:is(:hover,:active,:focus),button:is([aria-current]:not([aria-current=false])){--pico-background-color:var(--pico-primary-hover-background);--pico-border-color:var(--pico-primary-hover-border);--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0));--pico-color:var(--pico-primary-inverse)}[role=button]:focus,[role=button]:is([aria-current]:not([aria-current=false])):focus,[type=button]:focus,[type=button]:is([aria-current]:not([aria-current=false])):focus,[type=file]::file-selector-button:focus,[type=file]::file-selector-button:is([aria-current]:not([aria-current=false])):focus,[type=reset]:focus,[type=reset]:is([aria-current]:not([aria-current=false])):focus,[type=submit]:focus,[type=submit]:is([aria-current]:not([aria-current=false])):focus,button:focus,button:is([aria-current]:not([aria-current=false])):focus{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-primary-focus)}[type=button],[type=reset],[type=submit]{margin-bottom:var(--pico-spacing)}[type=file]::file-selector-button,[type=reset]{--pico-background-color:var(--pico-secondary-background);--pico-border-color:var(--pico-secondary-border);--pico-color:var(--pico-secondary-inverse);cursor:pointer}[type=file]::file-selector-button:is([aria-current]:not([aria-current=false]),:hover,:active,:focus),[type=reset]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){--pico-background-color:var(--pico-secondary-hover-background);--pico-border-color:var(--pico-secondary-hover-border);--pico-color:var(--pico-secondary-inverse)}[type=file]::file-selector-button:focus,[type=reset]:focus{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-secondary-focus)}:where(button,[type=submit],[type=reset],[type=button],[role=button])[disabled],:where(fieldset[disabled]) :is(button,[type=submit],[type=button],[type=reset],[role=button]){opacity:.5;pointer-events:none}:where(table){width:100%;border-collapse:collapse;border-spacing:0;text-indent:0}td,th{padding:calc(var(--pico-spacing)/ 2) var(--pico-spacing);border-bottom:var(--pico-border-width) solid var(--pico-table-border-color);background-color:var(--pico-background-color);color:var(--pico-color);font-weight:var(--pico-font-weight);text-align:left;text-align:start}tfoot td,tfoot th{border-top:var(--pico-border-width) solid var(--pico-table-border-color);border-bottom:0}table.striped tbody tr:nth-child(odd) td,table.striped tbody tr:nth-child(odd) 
th{background-color:var(--pico-table-row-stripped-background-color)}:where(audio,canvas,iframe,img,svg,video){vertical-align:middle}audio,video{display:inline-block}audio:not([controls]){display:none;height:0}:where(iframe){border-style:none}img{max-width:100%;height:auto;border-style:none}:where(svg:not([fill])){fill:currentColor}svg:not(:host),svg:not(:root){overflow:hidden}code,kbd,pre,samp{font-size:.875em;font-family:var(--pico-font-family)}pre code,pre samp{font-size:inherit;font-family:inherit}pre{-ms-overflow-style:scrollbar;overflow:auto}code,kbd,pre,samp{border-radius:var(--pico-border-radius);background:var(--pico-code-background-color);color:var(--pico-code-color);font-weight:var(--pico-font-weight);line-height:initial}code,kbd,samp{display:inline-block;padding:.375rem}pre{display:block;margin-bottom:var(--pico-spacing);overflow-x:auto}pre>code,pre>samp{display:block;padding:var(--pico-spacing);background:0 0;line-height:var(--pico-line-height)}kbd{background-color:var(--pico-code-kbd-background-color);color:var(--pico-code-kbd-color);vertical-align:baseline}figure{display:block;margin:0;padding:0}figure figcaption{padding:calc(var(--pico-spacing) * .5) 0;color:var(--pico-muted-color)}hr{height:0;margin:var(--pico-typography-spacing-vertical) 0;border:0;border-top:1px solid var(--pico-muted-border-color);color:inherit}[hidden],template{display:none!important}canvas{display:inline-block}input,optgroup,select,textarea{margin:0;font-size:1rem;line-height:var(--pico-line-height);font-family:inherit;letter-spacing:inherit}input{overflow:visible}select{text-transform:none}legend{max-width:100%;padding:0;color:inherit;white-space:normal}textarea{overflow:auto}[type=checkbox],[type=radio]{padding:0}::-webkit-inner-spin-button,::-webkit-outer-spin-button{height:auto}[type=search]{-webkit-appearance:textfield;outline-offset:-2px}[type=search]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{-webkit-appearance:button;font:inherit}::-moz-focus-inner{padding:0;border-style:none}:-moz-focusring{outline:0}:-moz-ui-invalid{box-shadow:none}::-ms-expand{display:none}[type=file],[type=range]{padding:0;border-width:0}input:not([type=checkbox],[type=radio],[type=range]){height:calc(1rem * var(--pico-line-height) + var(--pico-form-element-spacing-vertical) * 2 + var(--pico-border-width) * 2)}fieldset{width:100%;margin:0;margin-bottom:var(--pico-spacing);padding:0;border:0}fieldset legend,label{display:block;margin-bottom:calc(var(--pico-spacing) * .375);color:var(--pico-color);font-weight:var(--pico-form-label-font-weight,var(--pico-font-weight))}fieldset legend{margin-bottom:calc(var(--pico-spacing) * .5)}button[type=submit],input:not([type=checkbox],[type=radio]),select,textarea{width:100%}input:not([type=checkbox],[type=radio],[type=range],[type=file]),select,textarea{-webkit-appearance:none;-moz-appearance:none;appearance:none;padding:var(--pico-form-element-spacing-vertical) var(--pico-form-element-spacing-horizontal)}input,select,textarea{--pico-background-color:var(--pico-form-element-background-color);--pico-border-color:var(--pico-form-element-border-color);--pico-color:var(--pico-form-element-color);--pico-box-shadow:none;border:var(--pico-border-width) solid var(--pico-border-color);border-radius:var(--pico-border-radius);outline:0;background-color:var(--pico-background-color);box-shadow:var(--pico-box-shadow);color:var(--pico-color);font-weight:var(--pico-font-weight);transition:background-color var(--pico-transition),border-color 
var(--pico-transition),color var(--pico-transition),box-shadow var(--pico-transition)}:where(select,textarea):not([readonly]):is(:active,:focus),input:not([type=submit],[type=button],[type=reset],[type=checkbox],[type=radio],[readonly]):is(:active,:focus){--pico-background-color:var(--pico-form-element-active-background-color)}:where(select,textarea):not([readonly]):is(:active,:focus),input:not([type=submit],[type=button],[type=reset],[role=switch],[readonly]):is(:active,:focus){--pico-border-color:var(--pico-form-element-active-border-color)}:where(select,textarea):not([readonly]):focus,input:not([type=submit],[type=button],[type=reset],[type=range],[type=file],[readonly]):focus{--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-focus-color)}:where(fieldset[disabled]) :is(input:not([type=submit],[type=button],[type=reset]),select,textarea),input:not([type=submit],[type=button],[type=reset])[disabled],label[aria-disabled=true],select[disabled],textarea[disabled]{opacity:var(--pico-form-element-disabled-opacity);pointer-events:none}label[aria-disabled=true] input[disabled]{opacity:1}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid]{padding-right:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem)!important;padding-left:var(--pico-form-element-spacing-horizontal);padding-inline-start:var(--pico-form-element-spacing-horizontal)!important;padding-inline-end:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem)!important;background-position:center right .75rem;background-size:1rem auto;background-repeat:no-repeat}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid=false]:not(select){background-image:var(--pico-icon-valid)}:where(input,select,textarea):not([type=checkbox],[type=radio],[type=date],[type=datetime-local],[type=month],[type=time],[type=week],[type=range])[aria-invalid=true]:not(select){background-image:var(--pico-icon-invalid)}:where(input,select,textarea)[aria-invalid=false]{--pico-border-color:var(--pico-form-element-valid-border-color)}:where(input,select,textarea)[aria-invalid=false]:is(:active,:focus){--pico-border-color:var(--pico-form-element-valid-active-border-color)!important}:where(input,select,textarea)[aria-invalid=false]:is(:active,:focus):not([type=checkbox],[type=radio]){--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-valid-focus-color)!important}:where(input,select,textarea)[aria-invalid=true]{--pico-border-color:var(--pico-form-element-invalid-border-color)}:where(input,select,textarea)[aria-invalid=true]:is(:active,:focus){--pico-border-color:var(--pico-form-element-invalid-active-border-color)!important}:where(input,select,textarea)[aria-invalid=true]:is(:active,:focus):not([type=checkbox],[type=radio]){--pico-box-shadow:0 0 0 var(--pico-outline-width) var(--pico-form-element-invalid-focus-color)!important}[dir=rtl] :where(input,select,textarea):not([type=checkbox],[type=radio]):is([aria-invalid],[aria-invalid=true],[aria-invalid=false]){background-position:center left 
.75rem}input::-webkit-input-placeholder,input::placeholder,select:invalid,textarea::-webkit-input-placeholder,textarea::placeholder{color:var(--pico-form-element-placeholder-color);opacity:1}input:not([type=checkbox],[type=radio]),select,textarea{margin-bottom:var(--pico-spacing)}select::-ms-expand{border:0;background-color:transparent}select:not([multiple],[size]){padding-right:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem);padding-left:var(--pico-form-element-spacing-horizontal);padding-inline-start:var(--pico-form-element-spacing-horizontal);padding-inline-end:calc(var(--pico-form-element-spacing-horizontal) + 1.5rem);background-image:var(--pico-icon-chevron);background-position:center right .75rem;background-size:1rem auto;background-repeat:no-repeat}select[multiple] option:checked{background:var(--pico-form-element-selected-background-color);color:var(--pico-form-element-color)}[dir=rtl] select:not([multiple],[size]){background-position:center left .75rem}textarea{display:block;resize:vertical}textarea[aria-invalid]{--pico-icon-height:calc(1rem * var(--pico-line-height) + var(--pico-form-element-spacing-vertical) * 2 + var(--pico-border-width) * 2);background-position:top right .75rem!important;background-size:1rem var(--pico-icon-height)!important}:where(input,select,textarea,fieldset)+small{display:block;width:100%;margin-top:calc(var(--pico-spacing) * -.75);margin-bottom:var(--pico-spacing);color:var(--pico-muted-color)}:where(input,select,textarea,fieldset)[aria-invalid=false]+small{color:var(--pico-ins-color)}:where(input,select,textarea,fieldset)[aria-invalid=true]+small{color:var(--pico-del-color)}label>:where(input,select,textarea){margin-top:calc(var(--pico-spacing) * .25)}label:has([type=checkbox],[type=radio]){width:-moz-fit-content;width:fit-content;cursor:pointer}[type=checkbox],[type=radio]{-webkit-appearance:none;-moz-appearance:none;appearance:none;width:1.25em;height:1.25em;margin-top:-.125em;margin-inline-end:.5em;border-width:var(--pico-border-width);vertical-align:middle;cursor:pointer}[type=checkbox]::-ms-check,[type=radio]::-ms-check{display:none}[type=checkbox]:checked,[type=checkbox]:checked:active,[type=checkbox]:checked:focus,[type=radio]:checked,[type=radio]:checked:active,[type=radio]:checked:focus{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);background-image:var(--pico-icon-checkbox);background-position:center;background-size:.75em auto;background-repeat:no-repeat}[type=checkbox]~label,[type=radio]~label{display:inline-block;margin-bottom:0;cursor:pointer}[type=checkbox]~label:not(:last-of-type),[type=radio]~label:not(:last-of-type){margin-inline-end:1em}[type=checkbox]:indeterminate{--pico-background-color:var(--pico-primary-background);--pico-border-color:var(--pico-primary-border);background-image:var(--pico-icon-minus);background-position:center;background-size:.75em auto;background-repeat:no-repeat}[type=radio]{border-radius:50%}[type=radio]:checked,[type=radio]:checked:active,[type=radio]:checked:focus{--pico-background-color:var(--pico-primary-inverse);border-width:.35em;background-image:none}[type=checkbox][role=switch]{--pico-background-color:var(--pico-switch-background-color);--pico-color:var(--pico-switch-color);width:2.25em;height:1.25em;border:var(--pico-border-width) solid 
var(--pico-border-color);border-radius:1.25em;background-color:var(--pico-background-color);line-height:1.25em}[type=checkbox][role=switch]:not([aria-invalid]){--pico-border-color:var(--pico-switch-background-color)}[type=checkbox][role=switch]:before{display:block;aspect-ratio:1;height:100%;border-radius:50%;background-color:var(--pico-color);box-shadow:var(--pico-switch-thumb-box-shadow);content:"";transition:margin .1s ease-in-out}[type=checkbox][role=switch]:focus{--pico-background-color:var(--pico-switch-background-color);--pico-border-color:var(--pico-switch-background-color)}[type=checkbox][role=switch]:checked{--pico-background-color:var(--pico-switch-checked-background-color);--pico-border-color:var(--pico-switch-checked-background-color);background-image:none}[type=checkbox][role=switch]:checked::before{margin-inline-start:calc(2.25em - 1.25em)}[type=checkbox][role=switch][disabled]{--pico-background-color:var(--pico-border-color)}[type=checkbox][aria-invalid=false]:checked,[type=checkbox][aria-invalid=false]:checked:active,[type=checkbox][aria-invalid=false]:checked:focus,[type=checkbox][role=switch][aria-invalid=false]:checked,[type=checkbox][role=switch][aria-invalid=false]:checked:active,[type=checkbox][role=switch][aria-invalid=false]:checked:focus{--pico-background-color:var(--pico-form-element-valid-border-color)}[type=checkbox]:checked:active[aria-invalid=true],[type=checkbox]:checked:focus[aria-invalid=true],[type=checkbox]:checked[aria-invalid=true],[type=checkbox][role=switch]:checked:active[aria-invalid=true],[type=checkbox][role=switch]:checked:focus[aria-invalid=true],[type=checkbox][role=switch]:checked[aria-invalid=true]{--pico-background-color:var(--pico-form-element-invalid-border-color)}[type=checkbox][aria-invalid=false]:checked,[type=checkbox][aria-invalid=false]:checked:active,[type=checkbox][aria-invalid=false]:checked:focus,[type=checkbox][role=switch][aria-invalid=false]:checked,[type=checkbox][role=switch][aria-invalid=false]:checked:active,[type=checkbox][role=switch][aria-invalid=false]:checked:focus,[type=radio][aria-invalid=false]:checked,[type=radio][aria-invalid=false]:checked:active,[type=radio][aria-invalid=false]:checked:focus{--pico-border-color:var(--pico-form-element-valid-border-color)}[type=checkbox]:checked:active[aria-invalid=true],[type=checkbox]:checked:focus[aria-invalid=true],[type=checkbox]:checked[aria-invalid=true],[type=checkbox][role=switch]:checked:active[aria-invalid=true],[type=checkbox][role=switch]:checked:focus[aria-invalid=true],[type=checkbox][role=switch]:checked[aria-invalid=true],[type=radio]:checked:active[aria-invalid=true],[type=radio]:checked:focus[aria-invalid=true],[type=radio]:checked[aria-invalid=true]{--pico-border-color:var(--pico-form-element-invalid-border-color)}[type=color]::-webkit-color-swatch-wrapper{padding:0}[type=color]::-moz-focus-inner{padding:0}[type=color]::-webkit-color-swatch{border:0;border-radius:calc(var(--pico-border-radius) * .5)}[type=color]::-moz-color-swatch{border:0;border-radius:calc(var(--pico-border-radius) * .5)}input:not([type=checkbox],[type=radio],[type=range],[type=file]):is([type=date],[type=datetime-local],[type=month],[type=time],[type=week]){--pico-icon-position:0.75rem;--pico-icon-width:1rem;padding-right:calc(var(--pico-icon-width) + var(--pico-icon-position));background-image:var(--pico-icon-date);background-position:center right var(--pico-icon-position);background-size:var(--pico-icon-width) 
auto;background-repeat:no-repeat}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=time]{background-image:var(--pico-icon-time)}[type=date]::-webkit-calendar-picker-indicator,[type=datetime-local]::-webkit-calendar-picker-indicator,[type=month]::-webkit-calendar-picker-indicator,[type=time]::-webkit-calendar-picker-indicator,[type=week]::-webkit-calendar-picker-indicator{width:var(--pico-icon-width);margin-right:calc(var(--pico-icon-width) * -1);margin-left:var(--pico-icon-position);opacity:0}@-moz-document url-prefix(){[type=date],[type=datetime-local],[type=month],[type=time],[type=week]{padding-right:var(--pico-form-element-spacing-horizontal)!important;background-image:none!important}}[dir=rtl] :is([type=date],[type=datetime-local],[type=month],[type=time],[type=week]){text-align:right}[type=file]{--pico-color:var(--pico-muted-color);margin-left:calc(var(--pico-outline-width) * -1);padding:calc(var(--pico-form-element-spacing-vertical) * .5) 0;padding-left:var(--pico-outline-width);border:0;border-radius:0;background:0 0}[type=file]::file-selector-button{margin-right:calc(var(--pico-spacing)/ 2);padding:calc(var(--pico-form-element-spacing-vertical) * .5) var(--pico-form-element-spacing-horizontal)}[type=file]:is(:hover,:active,:focus)::file-selector-button{--pico-background-color:var(--pico-secondary-hover-background);--pico-border-color:var(--pico-secondary-hover-border)}[type=file]:focus::file-selector-button{--pico-box-shadow:var(--pico-button-hover-box-shadow, 0 0 0 rgba(0, 0, 0, 0)),0 0 0 var(--pico-outline-width) var(--pico-secondary-focus)}[type=range]{-webkit-appearance:none;-moz-appearance:none;appearance:none;width:100%;height:1.25rem;background:0 0}[type=range]::-webkit-slider-runnable-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-webkit-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-moz-range-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-moz-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-ms-track{width:100%;height:.375rem;border-radius:var(--pico-border-radius);background-color:var(--pico-range-border-color);-ms-transition:background-color var(--pico-transition),box-shadow var(--pico-transition);transition:background-color var(--pico-transition),box-shadow var(--pico-transition)}[type=range]::-webkit-slider-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-webkit-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform var(--pico-transition)}[type=range]::-moz-range-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-moz-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform 
var(--pico-transition)}[type=range]::-ms-thumb{-webkit-appearance:none;width:1.25rem;height:1.25rem;margin-top:-.4375rem;border:2px solid var(--pico-range-thumb-border-color);border-radius:50%;background-color:var(--pico-range-thumb-color);cursor:pointer;-ms-transition:background-color var(--pico-transition),transform var(--pico-transition);transition:background-color var(--pico-transition),transform var(--pico-transition)}[type=range]:active,[type=range]:focus-within{--pico-range-border-color:var(--pico-range-active-border-color);--pico-range-thumb-color:var(--pico-range-thumb-active-color)}[type=range]:active::-webkit-slider-thumb{transform:scale(1.25)}[type=range]:active::-moz-range-thumb{transform:scale(1.25)}[type=range]:active::-ms-thumb{transform:scale(1.25)}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search]{padding-inline-start:calc(var(--pico-form-element-spacing-horizontal) + 1.75rem);background-image:var(--pico-icon-search);background-position:center left calc(var(--pico-form-element-spacing-horizontal) + .125rem);background-size:1rem auto;background-repeat:no-repeat}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid]{padding-inline-start:calc(var(--pico-form-element-spacing-horizontal) + 1.75rem)!important;background-position:center left 1.125rem,center right .75rem}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid=false]{background-image:var(--pico-icon-search),var(--pico-icon-valid)}input:not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid=true]{background-image:var(--pico-icon-search),var(--pico-icon-invalid)}[dir=rtl] :where(input):not([type=checkbox],[type=radio],[type=range],[type=file])[type=search]{background-position:center right 1.125rem}[dir=rtl] :where(input):not([type=checkbox],[type=radio],[type=range],[type=file])[type=search][aria-invalid]{background-position:center right 1.125rem,center left .75rem}details{display:block;margin-bottom:var(--pico-spacing)}details summary{line-height:1rem;list-style-type:none;cursor:pointer;transition:color var(--pico-transition)}details summary:not([role]){color:var(--pico-accordion-close-summary-color)}details summary::-webkit-details-marker{display:none}details summary::marker{display:none}details summary::-moz-list-bullet{list-style-type:none}details summary::after{display:block;width:1rem;height:1rem;margin-inline-start:calc(var(--pico-spacing,1rem) * .5);float:right;transform:rotate(-90deg);background-image:var(--pico-icon-chevron);background-position:right center;background-size:1rem auto;background-repeat:no-repeat;content:"";transition:transform var(--pico-transition)}details summary:focus{outline:0}details summary:focus:not([role]){color:var(--pico-accordion-active-summary-color)}details summary:focus-visible:not([role]){outline:var(--pico-outline-width) solid var(--pico-primary-focus);outline-offset:calc(var(--pico-spacing,1rem) * 0.5);color:var(--pico-primary)}details summary[role=button]{width:100%;text-align:left}details summary[role=button]::after{height:calc(1rem * var(--pico-line-height,1.5))}details[open]>summary{margin-bottom:var(--pico-spacing)}details[open]>summary:not([role]):not(:focus){color:var(--pico-accordion-open-summary-color)}details[open]>summary::after{transform:rotate(0)}[dir=rtl] details summary{text-align:right}[dir=rtl] details summary::after{float:left;background-position:left 
center}article{margin-bottom:var(--pico-block-spacing-vertical);padding:var(--pico-block-spacing-vertical) var(--pico-block-spacing-horizontal);border-radius:var(--pico-border-radius);background:var(--pico-card-background-color);box-shadow:var(--pico-card-box-shadow)}article>footer,article>header{margin-right:calc(var(--pico-block-spacing-horizontal) * -1);margin-left:calc(var(--pico-block-spacing-horizontal) * -1);padding:calc(var(--pico-block-spacing-vertical) * .66) var(--pico-block-spacing-horizontal);background-color:var(--pico-card-sectioning-background-color)}article>header{margin-top:calc(var(--pico-block-spacing-vertical) * -1);margin-bottom:var(--pico-block-spacing-vertical);border-bottom:var(--pico-border-width) solid var(--pico-card-border-color);border-top-right-radius:var(--pico-border-radius);border-top-left-radius:var(--pico-border-radius)}article>footer{margin-top:var(--pico-block-spacing-vertical);margin-bottom:calc(var(--pico-block-spacing-vertical) * -1);border-top:var(--pico-border-width) solid var(--pico-card-border-color);border-bottom-right-radius:var(--pico-border-radius);border-bottom-left-radius:var(--pico-border-radius)}[role=group],[role=search]{display:inline-flex;position:relative;width:100%;margin-bottom:var(--pico-spacing);border-radius:var(--pico-border-radius);box-shadow:var(--pico-group-box-shadow,0 0 0 transparent);vertical-align:middle;transition:box-shadow var(--pico-transition)}[role=group] input:not([type=checkbox],[type=radio]),[role=group] select,[role=group]>*,[role=search] input:not([type=checkbox],[type=radio]),[role=search] select,[role=search]>*{position:relative;flex:1 1 auto;margin-bottom:0}[role=group] input:not([type=checkbox],[type=radio]):not(:first-child),[role=group] select:not(:first-child),[role=group]>:not(:first-child),[role=search] input:not([type=checkbox],[type=radio]):not(:first-child),[role=search] select:not(:first-child),[role=search]>:not(:first-child){margin-left:0;border-top-left-radius:0;border-bottom-left-radius:0}[role=group] input:not([type=checkbox],[type=radio]):not(:last-child),[role=group] select:not(:last-child),[role=group]>:not(:last-child),[role=search] input:not([type=checkbox],[type=radio]):not(:last-child),[role=search] select:not(:last-child),[role=search]>:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}[role=group] input:not([type=checkbox],[type=radio]):focus,[role=group] select:focus,[role=group]>:focus,[role=search] input:not([type=checkbox],[type=radio]):focus,[role=search] select:focus,[role=search]>:focus{z-index:2}[role=group] [role=button]:not(:first-child),[role=group] [type=button]:not(:first-child),[role=group] [type=reset]:not(:first-child),[role=group] [type=submit]:not(:first-child),[role=group] button:not(:first-child),[role=group] input:not([type=checkbox],[type=radio]):not(:first-child),[role=group] select:not(:first-child),[role=search] [role=button]:not(:first-child),[role=search] [type=button]:not(:first-child),[role=search] [type=reset]:not(:first-child),[role=search] [type=submit]:not(:first-child),[role=search] button:not(:first-child),[role=search] input:not([type=checkbox],[type=radio]):not(:first-child),[role=search] select:not(:first-child){margin-left:calc(var(--pico-border-width) * -1)}[role=group] [role=button],[role=group] [type=button],[role=group] [type=reset],[role=group] [type=submit],[role=group] button,[role=search] [role=button],[role=search] [type=button],[role=search] [type=reset],[role=search] [type=submit],[role=search] 
button{width:auto}@supports selector(:has(*)){[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus),[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus){--pico-group-box-shadow:var(--pico-group-box-shadow-focus-with-button)}[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) input:not([type=checkbox],[type=radio]),[role=group]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) select,[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) input:not([type=checkbox],[type=radio]),[role=search]:has(button:focus,[type=submit]:focus,[type=button]:focus,[role=button]:focus) select{border-color:transparent}[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus),[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus){--pico-group-box-shadow:var(--pico-group-box-shadow-focus-with-input)}[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [role=button],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=button],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=submit],[role=group]:has(input:not([type=submit],[type=button]):focus,select:focus) button,[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [role=button],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=button],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) [type=submit],[role=search]:has(input:not([type=submit],[type=button]):focus,select:focus) button{--pico-button-box-shadow:0 0 0 var(--pico-border-width) var(--pico-primary-border);--pico-button-hover-box-shadow:0 0 0 var(--pico-border-width) var(--pico-primary-hover-border)}[role=group] [role=button]:focus,[role=group] [type=button]:focus,[role=group] [type=reset]:focus,[role=group] [type=submit]:focus,[role=group] button:focus,[role=search] [role=button]:focus,[role=search] [type=button]:focus,[role=search] [type=reset]:focus,[role=search] [type=submit]:focus,[role=search] button:focus{box-shadow:none}}[role=search]>:first-child{border-top-left-radius:5rem;border-bottom-left-radius:5rem}[role=search]>:last-child{border-top-right-radius:5rem;border-bottom-right-radius:5rem}[aria-busy=true]:not(input,select,textarea,html,form){white-space:nowrap}[aria-busy=true]:not(input,select,textarea,html,form)::before{display:inline-block;width:1em;height:1em;background-image:var(--pico-icon-loading);background-size:1em auto;background-repeat:no-repeat;content:"";vertical-align:-.125em}[aria-busy=true]:not(input,select,textarea,html,form):not(:empty)::before{margin-inline-end:calc(var(--pico-spacing) * 
.5)}[aria-busy=true]:not(input,select,textarea,html,form):empty{text-align:center}[role=button][aria-busy=true],[type=button][aria-busy=true],[type=reset][aria-busy=true],[type=submit][aria-busy=true],a[aria-busy=true],button[aria-busy=true]{pointer-events:none}:host,:root{--pico-scrollbar-width:0px}dialog{display:flex;z-index:999;position:fixed;top:0;right:0;bottom:0;left:0;align-items:center;justify-content:center;width:inherit;min-width:100%;height:inherit;min-height:100%;padding:0;border:0;-webkit-backdrop-filter:var(--pico-modal-overlay-backdrop-filter);backdrop-filter:var(--pico-modal-overlay-backdrop-filter);background-color:var(--pico-modal-overlay-background-color);color:var(--pico-color)}dialog>article{width:100%;max-height:calc(100vh - var(--pico-spacing) * 2);margin:var(--pico-spacing);overflow:auto}@media (min-width:576px){dialog>article{max-width:510px}}@media (min-width:768px){dialog>article{max-width:700px}}dialog>article>header>*{margin-bottom:0}dialog>article>header :is(a,button)[rel=prev]{margin:0;margin-left:var(--pico-spacing);padding:0;float:right}dialog>article>footer{text-align:right}dialog>article>footer [role=button],dialog>article>footer button{margin-bottom:0}dialog>article>footer [role=button]:not(:first-of-type),dialog>article>footer button:not(:first-of-type){margin-left:calc(var(--pico-spacing) * .5)}dialog>article :is(a,button)[rel=prev]{display:block;width:1rem;height:1rem;margin-top:calc(var(--pico-spacing) * -1);margin-bottom:var(--pico-spacing);margin-left:auto;border:none;background-image:var(--pico-icon-close);background-position:center;background-size:auto 1rem;background-repeat:no-repeat;background-color:transparent;opacity:.5;transition:opacity var(--pico-transition)}dialog>article :is(a,button)[rel=prev]:is([aria-current]:not([aria-current=false]),:hover,:active,:focus){opacity:1}dialog:not([open]),dialog[open=false]{display:none}:where(nav li)::before{float:left;content:"\200b"}nav,nav ul{display:flex}nav{justify-content:space-between;overflow:visible}nav ol,nav ul{align-items:center;margin-bottom:0;padding:0;list-style:none}nav ol:first-of-type,nav ul:first-of-type{margin-left:calc(var(--pico-nav-element-spacing-horizontal) * -1)}nav ol:last-of-type,nav ul:last-of-type{margin-right:calc(var(--pico-nav-element-spacing-horizontal) * -1)}nav li{display:inline-block;margin:0;padding:var(--pico-nav-element-spacing-vertical) var(--pico-nav-element-spacing-horizontal)}nav li :where(a,[role=link]){display:inline-block;margin:calc(var(--pico-nav-link-spacing-vertical) * -1) calc(var(--pico-nav-link-spacing-horizontal) * -1);padding:var(--pico-nav-link-spacing-vertical) var(--pico-nav-link-spacing-horizontal);border-radius:var(--pico-border-radius)}nav li :where(a,[role=link]):not(:hover){text-decoration:none}nav li [role=button],nav li [type=button],nav li button,nav li input:not([type=checkbox],[type=radio],[type=range],[type=file]),nav li select{height:auto;margin-right:inherit;margin-bottom:0;margin-left:inherit;padding:calc(var(--pico-nav-link-spacing-vertical) - var(--pico-border-width) * 2) var(--pico-nav-link-spacing-horizontal)}nav[aria-label=breadcrumb]{align-items:center;justify-content:start}nav[aria-label=breadcrumb] ul li:not(:first-child){margin-inline-start:var(--pico-nav-link-spacing-horizontal)}nav[aria-label=breadcrumb] ul li a{margin:calc(var(--pico-nav-link-spacing-vertical) * -1) 0;margin-inline-start:calc(var(--pico-nav-link-spacing-horizontal) * -1)}nav[aria-label=breadcrumb] ul 
li:not(:last-child)::after{display:inline-block;position:absolute;width:calc(var(--pico-nav-link-spacing-horizontal) * 4);margin:0 calc(var(--pico-nav-link-spacing-horizontal) * -1);content:var(--pico-nav-breadcrumb-divider);color:var(--pico-muted-color);text-align:center;text-decoration:none;white-space:nowrap}nav[aria-label=breadcrumb] a[aria-current]:not([aria-current=false]){background-color:transparent;color:inherit;text-decoration:none;pointer-events:none}aside li,aside nav,aside ol,aside ul{display:block}aside li{padding:calc(var(--pico-nav-element-spacing-vertical) * .5) var(--pico-nav-element-spacing-horizontal)}aside li a{display:block}aside li [role=button]{margin:inherit}[dir=rtl] nav[aria-label=breadcrumb] ul li:not(:last-child) ::after{content:"\\"}progress{display:inline-block;vertical-align:baseline}progress{-webkit-appearance:none;-moz-appearance:none;display:inline-block;appearance:none;width:100%;height:.5rem;margin-bottom:calc(var(--pico-spacing) * .5);overflow:hidden;border:0;border-radius:var(--pico-border-radius);background-color:var(--pico-progress-background-color);color:var(--pico-progress-color)}progress::-webkit-progress-bar{border-radius:var(--pico-border-radius);background:0 0}progress[value]::-webkit-progress-value{background-color:var(--pico-progress-color);-webkit-transition:inline-size var(--pico-transition);transition:inline-size var(--pico-transition)}progress::-moz-progress-bar{background-color:var(--pico-progress-color)}@media (prefers-reduced-motion:no-preference){progress:indeterminate{background:var(--pico-progress-background-color) linear-gradient(to right,var(--pico-progress-color) 30%,var(--pico-progress-background-color) 30%) top left/150% 150% no-repeat;animation:progress-indeterminate 1s linear infinite}progress:indeterminate[value]::-webkit-progress-value{background-color:transparent}progress:indeterminate::-moz-progress-bar{background-color:transparent}}@media (prefers-reduced-motion:no-preference){[dir=rtl] progress:indeterminate{animation-direction:reverse}}@keyframes progress-indeterminate{0%{background-position:200% 0}100%{background-position:-200% 0}}[data-tooltip]{position:relative}[data-tooltip]:not(a,button,input,[role=button]){border-bottom:1px dotted;text-decoration:none;cursor:help}[data-tooltip]::after,[data-tooltip]::before,[data-tooltip][data-placement=top]::after,[data-tooltip][data-placement=top]::before{display:block;z-index:99;position:absolute;bottom:100%;left:50%;padding:.25rem .5rem;overflow:hidden;transform:translate(-50%,-.25rem);border-radius:var(--pico-border-radius);background:var(--pico-tooltip-background-color);content:attr(data-tooltip);color:var(--pico-tooltip-color);font-style:normal;font-weight:var(--pico-font-weight);font-size:.875rem;text-decoration:none;text-overflow:ellipsis;white-space:nowrap;opacity:0;pointer-events:none}[data-tooltip]::after,[data-tooltip][data-placement=top]::after{padding:0;transform:translate(-50%,0);border-top:.3rem solid;border-right:.3rem solid transparent;border-left:.3rem solid transparent;border-radius:0;background-color:transparent;content:"";color:var(--pico-tooltip-background-color)}[data-tooltip][data-placement=bottom]::after,[data-tooltip][data-placement=bottom]::before{top:100%;bottom:auto;transform:translate(-50%,.25rem)}[data-tooltip][data-placement=bottom]:after{transform:translate(-50%,-.3rem);border:.3rem solid transparent;border-bottom:.3rem 
solid}[data-tooltip][data-placement=left]::after,[data-tooltip][data-placement=left]::before{top:50%;right:100%;bottom:auto;left:auto;transform:translate(-.25rem,-50%)}[data-tooltip][data-placement=left]:after{transform:translate(.3rem,-50%);border:.3rem solid transparent;border-left:.3rem solid}[data-tooltip][data-placement=right]::after,[data-tooltip][data-placement=right]::before{top:50%;right:auto;bottom:auto;left:100%;transform:translate(.25rem,-50%)}[data-tooltip][data-placement=right]:after{transform:translate(-.3rem,-50%);border:.3rem solid transparent;border-right:.3rem solid}[data-tooltip]:focus::after,[data-tooltip]:focus::before,[data-tooltip]:hover::after,[data-tooltip]:hover::before{opacity:1}@media (hover:hover) and (pointer:fine){[data-tooltip]:focus::after,[data-tooltip]:focus::before,[data-tooltip]:hover::after,[data-tooltip]:hover::before{--pico-tooltip-slide-to:translate(-50%, -0.25rem);transform:translate(-50%,.75rem);animation-duration:.2s;animation-fill-mode:forwards;animation-name:tooltip-slide;opacity:0}[data-tooltip]:focus::after,[data-tooltip]:hover::after{--pico-tooltip-caret-slide-to:translate(-50%, 0rem);transform:translate(-50%,-.25rem);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=bottom]:focus::after,[data-tooltip][data-placement=bottom]:focus::before,[data-tooltip][data-placement=bottom]:hover::after,[data-tooltip][data-placement=bottom]:hover::before{--pico-tooltip-slide-to:translate(-50%, 0.25rem);transform:translate(-50%,-.75rem);animation-name:tooltip-slide}[data-tooltip][data-placement=bottom]:focus::after,[data-tooltip][data-placement=bottom]:hover::after{--pico-tooltip-caret-slide-to:translate(-50%, -0.3rem);transform:translate(-50%,-.5rem);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=left]:focus::after,[data-tooltip][data-placement=left]:focus::before,[data-tooltip][data-placement=left]:hover::after,[data-tooltip][data-placement=left]:hover::before{--pico-tooltip-slide-to:translate(-0.25rem, -50%);transform:translate(.75rem,-50%);animation-name:tooltip-slide}[data-tooltip][data-placement=left]:focus::after,[data-tooltip][data-placement=left]:hover::after{--pico-tooltip-caret-slide-to:translate(0.3rem, -50%);transform:translate(.05rem,-50%);animation-name:tooltip-caret-slide}[data-tooltip][data-placement=right]:focus::after,[data-tooltip][data-placement=right]:focus::before,[data-tooltip][data-placement=right]:hover::after,[data-tooltip][data-placement=right]:hover::before{--pico-tooltip-slide-to:translate(0.25rem, -50%);transform:translate(-.75rem,-50%);animation-name:tooltip-slide}[data-tooltip][data-placement=right]:focus::after,[data-tooltip][data-placement=right]:hover::after{--pico-tooltip-caret-slide-to:translate(-0.3rem, -50%);transform:translate(-.05rem,-50%);animation-name:tooltip-caret-slide}}@keyframes tooltip-slide{to{transform:var(--pico-tooltip-slide-to);opacity:1}}@keyframes tooltip-caret-slide{50%{opacity:0}to{transform:var(--pico-tooltip-caret-slide-to);opacity:1}}[aria-controls]{cursor:pointer}[aria-disabled=true],[disabled]{cursor:not-allowed}[aria-hidden=false][hidden]{display:initial}[aria-hidden=false][hidden]:not(:focus){clip:rect(0,0,0,0);position:absolute}[tabindex],a,area,button,input,label,select,summary,textarea{-ms-touch-action:manipulation}[dir=rtl]{direction:rtl}@media 
(prefers-reduced-motion:reduce){:not([aria-busy=true]),:not([aria-busy=true])::after,:not([aria-busy=true])::before{background-attachment:initial!important;animation-duration:1ms!important;animation-delay:-1ms!important;animation-iteration-count:1!important;scroll-behavior:auto!important;transition-delay:0s!important;transition-duration:0s!important}}
+224
www/index.html
+224
www/index.html
···
1
+
<!doctype html>
2
+
<html lang="en">
3
+
4
+
<head>
5
+
<meta charset="utf-8">
6
+
<meta name="viewport" content="width=device-width, initial-scale=1">
7
+
<meta name="color-scheme" content="light dark">
8
+
9
+
<!-- Primary Meta Tags -->
10
+
<title>QuickDID - AT Protocol Identity Resolution Service</title>
11
+
<meta name="title" content="QuickDID - AT Protocol Identity Resolution Service">
12
+
<meta name="description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
13
+
<meta name="keywords" content="ATProtocol, Bluesky, DID, handle resolution, decentralized identity, atproto">
14
+
<meta name="author" content="Nick Gerakines">
15
+
16
+
<!-- Open Graph / Facebook -->
17
+
<meta property="og:type" content="website">
18
+
<meta property="og:url" content="https://quickdid.smokesignal.tools/">
19
+
<meta property="og:title" content="QuickDID - AT Protocol Identity Resolution Service">
20
+
<meta property="og:description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
21
+
<meta property="og:site_name" content="QuickDID">
22
+
23
+
<!-- Twitter -->
24
+
<meta property="twitter:card" content="summary_large_image">
25
+
<meta property="twitter:url" content="https://quickdid.smokesignal.tools/">
26
+
<meta property="twitter:title" content="QuickDID - AT Protocol Identity Resolution Service">
27
+
<meta property="twitter:description" content="High-performance handle-to-DID resolution service for the AT Protocol ecosystem. Resolve Bluesky and AT Protocol handles instantly.">
28
+
29
+
<!-- Additional Meta Tags -->
30
+
<meta name="robots" content="index, follow">
31
+
<meta name="language" content="English">
32
+
<meta name="theme-color" content="#1976d2">
33
+
<link rel="canonical" href="https://quickdid.smokesignal.tools/">
34
+
35
+
<!-- Stylesheet -->
36
+
<link rel="stylesheet" href="/css/pico.classless.green.min.css">
37
+
<style>
38
+
.resolver-form {
39
+
margin: 2rem 0;
40
+
padding: 1.5rem;
41
+
background: var(--pico-card-background-color);
42
+
border-radius: var(--pico-border-radius);
43
+
border: 1px solid var(--pico-muted-border-color);
44
+
}
45
+
46
+
.resolver-result {
47
+
margin-top: 1rem;
48
+
padding: 1.5rem;
49
+
background: var(--pico-code-background-color);
50
+
border-radius: var(--pico-border-radius);
51
+
border: 1px solid var(--pico-muted-border-color);
52
+
}
53
+
54
+
.result-content {
55
+
background: transparent;
56
+
padding: 1rem;
57
+
overflow-x: auto;
58
+
white-space: pre-wrap;
59
+
word-break: break-word;
60
+
}
61
+
62
+
code {
63
+
padding: 0.25rem 0.5rem;
64
+
background: var(--pico-code-background-color);
65
+
border-radius: var(--pico-border-radius);
66
+
}
67
+
68
+
span {
69
+
display: inline-block;
70
+
padding: 0.25rem 0.5rem;
71
+
background: var(--pico-primary);
72
+
color: var(--pico-primary-inverse);
73
+
border-radius: var(--pico-border-radius);
74
+
font-size: 0.875rem;
75
+
font-weight: bold;
76
+
margin-right: 0.5rem;
77
+
}
78
+
79
+
.endpoint-section {
80
+
margin-bottom: 3rem;
81
+
}
82
+
</style>
83
+
</head>
84
+
85
+
<body>
86
+
<header>
87
+
<hgroup>
88
+
<h1>QuickDID</h1>
89
+
<p>AT Protocol Identity Resolution Service</p>
90
+
</hgroup>
91
+
</header>
92
+
<main>
93
+
<p>QuickDID provides high-performance resolution services for the AT Protocol ecosystem.</p>
94
+
95
+
<h2>Available Endpoints</h2>
96
+
97
+
<section class="endpoint-section">
98
+
<h3>GET /xrpc/com.atproto.identity.resolveHandle</h3>
99
+
<p>Resolve an AT Protocol handle to its DID</p>
100
+
<p>Parameters: <code>?handle={handle}</code></p>
101
+
102
+
<h4>Try It Out</h4>
103
+
<form id="handleResolveForm" class="resolver-form">
104
+
<label for="handleInput">
105
+
Enter an AT Protocol handle to resolve:
106
+
<input type="text" id="handleInput" name="handle" placeholder="e.g., alice.bsky.social" required>
107
+
</label>
108
+
<button type="submit">Resolve Handle</button>
109
+
</form>
110
+
111
+
<div id="handleResult" class="resolver-result" style="display: none;">
112
+
<h4>Result</h4>
113
+
<pre id="handleResultContent" class="result-content"></pre>
114
+
</div>
115
+
116
+
<h4>Example Usage</h4>
117
+
<code>curl "https://quickdid.smokesignal.tools/xrpc/com.atproto.identity.resolveHandle?handle=ngerakines.me"</code>
118
+
</section>
119
+
120
+
<section class="endpoint-section">
121
+
<h3>GET /xrpc/com.atproto.lexicon.resolveLexicon</h3>
122
+
<p>Resolve an AT Protocol lexicon (NSID) to its schema</p>
123
+
<p>Parameters: <code>?nsid={nsid}</code></p>
124
+
125
+
<h4>Try It Out</h4>
126
+
<form id="lexiconResolveForm" class="resolver-form">
127
+
<label for="nsidInput">
128
+
Enter an AT Protocol NSID to resolve:
129
+
<input type="text" id="nsidInput" name="nsid" placeholder="e.g., app.bsky.feed.post" required>
130
+
</label>
131
+
<button type="submit">Resolve Lexicon</button>
132
+
</form>
133
+
134
+
<div id="lexiconResult" class="resolver-result" style="display: none;">
135
+
<h4>Result</h4>
136
+
<pre id="lexiconResultContent" class="result-content"></pre>
137
+
</div>
138
+
139
+
<h4>Example Usage</h4>
140
+
<code>curl "https://quickdid.smokesignal.tools/xrpc/com.atproto.lexicon.resolveLexicon?nsid=app.bsky.feed.post"</code>
141
+
</section>
142
+
143
+
<h2>Documentation</h2>
144
+
<p>
145
+
For more information, visit the
146
+
<a href="https://tangled.sh/@smokesignal.events/quickdid" target="_blank">
147
+
QuickDID repository
148
+
</a>.
149
+
150
+
</p>
151
+
</main>
152
+
153
+
<script>
154
+
// Handle form submission for handle resolution
155
+
document.getElementById('handleResolveForm').addEventListener('submit', async (e) => {
156
+
e.preventDefault();
157
+
158
+
const handle = document.getElementById('handleInput').value.trim();
159
+
const resultDiv = document.getElementById('handleResult');
160
+
const resultContent = document.getElementById('handleResultContent');
161
+
162
+
// Show loading state
163
+
resultDiv.style.display = 'block';
164
+
resultContent.textContent = 'Loading...';
165
+
166
+
try {
167
+
// Build the request URL
168
+
const url = `/xrpc/com.atproto.identity.resolveHandle?handle=${encodeURIComponent(handle)}`;
169
+
170
+
// Make the GET request
171
+
const response = await fetch(url);
172
+
const data = await response.json();
173
+
174
+
// Display the result
175
+
if (response.ok) {
176
+
resultContent.textContent = JSON.stringify(data, null, 2);
177
+
resultContent.style.color = '';
178
+
} else {
179
+
resultContent.textContent = `Error: ${JSON.stringify(data, null, 2)}`;
180
+
resultContent.style.color = '#d32f2f';
181
+
}
182
+
} catch (error) {
183
+
resultContent.textContent = `Network Error: ${error.message}`;
184
+
resultContent.style.color = '#d32f2f';
185
+
}
186
+
});
187
+
188
+
// Handle form submission for lexicon resolution
189
+
document.getElementById('lexiconResolveForm').addEventListener('submit', async (e) => {
190
+
e.preventDefault();
191
+
192
+
const nsid = document.getElementById('nsidInput').value.trim();
193
+
const resultDiv = document.getElementById('lexiconResult');
194
+
const resultContent = document.getElementById('lexiconResultContent');
195
+
196
+
// Show loading state
197
+
resultDiv.style.display = 'block';
198
+
resultContent.textContent = 'Loading...';
199
+
200
+
try {
201
+
// Build the request URL
202
+
const url = `/xrpc/com.atproto.lexicon.resolveLexicon?nsid=${encodeURIComponent(nsid)}`;
203
+
204
+
// Make the GET request
205
+
const response = await fetch(url);
206
+
const data = await response.json();
207
+
208
+
// Display the result
209
+
if (response.ok) {
210
+
resultContent.textContent = JSON.stringify(data, null, 2);
211
+
resultContent.style.color = '';
212
+
} else {
213
+
resultContent.textContent = `Error: ${JSON.stringify(data, null, 2)}`;
214
+
resultContent.style.color = '#d32f2f';
215
+
}
216
+
} catch (error) {
217
+
resultContent.textContent = `Network Error: ${error.message}`;
218
+
resultContent.style.color = '#d32f2f';
219
+
}
220
+
});
221
+
</script>
222
+
</body>
223
+
224
+
</html>
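
Both endpoint sections above pair an interactive form with a curl one-liner. For programmatic use, the same calls work from any HTTP client; below is a minimal TypeScript sketch of what the page's inline script does. The base URL and the `{ did }` response shape are assumptions here: the former mirrors the curl examples above, the latter follows the published com.atproto.identity.resolveHandle lexicon, and the resolveLexicon response is left untyped since the page only describes it as the lexicon's schema.

// Minimal client sketch for the two XRPC endpoints documented above.
// Assumptions (not from the page): BASE mirrors the curl examples, and
// resolveHandle returns `{ did: string }` per the com.atproto.identity
// lexicon. The resolveLexicon response is deliberately left untyped.
const BASE = "https://quickdid.smokesignal.tools";

async function xrpcGet(method: string, params: Record<string, string>): Promise<unknown> {
  const query = new URLSearchParams(params);
  const res = await fetch(`${BASE}/xrpc/${method}?${query}`);
  if (!res.ok) {
    // Surface the error body verbatim rather than assuming its shape.
    throw new Error(`${method} failed (${res.status}): ${await res.text()}`);
  }
  return res.json();
}

const { did } = (await xrpcGet("com.atproto.identity.resolveHandle", {
  handle: "ngerakines.me",
})) as { did: string };
console.log(did); // e.g. a did:plc:... identifier

const schema = await xrpcGet("com.atproto.lexicon.resolveLexicon", {
  nsid: "app.bsky.feed.post",
});
console.log(JSON.stringify(schema, null, 2));

Any runtime with a global fetch (Node 18+, Deno, Bun, or a browser) can run this as an ES module; no dependencies are required.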