docs: update env docs for docket and suppressed loggers (#535)

* docs: update env docs for docket and suppressed loggers

- fly.toml: update secrets comments with DOCKET_URL, remove stale entries
- fly.staging.toml: add secrets comments (was missing)
- .env.example: add DOCKET_* and LOGFIRE_SUPPRESSED_LOGGERS settings
- configuration.md: document suppressed_loggers setting

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

* feat: add docket_runs.py script to check task status

standalone uv script to inspect docket runs in redis:
- works with local, staging, or production environments
- no SSH required - connects directly to redis

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>

---------

Co-authored-by: Claude <noreply@anthropic.com>

Authored by zzstoatzz.io's Claude and committed by GitHub (commits b7287f2e, c498199d)

Changed files
+115 -8
backend
docs
scripts
+6
backend/.env.example
··· 49 49 # LOGFIRE_ENABLED=false 50 50 # LOGFIRE_WRITE_TOKEN= 51 51 # LOGFIRE_ENVIRONMENT=local 52 + # LOGFIRE_SUPPRESSED_LOGGERS=docket # comma-separated logger names to suppress 52 53 53 54 # notifications (optional - bluesky DMs) 54 55 # NOTIFY_ENABLED=false 55 56 # NOTIFY_RECIPIENT_HANDLE= 56 57 # NOTIFY_BOT_HANDLE= 57 58 # NOTIFY_BOT_PASSWORD= 59 + 60 + # background tasks (optional - docket/redis) 61 + # DOCKET_URL=redis://localhost:6379 # enables docket; omit for asyncio fallback 62 + # DOCKET_NAME=plyr # queue namespace (default: plyr) 63 + # DOCKET_WORKER_CONCURRENCY=10 # concurrent task limit (default: 10)
+8 -1
backend/fly.staging.toml
··· 37 37 [[vm]] 38 38 memory = '1gb' 39 39 cpu_kind = 'shared' 40 - cpus = 1 40 + cpus = 1 41 + 42 + # secrets to set via: fly secrets set KEY=value -a relay-api-staging 43 + # - DATABASE_URL (neon postgres connection string) 44 + # - AWS_ACCESS_KEY_ID (cloudflare R2) 45 + # - AWS_SECRET_ACCESS_KEY (cloudflare R2) 46 + # - OAUTH_ENCRYPTION_KEY (generate: python -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())') 47 + # - DOCKET_URL (upstash redis: rediss://default:xxx@xxx.upstash.io:6379)
+6 -7
backend/fly.toml
··· 34 34 R2_PUBLIC_BUCKET_URL = 'https://pub-d4ed8a1e39d44dac85263d86ad5676fd.r2.dev' 35 35 ATPROTO_PDS_URL = 'https://pds.zzstoatzz.io' 36 36 37 - # secrets to set via: fly secrets set KEY=value 38 - # - DATABASE_URL 39 - # - AWS_ACCESS_KEY_ID 40 - # - AWS_SECRET_ACCESS_KEY 41 - # - ATPROTO_CLIENT_ID (will be https://api.plyr.fm/oauth-client-metadata.json after deployment) 42 - # - ATPROTO_REDIRECT_URI (will be https://api.plyr.fm/auth/callback after deployment) 43 - # - OAUTH_ENCRYPTION_KEY (44-character base64 Fernet key, generate with: python -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())') 37 + # secrets to set via: fly secrets set KEY=value -a relay-api 38 + # - DATABASE_URL (neon postgres connection string) 39 + # - AWS_ACCESS_KEY_ID (cloudflare R2) 40 + # - AWS_SECRET_ACCESS_KEY (cloudflare R2) 41 + # - OAUTH_ENCRYPTION_KEY (generate: python -c 'from cryptography.fernet import Fernet; print(Fernet.generate_key().decode())') 42 + # - DOCKET_URL (upstash redis: rediss://default:xxx@xxx.upstash.io:6379)
+2
docs/backend/configuration.md
··· 51 51 settings.observability.enabled # from LOGFIRE_ENABLED 52 52 settings.observability.write_token # from LOGFIRE_WRITE_TOKEN 53 53 settings.observability.environment # from LOGFIRE_ENVIRONMENT 54 + settings.observability.suppressed_loggers # from LOGFIRE_SUPPRESSED_LOGGERS (default: {"docket"}) 54 55 55 56 # notification settings 56 57 settings.notify.enabled # from NOTIFY_ENABLED ··· 101 102 # observability 102 103 LOGFIRE_ENABLED=true 103 104 LOGFIRE_WRITE_TOKEN=pylf_xxx 105 + LOGFIRE_SUPPRESSED_LOGGERS=docket # comma-separated, suppress noisy loggers 104 106 105 107 # notifications (bluesky DMs) 106 108 NOTIFY_ENABLED=true
+93
scripts/docket_runs.py
#!/usr/bin/env -S uv run --script
# /// script
# requires-python = ">=3.11"
# dependencies = ["redis"]
# ///
"""check recent docket task runs.

usage:
    ./scripts/docket_runs.py                    # uses DOCKET_URL from env
    ./scripts/docket_runs.py --env staging      # uses staging redis
    ./scripts/docket_runs.py --env production   # uses production redis
    ./scripts/docket_runs.py --limit 20         # show more runs
    ./scripts/docket_runs.py --name other       # non-default DOCKET_NAME namespace
"""

import argparse
import os

import redis


def _resolve_url(env: str) -> str | None:
    """Return the redis URL for *env*, or None (after printing a hint) if unset.

    local falls back to redis://localhost:6379; staging/production require
    DOCKET_URL_STAGING / DOCKET_URL_PRODUCTION respectively.
    """
    if env == "local":
        return os.environ.get("DOCKET_URL", "redis://localhost:6379")
    var = f"DOCKET_URL_{env.upper()}"
    url = os.environ.get(var)
    if not url:
        print(f"error: {var} not set")
        print(f"hint: export {var}=rediss://default:xxx@xxx.upstash.io:6379")
    return url


def _print_run(key: bytes, data: dict[bytes, bytes]) -> None:
    """Render one docket run hash as a single summary line."""
    # run id is the last segment of the key, e.g. "plyr:runs:<id>"
    run_id = key.decode().split(":")[-1]

    # extract fields safely — any of them may be absent from the hash
    function = data.get(b"function", b"?").decode()
    state = data.get(b"state", b"?").decode()
    # [:19] trims ISO timestamps to second precision; empty value -> placeholder
    started = data.get(b"started_at", b"").decode()[:19] or "?"
    completed = data.get(b"completed_at", b"").decode()[:19] or "-"

    # state emoji
    emoji = {"completed": "✓", "failed": "✗", "running": "⋯"}.get(state, "?")

    print(f"{emoji} {run_id[:8]} {function:<20} {state:<10} {started} → {completed}")


def main() -> int:
    """Connect to the selected redis and print the most recent docket runs."""
    parser = argparse.ArgumentParser(description="check docket task runs")
    parser.add_argument(
        "--env",
        choices=["local", "staging", "production"],
        default="local",
        help="environment to check (default: local, uses DOCKET_URL)",
    )
    parser.add_argument(
        "--limit", type=int, default=10, help="number of runs to show (default: 10)"
    )
    parser.add_argument(
        "--name",
        default="plyr",
        help="docket queue namespace / DOCKET_NAME (default: plyr)",
    )
    args = parser.parse_args()

    url = _resolve_url(args.env)
    if not url:
        return 1

    print(f"connecting to {args.env}...")
    r = redis.from_url(url)
    try:
        # SCAN instead of KEYS: KEYS blocks the server on large/shared
        # instances (e.g. upstash), scan_iter pages through cursors instead
        keys = list(r.scan_iter(f"{args.name}:runs:*"))
        if not keys:
            print("no runs found")
            return 0

        print(f"found {len(keys)} total runs, showing last {args.limit}:\n")

        # lexicographically-descending key order approximates newest-first
        for key in sorted(keys, reverse=True)[: args.limit]:
            _print_run(key, r.hgetall(key))
        return 0
    finally:
        # always release the connection, even on error
        r.close()


if __name__ == "__main__":
    raise SystemExit(main())