A container registry that uses the AT Protocol for manifest storage and S3 for blob storage. atcr.io
docker container atproto go

Compare changes

Choose any two refs to compare.

+8481 -5633
+1 -36
CLAUDE.md
··· 475 475 476 476 Read access: 477 477 - **Public hold** (`HOLD_PUBLIC=true`): Anonymous + all authenticated users 478 - - **Private hold** (`HOLD_PUBLIC=false`): Requires authentication + crew membership with blob:read OR blob:write permission 479 - - **Note:** `blob:write` implicitly grants `blob:read` access (can't push without pulling) 478 + - **Private hold** (`HOLD_PUBLIC=false`): Requires authentication + crew membership with blob:read permission 480 479 481 480 Write access: 482 481 - Hold owner OR crew members with blob:write permission 483 482 - Verified via `io.atcr.hold.crew` records in hold's embedded PDS 484 - 485 - **Permission Matrix:** 486 - 487 - | User Type | Public Read | Private Read | Write | Crew Admin | 488 - |-----------|-------------|--------------|-------|------------| 489 - | Anonymous | Yes | No | No | No | 490 - | Owner (captain) | Yes | Yes | Yes | Yes (implied) | 491 - | Crew (blob:read only) | Yes | Yes | No | No | 492 - | Crew (blob:write only) | Yes | Yes* | Yes | No | 493 - | Crew (blob:read + blob:write) | Yes | Yes | Yes | No | 494 - | Crew (crew:admin) | Yes | Yes | Yes | Yes | 495 - | Authenticated non-crew | Yes | No | No | No | 496 - 497 - *`blob:write` implicitly grants `blob:read` access 498 - 499 - **Authorization Error Format:** 500 - 501 - All authorization failures use consistent structured errors (`pkg/hold/pds/auth.go`): 502 - ``` 503 - access denied for [action]: [reason] (required: [permission(s)]) 504 - ``` 505 - 506 - Examples: 507 - - `access denied for blob:read: user is not a crew member (required: blob:read or blob:write)` 508 - - `access denied for blob:write: crew member lacks permission (required: blob:write)` 509 - - `access denied for crew:admin: user is not a crew member (required: crew:admin)` 510 - 511 - **Shared Error Constants** (`pkg/hold/pds/auth.go`): 512 - - `ErrMissingAuthHeader` - Missing Authorization header 513 - - `ErrInvalidAuthFormat` - Invalid Authorization header format 514 - - `ErrInvalidAuthScheme` - Invalid scheme (expected Bearer or DPoP) 515 - - `ErrInvalidJWTFormat` - Malformed JWT 516 - - `ErrMissingISSClaim` / `ErrMissingSubClaim` - Missing JWT claims 517 - - `ErrTokenExpired` - Token has expired 518 483 519 484 **Embedded PDS Endpoints** (`pkg/hold/pds/xrpc.go`): 520 485
+4 -4
Dockerfile.appview
··· 1 1 # Production build for ATCR AppView 2 2 # Result: ~30MB scratch image with static binary 3 - FROM docker.io/golang:1.25.4-trixie AS builder 3 + FROM docker.io/golang:1.25.2-trixie AS builder 4 4 5 5 ENV DEBIAN_FRONTEND=noninteractive 6 6 ··· 34 34 LABEL org.opencontainers.image.title="ATCR AppView" \ 35 35 org.opencontainers.image.description="ATProto Container Registry - OCI-compliant registry using AT Protocol for manifest storage" \ 36 36 org.opencontainers.image.authors="ATCR Contributors" \ 37 - org.opencontainers.image.source="https://tangled.org/evan.jarrett.net/at-container-registry" \ 38 - org.opencontainers.image.documentation="https://tangled.org/evan.jarrett.net/at-container-registry" \ 37 + org.opencontainers.image.source="https://tangled.org/@evan.jarrett.net/at-container-registry" \ 38 + org.opencontainers.image.documentation="https://tangled.org/@evan.jarrett.net/at-container-registry" \ 39 39 org.opencontainers.image.licenses="MIT" \ 40 40 org.opencontainers.image.version="0.1.0" \ 41 41 io.atcr.icon="https://imgs.blue/evan.jarrett.net/1TpTNrRelfloN2emuWZDrWmPT0o93bAjEnozjD6UPgoVV9m4" \ 42 - io.atcr.readme="https://tangled.org/evan.jarrett.net/at-container-registry/raw/main/docs/appview.md" 42 + io.atcr.readme="https://tangled.org/@evan.jarrett.net/at-container-registry/raw/main/docs/appview.md" 43 43 44 44 ENTRYPOINT ["/atcr-appview"] 45 45 CMD ["serve"]
+1 -1
Dockerfile.dev
··· 1 1 # Development image with Air hot reload 2 2 # Build: docker build -f Dockerfile.dev -t atcr-appview-dev . 3 3 # Run: docker run -v $(pwd):/app -p 5000:5000 atcr-appview-dev 4 - FROM docker.io/golang:1.25.4-trixie 4 + FROM docker.io/golang:1.25.2-trixie 5 5 6 6 ENV DEBIAN_FRONTEND=noninteractive 7 7
+4 -4
Dockerfile.hold
··· 1 - FROM docker.io/golang:1.25.4-trixie AS builder 1 + FROM docker.io/golang:1.25.2-trixie AS builder 2 2 3 3 ENV DEBIAN_FRONTEND=noninteractive 4 4 ··· 38 38 LABEL org.opencontainers.image.title="ATCR Hold Service" \ 39 39 org.opencontainers.image.description="ATCR Hold Service - Bring Your Own Storage component for ATCR" \ 40 40 org.opencontainers.image.authors="ATCR Contributors" \ 41 - org.opencontainers.image.source="https://tangled.org/evan.jarrett.net/at-container-registry" \ 42 - org.opencontainers.image.documentation="https://tangled.org/evan.jarrett.net/at-container-registry" \ 41 + org.opencontainers.image.source="https://tangled.org/@evan.jarrett.net/at-container-registry" \ 42 + org.opencontainers.image.documentation="https://tangled.org/@evan.jarrett.net/at-container-registry" \ 43 43 org.opencontainers.image.licenses="MIT" \ 44 44 org.opencontainers.image.version="0.1.0" \ 45 45 io.atcr.icon="https://imgs.blue/evan.jarrett.net/1TpTOdtS60GdJWBYEqtK22y688jajbQ9a5kbYRFtwuqrkBAE" \ 46 - io.atcr.readme="https://tangled.org/evan.jarrett.net/at-container-registry/raw/main/docs/hold.md" 46 + io.atcr.readme="https://tangled.org/@evan.jarrett.net/at-container-registry/raw/main/docs/hold.md" 47 47 48 48 ENTRYPOINT ["/atcr-hold"]
+57 -32
cmd/appview/serve.go
··· 82 82 slog.Info("Initializing hold health checker", "cache_ttl", cfg.Health.CacheTTL) 83 83 healthChecker := holdhealth.NewChecker(cfg.Health.CacheTTL) 84 84 85 - // Initialize README fetcher for rendering repo page descriptions 86 - readmeFetcher := readme.NewFetcher() 85 + // Initialize README cache 86 + slog.Info("Initializing README cache", "cache_ttl", cfg.Health.ReadmeCacheTTL) 87 + readmeCache := readme.NewCache(uiDatabase, cfg.Health.ReadmeCacheTTL) 87 88 88 89 // Start background health check worker 89 90 startupDelay := 5 * time.Second // Wait for hold services to start (Docker compose) ··· 150 151 middleware.SetGlobalRefresher(refresher) 151 152 152 153 // Set global database for pull/push metrics tracking 153 - middleware.SetGlobalDatabase(uiDatabase) 154 + metricsDB := db.NewMetricsDB(uiDatabase) 155 + middleware.SetGlobalDatabase(metricsDB) 154 156 155 157 // Create RemoteHoldAuthorizer for hold authorization with caching 156 158 holdAuthorizer := auth.NewRemoteHoldAuthorizer(uiDatabase, testMode) 157 159 middleware.SetGlobalAuthorizer(holdAuthorizer) 158 160 slog.Info("Hold authorizer initialized with database caching") 161 + 162 + // Set global readme cache for middleware 163 + middleware.SetGlobalReadmeCache(readmeCache) 164 + slog.Info("README cache initialized for manifest push refresh") 159 165 160 166 // Initialize Jetstream workers (background services before HTTP routes) 161 - initializeJetstream(uiDatabase, &cfg.Jetstream, defaultHoldDID, testMode, refresher) 167 + initializeJetstream(uiDatabase, &cfg.Jetstream, defaultHoldDID, testMode) 162 168 163 169 // Create main chi router 164 170 mainRouter := chi.NewRouter() ··· 188 194 BaseURL: baseURL, 189 195 DeviceStore: deviceStore, 190 196 HealthChecker: healthChecker, 191 - ReadmeFetcher: readmeFetcher, 197 + ReadmeCache: readmeCache, 192 198 Templates: uiTemplates, 193 - DefaultHoldDID: defaultHoldDID, 194 199 }) 195 200 } 196 201 } ··· 212 217 // Create ATProto client with session provider (uses DoWithSession for DPoP nonce safety) 213 218 client := atproto.NewClientWithSessionProvider(pdsEndpoint, did, refresher) 214 219 215 - // Note: Profile and crew setup now happen automatically via UserContext.EnsureUserSetup() 220 + // Ensure sailor profile exists (creates with default hold if configured) 221 + slog.Debug("Ensuring profile exists", "component", "appview/callback", "did", did, "default_hold_did", defaultHoldDID) 222 + if err := storage.EnsureProfile(ctx, client, defaultHoldDID); err != nil { 223 + slog.Warn("Failed to ensure profile", "component", "appview/callback", "did", did, "error", err) 224 + // Continue anyway - profile creation is not critical for avatar fetch 225 + } else { 226 + slog.Debug("Profile ensured", "component", "appview/callback", "did", did) 227 + } 216 228 217 229 // Fetch user's profile record from PDS (contains blob references) 218 230 profileRecord, err := client.GetProfileRecord(ctx, did) ··· 263 275 return nil // Non-fatal 264 276 } 265 277 266 - // Migrate profile URLโ†’DID if needed (legacy migration, crew registration now handled by UserContext) 267 - if profile != nil && profile.DefaultHold != "" { 278 + var holdDID string 279 + if profile != nil && profile.DefaultHold != nil && *profile.DefaultHold != "" { 280 + defaultHold := *profile.DefaultHold 268 281 // Check if defaultHold is a URL (needs migration) 269 - if strings.HasPrefix(profile.DefaultHold, "http://") || strings.HasPrefix(profile.DefaultHold, "https://") { 270 - slog.Debug("Migrating hold URL to DID", "component", "appview/callback", "did", did, "hold_url", profile.DefaultHold) 282 + if strings.HasPrefix(defaultHold, "http://") || strings.HasPrefix(defaultHold, "https://") { 283 + slog.Debug("Migrating hold URL to DID", "component", "appview/callback", "did", did, "hold_url", defaultHold) 271 284 272 285 // Resolve URL to DID 273 - holdDID := atproto.ResolveHoldDIDFromURL(profile.DefaultHold) 286 + holdDID = atproto.ResolveHoldDIDFromURL(defaultHold) 274 287 275 288 // Update profile with DID 276 - profile.DefaultHold = holdDID 289 + profile.DefaultHold = &holdDID 277 290 if err := storage.UpdateProfile(ctx, client, profile); err != nil { 278 291 slog.Warn("Failed to update profile with hold DID", "component", "appview/callback", "did", did, "error", err) 279 292 } else { 280 293 slog.Debug("Updated profile with hold DID", "component", "appview/callback", "hold_did", holdDID) 281 294 } 295 + } else { 296 + // Already a DID - use it 297 + holdDID = defaultHold 282 298 } 299 + // Register crew regardless of migration (outside the migration block) 300 + // Run in background to avoid blocking OAuth callback if hold is offline 301 + // Use background context - don't inherit request context which gets canceled on response 302 + slog.Debug("Attempting crew registration", "component", "appview/callback", "did", did, "hold_did", holdDID) 303 + go func(client *atproto.Client, refresher *oauth.Refresher, holdDID string) { 304 + ctx := context.Background() 305 + storage.EnsureCrewMembership(ctx, client, refresher, holdDID) 306 + }(client, refresher, holdDID) 307 + 283 308 } 284 309 285 310 return nil // All errors are non-fatal, logged for debugging ··· 301 326 ctx := context.Background() 302 327 app := handlers.NewApp(ctx, cfg.Distribution) 303 328 304 - // Wrap registry app with middleware chain: 305 - // 1. ExtractAuthMethod - extracts auth method from JWT and stores in context 306 - // 2. UserContextMiddleware - builds UserContext with identity, permissions, service tokens 329 + // Wrap registry app with auth method extraction middleware 330 + // This extracts the auth method from the JWT and stores it in the request context 307 331 wrappedApp := middleware.ExtractAuthMethod(app) 308 - 309 - // Create dependencies for UserContextMiddleware 310 - userContextDeps := &auth.Dependencies{ 311 - Refresher: refresher, 312 - Authorizer: holdAuthorizer, 313 - DefaultHoldDID: defaultHoldDID, 314 - } 315 - wrappedApp = middleware.UserContextMiddleware(userContextDeps)(wrappedApp) 316 332 317 333 // Mount registry at /v2/ 318 334 mainRouter.Handle("/v2/*", wrappedApp) ··· 382 398 383 399 w.Header().Set("Content-Type", "application/json") 384 400 w.Header().Set("Access-Control-Allow-Origin", "*") 385 - // Limit caching to allow scope changes to propagate quickly 386 - // PDS servers cache client metadata, so short max-age helps with updates 387 - w.Header().Set("Cache-Control", "public, max-age=300") 388 401 if err := json.NewEncoder(w).Encode(metadataMap); err != nil { 389 402 http.Error(w, "Failed to encode metadata", http.StatusInternalServerError) 390 403 } ··· 402 415 // Prevents the flood of errors when a stale session is discovered during push 403 416 tokenHandler.SetOAuthSessionValidator(refresher) 404 417 405 - // Register token post-auth callback 406 - // Note: Profile and crew setup now happen automatically via UserContext.EnsureUserSetup() 418 + // Register token post-auth callback for profile management 419 + // This decouples the token package from AppView-specific dependencies 407 420 tokenHandler.SetPostAuthCallback(func(ctx context.Context, did, handle, pdsEndpoint, accessToken string) error { 408 421 slog.Debug("Token post-auth callback", "component", "appview/callback", "did", did) 409 - return nil 422 + 423 + // Create ATProto client with validated token 424 + atprotoClient := atproto.NewClient(pdsEndpoint, did, accessToken) 425 + 426 + // Ensure profile exists (will create with default hold if not exists and default is configured) 427 + if err := storage.EnsureProfile(ctx, atprotoClient, defaultHoldDID); err != nil { 428 + // Log error but don't fail auth - profile management is not critical 429 + slog.Warn("Failed to ensure profile", "component", "appview/callback", "did", did, "error", err) 430 + } else { 431 + slog.Debug("Profile ensured with default hold", "component", "appview/callback", "did", did, "default_hold_did", defaultHoldDID) 432 + } 433 + 434 + return nil // All errors are non-fatal 410 435 }) 411 436 412 437 mainRouter.Get("/auth/token", tokenHandler.ServeHTTP) ··· 495 520 } 496 521 497 522 // initializeJetstream initializes the Jetstream workers for real-time events and backfill 498 - func initializeJetstream(database *sql.DB, jetstreamCfg *appview.JetstreamConfig, defaultHoldDID string, testMode bool, refresher *oauth.Refresher) { 523 + func initializeJetstream(database *sql.DB, jetstreamCfg *appview.JetstreamConfig, defaultHoldDID string, testMode bool) { 499 524 // Start Jetstream worker 500 525 jetstreamURL := jetstreamCfg.URL 501 526 ··· 519 544 // Get relay endpoint for sync API (defaults to Bluesky's relay) 520 545 relayEndpoint := jetstreamCfg.RelayEndpoint 521 546 522 - backfillWorker, err := jetstream.NewBackfillWorker(database, relayEndpoint, defaultHoldDID, testMode, refresher) 547 + backfillWorker, err := jetstream.NewBackfillWorker(database, relayEndpoint, defaultHoldDID, testMode) 523 548 if err != nil { 524 549 slog.Warn("Failed to create backfill worker", "component", "jetstream/backfill", "error", err) 525 550 } else {
-84
docs/HOLD_XRPC_ENDPOINTS.md
··· 1 - # Hold Service XRPC Endpoints 2 - 3 - This document lists all XRPC endpoints implemented in the Hold service (`pkg/hold/`). 4 - 5 - ## PDS Endpoints (`pkg/hold/pds/xrpc.go`) 6 - 7 - ### Public (No Auth Required) 8 - 9 - | Endpoint | Method | Description | 10 - |----------|--------|-------------| 11 - | `/xrpc/_health` | GET | Health check | 12 - | `/xrpc/com.atproto.server.describeServer` | GET | Server metadata | 13 - | `/xrpc/com.atproto.repo.describeRepo` | GET | Repository information | 14 - | `/xrpc/com.atproto.repo.getRecord` | GET | Retrieve a single record | 15 - | `/xrpc/com.atproto.repo.listRecords` | GET | List records in a collection (paginated) | 16 - | `/xrpc/com.atproto.sync.listRepos` | GET | List all repositories | 17 - | `/xrpc/com.atproto.sync.getRecord` | GET | Get record as CAR file | 18 - | `/xrpc/com.atproto.sync.getRepo` | GET | Full repository as CAR file | 19 - | `/xrpc/com.atproto.sync.getRepoStatus` | GET | Repository hosting status | 20 - | `/xrpc/com.atproto.sync.subscribeRepos` | GET | WebSocket firehose | 21 - | `/xrpc/com.atproto.identity.resolveHandle` | GET | Resolve handle to DID | 22 - | `/xrpc/app.bsky.actor.getProfile` | GET | Get actor profile | 23 - | `/xrpc/app.bsky.actor.getProfiles` | GET | Get multiple profiles | 24 - | `/.well-known/did.json` | GET | DID document | 25 - | `/.well-known/atproto-did` | GET | DID for handle resolution | 26 - 27 - ### Conditional Auth (based on captain.public) 28 - 29 - | Endpoint | Method | Description | 30 - |----------|--------|-------------| 31 - | `/xrpc/com.atproto.sync.getBlob` | GET/HEAD | Get blob (routes OCI vs ATProto) | 32 - 33 - ### Owner/Crew Admin Required 34 - 35 - | Endpoint | Method | Description | 36 - |----------|--------|-------------| 37 - | `/xrpc/com.atproto.repo.deleteRecord` | POST | Delete a record | 38 - | `/xrpc/com.atproto.repo.uploadBlob` | POST | Upload ATProto blob | 39 - 40 - ### DPoP Auth Required 41 - 42 - | Endpoint | Method | Description | 43 - |----------|--------|-------------| 44 - | `/xrpc/io.atcr.hold.requestCrew` | POST | Request crew membership | 45 - 46 - --- 47 - 48 - ## OCI Multipart Upload Endpoints (`pkg/hold/oci/xrpc.go`) 49 - 50 - All require `blob:write` permission via service token: 51 - 52 - | Endpoint | Method | Description | 53 - |----------|--------|-------------| 54 - | `/xrpc/io.atcr.hold.initiateUpload` | POST | Start multipart upload | 55 - | `/xrpc/io.atcr.hold.getPartUploadUrl` | POST | Get presigned URL for part | 56 - | `/xrpc/io.atcr.hold.uploadPart` | PUT | Direct buffered part upload | 57 - | `/xrpc/io.atcr.hold.completeUpload` | POST | Finalize multipart upload | 58 - | `/xrpc/io.atcr.hold.abortUpload` | POST | Cancel multipart upload | 59 - | `/xrpc/io.atcr.hold.notifyManifest` | POST | Notify manifest push (creates layer records + optional Bluesky post) | 60 - 61 - --- 62 - 63 - ## Standard ATProto Endpoints (excluding io.atcr.hold.*) 64 - 65 - | Endpoint | 66 - |----------| 67 - | /xrpc/_health | 68 - | /xrpc/com.atproto.server.describeServer | 69 - | /xrpc/com.atproto.repo.describeRepo | 70 - | /xrpc/com.atproto.repo.getRecord | 71 - | /xrpc/com.atproto.repo.listRecords | 72 - | /xrpc/com.atproto.repo.deleteRecord | 73 - | /xrpc/com.atproto.repo.uploadBlob | 74 - | /xrpc/com.atproto.sync.listRepos | 75 - | /xrpc/com.atproto.sync.getRecord | 76 - | /xrpc/com.atproto.sync.getRepo | 77 - | /xrpc/com.atproto.sync.getRepoStatus | 78 - | /xrpc/com.atproto.sync.getBlob | 79 - | /xrpc/com.atproto.sync.subscribeRepos | 80 - | /xrpc/com.atproto.identity.resolveHandle | 81 - | /xrpc/app.bsky.actor.getProfile | 82 - | /xrpc/app.bsky.actor.getProfiles | 83 - | /.well-known/did.json | 84 - | /.well-known/atproto-did |
+4 -3
docs/TEST_COVERAGE_GAPS.md
··· 112 112 113 113 **Remaining gaps:** 114 114 - `notifyHoldAboutManifest()` - 0% (background notification, less critical) 115 + - `refreshReadmeCache()` - 11.8% (UI feature, lower priority) 115 116 116 117 ## Critical Priority: Core Registry Functionality 117 118 ··· 422 423 423 424 --- 424 425 425 - ### ๐ŸŸก pkg/appview/readme (Partial coverage) 426 + ### ๐ŸŸก pkg/appview/readme (16.7% coverage) 426 427 427 - README rendering for repo page descriptions. The cache.go was removed as README content is now stored in `io.atcr.repo.page` records and synced via Jetstream. 428 + README fetching and caching. Less critical but still needs work. 428 429 430 + #### cache.go (0% coverage) 429 431 #### fetcher.go (๐Ÿ“Š Partial coverage) 430 - - `RenderMarkdown()` - renders repo page description markdown 431 432 432 433 --- 433 434
-399
docs/VALKEY_MIGRATION.md
··· 1 - # Analysis: AppView SQL Database Usage 2 - 3 - ## Overview 4 - 5 - The AppView uses SQLite with 19 tables. The key finding: **most data is a cache of ATProto records** that could theoretically be rebuilt from users' PDS instances. 6 - 7 - ## Data Categories 8 - 9 - ### 1. MUST PERSIST (Local State Only) 10 - 11 - These tables contain data that **cannot be reconstructed** from external sources: 12 - 13 - | Table | Purpose | Why It Must Persist | 14 - |-------|---------|---------------------| 15 - | `oauth_sessions` | OAuth tokens | Refresh tokens are stateful; losing them = users must re-auth | 16 - | `ui_sessions` | Web browser sessions | Session continuity for logged-in users | 17 - | `devices` | Approved devices + bcrypt secrets | User authorization decisions; secrets are one-way hashed | 18 - | `pending_device_auth` | In-flight auth flows | Short-lived (10min) but critical during auth | 19 - | `oauth_auth_requests` | OAuth flow state | Short-lived but required for auth completion | 20 - | `repository_stats` | Pull/push counts | **Locally tracked metrics** - not stored in ATProto | 21 - 22 - ### 2. CACHED FROM PDS (Rebuildable) 23 - 24 - These tables are essentially a **read-through cache** of ATProto data: 25 - 26 - | Table | Source | ATProto Collection | 27 - |-------|--------|-------------------| 28 - | `users` | User's PDS profile | `app.bsky.actor.profile` + DID document | 29 - | `manifests` | User's PDS | `io.atcr.manifest` records | 30 - | `tags` | User's PDS | `io.atcr.tag` records | 31 - | `layers` | Derived from manifests | Parsed from manifest content | 32 - | `manifest_references` | Derived from manifest lists | Parsed from multi-arch manifests | 33 - | `repository_annotations` | Manifest config blob | OCI annotations from config | 34 - | `repo_pages` | User's PDS | `io.atcr.repo.page` records | 35 - | `stars` | User's PDS | `io.atcr.sailor.star` records (synced via Jetstream) | 36 - | `hold_captain_records` | Hold's embedded PDS | `io.atcr.hold.captain` records | 37 - | `hold_crew_approvals` | Hold's embedded PDS | `io.atcr.hold.crew` records | 38 - | `hold_crew_denials` | Local authorization cache | Could re-check on demand | 39 - 40 - ### 3. OPERATIONAL 41 - 42 - | Table | Purpose | 43 - |-------|---------| 44 - | `schema_migrations` | Migration tracking | 45 - | `firehose_cursor` | Jetstream position (can restart from 0) | 46 - 47 - ## Key Insights 48 - 49 - ### What's Actually Unique to AppView? 50 - 51 - 1. **Authentication state** - OAuth sessions, devices, UI sessions 52 - 2. **Engagement metrics** - Pull/push counts (locally tracked, not in ATProto) 53 - 54 - ### What Could Be Eliminated? 55 - 56 - If ATCR fully embraced the ATProto model: 57 - 58 - 1. **`users`** - Query PDS on demand (with caching) 59 - 2. **`manifests`, `tags`, `layers`** - Query PDS on demand (with caching) 60 - 3. **`repository_annotations`** - Fetch manifest config on demand 61 - 4. **`repo_pages`** - Query PDS on demand 62 - 5. **`hold_*` tables** - Query hold's PDS on demand 63 - 64 - ### Trade-offs 65 - 66 - **Current approach (heavy caching):** 67 - - Fast queries for UI (search, browse, stats) 68 - - Offline resilience (PDS down doesn't break UI) 69 - - Complex sync logic (Jetstream consumer, backfill) 70 - - State can diverge from source of truth 71 - 72 - **Lighter approach (query on demand):** 73 - - Always fresh data 74 - - Simpler codebase (no sync) 75 - - Slower queries (network round-trips) 76 - - Depends on PDS availability 77 - 78 - ## Current Limitation: No Cache-Miss Queries 79 - 80 - **Finding:** There's no "query PDS on cache miss" logic. Users/manifests only enter the DB via: 81 - 1. OAuth login (user authenticates) 82 - 2. Jetstream events (firehose activity) 83 - 84 - **Problem:** If someone visits `atcr.io/alice/myapp` before alice is indexed โ†’ 404 85 - 86 - **Where this happens:** 87 - - `pkg/appview/handlers/repository.go:50-53`: If `db.GetUserByDID()` returns nil โ†’ 404 88 - - No fallback to `atproto.Client.ListRecords()` or similar 89 - 90 - **This matters for Valkey migration:** If cache is ephemeral and restarts clear it, you need cache-miss logic to repopulate on demand. Otherwise: 91 - - Restart Valkey โ†’ all users/manifests gone 92 - - Wait for Jetstream to re-index OR implement cache-miss queries 93 - 94 - **Cache-miss implementation design:** 95 - 96 - Existing code to reuse: `pkg/appview/jetstream/processor.go:43-97` (`EnsureUser`) 97 - 98 - ```go 99 - // New: pkg/appview/cache/loader.go 100 - 101 - type Loader struct { 102 - cache Cache // Valkey interface 103 - client *atproto.Client 104 - } 105 - 106 - // GetUser with cache-miss fallback 107 - func (l *Loader) GetUser(ctx context.Context, did string) (*User, error) { 108 - // 1. Try cache 109 - if user := l.cache.GetUser(did); user != nil { 110 - return user, nil 111 - } 112 - 113 - // 2. Cache miss - resolve identity (already queries network) 114 - _, handle, pdsEndpoint, err := atproto.ResolveIdentity(ctx, did) 115 - if err != nil { 116 - return nil, err // User doesn't exist in network 117 - } 118 - 119 - // 3. Fetch profile for avatar 120 - client := atproto.NewClient(pdsEndpoint, "", "") 121 - profile, _ := client.GetProfileRecord(ctx, did) 122 - avatarURL := "" 123 - if profile != nil && profile.Avatar != nil { 124 - avatarURL = atproto.BlobCDNURL(did, profile.Avatar.Ref.Link) 125 - } 126 - 127 - // 4. Cache and return 128 - user := &User{DID: did, Handle: handle, PDSEndpoint: pdsEndpoint, Avatar: avatarURL} 129 - l.cache.SetUser(user, 1*time.Hour) 130 - return user, nil 131 - } 132 - 133 - // GetManifestsForRepo with cache-miss fallback 134 - func (l *Loader) GetManifestsForRepo(ctx context.Context, did, repo string) ([]Manifest, error) { 135 - cacheKey := fmt.Sprintf("manifests:%s:%s", did, repo) 136 - 137 - // 1. Try cache 138 - if cached := l.cache.Get(cacheKey); cached != nil { 139 - return cached.([]Manifest), nil 140 - } 141 - 142 - // 2. Cache miss - get user's PDS endpoint 143 - user, err := l.GetUser(ctx, did) 144 - if err != nil { 145 - return nil, err 146 - } 147 - 148 - // 3. Query PDS for manifests 149 - client := atproto.NewClient(user.PDSEndpoint, "", "") 150 - records, _, err := client.ListRecordsForRepo(ctx, did, atproto.ManifestCollection, 100, "") 151 - if err != nil { 152 - return nil, err 153 - } 154 - 155 - // 4. Filter by repository and parse 156 - var manifests []Manifest 157 - for _, rec := range records { 158 - var m atproto.ManifestRecord 159 - if err := json.Unmarshal(rec.Value, &m); err != nil { 160 - continue 161 - } 162 - if m.Repository == repo { 163 - manifests = append(manifests, convertManifest(m)) 164 - } 165 - } 166 - 167 - // 5. Cache and return 168 - l.cache.Set(cacheKey, manifests, 10*time.Minute) 169 - return manifests, nil 170 - } 171 - ``` 172 - 173 - **Handler changes:** 174 - ```go 175 - // Before (repository.go:45-53): 176 - owner, err := db.GetUserByDID(h.DB, did) 177 - if owner == nil { 178 - RenderNotFound(w, r, h.Templates, h.RegistryURL) 179 - return 180 - } 181 - 182 - // After: 183 - owner, err := h.Loader.GetUser(r.Context(), did) 184 - if err != nil { 185 - RenderNotFound(w, r, h.Templates, h.RegistryURL) 186 - return 187 - } 188 - ``` 189 - 190 - **Performance considerations:** 191 - - Cache hit: ~1ms (Valkey lookup) 192 - - Cache miss: ~200-500ms (PDS round-trip) 193 - - First request after restart: slower but correct 194 - - Jetstream still useful for proactive warming 195 - 196 - --- 197 - 198 - ## Proposed Architecture: Valkey + ATProto 199 - 200 - ### Goal 201 - Replace SQLite with Valkey (Redis-compatible) for ephemeral state, push remaining persistent data to ATProto. 202 - 203 - ### What goes to Valkey (ephemeral, TTL-based) 204 - 205 - | Current Table | Valkey Key Pattern | TTL | Notes | 206 - |---------------|-------------------|-----|-------| 207 - | `oauth_sessions` | `oauth:{did}:{session_id}` | 90 days | Lost on restart = re-auth | 208 - | `ui_sessions` | `ui:{session_id}` | Session duration | Lost on restart = re-login | 209 - | `oauth_auth_requests` | `authreq:{state}` | 10 min | In-flight flows | 210 - | `pending_device_auth` | `pending:{device_code}` | 10 min | In-flight flows | 211 - | `firehose_cursor` | `cursor:jetstream` | None | Can restart from 0 | 212 - | All PDS cache tables | `cache:{collection}:{did}:{rkey}` | 10-60 min | Query PDS on miss | 213 - 214 - **Benefits:** 215 - - Multi-instance ready (shared Valkey) 216 - - No schema migrations 217 - - Natural TTL expiry 218 - - Simpler code (no SQL) 219 - 220 - ### What could become ATProto records 221 - 222 - | Current Table | Proposed Collection | Where Stored | Open Questions | 223 - |---------------|---------------------|--------------|----------------| 224 - | `devices` | `io.atcr.sailor.device` | User's PDS | Privacy: IP, user-agent sensitive? | 225 - | `repository_stats` | `io.atcr.repo.stats` | Hold's PDS or User's PDS | Who owns the stats? | 226 - 227 - **Devices โ†’ Valkey:** 228 - - Move current device table to Valkey 229 - - Key: `device:{did}:{device_id}` โ†’ `{name, secret_hash, ip, user_agent, created_at, last_used}` 230 - - TTL: Long (1 year?) or no expiry 231 - - Device list: `devices:{did}` โ†’ Set of device IDs 232 - - Secret validation works the same, just different backend 233 - 234 - **Service auth exploration (future):** 235 - The challenge with pure ATProto service auth is the AppView still needs the user's OAuth session to write manifests to their PDS. The current flow: 236 - 1. User authenticates via OAuth โ†’ AppView gets OAuth tokens 237 - 2. AppView issues registry JWT to credential helper 238 - 3. Credential helper presents JWT on each push/pull 239 - 4. AppView uses OAuth session to write to user's PDS 240 - 241 - Service auth could work for the hold side (AppView โ†’ Hold), but not for the user's OAuth session. 242 - 243 - **Repository stats โ†’ Hold's PDS:** 244 - 245 - **Challenge discovered:** The hold's `getBlob` endpoint only receives `did` + `cid`, not the repository name. 246 - 247 - Current flow (`proxy_blob_store.go:358-362`): 248 - ```go 249 - xrpcURL := fmt.Sprintf("%s%s?did=%s&cid=%s&method=%s", 250 - p.holdURL, atproto.SyncGetBlob, p.ctx.DID, dgst.String(), operation) 251 - ``` 252 - 253 - **Implementation options:** 254 - 255 - **Option A: Add repository parameter to getBlob (recommended)** 256 - ```go 257 - // Modified AppView call: 258 - xrpcURL := fmt.Sprintf("%s%s?did=%s&cid=%s&method=%s&repo=%s", 259 - p.holdURL, atproto.SyncGetBlob, p.ctx.DID, dgst.String(), operation, p.ctx.Repository) 260 - ``` 261 - 262 - ```go 263 - // Modified hold handler (xrpc.go:969): 264 - func (h *XRPCHandler) HandleGetBlob(w http.ResponseWriter, r *http.Request) { 265 - did := r.URL.Query().Get("did") 266 - cidOrDigest := r.URL.Query().Get("cid") 267 - repo := r.URL.Query().Get("repo") // NEW 268 - 269 - // ... existing blob handling ... 270 - 271 - // Increment stats if repo provided 272 - if repo != "" { 273 - go h.pds.IncrementPullCount(did, repo) // Async, non-blocking 274 - } 275 - } 276 - ``` 277 - 278 - **Stats record structure:** 279 - ``` 280 - Collection: io.atcr.hold.stats 281 - Rkey: base64(did:repository) // Deterministic, unique 282 - 283 - { 284 - "$type": "io.atcr.hold.stats", 285 - "did": "did:plc:alice123", 286 - "repository": "myapp", 287 - "pullCount": 1542, 288 - "pushCount": 47, 289 - "lastPull": "2025-01-15T...", 290 - "lastPush": "2025-01-10T...", 291 - "createdAt": "2025-01-01T..." 292 - } 293 - ``` 294 - 295 - **Hold-side implementation:** 296 - ```go 297 - // New file: pkg/hold/pds/stats.go 298 - 299 - func (p *HoldPDS) IncrementPullCount(ctx context.Context, did, repo string) error { 300 - rkey := statsRecordKey(did, repo) 301 - 302 - // Get or create stats record 303 - stats, err := p.GetStatsRecord(ctx, rkey) 304 - if err != nil || stats == nil { 305 - stats = &atproto.StatsRecord{ 306 - Type: atproto.StatsCollection, 307 - DID: did, 308 - Repository: repo, 309 - PullCount: 0, 310 - PushCount: 0, 311 - CreatedAt: time.Now(), 312 - } 313 - } 314 - 315 - // Increment and update 316 - stats.PullCount++ 317 - stats.LastPull = time.Now() 318 - 319 - _, err = p.repomgr.UpdateRecord(ctx, p.uid, atproto.StatsCollection, rkey, stats) 320 - return err 321 - } 322 - ``` 323 - 324 - **Query endpoint (new XRPC):** 325 - ``` 326 - GET /xrpc/io.atcr.hold.getStats?did={userDID}&repo={repository} 327 - โ†’ Returns JSON: { pullCount, pushCount, lastPull, lastPush } 328 - 329 - GET /xrpc/io.atcr.hold.listStats?did={userDID} 330 - โ†’ Returns all stats for a user across all repos on this hold 331 - ``` 332 - 333 - **AppView aggregation:** 334 - ```go 335 - func (l *Loader) GetAggregatedStats(ctx context.Context, did, repo string) (*Stats, error) { 336 - // 1. Get all holds that have served this repo 337 - holdDIDs, _ := l.cache.GetHoldDIDsForRepo(did, repo) 338 - 339 - // 2. Query each hold for stats 340 - var total Stats 341 - for _, holdDID := range holdDIDs { 342 - holdURL := resolveHoldDID(holdDID) 343 - stats, _ := queryHoldStats(ctx, holdURL, did, repo) 344 - total.PullCount += stats.PullCount 345 - total.PushCount += stats.PushCount 346 - } 347 - 348 - return &total, nil 349 - } 350 - ``` 351 - 352 - **Files to modify:** 353 - - `pkg/atproto/lexicon.go` - Add `StatsCollection` + `StatsRecord` 354 - - `pkg/hold/pds/stats.go` - New file for stats operations 355 - - `pkg/hold/pds/xrpc.go` - Add `repo` param to getBlob, add stats endpoints 356 - - `pkg/appview/storage/proxy_blob_store.go` - Pass repository to getBlob 357 - - `pkg/appview/cache/loader.go` - Aggregation logic 358 - 359 - ### Migration Path 360 - 361 - **Phase 1: Add Valkey infrastructure** 362 - - Add Valkey client to AppView 363 - - Create store interfaces that abstract SQLite vs Valkey 364 - - Dual-write OAuth sessions to both 365 - 366 - **Phase 2: Migrate sessions to Valkey** 367 - - OAuth sessions, UI sessions, auth requests, pending device auth 368 - - Remove SQLite session tables 369 - - Test: restart AppView, users get logged out (acceptable) 370 - 371 - **Phase 3: Migrate devices to Valkey** 372 - - Move device store to Valkey 373 - - Same data structure, different backend 374 - - Consider device expiry policy 375 - 376 - **Phase 4: Implement hold-side stats** 377 - - Add `io.atcr.hold.stats` collection to hold's embedded PDS 378 - - Hold increments stats on blob access 379 - - Add XRPC endpoint: `io.atcr.hold.getStats` 380 - 381 - **Phase 5: AppView stats aggregation** 382 - - Track holdDids per repo in Valkey cache 383 - - Query holds for stats, aggregate 384 - - Cache aggregated stats with TTL 385 - 386 - **Phase 6: Remove SQLite (optional)** 387 - - Keep SQLite as optional cache layer for UI queries 388 - - Or: Query PDS on demand with Valkey caching 389 - - Jetstream still useful for real-time updates 390 - 391 - ## Summary Table 392 - 393 - | Category | Tables | % of Schema | Truly Persistent? | 394 - |----------|--------|-------------|-------------------| 395 - | Auth & Sessions + Metrics | 6 | 32% | Yes | 396 - | PDS Cache | 11 | 58% | No (rebuildable) | 397 - | Operational | 2 | 10% | No | 398 - 399 - **~58% of the database is cached ATProto data that could be rebuilt from PDSes.**
+1 -1
go.mod
··· 1 1 module atcr.io 2 2 3 - go 1.25.4 3 + go 1.25.5 4 4 5 5 require ( 6 6 github.com/aws/aws-sdk-go v1.55.5
-21
lexicons/io/atcr/authFullApp.json
··· 1 - { 2 - "lexicon": 1, 3 - "id": "io.atcr.authFullApp", 4 - "defs": { 5 - "main": { 6 - "type": "permission-set", 7 - "title": "AT Container Registry", 8 - "title:langs": {}, 9 - "detail": "Push and pull container images to the ATProto Container Registry. Includes creating and managing image manifests, tags, and repository settings.", 10 - "detail:langs": {}, 11 - "permissions": [ 12 - { 13 - "type": "permission", 14 - "resource": "repo", 15 - "action": ["create", "update", "delete"], 16 - "collection": ["io.atcr.manifest", "io.atcr.tag", "io.atcr.sailor.star", "io.atcr.sailor.profile", "io.atcr.repo.page"] 17 - } 18 - ] 19 - } 20 - } 21 - }
+2 -4
lexicons/io/atcr/hold/captain.json
··· 34 34 }, 35 35 "region": { 36 36 "type": "string", 37 - "description": "S3 region where blobs are stored", 38 - "maxLength": 64 37 + "description": "S3 region where blobs are stored" 39 38 }, 40 39 "provider": { 41 40 "type": "string", 42 - "description": "Deployment provider (e.g., fly.io, aws, etc.)", 43 - "maxLength": 64 41 + "description": "Deployment provider (e.g., fly.io, aws, etc.)" 44 42 } 45 43 } 46 44 }
+2 -4
lexicons/io/atcr/hold/crew.json
··· 18 18 "role": { 19 19 "type": "string", 20 20 "description": "Member's role in the hold", 21 - "knownValues": ["owner", "admin", "write", "read"], 22 - "maxLength": 32 21 + "knownValues": ["owner", "admin", "write", "read"] 23 22 }, 24 23 "permissions": { 25 24 "type": "array", 26 25 "description": "Specific permissions granted to this member", 27 26 "items": { 28 - "type": "string", 29 - "maxLength": 64 27 + "type": "string" 30 28 } 31 29 }, 32 30 "addedAt": {
+3 -6
lexicons/io/atcr/hold/layer.json
··· 12 12 "properties": { 13 13 "digest": { 14 14 "type": "string", 15 - "description": "Layer digest (e.g., sha256:abc123...)", 16 - "maxLength": 128 15 + "description": "Layer digest (e.g., sha256:abc123...)" 17 16 }, 18 17 "size": { 19 18 "type": "integer", ··· 21 20 }, 22 21 "mediaType": { 23 22 "type": "string", 24 - "description": "Media type (e.g., application/vnd.oci.image.layer.v1.tar+gzip)", 25 - "maxLength": 128 23 + "description": "Media type (e.g., application/vnd.oci.image.layer.v1.tar+gzip)" 26 24 }, 27 25 "repository": { 28 26 "type": "string", 29 - "description": "Repository this layer belongs to", 30 - "maxLength": 255 27 + "description": "Repository this layer belongs to" 31 28 }, 32 29 "userDid": { 33 30 "type": "string",
+17 -28
lexicons/io/atcr/manifest.json
··· 17 17 }, 18 18 "digest": { 19 19 "type": "string", 20 - "description": "Content digest (e.g., 'sha256:abc123...')", 21 - "maxLength": 128 20 + "description": "Content digest (e.g., 'sha256:abc123...')" 22 21 }, 23 22 "holdDid": { 24 23 "type": "string", ··· 38 37 "application/vnd.docker.distribution.manifest.v2+json", 39 38 "application/vnd.oci.image.index.v1+json", 40 39 "application/vnd.docker.distribution.manifest.list.v2+json" 41 - ], 42 - "maxLength": 128 40 + ] 43 41 }, 44 42 "schemaVersion": { 45 43 "type": "integer", ··· 67 65 "description": "Referenced manifests (for manifest lists/indexes)" 68 66 }, 69 67 "annotations": { 70 - "type": "unknown", 71 - "description": "Optional OCI annotation metadata. Map of string keys to string values (e.g., org.opencontainers.image.title โ†’ 'My App')." 68 + "type": "object", 69 + "description": "Optional metadata annotations" 72 70 }, 73 71 "subject": { 74 72 "type": "ref", ··· 94 92 "properties": { 95 93 "mediaType": { 96 94 "type": "string", 97 - "description": "MIME type of the blob", 98 - "maxLength": 128 95 + "description": "MIME type of the blob" 99 96 }, 100 97 "size": { 101 98 "type": "integer", ··· 103 100 }, 104 101 "digest": { 105 102 "type": "string", 106 - "description": "Content digest (e.g., 'sha256:...')", 107 - "maxLength": 128 103 + "description": "Content digest (e.g., 'sha256:...')" 108 104 }, 109 105 "urls": { 110 106 "type": "array", ··· 115 111 "description": "Optional direct URLs to blob (for BYOS)" 116 112 }, 117 113 "annotations": { 118 - "type": "unknown", 119 - "description": "Optional OCI annotation metadata. Map of string keys to string values." 114 + "type": "object", 115 + "description": "Optional metadata" 120 116 } 121 117 } 122 118 }, ··· 127 123 "properties": { 128 124 "mediaType": { 129 125 "type": "string", 130 - "description": "Media type of the referenced manifest", 131 - "maxLength": 128 126 + "description": "Media type of the referenced manifest" 132 127 }, 133 128 "size": { 134 129 "type": "integer", ··· 136 131 }, 137 132 "digest": { 138 133 "type": "string", 139 - "description": "Content digest (e.g., 'sha256:...')", 140 - "maxLength": 128 134 + "description": "Content digest (e.g., 'sha256:...')" 141 135 }, 142 136 "platform": { 143 137 "type": "ref", ··· 145 139 "description": "Platform information for this manifest" 146 140 }, 147 141 "annotations": { 148 - "type": "unknown", 149 - "description": "Optional OCI annotation metadata. Map of string keys to string values." 142 + "type": "object", 143 + "description": "Optional metadata" 150 144 } 151 145 } 152 146 }, ··· 157 151 "properties": { 158 152 "architecture": { 159 153 "type": "string", 160 - "description": "CPU architecture (e.g., 'amd64', 'arm64', 'arm')", 161 - "maxLength": 32 154 + "description": "CPU architecture (e.g., 'amd64', 'arm64', 'arm')" 162 155 }, 163 156 "os": { 164 157 "type": "string", 165 - "description": "Operating system (e.g., 'linux', 'windows', 'darwin')", 166 - "maxLength": 32 158 + "description": "Operating system (e.g., 'linux', 'windows', 'darwin')" 167 159 }, 168 160 "osVersion": { 169 161 "type": "string", 170 - "description": "Optional OS version", 171 - "maxLength": 64 162 + "description": "Optional OS version" 172 163 }, 173 164 "osFeatures": { 174 165 "type": "array", 175 166 "items": { 176 - "type": "string", 177 - "maxLength": 64 167 + "type": "string" 178 168 }, 179 169 "description": "Optional OS features" 180 170 }, 181 171 "variant": { 182 172 "type": "string", 183 - "description": "Optional CPU variant (e.g., 'v7' for ARM)", 184 - "maxLength": 32 173 + "description": "Optional CPU variant (e.g., 'v7' for ARM)" 185 174 } 186 175 } 187 176 }
-43
lexicons/io/atcr/repo/page.json
··· 1 - { 2 - "lexicon": 1, 3 - "id": "io.atcr.repo.page", 4 - "defs": { 5 - "main": { 6 - "type": "record", 7 - "description": "Repository page metadata including description and avatar. Users can edit this directly in their PDS to customize their repository page.", 8 - "key": "any", 9 - "record": { 10 - "type": "object", 11 - "required": ["repository", "createdAt", "updatedAt"], 12 - "properties": { 13 - "repository": { 14 - "type": "string", 15 - "description": "The name of the repository (e.g., 'myapp'). Must match the rkey.", 16 - "maxLength": 256 17 - }, 18 - "description": { 19 - "type": "string", 20 - "description": "Markdown README/description content for the repository page.", 21 - "maxLength": 100000 22 - }, 23 - "avatar": { 24 - "type": "blob", 25 - "description": "Repository avatar/icon image.", 26 - "accept": ["image/png", "image/jpeg", "image/webp"], 27 - "maxSize": 3000000 28 - }, 29 - "createdAt": { 30 - "type": "string", 31 - "format": "datetime", 32 - "description": "Record creation timestamp" 33 - }, 34 - "updatedAt": { 35 - "type": "string", 36 - "format": "datetime", 37 - "description": "Record last updated timestamp" 38 - } 39 - } 40 - } 41 - } 42 - } 43 - }
+1 -2
lexicons/io/atcr/tag.json
··· 27 27 }, 28 28 "manifestDigest": { 29 29 "type": "string", 30 - "description": "DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead.", 31 - "maxLength": 128 30 + "description": "DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead." 32 31 }, 33 32 "createdAt": { 34 33 "type": "string",
+4
pkg/appview/config.go
··· 79 79 80 80 // CheckInterval is the hold health check refresh interval (from env: ATCR_HEALTH_CHECK_INTERVAL, default: 15m) 81 81 CheckInterval time.Duration `yaml:"check_interval"` 82 + 83 + // ReadmeCacheTTL is the README cache TTL (from env: ATCR_README_CACHE_TTL, default: 1h) 84 + ReadmeCacheTTL time.Duration `yaml:"readme_cache_ttl"` 82 85 } 83 86 84 87 // JetstreamConfig defines ATProto Jetstream settings ··· 162 165 // Health and cache configuration 163 166 cfg.Health.CacheTTL = getDurationOrDefault("ATCR_HEALTH_CACHE_TTL", 15*time.Minute) 164 167 cfg.Health.CheckInterval = getDurationOrDefault("ATCR_HEALTH_CHECK_INTERVAL", 15*time.Minute) 168 + cfg.Health.ReadmeCacheTTL = getDurationOrDefault("ATCR_README_CACHE_TTL", 1*time.Hour) 165 169 166 170 // Jetstream configuration 167 171 cfg.Jetstream.URL = getEnvOrDefault("JETSTREAM_URL", "wss://jetstream2.us-west.bsky.network/subscribe")
-18
pkg/appview/db/migrations/0006_add_repo_pages.yaml
··· 1 - description: Add repo_pages table and remove readme_cache 2 - query: | 3 - -- Create repo_pages table for storing repository page metadata 4 - -- This replaces readme_cache with PDS-synced data 5 - CREATE TABLE IF NOT EXISTS repo_pages ( 6 - did TEXT NOT NULL, 7 - repository TEXT NOT NULL, 8 - description TEXT, 9 - avatar_cid TEXT, 10 - created_at TIMESTAMP NOT NULL, 11 - updated_at TIMESTAMP NOT NULL, 12 - PRIMARY KEY(did, repository), 13 - FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE 14 - ); 15 - CREATE INDEX IF NOT EXISTS idx_repo_pages_did ON repo_pages(did); 16 - 17 - -- Drop readme_cache table (no longer needed) 18 - DROP TABLE IF EXISTS readme_cache;
+2 -3
pkg/appview/db/models.go
··· 148 148 // TagWithPlatforms extends Tag with platform information 149 149 type TagWithPlatforms struct { 150 150 Tag 151 - Platforms []PlatformInfo 152 - IsMultiArch bool 153 - HasAttestations bool // true if manifest list contains attestation references 151 + Platforms []PlatformInfo 152 + IsMultiArch bool 154 153 } 155 154 156 155 // ManifestWithMetadata extends Manifest with tags and platform information
+33 -119
pkg/appview/db/queries.go
··· 7 7 "time" 8 8 ) 9 9 10 - // BlobCDNURL returns the CDN URL for an ATProto blob 11 - // This is a local copy to avoid importing atproto (prevents circular dependencies) 12 - func BlobCDNURL(did, cid string) string { 13 - return fmt.Sprintf("https://imgs.blue/%s/%s", did, cid) 14 - } 15 - 16 10 // escapeLikePattern escapes SQL LIKE wildcards (%, _) and backslash for safe searching. 17 11 // It also sanitizes the input to prevent injection attacks via special characters. 18 12 func escapeLikePattern(s string) string { ··· 52 46 COALESCE((SELECT COUNT(*) FROM stars WHERE owner_did = u.did AND repository = t.repository), 0), 53 47 COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = u.did AND repository = t.repository), 0), 54 48 t.created_at, 55 - m.hold_endpoint, 56 - COALESCE(rp.avatar_cid, '') 49 + m.hold_endpoint 57 50 FROM tags t 58 51 JOIN users u ON t.did = u.did 59 52 JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest 60 53 LEFT JOIN repository_stats rs ON t.did = rs.did AND t.repository = rs.repository 61 - LEFT JOIN repo_pages rp ON t.did = rp.did AND t.repository = rp.repository 62 54 ` 63 55 64 56 args := []any{currentUserDID} ··· 81 73 for rows.Next() { 82 74 var p Push 83 75 var isStarredInt int 84 - var avatarCID string 85 - if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint, &avatarCID); err != nil { 76 + if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint); err != nil { 86 77 return nil, 0, err 87 78 } 88 79 p.IsStarred = isStarredInt > 0 89 - // Prefer repo page avatar over annotation icon 90 - if avatarCID != "" { 91 - p.IconURL = BlobCDNURL(p.DID, avatarCID) 92 - } 93 80 pushes = append(pushes, p) 94 81 } 95 82 ··· 132 119 COALESCE((SELECT COUNT(*) FROM stars WHERE owner_did = u.did AND repository = t.repository), 0), 133 120 COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = u.did AND repository = t.repository), 0), 134 121 t.created_at, 135 - m.hold_endpoint, 136 - COALESCE(rp.avatar_cid, '') 122 + m.hold_endpoint 137 123 FROM tags t 138 124 JOIN users u ON t.did = u.did 139 125 JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest 140 126 LEFT JOIN repository_stats rs ON t.did = rs.did AND t.repository = rs.repository 141 - LEFT JOIN repo_pages rp ON t.did = rp.did AND t.repository = rp.repository 142 127 WHERE u.handle LIKE ? ESCAPE '\' 143 128 OR u.did = ? 144 129 OR t.repository LIKE ? ESCAPE '\' ··· 161 146 for rows.Next() { 162 147 var p Push 163 148 var isStarredInt int 164 - var avatarCID string 165 - if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint, &avatarCID); err != nil { 149 + if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint); err != nil { 166 150 return nil, 0, err 167 151 } 168 152 p.IsStarred = isStarredInt > 0 169 - // Prefer repo page avatar over annotation icon 170 - if avatarCID != "" { 171 - p.IconURL = BlobCDNURL(p.DID, avatarCID) 172 - } 173 153 pushes = append(pushes, p) 174 154 } 175 155 ··· 312 292 r.Licenses = annotations["org.opencontainers.image.licenses"] 313 293 r.IconURL = annotations["io.atcr.icon"] 314 294 r.ReadmeURL = annotations["io.atcr.readme"] 315 - 316 - // Check for repo page avatar (overrides annotation icon) 317 - repoPage, err := GetRepoPage(db, did, r.Name) 318 - if err == nil && repoPage != nil && repoPage.AvatarCID != "" { 319 - r.IconURL = BlobCDNURL(did, repoPage.AvatarCID) 320 - } 321 295 322 296 repos = append(repos, r) 323 297 } ··· 622 596 // GetTagsWithPlatforms returns all tags for a repository with platform information 623 597 // Only multi-arch tags (manifest lists) have platform info in manifest_references 624 598 // Single-arch tags will have empty Platforms slice (platform is obvious for single-arch) 625 - // Attestation references (unknown/unknown platforms) are filtered out but tracked via HasAttestations 626 599 func GetTagsWithPlatforms(db *sql.DB, did, repository string) ([]TagWithPlatforms, error) { 627 600 rows, err := db.Query(` 628 601 SELECT ··· 636 609 COALESCE(mr.platform_os, '') as platform_os, 637 610 COALESCE(mr.platform_architecture, '') as platform_architecture, 638 611 COALESCE(mr.platform_variant, '') as platform_variant, 639 - COALESCE(mr.platform_os_version, '') as platform_os_version, 640 - COALESCE(mr.is_attestation, 0) as is_attestation 612 + COALESCE(mr.platform_os_version, '') as platform_os_version 641 613 FROM tags t 642 614 JOIN manifests m ON t.digest = m.digest AND t.did = m.did AND t.repository = m.repository 643 615 LEFT JOIN manifest_references mr ON m.id = mr.manifest_id ··· 657 629 for rows.Next() { 658 630 var t Tag 659 631 var mediaType, platformOS, platformArch, platformVariant, platformOSVersion string 660 - var isAttestation bool 661 632 662 633 if err := rows.Scan(&t.ID, &t.DID, &t.Repository, &t.Tag, &t.Digest, &t.CreatedAt, 663 - &mediaType, &platformOS, &platformArch, &platformVariant, &platformOSVersion, &isAttestation); err != nil { 634 + &mediaType, &platformOS, &platformArch, &platformVariant, &platformOSVersion); err != nil { 664 635 return nil, err 665 636 } 666 637 ··· 672 643 Platforms: []PlatformInfo{}, 673 644 } 674 645 tagOrder = append(tagOrder, tagKey) 675 - } 676 - 677 - // Track if manifest list has attestations 678 - if isAttestation { 679 - tagMap[tagKey].HasAttestations = true 680 - // Skip attestation references in platform display 681 - continue 682 646 } 683 647 684 648 // Add platform info if present (only for multi-arch manifest lists) ··· 1634 1598 return time.Time{}, fmt.Errorf("unable to parse timestamp: %s", s) 1635 1599 } 1636 1600 1601 + // MetricsDB wraps a sql.DB and implements the metrics interface for middleware 1602 + type MetricsDB struct { 1603 + db *sql.DB 1604 + } 1605 + 1606 + // NewMetricsDB creates a new metrics database wrapper 1607 + func NewMetricsDB(db *sql.DB) *MetricsDB { 1608 + return &MetricsDB{db: db} 1609 + } 1610 + 1611 + // IncrementPullCount increments the pull count for a repository 1612 + func (m *MetricsDB) IncrementPullCount(did, repository string) error { 1613 + return IncrementPullCount(m.db, did, repository) 1614 + } 1615 + 1616 + // IncrementPushCount increments the push count for a repository 1617 + func (m *MetricsDB) IncrementPushCount(did, repository string) error { 1618 + return IncrementPushCount(m.db, did, repository) 1619 + } 1620 + 1621 + // GetLatestHoldDIDForRepo returns the hold DID from the most recent manifest for a repository 1622 + func (m *MetricsDB) GetLatestHoldDIDForRepo(did, repository string) (string, error) { 1623 + return GetLatestHoldDIDForRepo(m.db, did, repository) 1624 + } 1625 + 1637 1626 // GetFeaturedRepositories fetches top repositories sorted by stars and pulls 1638 1627 func GetFeaturedRepositories(db *sql.DB, limit int, currentUserDID string) ([]FeaturedRepository, error) { 1639 1628 query := ` ··· 1661 1650 COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'io.atcr.icon'), ''), 1662 1651 rs.pull_count, 1663 1652 rs.star_count, 1664 - COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = m.did AND repository = m.repository), 0), 1665 - COALESCE(rp.avatar_cid, '') 1653 + COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = m.did AND repository = m.repository), 0) 1666 1654 FROM latest_manifests lm 1667 1655 JOIN manifests m ON lm.latest_id = m.id 1668 1656 JOIN users u ON m.did = u.did 1669 1657 JOIN repo_stats rs ON m.did = rs.did AND m.repository = rs.repository 1670 - LEFT JOIN repo_pages rp ON m.did = rp.did AND m.repository = rp.repository 1671 1658 ORDER BY rs.score DESC, rs.star_count DESC, rs.pull_count DESC, m.created_at DESC 1672 1659 LIMIT ? 1673 1660 ` ··· 1682 1669 for rows.Next() { 1683 1670 var f FeaturedRepository 1684 1671 var isStarredInt int 1685 - var avatarCID string 1686 1672 1687 1673 if err := rows.Scan(&f.OwnerDID, &f.OwnerHandle, &f.Repository, 1688 - &f.Title, &f.Description, &f.IconURL, &f.PullCount, &f.StarCount, &isStarredInt, &avatarCID); err != nil { 1674 + &f.Title, &f.Description, &f.IconURL, &f.PullCount, &f.StarCount, &isStarredInt); err != nil { 1689 1675 return nil, err 1690 1676 } 1691 1677 f.IsStarred = isStarredInt > 0 1692 - // Prefer repo page avatar over annotation icon 1693 - if avatarCID != "" { 1694 - f.IconURL = BlobCDNURL(f.OwnerDID, avatarCID) 1695 - } 1696 1678 1697 1679 featured = append(featured, f) 1698 1680 } 1699 1681 1700 1682 return featured, nil 1701 1683 } 1702 - 1703 - // RepoPage represents a repository page record cached from PDS 1704 - type RepoPage struct { 1705 - DID string 1706 - Repository string 1707 - Description string 1708 - AvatarCID string 1709 - CreatedAt time.Time 1710 - UpdatedAt time.Time 1711 - } 1712 - 1713 - // UpsertRepoPage inserts or updates a repo page record 1714 - func UpsertRepoPage(db *sql.DB, did, repository, description, avatarCID string, createdAt, updatedAt time.Time) error { 1715 - _, err := db.Exec(` 1716 - INSERT INTO repo_pages (did, repository, description, avatar_cid, created_at, updated_at) 1717 - VALUES (?, ?, ?, ?, ?, ?) 1718 - ON CONFLICT(did, repository) DO UPDATE SET 1719 - description = excluded.description, 1720 - avatar_cid = excluded.avatar_cid, 1721 - updated_at = excluded.updated_at 1722 - `, did, repository, description, avatarCID, createdAt, updatedAt) 1723 - return err 1724 - } 1725 - 1726 - // GetRepoPage retrieves a repo page record 1727 - func GetRepoPage(db *sql.DB, did, repository string) (*RepoPage, error) { 1728 - var rp RepoPage 1729 - err := db.QueryRow(` 1730 - SELECT did, repository, description, avatar_cid, created_at, updated_at 1731 - FROM repo_pages 1732 - WHERE did = ? AND repository = ? 1733 - `, did, repository).Scan(&rp.DID, &rp.Repository, &rp.Description, &rp.AvatarCID, &rp.CreatedAt, &rp.UpdatedAt) 1734 - if err != nil { 1735 - return nil, err 1736 - } 1737 - return &rp, nil 1738 - } 1739 - 1740 - // DeleteRepoPage deletes a repo page record 1741 - func DeleteRepoPage(db *sql.DB, did, repository string) error { 1742 - _, err := db.Exec(` 1743 - DELETE FROM repo_pages WHERE did = ? AND repository = ? 1744 - `, did, repository) 1745 - return err 1746 - } 1747 - 1748 - // GetRepoPagesByDID returns all repo pages for a DID 1749 - func GetRepoPagesByDID(db *sql.DB, did string) ([]RepoPage, error) { 1750 - rows, err := db.Query(` 1751 - SELECT did, repository, description, avatar_cid, created_at, updated_at 1752 - FROM repo_pages 1753 - WHERE did = ? 1754 - `, did) 1755 - if err != nil { 1756 - return nil, err 1757 - } 1758 - defer rows.Close() 1759 - 1760 - var pages []RepoPage 1761 - for rows.Next() { 1762 - var rp RepoPage 1763 - if err := rows.Scan(&rp.DID, &rp.Repository, &rp.Description, &rp.AvatarCID, &rp.CreatedAt, &rp.UpdatedAt); err != nil { 1764 - return nil, err 1765 - } 1766 - pages = append(pages, rp) 1767 - } 1768 - return pages, rows.Err() 1769 - }
+5 -10
pkg/appview/db/schema.sql
··· 205 205 ); 206 206 CREATE INDEX IF NOT EXISTS idx_crew_denials_retry ON hold_crew_denials(next_retry_at); 207 207 208 - CREATE TABLE IF NOT EXISTS repo_pages ( 209 - did TEXT NOT NULL, 210 - repository TEXT NOT NULL, 211 - description TEXT, 212 - avatar_cid TEXT, 213 - created_at TIMESTAMP NOT NULL, 214 - updated_at TIMESTAMP NOT NULL, 215 - PRIMARY KEY(did, repository), 216 - FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE 208 + CREATE TABLE IF NOT EXISTS readme_cache ( 209 + url TEXT PRIMARY KEY, 210 + html TEXT NOT NULL, 211 + fetched_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP 217 212 ); 218 - CREATE INDEX IF NOT EXISTS idx_repo_pages_did ON repo_pages(did); 213 + CREATE INDEX IF NOT EXISTS idx_readme_cache_fetched ON readme_cache(fetched_at);
-32
pkg/appview/handlers/errors.go
··· 1 - package handlers 2 - 3 - import ( 4 - "html/template" 5 - "net/http" 6 - ) 7 - 8 - // NotFoundHandler handles 404 errors 9 - type NotFoundHandler struct { 10 - Templates *template.Template 11 - RegistryURL string 12 - } 13 - 14 - func (h *NotFoundHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 15 - RenderNotFound(w, r, h.Templates, h.RegistryURL) 16 - } 17 - 18 - // RenderNotFound renders the 404 page template. 19 - // Use this from other handlers when a resource is not found. 20 - func RenderNotFound(w http.ResponseWriter, r *http.Request, templates *template.Template, registryURL string) { 21 - w.WriteHeader(http.StatusNotFound) 22 - 23 - data := struct { 24 - PageData 25 - }{ 26 - PageData: NewPageData(r, registryURL), 27 - } 28 - 29 - if err := templates.ExecuteTemplate(w, "404", data); err != nil { 30 - http.Error(w, "Page not found", http.StatusNotFound) 31 - } 32 - }
-114
pkg/appview/handlers/images.go
··· 3 3 import ( 4 4 "database/sql" 5 5 "encoding/json" 6 - "errors" 7 6 "fmt" 8 - "io" 9 7 "net/http" 10 8 "strings" 11 - "time" 12 9 13 10 "atcr.io/pkg/appview/db" 14 11 "atcr.io/pkg/appview/middleware" ··· 158 155 159 156 w.WriteHeader(http.StatusOK) 160 157 } 161 - 162 - // UploadAvatarHandler handles uploading/updating a repository avatar 163 - type UploadAvatarHandler struct { 164 - DB *sql.DB 165 - Refresher *oauth.Refresher 166 - } 167 - 168 - // validImageTypes are the allowed MIME types for avatars (matches lexicon) 169 - var validImageTypes = map[string]bool{ 170 - "image/png": true, 171 - "image/jpeg": true, 172 - "image/webp": true, 173 - } 174 - 175 - func (h *UploadAvatarHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { 176 - user := middleware.GetUser(r) 177 - if user == nil { 178 - http.Error(w, "Unauthorized", http.StatusUnauthorized) 179 - return 180 - } 181 - 182 - repo := chi.URLParam(r, "repository") 183 - 184 - // Parse multipart form (max 3MB to match lexicon maxSize) 185 - if err := r.ParseMultipartForm(3 << 20); err != nil { 186 - http.Error(w, "File too large (max 3MB)", http.StatusBadRequest) 187 - return 188 - } 189 - 190 - file, header, err := r.FormFile("avatar") 191 - if err != nil { 192 - http.Error(w, "No file provided", http.StatusBadRequest) 193 - return 194 - } 195 - defer file.Close() 196 - 197 - // Validate MIME type 198 - contentType := header.Header.Get("Content-Type") 199 - if !validImageTypes[contentType] { 200 - http.Error(w, "Invalid file type. Must be PNG, JPEG, or WebP", http.StatusBadRequest) 201 - return 202 - } 203 - 204 - // Read file data 205 - data, err := io.ReadAll(io.LimitReader(file, 3<<20+1)) // Read up to 3MB + 1 byte 206 - if err != nil { 207 - http.Error(w, "Failed to read file", http.StatusInternalServerError) 208 - return 209 - } 210 - if len(data) > 3<<20 { 211 - http.Error(w, "File too large (max 3MB)", http.StatusBadRequest) 212 - return 213 - } 214 - 215 - // Create ATProto client with session provider (uses DoWithSession for DPoP nonce safety) 216 - pdsClient := atproto.NewClientWithSessionProvider(user.PDSEndpoint, user.DID, h.Refresher) 217 - 218 - // Upload blob to PDS 219 - blobRef, err := pdsClient.UploadBlob(r.Context(), data, contentType) 220 - if err != nil { 221 - if handleOAuthError(r.Context(), h.Refresher, user.DID, err) { 222 - http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized) 223 - return 224 - } 225 - http.Error(w, fmt.Sprintf("Failed to upload image: %v", err), http.StatusInternalServerError) 226 - return 227 - } 228 - 229 - // Fetch existing repo page record to preserve description 230 - var existingDescription string 231 - var existingCreatedAt time.Time 232 - record, err := pdsClient.GetRecord(r.Context(), atproto.RepoPageCollection, repo) 233 - if err == nil { 234 - // Parse existing record to preserve description 235 - var existingRecord atproto.RepoPageRecord 236 - if jsonErr := json.Unmarshal(record.Value, &existingRecord); jsonErr == nil { 237 - existingDescription = existingRecord.Description 238 - existingCreatedAt = existingRecord.CreatedAt 239 - } 240 - } else if !errors.Is(err, atproto.ErrRecordNotFound) { 241 - // Some other error - check if OAuth error 242 - if handleOAuthError(r.Context(), h.Refresher, user.DID, err) { 243 - http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized) 244 - return 245 - } 246 - // Log but continue - we'll create a new record 247 - } 248 - 249 - // Create updated repo page record 250 - repoPage := atproto.NewRepoPageRecord(repo, existingDescription, blobRef) 251 - // Preserve original createdAt if record existed 252 - if !existingCreatedAt.IsZero() { 253 - repoPage.CreatedAt = existingCreatedAt 254 - } 255 - 256 - // Save record to PDS 257 - _, err = pdsClient.PutRecord(r.Context(), atproto.RepoPageCollection, repo, repoPage) 258 - if err != nil { 259 - if handleOAuthError(r.Context(), h.Refresher, user.DID, err) { 260 - http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized) 261 - return 262 - } 263 - http.Error(w, fmt.Sprintf("Failed to update repository page: %v", err), http.StatusInternalServerError) 264 - return 265 - } 266 - 267 - // Return new avatar URL 268 - avatarURL := atproto.BlobCDNURL(user.DID, blobRef.Ref.Link) 269 - w.Header().Set("Content-Type", "application/json") 270 - json.NewEncoder(w).Encode(map[string]string{"avatarURL": avatarURL}) 271 - }
+15 -40
pkg/appview/handlers/repository.go
··· 27 27 Directory identity.Directory 28 28 Refresher *oauth.Refresher 29 29 HealthChecker *holdhealth.Checker 30 - ReadmeFetcher *readme.Fetcher // For rendering repo page descriptions 30 + ReadmeCache *readme.Cache 31 31 } 32 32 33 33 func (h *RepositoryPageHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { ··· 37 37 // Resolve identifier (handle or DID) to canonical DID and current handle 38 38 did, resolvedHandle, _, err := atproto.ResolveIdentity(r.Context(), identifier) 39 39 if err != nil { 40 - RenderNotFound(w, r, h.Templates, h.RegistryURL) 40 + http.Error(w, "User not found", http.StatusNotFound) 41 41 return 42 42 } 43 43 ··· 48 48 return 49 49 } 50 50 if owner == nil { 51 - RenderNotFound(w, r, h.Templates, h.RegistryURL) 51 + http.Error(w, "User not found", http.StatusNotFound) 52 52 return 53 53 } 54 54 ··· 136 136 } 137 137 138 138 if len(tagsWithPlatforms) == 0 && len(manifests) == 0 { 139 - RenderNotFound(w, r, h.Templates, h.RegistryURL) 139 + http.Error(w, "Repository not found", http.StatusNotFound) 140 140 return 141 141 } 142 142 ··· 190 190 isOwner = (user.DID == owner.DID) 191 191 } 192 192 193 - // Fetch README content from repo page record or annotations 193 + // Fetch README content if available 194 194 var readmeHTML template.HTML 195 + if repo.ReadmeURL != "" && h.ReadmeCache != nil { 196 + // Fetch with timeout 197 + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) 198 + defer cancel() 195 199 196 - // Try repo page record from database (synced from PDS via Jetstream) 197 - repoPage, err := db.GetRepoPage(h.DB, owner.DID, repository) 198 - if err == nil && repoPage != nil { 199 - // Use repo page avatar if present 200 - if repoPage.AvatarCID != "" { 201 - repo.IconURL = atproto.BlobCDNURL(owner.DID, repoPage.AvatarCID) 202 - } 203 - // Render description as markdown if present 204 - if repoPage.Description != "" && h.ReadmeFetcher != nil { 205 - html, err := h.ReadmeFetcher.RenderMarkdown([]byte(repoPage.Description)) 206 - if err != nil { 207 - slog.Warn("Failed to render repo page description", "error", err) 208 - } else { 209 - readmeHTML = template.HTML(html) 210 - } 211 - } 212 - } 213 - // Fall back to fetching README from URL annotations if no description in repo page 214 - if readmeHTML == "" && h.ReadmeFetcher != nil { 215 - // Fall back to fetching from URL annotations 216 - readmeURL := repo.ReadmeURL 217 - if readmeURL == "" && repo.SourceURL != "" { 218 - // Try to derive README URL from source URL 219 - readmeURL = readme.DeriveReadmeURL(repo.SourceURL, "main") 220 - if readmeURL == "" { 221 - readmeURL = readme.DeriveReadmeURL(repo.SourceURL, "master") 222 - } 223 - } 224 - if readmeURL != "" { 225 - html, err := h.ReadmeFetcher.FetchAndRender(r.Context(), readmeURL) 226 - if err != nil { 227 - slog.Debug("Failed to fetch README from URL", "url", readmeURL, "error", err) 228 - } else { 229 - readmeHTML = template.HTML(html) 230 - } 200 + html, err := h.ReadmeCache.Get(ctx, repo.ReadmeURL) 201 + if err != nil { 202 + slog.Warn("Failed to fetch README", "url", repo.ReadmeURL, "error", err) 203 + // Continue without README on error 204 + } else { 205 + readmeHTML = template.HTML(html) 231 206 } 232 207 } 233 208
+6 -3
pkg/appview/handlers/settings.go
··· 62 62 data.Profile.Handle = user.Handle 63 63 data.Profile.DID = user.DID 64 64 data.Profile.PDSEndpoint = user.PDSEndpoint 65 - data.Profile.DefaultHold = profile.DefaultHold 65 + if profile.DefaultHold != nil { 66 + data.Profile.DefaultHold = *profile.DefaultHold 67 + } 66 68 67 69 if err := h.Templates.ExecuteTemplate(w, "settings", data); err != nil { 68 70 http.Error(w, err.Error(), http.StatusInternalServerError) ··· 94 96 profile = atproto.NewSailorProfileRecord(holdEndpoint) 95 97 } else { 96 98 // Update existing profile 97 - profile.DefaultHold = holdEndpoint 98 - profile.UpdatedAt = time.Now() 99 + profile.DefaultHold = &holdEndpoint 100 + now := time.Now().Format(time.RFC3339) 101 + profile.UpdatedAt = &now 99 102 } 100 103 101 104 // Save profile
+1 -1
pkg/appview/handlers/user.go
··· 23 23 // Resolve identifier (handle or DID) to canonical DID and current handle 24 24 did, resolvedHandle, pdsEndpoint, err := atproto.ResolveIdentity(r.Context(), identifier) 25 25 if err != nil { 26 - RenderNotFound(w, r, h.Templates, h.RegistryURL) 26 + http.Error(w, "User not found", http.StatusNotFound) 27 27 return 28 28 } 29 29
+20 -261
pkg/appview/jetstream/backfill.go
··· 5 5 "database/sql" 6 6 "encoding/json" 7 7 "fmt" 8 - "io" 9 8 "log/slog" 10 - "net/http" 11 9 "strings" 12 10 "time" 13 11 14 12 "atcr.io/pkg/appview/db" 15 - "atcr.io/pkg/appview/readme" 16 13 "atcr.io/pkg/atproto" 17 - "atcr.io/pkg/auth/oauth" 18 14 ) 19 15 20 16 // BackfillWorker uses com.atproto.sync.listReposByCollection to backfill historical data 21 17 type BackfillWorker struct { 22 18 db *sql.DB 23 19 client *atproto.Client 24 - processor *Processor // Shared processor for DB operations 25 - defaultHoldDID string // Default hold DID from AppView config (e.g., "did:web:hold01.atcr.io") 26 - testMode bool // If true, suppress warnings for external holds 27 - refresher *oauth.Refresher // OAuth refresher for PDS writes (optional, can be nil) 20 + processor *Processor // Shared processor for DB operations 21 + defaultHoldDID string // Default hold DID from AppView config (e.g., "did:web:hold01.atcr.io") 22 + testMode bool // If true, suppress warnings for external holds 28 23 } 29 24 30 25 // BackfillState tracks backfill progress ··· 41 36 // NewBackfillWorker creates a backfill worker using sync API 42 37 // defaultHoldDID should be in format "did:web:hold01.atcr.io" 43 38 // To find a hold's DID, visit: https://hold-url/.well-known/did.json 44 - // refresher is optional - if provided, backfill will try to update PDS records when fetching README content 45 - func NewBackfillWorker(database *sql.DB, relayEndpoint, defaultHoldDID string, testMode bool, refresher *oauth.Refresher) (*BackfillWorker, error) { 39 + func NewBackfillWorker(database *sql.DB, relayEndpoint, defaultHoldDID string, testMode bool) (*BackfillWorker, error) { 46 40 // Create client for relay - used only for listReposByCollection 47 41 client := atproto.NewClient(relayEndpoint, "", "") 48 42 ··· 52 46 processor: NewProcessor(database, false), // No cache for batch processing 53 47 defaultHoldDID: defaultHoldDID, 54 48 testMode: testMode, 55 - refresher: refresher, 56 49 }, nil 57 50 } 58 51 ··· 74 67 atproto.TagCollection, // io.atcr.tag 75 68 atproto.StarCollection, // io.atcr.sailor.star 76 69 atproto.SailorProfileCollection, // io.atcr.sailor.profile 77 - atproto.RepoPageCollection, // io.atcr.repo.page 78 70 } 79 71 80 72 for _, collection := range collections { ··· 172 164 // Track what we found for deletion reconciliation 173 165 switch collection { 174 166 case atproto.ManifestCollection: 175 - var manifestRecord atproto.ManifestRecord 167 + var manifestRecord atproto.Manifest 176 168 if err := json.Unmarshal(record.Value, &manifestRecord); err == nil { 177 169 foundManifestDigests = append(foundManifestDigests, manifestRecord.Digest) 178 170 } 179 171 case atproto.TagCollection: 180 - var tagRecord atproto.TagRecord 172 + var tagRecord atproto.Tag 181 173 if err := json.Unmarshal(record.Value, &tagRecord); err == nil { 182 174 foundTags = append(foundTags, struct{ Repository, Tag string }{ 183 175 Repository: tagRecord.Repository, ··· 185 177 }) 186 178 } 187 179 case atproto.StarCollection: 188 - var starRecord atproto.StarRecord 180 + var starRecord atproto.SailorStar 189 181 if err := json.Unmarshal(record.Value, &starRecord); err == nil { 190 - key := fmt.Sprintf("%s/%s", starRecord.Subject.DID, starRecord.Subject.Repository) 191 - foundStars[key] = starRecord.CreatedAt 182 + key := fmt.Sprintf("%s/%s", starRecord.Subject.Did, starRecord.Subject.Repository) 183 + // Parse CreatedAt string to time.Time 184 + createdAt, parseErr := time.Parse(time.RFC3339, starRecord.CreatedAt) 185 + if parseErr != nil { 186 + createdAt = time.Now() 187 + } 188 + foundStars[key] = createdAt 192 189 } 193 190 } 194 191 ··· 225 222 } 226 223 } 227 224 228 - // After processing repo pages, fetch descriptions from external sources if empty 229 - if collection == atproto.RepoPageCollection { 230 - if err := b.reconcileRepoPageDescriptions(ctx, did, pdsEndpoint); err != nil { 231 - slog.Warn("Backfill failed to reconcile repo page descriptions", "did", did, "error", err) 232 - } 233 - } 234 - 235 225 return recordCount, nil 236 226 } 237 227 ··· 297 287 return b.processor.ProcessStar(context.Background(), did, record.Value) 298 288 case atproto.SailorProfileCollection: 299 289 return b.processor.ProcessSailorProfile(ctx, did, record.Value, b.queryCaptainRecordWrapper) 300 - case atproto.RepoPageCollection: 301 - // rkey is extracted from the record URI, but for repo pages we use Repository field 302 - return b.processor.ProcessRepoPage(ctx, did, record.URI, record.Value, false) 303 290 default: 304 291 return fmt.Errorf("unsupported collection: %s", collection) 305 292 } ··· 377 364 378 365 // reconcileAnnotations ensures annotations come from the newest manifest in each repository 379 366 // This fixes the out-of-order backfill issue where older manifests can overwrite newer annotations 367 + // NOTE: Currently disabled because the generated Manifest_Annotations type doesn't support 368 + // arbitrary key-value pairs. Would need to update lexicon schema with "unknown" type. 380 369 func (b *BackfillWorker) reconcileAnnotations(ctx context.Context, did string, pdsClient *atproto.Client) error { 381 - // Get all repositories for this DID 382 - repositories, err := db.GetRepositoriesForDID(b.db, did) 383 - if err != nil { 384 - return fmt.Errorf("failed to get repositories: %w", err) 385 - } 386 - 387 - for _, repo := range repositories { 388 - // Find newest manifest for this repository 389 - newestManifest, err := db.GetNewestManifestForRepo(b.db, did, repo) 390 - if err != nil { 391 - slog.Warn("Backfill failed to get newest manifest for repo", "did", did, "repository", repo, "error", err) 392 - continue // Skip on error 393 - } 394 - 395 - // Fetch the full manifest record from PDS using the digest as rkey 396 - rkey := strings.TrimPrefix(newestManifest.Digest, "sha256:") 397 - record, err := pdsClient.GetRecord(ctx, atproto.ManifestCollection, rkey) 398 - if err != nil { 399 - slog.Warn("Backfill failed to fetch manifest record for repo", "did", did, "repository", repo, "error", err) 400 - continue // Skip on error 401 - } 402 - 403 - // Parse manifest record 404 - var manifestRecord atproto.ManifestRecord 405 - if err := json.Unmarshal(record.Value, &manifestRecord); err != nil { 406 - slog.Warn("Backfill failed to parse manifest record for repo", "did", did, "repository", repo, "error", err) 407 - continue 408 - } 409 - 410 - // Update annotations from newest manifest only 411 - if len(manifestRecord.Annotations) > 0 { 412 - // Filter out empty annotations 413 - hasData := false 414 - for _, value := range manifestRecord.Annotations { 415 - if value != "" { 416 - hasData = true 417 - break 418 - } 419 - } 420 - 421 - if hasData { 422 - err = db.UpsertRepositoryAnnotations(b.db, did, repo, manifestRecord.Annotations) 423 - if err != nil { 424 - slog.Warn("Backfill failed to reconcile annotations for repo", "did", did, "repository", repo, "error", err) 425 - } else { 426 - slog.Info("Backfill reconciled annotations for repo from newest manifest", "did", did, "repository", repo, "digest", newestManifest.Digest) 427 - } 428 - } 429 - } 430 - } 431 - 432 - return nil 433 - } 434 - 435 - // reconcileRepoPageDescriptions fetches README content from external sources for repo pages with empty descriptions 436 - // If the user has an OAuth session, it updates the PDS record (source of truth) 437 - // Otherwise, it just stores the fetched content in the database 438 - func (b *BackfillWorker) reconcileRepoPageDescriptions(ctx context.Context, did, pdsEndpoint string) error { 439 - // Get all repo pages for this DID 440 - repoPages, err := db.GetRepoPagesByDID(b.db, did) 441 - if err != nil { 442 - return fmt.Errorf("failed to get repo pages: %w", err) 443 - } 444 - 445 - for _, page := range repoPages { 446 - // Skip pages that already have a description 447 - if page.Description != "" { 448 - continue 449 - } 450 - 451 - // Get annotations from the repository's manifest 452 - annotations, err := db.GetRepositoryAnnotations(b.db, did, page.Repository) 453 - if err != nil { 454 - slog.Debug("Failed to get annotations for repo page", "did", did, "repository", page.Repository, "error", err) 455 - continue 456 - } 457 - 458 - // Try to fetch README content from external sources 459 - description := b.fetchReadmeContent(ctx, annotations) 460 - if description == "" { 461 - // No README content available, skip 462 - continue 463 - } 464 - 465 - slog.Info("Fetched README for repo page", "did", did, "repository", page.Repository, "descriptionLength", len(description)) 466 - 467 - // Try to update PDS if we have OAuth session 468 - pdsUpdated := false 469 - if b.refresher != nil { 470 - if err := b.updateRepoPageInPDS(ctx, did, pdsEndpoint, page.Repository, description, page.AvatarCID); err != nil { 471 - slog.Debug("Could not update repo page in PDS, falling back to DB-only", "did", did, "repository", page.Repository, "error", err) 472 - } else { 473 - pdsUpdated = true 474 - slog.Info("Updated repo page in PDS with fetched description", "did", did, "repository", page.Repository) 475 - } 476 - } 477 - 478 - // Always update database with the fetched content 479 - if err := db.UpsertRepoPage(b.db, did, page.Repository, description, page.AvatarCID, page.CreatedAt, time.Now()); err != nil { 480 - slog.Warn("Failed to update repo page in database", "did", did, "repository", page.Repository, "error", err) 481 - } else if !pdsUpdated { 482 - slog.Info("Updated repo page in database (PDS not updated)", "did", did, "repository", page.Repository) 483 - } 484 - } 485 - 486 - return nil 487 - } 488 - 489 - // fetchReadmeContent attempts to fetch README content from external sources based on annotations 490 - // Priority: io.atcr.readme annotation > derived from org.opencontainers.image.source 491 - func (b *BackfillWorker) fetchReadmeContent(ctx context.Context, annotations map[string]string) string { 492 - // Create a context with timeout for README fetching 493 - fetchCtx, cancel := context.WithTimeout(ctx, 10*time.Second) 494 - defer cancel() 495 - 496 - // Priority 1: Direct README URL from io.atcr.readme annotation 497 - if readmeURL := annotations["io.atcr.readme"]; readmeURL != "" { 498 - content, err := b.fetchRawReadme(fetchCtx, readmeURL) 499 - if err != nil { 500 - slog.Debug("Failed to fetch README from io.atcr.readme annotation", "url", readmeURL, "error", err) 501 - } else if content != "" { 502 - return content 503 - } 504 - } 505 - 506 - // Priority 2: Derive README URL from org.opencontainers.image.source 507 - if sourceURL := annotations["org.opencontainers.image.source"]; sourceURL != "" { 508 - // Try main branch first, then master 509 - for _, branch := range []string{"main", "master"} { 510 - readmeURL := readme.DeriveReadmeURL(sourceURL, branch) 511 - if readmeURL == "" { 512 - continue 513 - } 514 - 515 - content, err := b.fetchRawReadme(fetchCtx, readmeURL) 516 - if err != nil { 517 - // Only log non-404 errors (404 is expected when trying main vs master) 518 - if !readme.Is404(err) { 519 - slog.Debug("Failed to fetch README from source URL", "url", readmeURL, "branch", branch, "error", err) 520 - } 521 - continue 522 - } 523 - 524 - if content != "" { 525 - return content 526 - } 527 - } 528 - } 529 - 530 - return "" 531 - } 532 - 533 - // fetchRawReadme fetches raw markdown content from a URL 534 - func (b *BackfillWorker) fetchRawReadme(ctx context.Context, readmeURL string) (string, error) { 535 - req, err := http.NewRequestWithContext(ctx, "GET", readmeURL, nil) 536 - if err != nil { 537 - return "", fmt.Errorf("failed to create request: %w", err) 538 - } 539 - 540 - req.Header.Set("User-Agent", "ATCR-Backfill-README-Fetcher/1.0") 541 - 542 - client := &http.Client{ 543 - Timeout: 10 * time.Second, 544 - CheckRedirect: func(req *http.Request, via []*http.Request) error { 545 - if len(via) >= 5 { 546 - return fmt.Errorf("too many redirects") 547 - } 548 - return nil 549 - }, 550 - } 551 - 552 - resp, err := client.Do(req) 553 - if err != nil { 554 - return "", fmt.Errorf("failed to fetch URL: %w", err) 555 - } 556 - defer resp.Body.Close() 557 - 558 - if resp.StatusCode != http.StatusOK { 559 - return "", fmt.Errorf("status %d", resp.StatusCode) 560 - } 561 - 562 - // Limit content size to 100KB 563 - limitedReader := io.LimitReader(resp.Body, 100*1024) 564 - content, err := io.ReadAll(limitedReader) 565 - if err != nil { 566 - return "", fmt.Errorf("failed to read response body: %w", err) 567 - } 568 - 569 - return string(content), nil 570 - } 571 - 572 - // updateRepoPageInPDS updates the repo page record in the user's PDS using OAuth 573 - func (b *BackfillWorker) updateRepoPageInPDS(ctx context.Context, did, pdsEndpoint, repository, description, avatarCID string) error { 574 - if b.refresher == nil { 575 - return fmt.Errorf("no OAuth refresher available") 576 - } 577 - 578 - // Create ATProto client with session provider 579 - pdsClient := atproto.NewClientWithSessionProvider(pdsEndpoint, did, b.refresher) 580 - 581 - // Get existing repo page record to preserve other fields 582 - existingRecord, err := pdsClient.GetRecord(ctx, atproto.RepoPageCollection, repository) 583 - var createdAt time.Time 584 - var avatarRef *atproto.ATProtoBlobRef 585 - 586 - if err == nil && existingRecord != nil { 587 - // Parse existing record 588 - var existingPage atproto.RepoPageRecord 589 - if err := json.Unmarshal(existingRecord.Value, &existingPage); err == nil { 590 - createdAt = existingPage.CreatedAt 591 - avatarRef = existingPage.Avatar 592 - } 593 - } 594 - 595 - if createdAt.IsZero() { 596 - createdAt = time.Now() 597 - } 598 - 599 - // Create updated repo page record 600 - repoPage := &atproto.RepoPageRecord{ 601 - Type: atproto.RepoPageCollection, 602 - Repository: repository, 603 - Description: description, 604 - Avatar: avatarRef, 605 - CreatedAt: createdAt, 606 - UpdatedAt: time.Now(), 607 - } 608 - 609 - // Write to PDS - this will use DoWithSession internally 610 - _, err = pdsClient.PutRecord(ctx, atproto.RepoPageCollection, repository, repoPage) 611 - if err != nil { 612 - return fmt.Errorf("failed to write to PDS: %w", err) 613 - } 614 - 370 + // TODO: Re-enable once lexicon supports annotations as map[string]string 371 + // For now, skip annotation reconciliation as the generated type is an empty struct 372 + _ = did 373 + _ = pdsClient 615 374 return nil 616 375 }
+51 -65
pkg/appview/jetstream/processor.go
··· 100 100 // Returns the manifest ID for further processing (layers/references) 101 101 func (p *Processor) ProcessManifest(ctx context.Context, did string, recordData []byte) (int64, error) { 102 102 // Unmarshal manifest record 103 - var manifestRecord atproto.ManifestRecord 103 + var manifestRecord atproto.Manifest 104 104 if err := json.Unmarshal(recordData, &manifestRecord); err != nil { 105 105 return 0, fmt.Errorf("failed to unmarshal manifest: %w", err) 106 106 } ··· 110 110 // Extract hold DID from manifest (with fallback for legacy manifests) 111 111 // New manifests use holdDid field (DID format) 112 112 // Old manifests use holdEndpoint field (URL format) - convert to DID 113 - holdDID := manifestRecord.HoldDID 114 - if holdDID == "" && manifestRecord.HoldEndpoint != "" { 113 + var holdDID string 114 + if manifestRecord.HoldDid != nil && *manifestRecord.HoldDid != "" { 115 + holdDID = *manifestRecord.HoldDid 116 + } else if manifestRecord.HoldEndpoint != nil && *manifestRecord.HoldEndpoint != "" { 115 117 // Legacy manifest - convert URL to DID 116 - holdDID = atproto.ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint) 118 + holdDID = atproto.ResolveHoldDIDFromURL(*manifestRecord.HoldEndpoint) 119 + } 120 + 121 + // Parse CreatedAt string to time.Time 122 + createdAt, err := time.Parse(time.RFC3339, manifestRecord.CreatedAt) 123 + if err != nil { 124 + // Fall back to current time if parsing fails 125 + createdAt = time.Now() 117 126 } 118 127 119 128 // Prepare manifest for insertion (WITHOUT annotation fields) ··· 122 131 Repository: manifestRecord.Repository, 123 132 Digest: manifestRecord.Digest, 124 133 MediaType: manifestRecord.MediaType, 125 - SchemaVersion: manifestRecord.SchemaVersion, 134 + SchemaVersion: int(manifestRecord.SchemaVersion), 126 135 HoldEndpoint: holdDID, 127 - CreatedAt: manifestRecord.CreatedAt, 136 + CreatedAt: createdAt, 128 137 // Annotations removed - stored separately in repository_annotations table 129 138 } 130 139 ··· 154 163 } 155 164 } 156 165 157 - // Update repository annotations ONLY if manifest has at least one non-empty annotation 158 - if manifestRecord.Annotations != nil { 159 - hasData := false 160 - for _, value := range manifestRecord.Annotations { 161 - if value != "" { 162 - hasData = true 163 - break 164 - } 165 - } 166 - 167 - if hasData { 168 - // Replace all annotations for this repository 169 - err = db.UpsertRepositoryAnnotations(p.db, did, manifestRecord.Repository, manifestRecord.Annotations) 170 - if err != nil { 171 - return 0, fmt.Errorf("failed to upsert annotations: %w", err) 172 - } 173 - } 174 - } 166 + // Note: Repository annotations are currently disabled because the generated 167 + // Manifest_Annotations type doesn't support arbitrary key-value pairs. 168 + // The lexicon would need to use "unknown" type for annotations to support this. 169 + // TODO: Re-enable once lexicon supports annotations as map[string]string 170 + _ = manifestRecord.Annotations 175 171 176 172 // Insert manifest references or layers 177 173 if isManifestList { ··· 184 180 185 181 if ref.Platform != nil { 186 182 platformArch = ref.Platform.Architecture 187 - platformOS = ref.Platform.OS 188 - platformVariant = ref.Platform.Variant 189 - platformOSVersion = ref.Platform.OSVersion 183 + platformOS = ref.Platform.Os 184 + if ref.Platform.Variant != nil { 185 + platformVariant = *ref.Platform.Variant 186 + } 187 + if ref.Platform.OsVersion != nil { 188 + platformOSVersion = *ref.Platform.OsVersion 189 + } 190 190 } 191 191 192 - // Detect attestation manifests from annotations 192 + // Note: Attestation detection via annotations is currently disabled 193 + // because the generated Manifest_ManifestReference_Annotations type 194 + // doesn't support arbitrary key-value pairs. 193 195 isAttestation := false 194 - if ref.Annotations != nil { 195 - if refType, ok := ref.Annotations["vnd.docker.reference.type"]; ok { 196 - isAttestation = refType == "attestation-manifest" 197 - } 198 - } 199 196 200 197 if err := db.InsertManifestReference(p.db, &db.ManifestReference{ 201 198 ManifestID: manifestID, ··· 235 232 // ProcessTag processes a tag record and stores it in the database 236 233 func (p *Processor) ProcessTag(ctx context.Context, did string, recordData []byte) error { 237 234 // Unmarshal tag record 238 - var tagRecord atproto.TagRecord 235 + var tagRecord atproto.Tag 239 236 if err := json.Unmarshal(recordData, &tagRecord); err != nil { 240 237 return fmt.Errorf("failed to unmarshal tag: %w", err) 241 238 } ··· 245 242 return fmt.Errorf("failed to get manifest digest from tag record: %w", err) 246 243 } 247 244 245 + // Parse CreatedAt string to time.Time 246 + tagCreatedAt, err := time.Parse(time.RFC3339, tagRecord.CreatedAt) 247 + if err != nil { 248 + // Fall back to current time if parsing fails 249 + tagCreatedAt = time.Now() 250 + } 251 + 248 252 // Insert or update tag 249 253 return db.UpsertTag(p.db, &db.Tag{ 250 254 DID: did, 251 255 Repository: tagRecord.Repository, 252 256 Tag: tagRecord.Tag, 253 257 Digest: manifestDigest, 254 - CreatedAt: tagRecord.UpdatedAt, 258 + CreatedAt: tagCreatedAt, 255 259 }) 256 260 } 257 261 258 262 // ProcessStar processes a star record and stores it in the database 259 263 func (p *Processor) ProcessStar(ctx context.Context, did string, recordData []byte) error { 260 264 // Unmarshal star record 261 - var starRecord atproto.StarRecord 265 + var starRecord atproto.SailorStar 262 266 if err := json.Unmarshal(recordData, &starRecord); err != nil { 263 267 return fmt.Errorf("failed to unmarshal star: %w", err) 264 268 } ··· 266 270 // The DID here is the starrer (user who starred) 267 271 // The subject contains the owner DID and repository 268 272 // Star count will be calculated on demand from the stars table 269 - return db.UpsertStar(p.db, did, starRecord.Subject.DID, starRecord.Subject.Repository, starRecord.CreatedAt) 273 + // Parse the CreatedAt string to time.Time 274 + createdAt, err := time.Parse(time.RFC3339, starRecord.CreatedAt) 275 + if err != nil { 276 + // Fall back to current time if parsing fails 277 + createdAt = time.Now() 278 + } 279 + return db.UpsertStar(p.db, did, starRecord.Subject.Did, starRecord.Subject.Repository, createdAt) 270 280 } 271 281 272 282 // ProcessSailorProfile processes a sailor profile record 273 283 // This is primarily used by backfill to cache captain records for holds 274 284 func (p *Processor) ProcessSailorProfile(ctx context.Context, did string, recordData []byte, queryCaptainFn func(context.Context, string) error) error { 275 285 // Unmarshal sailor profile record 276 - var profileRecord atproto.SailorProfileRecord 286 + var profileRecord atproto.SailorProfile 277 287 if err := json.Unmarshal(recordData, &profileRecord); err != nil { 278 288 return fmt.Errorf("failed to unmarshal sailor profile: %w", err) 279 289 } 280 290 281 291 // Skip if no default hold set 282 - if profileRecord.DefaultHold == "" { 292 + if profileRecord.DefaultHold == nil || *profileRecord.DefaultHold == "" { 283 293 return nil 284 294 } 285 295 286 296 // Convert hold URL/DID to canonical DID 287 - holdDID := atproto.ResolveHoldDIDFromURL(profileRecord.DefaultHold) 297 + holdDID := atproto.ResolveHoldDIDFromURL(*profileRecord.DefaultHold) 288 298 if holdDID == "" { 289 - slog.Warn("Invalid hold reference in profile", "component", "processor", "did", did, "default_hold", profileRecord.DefaultHold) 299 + slog.Warn("Invalid hold reference in profile", "component", "processor", "did", did, "default_hold", *profileRecord.DefaultHold) 290 300 return nil 291 301 } 292 302 ··· 297 307 } 298 308 299 309 return nil 300 - } 301 - 302 - // ProcessRepoPage processes a repository page record 303 - // This is called when Jetstream receives a repo page create/update event 304 - func (p *Processor) ProcessRepoPage(ctx context.Context, did string, rkey string, recordData []byte, isDelete bool) error { 305 - if isDelete { 306 - // Delete the repo page from our cache 307 - return db.DeleteRepoPage(p.db, did, rkey) 308 - } 309 - 310 - // Unmarshal repo page record 311 - var pageRecord atproto.RepoPageRecord 312 - if err := json.Unmarshal(recordData, &pageRecord); err != nil { 313 - return fmt.Errorf("failed to unmarshal repo page: %w", err) 314 - } 315 - 316 - // Extract avatar CID if present 317 - avatarCID := "" 318 - if pageRecord.Avatar != nil && pageRecord.Avatar.Ref.Link != "" { 319 - avatarCID = pageRecord.Avatar.Ref.Link 320 - } 321 - 322 - // Upsert to database 323 - return db.UpsertRepoPage(p.db, did, pageRecord.Repository, pageRecord.Description, avatarCID, pageRecord.CreatedAt, pageRecord.UpdatedAt) 324 310 } 325 311 326 312 // ProcessIdentity handles identity change events (handle updates)
+36 -54
pkg/appview/jetstream/processor_test.go
··· 11 11 _ "github.com/mattn/go-sqlite3" 12 12 ) 13 13 14 + // ptrString returns a pointer to the given string 15 + func ptrString(s string) *string { 16 + return &s 17 + } 18 + 14 19 // setupTestDB creates an in-memory SQLite database for testing 15 20 func setupTestDB(t *testing.T) *sql.DB { 16 21 database, err := sql.Open("sqlite3", ":memory:") ··· 143 148 ctx := context.Background() 144 149 145 150 // Create test manifest record 146 - manifestRecord := &atproto.ManifestRecord{ 151 + manifestRecord := &atproto.Manifest{ 147 152 Repository: "test-app", 148 153 Digest: "sha256:abc123", 149 154 MediaType: "application/vnd.oci.image.manifest.v1+json", 150 155 SchemaVersion: 2, 151 - HoldEndpoint: "did:web:hold01.atcr.io", 152 - CreatedAt: time.Now(), 153 - Config: &atproto.BlobReference{ 156 + HoldEndpoint: ptrString("did:web:hold01.atcr.io"), 157 + CreatedAt: time.Now().Format(time.RFC3339), 158 + Config: &atproto.Manifest_BlobReference{ 154 159 Digest: "sha256:config123", 155 160 Size: 1234, 156 161 }, 157 - Layers: []atproto.BlobReference{ 162 + Layers: []atproto.Manifest_BlobReference{ 158 163 {Digest: "sha256:layer1", Size: 5000, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"}, 159 164 {Digest: "sha256:layer2", Size: 3000, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"}, 160 165 }, 161 - Annotations: map[string]string{ 162 - "org.opencontainers.image.title": "Test App", 163 - "org.opencontainers.image.description": "A test application", 164 - "org.opencontainers.image.source": "https://github.com/test/app", 165 - "org.opencontainers.image.licenses": "MIT", 166 - "io.atcr.icon": "https://example.com/icon.png", 167 - }, 166 + // Annotations disabled - generated Manifest_Annotations is empty struct 168 167 } 169 168 170 169 // Marshal to bytes for ProcessManifest ··· 193 192 t.Errorf("Expected 1 manifest, got %d", count) 194 193 } 195 194 196 - // Verify annotations were stored in repository_annotations table 197 - var title, source string 198 - err = database.QueryRow("SELECT value FROM repository_annotations WHERE did = ? AND repository = ? AND key = ?", 199 - "did:plc:test123", "test-app", "org.opencontainers.image.title").Scan(&title) 200 - if err != nil { 201 - t.Fatalf("Failed to query title annotation: %v", err) 202 - } 203 - if title != "Test App" { 204 - t.Errorf("title = %q, want %q", title, "Test App") 205 - } 206 - 207 - err = database.QueryRow("SELECT value FROM repository_annotations WHERE did = ? AND repository = ? AND key = ?", 208 - "did:plc:test123", "test-app", "org.opencontainers.image.source").Scan(&source) 209 - if err != nil { 210 - t.Fatalf("Failed to query source annotation: %v", err) 211 - } 212 - if source != "https://github.com/test/app" { 213 - t.Errorf("source = %q, want %q", source, "https://github.com/test/app") 214 - } 195 + // Note: Annotations verification disabled - generated Manifest_Annotations is empty struct 196 + // TODO: Re-enable when lexicon uses "unknown" type for annotations 215 197 216 198 // Verify layers were inserted 217 199 var layerCount int ··· 242 224 ctx := context.Background() 243 225 244 226 // Create test manifest list record 245 - manifestRecord := &atproto.ManifestRecord{ 227 + manifestRecord := &atproto.Manifest{ 246 228 Repository: "test-app", 247 229 Digest: "sha256:list123", 248 230 MediaType: "application/vnd.oci.image.index.v1+json", 249 231 SchemaVersion: 2, 250 - HoldEndpoint: "did:web:hold01.atcr.io", 251 - CreatedAt: time.Now(), 252 - Manifests: []atproto.ManifestReference{ 232 + HoldEndpoint: ptrString("did:web:hold01.atcr.io"), 233 + CreatedAt: time.Now().Format(time.RFC3339), 234 + Manifests: []atproto.Manifest_ManifestReference{ 253 235 { 254 236 Digest: "sha256:amd64manifest", 255 237 MediaType: "application/vnd.oci.image.manifest.v1+json", 256 238 Size: 1000, 257 - Platform: &atproto.Platform{ 239 + Platform: &atproto.Manifest_Platform{ 258 240 Architecture: "amd64", 259 - OS: "linux", 241 + Os: "linux", 260 242 }, 261 243 }, 262 244 { 263 245 Digest: "sha256:arm64manifest", 264 246 MediaType: "application/vnd.oci.image.manifest.v1+json", 265 247 Size: 1100, 266 - Platform: &atproto.Platform{ 248 + Platform: &atproto.Manifest_Platform{ 267 249 Architecture: "arm64", 268 - OS: "linux", 269 - Variant: "v8", 250 + Os: "linux", 251 + Variant: ptrString("v8"), 270 252 }, 271 253 }, 272 254 }, ··· 326 308 ctx := context.Background() 327 309 328 310 // Create test tag record (using ManifestDigest field for simplicity) 329 - tagRecord := &atproto.TagRecord{ 311 + tagRecord := &atproto.Tag{ 330 312 Repository: "test-app", 331 313 Tag: "latest", 332 - ManifestDigest: "sha256:abc123", 333 - UpdatedAt: time.Now(), 314 + ManifestDigest: ptrString("sha256:abc123"), 315 + CreatedAt: time.Now().Format(time.RFC3339), 334 316 } 335 317 336 318 // Marshal to bytes for ProcessTag ··· 368 350 } 369 351 370 352 // Test upserting same tag with new digest 371 - tagRecord.ManifestDigest = "sha256:newdigest" 353 + tagRecord.ManifestDigest = ptrString("sha256:newdigest") 372 354 recordBytes, err = json.Marshal(tagRecord) 373 355 if err != nil { 374 356 t.Fatalf("Failed to marshal tag: %v", err) ··· 407 389 ctx := context.Background() 408 390 409 391 // Create test star record 410 - starRecord := &atproto.StarRecord{ 411 - Subject: atproto.StarSubject{ 412 - DID: "did:plc:owner123", 392 + starRecord := &atproto.SailorStar{ 393 + Subject: atproto.SailorStar_Subject{ 394 + Did: "did:plc:owner123", 413 395 Repository: "test-app", 414 396 }, 415 - CreatedAt: time.Now(), 397 + CreatedAt: time.Now().Format(time.RFC3339), 416 398 } 417 399 418 400 // Marshal to bytes for ProcessStar ··· 466 448 p := NewProcessor(database, false) 467 449 ctx := context.Background() 468 450 469 - manifestRecord := &atproto.ManifestRecord{ 451 + manifestRecord := &atproto.Manifest{ 470 452 Repository: "test-app", 471 453 Digest: "sha256:abc123", 472 454 MediaType: "application/vnd.oci.image.manifest.v1+json", 473 455 SchemaVersion: 2, 474 - HoldEndpoint: "did:web:hold01.atcr.io", 475 - CreatedAt: time.Now(), 456 + HoldEndpoint: ptrString("did:web:hold01.atcr.io"), 457 + CreatedAt: time.Now().Format(time.RFC3339), 476 458 } 477 459 478 460 // Marshal to bytes for ProcessManifest ··· 518 500 ctx := context.Background() 519 501 520 502 // Manifest with nil annotations 521 - manifestRecord := &atproto.ManifestRecord{ 503 + manifestRecord := &atproto.Manifest{ 522 504 Repository: "test-app", 523 505 Digest: "sha256:abc123", 524 506 MediaType: "application/vnd.oci.image.manifest.v1+json", 525 507 SchemaVersion: 2, 526 - HoldEndpoint: "did:web:hold01.atcr.io", 527 - CreatedAt: time.Now(), 508 + HoldEndpoint: ptrString("did:web:hold01.atcr.io"), 509 + CreatedAt: time.Now().Format(time.RFC3339), 528 510 Annotations: nil, 529 511 } 530 512
+3 -39
pkg/appview/jetstream/worker.go
··· 61 61 jetstreamURL: jetstreamURL, 62 62 startCursor: startCursor, 63 63 wantedCollections: []string{ 64 - "io.atcr.*", // Subscribe to all ATCR collections 64 + atproto.ManifestCollection, // io.atcr.manifest 65 + atproto.TagCollection, // io.atcr.tag 66 + atproto.StarCollection, // io.atcr.sailor.star 65 67 }, 66 68 processor: NewProcessor(database, true), // Use cache for live streaming 67 69 } ··· 310 312 case atproto.StarCollection: 311 313 slog.Info("Jetstream processing star event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 312 314 return w.processStar(commit) 313 - case atproto.RepoPageCollection: 314 - slog.Info("Jetstream processing repo page event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey) 315 - return w.processRepoPage(commit) 316 315 default: 317 316 // Ignore other collections 318 317 return nil ··· 435 434 436 435 // Use shared processor for DB operations 437 436 return w.processor.ProcessStar(context.Background(), commit.DID, recordBytes) 438 - } 439 - 440 - // processRepoPage processes a repo page commit event 441 - func (w *Worker) processRepoPage(commit *CommitEvent) error { 442 - // Resolve and upsert user with handle/PDS endpoint 443 - if err := w.processor.EnsureUser(context.Background(), commit.DID); err != nil { 444 - return fmt.Errorf("failed to ensure user: %w", err) 445 - } 446 - 447 - isDelete := commit.Operation == "delete" 448 - 449 - if isDelete { 450 - // Delete - rkey is the repository name 451 - slog.Info("Jetstream deleting repo page", "did", commit.DID, "repository", commit.RKey) 452 - if err := w.processor.ProcessRepoPage(context.Background(), commit.DID, commit.RKey, nil, true); err != nil { 453 - slog.Error("Jetstream ERROR deleting repo page", "error", err) 454 - return err 455 - } 456 - slog.Info("Jetstream successfully deleted repo page", "did", commit.DID, "repository", commit.RKey) 457 - return nil 458 - } 459 - 460 - // Parse repo page record 461 - if commit.Record == nil { 462 - return nil 463 - } 464 - 465 - // Marshal map to bytes for processing 466 - recordBytes, err := json.Marshal(commit.Record) 467 - if err != nil { 468 - return fmt.Errorf("failed to marshal record: %w", err) 469 - } 470 - 471 - // Use shared processor for DB operations 472 - return w.processor.ProcessRepoPage(context.Background(), commit.DID, commit.RKey, recordBytes, false) 473 437 } 474 438 475 439 // processIdentity processes an identity event (handle change)
+6 -59
pkg/appview/middleware/auth.go
··· 11 11 "net/url" 12 12 13 13 "atcr.io/pkg/appview/db" 14 - "atcr.io/pkg/auth" 15 - "atcr.io/pkg/auth/oauth" 16 14 ) 17 15 18 16 type contextKey string 19 17 20 18 const userKey contextKey = "user" 21 19 22 - // WebAuthDeps contains dependencies for web auth middleware 23 - type WebAuthDeps struct { 24 - SessionStore *db.SessionStore 25 - Database *sql.DB 26 - Refresher *oauth.Refresher 27 - DefaultHoldDID string 28 - } 29 - 30 20 // RequireAuth is middleware that requires authentication 31 21 func RequireAuth(store *db.SessionStore, database *sql.DB) func(http.Handler) http.Handler { 32 - return RequireAuthWithDeps(WebAuthDeps{ 33 - SessionStore: store, 34 - Database: database, 35 - }) 36 - } 37 - 38 - // RequireAuthWithDeps is middleware that requires authentication and creates UserContext 39 - func RequireAuthWithDeps(deps WebAuthDeps) func(http.Handler) http.Handler { 40 22 return func(next http.Handler) http.Handler { 41 23 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 42 24 sessionID, ok := getSessionID(r) ··· 50 32 return 51 33 } 52 34 53 - sess, ok := deps.SessionStore.Get(sessionID) 35 + sess, ok := store.Get(sessionID) 54 36 if !ok { 55 37 // Build return URL with query parameters preserved 56 38 returnTo := r.URL.Path ··· 62 44 } 63 45 64 46 // Look up full user from database to get avatar 65 - user, err := db.GetUserByDID(deps.Database, sess.DID) 47 + user, err := db.GetUserByDID(database, sess.DID) 66 48 if err != nil || user == nil { 67 49 // Fallback to session data if DB lookup fails 68 50 user = &db.User{ ··· 72 54 } 73 55 } 74 56 75 - ctx := r.Context() 76 - ctx = context.WithValue(ctx, userKey, user) 77 - 78 - // Create UserContext for authenticated users (enables EnsureUserSetup) 79 - if deps.Refresher != nil { 80 - userCtx := auth.NewUserContext(sess.DID, auth.AuthMethodOAuth, r.Method, &auth.Dependencies{ 81 - Refresher: deps.Refresher, 82 - DefaultHoldDID: deps.DefaultHoldDID, 83 - }) 84 - userCtx.SetPDS(sess.Handle, sess.PDSEndpoint) 85 - userCtx.EnsureUserSetup() 86 - ctx = auth.WithUserContext(ctx, userCtx) 87 - } 88 - 57 + ctx := context.WithValue(r.Context(), userKey, user) 89 58 next.ServeHTTP(w, r.WithContext(ctx)) 90 59 }) 91 60 } ··· 93 62 94 63 // OptionalAuth is middleware that optionally includes user if authenticated 95 64 func OptionalAuth(store *db.SessionStore, database *sql.DB) func(http.Handler) http.Handler { 96 - return OptionalAuthWithDeps(WebAuthDeps{ 97 - SessionStore: store, 98 - Database: database, 99 - }) 100 - } 101 - 102 - // OptionalAuthWithDeps is middleware that optionally includes user and UserContext if authenticated 103 - func OptionalAuthWithDeps(deps WebAuthDeps) func(http.Handler) http.Handler { 104 65 return func(next http.Handler) http.Handler { 105 66 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 106 67 sessionID, ok := getSessionID(r) 107 68 if ok { 108 - if sess, ok := deps.SessionStore.Get(sessionID); ok { 69 + if sess, ok := store.Get(sessionID); ok { 109 70 // Look up full user from database to get avatar 110 - user, err := db.GetUserByDID(deps.Database, sess.DID) 71 + user, err := db.GetUserByDID(database, sess.DID) 111 72 if err != nil || user == nil { 112 73 // Fallback to session data if DB lookup fails 113 74 user = &db.User{ ··· 116 77 PDSEndpoint: sess.PDSEndpoint, 117 78 } 118 79 } 119 - 120 - ctx := r.Context() 121 - ctx = context.WithValue(ctx, userKey, user) 122 - 123 - // Create UserContext for authenticated users (enables EnsureUserSetup) 124 - if deps.Refresher != nil { 125 - userCtx := auth.NewUserContext(sess.DID, auth.AuthMethodOAuth, r.Method, &auth.Dependencies{ 126 - Refresher: deps.Refresher, 127 - DefaultHoldDID: deps.DefaultHoldDID, 128 - }) 129 - userCtx.SetPDS(sess.Handle, sess.PDSEndpoint) 130 - userCtx.EnsureUserSetup() 131 - ctx = auth.WithUserContext(ctx, userCtx) 132 - } 133 - 80 + ctx := context.WithValue(r.Context(), userKey, user) 134 81 r = r.WithContext(ctx) 135 82 } 136 83 }
+340 -111
pkg/appview/middleware/registry.go
··· 2 2 3 3 import ( 4 4 "context" 5 - "database/sql" 6 5 "fmt" 7 6 "log/slog" 8 7 "net/http" 9 8 "strings" 9 + "sync" 10 + "time" 10 11 11 12 "github.com/distribution/distribution/v3" 13 + "github.com/distribution/distribution/v3/registry/api/errcode" 12 14 registrymw "github.com/distribution/distribution/v3/registry/middleware/registry" 13 15 "github.com/distribution/distribution/v3/registry/storage/driver" 14 16 "github.com/distribution/reference" ··· 26 28 // authMethodKey is the context key for storing auth method from JWT 27 29 const authMethodKey contextKey = "auth.method" 28 30 29 - // pullerDIDKey is the context key for storing the authenticated user's DID from JWT 30 - const pullerDIDKey contextKey = "puller.did" 31 + // validationCacheEntry stores a validated service token with expiration 32 + type validationCacheEntry struct { 33 + serviceToken string 34 + validUntil time.Time 35 + err error // Cached error for fast-fail 36 + mu sync.Mutex // Per-entry lock to serialize cache population 37 + inFlight bool // True if another goroutine is fetching the token 38 + done chan struct{} // Closed when fetch completes 39 + } 40 + 41 + // validationCache provides request-level caching for service tokens 42 + // This prevents concurrent layer uploads from racing on OAuth/DPoP requests 43 + type validationCache struct { 44 + mu sync.RWMutex 45 + entries map[string]*validationCacheEntry // key: "did:holdDID" 46 + } 47 + 48 + // newValidationCache creates a new validation cache 49 + func newValidationCache() *validationCache { 50 + return &validationCache{ 51 + entries: make(map[string]*validationCacheEntry), 52 + } 53 + } 54 + 55 + // getOrFetch retrieves a service token from cache or fetches it 56 + // Multiple concurrent requests for the same DID:holdDID will share the fetch operation 57 + func (vc *validationCache) getOrFetch(ctx context.Context, cacheKey string, fetchFunc func() (string, error)) (string, error) { 58 + // Fast path: check cache with read lock 59 + vc.mu.RLock() 60 + entry, exists := vc.entries[cacheKey] 61 + vc.mu.RUnlock() 62 + 63 + if exists { 64 + // Entry exists, check if it's still valid 65 + entry.mu.Lock() 66 + 67 + // If another goroutine is fetching, wait for it 68 + if entry.inFlight { 69 + done := entry.done 70 + entry.mu.Unlock() 71 + 72 + select { 73 + case <-done: 74 + // Fetch completed, check result 75 + entry.mu.Lock() 76 + defer entry.mu.Unlock() 77 + 78 + if entry.err != nil { 79 + return "", entry.err 80 + } 81 + if time.Now().Before(entry.validUntil) { 82 + return entry.serviceToken, nil 83 + } 84 + // Fall through to refetch 85 + case <-ctx.Done(): 86 + return "", ctx.Err() 87 + } 88 + } else { 89 + // Check if cached token is still valid 90 + if entry.err != nil && time.Now().Before(entry.validUntil) { 91 + // Return cached error (fast-fail) 92 + entry.mu.Unlock() 93 + return "", entry.err 94 + } 95 + if entry.err == nil && time.Now().Before(entry.validUntil) { 96 + // Return cached token 97 + token := entry.serviceToken 98 + entry.mu.Unlock() 99 + return token, nil 100 + } 101 + entry.mu.Unlock() 102 + } 103 + } 104 + 105 + // Slow path: need to fetch token 106 + vc.mu.Lock() 107 + entry, exists = vc.entries[cacheKey] 108 + if !exists { 109 + // Create new entry 110 + entry = &validationCacheEntry{ 111 + inFlight: true, 112 + done: make(chan struct{}), 113 + } 114 + vc.entries[cacheKey] = entry 115 + } 116 + vc.mu.Unlock() 117 + 118 + // Lock the entry to perform fetch 119 + entry.mu.Lock() 120 + 121 + // Double-check: another goroutine may have fetched while we waited 122 + if !entry.inFlight { 123 + if entry.err != nil && time.Now().Before(entry.validUntil) { 124 + err := entry.err 125 + entry.mu.Unlock() 126 + return "", err 127 + } 128 + if entry.err == nil && time.Now().Before(entry.validUntil) { 129 + token := entry.serviceToken 130 + entry.mu.Unlock() 131 + return token, nil 132 + } 133 + } 134 + 135 + // Mark as in-flight and create fresh done channel for this fetch 136 + // IMPORTANT: Always create a new channel - a closed channel is not nil 137 + entry.done = make(chan struct{}) 138 + entry.inFlight = true 139 + done := entry.done 140 + entry.mu.Unlock() 141 + 142 + // Perform the fetch (outside the lock to allow other operations) 143 + serviceToken, err := fetchFunc() 144 + 145 + // Update the entry with result 146 + entry.mu.Lock() 147 + entry.inFlight = false 148 + 149 + if err != nil { 150 + // Cache errors for 5 seconds (fast-fail for subsequent requests) 151 + entry.err = err 152 + entry.validUntil = time.Now().Add(5 * time.Second) 153 + entry.serviceToken = "" 154 + } else { 155 + // Cache token for 45 seconds (covers typical Docker push operation) 156 + entry.err = nil 157 + entry.serviceToken = serviceToken 158 + entry.validUntil = time.Now().Add(45 * time.Second) 159 + } 160 + 161 + // Signal completion to waiting goroutines 162 + close(done) 163 + entry.mu.Unlock() 164 + 165 + return serviceToken, err 166 + } 31 167 32 168 // Global variables for initialization only 33 169 // These are set by main.go during startup and copied into NamespaceResolver instances. 34 170 // After initialization, request handling uses the NamespaceResolver's instance fields. 35 171 var ( 36 - globalRefresher *oauth.Refresher 37 - globalDatabase *sql.DB 38 - globalAuthorizer auth.HoldAuthorizer 172 + globalRefresher *oauth.Refresher 173 + globalDatabase storage.DatabaseMetrics 174 + globalAuthorizer auth.HoldAuthorizer 175 + globalReadmeCache storage.ReadmeCache 39 176 ) 40 177 41 178 // SetGlobalRefresher sets the OAuth refresher instance during initialization ··· 46 183 47 184 // SetGlobalDatabase sets the database instance during initialization 48 185 // Must be called before the registry starts serving requests 49 - func SetGlobalDatabase(database *sql.DB) { 186 + func SetGlobalDatabase(database storage.DatabaseMetrics) { 50 187 globalDatabase = database 51 188 } 52 189 ··· 56 193 globalAuthorizer = authorizer 57 194 } 58 195 196 + // SetGlobalReadmeCache sets the readme cache instance during initialization 197 + // Must be called before the registry starts serving requests 198 + func SetGlobalReadmeCache(readmeCache storage.ReadmeCache) { 199 + globalReadmeCache = readmeCache 200 + } 201 + 59 202 func init() { 60 203 // Register the name resolution middleware 61 204 registrymw.Register("atproto-resolver", initATProtoResolver) ··· 64 207 // NamespaceResolver wraps a namespace and resolves names 65 208 type NamespaceResolver struct { 66 209 distribution.Namespace 67 - defaultHoldDID string // Default hold DID (e.g., "did:web:hold01.atcr.io") 68 - baseURL string // Base URL for error messages (e.g., "https://atcr.io") 69 - testMode bool // If true, fallback to default hold when user's hold is unreachable 70 - refresher *oauth.Refresher // OAuth session manager (copied from global on init) 71 - sqlDB *sql.DB // Database for hold DID lookup and metrics (copied from global on init) 72 - authorizer auth.HoldAuthorizer // Hold authorization (copied from global on init) 210 + defaultHoldDID string // Default hold DID (e.g., "did:web:hold01.atcr.io") 211 + baseURL string // Base URL for error messages (e.g., "https://atcr.io") 212 + testMode bool // If true, fallback to default hold when user's hold is unreachable 213 + refresher *oauth.Refresher // OAuth session manager (copied from global on init) 214 + database storage.DatabaseMetrics // Metrics database (copied from global on init) 215 + authorizer auth.HoldAuthorizer // Hold authorization (copied from global on init) 216 + readmeCache storage.ReadmeCache // README cache (copied from global on init) 217 + validationCache *validationCache // Request-level service token cache 73 218 } 74 219 75 220 // initATProtoResolver initializes the name resolution middleware ··· 96 241 // Copy shared services from globals into the instance 97 242 // This avoids accessing globals during request handling 98 243 return &NamespaceResolver{ 99 - Namespace: ns, 100 - defaultHoldDID: defaultHoldDID, 101 - baseURL: baseURL, 102 - testMode: testMode, 103 - refresher: globalRefresher, 104 - sqlDB: globalDatabase, 105 - authorizer: globalAuthorizer, 244 + Namespace: ns, 245 + defaultHoldDID: defaultHoldDID, 246 + baseURL: baseURL, 247 + testMode: testMode, 248 + refresher: globalRefresher, 249 + database: globalDatabase, 250 + authorizer: globalAuthorizer, 251 + readmeCache: globalReadmeCache, 252 + validationCache: newValidationCache(), 106 253 }, nil 254 + } 255 + 256 + // authErrorMessage creates a user-friendly auth error with login URL 257 + func (nr *NamespaceResolver) authErrorMessage(message string) error { 258 + loginURL := fmt.Sprintf("%s/auth/oauth/login", nr.baseURL) 259 + fullMessage := fmt.Sprintf("%s - please re-authenticate at %s", message, loginURL) 260 + return errcode.ErrorCodeUnauthorized.WithMessage(fullMessage) 107 261 } 108 262 109 263 // Repository resolves the repository name and delegates to underlying namespace ··· 139 293 } 140 294 ctx = context.WithValue(ctx, holdDIDKey, holdDID) 141 295 142 - // Note: Profile and crew membership are now ensured in UserContextMiddleware 143 - // via EnsureUserSetup() - no need to call here 296 + // Auto-reconcile crew membership on first push/pull 297 + // This ensures users can push immediately after docker login without web sign-in 298 + // EnsureCrewMembership is best-effort and logs errors without failing the request 299 + // Run in background to avoid blocking registry operations if hold is offline 300 + if holdDID != "" && nr.refresher != nil { 301 + slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID) 302 + client := atproto.NewClient(pdsEndpoint, did, "") 303 + go func(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, holdDID string) { 304 + storage.EnsureCrewMembership(ctx, client, refresher, holdDID) 305 + }(ctx, client, nr.refresher, holdDID) 306 + } 307 + 308 + // Get service token for hold authentication (only if authenticated) 309 + // Use validation cache to prevent concurrent requests from racing on OAuth/DPoP 310 + // Route based on auth method from JWT token 311 + var serviceToken string 312 + authMethod, _ := ctx.Value(authMethodKey).(string) 313 + 314 + // Only fetch service token if user is authenticated 315 + // Unauthenticated requests (like /v2/ ping) should not trigger token fetching 316 + if authMethod != "" { 317 + // Create cache key: "did:holdDID" 318 + cacheKey := fmt.Sprintf("%s:%s", did, holdDID) 319 + 320 + // Fetch service token through validation cache 321 + // This ensures only ONE request per DID:holdDID pair fetches the token 322 + // Concurrent requests will wait for the first request to complete 323 + var fetchErr error 324 + serviceToken, fetchErr = nr.validationCache.getOrFetch(ctx, cacheKey, func() (string, error) { 325 + if authMethod == token.AuthMethodAppPassword { 326 + // App-password flow: use Bearer token authentication 327 + slog.Debug("Using app-password flow for service token", 328 + "component", "registry/middleware", 329 + "did", did, 330 + "cacheKey", cacheKey) 331 + 332 + token, err := token.GetOrFetchServiceTokenWithAppPassword(ctx, did, holdDID, pdsEndpoint) 333 + if err != nil { 334 + slog.Error("Failed to get service token with app-password", 335 + "component", "registry/middleware", 336 + "did", did, 337 + "holdDID", holdDID, 338 + "pdsEndpoint", pdsEndpoint, 339 + "error", err) 340 + return "", err 341 + } 342 + return token, nil 343 + } else if nr.refresher != nil { 344 + // OAuth flow: use DPoP authentication 345 + slog.Debug("Using OAuth flow for service token", 346 + "component", "registry/middleware", 347 + "did", did, 348 + "cacheKey", cacheKey) 349 + 350 + token, err := token.GetOrFetchServiceToken(ctx, nr.refresher, did, holdDID, pdsEndpoint) 351 + if err != nil { 352 + slog.Error("Failed to get service token with OAuth", 353 + "component", "registry/middleware", 354 + "did", did, 355 + "holdDID", holdDID, 356 + "pdsEndpoint", pdsEndpoint, 357 + "error", err) 358 + return "", err 359 + } 360 + return token, nil 361 + } 362 + return "", fmt.Errorf("no authentication method available") 363 + }) 364 + 365 + // Handle errors from cached fetch 366 + if fetchErr != nil { 367 + errMsg := fetchErr.Error() 368 + 369 + // Check for app-password specific errors 370 + if authMethod == token.AuthMethodAppPassword { 371 + if strings.Contains(errMsg, "expired or invalid") || strings.Contains(errMsg, "no app-password") { 372 + return nil, nr.authErrorMessage("App-password authentication failed. Please re-authenticate with: docker login") 373 + } 374 + } 375 + 376 + // Check for OAuth specific errors 377 + if strings.Contains(errMsg, "OAuth session") || strings.Contains(errMsg, "OAuth validation") { 378 + return nil, nr.authErrorMessage("OAuth session expired or invalidated by PDS. Your session has been cleared") 379 + } 380 + 381 + // Generic service token error 382 + return nil, nr.authErrorMessage(fmt.Sprintf("Failed to obtain storage credentials: %v", fetchErr)) 383 + } 384 + } else { 385 + slog.Debug("Skipping service token fetch for unauthenticated request", 386 + "component", "registry/middleware", 387 + "did", did) 388 + } 144 389 145 390 // Create a new reference with identity/image format 146 391 // Use the identity (or DID) as the namespace to ensure canonical format ··· 157 402 return nil, err 158 403 } 159 404 405 + // Get access token for PDS operations 406 + // Use auth method from JWT to determine client type: 407 + // - OAuth users: use session provider (DPoP-enabled) 408 + // - App-password users: use Basic Auth token cache 409 + var atprotoClient *atproto.Client 410 + 411 + if authMethod == token.AuthMethodOAuth && nr.refresher != nil { 412 + // OAuth flow: use session provider for locked OAuth sessions 413 + // This prevents DPoP nonce race conditions during concurrent layer uploads 414 + slog.Debug("Creating ATProto client with OAuth session provider", 415 + "component", "registry/middleware", 416 + "did", did, 417 + "authMethod", authMethod) 418 + atprotoClient = atproto.NewClientWithSessionProvider(pdsEndpoint, did, nr.refresher) 419 + } else { 420 + // App-password flow (or fallback): use Basic Auth token cache 421 + accessToken, ok := auth.GetGlobalTokenCache().Get(did) 422 + if !ok { 423 + slog.Debug("No cached access token found for app-password auth", 424 + "component", "registry/middleware", 425 + "did", did, 426 + "authMethod", authMethod) 427 + accessToken = "" // Will fail on manifest push, but let it try 428 + } else { 429 + slog.Debug("Creating ATProto client with app-password", 430 + "component", "registry/middleware", 431 + "did", did, 432 + "authMethod", authMethod, 433 + "token_length", len(accessToken)) 434 + } 435 + atprotoClient = atproto.NewClient(pdsEndpoint, did, accessToken) 436 + } 437 + 160 438 // IMPORTANT: Use only the image name (not identity/image) for ATProto storage 161 439 // ATProto records are scoped to the user's DID, so we don't need the identity prefix 162 440 // Example: "evan.jarrett.net/debian" -> store as "debian" 163 441 repositoryName := imageName 164 442 165 - // Get UserContext from request context (set by UserContextMiddleware) 166 - userCtx := auth.FromContext(ctx) 167 - if userCtx == nil { 168 - return nil, fmt.Errorf("UserContext not set in request context - ensure UserContextMiddleware is configured") 443 + // Default auth method to OAuth if not already set (backward compatibility with old tokens) 444 + if authMethod == "" { 445 + authMethod = token.AuthMethodOAuth 169 446 } 170 447 171 - // Set target repository info on UserContext 172 - // ATProtoClient is cached lazily via userCtx.GetATProtoClient() 173 - userCtx.SetTarget(did, handle, pdsEndpoint, repositoryName, holdDID) 174 - 175 448 // Create routing repository - routes manifests to ATProto, blobs to hold service 176 449 // The registry is stateless - no local storage is used 450 + // Bundle all context into a single RegistryContext struct 177 451 // 178 452 // NOTE: We create a fresh RoutingRepository on every request (no caching) because: 179 453 // 1. Each layer upload is a separate HTTP request (possibly different process) 180 454 // 2. OAuth sessions can be refreshed/invalidated between requests 181 455 // 3. The refresher already caches sessions efficiently (in-memory + DB) 182 - // 4. ATProtoClient is now cached in UserContext via GetATProtoClient() 183 - return storage.NewRoutingRepository(repo, userCtx, nr.sqlDB), nil 456 + // 4. Caching the repository with a stale ATProtoClient causes refresh token errors 457 + registryCtx := &storage.RegistryContext{ 458 + DID: did, 459 + Handle: handle, 460 + HoldDID: holdDID, 461 + PDSEndpoint: pdsEndpoint, 462 + Repository: repositoryName, 463 + ServiceToken: serviceToken, // Cached service token from middleware validation 464 + ATProtoClient: atprotoClient, 465 + AuthMethod: authMethod, // Auth method from JWT token 466 + Database: nr.database, 467 + Authorizer: nr.authorizer, 468 + Refresher: nr.refresher, 469 + ReadmeCache: nr.readmeCache, 470 + } 471 + 472 + return storage.NewRoutingRepository(repo, registryCtx), nil 184 473 } 185 474 186 475 // Repositories delegates to underlying namespace ··· 201 490 // findHoldDID determines which hold DID to use for blob storage 202 491 // Priority order: 203 492 // 1. User's sailor profile defaultHold (if set) 204 - // 2. AppView's default hold DID 493 + // 2. User's own hold record (io.atcr.hold) 494 + // 3. AppView's default hold DID 205 495 // Returns a hold DID (e.g., "did:web:hold01.atcr.io"), or empty string if none configured 206 496 func (nr *NamespaceResolver) findHoldDID(ctx context.Context, did, pdsEndpoint string) string { 207 497 // Create ATProto client (without auth - reading public records) ··· 214 504 slog.Warn("Failed to read profile", "did", did, "error", err) 215 505 } 216 506 217 - if profile != nil && profile.DefaultHold != "" { 218 - // In test mode, verify the hold is reachable (fall back to default if not) 219 - // In production, trust the user's profile and return their hold 507 + if profile != nil && profile.DefaultHold != nil && *profile.DefaultHold != "" { 508 + defaultHold := *profile.DefaultHold 509 + // Profile exists with defaultHold set 510 + // In test mode, verify it's reachable before using it 220 511 if nr.testMode { 221 - if nr.isHoldReachable(ctx, profile.DefaultHold) { 222 - return profile.DefaultHold 512 + if nr.isHoldReachable(ctx, defaultHold) { 513 + return defaultHold 223 514 } 224 - slog.Debug("User's defaultHold unreachable, falling back to default", "component", "registry/middleware/testmode", "default_hold", profile.DefaultHold) 515 + slog.Debug("User's defaultHold unreachable, falling back to default", "component", "registry/middleware/testmode", "default_hold", defaultHold) 225 516 return nr.defaultHoldDID 226 517 } 227 - return profile.DefaultHold 518 + return defaultHold 228 519 } 229 520 230 - // No profile defaultHold - use AppView default 521 + // Profile doesn't exist or defaultHold is null/empty 522 + // Legacy io.atcr.hold records are no longer supported - use AppView default 231 523 return nr.defaultHoldDID 232 524 } 233 525 ··· 250 542 return false 251 543 } 252 544 253 - // ExtractAuthMethod is an HTTP middleware that extracts the auth method and puller DID from the JWT Authorization header 254 - // and stores them in the request context for later use by the registry middleware. 255 - // Also stores the HTTP method for routing decisions (GET/HEAD = pull, PUT/POST = push). 545 + // ExtractAuthMethod is an HTTP middleware that extracts the auth method from the JWT Authorization header 546 + // and stores it in the request context for later use by the registry middleware 256 547 func ExtractAuthMethod(next http.Handler) http.Handler { 257 548 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 258 - ctx := r.Context() 259 - 260 - // Store HTTP method in context for routing decisions 261 - // This is used by routing_repository.go to distinguish pull (GET/HEAD) from push (PUT/POST) 262 - ctx = context.WithValue(ctx, "http.request.method", r.Method) 263 - 264 549 // Extract Authorization header 265 550 authHeader := r.Header.Get("Authorization") 266 551 if authHeader != "" { ··· 273 558 authMethod := token.ExtractAuthMethod(tokenString) 274 559 if authMethod != "" { 275 560 // Store in context for registry middleware 276 - ctx = context.WithValue(ctx, authMethodKey, authMethod) 277 - } 278 - 279 - // Extract puller DID (Subject) from JWT 280 - // This is the authenticated user's DID, used for service token requests 281 - pullerDID := token.ExtractSubject(tokenString) 282 - if pullerDID != "" { 283 - ctx = context.WithValue(ctx, pullerDIDKey, pullerDID) 561 + ctx := context.WithValue(r.Context(), authMethodKey, authMethod) 562 + r = r.WithContext(ctx) 563 + slog.Debug("Extracted auth method from JWT", 564 + "component", "registry/middleware", 565 + "authMethod", authMethod) 284 566 } 285 - 286 - slog.Debug("Extracted auth info from JWT", 287 - "component", "registry/middleware", 288 - "authMethod", authMethod, 289 - "pullerDID", pullerDID, 290 - "httpMethod", r.Method) 291 567 } 292 568 } 293 569 294 - r = r.WithContext(ctx) 295 570 next.ServeHTTP(w, r) 296 571 }) 297 572 } 298 - 299 - // UserContextMiddleware creates a UserContext from the extracted JWT claims 300 - // and stores it in the request context for use throughout request processing. 301 - // This middleware should be chained AFTER ExtractAuthMethod. 302 - func UserContextMiddleware(deps *auth.Dependencies) func(http.Handler) http.Handler { 303 - return func(next http.Handler) http.Handler { 304 - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 305 - ctx := r.Context() 306 - 307 - // Get values set by ExtractAuthMethod 308 - authMethod, _ := ctx.Value(authMethodKey).(string) 309 - pullerDID, _ := ctx.Value(pullerDIDKey).(string) 310 - 311 - // Build UserContext with all dependencies 312 - userCtx := auth.NewUserContext(pullerDID, authMethod, r.Method, deps) 313 - 314 - // Eagerly resolve user's PDS for authenticated users 315 - // This is a fast path that avoids lazy loading in most cases 316 - if userCtx.IsAuthenticated { 317 - if err := userCtx.ResolvePDS(ctx); err != nil { 318 - slog.Warn("Failed to resolve puller's PDS", 319 - "component", "registry/middleware", 320 - "did", pullerDID, 321 - "error", err) 322 - // Continue without PDS - will fail on service token request 323 - } 324 - 325 - // Ensure user has profile and crew membership (runs in background, cached) 326 - userCtx.EnsureUserSetup() 327 - } 328 - 329 - // Store UserContext in request context 330 - ctx = auth.WithUserContext(ctx, userCtx) 331 - r = r.WithContext(ctx) 332 - 333 - slog.Debug("Created UserContext", 334 - "component", "registry/middleware", 335 - "isAuthenticated", userCtx.IsAuthenticated, 336 - "authMethod", userCtx.AuthMethod, 337 - "action", userCtx.Action.String(), 338 - "pullerDID", pullerDID) 339 - 340 - next.ServeHTTP(w, r) 341 - }) 342 - } 343 - }
+43 -2
pkg/appview/middleware/registry_test.go
··· 67 67 // If we get here without panic, test passes 68 68 } 69 69 70 + func TestSetGlobalReadmeCache(t *testing.T) { 71 + SetGlobalReadmeCache(nil) 72 + // If we get here without panic, test passes 73 + } 74 + 70 75 // TestInitATProtoResolver tests the initialization function 71 76 func TestInitATProtoResolver(t *testing.T) { 72 77 ctx := context.Background() ··· 129 134 } 130 135 } 131 136 137 + // TestAuthErrorMessage tests the error message formatting 138 + func TestAuthErrorMessage(t *testing.T) { 139 + resolver := &NamespaceResolver{ 140 + baseURL: "https://atcr.io", 141 + } 142 + 143 + err := resolver.authErrorMessage("OAuth session expired") 144 + assert.Contains(t, err.Error(), "OAuth session expired") 145 + assert.Contains(t, err.Error(), "https://atcr.io/auth/oauth/login") 146 + } 147 + 132 148 // TestFindHoldDID_DefaultFallback tests default hold DID fallback 133 149 func TestFindHoldDID_DefaultFallback(t *testing.T) { 134 150 // Start a mock PDS server that returns 404 for profile and empty list for holds ··· 188 204 assert.Equal(t, "did:web:user.hold.io", holdDID, "should use sailor profile's defaultHold") 189 205 } 190 206 191 - // TestFindHoldDID_Priority tests the priority order 207 + // TestFindHoldDID_NoProfile tests fallback to default hold when no profile exists 208 + func TestFindHoldDID_NoProfile(t *testing.T) { 209 + // Start a mock PDS server that returns 404 for profile 210 + mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 211 + if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" { 212 + // Profile not found 213 + w.WriteHeader(http.StatusNotFound) 214 + return 215 + } 216 + w.WriteHeader(http.StatusNotFound) 217 + })) 218 + defer mockPDS.Close() 219 + 220 + resolver := &NamespaceResolver{ 221 + defaultHoldDID: "did:web:default.atcr.io", 222 + } 223 + 224 + ctx := context.Background() 225 + holdDID := resolver.findHoldDID(ctx, "did:plc:test123", mockPDS.URL) 226 + 227 + // Should fall back to default hold DID when no profile exists 228 + // Note: Legacy io.atcr.hold records are no longer supported 229 + assert.Equal(t, "did:web:default.atcr.io", holdDID, "should fall back to default hold DID") 230 + } 231 + 232 + // TestFindHoldDID_Priority tests that profile takes priority over default 192 233 func TestFindHoldDID_Priority(t *testing.T) { 193 - // Start a mock PDS server that returns both profile and hold records 234 + // Start a mock PDS server that returns profile 194 235 mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 195 236 if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" { 196 237 // Return sailor profile with defaultHold (highest priority)
+111
pkg/appview/readme/cache.go
··· 1 + // Package readme provides README fetching, rendering, and caching functionality 2 + // for container repositories. It fetches markdown content from URLs, renders it 3 + // to sanitized HTML using GitHub-flavored markdown, and caches the results in 4 + // a database with configurable TTL. 5 + package readme 6 + 7 + import ( 8 + "context" 9 + "database/sql" 10 + "log/slog" 11 + "time" 12 + ) 13 + 14 + // Cache stores rendered README HTML in the database 15 + type Cache struct { 16 + db *sql.DB 17 + fetcher *Fetcher 18 + ttl time.Duration 19 + } 20 + 21 + // NewCache creates a new README cache 22 + func NewCache(db *sql.DB, ttl time.Duration) *Cache { 23 + if ttl == 0 { 24 + ttl = 1 * time.Hour // Default TTL 25 + } 26 + return &Cache{ 27 + db: db, 28 + fetcher: NewFetcher(), 29 + ttl: ttl, 30 + } 31 + } 32 + 33 + // Get retrieves a README from cache or fetches it 34 + func (c *Cache) Get(ctx context.Context, readmeURL string) (string, error) { 35 + // Try to get from cache 36 + html, fetchedAt, err := c.getFromDB(readmeURL) 37 + if err == nil { 38 + // Check if cache is still valid 39 + if time.Since(fetchedAt) < c.ttl { 40 + return html, nil 41 + } 42 + } 43 + 44 + // Cache miss or expired, fetch fresh content 45 + html, err = c.fetcher.FetchAndRender(ctx, readmeURL) 46 + if err != nil { 47 + // If fetch fails but we have stale cache, return it 48 + if html != "" { 49 + return html, nil 50 + } 51 + return "", err 52 + } 53 + 54 + // Store in cache 55 + if err := c.storeInDB(readmeURL, html); err != nil { 56 + // Log error but don't fail - we have the content 57 + slog.Warn("Failed to cache README", "error", err) 58 + } 59 + 60 + return html, nil 61 + } 62 + 63 + // getFromDB retrieves cached README from database 64 + func (c *Cache) getFromDB(readmeURL string) (string, time.Time, error) { 65 + var html string 66 + var fetchedAt time.Time 67 + 68 + err := c.db.QueryRow(` 69 + SELECT html, fetched_at 70 + FROM readme_cache 71 + WHERE url = ? 72 + `, readmeURL).Scan(&html, &fetchedAt) 73 + 74 + if err != nil { 75 + return "", time.Time{}, err 76 + } 77 + 78 + return html, fetchedAt, nil 79 + } 80 + 81 + // storeInDB stores rendered README in database 82 + func (c *Cache) storeInDB(readmeURL, html string) error { 83 + _, err := c.db.Exec(` 84 + INSERT INTO readme_cache (url, html, fetched_at) 85 + VALUES (?, ?, ?) 86 + ON CONFLICT(url) DO UPDATE SET 87 + html = excluded.html, 88 + fetched_at = excluded.fetched_at 89 + `, readmeURL, html, time.Now()) 90 + 91 + return err 92 + } 93 + 94 + // Invalidate removes a README from the cache 95 + func (c *Cache) Invalidate(readmeURL string) error { 96 + _, err := c.db.Exec(` 97 + DELETE FROM readme_cache 98 + WHERE url = ? 99 + `, readmeURL) 100 + return err 101 + } 102 + 103 + // Cleanup removes expired entries from the cache 104 + func (c *Cache) Cleanup() error { 105 + cutoff := time.Now().Add(-c.ttl * 2) // Keep for 2x TTL 106 + _, err := c.db.Exec(` 107 + DELETE FROM readme_cache 108 + WHERE fetched_at < ? 109 + `, cutoff) 110 + return err 111 + }
+13
pkg/appview/readme/cache_test.go
··· 1 + package readme 2 + 3 + import "testing" 4 + 5 + func TestCache_Struct(t *testing.T) { 6 + // Simple struct test 7 + cache := &Cache{} 8 + if cache == nil { 9 + t.Error("Expected non-nil cache") 10 + } 11 + } 12 + 13 + // TODO: Add cache operation tests
+9 -62
pkg/appview/readme/fetcher.go
··· 7 7 "io" 8 8 "net/http" 9 9 "net/url" 10 - "regexp" 11 10 "strings" 12 11 "time" 13 12 ··· 181 180 return fmt.Sprintf("%s://%s%s", u.Scheme, u.Host, path) 182 181 } 183 182 184 - // Is404 returns true if the error indicates a 404 Not Found response 185 - func Is404(err error) bool { 186 - return err != nil && strings.Contains(err.Error(), "unexpected status code: 404") 187 - } 188 - 189 - // RenderMarkdown renders a markdown string to sanitized HTML 190 - // This is used for rendering repo page descriptions stored in the database 191 - func (f *Fetcher) RenderMarkdown(content []byte) (string, error) { 192 - // Render markdown to HTML (no base URL for repo page descriptions) 193 - return f.renderMarkdown(content, "") 194 - } 195 - 196 - // Regex patterns for matching relative URLs that need rewriting 197 - // These match src="..." or href="..." where the URL is relative (not absolute, not data:, not #anchor) 198 - var ( 199 - // Match src="filename" where filename doesn't start with http://, https://, //, /, #, data:, or mailto: 200 - relativeSrcPattern = regexp.MustCompile(`src="([^"/:][^"]*)"`) 201 - // Match href="filename" where filename doesn't start with http://, https://, //, /, #, data:, or mailto: 202 - relativeHrefPattern = regexp.MustCompile(`href="([^"/:][^"]*)"`) 203 - ) 204 - 205 183 // rewriteRelativeURLs converts relative URLs to absolute URLs 206 184 func rewriteRelativeURLs(html, baseURL string) string { 207 185 if baseURL == "" { ··· 213 191 return html 214 192 } 215 193 216 - // Handle root-relative URLs (starting with /) first 217 - // Must be done before bare relative URLs to avoid double-processing 218 - if base.Scheme != "" && base.Host != "" { 219 - root := fmt.Sprintf("%s://%s/", base.Scheme, base.Host) 220 - // Replace src="/" and href="/" but not src="//" (protocol-relative URLs) 221 - html = strings.ReplaceAll(html, `src="/`, fmt.Sprintf(`src="%s`, root)) 222 - html = strings.ReplaceAll(html, `href="/`, fmt.Sprintf(`href="%s`, root)) 223 - } 224 - 225 - // Handle explicit relative paths (./something and ../something) 194 + // Simple string replacement for common patterns 195 + // This is a basic implementation - for production, consider using an HTML parser 226 196 html = strings.ReplaceAll(html, `src="./`, fmt.Sprintf(`src="%s`, baseURL)) 227 197 html = strings.ReplaceAll(html, `href="./`, fmt.Sprintf(`href="%s`, baseURL)) 228 198 html = strings.ReplaceAll(html, `src="../`, fmt.Sprintf(`src="%s../`, baseURL)) 229 199 html = strings.ReplaceAll(html, `href="../`, fmt.Sprintf(`href="%s../`, baseURL)) 230 200 231 - // Handle bare relative URLs (e.g., src="image.png" without ./ prefix) 232 - // Skip URLs that are already absolute (start with http://, https://, or //) 233 - // Skip anchors (#), data URLs (data:), and mailto links 234 - html = relativeSrcPattern.ReplaceAllStringFunc(html, func(match string) string { 235 - // Extract the URL from src="..." 236 - url := match[5 : len(match)-1] // Remove 'src="' and '"' 237 - 238 - // Skip if already processed or is a special URL type 239 - if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") || 240 - strings.HasPrefix(url, "//") || strings.HasPrefix(url, "#") || 241 - strings.HasPrefix(url, "data:") || strings.HasPrefix(url, "mailto:") { 242 - return match 243 - } 244 - 245 - return fmt.Sprintf(`src="%s%s"`, baseURL, url) 246 - }) 247 - 248 - html = relativeHrefPattern.ReplaceAllStringFunc(html, func(match string) string { 249 - // Extract the URL from href="..." 250 - url := match[6 : len(match)-1] // Remove 'href="' and '"' 251 - 252 - // Skip if already processed or is a special URL type 253 - if strings.HasPrefix(url, "http://") || strings.HasPrefix(url, "https://") || 254 - strings.HasPrefix(url, "//") || strings.HasPrefix(url, "#") || 255 - strings.HasPrefix(url, "data:") || strings.HasPrefix(url, "mailto:") { 256 - return match 257 - } 258 - 259 - return fmt.Sprintf(`href="%s%s"`, baseURL, url) 260 - }) 201 + // Handle root-relative URLs (starting with /) 202 + if base.Scheme != "" && base.Host != "" { 203 + root := fmt.Sprintf("%s://%s/", base.Scheme, base.Host) 204 + // Replace src="/" and href="/" but not src="//" (absolute URLs) 205 + html = strings.ReplaceAll(html, `src="/`, fmt.Sprintf(`src="%s`, root)) 206 + html = strings.ReplaceAll(html, `href="/`, fmt.Sprintf(`href="%s`, root)) 207 + } 261 208 262 209 return html 263 210 }
-148
pkg/appview/readme/fetcher_test.go
··· 145 145 baseURL: "https://example.com/docs/", 146 146 expected: `<img src="https://example.com//cdn.example.com/image.png">`, 147 147 }, 148 - { 149 - name: "bare relative src (no ./ prefix)", 150 - html: `<img src="image.png">`, 151 - baseURL: "https://example.com/docs/", 152 - expected: `<img src="https://example.com/docs/image.png">`, 153 - }, 154 - { 155 - name: "bare relative href (no ./ prefix)", 156 - html: `<a href="page.html">link</a>`, 157 - baseURL: "https://example.com/docs/", 158 - expected: `<a href="https://example.com/docs/page.html">link</a>`, 159 - }, 160 - { 161 - name: "bare relative with path", 162 - html: `<img src="images/logo.png">`, 163 - baseURL: "https://example.com/docs/", 164 - expected: `<img src="https://example.com/docs/images/logo.png">`, 165 - }, 166 - { 167 - name: "anchor links unchanged", 168 - html: `<a href="#section">link</a>`, 169 - baseURL: "https://example.com/docs/", 170 - expected: `<a href="#section">link</a>`, 171 - }, 172 - { 173 - name: "data URLs unchanged", 174 - html: `<img src="data:image/png;base64,abc123">`, 175 - baseURL: "https://example.com/docs/", 176 - expected: `<img src="data:image/png;base64,abc123">`, 177 - }, 178 - { 179 - name: "mailto links unchanged", 180 - html: `<a href="mailto:test@example.com">email</a>`, 181 - baseURL: "https://example.com/docs/", 182 - expected: `<a href="mailto:test@example.com">email</a>`, 183 - }, 184 - { 185 - name: "mixed bare and prefixed relative URLs", 186 - html: `<img src="slices_and_lucy.png"><a href="./other.md">link</a>`, 187 - baseURL: "https://github.com/user/repo/blob/main/", 188 - expected: `<img src="https://github.com/user/repo/blob/main/slices_and_lucy.png"><a href="https://github.com/user/repo/blob/main/other.md">link</a>`, 189 - }, 190 148 } 191 149 192 150 for _, tt := range tests { ··· 197 155 } 198 156 }) 199 157 } 200 - } 201 - 202 - func TestFetcher_RenderMarkdown(t *testing.T) { 203 - fetcher := NewFetcher() 204 - 205 - tests := []struct { 206 - name string 207 - content string 208 - wantContain string 209 - wantErr bool 210 - }{ 211 - { 212 - name: "simple paragraph", 213 - content: "Hello, world!", 214 - wantContain: "<p>Hello, world!</p>", 215 - wantErr: false, 216 - }, 217 - { 218 - name: "heading", 219 - content: "# My App", 220 - wantContain: "<h1", 221 - wantErr: false, 222 - }, 223 - { 224 - name: "bold text", 225 - content: "This is **bold** text.", 226 - wantContain: "<strong>bold</strong>", 227 - wantErr: false, 228 - }, 229 - { 230 - name: "italic text", 231 - content: "This is *italic* text.", 232 - wantContain: "<em>italic</em>", 233 - wantErr: false, 234 - }, 235 - { 236 - name: "code block", 237 - content: "```\ncode here\n```", 238 - wantContain: "<pre>", 239 - wantErr: false, 240 - }, 241 - { 242 - name: "link", 243 - content: "[Link text](https://example.com)", 244 - wantContain: `href="https://example.com"`, 245 - wantErr: false, 246 - }, 247 - { 248 - name: "image", 249 - content: "![Alt text](https://example.com/image.png)", 250 - wantContain: `src="https://example.com/image.png"`, 251 - wantErr: false, 252 - }, 253 - { 254 - name: "unordered list", 255 - content: "- Item 1\n- Item 2", 256 - wantContain: "<ul>", 257 - wantErr: false, 258 - }, 259 - { 260 - name: "ordered list", 261 - content: "1. Item 1\n2. Item 2", 262 - wantContain: "<ol>", 263 - wantErr: false, 264 - }, 265 - { 266 - name: "empty content", 267 - content: "", 268 - wantContain: "", 269 - wantErr: false, 270 - }, 271 - { 272 - name: "complex markdown", 273 - content: "# Title\n\nA paragraph with **bold** and *italic* text.\n\n- List item 1\n- List item 2\n\n```go\nfunc main() {}\n```", 274 - wantContain: "<h1", 275 - wantErr: false, 276 - }, 277 - } 278 - 279 - for _, tt := range tests { 280 - t.Run(tt.name, func(t *testing.T) { 281 - html, err := fetcher.RenderMarkdown([]byte(tt.content)) 282 - if (err != nil) != tt.wantErr { 283 - t.Errorf("RenderMarkdown() error = %v, wantErr %v", err, tt.wantErr) 284 - return 285 - } 286 - if !tt.wantErr && tt.wantContain != "" { 287 - if !containsSubstring(html, tt.wantContain) { 288 - t.Errorf("RenderMarkdown() = %q, want to contain %q", html, tt.wantContain) 289 - } 290 - } 291 - }) 292 - } 293 - } 294 - 295 - func containsSubstring(s, substr string) bool { 296 - return len(substr) == 0 || (len(s) >= len(substr) && (s == substr || len(s) > 0 && containsSubstringHelper(s, substr))) 297 - } 298 - 299 - func containsSubstringHelper(s, substr string) bool { 300 - for i := 0; i <= len(s)-len(substr); i++ { 301 - if s[i:i+len(substr)] == substr { 302 - return true 303 - } 304 - } 305 - return false 306 158 } 307 159 308 160 // TODO: Add README fetching and caching tests
-103
pkg/appview/readme/source.go
··· 1 - package readme 2 - 3 - import ( 4 - "fmt" 5 - "net/url" 6 - "strings" 7 - ) 8 - 9 - // Platform represents a supported Git hosting platform 10 - type Platform string 11 - 12 - const ( 13 - PlatformGitHub Platform = "github" 14 - PlatformGitLab Platform = "gitlab" 15 - PlatformTangled Platform = "tangled" 16 - ) 17 - 18 - // ParseSourceURL extracts platform, user, and repo from a source repository URL. 19 - // Returns ok=false if the URL is not a recognized pattern. 20 - func ParseSourceURL(sourceURL string) (platform Platform, user, repo string, ok bool) { 21 - if sourceURL == "" { 22 - return "", "", "", false 23 - } 24 - 25 - parsed, err := url.Parse(sourceURL) 26 - if err != nil { 27 - return "", "", "", false 28 - } 29 - 30 - // Normalize: remove trailing slash and .git suffix 31 - path := strings.TrimSuffix(parsed.Path, "/") 32 - path = strings.TrimSuffix(path, ".git") 33 - path = strings.TrimPrefix(path, "/") 34 - 35 - if path == "" { 36 - return "", "", "", false 37 - } 38 - 39 - host := strings.ToLower(parsed.Host) 40 - 41 - switch { 42 - case host == "github.com": 43 - // GitHub: github.com/{user}/{repo} 44 - parts := strings.SplitN(path, "/", 3) 45 - if len(parts) < 2 || parts[0] == "" || parts[1] == "" { 46 - return "", "", "", false 47 - } 48 - return PlatformGitHub, parts[0], parts[1], true 49 - 50 - case host == "gitlab.com": 51 - // GitLab: gitlab.com/{user}/{repo} or gitlab.com/{group}/{subgroup}/{repo} 52 - // For nested groups, user = everything except last part, repo = last part 53 - lastSlash := strings.LastIndex(path, "/") 54 - if lastSlash == -1 || lastSlash == 0 { 55 - return "", "", "", false 56 - } 57 - user = path[:lastSlash] 58 - repo = path[lastSlash+1:] 59 - if user == "" || repo == "" { 60 - return "", "", "", false 61 - } 62 - return PlatformGitLab, user, repo, true 63 - 64 - case host == "tangled.org" || host == "tangled.sh": 65 - // Tangled: tangled.org/{user}/{repo} or tangled.sh/@{user}/{repo} (legacy) 66 - // Strip leading @ from user if present 67 - path = strings.TrimPrefix(path, "@") 68 - parts := strings.SplitN(path, "/", 3) 69 - if len(parts) < 2 || parts[0] == "" || parts[1] == "" { 70 - return "", "", "", false 71 - } 72 - return PlatformTangled, parts[0], parts[1], true 73 - 74 - default: 75 - return "", "", "", false 76 - } 77 - } 78 - 79 - // DeriveReadmeURL converts a source repository URL to a raw README URL. 80 - // Returns empty string if platform is not supported. 81 - func DeriveReadmeURL(sourceURL, branch string) string { 82 - platform, user, repo, ok := ParseSourceURL(sourceURL) 83 - if !ok { 84 - return "" 85 - } 86 - 87 - switch platform { 88 - case PlatformGitHub: 89 - // https://raw.githubusercontent.com/{user}/{repo}/refs/heads/{branch}/README.md 90 - return fmt.Sprintf("https://raw.githubusercontent.com/%s/%s/refs/heads/%s/README.md", user, repo, branch) 91 - 92 - case PlatformGitLab: 93 - // https://gitlab.com/{user}/{repo}/-/raw/{branch}/README.md 94 - return fmt.Sprintf("https://gitlab.com/%s/%s/-/raw/%s/README.md", user, repo, branch) 95 - 96 - case PlatformTangled: 97 - // https://tangled.org/{user}/{repo}/raw/{branch}/README.md 98 - return fmt.Sprintf("https://tangled.org/%s/%s/raw/%s/README.md", user, repo, branch) 99 - 100 - default: 101 - return "" 102 - } 103 - }
-241
pkg/appview/readme/source_test.go
··· 1 - package readme 2 - 3 - import ( 4 - "testing" 5 - ) 6 - 7 - func TestParseSourceURL(t *testing.T) { 8 - tests := []struct { 9 - name string 10 - sourceURL string 11 - wantPlatform Platform 12 - wantUser string 13 - wantRepo string 14 - wantOK bool 15 - }{ 16 - // GitHub 17 - { 18 - name: "github standard", 19 - sourceURL: "https://github.com/bigmoves/quickslice", 20 - wantPlatform: PlatformGitHub, 21 - wantUser: "bigmoves", 22 - wantRepo: "quickslice", 23 - wantOK: true, 24 - }, 25 - { 26 - name: "github with .git suffix", 27 - sourceURL: "https://github.com/user/repo.git", 28 - wantPlatform: PlatformGitHub, 29 - wantUser: "user", 30 - wantRepo: "repo", 31 - wantOK: true, 32 - }, 33 - { 34 - name: "github with trailing slash", 35 - sourceURL: "https://github.com/user/repo/", 36 - wantPlatform: PlatformGitHub, 37 - wantUser: "user", 38 - wantRepo: "repo", 39 - wantOK: true, 40 - }, 41 - { 42 - name: "github with subpath (ignored)", 43 - sourceURL: "https://github.com/user/repo/tree/main", 44 - wantPlatform: PlatformGitHub, 45 - wantUser: "user", 46 - wantRepo: "repo", 47 - wantOK: true, 48 - }, 49 - { 50 - name: "github user only", 51 - sourceURL: "https://github.com/user", 52 - wantOK: false, 53 - }, 54 - 55 - // GitLab 56 - { 57 - name: "gitlab standard", 58 - sourceURL: "https://gitlab.com/user/repo", 59 - wantPlatform: PlatformGitLab, 60 - wantUser: "user", 61 - wantRepo: "repo", 62 - wantOK: true, 63 - }, 64 - { 65 - name: "gitlab nested groups", 66 - sourceURL: "https://gitlab.com/group/subgroup/repo", 67 - wantPlatform: PlatformGitLab, 68 - wantUser: "group/subgroup", 69 - wantRepo: "repo", 70 - wantOK: true, 71 - }, 72 - { 73 - name: "gitlab deep nested groups", 74 - sourceURL: "https://gitlab.com/a/b/c/d/repo", 75 - wantPlatform: PlatformGitLab, 76 - wantUser: "a/b/c/d", 77 - wantRepo: "repo", 78 - wantOK: true, 79 - }, 80 - { 81 - name: "gitlab with .git suffix", 82 - sourceURL: "https://gitlab.com/user/repo.git", 83 - wantPlatform: PlatformGitLab, 84 - wantUser: "user", 85 - wantRepo: "repo", 86 - wantOK: true, 87 - }, 88 - 89 - // Tangled 90 - { 91 - name: "tangled standard", 92 - sourceURL: "https://tangled.org/evan.jarrett.net/at-container-registry", 93 - wantPlatform: PlatformTangled, 94 - wantUser: "evan.jarrett.net", 95 - wantRepo: "at-container-registry", 96 - wantOK: true, 97 - }, 98 - { 99 - name: "tangled with legacy @ prefix", 100 - sourceURL: "https://tangled.org/@evan.jarrett.net/at-container-registry", 101 - wantPlatform: PlatformTangled, 102 - wantUser: "evan.jarrett.net", 103 - wantRepo: "at-container-registry", 104 - wantOK: true, 105 - }, 106 - { 107 - name: "tangled.sh domain", 108 - sourceURL: "https://tangled.sh/user/repo", 109 - wantPlatform: PlatformTangled, 110 - wantUser: "user", 111 - wantRepo: "repo", 112 - wantOK: true, 113 - }, 114 - { 115 - name: "tangled with trailing slash", 116 - sourceURL: "https://tangled.org/user/repo/", 117 - wantPlatform: PlatformTangled, 118 - wantUser: "user", 119 - wantRepo: "repo", 120 - wantOK: true, 121 - }, 122 - 123 - // Unsupported / Invalid 124 - { 125 - name: "unsupported platform", 126 - sourceURL: "https://bitbucket.org/user/repo", 127 - wantOK: false, 128 - }, 129 - { 130 - name: "empty url", 131 - sourceURL: "", 132 - wantOK: false, 133 - }, 134 - { 135 - name: "invalid url", 136 - sourceURL: "not-a-url", 137 - wantOK: false, 138 - }, 139 - { 140 - name: "just host", 141 - sourceURL: "https://github.com", 142 - wantOK: false, 143 - }, 144 - } 145 - 146 - for _, tt := range tests { 147 - t.Run(tt.name, func(t *testing.T) { 148 - platform, user, repo, ok := ParseSourceURL(tt.sourceURL) 149 - if ok != tt.wantOK { 150 - t.Errorf("ParseSourceURL(%q) ok = %v, want %v", tt.sourceURL, ok, tt.wantOK) 151 - return 152 - } 153 - if !tt.wantOK { 154 - return 155 - } 156 - if platform != tt.wantPlatform { 157 - t.Errorf("ParseSourceURL(%q) platform = %v, want %v", tt.sourceURL, platform, tt.wantPlatform) 158 - } 159 - if user != tt.wantUser { 160 - t.Errorf("ParseSourceURL(%q) user = %q, want %q", tt.sourceURL, user, tt.wantUser) 161 - } 162 - if repo != tt.wantRepo { 163 - t.Errorf("ParseSourceURL(%q) repo = %q, want %q", tt.sourceURL, repo, tt.wantRepo) 164 - } 165 - }) 166 - } 167 - } 168 - 169 - func TestDeriveReadmeURL(t *testing.T) { 170 - tests := []struct { 171 - name string 172 - sourceURL string 173 - branch string 174 - want string 175 - }{ 176 - // GitHub 177 - { 178 - name: "github main", 179 - sourceURL: "https://github.com/bigmoves/quickslice", 180 - branch: "main", 181 - want: "https://raw.githubusercontent.com/bigmoves/quickslice/refs/heads/main/README.md", 182 - }, 183 - { 184 - name: "github master", 185 - sourceURL: "https://github.com/user/repo", 186 - branch: "master", 187 - want: "https://raw.githubusercontent.com/user/repo/refs/heads/master/README.md", 188 - }, 189 - 190 - // GitLab 191 - { 192 - name: "gitlab main", 193 - sourceURL: "https://gitlab.com/user/repo", 194 - branch: "main", 195 - want: "https://gitlab.com/user/repo/-/raw/main/README.md", 196 - }, 197 - { 198 - name: "gitlab nested groups", 199 - sourceURL: "https://gitlab.com/group/subgroup/repo", 200 - branch: "main", 201 - want: "https://gitlab.com/group/subgroup/repo/-/raw/main/README.md", 202 - }, 203 - 204 - // Tangled 205 - { 206 - name: "tangled main", 207 - sourceURL: "https://tangled.org/evan.jarrett.net/at-container-registry", 208 - branch: "main", 209 - want: "https://tangled.org/evan.jarrett.net/at-container-registry/raw/main/README.md", 210 - }, 211 - { 212 - name: "tangled legacy @ prefix", 213 - sourceURL: "https://tangled.org/@user/repo", 214 - branch: "main", 215 - want: "https://tangled.org/user/repo/raw/main/README.md", 216 - }, 217 - 218 - // Unsupported 219 - { 220 - name: "unsupported platform", 221 - sourceURL: "https://bitbucket.org/user/repo", 222 - branch: "main", 223 - want: "", 224 - }, 225 - { 226 - name: "empty url", 227 - sourceURL: "", 228 - branch: "main", 229 - want: "", 230 - }, 231 - } 232 - 233 - for _, tt := range tests { 234 - t.Run(tt.name, func(t *testing.T) { 235 - got := DeriveReadmeURL(tt.sourceURL, tt.branch) 236 - if got != tt.want { 237 - t.Errorf("DeriveReadmeURL(%q, %q) = %q, want %q", tt.sourceURL, tt.branch, got, tt.want) 238 - } 239 - }) 240 - } 241 - }
+15 -37
pkg/appview/routes/routes.go
··· 27 27 BaseURL string 28 28 DeviceStore *db.DeviceStore 29 29 HealthChecker *holdhealth.Checker 30 - ReadmeFetcher *readme.Fetcher 30 + ReadmeCache *readme.Cache 31 31 Templates *template.Template 32 - DefaultHoldDID string // For UserContext creation 33 32 } 34 33 35 34 // RegisterUIRoutes registers all web UI and API routes on the provided router ··· 37 36 // Extract trimmed registry URL for templates 38 37 registryURL := trimRegistryURL(deps.BaseURL) 39 38 40 - // Create web auth dependencies for middleware (enables UserContext in web routes) 41 - webAuthDeps := middleware.WebAuthDeps{ 42 - SessionStore: deps.SessionStore, 43 - Database: deps.Database, 44 - Refresher: deps.Refresher, 45 - DefaultHoldDID: deps.DefaultHoldDID, 46 - } 47 - 48 39 // OAuth login routes (public) 49 40 router.Get("/auth/oauth/login", (&uihandlers.LoginHandler{ 50 41 Templates: deps.Templates, ··· 54 45 55 46 // Public routes (with optional auth for navbar) 56 47 // SECURITY: Public pages use read-only DB 57 - router.Get("/", middleware.OptionalAuthWithDeps(webAuthDeps)( 48 + router.Get("/", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 58 49 &uihandlers.HomeHandler{ 59 50 DB: deps.ReadOnlyDB, 60 51 Templates: deps.Templates, ··· 62 53 }, 63 54 ).ServeHTTP) 64 55 65 - router.Get("/api/recent-pushes", middleware.OptionalAuthWithDeps(webAuthDeps)( 56 + router.Get("/api/recent-pushes", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 66 57 &uihandlers.RecentPushesHandler{ 67 58 DB: deps.ReadOnlyDB, 68 59 Templates: deps.Templates, ··· 72 63 ).ServeHTTP) 73 64 74 65 // SECURITY: Search uses read-only DB to prevent writes and limit access to sensitive tables 75 - router.Get("/search", middleware.OptionalAuthWithDeps(webAuthDeps)( 66 + router.Get("/search", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 76 67 &uihandlers.SearchHandler{ 77 68 DB: deps.ReadOnlyDB, 78 69 Templates: deps.Templates, ··· 80 71 }, 81 72 ).ServeHTTP) 82 73 83 - router.Get("/api/search-results", middleware.OptionalAuthWithDeps(webAuthDeps)( 74 + router.Get("/api/search-results", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 84 75 &uihandlers.SearchResultsHandler{ 85 76 DB: deps.ReadOnlyDB, 86 77 Templates: deps.Templates, ··· 89 80 ).ServeHTTP) 90 81 91 82 // Install page (public) 92 - router.Get("/install", middleware.OptionalAuthWithDeps(webAuthDeps)( 83 + router.Get("/install", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 93 84 &uihandlers.InstallHandler{ 94 85 Templates: deps.Templates, 95 86 RegistryURL: registryURL, ··· 97 88 ).ServeHTTP) 98 89 99 90 // API route for repository stats (public, read-only) 100 - router.Get("/api/stats/{handle}/{repository}", middleware.OptionalAuthWithDeps(webAuthDeps)( 91 + router.Get("/api/stats/{handle}/{repository}", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 101 92 &uihandlers.GetStatsHandler{ 102 93 DB: deps.ReadOnlyDB, 103 94 Directory: deps.OAuthClientApp.Dir, ··· 105 96 ).ServeHTTP) 106 97 107 98 // API routes for stars (require authentication) 108 - router.Post("/api/stars/{handle}/{repository}", middleware.RequireAuthWithDeps(webAuthDeps)( 99 + router.Post("/api/stars/{handle}/{repository}", middleware.RequireAuth(deps.SessionStore, deps.Database)( 109 100 &uihandlers.StarRepositoryHandler{ 110 101 DB: deps.Database, // Needs write access 111 102 Directory: deps.OAuthClientApp.Dir, ··· 113 104 }, 114 105 ).ServeHTTP) 115 106 116 - router.Delete("/api/stars/{handle}/{repository}", middleware.RequireAuthWithDeps(webAuthDeps)( 107 + router.Delete("/api/stars/{handle}/{repository}", middleware.RequireAuth(deps.SessionStore, deps.Database)( 117 108 &uihandlers.UnstarRepositoryHandler{ 118 109 DB: deps.Database, // Needs write access 119 110 Directory: deps.OAuthClientApp.Dir, ··· 121 112 }, 122 113 ).ServeHTTP) 123 114 124 - router.Get("/api/stars/{handle}/{repository}", middleware.OptionalAuthWithDeps(webAuthDeps)( 115 + router.Get("/api/stars/{handle}/{repository}", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 125 116 &uihandlers.CheckStarHandler{ 126 117 DB: deps.ReadOnlyDB, // Read-only check 127 118 Directory: deps.OAuthClientApp.Dir, ··· 130 121 ).ServeHTTP) 131 122 132 123 // Manifest detail API endpoint 133 - router.Get("/api/manifests/{handle}/{repository}/{digest}", middleware.OptionalAuthWithDeps(webAuthDeps)( 124 + router.Get("/api/manifests/{handle}/{repository}/{digest}", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 134 125 &uihandlers.ManifestDetailHandler{ 135 126 DB: deps.ReadOnlyDB, 136 127 Directory: deps.OAuthClientApp.Dir, ··· 142 133 HealthChecker: deps.HealthChecker, 143 134 }).ServeHTTP) 144 135 145 - router.Get("/u/{handle}", middleware.OptionalAuthWithDeps(webAuthDeps)( 136 + router.Get("/u/{handle}", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 146 137 &uihandlers.UserPageHandler{ 147 138 DB: deps.ReadOnlyDB, 148 139 Templates: deps.Templates, ··· 161 152 DB: deps.ReadOnlyDB, 162 153 }).ServeHTTP) 163 154 164 - router.Get("/r/{handle}/{repository}", middleware.OptionalAuthWithDeps(webAuthDeps)( 155 + router.Get("/r/{handle}/{repository}", middleware.OptionalAuth(deps.SessionStore, deps.Database)( 165 156 &uihandlers.RepositoryPageHandler{ 166 157 DB: deps.ReadOnlyDB, 167 158 Templates: deps.Templates, ··· 169 160 Directory: deps.OAuthClientApp.Dir, 170 161 Refresher: deps.Refresher, 171 162 HealthChecker: deps.HealthChecker, 172 - ReadmeFetcher: deps.ReadmeFetcher, 163 + ReadmeCache: deps.ReadmeCache, 173 164 }, 174 165 ).ServeHTTP) 175 166 176 167 // Authenticated routes 177 168 router.Group(func(r chi.Router) { 178 - r.Use(middleware.RequireAuthWithDeps(webAuthDeps)) 169 + r.Use(middleware.RequireAuth(deps.SessionStore, deps.Database)) 179 170 180 171 r.Get("/settings", (&uihandlers.SettingsHandler{ 181 172 Templates: deps.Templates, ··· 197 188 Refresher: deps.Refresher, 198 189 }).ServeHTTP) 199 190 200 - r.Post("/api/images/{repository}/avatar", (&uihandlers.UploadAvatarHandler{ 201 - DB: deps.Database, 202 - Refresher: deps.Refresher, 203 - }).ServeHTTP) 204 - 205 191 // Device approval page (authenticated) 206 192 r.Get("/device", (&uihandlers.DeviceApprovalPageHandler{ 207 193 Store: deps.DeviceStore, ··· 233 219 } 234 220 router.Get("/auth/logout", logoutHandler.ServeHTTP) 235 221 router.Post("/auth/logout", logoutHandler.ServeHTTP) 236 - 237 - // Custom 404 handler 238 - router.NotFound(middleware.OptionalAuthWithDeps(webAuthDeps)( 239 - &uihandlers.NotFoundHandler{ 240 - Templates: deps.Templates, 241 - RegistryURL: registryURL, 242 - }, 243 - ).ServeHTTP) 244 222 } 245 223 246 224 // CORSMiddleware returns a middleware that sets CORS headers for API endpoints
+49 -160
pkg/appview/static/css/style.css
··· 38 38 --version-badge-text: #7b1fa2; 39 39 --version-badge-border: #ba68c8; 40 40 41 - /* Attestation badge */ 42 - --attestation-badge-bg: #d1fae5; 43 - --attestation-badge-text: #065f46; 44 - 45 41 /* Hero section colors */ 46 42 --hero-bg-start: #f8f9fa; 47 43 --hero-bg-end: #e9ecef; ··· 94 90 --version-badge-text: #ffffff; 95 91 --version-badge-border: #ba68c8; 96 92 97 - /* Attestation badge */ 98 - --attestation-badge-bg: #065f46; 99 - --attestation-badge-text: #6ee7b7; 100 - 101 93 /* Hero section colors */ 102 94 --hero-bg-start: #2d2d2d; 103 95 --hero-bg-end: #1a1a1a; ··· 117 109 } 118 110 119 111 body { 120 - font-family: 121 - -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", 122 - Arial, sans-serif; 112 + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, 'Helvetica Neue', Arial, sans-serif; 123 113 background: var(--bg); 124 114 color: var(--fg); 125 115 line-height: 1.6; ··· 180 170 } 181 171 182 172 .nav-links a:hover { 183 - background: var(--secondary); 173 + background:var(--secondary); 184 174 border-radius: 4px; 185 175 } 186 176 ··· 203 193 } 204 194 205 195 .user-menu-btn:hover { 206 - background: var(--secondary); 196 + background:var(--secondary); 207 197 } 208 198 209 199 .user-avatar { ··· 276 266 position: absolute; 277 267 top: calc(100% + 0.5rem); 278 268 right: 0; 279 - background: var(--bg); 269 + background:var(--bg); 280 270 border: 1px solid var(--border); 281 271 border-radius: 8px; 282 272 box-shadow: var(--shadow-lg); ··· 297 287 color: var(--fg); 298 288 text-decoration: none; 299 289 border: none; 300 - background: var(--bg); 290 + background:var(--bg); 301 291 cursor: pointer; 302 292 transition: background 0.2s; 303 293 font-size: 0.95rem; ··· 319 309 } 320 310 321 311 /* Buttons */ 322 - button, 323 - .btn, 324 - .btn-primary, 325 - .btn-secondary { 312 + button, .btn, .btn-primary, .btn-secondary { 326 313 padding: 0.5rem 1rem; 327 314 background: var(--button-primary); 328 315 color: var(--btn-text); ··· 335 322 transition: opacity 0.2s; 336 323 } 337 324 338 - button:hover, 339 - .btn:hover, 340 - .btn-primary:hover, 341 - .btn-secondary:hover { 325 + button:hover, .btn:hover, .btn-primary:hover, .btn-secondary:hover { 342 326 opacity: 0.9; 343 327 } 344 328 ··· 409 393 } 410 394 411 395 /* Cards */ 412 - .push-card, 413 - .repository-card { 396 + .push-card, .repository-card { 414 397 border: 1px solid var(--border); 415 398 border-radius: 8px; 416 399 padding: 1rem; 417 400 margin-bottom: 1rem; 418 - background: var(--bg); 401 + background:var(--bg); 419 402 box-shadow: var(--shadow-sm); 420 403 } 421 404 ··· 466 449 } 467 450 468 451 .digest { 469 - font-family: "Monaco", "Courier New", monospace; 452 + font-family: 'Monaco', 'Courier New', monospace; 470 453 font-size: 0.85rem; 471 454 background: var(--code-bg); 472 455 padding: 0.1rem 0.3rem; ··· 509 492 } 510 493 511 494 .docker-command-text { 512 - font-family: "Monaco", "Courier New", monospace; 495 + font-family: 'Monaco', 'Courier New', monospace; 513 496 font-size: 0.85rem; 514 497 color: var(--fg); 515 498 flex: 0 1 auto; ··· 527 510 border-radius: 4px; 528 511 opacity: 0; 529 512 visibility: hidden; 530 - transition: 531 - opacity 0.2s, 532 - visibility 0.2s; 513 + transition: opacity 0.2s, visibility 0.2s; 533 514 } 534 515 535 516 .docker-command:hover .copy-btn { ··· 771 752 } 772 753 773 754 .repo-stats { 774 - color: var(--border-dark); 755 + color:var(--border-dark); 775 756 font-size: 0.9rem; 776 757 display: flex; 777 758 gap: 0.5rem; ··· 800 781 padding-top: 1rem; 801 782 } 802 783 803 - .tags-section, 804 - .manifests-section { 784 + .tags-section, .manifests-section { 805 785 margin-bottom: 1.5rem; 806 786 } 807 787 808 - .tags-section h3, 809 - .manifests-section h3 { 788 + .tags-section h3, .manifests-section h3 { 810 789 font-size: 1.1rem; 811 790 margin-bottom: 0.5rem; 812 791 color: var(--secondary); 813 792 } 814 793 815 - .tag-row, 816 - .manifest-row { 794 + .tag-row, .manifest-row { 817 795 display: flex; 818 796 gap: 1rem; 819 797 align-items: center; ··· 821 799 border-bottom: 1px solid var(--border); 822 800 } 823 801 824 - .tag-row:last-child, 825 - .manifest-row:last-child { 802 + .tag-row:last-child, .manifest-row:last-child { 826 803 border-bottom: none; 827 804 } 828 805 ··· 844 821 } 845 822 846 823 .settings-section { 847 - background: var(--bg); 824 + background:var(--bg); 848 825 border: 1px solid var(--border); 849 826 border-radius: 8px; 850 827 padding: 1.5rem; ··· 941 918 padding: 1rem; 942 919 border-radius: 4px; 943 920 overflow-x: auto; 944 - font-family: "Monaco", "Courier New", monospace; 921 + font-family: 'Monaco', 'Courier New', monospace; 945 922 font-size: 0.85rem; 946 923 border: 1px solid var(--border); 947 924 } ··· 1027 1004 margin: 1rem 0; 1028 1005 } 1029 1006 1007 + /* Load More Button */ 1008 + .load-more { 1009 + width: 100%; 1010 + margin-top: 1rem; 1011 + background: var(--secondary); 1012 + } 1013 + 1030 1014 /* Login Page */ 1031 1015 .login-page { 1032 1016 max-width: 450px; ··· 1047 1031 } 1048 1032 1049 1033 .login-form { 1050 - background: var(--bg); 1034 + background:var(--bg); 1051 1035 padding: 2rem; 1052 1036 border-radius: 8px; 1053 1037 border: 1px solid var(--border); ··· 1198 1182 } 1199 1183 1200 1184 .repository-header { 1201 - background: var(--bg); 1185 + background:var(--bg); 1202 1186 border: 1px solid var(--border); 1203 1187 border-radius: 8px; 1204 1188 padding: 2rem; ··· 1236 1220 flex-shrink: 0; 1237 1221 } 1238 1222 1239 - .repo-hero-icon-wrapper { 1240 - position: relative; 1241 - display: inline-block; 1242 - flex-shrink: 0; 1243 - } 1244 - 1245 - .avatar-upload-overlay { 1246 - position: absolute; 1247 - inset: 0; 1248 - display: flex; 1249 - align-items: center; 1250 - justify-content: center; 1251 - background: rgba(0, 0, 0, 0.5); 1252 - border-radius: 12px; 1253 - opacity: 0; 1254 - cursor: pointer; 1255 - transition: opacity 0.2s ease; 1256 - } 1257 - 1258 - .avatar-upload-overlay i { 1259 - color: white; 1260 - width: 24px; 1261 - height: 24px; 1262 - } 1263 - 1264 - .repo-hero-icon-wrapper:hover .avatar-upload-overlay { 1265 - opacity: 1; 1266 - } 1267 - 1268 1223 .repo-hero-info { 1269 1224 flex: 1; 1270 1225 } ··· 1335 1290 } 1336 1291 1337 1292 .star-btn.starred { 1338 - border-color: var(--star); 1293 + border-color:var(--star); 1339 1294 background: var(--code-bg); 1340 1295 } 1341 1296 ··· 1419 1374 } 1420 1375 1421 1376 .repo-section { 1422 - background: var(--bg); 1377 + background:var(--bg); 1423 1378 border: 1px solid var(--border); 1424 1379 border-radius: 8px; 1425 1380 padding: 1.5rem; ··· 1434 1389 border-bottom: 2px solid var(--border); 1435 1390 } 1436 1391 1437 - .tags-list, 1438 - .manifests-list { 1392 + .tags-list, .manifests-list { 1439 1393 display: flex; 1440 1394 flex-direction: column; 1441 1395 gap: 1rem; 1442 1396 } 1443 1397 1444 - .tag-item, 1445 - .manifest-item { 1398 + .tag-item, .manifest-item { 1446 1399 border: 1px solid var(--border); 1447 1400 border-radius: 6px; 1448 1401 padding: 1rem; 1449 1402 background: var(--hover-bg); 1450 1403 } 1451 1404 1452 - .tag-item-header, 1453 - .manifest-item-header { 1405 + .tag-item-header, .manifest-item-header { 1454 1406 display: flex; 1455 1407 justify-content: space-between; 1456 1408 align-items: center; ··· 1580 1532 color: var(--fg); 1581 1533 border: 1px solid var(--border); 1582 1534 white-space: nowrap; 1583 - font-family: "Monaco", "Courier New", monospace; 1535 + font-family: 'Monaco', 'Courier New', monospace; 1584 1536 } 1585 1537 1586 1538 .platforms-inline { ··· 1618 1570 .badge-attestation { 1619 1571 display: inline-flex; 1620 1572 align-items: center; 1621 - gap: 0.3rem; 1622 - padding: 0.25rem 0.6rem; 1623 - background: var(--attestation-badge-bg); 1624 - color: var(--attestation-badge-text); 1625 - border-radius: 12px; 1626 - font-size: 0.75rem; 1573 + gap: 0.35rem; 1574 + padding: 0.25rem 0.5rem; 1575 + background: #f3e8ff; 1576 + color: #7c3aed; 1577 + border: 1px solid #c4b5fd; 1578 + border-radius: 4px; 1579 + font-size: 0.85rem; 1627 1580 font-weight: 600; 1628 1581 margin-left: 0.5rem; 1629 - vertical-align: middle; 1630 - white-space: nowrap; 1631 1582 } 1632 1583 1633 1584 .badge-attestation .lucide { 1634 - width: 0.75rem; 1635 - height: 0.75rem; 1585 + width: 0.9rem; 1586 + height: 0.9rem; 1636 1587 } 1637 1588 1638 1589 /* Featured Repositories Section */ ··· 1785 1736 1786 1737 /* Hero Section */ 1787 1738 .hero-section { 1788 - background: linear-gradient( 1789 - 135deg, 1790 - var(--hero-bg-start) 0%, 1791 - var(--hero-bg-end) 100% 1792 - ); 1739 + background: linear-gradient(135deg, var(--hero-bg-start) 0%, var(--hero-bg-end) 100%); 1793 1740 padding: 4rem 2rem; 1794 1741 border-bottom: 1px solid var(--border); 1795 1742 } ··· 1854 1801 .terminal-content { 1855 1802 padding: 1.5rem; 1856 1803 margin: 0; 1857 - font-family: "Monaco", "Courier New", monospace; 1804 + font-family: 'Monaco', 'Courier New', monospace; 1858 1805 font-size: 0.95rem; 1859 1806 line-height: 1.8; 1860 1807 color: var(--terminal-text); ··· 2010 1957 } 2011 1958 2012 1959 .code-block code { 2013 - font-family: "Monaco", "Menlo", monospace; 1960 + font-family: 'Monaco', 'Menlo', monospace; 2014 1961 font-size: 0.9rem; 2015 1962 line-height: 1.5; 2016 1963 white-space: pre-wrap; ··· 2067 2014 flex-wrap: wrap; 2068 2015 } 2069 2016 2070 - .tag-row, 2071 - .manifest-row { 2017 + .tag-row, .manifest-row { 2072 2018 flex-wrap: wrap; 2073 2019 } 2074 2020 ··· 2157 2103 /* README and Repository Layout */ 2158 2104 .repo-content-layout { 2159 2105 display: grid; 2160 - grid-template-columns: 6fr 4fr; 2106 + grid-template-columns: 7fr 3fr; 2161 2107 gap: 2rem; 2162 2108 margin-top: 2rem; 2163 2109 } ··· 2268 2214 background: var(--code-bg); 2269 2215 padding: 0.2rem 0.4rem; 2270 2216 border-radius: 3px; 2271 - font-family: 2272 - "SFMono-Regular", Consolas, "Liberation Mono", Menlo, monospace; 2217 + font-family: 'SFMono-Regular', Consolas, 'Liberation Mono', Menlo, monospace; 2273 2218 font-size: 0.9em; 2274 2219 } 2275 2220 ··· 2373 2318 padding: 0.75rem; 2374 2319 } 2375 2320 } 2376 - 2377 - /* 404 Error Page */ 2378 - .error-page { 2379 - display: flex; 2380 - align-items: center; 2381 - justify-content: center; 2382 - min-height: calc(100vh - 60px); 2383 - text-align: center; 2384 - padding: 2rem; 2385 - } 2386 - 2387 - .error-content { 2388 - max-width: 480px; 2389 - } 2390 - 2391 - .error-icon { 2392 - width: 80px; 2393 - height: 80px; 2394 - color: var(--secondary); 2395 - margin-bottom: 1.5rem; 2396 - } 2397 - 2398 - .error-code { 2399 - font-size: 8rem; 2400 - font-weight: 700; 2401 - color: var(--primary); 2402 - line-height: 1; 2403 - margin-bottom: 0.5rem; 2404 - } 2405 - 2406 - .error-content h1 { 2407 - font-size: 2rem; 2408 - margin-bottom: 0.75rem; 2409 - color: var(--fg); 2410 - } 2411 - 2412 - .error-content p { 2413 - font-size: 1.125rem; 2414 - color: var(--secondary); 2415 - margin-bottom: 2rem; 2416 - } 2417 - 2418 - @media (max-width: 768px) { 2419 - .error-code { 2420 - font-size: 5rem; 2421 - } 2422 - 2423 - .error-icon { 2424 - width: 60px; 2425 - height: 60px; 2426 - } 2427 - 2428 - .error-content h1 { 2429 - font-size: 1.5rem; 2430 - } 2431 - }
-63
pkg/appview/static/js/app.js
··· 434 434 } 435 435 } 436 436 437 - // Upload repository avatar 438 - async function uploadAvatar(input, repository) { 439 - const file = input.files[0]; 440 - if (!file) return; 441 - 442 - // Client-side validation 443 - const validTypes = ['image/png', 'image/jpeg', 'image/webp']; 444 - if (!validTypes.includes(file.type)) { 445 - alert('Please select a PNG, JPEG, or WebP image'); 446 - return; 447 - } 448 - if (file.size > 3 * 1024 * 1024) { 449 - alert('Image must be less than 3MB'); 450 - return; 451 - } 452 - 453 - const formData = new FormData(); 454 - formData.append('avatar', file); 455 - 456 - try { 457 - const response = await fetch(`/api/images/${repository}/avatar`, { 458 - method: 'POST', 459 - credentials: 'include', 460 - body: formData 461 - }); 462 - 463 - if (response.status === 401) { 464 - window.location.href = '/auth/oauth/login'; 465 - return; 466 - } 467 - 468 - if (!response.ok) { 469 - const error = await response.text(); 470 - throw new Error(error); 471 - } 472 - 473 - const data = await response.json(); 474 - 475 - // Update the avatar image on the page 476 - const wrapper = document.querySelector('.repo-hero-icon-wrapper'); 477 - if (!wrapper) return; 478 - 479 - const existingImg = wrapper.querySelector('.repo-hero-icon'); 480 - const placeholder = wrapper.querySelector('.repo-hero-icon-placeholder'); 481 - 482 - if (existingImg) { 483 - existingImg.src = data.avatarURL; 484 - } else if (placeholder) { 485 - const newImg = document.createElement('img'); 486 - newImg.src = data.avatarURL; 487 - newImg.alt = repository; 488 - newImg.className = 'repo-hero-icon'; 489 - placeholder.replaceWith(newImg); 490 - } 491 - } catch (err) { 492 - console.error('Error uploading avatar:', err); 493 - alert('Failed to upload avatar: ' + err.message); 494 - } 495 - 496 - // Clear input so same file can be selected again 497 - input.value = ''; 498 - } 499 - 500 437 // Close modal when clicking outside 501 438 document.addEventListener('DOMContentLoaded', () => { 502 439 const modal = document.getElementById('manifest-delete-modal');
+42
pkg/appview/storage/context.go
··· 1 + package storage 2 + 3 + import ( 4 + "context" 5 + 6 + "atcr.io/pkg/atproto" 7 + "atcr.io/pkg/auth" 8 + "atcr.io/pkg/auth/oauth" 9 + ) 10 + 11 + // DatabaseMetrics interface for tracking pull/push counts and querying hold DIDs 12 + type DatabaseMetrics interface { 13 + IncrementPullCount(did, repository string) error 14 + IncrementPushCount(did, repository string) error 15 + GetLatestHoldDIDForRepo(did, repository string) (string, error) 16 + } 17 + 18 + // ReadmeCache interface for README content caching 19 + type ReadmeCache interface { 20 + Get(ctx context.Context, url string) (string, error) 21 + Invalidate(url string) error 22 + } 23 + 24 + // RegistryContext bundles all the context needed for registry operations 25 + // This includes both per-request data (DID, hold) and shared services 26 + type RegistryContext struct { 27 + // Per-request identity and routing information 28 + DID string // User's DID (e.g., "did:plc:abc123") 29 + Handle string // User's handle (e.g., "alice.bsky.social") 30 + HoldDID string // Hold service DID (e.g., "did:web:hold01.atcr.io") 31 + PDSEndpoint string // User's PDS endpoint URL 32 + Repository string // Image repository name (e.g., "debian") 33 + ServiceToken string // Service token for hold authentication (cached by middleware) 34 + ATProtoClient *atproto.Client // Authenticated ATProto client for this user 35 + AuthMethod string // Auth method used ("oauth" or "app_password") 36 + 37 + // Shared services (same for all requests) 38 + Database DatabaseMetrics // Metrics tracking database 39 + Authorizer auth.HoldAuthorizer // Hold access authorization 40 + Refresher *oauth.Refresher // OAuth session manager 41 + ReadmeCache ReadmeCache // README content cache 42 + }
+146
pkg/appview/storage/context_test.go
··· 1 + package storage 2 + 3 + import ( 4 + "context" 5 + "sync" 6 + "testing" 7 + 8 + "atcr.io/pkg/atproto" 9 + ) 10 + 11 + // Mock implementations for testing 12 + type mockDatabaseMetrics struct { 13 + mu sync.Mutex 14 + pullCount int 15 + pushCount int 16 + } 17 + 18 + func (m *mockDatabaseMetrics) IncrementPullCount(did, repository string) error { 19 + m.mu.Lock() 20 + defer m.mu.Unlock() 21 + m.pullCount++ 22 + return nil 23 + } 24 + 25 + func (m *mockDatabaseMetrics) IncrementPushCount(did, repository string) error { 26 + m.mu.Lock() 27 + defer m.mu.Unlock() 28 + m.pushCount++ 29 + return nil 30 + } 31 + 32 + func (m *mockDatabaseMetrics) GetLatestHoldDIDForRepo(did, repository string) (string, error) { 33 + // Return empty string for mock - tests can override if needed 34 + return "", nil 35 + } 36 + 37 + func (m *mockDatabaseMetrics) getPullCount() int { 38 + m.mu.Lock() 39 + defer m.mu.Unlock() 40 + return m.pullCount 41 + } 42 + 43 + func (m *mockDatabaseMetrics) getPushCount() int { 44 + m.mu.Lock() 45 + defer m.mu.Unlock() 46 + return m.pushCount 47 + } 48 + 49 + type mockReadmeCache struct{} 50 + 51 + func (m *mockReadmeCache) Get(ctx context.Context, url string) (string, error) { 52 + return "# Test README", nil 53 + } 54 + 55 + func (m *mockReadmeCache) Invalidate(url string) error { 56 + return nil 57 + } 58 + 59 + type mockHoldAuthorizer struct{} 60 + 61 + func (m *mockHoldAuthorizer) Authorize(holdDID, userDID, permission string) (bool, error) { 62 + return true, nil 63 + } 64 + 65 + func TestRegistryContext_Fields(t *testing.T) { 66 + // Create a sample RegistryContext 67 + ctx := &RegistryContext{ 68 + DID: "did:plc:test123", 69 + Handle: "alice.bsky.social", 70 + HoldDID: "did:web:hold01.atcr.io", 71 + PDSEndpoint: "https://bsky.social", 72 + Repository: "debian", 73 + ServiceToken: "test-token", 74 + ATProtoClient: &atproto.Client{ 75 + // Mock client - would need proper initialization in real tests 76 + }, 77 + Database: &mockDatabaseMetrics{}, 78 + ReadmeCache: &mockReadmeCache{}, 79 + } 80 + 81 + // Verify fields are accessible 82 + if ctx.DID != "did:plc:test123" { 83 + t.Errorf("Expected DID %q, got %q", "did:plc:test123", ctx.DID) 84 + } 85 + if ctx.Handle != "alice.bsky.social" { 86 + t.Errorf("Expected Handle %q, got %q", "alice.bsky.social", ctx.Handle) 87 + } 88 + if ctx.HoldDID != "did:web:hold01.atcr.io" { 89 + t.Errorf("Expected HoldDID %q, got %q", "did:web:hold01.atcr.io", ctx.HoldDID) 90 + } 91 + if ctx.PDSEndpoint != "https://bsky.social" { 92 + t.Errorf("Expected PDSEndpoint %q, got %q", "https://bsky.social", ctx.PDSEndpoint) 93 + } 94 + if ctx.Repository != "debian" { 95 + t.Errorf("Expected Repository %q, got %q", "debian", ctx.Repository) 96 + } 97 + if ctx.ServiceToken != "test-token" { 98 + t.Errorf("Expected ServiceToken %q, got %q", "test-token", ctx.ServiceToken) 99 + } 100 + } 101 + 102 + func TestRegistryContext_DatabaseInterface(t *testing.T) { 103 + db := &mockDatabaseMetrics{} 104 + ctx := &RegistryContext{ 105 + Database: db, 106 + } 107 + 108 + // Test that interface methods are callable 109 + err := ctx.Database.IncrementPullCount("did:plc:test", "repo") 110 + if err != nil { 111 + t.Errorf("Unexpected error: %v", err) 112 + } 113 + 114 + err = ctx.Database.IncrementPushCount("did:plc:test", "repo") 115 + if err != nil { 116 + t.Errorf("Unexpected error: %v", err) 117 + } 118 + } 119 + 120 + func TestRegistryContext_ReadmeCacheInterface(t *testing.T) { 121 + cache := &mockReadmeCache{} 122 + ctx := &RegistryContext{ 123 + ReadmeCache: cache, 124 + } 125 + 126 + // Test that interface methods are callable 127 + content, err := ctx.ReadmeCache.Get(context.Background(), "https://example.com/README.md") 128 + if err != nil { 129 + t.Errorf("Unexpected error: %v", err) 130 + } 131 + if content != "# Test README" { 132 + t.Errorf("Expected content %q, got %q", "# Test README", content) 133 + } 134 + 135 + err = ctx.ReadmeCache.Invalidate("https://example.com/README.md") 136 + if err != nil { 137 + t.Errorf("Unexpected error: %v", err) 138 + } 139 + } 140 + 141 + // TODO: Add more comprehensive tests: 142 + // - Test ATProtoClient integration 143 + // - Test OAuth Refresher integration 144 + // - Test HoldAuthorizer integration 145 + // - Test nil handling for optional fields 146 + // - Integration tests with real components
+93
pkg/appview/storage/crew.go
··· 1 + package storage 2 + 3 + import ( 4 + "context" 5 + "fmt" 6 + "io" 7 + "log/slog" 8 + "net/http" 9 + "time" 10 + 11 + "atcr.io/pkg/atproto" 12 + "atcr.io/pkg/auth/oauth" 13 + "atcr.io/pkg/auth/token" 14 + ) 15 + 16 + // EnsureCrewMembership attempts to register the user as a crew member on their default hold. 17 + // The hold's requestCrew endpoint handles all authorization logic (checking allowAllCrew, existing membership, etc). 18 + // This is best-effort and does not fail on errors. 19 + func EnsureCrewMembership(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, defaultHoldDID string) { 20 + if defaultHoldDID == "" { 21 + return 22 + } 23 + 24 + // Normalize URL to DID if needed 25 + holdDID := atproto.ResolveHoldDIDFromURL(defaultHoldDID) 26 + if holdDID == "" { 27 + slog.Warn("failed to resolve hold DID", "defaultHold", defaultHoldDID) 28 + return 29 + } 30 + 31 + // Resolve hold DID to HTTP endpoint 32 + holdEndpoint := atproto.ResolveHoldURL(holdDID) 33 + 34 + // Get service token for the hold 35 + // Only works with OAuth (refresher required) - app passwords can't get service tokens 36 + if refresher == nil { 37 + slog.Debug("skipping crew registration - no OAuth refresher (app password flow)", "holdDID", holdDID) 38 + return 39 + } 40 + 41 + // Wrap the refresher to match OAuthSessionRefresher interface 42 + serviceToken, err := token.GetOrFetchServiceToken(ctx, refresher, client.DID(), holdDID, client.PDSEndpoint()) 43 + if err != nil { 44 + slog.Warn("failed to get service token", "holdDID", holdDID, "error", err) 45 + return 46 + } 47 + 48 + // Call requestCrew endpoint - it handles all the logic: 49 + // - Checks allowAllCrew flag 50 + // - Checks if already a crew member (returns success if so) 51 + // - Creates crew record if authorized 52 + if err := requestCrewMembership(ctx, holdEndpoint, serviceToken); err != nil { 53 + slog.Warn("failed to request crew membership", "holdDID", holdDID, "error", err) 54 + return 55 + } 56 + 57 + slog.Info("successfully registered as crew member", "holdDID", holdDID, "userDID", client.DID()) 58 + } 59 + 60 + // requestCrewMembership calls the hold's requestCrew endpoint 61 + // The endpoint handles all authorization and duplicate checking internally 62 + func requestCrewMembership(ctx context.Context, holdEndpoint, serviceToken string) error { 63 + // Add 5 second timeout to prevent hanging on offline holds 64 + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) 65 + defer cancel() 66 + 67 + url := fmt.Sprintf("%s%s", holdEndpoint, atproto.HoldRequestCrew) 68 + 69 + req, err := http.NewRequestWithContext(ctx, "POST", url, nil) 70 + if err != nil { 71 + return err 72 + } 73 + 74 + req.Header.Set("Authorization", "Bearer "+serviceToken) 75 + req.Header.Set("Content-Type", "application/json") 76 + 77 + resp, err := http.DefaultClient.Do(req) 78 + if err != nil { 79 + return err 80 + } 81 + defer resp.Body.Close() 82 + 83 + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { 84 + // Read response body to capture actual error message from hold 85 + body, readErr := io.ReadAll(resp.Body) 86 + if readErr != nil { 87 + return fmt.Errorf("requestCrew failed with status %d (failed to read error body: %w)", resp.StatusCode, readErr) 88 + } 89 + return fmt.Errorf("requestCrew failed with status %d: %s", resp.StatusCode, string(body)) 90 + } 91 + 92 + return nil 93 + }
+14
pkg/appview/storage/crew_test.go
··· 1 + package storage 2 + 3 + import ( 4 + "context" 5 + "testing" 6 + ) 7 + 8 + func TestEnsureCrewMembership_EmptyHoldDID(t *testing.T) { 9 + // Test that empty hold DID returns early without error (best-effort function) 10 + EnsureCrewMembership(context.Background(), nil, nil, "") 11 + // If we get here without panic, test passes 12 + } 13 + 14 + // TODO: Add comprehensive tests with HTTP client mocking
+86 -314
pkg/appview/storage/manifest_store.go
··· 3 3 import ( 4 4 "bytes" 5 5 "context" 6 - "database/sql" 7 6 "encoding/json" 8 7 "errors" 9 8 "fmt" ··· 11 10 "log/slog" 12 11 "net/http" 13 12 "strings" 14 - "time" 13 + "sync" 15 14 16 - "atcr.io/pkg/appview/db" 17 - "atcr.io/pkg/appview/readme" 18 15 "atcr.io/pkg/atproto" 19 - "atcr.io/pkg/auth" 20 16 "github.com/distribution/distribution/v3" 21 17 "github.com/opencontainers/go-digest" 22 18 ) ··· 24 20 // ManifestStore implements distribution.ManifestService 25 21 // It stores manifests in ATProto as records 26 22 type ManifestStore struct { 27 - ctx *auth.UserContext // User context with identity, target, permissions 23 + ctx *RegistryContext // Context with user/hold info 24 + mu sync.RWMutex // Protects lastFetchedHoldDID 25 + lastFetchedHoldDID string // Hold DID from most recently fetched manifest (for pull) 28 26 blobStore distribution.BlobStore // Blob store for fetching config during push 29 - sqlDB *sql.DB // Database for pull/push counts 30 27 } 31 28 32 29 // NewManifestStore creates a new ATProto-backed manifest store 33 - func NewManifestStore(userCtx *auth.UserContext, blobStore distribution.BlobStore, sqlDB *sql.DB) *ManifestStore { 30 + func NewManifestStore(ctx *RegistryContext, blobStore distribution.BlobStore) *ManifestStore { 34 31 return &ManifestStore{ 35 - ctx: userCtx, 32 + ctx: ctx, 36 33 blobStore: blobStore, 37 - sqlDB: sqlDB, 38 34 } 39 35 } 40 36 41 37 // Exists checks if a manifest exists by digest 42 38 func (s *ManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { 43 39 rkey := digestToRKey(dgst) 44 - _, err := s.ctx.GetATProtoClient().GetRecord(ctx, atproto.ManifestCollection, rkey) 40 + _, err := s.ctx.ATProtoClient.GetRecord(ctx, atproto.ManifestCollection, rkey) 45 41 if err != nil { 46 42 // If not found, return false without error 47 43 if errors.Is(err, atproto.ErrRecordNotFound) { ··· 55 51 // Get retrieves a manifest by digest 56 52 func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { 57 53 rkey := digestToRKey(dgst) 58 - record, err := s.ctx.GetATProtoClient().GetRecord(ctx, atproto.ManifestCollection, rkey) 54 + record, err := s.ctx.ATProtoClient.GetRecord(ctx, atproto.ManifestCollection, rkey) 59 55 if err != nil { 60 56 return nil, distribution.ErrManifestUnknownRevision{ 61 - Name: s.ctx.TargetRepo, 57 + Name: s.ctx.Repository, 62 58 Revision: dgst, 63 59 } 64 60 } 65 61 66 - var manifestRecord atproto.ManifestRecord 62 + var manifestRecord atproto.Manifest 67 63 if err := json.Unmarshal(record.Value, &manifestRecord); err != nil { 68 64 return nil, fmt.Errorf("failed to unmarshal manifest record: %w", err) 69 65 } 70 66 67 + // Store the hold DID for subsequent blob requests during pull 68 + // Prefer HoldDid (new format) with fallback to HoldEndpoint (legacy URL format) 69 + // The routing repository will cache this for concurrent blob fetches 70 + s.mu.Lock() 71 + if manifestRecord.HoldDid != nil && *manifestRecord.HoldDid != "" { 72 + // New format: DID reference (preferred) 73 + s.lastFetchedHoldDID = *manifestRecord.HoldDid 74 + } else if manifestRecord.HoldEndpoint != nil && *manifestRecord.HoldEndpoint != "" { 75 + // Legacy format: URL reference - convert to DID 76 + s.lastFetchedHoldDID = atproto.ResolveHoldDIDFromURL(*manifestRecord.HoldEndpoint) 77 + } 78 + s.mu.Unlock() 79 + 71 80 var ociManifest []byte 72 81 73 82 // New records: Download blob from ATProto blob storage 74 - if manifestRecord.ManifestBlob != nil && manifestRecord.ManifestBlob.Ref.Link != "" { 75 - ociManifest, err = s.ctx.GetATProtoClient().GetBlob(ctx, manifestRecord.ManifestBlob.Ref.Link) 83 + if manifestRecord.ManifestBlob != nil && manifestRecord.ManifestBlob.Ref.Defined() { 84 + ociManifest, err = s.ctx.ATProtoClient.GetBlob(ctx, manifestRecord.ManifestBlob.Ref.String()) 76 85 if err != nil { 77 86 return nil, fmt.Errorf("failed to download manifest blob: %w", err) 78 87 } ··· 80 89 81 90 // Track pull count (increment asynchronously to avoid blocking the response) 82 91 // Only count GET requests (actual downloads), not HEAD requests (existence checks) 83 - if s.sqlDB != nil { 92 + if s.ctx.Database != nil { 84 93 // Check HTTP method from context (distribution library stores it as "http.request.method") 85 94 if method, ok := ctx.Value("http.request.method").(string); ok && method == "GET" { 86 95 go func() { 87 - if err := db.IncrementPullCount(s.sqlDB, s.ctx.TargetOwnerDID, s.ctx.TargetRepo); err != nil { 88 - slog.Warn("Failed to increment pull count", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err) 96 + if err := s.ctx.Database.IncrementPullCount(s.ctx.DID, s.ctx.Repository); err != nil { 97 + slog.Warn("Failed to increment pull count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err) 89 98 } 90 99 }() 91 100 } ··· 112 121 dgst := digest.FromBytes(payload) 113 122 114 123 // Upload manifest as blob to PDS 115 - blobRef, err := s.ctx.GetATProtoClient().UploadBlob(ctx, payload, mediaType) 124 + blobRef, err := s.ctx.ATProtoClient.UploadBlob(ctx, payload, mediaType) 116 125 if err != nil { 117 126 return "", fmt.Errorf("failed to upload manifest blob: %w", err) 118 127 } 119 128 120 129 // Create manifest record with structured metadata 121 - manifestRecord, err := atproto.NewManifestRecord(s.ctx.TargetRepo, dgst.String(), payload) 130 + manifestRecord, err := atproto.NewManifestRecord(s.ctx.Repository, dgst.String(), payload) 122 131 if err != nil { 123 132 return "", fmt.Errorf("failed to create manifest record: %w", err) 124 133 } 125 134 126 135 // Set the blob reference, hold DID, and hold endpoint 127 136 manifestRecord.ManifestBlob = blobRef 128 - manifestRecord.HoldDID = s.ctx.TargetHoldDID // Primary reference (DID) 137 + if s.ctx.HoldDID != "" { 138 + manifestRecord.HoldDid = &s.ctx.HoldDID // Primary reference (DID) 139 + } 129 140 130 141 // Extract Dockerfile labels from config blob and add to annotations 131 142 // Only for image manifests (not manifest lists which don't have config blobs) ··· 152 163 if !exists { 153 164 platform := "unknown" 154 165 if ref.Platform != nil { 155 - platform = fmt.Sprintf("%s/%s", ref.Platform.OS, ref.Platform.Architecture) 166 + platform = fmt.Sprintf("%s/%s", ref.Platform.Os, ref.Platform.Architecture) 156 167 } 157 168 slog.Warn("Manifest list references non-existent child manifest", 158 - "repository", s.ctx.TargetRepo, 169 + "repository", s.ctx.Repository, 159 170 "missingDigest", ref.Digest, 160 171 "platform", platform) 161 172 return "", distribution.ErrManifestBlobUnknown{Digest: refDigest} ··· 163 174 } 164 175 } 165 176 166 - if !isManifestList && s.blobStore != nil && manifestRecord.Config != nil && manifestRecord.Config.Digest != "" { 167 - labels, err := s.extractConfigLabels(ctx, manifestRecord.Config.Digest) 168 - if err != nil { 169 - // Log error but don't fail the push - labels are optional 170 - slog.Warn("Failed to extract config labels", "error", err) 171 - } else if len(labels) > 0 { 172 - // Initialize annotations map if needed 173 - if manifestRecord.Annotations == nil { 174 - manifestRecord.Annotations = make(map[string]string) 175 - } 176 - 177 - // Copy labels to annotations as fallback 178 - // Only set label values for keys NOT already in manifest annotations 179 - // This ensures explicit annotations take precedence over Dockerfile LABELs 180 - // (which may be inherited from base images) 181 - for key, value := range labels { 182 - if _, exists := manifestRecord.Annotations[key]; !exists { 183 - manifestRecord.Annotations[key] = value 184 - } 185 - } 186 - 187 - slog.Debug("Merged labels from config blob", "labelsCount", len(labels), "annotationsCount", len(manifestRecord.Annotations)) 188 - } 189 - } 177 + // Note: Label extraction from config blob is currently disabled because the generated 178 + // Manifest_Annotations type doesn't support arbitrary keys. The lexicon schema would 179 + // need to use "unknown" type for annotations to support dynamic key-value pairs. 180 + // TODO: Update lexicon schema if label extraction is needed. 181 + _ = isManifestList // silence unused variable warning for now 190 182 191 183 // Store manifest record in ATProto 192 184 rkey := digestToRKey(dgst) 193 - _, err = s.ctx.GetATProtoClient().PutRecord(ctx, atproto.ManifestCollection, rkey, manifestRecord) 185 + _, err = s.ctx.ATProtoClient.PutRecord(ctx, atproto.ManifestCollection, rkey, manifestRecord) 194 186 if err != nil { 195 187 return "", fmt.Errorf("failed to store manifest record in ATProto: %w", err) 196 188 } 197 189 198 190 // Track push count (increment asynchronously to avoid blocking the response) 199 - if s.sqlDB != nil { 191 + if s.ctx.Database != nil { 200 192 go func() { 201 - if err := db.IncrementPushCount(s.sqlDB, s.ctx.TargetOwnerDID, s.ctx.TargetRepo); err != nil { 202 - slog.Warn("Failed to increment push count", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err) 193 + if err := s.ctx.Database.IncrementPushCount(s.ctx.DID, s.ctx.Repository); err != nil { 194 + slog.Warn("Failed to increment push count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err) 203 195 } 204 196 }() 205 197 } ··· 209 201 for _, option := range options { 210 202 if tagOpt, ok := option.(distribution.WithTagOption); ok { 211 203 tag = tagOpt.Tag 212 - tagRecord := atproto.NewTagRecord(s.ctx.GetATProtoClient().DID(), s.ctx.TargetRepo, tag, dgst.String()) 213 - tagRKey := atproto.RepositoryTagToRKey(s.ctx.TargetRepo, tag) 214 - _, err = s.ctx.GetATProtoClient().PutRecord(ctx, atproto.TagCollection, tagRKey, tagRecord) 204 + tagRecord := atproto.NewTagRecord(s.ctx.ATProtoClient.DID(), s.ctx.Repository, tag, dgst.String()) 205 + tagRKey := atproto.RepositoryTagToRKey(s.ctx.Repository, tag) 206 + _, err = s.ctx.ATProtoClient.PutRecord(ctx, atproto.TagCollection, tagRKey, tagRecord) 215 207 if err != nil { 216 208 return "", fmt.Errorf("failed to store tag in ATProto: %w", err) 217 209 } ··· 220 212 221 213 // Notify hold about manifest upload (for layer tracking and Bluesky posts) 222 214 // Do this asynchronously to avoid blocking the push 223 - // Get service token before goroutine (requires context) 224 - serviceToken, _ := s.ctx.GetServiceToken(ctx) 225 - if tag != "" && serviceToken != "" && s.ctx.TargetOwnerHandle != "" { 226 - go func(serviceToken string) { 215 + if tag != "" && s.ctx.ServiceToken != "" && s.ctx.Handle != "" { 216 + go func() { 227 217 defer func() { 228 218 if r := recover(); r != nil { 229 219 slog.Error("Panic in notifyHoldAboutManifest", "panic", r) 230 220 } 231 221 }() 232 - if err := s.notifyHoldAboutManifest(context.Background(), manifestRecord, tag, dgst.String(), serviceToken); err != nil { 222 + if err := s.notifyHoldAboutManifest(context.Background(), manifestRecord, tag, dgst.String()); err != nil { 233 223 slog.Warn("Failed to notify hold about manifest", "error", err) 234 224 } 235 - }(serviceToken) 225 + }() 236 226 } 237 227 238 - // Create or update repo page asynchronously if manifest has relevant annotations 239 - // This ensures repository metadata is synced to user's PDS 228 + // Refresh README cache asynchronously if manifest has io.atcr.readme annotation 229 + // This ensures fresh README content is available on repository pages 240 230 go func() { 241 231 defer func() { 242 232 if r := recover(); r != nil { 243 - slog.Error("Panic in ensureRepoPage", "panic", r) 233 + slog.Error("Panic in refreshReadmeCache", "panic", r) 244 234 } 245 235 }() 246 - s.ensureRepoPage(context.Background(), manifestRecord) 236 + s.refreshReadmeCache(context.Background(), manifestRecord) 247 237 }() 248 238 249 239 return dgst, nil ··· 252 242 // Delete removes a manifest 253 243 func (s *ManifestStore) Delete(ctx context.Context, dgst digest.Digest) error { 254 244 rkey := digestToRKey(dgst) 255 - return s.ctx.GetATProtoClient().DeleteRecord(ctx, atproto.ManifestCollection, rkey) 245 + return s.ctx.ATProtoClient.DeleteRecord(ctx, atproto.ManifestCollection, rkey) 256 246 } 257 247 258 248 // digestToRKey converts a digest to an ATProto record key ··· 262 252 return dgst.Encoded() 263 253 } 264 254 255 + // GetLastFetchedHoldDID returns the hold DID from the most recently fetched manifest 256 + // This is used by the routing repository to cache the hold for blob requests 257 + func (s *ManifestStore) GetLastFetchedHoldDID() string { 258 + s.mu.RLock() 259 + defer s.mu.RUnlock() 260 + return s.lastFetchedHoldDID 261 + } 262 + 265 263 // rawManifest is a simple implementation of distribution.Manifest 266 264 type rawManifest struct { 267 265 mediaType string ··· 307 305 308 306 // notifyHoldAboutManifest notifies the hold service about a manifest upload 309 307 // This enables the hold to create layer records and Bluesky posts 310 - func (s *ManifestStore) notifyHoldAboutManifest(ctx context.Context, manifestRecord *atproto.ManifestRecord, tag, manifestDigest, serviceToken string) error { 311 - // Skip if no service token provided 312 - if serviceToken == "" { 308 + func (s *ManifestStore) notifyHoldAboutManifest(ctx context.Context, manifestRecord *atproto.Manifest, tag, manifestDigest string) error { 309 + // Skip if no service token configured (e.g., anonymous pulls) 310 + if s.ctx.ServiceToken == "" { 313 311 return nil 314 312 } 315 313 316 314 // Resolve hold DID to HTTP endpoint 317 315 // For did:web, this is straightforward (e.g., did:web:hold01.atcr.io โ†’ https://hold01.atcr.io) 318 - holdEndpoint := atproto.ResolveHoldURL(s.ctx.TargetHoldDID) 316 + holdEndpoint := atproto.ResolveHoldURL(s.ctx.HoldDID) 319 317 320 - // Service token is passed in (already cached and validated) 318 + // Use service token from middleware (already cached and validated) 319 + serviceToken := s.ctx.ServiceToken 321 320 322 321 // Build notification request 323 322 manifestData := map[string]any{ ··· 356 355 } 357 356 if m.Platform != nil { 358 357 mData["platform"] = map[string]any{ 359 - "os": m.Platform.OS, 358 + "os": m.Platform.Os, 360 359 "architecture": m.Platform.Architecture, 361 360 } 362 361 } ··· 366 365 } 367 366 368 367 notifyReq := map[string]any{ 369 - "repository": s.ctx.TargetRepo, 368 + "repository": s.ctx.Repository, 370 369 "tag": tag, 371 - "userDid": s.ctx.TargetOwnerDID, 372 - "userHandle": s.ctx.TargetOwnerHandle, 370 + "userDid": s.ctx.DID, 371 + "userHandle": s.ctx.Handle, 373 372 "manifest": manifestData, 374 373 } 375 374 ··· 407 406 // Parse response (optional logging) 408 407 var notifyResp map[string]any 409 408 if err := json.NewDecoder(resp.Body).Decode(&notifyResp); err == nil { 410 - slog.Info("Hold notification successful", "repository", s.ctx.TargetRepo, "tag", tag, "response", notifyResp) 409 + slog.Info("Hold notification successful", "repository", s.ctx.Repository, "tag", tag, "response", notifyResp) 411 410 } 412 411 413 412 return nil 414 413 } 415 414 416 - // ensureRepoPage creates or updates a repo page record in the user's PDS if needed 417 - // This syncs repository metadata from manifest annotations to the io.atcr.repo.page collection 418 - // Only creates a new record if one doesn't exist (doesn't overwrite user's custom content) 419 - func (s *ManifestStore) ensureRepoPage(ctx context.Context, manifestRecord *atproto.ManifestRecord) { 420 - // Check if repo page already exists (don't overwrite user's custom content) 421 - rkey := s.ctx.TargetRepo 422 - _, err := s.ctx.GetATProtoClient().GetRecord(ctx, atproto.RepoPageCollection, rkey) 423 - if err == nil { 424 - // Record already exists - don't overwrite 425 - slog.Debug("Repo page already exists, skipping creation", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo) 426 - return 427 - } 428 - 429 - // Only continue if it's a "not found" error - other errors mean we should skip 430 - if !errors.Is(err, atproto.ErrRecordNotFound) { 431 - slog.Warn("Failed to check for existing repo page", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err) 432 - return 433 - } 434 - 435 - // Get annotations (may be nil if image has no OCI labels) 436 - annotations := manifestRecord.Annotations 437 - if annotations == nil { 438 - annotations = make(map[string]string) 439 - } 440 - 441 - // Try to fetch README content from external sources 442 - // Priority: io.atcr.readme annotation > derived from org.opencontainers.image.source > org.opencontainers.image.description 443 - description := s.fetchReadmeContent(ctx, annotations) 444 - 445 - // If no README content could be fetched, fall back to description annotation 446 - if description == "" { 447 - description = annotations["org.opencontainers.image.description"] 448 - } 449 - 450 - // Try to fetch and upload icon from io.atcr.icon annotation 451 - var avatarRef *atproto.ATProtoBlobRef 452 - if iconURL := annotations["io.atcr.icon"]; iconURL != "" { 453 - avatarRef = s.fetchAndUploadIcon(ctx, iconURL) 454 - } 455 - 456 - // Create new repo page record with description and optional avatar 457 - repoPage := atproto.NewRepoPageRecord(s.ctx.TargetRepo, description, avatarRef) 458 - 459 - slog.Info("Creating repo page from manifest annotations", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "descriptionLength", len(description), "hasAvatar", avatarRef != nil) 460 - 461 - _, err = s.ctx.GetATProtoClient().PutRecord(ctx, atproto.RepoPageCollection, rkey, repoPage) 462 - if err != nil { 463 - slog.Warn("Failed to create repo page", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err) 415 + // refreshReadmeCache refreshes the README cache for this manifest if it has io.atcr.readme annotation 416 + // This should be called asynchronously after manifest push to keep README content fresh 417 + // NOTE: Currently disabled because the generated Manifest_Annotations type doesn't support 418 + // arbitrary key-value pairs. Would need to update lexicon schema with "unknown" type. 419 + func (s *ManifestStore) refreshReadmeCache(ctx context.Context, manifestRecord *atproto.Manifest) { 420 + // Skip if no README cache configured 421 + if s.ctx.ReadmeCache == nil { 464 422 return 465 423 } 466 424 467 - slog.Info("Repo page created successfully", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo) 468 - } 469 - 470 - // fetchReadmeContent attempts to fetch README content from external sources 471 - // Priority: io.atcr.readme annotation > derived from org.opencontainers.image.source 472 - // Returns the raw markdown content, or empty string if not available 473 - func (s *ManifestStore) fetchReadmeContent(ctx context.Context, annotations map[string]string) string { 474 - 475 - // Create a context with timeout for README fetching (don't block push too long) 476 - fetchCtx, cancel := context.WithTimeout(ctx, 10*time.Second) 477 - defer cancel() 478 - 479 - // Priority 1: Direct README URL from io.atcr.readme annotation 480 - if readmeURL := annotations["io.atcr.readme"]; readmeURL != "" { 481 - content, err := s.fetchRawReadme(fetchCtx, readmeURL) 482 - if err != nil { 483 - slog.Debug("Failed to fetch README from io.atcr.readme annotation", "url", readmeURL, "error", err) 484 - } else if content != "" { 485 - slog.Info("Fetched README from io.atcr.readme annotation", "url", readmeURL, "length", len(content)) 486 - return content 487 - } 488 - } 489 - 490 - // Priority 2: Derive README URL from org.opencontainers.image.source 491 - if sourceURL := annotations["org.opencontainers.image.source"]; sourceURL != "" { 492 - // Try main branch first, then master 493 - for _, branch := range []string{"main", "master"} { 494 - readmeURL := readme.DeriveReadmeURL(sourceURL, branch) 495 - if readmeURL == "" { 496 - continue 497 - } 498 - 499 - content, err := s.fetchRawReadme(fetchCtx, readmeURL) 500 - if err != nil { 501 - // Only log non-404 errors (404 is expected when trying main vs master) 502 - if !readme.Is404(err) { 503 - slog.Debug("Failed to fetch README from source URL", "url", readmeURL, "branch", branch, "error", err) 504 - } 505 - continue 506 - } 507 - 508 - if content != "" { 509 - slog.Info("Fetched README from source URL", "sourceURL", sourceURL, "branch", branch, "length", len(content)) 510 - return content 511 - } 512 - } 513 - } 514 - 515 - return "" 516 - } 517 - 518 - // fetchRawReadme fetches raw markdown content from a URL 519 - // Returns the raw markdown (not rendered HTML) for storage in the repo page record 520 - func (s *ManifestStore) fetchRawReadme(ctx context.Context, readmeURL string) (string, error) { 521 - // Use a simple HTTP client to fetch raw content 522 - // We want raw markdown, not rendered HTML (the Fetcher renders to HTML) 523 - req, err := http.NewRequestWithContext(ctx, "GET", readmeURL, nil) 524 - if err != nil { 525 - return "", fmt.Errorf("failed to create request: %w", err) 526 - } 527 - 528 - req.Header.Set("User-Agent", "ATCR-README-Fetcher/1.0") 529 - 530 - client := &http.Client{ 531 - Timeout: 10 * time.Second, 532 - CheckRedirect: func(req *http.Request, via []*http.Request) error { 533 - if len(via) >= 5 { 534 - return fmt.Errorf("too many redirects") 535 - } 536 - return nil 537 - }, 538 - } 539 - 540 - resp, err := client.Do(req) 541 - if err != nil { 542 - return "", fmt.Errorf("failed to fetch URL: %w", err) 543 - } 544 - defer resp.Body.Close() 545 - 546 - if resp.StatusCode != http.StatusOK { 547 - return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode) 548 - } 549 - 550 - // Limit content size to 100KB (repo page description has 100KB limit in lexicon) 551 - limitedReader := io.LimitReader(resp.Body, 100*1024) 552 - content, err := io.ReadAll(limitedReader) 553 - if err != nil { 554 - return "", fmt.Errorf("failed to read response body: %w", err) 555 - } 556 - 557 - return string(content), nil 558 - } 559 - 560 - // fetchAndUploadIcon fetches an image from a URL and uploads it as a blob to the user's PDS 561 - // Returns the blob reference for use in the repo page record, or nil on error 562 - func (s *ManifestStore) fetchAndUploadIcon(ctx context.Context, iconURL string) *atproto.ATProtoBlobRef { 563 - // Create a context with timeout for icon fetching 564 - fetchCtx, cancel := context.WithTimeout(ctx, 10*time.Second) 565 - defer cancel() 566 - 567 - // Fetch the icon 568 - req, err := http.NewRequestWithContext(fetchCtx, "GET", iconURL, nil) 569 - if err != nil { 570 - slog.Debug("Failed to create icon request", "url", iconURL, "error", err) 571 - return nil 572 - } 573 - 574 - req.Header.Set("User-Agent", "ATCR-Icon-Fetcher/1.0") 575 - 576 - client := &http.Client{ 577 - Timeout: 10 * time.Second, 578 - CheckRedirect: func(req *http.Request, via []*http.Request) error { 579 - if len(via) >= 5 { 580 - return fmt.Errorf("too many redirects") 581 - } 582 - return nil 583 - }, 584 - } 585 - 586 - resp, err := client.Do(req) 587 - if err != nil { 588 - slog.Debug("Failed to fetch icon", "url", iconURL, "error", err) 589 - return nil 590 - } 591 - defer resp.Body.Close() 592 - 593 - if resp.StatusCode != http.StatusOK { 594 - slog.Debug("Icon fetch returned non-OK status", "url", iconURL, "status", resp.StatusCode) 595 - return nil 596 - } 597 - 598 - // Validate content type - only allow images 599 - contentType := resp.Header.Get("Content-Type") 600 - mimeType := detectImageMimeType(contentType, iconURL) 601 - if mimeType == "" { 602 - slog.Debug("Icon has unsupported content type", "url", iconURL, "contentType", contentType) 603 - return nil 604 - } 605 - 606 - // Limit icon size to 3MB (matching lexicon maxSize) 607 - limitedReader := io.LimitReader(resp.Body, 3*1024*1024) 608 - iconData, err := io.ReadAll(limitedReader) 609 - if err != nil { 610 - slog.Debug("Failed to read icon data", "url", iconURL, "error", err) 611 - return nil 612 - } 613 - 614 - if len(iconData) == 0 { 615 - slog.Debug("Icon data is empty", "url", iconURL) 616 - return nil 617 - } 618 - 619 - // Upload the icon as a blob to the user's PDS 620 - blobRef, err := s.ctx.GetATProtoClient().UploadBlob(ctx, iconData, mimeType) 621 - if err != nil { 622 - slog.Warn("Failed to upload icon blob", "url", iconURL, "error", err) 623 - return nil 624 - } 625 - 626 - slog.Info("Uploaded icon blob", "url", iconURL, "size", len(iconData), "mimeType", mimeType, "cid", blobRef.Ref.Link) 627 - return blobRef 628 - } 629 - 630 - // detectImageMimeType determines the MIME type for an image 631 - // Uses Content-Type header first, then falls back to extension-based detection 632 - // Only allows types accepted by the lexicon: image/png, image/jpeg, image/webp 633 - func detectImageMimeType(contentType, url string) string { 634 - // Check Content-Type header first 635 - switch { 636 - case strings.HasPrefix(contentType, "image/png"): 637 - return "image/png" 638 - case strings.HasPrefix(contentType, "image/jpeg"): 639 - return "image/jpeg" 640 - case strings.HasPrefix(contentType, "image/webp"): 641 - return "image/webp" 642 - } 643 - 644 - // Fall back to URL extension detection 645 - lowerURL := strings.ToLower(url) 646 - switch { 647 - case strings.HasSuffix(lowerURL, ".png"): 648 - return "image/png" 649 - case strings.HasSuffix(lowerURL, ".jpg"), strings.HasSuffix(lowerURL, ".jpeg"): 650 - return "image/jpeg" 651 - case strings.HasSuffix(lowerURL, ".webp"): 652 - return "image/webp" 653 - } 654 - 655 - // Unknown or unsupported type - reject 656 - return "" 425 + // TODO: Re-enable once lexicon supports annotations as map[string]string 426 + // The generated Manifest_Annotations is an empty struct that doesn't support map access. 427 + // For now, README cache refresh on push is disabled. 428 + _ = manifestRecord // silence unused variable warning 657 429 }
+300 -136
pkg/appview/storage/manifest_store_test.go
··· 8 8 "net/http" 9 9 "net/http/httptest" 10 10 "testing" 11 + "time" 11 12 12 13 "atcr.io/pkg/atproto" 13 - "atcr.io/pkg/auth" 14 14 "github.com/distribution/distribution/v3" 15 15 "github.com/opencontainers/go-digest" 16 16 ) 17 + 18 + // mockDatabaseMetrics removed - using the one from context_test.go 17 19 18 20 // mockBlobStore is a minimal mock of distribution.BlobStore for testing 19 21 type mockBlobStore struct { ··· 70 72 return nil, nil // Not needed for current tests 71 73 } 72 74 73 - // mockUserContextForManifest creates a mock auth.UserContext for manifest store testing 74 - func mockUserContextForManifest(pdsEndpoint, repository, holdDID, ownerDID, ownerHandle string) *auth.UserContext { 75 - userCtx := auth.NewUserContext(ownerDID, "oauth", "PUT", nil) 76 - userCtx.SetTarget(ownerDID, ownerHandle, pdsEndpoint, repository, holdDID) 77 - return userCtx 75 + // mockRegistryContext creates a mock RegistryContext for testing 76 + func mockRegistryContext(client *atproto.Client, repository, holdDID, did, handle string, database DatabaseMetrics) *RegistryContext { 77 + return &RegistryContext{ 78 + ATProtoClient: client, 79 + Repository: repository, 80 + HoldDID: holdDID, 81 + DID: did, 82 + Handle: handle, 83 + Database: database, 84 + } 78 85 } 79 86 80 87 // TestDigestToRKey tests digest to record key conversion ··· 108 115 109 116 // TestNewManifestStore tests creating a new manifest store 110 117 func TestNewManifestStore(t *testing.T) { 118 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 111 119 blobStore := newMockBlobStore() 112 - userCtx := mockUserContextForManifest( 113 - "https://pds.example.com", 114 - "myapp", 115 - "did:web:hold.example.com", 116 - "did:plc:alice123", 117 - "alice.test", 118 - ) 119 - store := NewManifestStore(userCtx, blobStore, nil) 120 + db := &mockDatabaseMetrics{} 121 + 122 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:alice123", "alice.test", db) 123 + store := NewManifestStore(ctx, blobStore) 120 124 121 - if store.ctx.TargetRepo != "myapp" { 122 - t.Errorf("repository = %v, want myapp", store.ctx.TargetRepo) 125 + if store.ctx.Repository != "myapp" { 126 + t.Errorf("repository = %v, want myapp", store.ctx.Repository) 127 + } 128 + if store.ctx.HoldDID != "did:web:hold.example.com" { 129 + t.Errorf("holdDID = %v, want did:web:hold.example.com", store.ctx.HoldDID) 130 + } 131 + if store.ctx.DID != "did:plc:alice123" { 132 + t.Errorf("did = %v, want did:plc:alice123", store.ctx.DID) 123 133 } 124 - if store.ctx.TargetHoldDID != "did:web:hold.example.com" { 125 - t.Errorf("holdDID = %v, want did:web:hold.example.com", store.ctx.TargetHoldDID) 134 + if store.ctx.Handle != "alice.test" { 135 + t.Errorf("handle = %v, want alice.test", store.ctx.Handle) 126 136 } 127 - if store.ctx.TargetOwnerDID != "did:plc:alice123" { 128 - t.Errorf("did = %v, want did:plc:alice123", store.ctx.TargetOwnerDID) 137 + } 138 + 139 + // TestManifestStore_GetLastFetchedHoldDID tests tracking last fetched hold DID 140 + func TestManifestStore_GetLastFetchedHoldDID(t *testing.T) { 141 + tests := []struct { 142 + name string 143 + manifestHoldDID string 144 + manifestHoldURL string 145 + expectedLastFetched string 146 + }{ 147 + { 148 + name: "prefers HoldDID", 149 + manifestHoldDID: "did:web:hold01.atcr.io", 150 + manifestHoldURL: "https://hold01.atcr.io", 151 + expectedLastFetched: "did:web:hold01.atcr.io", 152 + }, 153 + { 154 + name: "falls back to HoldEndpoint URL conversion", 155 + manifestHoldDID: "", 156 + manifestHoldURL: "https://hold02.atcr.io", 157 + expectedLastFetched: "did:web:hold02.atcr.io", 158 + }, 159 + { 160 + name: "empty hold references", 161 + manifestHoldDID: "", 162 + manifestHoldURL: "", 163 + expectedLastFetched: "", 164 + }, 129 165 } 130 - if store.ctx.TargetOwnerHandle != "alice.test" { 131 - t.Errorf("handle = %v, want alice.test", store.ctx.TargetOwnerHandle) 166 + 167 + for _, tt := range tests { 168 + t.Run(tt.name, func(t *testing.T) { 169 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 170 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 171 + store := NewManifestStore(ctx, nil) 172 + 173 + // Simulate what happens in Get() when parsing a manifest record 174 + var manifestRecord atproto.Manifest 175 + if tt.manifestHoldDID != "" { 176 + manifestRecord.HoldDid = &tt.manifestHoldDID 177 + } 178 + if tt.manifestHoldURL != "" { 179 + manifestRecord.HoldEndpoint = &tt.manifestHoldURL 180 + } 181 + 182 + // Mimic the hold DID extraction logic from Get() 183 + if manifestRecord.HoldDid != nil && *manifestRecord.HoldDid != "" { 184 + store.lastFetchedHoldDID = *manifestRecord.HoldDid 185 + } else if manifestRecord.HoldEndpoint != nil && *manifestRecord.HoldEndpoint != "" { 186 + store.lastFetchedHoldDID = atproto.ResolveHoldDIDFromURL(*manifestRecord.HoldEndpoint) 187 + } 188 + 189 + got := store.GetLastFetchedHoldDID() 190 + if got != tt.expectedLastFetched { 191 + t.Errorf("GetLastFetchedHoldDID() = %v, want %v", got, tt.expectedLastFetched) 192 + } 193 + }) 132 194 } 133 195 } 134 196 ··· 183 245 blobStore.blobs[configDigest] = configData 184 246 185 247 // Create manifest store 186 - userCtx := mockUserContextForManifest( 187 - "https://pds.example.com", 188 - "myapp", 189 - "", 190 - "did:plc:test123", 191 - "test.handle", 192 - ) 193 - store := NewManifestStore(userCtx, blobStore, nil) 248 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 249 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 250 + store := NewManifestStore(ctx, blobStore) 194 251 195 252 // Extract labels 196 253 labels, err := store.extractConfigLabels(context.Background(), configDigest.String()) ··· 228 285 configDigest := digest.FromBytes(configData) 229 286 blobStore.blobs[configDigest] = configData 230 287 231 - userCtx := mockUserContextForManifest( 232 - "https://pds.example.com", 233 - "myapp", 234 - "", 235 - "did:plc:test123", 236 - "test.handle", 237 - ) 238 - store := NewManifestStore(userCtx, blobStore, nil) 288 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 289 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 290 + store := NewManifestStore(ctx, blobStore) 239 291 240 292 labels, err := store.extractConfigLabels(context.Background(), configDigest.String()) 241 293 if err != nil { ··· 251 303 // TestExtractConfigLabels_InvalidDigest tests error handling for invalid digest 252 304 func TestExtractConfigLabels_InvalidDigest(t *testing.T) { 253 305 blobStore := newMockBlobStore() 254 - userCtx := mockUserContextForManifest( 255 - "https://pds.example.com", 256 - "myapp", 257 - "", 258 - "did:plc:test123", 259 - "test.handle", 260 - ) 261 - store := NewManifestStore(userCtx, blobStore, nil) 306 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 307 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 308 + store := NewManifestStore(ctx, blobStore) 262 309 263 310 _, err := store.extractConfigLabels(context.Background(), "invalid-digest") 264 311 if err == nil { ··· 275 322 configDigest := digest.FromBytes(configData) 276 323 blobStore.blobs[configDigest] = configData 277 324 278 - userCtx := mockUserContextForManifest( 279 - "https://pds.example.com", 280 - "myapp", 281 - "", 282 - "did:plc:test123", 283 - "test.handle", 284 - ) 285 - store := NewManifestStore(userCtx, blobStore, nil) 325 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 326 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 327 + store := NewManifestStore(ctx, blobStore) 286 328 287 329 _, err := store.extractConfigLabels(context.Background(), configDigest.String()) 288 330 if err == nil { ··· 290 332 } 291 333 } 292 334 293 - // TestManifestStore_WithoutDatabase tests that nil database is acceptable 294 - func TestManifestStore_WithoutDatabase(t *testing.T) { 295 - userCtx := mockUserContextForManifest( 296 - "https://pds.example.com", 297 - "myapp", 298 - "did:web:hold.example.com", 299 - "did:plc:alice123", 300 - "alice.test", 301 - ) 302 - store := NewManifestStore(userCtx, nil, nil) 335 + // TestManifestStore_WithMetrics tests that metrics are tracked 336 + func TestManifestStore_WithMetrics(t *testing.T) { 337 + db := &mockDatabaseMetrics{} 338 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 339 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:alice123", "alice.test", db) 340 + store := NewManifestStore(ctx, nil) 341 + 342 + if store.ctx.Database != db { 343 + t.Error("ManifestStore should store database reference") 344 + } 345 + 346 + // Note: Actual metrics tracking happens in Put() and Get() which require 347 + // full mock setup. The important thing is that the database is wired up. 348 + } 349 + 350 + // TestManifestStore_WithoutMetrics tests that nil database is acceptable 351 + func TestManifestStore_WithoutMetrics(t *testing.T) { 352 + client := atproto.NewClient("https://pds.example.com", "did:plc:test123", "token") 353 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:alice123", "alice.test", nil) 354 + store := NewManifestStore(ctx, nil) 303 355 304 - if store.sqlDB != nil { 356 + if store.ctx.Database != nil { 305 357 t.Error("ManifestStore should accept nil database") 306 358 } 307 359 } ··· 320 372 name: "manifest exists", 321 373 digest: "sha256:abc123", 322 374 serverStatus: http.StatusOK, 323 - serverResp: `{"uri":"at://did:plc:test123/io.atcr.manifest/abc123","cid":"bafytest","value":{}}`, 375 + serverResp: `{"uri":"at://did:plc:test123/io.atcr.manifest/abc123","cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku","value":{}}`, 324 376 wantExists: true, 325 377 wantErr: false, 326 378 }, ··· 351 403 })) 352 404 defer server.Close() 353 405 354 - userCtx := mockUserContextForManifest( 355 - server.URL, 356 - "myapp", 357 - "did:web:hold.example.com", 358 - "did:plc:test123", 359 - "test.handle", 360 - ) 361 - store := NewManifestStore(userCtx, nil, nil) 406 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 407 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:test123", "test.handle", nil) 408 + store := NewManifestStore(ctx, nil) 362 409 363 410 exists, err := store.Exists(context.Background(), tt.digest) 364 411 if (err != nil) != tt.wantErr { ··· 390 437 digest: "sha256:abc123", 391 438 serverResp: `{ 392 439 "uri":"at://did:plc:test123/io.atcr.manifest/abc123", 393 - "cid":"bafytest", 440 + "cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku", 394 441 "value":{ 395 442 "$type":"io.atcr.manifest", 396 443 "repository":"myapp", ··· 400 447 "mediaType":"application/vnd.oci.image.manifest.v1+json", 401 448 "manifestBlob":{ 402 449 "$type":"blob", 403 - "ref":{"$link":"bafytest"}, 450 + "ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}, 404 451 "mimeType":"application/vnd.oci.image.manifest.v1+json", 405 452 "size":100 406 453 } ··· 434 481 "holdEndpoint":"https://hold02.atcr.io", 435 482 "mediaType":"application/vnd.oci.image.manifest.v1+json", 436 483 "manifestBlob":{ 437 - "ref":{"$link":"bafylegacy"}, 484 + "$type":"blob", 485 + "ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}, 486 + "mimeType":"application/json", 438 487 "size":100 439 488 } 440 489 } ··· 474 523 })) 475 524 defer server.Close() 476 525 477 - userCtx := mockUserContextForManifest( 478 - server.URL, 479 - "myapp", 480 - "did:web:hold.example.com", 481 - "did:plc:test123", 482 - "test.handle", 483 - ) 484 - store := NewManifestStore(userCtx, nil, nil) 526 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 527 + db := &mockDatabaseMetrics{} 528 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:test123", "test.handle", db) 529 + store := NewManifestStore(ctx, nil) 485 530 486 531 manifest, err := store.Get(context.Background(), tt.digest) 487 532 if (err != nil) != tt.wantErr { ··· 502 547 } 503 548 } 504 549 550 + // TestManifestStore_Get_HoldDIDTracking tests that Get() stores the holdDID 551 + func TestManifestStore_Get_HoldDIDTracking(t *testing.T) { 552 + ociManifest := []byte(`{"schemaVersion":2}`) 553 + 554 + tests := []struct { 555 + name string 556 + manifestResp string 557 + expectedHoldDID string 558 + }{ 559 + { 560 + name: "tracks HoldDID from new format", 561 + manifestResp: `{ 562 + "uri":"at://did:plc:test123/io.atcr.manifest/abc123", 563 + "value":{ 564 + "$type":"io.atcr.manifest", 565 + "holdDid":"did:web:hold01.atcr.io", 566 + "holdEndpoint":"https://hold01.atcr.io", 567 + "mediaType":"application/vnd.oci.image.manifest.v1+json", 568 + "manifestBlob":{"$type":"blob","ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},"mimeType":"application/json","size":100} 569 + } 570 + }`, 571 + expectedHoldDID: "did:web:hold01.atcr.io", 572 + }, 573 + { 574 + name: "tracks HoldDID from legacy HoldEndpoint", 575 + manifestResp: `{ 576 + "uri":"at://did:plc:test123/io.atcr.manifest/abc123", 577 + "value":{ 578 + "$type":"io.atcr.manifest", 579 + "holdEndpoint":"https://hold02.atcr.io", 580 + "mediaType":"application/vnd.oci.image.manifest.v1+json", 581 + "manifestBlob":{"$type":"blob","ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},"mimeType":"application/json","size":100} 582 + } 583 + }`, 584 + expectedHoldDID: "did:web:hold02.atcr.io", 585 + }, 586 + } 587 + 588 + for _, tt := range tests { 589 + t.Run(tt.name, func(t *testing.T) { 590 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 591 + if r.URL.Path == atproto.SyncGetBlob { 592 + w.Write(ociManifest) 593 + return 594 + } 595 + w.Write([]byte(tt.manifestResp)) 596 + })) 597 + defer server.Close() 598 + 599 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 600 + ctx := mockRegistryContext(client, "myapp", "", "did:plc:test123", "test.handle", nil) 601 + store := NewManifestStore(ctx, nil) 602 + 603 + _, err := store.Get(context.Background(), "sha256:abc123") 604 + if err != nil { 605 + t.Fatalf("Get() error = %v", err) 606 + } 607 + 608 + gotHoldDID := store.GetLastFetchedHoldDID() 609 + if gotHoldDID != tt.expectedHoldDID { 610 + t.Errorf("GetLastFetchedHoldDID() = %v, want %v", gotHoldDID, tt.expectedHoldDID) 611 + } 612 + }) 613 + } 614 + } 615 + 616 + // TestManifestStore_Get_OnlyCountsGETRequests verifies that HEAD requests don't increment pull count 617 + func TestManifestStore_Get_OnlyCountsGETRequests(t *testing.T) { 618 + ociManifest := []byte(`{"schemaVersion":2}`) 619 + 620 + tests := []struct { 621 + name string 622 + httpMethod string 623 + expectPullIncrement bool 624 + }{ 625 + { 626 + name: "GET request increments pull count", 627 + httpMethod: "GET", 628 + expectPullIncrement: true, 629 + }, 630 + { 631 + name: "HEAD request does not increment pull count", 632 + httpMethod: "HEAD", 633 + expectPullIncrement: false, 634 + }, 635 + { 636 + name: "POST request does not increment pull count", 637 + httpMethod: "POST", 638 + expectPullIncrement: false, 639 + }, 640 + } 641 + 642 + for _, tt := range tests { 643 + t.Run(tt.name, func(t *testing.T) { 644 + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 645 + if r.URL.Path == atproto.SyncGetBlob { 646 + w.Write(ociManifest) 647 + return 648 + } 649 + w.Write([]byte(`{ 650 + "uri": "at://did:plc:test123/io.atcr.manifest/abc123", 651 + "value": { 652 + "$type":"io.atcr.manifest", 653 + "holdDid":"did:web:hold01.atcr.io", 654 + "mediaType":"application/vnd.oci.image.manifest.v1+json", 655 + "manifestBlob":{"$type":"blob","ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},"mimeType":"application/json","size":100} 656 + } 657 + }`)) 658 + })) 659 + defer server.Close() 660 + 661 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 662 + mockDB := &mockDatabaseMetrics{} 663 + ctx := mockRegistryContext(client, "myapp", "did:web:hold01.atcr.io", "did:plc:test123", "test.handle", mockDB) 664 + store := NewManifestStore(ctx, nil) 665 + 666 + // Create a context with the HTTP method stored (as distribution library does) 667 + testCtx := context.WithValue(context.Background(), "http.request.method", tt.httpMethod) 668 + 669 + _, err := store.Get(testCtx, "sha256:abc123") 670 + if err != nil { 671 + t.Fatalf("Get() error = %v", err) 672 + } 673 + 674 + // Wait for async goroutine to complete (metrics are incremented asynchronously) 675 + time.Sleep(50 * time.Millisecond) 676 + 677 + if tt.expectPullIncrement { 678 + // Check that IncrementPullCount was called 679 + if mockDB.getPullCount() == 0 { 680 + t.Error("Expected pull count to be incremented for GET request, but it wasn't") 681 + } 682 + } else { 683 + // Check that IncrementPullCount was NOT called 684 + if mockDB.getPullCount() > 0 { 685 + t.Errorf("Expected pull count NOT to be incremented for %s request, but it was (count=%d)", tt.httpMethod, mockDB.getPullCount()) 686 + } 687 + } 688 + }) 689 + } 690 + } 691 + 505 692 // TestManifestStore_Put tests storing manifests 506 693 func TestManifestStore_Put(t *testing.T) { 507 694 ociManifest := []byte(`{ ··· 573 760 // Handle uploadBlob 574 761 if r.URL.Path == atproto.RepoUploadBlob { 575 762 w.WriteHeader(http.StatusOK) 576 - w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafytest"},"mimeType":"application/json","size":100}}`)) 763 + w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},"mimeType":"application/json","size":100}}`)) 577 764 return 578 765 } 579 766 ··· 582 769 json.NewDecoder(r.Body).Decode(&lastBody) 583 770 w.WriteHeader(tt.serverStatus) 584 771 if tt.serverStatus == http.StatusOK { 585 - w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/abc123","cid":"bafytest"}`)) 772 + w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/abc123","cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}`)) 586 773 } else { 587 774 w.Write([]byte(`{"error":"ServerError"}`)) 588 775 } ··· 593 780 })) 594 781 defer server.Close() 595 782 596 - userCtx := mockUserContextForManifest( 597 - server.URL, 598 - "myapp", 599 - "did:web:hold.example.com", 600 - "did:plc:test123", 601 - "test.handle", 602 - ) 603 - store := NewManifestStore(userCtx, nil, nil) 783 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 784 + db := &mockDatabaseMetrics{} 785 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:test123", "test.handle", db) 786 + store := NewManifestStore(ctx, nil) 604 787 605 788 dgst, err := store.Put(context.Background(), tt.manifest, tt.options...) 606 789 if (err != nil) != tt.wantErr { ··· 638 821 639 822 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 640 823 if r.URL.Path == atproto.RepoUploadBlob { 641 - w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafytest"},"size":100}}`)) 824 + w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},"size":100}}`)) 642 825 return 643 826 } 644 827 if r.URL.Path == atproto.RepoPutRecord { 645 - w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/config123","cid":"bafytest"}`)) 828 + w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/config123","cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}`)) 646 829 return 647 830 } 648 831 w.WriteHeader(http.StatusOK) 649 832 })) 650 833 defer server.Close() 651 834 652 - userCtx := mockUserContextForManifest( 653 - server.URL, 654 - "myapp", 655 - "did:web:hold.example.com", 656 - "did:plc:test123", 657 - "test.handle", 658 - ) 835 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 836 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:test123", "test.handle", nil) 659 837 660 838 // Use config digest in manifest 661 839 ociManifestWithConfig := []byte(`{ ··· 670 848 payload: ociManifestWithConfig, 671 849 } 672 850 673 - store := NewManifestStore(userCtx, blobStore, nil) 851 + store := NewManifestStore(ctx, blobStore) 674 852 675 853 _, err := store.Put(context.Background(), manifest) 676 854 if err != nil { ··· 698 876 name: "successful delete", 699 877 digest: "sha256:abc123", 700 878 serverStatus: http.StatusOK, 701 - serverResp: `{"commit":{"cid":"bafytest","rev":"12345"}}`, 879 + serverResp: `{"commit":{"cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku","rev":"12345"}}`, 702 880 wantErr: false, 703 881 }, 704 882 { ··· 730 908 })) 731 909 defer server.Close() 732 910 733 - userCtx := mockUserContextForManifest( 734 - server.URL, 735 - "myapp", 736 - "did:web:hold.example.com", 737 - "did:plc:test123", 738 - "test.handle", 739 - ) 740 - store := NewManifestStore(userCtx, nil, nil) 911 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 912 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:test123", "test.handle", nil) 913 + store := NewManifestStore(ctx, nil) 741 914 742 915 err := store.Delete(context.Background(), tt.digest) 743 916 if (err != nil) != tt.wantErr { ··· 860 1033 // Handle uploadBlob 861 1034 if r.URL.Path == atproto.RepoUploadBlob { 862 1035 w.WriteHeader(http.StatusOK) 863 - w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafytest"},"mimeType":"application/json","size":100}}`)) 1036 + w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},"mimeType":"application/json","size":100}}`)) 864 1037 return 865 1038 } 866 1039 ··· 872 1045 // If child should exist, return it; otherwise return RecordNotFound 873 1046 if tt.childExists || rkey == childDigest.Encoded() { 874 1047 w.WriteHeader(http.StatusOK) 875 - w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/` + rkey + `","cid":"bafytest","value":{}}`)) 1048 + w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/` + rkey + `","cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku","value":{}}`)) 876 1049 } else { 877 1050 w.WriteHeader(http.StatusBadRequest) 878 1051 w.Write([]byte(`{"error":"RecordNotFound","message":"Record not found"}`)) ··· 883 1056 // Handle putRecord 884 1057 if r.URL.Path == atproto.RepoPutRecord { 885 1058 w.WriteHeader(http.StatusOK) 886 - w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/test123","cid":"bafytest"}`)) 1059 + w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/test123","cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}`)) 887 1060 return 888 1061 } 889 1062 ··· 891 1064 })) 892 1065 defer server.Close() 893 1066 894 - userCtx := mockUserContextForManifest( 895 - server.URL, 896 - "myapp", 897 - "did:web:hold.example.com", 898 - "did:plc:test123", 899 - "test.handle", 900 - ) 901 - store := NewManifestStore(userCtx, nil, nil) 1067 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 1068 + db := &mockDatabaseMetrics{} 1069 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:test123", "test.handle", db) 1070 + store := NewManifestStore(ctx, nil) 902 1071 903 1072 manifest := &rawManifest{ 904 1073 mediaType: "application/vnd.oci.image.index.v1+json", ··· 948 1117 949 1118 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 950 1119 if r.URL.Path == atproto.RepoUploadBlob { 951 - w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafytest"},"size":100}}`)) 1120 + w.Write([]byte(`{"blob":{"$type":"blob","ref":{"$link":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},"size":100}}`)) 952 1121 return 953 1122 } 954 1123 955 1124 if r.URL.Path == atproto.RepoGetRecord { 956 1125 rkey := r.URL.Query().Get("rkey") 957 1126 if existingManifests[rkey] { 958 - w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/` + rkey + `","cid":"bafytest","value":{}}`)) 1127 + w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/` + rkey + `","cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku","value":{}}`)) 959 1128 } else { 960 1129 w.WriteHeader(http.StatusBadRequest) 961 1130 w.Write([]byte(`{"error":"RecordNotFound"}`)) ··· 964 1133 } 965 1134 966 1135 if r.URL.Path == atproto.RepoPutRecord { 967 - w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/test123","cid":"bafytest"}`)) 1136 + w.Write([]byte(`{"uri":"at://did:plc:test123/io.atcr.manifest/test123","cid":"bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}`)) 968 1137 return 969 1138 } 970 1139 ··· 972 1141 })) 973 1142 defer server.Close() 974 1143 975 - userCtx := mockUserContextForManifest( 976 - server.URL, 977 - "myapp", 978 - "did:web:hold.example.com", 979 - "did:plc:test123", 980 - "test.handle", 981 - ) 982 - store := NewManifestStore(userCtx, nil, nil) 1144 + client := atproto.NewClient(server.URL, "did:plc:test123", "token") 1145 + ctx := mockRegistryContext(client, "myapp", "did:web:hold.example.com", "did:plc:test123", "test.handle", nil) 1146 + store := NewManifestStore(ctx, nil) 983 1147 984 1148 // Create manifest list with both children 985 1149 manifestList := []byte(`{
+12 -10
pkg/appview/storage/profile.go
··· 54 54 // GetProfile retrieves the user's profile from their PDS 55 55 // Returns nil if profile doesn't exist 56 56 // Automatically migrates old URL-based defaultHold values to DIDs 57 - func GetProfile(ctx context.Context, client *atproto.Client) (*atproto.SailorProfileRecord, error) { 57 + func GetProfile(ctx context.Context, client *atproto.Client) (*atproto.SailorProfile, error) { 58 58 record, err := client.GetRecord(ctx, atproto.SailorProfileCollection, ProfileRKey) 59 59 if err != nil { 60 60 // Check if it's a 404 (profile doesn't exist) ··· 65 65 } 66 66 67 67 // Parse the profile record 68 - var profile atproto.SailorProfileRecord 68 + var profile atproto.SailorProfile 69 69 if err := json.Unmarshal(record.Value, &profile); err != nil { 70 70 return nil, fmt.Errorf("failed to parse profile: %w", err) 71 71 } 72 72 73 73 // Migrate old URL-based defaultHold to DID format 74 74 // This ensures backward compatibility with profiles created before DID migration 75 - if profile.DefaultHold != "" && !atproto.IsDID(profile.DefaultHold) { 75 + if profile.DefaultHold != nil && *profile.DefaultHold != "" && !atproto.IsDID(*profile.DefaultHold) { 76 76 // Convert URL to DID transparently 77 - migratedDID := atproto.ResolveHoldDIDFromURL(profile.DefaultHold) 78 - profile.DefaultHold = migratedDID 77 + migratedDID := atproto.ResolveHoldDIDFromURL(*profile.DefaultHold) 78 + profile.DefaultHold = &migratedDID 79 79 80 80 // Persist the migration to PDS in a background goroutine 81 81 // Use a lock to ensure only one goroutine migrates this DID ··· 94 94 defer cancel() 95 95 96 96 // Update the profile on the PDS 97 - profile.UpdatedAt = time.Now() 97 + now := time.Now().Format(time.RFC3339) 98 + profile.UpdatedAt = &now 98 99 if err := UpdateProfile(ctx, client, &profile); err != nil { 99 100 slog.Warn("Failed to persist URL-to-DID migration", "component", "profile", "did", did, "error", err) 100 101 } else { ··· 109 110 110 111 // UpdateProfile updates the user's profile 111 112 // Normalizes defaultHold to DID format before saving 112 - func UpdateProfile(ctx context.Context, client *atproto.Client, profile *atproto.SailorProfileRecord) error { 113 + func UpdateProfile(ctx context.Context, client *atproto.Client, profile *atproto.SailorProfile) error { 113 114 // Normalize defaultHold to DID if it's a URL 114 115 // This ensures we always store DIDs, even if user provides a URL 115 - if profile.DefaultHold != "" && !atproto.IsDID(profile.DefaultHold) { 116 - profile.DefaultHold = atproto.ResolveHoldDIDFromURL(profile.DefaultHold) 117 - slog.Debug("Normalized defaultHold to DID", "component", "profile", "default_hold", profile.DefaultHold) 116 + if profile.DefaultHold != nil && *profile.DefaultHold != "" && !atproto.IsDID(*profile.DefaultHold) { 117 + normalized := atproto.ResolveHoldDIDFromURL(*profile.DefaultHold) 118 + profile.DefaultHold = &normalized 119 + slog.Debug("Normalized defaultHold to DID", "component", "profile", "default_hold", normalized) 118 120 } 119 121 120 122 _, err := client.PutRecord(ctx, atproto.SailorProfileCollection, ProfileRKey, profile)
+46 -40
pkg/appview/storage/profile_test.go
··· 39 39 40 40 for _, tt := range tests { 41 41 t.Run(tt.name, func(t *testing.T) { 42 - var createdProfile *atproto.SailorProfileRecord 42 + var createdProfile *atproto.SailorProfile 43 43 44 44 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 45 45 // First request: GetRecord (should 404) ··· 95 95 t.Fatal("Profile was not created") 96 96 } 97 97 98 - if createdProfile.Type != atproto.SailorProfileCollection { 99 - t.Errorf("Type = %v, want %v", createdProfile.Type, atproto.SailorProfileCollection) 98 + if createdProfile.LexiconTypeID != atproto.SailorProfileCollection { 99 + t.Errorf("LexiconTypeID = %v, want %v", createdProfile.LexiconTypeID, atproto.SailorProfileCollection) 100 100 } 101 101 102 - if createdProfile.DefaultHold != tt.wantNormalized { 103 - t.Errorf("DefaultHold = %v, want %v", createdProfile.DefaultHold, tt.wantNormalized) 102 + gotDefaultHold := "" 103 + if createdProfile.DefaultHold != nil { 104 + gotDefaultHold = *createdProfile.DefaultHold 105 + } 106 + if gotDefaultHold != tt.wantNormalized { 107 + t.Errorf("DefaultHold = %v, want %v", gotDefaultHold, tt.wantNormalized) 104 108 } 105 109 }) 106 110 } ··· 154 158 name string 155 159 serverResponse string 156 160 serverStatus int 157 - wantProfile *atproto.SailorProfileRecord 161 + wantProfile *atproto.SailorProfile 158 162 wantNil bool 159 163 wantErr bool 160 164 expectMigration bool // Whether URL-to-DID migration should happen ··· 265 269 } 266 270 267 271 // Check that defaultHold is migrated to DID in returned profile 268 - if profile.DefaultHold != tt.expectedHoldDID { 269 - t.Errorf("DefaultHold = %v, want %v", profile.DefaultHold, tt.expectedHoldDID) 272 + gotDefaultHold := "" 273 + if profile.DefaultHold != nil { 274 + gotDefaultHold = *profile.DefaultHold 275 + } 276 + if gotDefaultHold != tt.expectedHoldDID { 277 + t.Errorf("DefaultHold = %v, want %v", gotDefaultHold, tt.expectedHoldDID) 270 278 } 271 279 272 280 if tt.expectMigration { ··· 366 374 } 367 375 } 368 376 377 + // testSailorProfile creates a test profile with the given default hold 378 + func testSailorProfile(defaultHold string) *atproto.SailorProfile { 379 + now := time.Now().Format(time.RFC3339) 380 + profile := &atproto.SailorProfile{ 381 + LexiconTypeID: atproto.SailorProfileCollection, 382 + CreatedAt: now, 383 + UpdatedAt: &now, 384 + } 385 + if defaultHold != "" { 386 + profile.DefaultHold = &defaultHold 387 + } 388 + return profile 389 + } 390 + 369 391 // TestUpdateProfile tests updating a user's profile 370 392 func TestUpdateProfile(t *testing.T) { 371 393 tests := []struct { 372 394 name string 373 - profile *atproto.SailorProfileRecord 395 + profile *atproto.SailorProfile 374 396 wantNormalized string // Expected defaultHold after normalization 375 397 wantErr bool 376 398 }{ 377 399 { 378 - name: "update with DID", 379 - profile: &atproto.SailorProfileRecord{ 380 - Type: atproto.SailorProfileCollection, 381 - DefaultHold: "did:web:hold02.atcr.io", 382 - CreatedAt: time.Now(), 383 - UpdatedAt: time.Now(), 384 - }, 400 + name: "update with DID", 401 + profile: testSailorProfile("did:web:hold02.atcr.io"), 385 402 wantNormalized: "did:web:hold02.atcr.io", 386 403 wantErr: false, 387 404 }, 388 405 { 389 - name: "update with URL - should normalize", 390 - profile: &atproto.SailorProfileRecord{ 391 - Type: atproto.SailorProfileCollection, 392 - DefaultHold: "https://hold02.atcr.io", 393 - CreatedAt: time.Now(), 394 - UpdatedAt: time.Now(), 395 - }, 406 + name: "update with URL - should normalize", 407 + profile: testSailorProfile("https://hold02.atcr.io"), 396 408 wantNormalized: "did:web:hold02.atcr.io", 397 409 wantErr: false, 398 410 }, 399 411 { 400 - name: "clear default hold", 401 - profile: &atproto.SailorProfileRecord{ 402 - Type: atproto.SailorProfileCollection, 403 - DefaultHold: "", 404 - CreatedAt: time.Now(), 405 - UpdatedAt: time.Now(), 406 - }, 412 + name: "clear default hold", 413 + profile: testSailorProfile(""), 407 414 wantNormalized: "", 408 415 wantErr: false, 409 416 }, ··· 454 461 } 455 462 456 463 // Verify normalization also updated the profile object 457 - if tt.profile.DefaultHold != tt.wantNormalized { 458 - t.Errorf("profile.DefaultHold = %v, want %v (should be updated in-place)", tt.profile.DefaultHold, tt.wantNormalized) 464 + gotProfileHold := "" 465 + if tt.profile.DefaultHold != nil { 466 + gotProfileHold = *tt.profile.DefaultHold 467 + } 468 + if gotProfileHold != tt.wantNormalized { 469 + t.Errorf("profile.DefaultHold = %v, want %v (should be updated in-place)", gotProfileHold, tt.wantNormalized) 459 470 } 460 471 } 461 472 }) ··· 539 550 t.Fatalf("GetProfile() error = %v", err) 540 551 } 541 552 542 - if profile.DefaultHold != "" { 543 - t.Errorf("DefaultHold = %v, want empty string", profile.DefaultHold) 553 + if profile.DefaultHold != nil && *profile.DefaultHold != "" { 554 + t.Errorf("DefaultHold = %v, want empty or nil", profile.DefaultHold) 544 555 } 545 556 } 546 557 ··· 553 564 defer server.Close() 554 565 555 566 client := atproto.NewClient(server.URL, "did:plc:test123", "test-token") 556 - profile := &atproto.SailorProfileRecord{ 557 - Type: atproto.SailorProfileCollection, 558 - DefaultHold: "did:web:hold01.atcr.io", 559 - CreatedAt: time.Now(), 560 - UpdatedAt: time.Now(), 561 - } 567 + profile := testSailorProfile("did:web:hold01.atcr.io") 562 568 563 569 err := UpdateProfile(context.Background(), client, profile) 564 570
+28 -26
pkg/appview/storage/proxy_blob_store.go
··· 12 12 "time" 13 13 14 14 "atcr.io/pkg/atproto" 15 - "atcr.io/pkg/auth" 16 15 "github.com/distribution/distribution/v3" 17 16 "github.com/distribution/distribution/v3/registry/api/errcode" 18 17 "github.com/opencontainers/go-digest" ··· 33 32 34 33 // ProxyBlobStore proxies blob requests to an external storage service 35 34 type ProxyBlobStore struct { 36 - ctx *auth.UserContext // User context with identity, target, permissions 37 - holdURL string // Resolved HTTP URL for XRPC requests 35 + ctx *RegistryContext // All context and services 36 + holdURL string // Resolved HTTP URL for XRPC requests 38 37 httpClient *http.Client 39 38 } 40 39 41 40 // NewProxyBlobStore creates a new proxy blob store 42 - func NewProxyBlobStore(userCtx *auth.UserContext) *ProxyBlobStore { 41 + func NewProxyBlobStore(ctx *RegistryContext) *ProxyBlobStore { 43 42 // Resolve DID to URL once at construction time 44 - holdURL := atproto.ResolveHoldURL(userCtx.TargetHoldDID) 43 + holdURL := atproto.ResolveHoldURL(ctx.HoldDID) 45 44 46 - slog.Debug("NewProxyBlobStore created", "component", "proxy_blob_store", "hold_did", userCtx.TargetHoldDID, "hold_url", holdURL, "user_did", userCtx.TargetOwnerDID, "repo", userCtx.TargetRepo) 45 + slog.Debug("NewProxyBlobStore created", "component", "proxy_blob_store", "hold_did", ctx.HoldDID, "hold_url", holdURL, "user_did", ctx.DID, "repo", ctx.Repository) 47 46 48 47 return &ProxyBlobStore{ 49 - ctx: userCtx, 48 + ctx: ctx, 50 49 holdURL: holdURL, 51 50 httpClient: &http.Client{ 52 51 Timeout: 5 * time.Minute, // Timeout for presigned URL requests and uploads ··· 62 61 } 63 62 64 63 // doAuthenticatedRequest performs an HTTP request with service token authentication 65 - // Uses the service token from UserContext to authenticate requests to the hold service 64 + // Uses the service token from middleware to authenticate requests to the hold service 66 65 func (p *ProxyBlobStore) doAuthenticatedRequest(ctx context.Context, req *http.Request) (*http.Response, error) { 67 - // Get service token from UserContext (lazy-loaded and cached per holdDID) 68 - serviceToken, err := p.ctx.GetServiceToken(ctx) 69 - if err != nil { 70 - slog.Error("Failed to get service token", "component", "proxy_blob_store", "did", p.ctx.DID, "error", err) 71 - return nil, fmt.Errorf("failed to get service token: %w", err) 72 - } 73 - if serviceToken == "" { 66 + // Use service token that middleware already validated and cached 67 + // Middleware fails fast with HTTP 401 if OAuth session is invalid 68 + if p.ctx.ServiceToken == "" { 74 69 // Should never happen - middleware validates OAuth before handlers run 75 70 slog.Error("No service token in context", "component", "proxy_blob_store", "did", p.ctx.DID) 76 71 return nil, fmt.Errorf("no service token available (middleware should have validated)") 77 72 } 78 73 79 74 // Add Bearer token to Authorization header 80 - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", serviceToken)) 75 + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.ctx.ServiceToken)) 81 76 82 77 return p.httpClient.Do(req) 83 78 } 84 79 85 80 // checkReadAccess validates that the user has read access to blobs in this hold 86 81 func (p *ProxyBlobStore) checkReadAccess(ctx context.Context) error { 87 - canRead, err := p.ctx.CanRead(ctx) 82 + if p.ctx.Authorizer == nil { 83 + return nil // No authorization check if authorizer not configured 84 + } 85 + allowed, err := p.ctx.Authorizer.CheckReadAccess(ctx, p.ctx.HoldDID, p.ctx.DID) 88 86 if err != nil { 89 87 return fmt.Errorf("authorization check failed: %w", err) 90 88 } 91 - if !canRead { 89 + if !allowed { 92 90 // Return 403 Forbidden instead of masquerading as missing blob 93 91 return errcode.ErrorCodeDenied.WithMessage("read access denied") 94 92 } ··· 97 95 98 96 // checkWriteAccess validates that the user has write access to blobs in this hold 99 97 func (p *ProxyBlobStore) checkWriteAccess(ctx context.Context) error { 100 - slog.Debug("Checking write access", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.TargetHoldDID) 101 - canWrite, err := p.ctx.CanWrite(ctx) 98 + if p.ctx.Authorizer == nil { 99 + return nil // No authorization check if authorizer not configured 100 + } 101 + 102 + slog.Debug("Checking write access", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID) 103 + allowed, err := p.ctx.Authorizer.CheckWriteAccess(ctx, p.ctx.HoldDID, p.ctx.DID) 102 104 if err != nil { 103 105 slog.Error("Authorization check error", "component", "proxy_blob_store", "error", err) 104 106 return fmt.Errorf("authorization check failed: %w", err) 105 107 } 106 - if !canWrite { 107 - slog.Warn("Write access denied", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.TargetHoldDID) 108 - return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("write access denied to hold %s", p.ctx.TargetHoldDID)) 108 + if !allowed { 109 + slog.Warn("Write access denied", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID) 110 + return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("write access denied to hold %s", p.ctx.HoldDID)) 109 111 } 110 - slog.Debug("Write access allowed", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.TargetHoldDID) 112 + slog.Debug("Write access allowed", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID) 111 113 return nil 112 114 } 113 115 ··· 354 356 // getPresignedURL returns the XRPC endpoint URL for blob operations 355 357 func (p *ProxyBlobStore) getPresignedURL(ctx context.Context, operation string, dgst digest.Digest) (string, error) { 356 358 // Use XRPC endpoint: /xrpc/com.atproto.sync.getBlob?did={userDID}&cid={digest} 357 - // The 'did' parameter is the TARGET OWNER's DID (whose blob we're fetching), not the hold service DID 359 + // The 'did' parameter is the USER's DID (whose blob we're fetching), not the hold service DID 358 360 // Per migration doc: hold accepts OCI digest directly as cid parameter (checks for sha256: prefix) 359 361 xrpcURL := fmt.Sprintf("%s%s?did=%s&cid=%s&method=%s", 360 - p.holdURL, atproto.SyncGetBlob, p.ctx.TargetOwnerDID, dgst.String(), operation) 362 + p.holdURL, atproto.SyncGetBlob, p.ctx.DID, dgst.String(), operation) 361 363 362 364 req, err := http.NewRequestWithContext(ctx, "GET", xrpcURL, nil) 363 365 if err != nil {
+420 -78
pkg/appview/storage/proxy_blob_store_test.go
··· 1 1 package storage 2 2 3 3 import ( 4 + "context" 4 5 "encoding/base64" 6 + "encoding/json" 5 7 "fmt" 8 + "net/http" 9 + "net/http/httptest" 6 10 "strings" 7 11 "testing" 8 12 "time" 9 13 10 14 "atcr.io/pkg/atproto" 11 - "atcr.io/pkg/auth" 15 + "atcr.io/pkg/auth/token" 16 + "github.com/opencontainers/go-digest" 12 17 ) 13 18 14 - // TestGetServiceToken_CachingLogic tests the global service token caching mechanism 15 - // These tests use the global auth cache functions directly 19 + // TestGetServiceToken_CachingLogic tests the token caching mechanism 16 20 func TestGetServiceToken_CachingLogic(t *testing.T) { 17 - userDID := "did:plc:cache-test" 21 + userDID := "did:plc:test" 18 22 holdDID := "did:web:hold.example.com" 19 23 20 24 // Test 1: Empty cache - invalidate any existing token 21 - auth.InvalidateServiceToken(userDID, holdDID) 22 - cachedToken, _ := auth.GetServiceToken(userDID, holdDID) 25 + token.InvalidateServiceToken(userDID, holdDID) 26 + cachedToken, _ := token.GetServiceToken(userDID, holdDID) 23 27 if cachedToken != "" { 24 28 t.Error("Expected empty cache at start") 25 29 } 26 30 27 31 // Test 2: Insert token into cache 28 32 // Create a JWT-like token with exp claim for testing 33 + // Format: header.payload.signature where payload has exp claim 29 34 testPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(50*time.Second).Unix()) 30 35 testToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature" 31 36 32 - err := auth.SetServiceToken(userDID, holdDID, testToken) 37 + err := token.SetServiceToken(userDID, holdDID, testToken) 33 38 if err != nil { 34 39 t.Fatalf("Failed to set service token: %v", err) 35 40 } 36 41 37 42 // Test 3: Retrieve from cache 38 - cachedToken, expiresAt := auth.GetServiceToken(userDID, holdDID) 43 + cachedToken, expiresAt := token.GetServiceToken(userDID, holdDID) 39 44 if cachedToken == "" { 40 45 t.Fatal("Expected token to be in cache") 41 46 } ··· 51 56 // Test 4: Expired token - GetServiceToken automatically removes it 52 57 expiredPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(-1*time.Hour).Unix()) 53 58 expiredToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(expiredPayload) + ".signature" 54 - auth.SetServiceToken(userDID, holdDID, expiredToken) 59 + token.SetServiceToken(userDID, holdDID, expiredToken) 55 60 56 61 // GetServiceToken should return empty string for expired token 57 - cachedToken, _ = auth.GetServiceToken(userDID, holdDID) 62 + cachedToken, _ = token.GetServiceToken(userDID, holdDID) 58 63 if cachedToken != "" { 59 64 t.Error("Expected expired token to be removed from cache") 60 65 } ··· 65 70 return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(data)), "=") 66 71 } 67 72 68 - // mockUserContextForProxy creates a mock auth.UserContext for proxy blob store testing. 69 - // It sets up both the user identity and target info, and configures test helpers 70 - // to bypass network calls. 71 - func mockUserContextForProxy(did, holdDID, pdsEndpoint, repository string) *auth.UserContext { 72 - userCtx := auth.NewUserContext(did, "oauth", "PUT", nil) 73 - userCtx.SetTarget(did, "test.handle", pdsEndpoint, repository, holdDID) 73 + // TestServiceToken_EmptyInContext tests that operations fail when service token is missing 74 + func TestServiceToken_EmptyInContext(t *testing.T) { 75 + ctx := &RegistryContext{ 76 + DID: "did:plc:test", 77 + HoldDID: "did:web:hold.example.com", 78 + PDSEndpoint: "https://pds.example.com", 79 + Repository: "test-repo", 80 + ServiceToken: "", // No service token (middleware didn't set it) 81 + Refresher: nil, 82 + } 83 + 84 + store := NewProxyBlobStore(ctx) 85 + 86 + // Try a write operation that requires authentication 87 + testDigest := digest.FromString("test-content") 88 + _, err := store.Stat(context.Background(), testDigest) 89 + 90 + // Should fail because no service token is available 91 + if err == nil { 92 + t.Error("Expected error when service token is empty") 93 + } 94 + 95 + // Error should indicate authentication issue 96 + if !strings.Contains(err.Error(), "UNAUTHORIZED") && !strings.Contains(err.Error(), "authentication") { 97 + t.Logf("Got error (acceptable): %v", err) 98 + } 99 + } 100 + 101 + // TestDoAuthenticatedRequest_BearerTokenInjection tests that Bearer tokens are added to requests 102 + func TestDoAuthenticatedRequest_BearerTokenInjection(t *testing.T) { 103 + // This test verifies the Bearer token injection logic 104 + 105 + testToken := "test-bearer-token-xyz" 106 + 107 + // Create a test server to verify the Authorization header 108 + var receivedAuthHeader string 109 + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 110 + receivedAuthHeader = r.Header.Get("Authorization") 111 + w.WriteHeader(http.StatusOK) 112 + })) 113 + defer testServer.Close() 74 114 75 - // Bypass PDS resolution (avoids network calls) 76 - userCtx.SetPDSForTest("test.handle", pdsEndpoint) 115 + // Create ProxyBlobStore with service token in context (set by middleware) 116 + ctx := &RegistryContext{ 117 + DID: "did:plc:bearer-test", 118 + HoldDID: "did:web:hold.example.com", 119 + PDSEndpoint: "https://pds.example.com", 120 + Repository: "test-repo", 121 + ServiceToken: testToken, // Service token from middleware 122 + Refresher: nil, 123 + } 77 124 78 - // Set up mock authorizer that allows access 79 - userCtx.SetAuthorizerForTest(auth.NewMockHoldAuthorizer()) 125 + store := NewProxyBlobStore(ctx) 80 126 81 - // Set default hold DID for push resolution 82 - userCtx.SetDefaultHoldDIDForTest(holdDID) 127 + // Create request 128 + req, err := http.NewRequest(http.MethodGet, testServer.URL+"/test", nil) 129 + if err != nil { 130 + t.Fatalf("Failed to create request: %v", err) 131 + } 83 132 84 - return userCtx 133 + // Do authenticated request 134 + resp, err := store.doAuthenticatedRequest(context.Background(), req) 135 + if err != nil { 136 + t.Fatalf("doAuthenticatedRequest failed: %v", err) 137 + } 138 + defer resp.Body.Close() 139 + 140 + // Verify Bearer token was added 141 + expectedHeader := "Bearer " + testToken 142 + if receivedAuthHeader != expectedHeader { 143 + t.Errorf("Expected Authorization header %s, got %s", expectedHeader, receivedAuthHeader) 144 + } 85 145 } 86 146 87 - // mockUserContextForProxyWithToken creates a mock UserContext with a pre-populated service token. 88 - func mockUserContextForProxyWithToken(did, holdDID, pdsEndpoint, repository, serviceToken string) *auth.UserContext { 89 - userCtx := mockUserContextForProxy(did, holdDID, pdsEndpoint, repository) 90 - userCtx.SetServiceTokenForTest(holdDID, serviceToken) 91 - return userCtx 147 + // TestDoAuthenticatedRequest_ErrorWhenTokenUnavailable tests that authentication failures return proper errors 148 + func TestDoAuthenticatedRequest_ErrorWhenTokenUnavailable(t *testing.T) { 149 + // Create test server (should not be called since auth fails first) 150 + called := false 151 + testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 152 + called = true 153 + w.WriteHeader(http.StatusOK) 154 + })) 155 + defer testServer.Close() 156 + 157 + // Create ProxyBlobStore without service token (middleware didn't set it) 158 + ctx := &RegistryContext{ 159 + DID: "did:plc:fallback", 160 + HoldDID: "did:web:hold.example.com", 161 + PDSEndpoint: "https://pds.example.com", 162 + Repository: "test-repo", 163 + ServiceToken: "", // No service token 164 + Refresher: nil, 165 + } 166 + 167 + store := NewProxyBlobStore(ctx) 168 + 169 + // Create request 170 + req, err := http.NewRequest(http.MethodGet, testServer.URL+"/test", nil) 171 + if err != nil { 172 + t.Fatalf("Failed to create request: %v", err) 173 + } 174 + 175 + // Do authenticated request - should fail when no service token 176 + resp, err := store.doAuthenticatedRequest(context.Background(), req) 177 + if err == nil { 178 + t.Fatal("Expected doAuthenticatedRequest to fail when no service token is available") 179 + } 180 + if resp != nil { 181 + resp.Body.Close() 182 + } 183 + 184 + // Verify error indicates authentication/authorization issue 185 + errStr := err.Error() 186 + if !strings.Contains(errStr, "service token") && !strings.Contains(errStr, "UNAUTHORIZED") { 187 + t.Errorf("Expected service token or unauthorized error, got: %v", err) 188 + } 189 + 190 + if called { 191 + t.Error("Expected request to NOT be made when authentication fails") 192 + } 92 193 } 93 194 94 - // TestResolveHoldURL tests DID to URL conversion (pure function) 195 + // TestResolveHoldURL tests DID to URL conversion 95 196 func TestResolveHoldURL(t *testing.T) { 96 197 tests := []struct { 97 198 name string ··· 99 200 expected string 100 201 }{ 101 202 { 102 - name: "did:web with http (localhost)", 203 + name: "did:web with http (TEST_MODE)", 103 204 holdDID: "did:web:localhost:8080", 104 205 expected: "http://localhost:8080", 105 206 }, ··· 127 228 128 229 // TestServiceTokenCacheExpiry tests that expired cached tokens are not used 129 230 func TestServiceTokenCacheExpiry(t *testing.T) { 130 - userDID := "did:plc:expiry-test" 231 + userDID := "did:plc:expiry" 131 232 holdDID := "did:web:hold.example.com" 132 233 133 234 // Insert expired token 134 235 expiredPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(-1*time.Hour).Unix()) 135 236 expiredToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(expiredPayload) + ".signature" 136 - auth.SetServiceToken(userDID, holdDID, expiredToken) 237 + token.SetServiceToken(userDID, holdDID, expiredToken) 137 238 138 239 // GetServiceToken should automatically remove expired tokens 139 - cachedToken, expiresAt := auth.GetServiceToken(userDID, holdDID) 240 + cachedToken, expiresAt := token.GetServiceToken(userDID, holdDID) 140 241 141 242 // Should return empty string for expired token 142 243 if cachedToken != "" { ··· 171 272 172 273 // TestNewProxyBlobStore tests ProxyBlobStore creation 173 274 func TestNewProxyBlobStore(t *testing.T) { 174 - userCtx := mockUserContextForProxy( 175 - "did:plc:test", 176 - "did:web:hold.example.com", 177 - "https://pds.example.com", 178 - "test-repo", 179 - ) 275 + ctx := &RegistryContext{ 276 + DID: "did:plc:test", 277 + HoldDID: "did:web:hold.example.com", 278 + PDSEndpoint: "https://pds.example.com", 279 + Repository: "test-repo", 280 + } 180 281 181 - store := NewProxyBlobStore(userCtx) 282 + store := NewProxyBlobStore(ctx) 182 283 183 284 if store == nil { 184 285 t.Fatal("Expected non-nil ProxyBlobStore") 185 286 } 186 287 187 - if store.ctx != userCtx { 288 + if store.ctx != ctx { 188 289 t.Error("Expected context to be set") 189 290 } 190 291 ··· 209 310 210 311 testPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(50*time.Second).Unix()) 211 312 testTokenStr := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature" 212 - auth.SetServiceToken(userDID, holdDID, testTokenStr) 313 + token.SetServiceToken(userDID, holdDID, testTokenStr) 213 314 214 315 for b.Loop() { 215 - cachedToken, expiresAt := auth.GetServiceToken(userDID, holdDID) 316 + cachedToken, expiresAt := token.GetServiceToken(userDID, holdDID) 216 317 217 318 if cachedToken == "" || time.Now().After(expiresAt) { 218 319 b.Error("Cache miss in benchmark") ··· 220 321 } 221 322 } 222 323 223 - // TestParseJWTExpiry tests JWT expiry parsing 224 - func TestParseJWTExpiry(t *testing.T) { 225 - // Create a JWT with known expiry 226 - futureTime := time.Now().Add(1 * time.Hour).Unix() 227 - testPayload := fmt.Sprintf(`{"exp":%d}`, futureTime) 228 - testToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature" 324 + // TestCompleteMultipartUpload_JSONFormat verifies the JSON request format sent to hold service 325 + // This test would have caught the "partNumber" vs "part_number" bug 326 + func TestCompleteMultipartUpload_JSONFormat(t *testing.T) { 327 + var capturedBody map[string]any 328 + 329 + // Mock hold service that captures the request body 330 + holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 331 + if !strings.Contains(r.URL.Path, atproto.HoldCompleteUpload) { 332 + t.Errorf("Wrong endpoint called: %s", r.URL.Path) 333 + } 334 + 335 + // Capture request body 336 + var body map[string]any 337 + if err := json.NewDecoder(r.Body).Decode(&body); err != nil { 338 + t.Errorf("Failed to decode request body: %v", err) 339 + } 340 + capturedBody = body 341 + 342 + w.Header().Set("Content-Type", "application/json") 343 + w.WriteHeader(http.StatusOK) 344 + w.Write([]byte(`{}`)) 345 + })) 346 + defer holdServer.Close() 229 347 230 - expiry, err := auth.ParseJWTExpiry(testToken) 348 + // Create store with mocked hold URL 349 + ctx := &RegistryContext{ 350 + DID: "did:plc:test", 351 + HoldDID: "did:web:hold.example.com", 352 + PDSEndpoint: "https://pds.example.com", 353 + Repository: "test-repo", 354 + ServiceToken: "test-service-token", // Service token from middleware 355 + } 356 + store := NewProxyBlobStore(ctx) 357 + store.holdURL = holdServer.URL 358 + 359 + // Call completeMultipartUpload 360 + parts := []CompletedPart{ 361 + {PartNumber: 1, ETag: "etag-1"}, 362 + {PartNumber: 2, ETag: "etag-2"}, 363 + } 364 + err := store.completeMultipartUpload(context.Background(), "sha256:abc123", "upload-id-xyz", parts) 231 365 if err != nil { 232 - t.Fatalf("ParseJWTExpiry failed: %v", err) 366 + t.Fatalf("completeMultipartUpload failed: %v", err) 233 367 } 234 368 235 - // Verify expiry is close to what we set (within 1 second tolerance) 236 - expectedExpiry := time.Unix(futureTime, 0) 237 - diff := expiry.Sub(expectedExpiry) 238 - if diff < -time.Second || diff > time.Second { 239 - t.Errorf("Expiry mismatch: expected %v, got %v", expectedExpiry, expiry) 369 + // Verify JSON format 370 + if capturedBody == nil { 371 + t.Fatal("No request body was captured") 372 + } 373 + 374 + // Check top-level fields 375 + if uploadID, ok := capturedBody["uploadId"].(string); !ok || uploadID != "upload-id-xyz" { 376 + t.Errorf("Expected uploadId='upload-id-xyz', got %v", capturedBody["uploadId"]) 377 + } 378 + if digest, ok := capturedBody["digest"].(string); !ok || digest != "sha256:abc123" { 379 + t.Errorf("Expected digest='sha256:abc123', got %v", capturedBody["digest"]) 380 + } 381 + 382 + // Check parts array 383 + partsArray, ok := capturedBody["parts"].([]any) 384 + if !ok { 385 + t.Fatalf("Expected parts to be array, got %T", capturedBody["parts"]) 386 + } 387 + if len(partsArray) != 2 { 388 + t.Fatalf("Expected 2 parts, got %d", len(partsArray)) 389 + } 390 + 391 + // Verify first part has "part_number" (not "partNumber") 392 + part0, ok := partsArray[0].(map[string]any) 393 + if !ok { 394 + t.Fatalf("Expected part to be object, got %T", partsArray[0]) 395 + } 396 + 397 + // THIS IS THE KEY CHECK - would have caught the bug 398 + if _, hasPartNumber := part0["partNumber"]; hasPartNumber { 399 + t.Error("Found 'partNumber' (camelCase) - should be 'part_number' (snake_case)") 400 + } 401 + if partNum, ok := part0["part_number"].(float64); !ok || int(partNum) != 1 { 402 + t.Errorf("Expected part_number=1, got %v", part0["part_number"]) 403 + } 404 + if etag, ok := part0["etag"].(string); !ok || etag != "etag-1" { 405 + t.Errorf("Expected etag='etag-1', got %v", part0["etag"]) 240 406 } 241 407 } 242 408 243 - // TestParseJWTExpiry_InvalidToken tests error handling for invalid tokens 244 - func TestParseJWTExpiry_InvalidToken(t *testing.T) { 409 + // TestGet_UsesPresignedURLDirectly verifies that Get() doesn't add auth headers to presigned URLs 410 + // This test would have caught the presigned URL authentication bug 411 + func TestGet_UsesPresignedURLDirectly(t *testing.T) { 412 + blobData := []byte("test blob content") 413 + var s3ReceivedAuthHeader string 414 + 415 + // Mock S3 server that rejects requests with Authorization header 416 + s3Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 417 + s3ReceivedAuthHeader = r.Header.Get("Authorization") 418 + 419 + // Presigned URLs should NOT have Authorization header 420 + if s3ReceivedAuthHeader != "" { 421 + t.Errorf("S3 received Authorization header: %s (should be empty for presigned URLs)", s3ReceivedAuthHeader) 422 + w.WriteHeader(http.StatusForbidden) 423 + w.Write([]byte(`<?xml version="1.0"?><Error><Code>SignatureDoesNotMatch</Code></Error>`)) 424 + return 425 + } 426 + 427 + // Return blob data 428 + w.WriteHeader(http.StatusOK) 429 + w.Write(blobData) 430 + })) 431 + defer s3Server.Close() 432 + 433 + // Mock hold service that returns presigned S3 URL 434 + holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 435 + // Return presigned URL pointing to S3 server 436 + w.Header().Set("Content-Type", "application/json") 437 + w.WriteHeader(http.StatusOK) 438 + resp := map[string]string{ 439 + "url": s3Server.URL + "/blob?X-Amz-Signature=fake-signature", 440 + } 441 + json.NewEncoder(w).Encode(resp) 442 + })) 443 + defer holdServer.Close() 444 + 445 + // Create store with service token in context 446 + ctx := &RegistryContext{ 447 + DID: "did:plc:test", 448 + HoldDID: "did:web:hold.example.com", 449 + PDSEndpoint: "https://pds.example.com", 450 + Repository: "test-repo", 451 + ServiceToken: "test-service-token", // Service token from middleware 452 + } 453 + store := NewProxyBlobStore(ctx) 454 + store.holdURL = holdServer.URL 455 + 456 + // Call Get() 457 + dgst := digest.FromBytes(blobData) 458 + retrieved, err := store.Get(context.Background(), dgst) 459 + if err != nil { 460 + t.Fatalf("Get() failed: %v", err) 461 + } 462 + 463 + // Verify correct data was retrieved 464 + if string(retrieved) != string(blobData) { 465 + t.Errorf("Expected data=%s, got %s", string(blobData), string(retrieved)) 466 + } 467 + 468 + // Verify S3 received NO Authorization header 469 + if s3ReceivedAuthHeader != "" { 470 + t.Errorf("S3 should not receive Authorization header for presigned URLs, got: %s", s3ReceivedAuthHeader) 471 + } 472 + } 473 + 474 + // TestOpen_UsesPresignedURLDirectly verifies that Open() doesn't add auth headers to presigned URLs 475 + // This test would have caught the presigned URL authentication bug 476 + func TestOpen_UsesPresignedURLDirectly(t *testing.T) { 477 + blobData := []byte("test blob stream content") 478 + var s3ReceivedAuthHeader string 479 + 480 + // Mock S3 server 481 + s3Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 482 + s3ReceivedAuthHeader = r.Header.Get("Authorization") 483 + 484 + // Presigned URLs should NOT have Authorization header 485 + if s3ReceivedAuthHeader != "" { 486 + t.Errorf("S3 received Authorization header: %s (should be empty)", s3ReceivedAuthHeader) 487 + w.WriteHeader(http.StatusForbidden) 488 + return 489 + } 490 + 491 + w.WriteHeader(http.StatusOK) 492 + w.Write(blobData) 493 + })) 494 + defer s3Server.Close() 495 + 496 + // Mock hold service 497 + holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 498 + w.Header().Set("Content-Type", "application/json") 499 + w.WriteHeader(http.StatusOK) 500 + json.NewEncoder(w).Encode(map[string]string{ 501 + "url": s3Server.URL + "/blob?X-Amz-Signature=fake", 502 + }) 503 + })) 504 + defer holdServer.Close() 505 + 506 + // Create store with service token in context 507 + ctx := &RegistryContext{ 508 + DID: "did:plc:test", 509 + HoldDID: "did:web:hold.example.com", 510 + PDSEndpoint: "https://pds.example.com", 511 + Repository: "test-repo", 512 + ServiceToken: "test-service-token", // Service token from middleware 513 + } 514 + store := NewProxyBlobStore(ctx) 515 + store.holdURL = holdServer.URL 516 + 517 + // Call Open() 518 + dgst := digest.FromBytes(blobData) 519 + reader, err := store.Open(context.Background(), dgst) 520 + if err != nil { 521 + t.Fatalf("Open() failed: %v", err) 522 + } 523 + defer reader.Close() 524 + 525 + // Verify S3 received NO Authorization header 526 + if s3ReceivedAuthHeader != "" { 527 + t.Errorf("S3 should not receive Authorization header for presigned URLs, got: %s", s3ReceivedAuthHeader) 528 + } 529 + } 530 + 531 + // TestMultipartEndpoints_CorrectURLs verifies all multipart XRPC endpoints use correct URLs 532 + // This would have caught the old com.atproto.repo.uploadBlob vs new io.atcr.hold.* endpoints 533 + func TestMultipartEndpoints_CorrectURLs(t *testing.T) { 245 534 tests := []struct { 246 - name string 247 - token string 535 + name string 536 + testFunc func(*ProxyBlobStore) error 537 + expectedPath string 248 538 }{ 249 - {"empty token", ""}, 250 - {"single part", "header"}, 251 - {"two parts", "header.payload"}, 252 - {"invalid base64 payload", "header.!!!.signature"}, 253 - {"missing exp claim", "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(`{"sub":"test"}`) + ".sig"}, 539 + { 540 + name: "startMultipartUpload", 541 + testFunc: func(store *ProxyBlobStore) error { 542 + _, err := store.startMultipartUpload(context.Background(), "sha256:test") 543 + return err 544 + }, 545 + expectedPath: atproto.HoldInitiateUpload, 546 + }, 547 + { 548 + name: "getPartUploadInfo", 549 + testFunc: func(store *ProxyBlobStore) error { 550 + _, err := store.getPartUploadInfo(context.Background(), "sha256:test", "upload-123", 1) 551 + return err 552 + }, 553 + expectedPath: atproto.HoldGetPartUploadURL, 554 + }, 555 + { 556 + name: "completeMultipartUpload", 557 + testFunc: func(store *ProxyBlobStore) error { 558 + parts := []CompletedPart{{PartNumber: 1, ETag: "etag1"}} 559 + return store.completeMultipartUpload(context.Background(), "sha256:test", "upload-123", parts) 560 + }, 561 + expectedPath: atproto.HoldCompleteUpload, 562 + }, 563 + { 564 + name: "abortMultipartUpload", 565 + testFunc: func(store *ProxyBlobStore) error { 566 + return store.abortMultipartUpload(context.Background(), "sha256:test", "upload-123") 567 + }, 568 + expectedPath: atproto.HoldAbortUpload, 569 + }, 254 570 } 255 571 256 572 for _, tt := range tests { 257 573 t.Run(tt.name, func(t *testing.T) { 258 - _, err := auth.ParseJWTExpiry(tt.token) 259 - if err == nil { 260 - t.Error("Expected error for invalid token") 574 + var capturedPath string 575 + 576 + // Mock hold service that captures request path 577 + holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 578 + capturedPath = r.URL.Path 579 + 580 + // Return success response 581 + w.Header().Set("Content-Type", "application/json") 582 + w.WriteHeader(http.StatusOK) 583 + resp := map[string]string{ 584 + "uploadId": "test-upload-id", 585 + "url": "https://s3.example.com/presigned", 586 + } 587 + json.NewEncoder(w).Encode(resp) 588 + })) 589 + defer holdServer.Close() 590 + 591 + // Create store with service token in context 592 + ctx := &RegistryContext{ 593 + DID: "did:plc:test", 594 + HoldDID: "did:web:hold.example.com", 595 + PDSEndpoint: "https://pds.example.com", 596 + Repository: "test-repo", 597 + ServiceToken: "test-service-token", // Service token from middleware 598 + } 599 + store := NewProxyBlobStore(ctx) 600 + store.holdURL = holdServer.URL 601 + 602 + // Call the function 603 + _ = tt.testFunc(store) // Ignore error, we just care about the URL 604 + 605 + // Verify correct endpoint was called 606 + if capturedPath != tt.expectedPath { 607 + t.Errorf("Expected endpoint %s, got %s", tt.expectedPath, capturedPath) 608 + } 609 + 610 + // Verify it's NOT the old endpoint 611 + if strings.Contains(capturedPath, "com.atproto.repo.uploadBlob") { 612 + t.Error("Still using old com.atproto.repo.uploadBlob endpoint!") 261 613 } 262 614 }) 263 615 } 264 616 } 265 - 266 - // Note: Tests for doAuthenticatedRequest, Get, Open, completeMultipartUpload, etc. 267 - // require complex dependency mocking (OAuth refresher, PDS resolution, HoldAuthorizer). 268 - // These should be tested at the integration level with proper infrastructure. 269 - // 270 - // The current unit tests cover: 271 - // - Global service token cache (auth.GetServiceToken, auth.SetServiceToken, etc.) 272 - // - URL resolution (atproto.ResolveHoldURL) 273 - // - JWT parsing (auth.ParseJWTExpiry) 274 - // - Store construction (NewProxyBlobStore)
+74 -39
pkg/appview/storage/routing_repository.go
··· 6 6 7 7 import ( 8 8 "context" 9 - "database/sql" 10 9 "log/slog" 10 + "sync" 11 11 12 - "atcr.io/pkg/auth" 13 12 "github.com/distribution/distribution/v3" 14 - "github.com/distribution/reference" 15 13 ) 16 14 17 - // RoutingRepository routes manifests to ATProto and blobs to external hold service. 18 - // The registry (AppView) is stateless and NEVER stores blobs locally. 19 - // A new instance is created per HTTP request - no caching or synchronization needed. 15 + // RoutingRepository routes manifests to ATProto and blobs to external hold service 16 + // The registry (AppView) is stateless and NEVER stores blobs locally 20 17 type RoutingRepository struct { 21 18 distribution.Repository 22 - userCtx *auth.UserContext 23 - sqlDB *sql.DB 19 + Ctx *RegistryContext // All context and services (exported for token updates) 20 + mu sync.Mutex // Protects manifestStore and blobStore 21 + manifestStore *ManifestStore // Cached manifest store instance 22 + blobStore *ProxyBlobStore // Cached blob store instance 24 23 } 25 24 26 25 // NewRoutingRepository creates a new routing repository 27 - func NewRoutingRepository(baseRepo distribution.Repository, userCtx *auth.UserContext, sqlDB *sql.DB) *RoutingRepository { 26 + func NewRoutingRepository(baseRepo distribution.Repository, ctx *RegistryContext) *RoutingRepository { 28 27 return &RoutingRepository{ 29 28 Repository: baseRepo, 30 - userCtx: userCtx, 31 - sqlDB: sqlDB, 29 + Ctx: ctx, 32 30 } 33 31 } 34 32 35 33 // Manifests returns the ATProto-backed manifest service 36 34 func (r *RoutingRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { 37 - // blobStore used to fetch labels from th 38 - blobStore := r.Blobs(ctx) 39 - return NewManifestStore(r.userCtx, blobStore, r.sqlDB), nil 35 + r.mu.Lock() 36 + // Create or return cached manifest store 37 + if r.manifestStore == nil { 38 + // Ensure blob store is created first (needed for label extraction during push) 39 + // Release lock while calling Blobs to avoid deadlock 40 + r.mu.Unlock() 41 + blobStore := r.Blobs(ctx) 42 + r.mu.Lock() 43 + 44 + // Double-check after reacquiring lock (another goroutine might have set it) 45 + if r.manifestStore == nil { 46 + r.manifestStore = NewManifestStore(r.Ctx, blobStore) 47 + } 48 + } 49 + manifestStore := r.manifestStore 50 + r.mu.Unlock() 51 + 52 + return manifestStore, nil 40 53 } 41 54 42 55 // Blobs returns a proxy blob store that routes to external hold service 56 + // The registry (AppView) NEVER stores blobs locally - all blobs go through hold service 43 57 func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore { 44 - // Resolve hold DID: pull uses DB lookup, push uses profile discovery 45 - holdDID, err := r.userCtx.ResolveHoldDID(ctx, r.sqlDB) 46 - if err != nil { 47 - slog.Warn("Failed to resolve hold DID", "component", "storage/blobs", "error", err) 48 - holdDID = r.userCtx.TargetHoldDID 58 + r.mu.Lock() 59 + // Return cached blob store if available 60 + if r.blobStore != nil { 61 + blobStore := r.blobStore 62 + r.mu.Unlock() 63 + slog.Debug("Returning cached blob store", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository) 64 + return blobStore 65 + } 66 + 67 + // Determine if this is a pull (GET) or push (PUT/POST/HEAD/etc) operation 68 + // Pull operations use the historical hold DID from the database (blobs are where they were pushed) 69 + // Push operations use the discovery-based hold DID from user's profile/default 70 + // This allows users to change their default hold and have new pushes go there 71 + isPull := false 72 + if method, ok := ctx.Value("http.request.method").(string); ok { 73 + isPull = method == "GET" 74 + } 75 + 76 + holdDID := r.Ctx.HoldDID // Default to discovery-based DID 77 + holdSource := "discovery" 78 + 79 + // Only query database for pull operations 80 + if isPull && r.Ctx.Database != nil { 81 + // Query database for the latest manifest's hold DID 82 + if dbHoldDID, err := r.Ctx.Database.GetLatestHoldDIDForRepo(r.Ctx.DID, r.Ctx.Repository); err == nil && dbHoldDID != "" { 83 + // Use hold DID from database (pull case - use historical reference) 84 + holdDID = dbHoldDID 85 + holdSource = "database" 86 + slog.Debug("Using hold from database manifest (pull)", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", dbHoldDID) 87 + } else if err != nil { 88 + // Log error but don't fail - fall back to discovery-based DID 89 + slog.Warn("Failed to query database for hold DID", "component", "storage/blobs", "error", err) 90 + } 91 + // If dbHoldDID is empty (no manifests yet), fall through to use discovery-based DID 49 92 } 50 93 51 94 if holdDID == "" { 52 - panic("hold DID not set - ensure default_hold_did is configured in middleware") 95 + // This should never happen if middleware is configured correctly 96 + panic("hold DID not set in RegistryContext - ensure default_hold_did is configured in middleware") 53 97 } 54 98 55 - slog.Debug("Using hold DID for blobs", "component", "storage/blobs", "did", r.userCtx.TargetOwnerDID, "repo", r.userCtx.TargetRepo, "hold", holdDID, "action", r.userCtx.Action.String()) 99 + slog.Debug("Using hold DID for blobs", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID, "source", holdSource) 100 + 101 + // Update context with the correct hold DID (may be from database or discovered) 102 + r.Ctx.HoldDID = holdDID 56 103 57 - return NewProxyBlobStore(r.userCtx) 104 + // Create and cache proxy blob store 105 + r.blobStore = NewProxyBlobStore(r.Ctx) 106 + blobStore := r.blobStore 107 + r.mu.Unlock() 108 + return blobStore 58 109 } 59 110 60 111 // Tags returns the tag service 61 112 // Tags are stored in ATProto as io.atcr.tag records 62 113 func (r *RoutingRepository) Tags(ctx context.Context) distribution.TagService { 63 - return NewTagStore(r.userCtx.GetATProtoClient(), r.userCtx.TargetRepo) 64 - } 65 - 66 - // Named returns a reference to the repository name. 67 - // If the base repository is set, it delegates to the base. 68 - // Otherwise, it constructs a name from the user context. 69 - func (r *RoutingRepository) Named() reference.Named { 70 - if r.Repository != nil { 71 - return r.Repository.Named() 72 - } 73 - // Construct from user context 74 - name, err := reference.WithName(r.userCtx.TargetRepo) 75 - if err != nil { 76 - // Fallback: return a simple reference 77 - name, _ = reference.WithName("unknown") 78 - } 79 - return name 114 + return NewTagStore(r.Ctx.ATProtoClient, r.Ctx.Repository) 80 115 }
+301 -179
pkg/appview/storage/routing_repository_test.go
··· 2 2 3 3 import ( 4 4 "context" 5 + "sync" 5 6 "testing" 6 7 8 + "github.com/distribution/distribution/v3" 7 9 "github.com/stretchr/testify/assert" 8 10 "github.com/stretchr/testify/require" 9 11 10 12 "atcr.io/pkg/atproto" 11 - "atcr.io/pkg/auth" 12 13 ) 13 14 14 - // mockUserContext creates a mock auth.UserContext for testing. 15 - // It sets up both the user identity and target info, and configures 16 - // test helpers to bypass network calls. 17 - func mockUserContext(did, authMethod, httpMethod, targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID string) *auth.UserContext { 18 - userCtx := auth.NewUserContext(did, authMethod, httpMethod, nil) 19 - userCtx.SetTarget(targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID) 20 - 21 - // Bypass PDS resolution (avoids network calls) 22 - userCtx.SetPDSForTest(targetOwnerHandle, targetOwnerPDS) 23 - 24 - // Set up mock authorizer that allows access 25 - userCtx.SetAuthorizerForTest(auth.NewMockHoldAuthorizer()) 15 + // mockDatabase is a simple mock for testing 16 + type mockDatabase struct { 17 + holdDID string 18 + err error 19 + } 26 20 27 - // Set default hold DID for push resolution 28 - userCtx.SetDefaultHoldDIDForTest(targetHoldDID) 21 + func (m *mockDatabase) IncrementPullCount(did, repository string) error { 22 + return nil 23 + } 29 24 30 - return userCtx 25 + func (m *mockDatabase) IncrementPushCount(did, repository string) error { 26 + return nil 31 27 } 32 28 33 - // mockUserContextWithToken creates a mock UserContext with a pre-populated service token. 34 - func mockUserContextWithToken(did, authMethod, httpMethod, targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID, serviceToken string) *auth.UserContext { 35 - userCtx := mockUserContext(did, authMethod, httpMethod, targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID) 36 - userCtx.SetServiceTokenForTest(targetHoldDID, serviceToken) 37 - return userCtx 29 + func (m *mockDatabase) GetLatestHoldDIDForRepo(did, repository string) (string, error) { 30 + if m.err != nil { 31 + return "", m.err 32 + } 33 + return m.holdDID, nil 38 34 } 39 35 40 36 func TestNewRoutingRepository(t *testing.T) { 41 - userCtx := mockUserContext( 42 - "did:plc:test123", // authenticated user 43 - "oauth", // auth method 44 - "GET", // HTTP method 45 - "did:plc:test123", // target owner 46 - "test.handle", // target owner handle 47 - "https://pds.example.com", // target owner PDS 48 - "debian", // repository 49 - "did:web:hold01.atcr.io", // hold DID 50 - ) 37 + ctx := &RegistryContext{ 38 + DID: "did:plc:test123", 39 + Repository: "debian", 40 + HoldDID: "did:web:hold01.atcr.io", 41 + ATProtoClient: &atproto.Client{}, 42 + } 51 43 52 - repo := NewRoutingRepository(nil, userCtx, nil) 44 + repo := NewRoutingRepository(nil, ctx) 53 45 54 - if repo.userCtx.TargetOwnerDID != "did:plc:test123" { 55 - t.Errorf("Expected TargetOwnerDID %q, got %q", "did:plc:test123", repo.userCtx.TargetOwnerDID) 46 + if repo.Ctx.DID != "did:plc:test123" { 47 + t.Errorf("Expected DID %q, got %q", "did:plc:test123", repo.Ctx.DID) 56 48 } 57 49 58 - if repo.userCtx.TargetRepo != "debian" { 59 - t.Errorf("Expected TargetRepo %q, got %q", "debian", repo.userCtx.TargetRepo) 50 + if repo.Ctx.Repository != "debian" { 51 + t.Errorf("Expected repository %q, got %q", "debian", repo.Ctx.Repository) 52 + } 53 + 54 + if repo.manifestStore != nil { 55 + t.Error("Expected manifestStore to be nil initially") 60 56 } 61 57 62 - if repo.userCtx.TargetHoldDID != "did:web:hold01.atcr.io" { 63 - t.Errorf("Expected TargetHoldDID %q, got %q", "did:web:hold01.atcr.io", repo.userCtx.TargetHoldDID) 58 + if repo.blobStore != nil { 59 + t.Error("Expected blobStore to be nil initially") 64 60 } 65 61 } 66 62 67 63 // TestRoutingRepository_Manifests tests the Manifests() method 68 64 func TestRoutingRepository_Manifests(t *testing.T) { 69 - userCtx := mockUserContext( 70 - "did:plc:test123", 71 - "oauth", 72 - "GET", 73 - "did:plc:test123", 74 - "test.handle", 75 - "https://pds.example.com", 76 - "myapp", 77 - "did:web:hold01.atcr.io", 78 - ) 65 + ctx := &RegistryContext{ 66 + DID: "did:plc:test123", 67 + Repository: "myapp", 68 + HoldDID: "did:web:hold01.atcr.io", 69 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 70 + } 79 71 80 - repo := NewRoutingRepository(nil, userCtx, nil) 72 + repo := NewRoutingRepository(nil, ctx) 81 73 manifestService, err := repo.Manifests(context.Background()) 82 74 83 75 require.NoError(t, err) 84 76 assert.NotNil(t, manifestService) 77 + 78 + // Verify the manifest store is cached 79 + assert.NotNil(t, repo.manifestStore, "manifest store should be cached") 80 + 81 + // Call again and verify we get the same instance 82 + manifestService2, err := repo.Manifests(context.Background()) 83 + require.NoError(t, err) 84 + assert.Same(t, manifestService, manifestService2, "should return cached manifest store") 85 85 } 86 86 87 - // TestRoutingRepository_Blobs tests the Blobs() method 88 - func TestRoutingRepository_Blobs(t *testing.T) { 89 - userCtx := mockUserContext( 90 - "did:plc:test123", 91 - "oauth", 92 - "GET", 93 - "did:plc:test123", 94 - "test.handle", 95 - "https://pds.example.com", 96 - "myapp", 97 - "did:web:hold01.atcr.io", 98 - ) 87 + // TestRoutingRepository_ManifestStoreCaching tests that manifest store is cached 88 + func TestRoutingRepository_ManifestStoreCaching(t *testing.T) { 89 + ctx := &RegistryContext{ 90 + DID: "did:plc:test123", 91 + Repository: "myapp", 92 + HoldDID: "did:web:hold01.atcr.io", 93 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 94 + } 99 95 100 - repo := NewRoutingRepository(nil, userCtx, nil) 96 + repo := NewRoutingRepository(nil, ctx) 97 + 98 + // First call creates the store 99 + store1, err := repo.Manifests(context.Background()) 100 + require.NoError(t, err) 101 + assert.NotNil(t, store1) 102 + 103 + // Second call returns cached store 104 + store2, err := repo.Manifests(context.Background()) 105 + require.NoError(t, err) 106 + assert.Same(t, store1, store2, "should return cached manifest store instance") 107 + 108 + // Verify internal cache 109 + assert.NotNil(t, repo.manifestStore) 110 + } 111 + 112 + // TestRoutingRepository_Blobs_PullUsesDatabase tests that GET (pull) uses database hold DID 113 + func TestRoutingRepository_Blobs_PullUsesDatabase(t *testing.T) { 114 + dbHoldDID := "did:web:database.hold.io" 115 + discoveryHoldDID := "did:web:discovery.hold.io" 116 + 117 + ctx := &RegistryContext{ 118 + DID: "did:plc:test123", 119 + Repository: "myapp", 120 + HoldDID: discoveryHoldDID, // Discovery-based hold (should be overridden for pull) 121 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 122 + Database: &mockDatabase{holdDID: dbHoldDID}, 123 + } 124 + 125 + repo := NewRoutingRepository(nil, ctx) 126 + 127 + // Create context with GET method (pull operation) 128 + pullCtx := context.WithValue(context.Background(), "http.request.method", "GET") 129 + blobStore := repo.Blobs(pullCtx) 130 + 131 + assert.NotNil(t, blobStore) 132 + // Verify the hold DID was updated to use the database value for pull 133 + assert.Equal(t, dbHoldDID, repo.Ctx.HoldDID, "pull (GET) should use database hold DID") 134 + } 135 + 136 + // TestRoutingRepository_Blobs_PushUsesDiscovery tests that push operations use discovery hold DID 137 + func TestRoutingRepository_Blobs_PushUsesDiscovery(t *testing.T) { 138 + dbHoldDID := "did:web:database.hold.io" 139 + discoveryHoldDID := "did:web:discovery.hold.io" 140 + 141 + testCases := []struct { 142 + name string 143 + method string 144 + }{ 145 + {"PUT", "PUT"}, 146 + {"POST", "POST"}, 147 + {"HEAD", "HEAD"}, 148 + {"PATCH", "PATCH"}, 149 + {"DELETE", "DELETE"}, 150 + } 151 + 152 + for _, tc := range testCases { 153 + t.Run(tc.name, func(t *testing.T) { 154 + ctx := &RegistryContext{ 155 + DID: "did:plc:test123", 156 + Repository: "myapp-" + tc.method, // Unique repo to avoid caching 157 + HoldDID: discoveryHoldDID, 158 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 159 + Database: &mockDatabase{holdDID: dbHoldDID}, 160 + } 161 + 162 + repo := NewRoutingRepository(nil, ctx) 163 + 164 + // Create context with push method 165 + pushCtx := context.WithValue(context.Background(), "http.request.method", tc.method) 166 + blobStore := repo.Blobs(pushCtx) 167 + 168 + assert.NotNil(t, blobStore) 169 + // Verify the hold DID remains the discovery-based one for push operations 170 + assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "%s should use discovery hold DID, not database", tc.method) 171 + }) 172 + } 173 + } 174 + 175 + // TestRoutingRepository_Blobs_NoMethodUsesDiscovery tests that missing method defaults to discovery 176 + func TestRoutingRepository_Blobs_NoMethodUsesDiscovery(t *testing.T) { 177 + dbHoldDID := "did:web:database.hold.io" 178 + discoveryHoldDID := "did:web:discovery.hold.io" 179 + 180 + ctx := &RegistryContext{ 181 + DID: "did:plc:test123", 182 + Repository: "myapp-nomethod", 183 + HoldDID: discoveryHoldDID, 184 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 185 + Database: &mockDatabase{holdDID: dbHoldDID}, 186 + } 187 + 188 + repo := NewRoutingRepository(nil, ctx) 189 + 190 + // Context without HTTP method (shouldn't happen in practice, but test defensive behavior) 101 191 blobStore := repo.Blobs(context.Background()) 102 192 103 193 assert.NotNil(t, blobStore) 194 + // Without method, should default to discovery (safer for push scenarios) 195 + assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "missing method should use discovery hold DID") 196 + } 197 + 198 + // TestRoutingRepository_Blobs_WithoutDatabase tests blob store with discovery-based hold 199 + func TestRoutingRepository_Blobs_WithoutDatabase(t *testing.T) { 200 + discoveryHoldDID := "did:web:discovery.hold.io" 201 + 202 + ctx := &RegistryContext{ 203 + DID: "did:plc:nocache456", 204 + Repository: "uncached-app", 205 + HoldDID: discoveryHoldDID, 206 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:nocache456", ""), 207 + Database: nil, // No database 208 + } 209 + 210 + repo := NewRoutingRepository(nil, ctx) 211 + blobStore := repo.Blobs(context.Background()) 212 + 213 + assert.NotNil(t, blobStore) 214 + // Verify the hold DID remains the discovery-based one 215 + assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "should use discovery-based hold DID") 216 + } 217 + 218 + // TestRoutingRepository_Blobs_DatabaseEmptyFallback tests fallback when database returns empty hold DID 219 + func TestRoutingRepository_Blobs_DatabaseEmptyFallback(t *testing.T) { 220 + discoveryHoldDID := "did:web:discovery.hold.io" 221 + 222 + ctx := &RegistryContext{ 223 + DID: "did:plc:test123", 224 + Repository: "newapp", 225 + HoldDID: discoveryHoldDID, 226 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 227 + Database: &mockDatabase{holdDID: ""}, // Empty string (no manifests yet) 228 + } 229 + 230 + repo := NewRoutingRepository(nil, ctx) 231 + blobStore := repo.Blobs(context.Background()) 232 + 233 + assert.NotNil(t, blobStore) 234 + // Verify the hold DID falls back to discovery-based 235 + assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "should fall back to discovery-based hold DID when database returns empty") 236 + } 237 + 238 + // TestRoutingRepository_BlobStoreCaching tests that blob store is cached 239 + func TestRoutingRepository_BlobStoreCaching(t *testing.T) { 240 + ctx := &RegistryContext{ 241 + DID: "did:plc:test123", 242 + Repository: "myapp", 243 + HoldDID: "did:web:hold01.atcr.io", 244 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 245 + } 246 + 247 + repo := NewRoutingRepository(nil, ctx) 248 + 249 + // First call creates the store 250 + store1 := repo.Blobs(context.Background()) 251 + assert.NotNil(t, store1) 252 + 253 + // Second call returns cached store 254 + store2 := repo.Blobs(context.Background()) 255 + assert.Same(t, store1, store2, "should return cached blob store instance") 256 + 257 + // Verify internal cache 258 + assert.NotNil(t, repo.blobStore) 104 259 } 105 260 106 261 // TestRoutingRepository_Blobs_PanicOnEmptyHoldDID tests panic when hold DID is empty 107 262 func TestRoutingRepository_Blobs_PanicOnEmptyHoldDID(t *testing.T) { 108 - // Create context without default hold and empty target hold 109 - userCtx := auth.NewUserContext("did:plc:emptyholdtest999", "oauth", "GET", nil) 110 - userCtx.SetTarget("did:plc:emptyholdtest999", "test.handle", "https://pds.example.com", "empty-hold-app", "") 111 - userCtx.SetPDSForTest("test.handle", "https://pds.example.com") 112 - userCtx.SetAuthorizerForTest(auth.NewMockHoldAuthorizer()) 113 - // Intentionally NOT setting default hold DID 263 + // Use a unique DID/repo to ensure no cache entry exists 264 + ctx := &RegistryContext{ 265 + DID: "did:plc:emptyholdtest999", 266 + Repository: "empty-hold-app", 267 + HoldDID: "", // Empty hold DID should panic 268 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:emptyholdtest999", ""), 269 + } 114 270 115 - repo := NewRoutingRepository(nil, userCtx, nil) 271 + repo := NewRoutingRepository(nil, ctx) 116 272 117 273 // Should panic with empty hold DID 118 274 assert.Panics(t, func() { ··· 122 278 123 279 // TestRoutingRepository_Tags tests the Tags() method 124 280 func TestRoutingRepository_Tags(t *testing.T) { 125 - userCtx := mockUserContext( 126 - "did:plc:test123", 127 - "oauth", 128 - "GET", 129 - "did:plc:test123", 130 - "test.handle", 131 - "https://pds.example.com", 132 - "myapp", 133 - "did:web:hold01.atcr.io", 134 - ) 281 + ctx := &RegistryContext{ 282 + DID: "did:plc:test123", 283 + Repository: "myapp", 284 + HoldDID: "did:web:hold01.atcr.io", 285 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 286 + } 135 287 136 - repo := NewRoutingRepository(nil, userCtx, nil) 288 + repo := NewRoutingRepository(nil, ctx) 137 289 tagService := repo.Tags(context.Background()) 138 290 139 291 assert.NotNil(t, tagService) 140 292 141 - // Call again and verify we get a fresh instance (no caching) 293 + // Call again and verify we get a new instance (Tags() doesn't cache) 142 294 tagService2 := repo.Tags(context.Background()) 143 295 assert.NotNil(t, tagService2) 296 + // Tags service is not cached, so each call creates a new instance 144 297 } 145 298 146 - // TestRoutingRepository_UserContext tests that UserContext fields are properly set 147 - func TestRoutingRepository_UserContext(t *testing.T) { 148 - testCases := []struct { 149 - name string 150 - httpMethod string 151 - expectedAction auth.RequestAction 152 - }{ 153 - {"GET request is pull", "GET", auth.ActionPull}, 154 - {"HEAD request is pull", "HEAD", auth.ActionPull}, 155 - {"PUT request is push", "PUT", auth.ActionPush}, 156 - {"POST request is push", "POST", auth.ActionPush}, 157 - {"DELETE request is push", "DELETE", auth.ActionPush}, 299 + // TestRoutingRepository_ConcurrentAccess tests concurrent access to cached stores 300 + func TestRoutingRepository_ConcurrentAccess(t *testing.T) { 301 + ctx := &RegistryContext{ 302 + DID: "did:plc:test123", 303 + Repository: "myapp", 304 + HoldDID: "did:web:hold01.atcr.io", 305 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 158 306 } 159 307 160 - for _, tc := range testCases { 161 - t.Run(tc.name, func(t *testing.T) { 162 - userCtx := mockUserContext( 163 - "did:plc:test123", 164 - "oauth", 165 - tc.httpMethod, 166 - "did:plc:test123", 167 - "test.handle", 168 - "https://pds.example.com", 169 - "myapp", 170 - "did:web:hold01.atcr.io", 171 - ) 308 + repo := NewRoutingRepository(nil, ctx) 172 309 173 - repo := NewRoutingRepository(nil, userCtx, nil) 310 + var wg sync.WaitGroup 311 + numGoroutines := 10 174 312 175 - assert.Equal(t, tc.expectedAction, repo.userCtx.Action, "action should match HTTP method") 176 - }) 177 - } 178 - } 313 + // Track all manifest stores returned 314 + manifestStores := make([]distribution.ManifestService, numGoroutines) 315 + blobStores := make([]distribution.BlobStore, numGoroutines) 179 316 180 - // TestRoutingRepository_DifferentHoldDIDs tests routing with different hold DIDs 181 - func TestRoutingRepository_DifferentHoldDIDs(t *testing.T) { 182 - testCases := []struct { 183 - name string 184 - holdDID string 185 - }{ 186 - {"did:web hold", "did:web:hold01.atcr.io"}, 187 - {"did:web with port", "did:web:localhost:8080"}, 188 - {"did:plc hold", "did:plc:xyz123"}, 317 + // Concurrent access to Manifests() 318 + for i := 0; i < numGoroutines; i++ { 319 + wg.Add(1) 320 + go func(index int) { 321 + defer wg.Done() 322 + store, err := repo.Manifests(context.Background()) 323 + require.NoError(t, err) 324 + manifestStores[index] = store 325 + }(i) 189 326 } 190 327 191 - for _, tc := range testCases { 192 - t.Run(tc.name, func(t *testing.T) { 193 - userCtx := mockUserContext( 194 - "did:plc:test123", 195 - "oauth", 196 - "PUT", 197 - "did:plc:test123", 198 - "test.handle", 199 - "https://pds.example.com", 200 - "myapp", 201 - tc.holdDID, 202 - ) 203 - 204 - repo := NewRoutingRepository(nil, userCtx, nil) 205 - blobStore := repo.Blobs(context.Background()) 328 + wg.Wait() 206 329 207 - assert.NotNil(t, blobStore, "should create blob store for %s", tc.holdDID) 208 - }) 330 + // Verify all stores are non-nil (due to race conditions, they may not all be the same instance) 331 + for i := 0; i < numGoroutines; i++ { 332 + assert.NotNil(t, manifestStores[i], "manifest store should not be nil") 209 333 } 210 - } 211 334 212 - // TestRoutingRepository_Named tests the Named() method 213 - func TestRoutingRepository_Named(t *testing.T) { 214 - userCtx := mockUserContext( 215 - "did:plc:test123", 216 - "oauth", 217 - "GET", 218 - "did:plc:test123", 219 - "test.handle", 220 - "https://pds.example.com", 221 - "myapp", 222 - "did:web:hold01.atcr.io", 223 - ) 335 + // After concurrent creation, subsequent calls should return the cached instance 336 + cachedStore, err := repo.Manifests(context.Background()) 337 + require.NoError(t, err) 338 + assert.NotNil(t, cachedStore) 224 339 225 - repo := NewRoutingRepository(nil, userCtx, nil) 340 + // Concurrent access to Blobs() 341 + for i := 0; i < numGoroutines; i++ { 342 + wg.Add(1) 343 + go func(index int) { 344 + defer wg.Done() 345 + blobStores[index] = repo.Blobs(context.Background()) 346 + }(i) 347 + } 226 348 227 - // Named() returns a reference.Named from the base repository 228 - // Since baseRepo is nil, this tests our implementation handles that case 229 - named := repo.Named() 349 + wg.Wait() 350 + 351 + // Verify all stores are non-nil (due to race conditions, they may not all be the same instance) 352 + for i := 0; i < numGoroutines; i++ { 353 + assert.NotNil(t, blobStores[i], "blob store should not be nil") 354 + } 230 355 231 - // With nil base, Named() should return a name constructed from context 232 - assert.NotNil(t, named) 233 - assert.Contains(t, named.Name(), "myapp") 356 + // After concurrent creation, subsequent calls should return the cached instance 357 + cachedBlobStore := repo.Blobs(context.Background()) 358 + assert.NotNil(t, cachedBlobStore) 234 359 } 235 360 236 - // TestATProtoResolveHoldURL tests DID to URL resolution 237 - func TestATProtoResolveHoldURL(t *testing.T) { 238 - tests := []struct { 239 - name string 240 - holdDID string 241 - expected string 242 - }{ 243 - { 244 - name: "did:web simple domain", 245 - holdDID: "did:web:hold01.atcr.io", 246 - expected: "https://hold01.atcr.io", 247 - }, 248 - { 249 - name: "did:web with port (localhost)", 250 - holdDID: "did:web:localhost:8080", 251 - expected: "http://localhost:8080", 252 - }, 253 - } 361 + // TestRoutingRepository_Blobs_PullPriority tests that database hold DID takes priority for pull (GET) 362 + func TestRoutingRepository_Blobs_PullPriority(t *testing.T) { 363 + dbHoldDID := "did:web:database.hold.io" 364 + discoveryHoldDID := "did:web:discovery.hold.io" 254 365 255 - for _, tt := range tests { 256 - t.Run(tt.name, func(t *testing.T) { 257 - result := atproto.ResolveHoldURL(tt.holdDID) 258 - assert.Equal(t, tt.expected, result) 259 - }) 366 + ctx := &RegistryContext{ 367 + DID: "did:plc:test123", 368 + Repository: "myapp-priority", 369 + HoldDID: discoveryHoldDID, // Discovery-based hold 370 + ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""), 371 + Database: &mockDatabase{holdDID: dbHoldDID}, // Database has a different hold DID 260 372 } 373 + 374 + repo := NewRoutingRepository(nil, ctx) 375 + 376 + // For pull (GET), database should take priority 377 + pullCtx := context.WithValue(context.Background(), "http.request.method", "GET") 378 + blobStore := repo.Blobs(pullCtx) 379 + 380 + assert.NotNil(t, blobStore) 381 + // Database hold DID should take priority over discovery for pull operations 382 + assert.Equal(t, dbHoldDID, repo.Ctx.HoldDID, "database hold DID should take priority over discovery for pull (GET)") 261 383 }
+3 -3
pkg/appview/storage/tag_store.go
··· 36 36 return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag} 37 37 } 38 38 39 - var tagRecord atproto.TagRecord 39 + var tagRecord atproto.Tag 40 40 if err := json.Unmarshal(record.Value, &tagRecord); err != nil { 41 41 return distribution.Descriptor{}, fmt.Errorf("failed to unmarshal tag record: %w", err) 42 42 } ··· 91 91 92 92 var tags []string 93 93 for _, record := range records { 94 - var tagRecord atproto.TagRecord 94 + var tagRecord atproto.Tag 95 95 if err := json.Unmarshal(record.Value, &tagRecord); err != nil { 96 96 // Skip invalid records 97 97 continue ··· 116 116 117 117 var tags []string 118 118 for _, record := range records { 119 - var tagRecord atproto.TagRecord 119 + var tagRecord atproto.Tag 120 120 if err := json.Unmarshal(record.Value, &tagRecord); err != nil { 121 121 // Skip invalid records 122 122 continue
+6 -6
pkg/appview/storage/tag_store_test.go
··· 229 229 230 230 for _, tt := range tests { 231 231 t.Run(tt.name, func(t *testing.T) { 232 - var sentTagRecord *atproto.TagRecord 232 + var sentTagRecord *atproto.Tag 233 233 234 234 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { 235 235 if r.Method != "POST" { ··· 254 254 // Parse and verify tag record 255 255 recordData := body["record"].(map[string]any) 256 256 recordBytes, _ := json.Marshal(recordData) 257 - var tagRecord atproto.TagRecord 257 + var tagRecord atproto.Tag 258 258 json.Unmarshal(recordBytes, &tagRecord) 259 259 sentTagRecord = &tagRecord 260 260 ··· 284 284 285 285 if !tt.wantErr && sentTagRecord != nil { 286 286 // Verify the tag record 287 - if sentTagRecord.Type != atproto.TagCollection { 288 - t.Errorf("Type = %v, want %v", sentTagRecord.Type, atproto.TagCollection) 287 + if sentTagRecord.LexiconTypeID != atproto.TagCollection { 288 + t.Errorf("LexiconTypeID = %v, want %v", sentTagRecord.LexiconTypeID, atproto.TagCollection) 289 289 } 290 290 if sentTagRecord.Repository != "myapp" { 291 291 t.Errorf("Repository = %v, want myapp", sentTagRecord.Repository) ··· 295 295 } 296 296 // New records should have manifest field 297 297 expectedURI := atproto.BuildManifestURI("did:plc:test123", tt.digest.String()) 298 - if sentTagRecord.Manifest != expectedURI { 298 + if sentTagRecord.Manifest == nil || *sentTagRecord.Manifest != expectedURI { 299 299 t.Errorf("Manifest = %v, want %v", sentTagRecord.Manifest, expectedURI) 300 300 } 301 301 // New records should NOT have manifestDigest field 302 - if sentTagRecord.ManifestDigest != "" { 302 + if sentTagRecord.ManifestDigest != nil && *sentTagRecord.ManifestDigest != "" { 303 303 t.Errorf("ManifestDigest should be empty for new records, got %v", sentTagRecord.ManifestDigest) 304 304 } 305 305 }
-22
pkg/appview/templates/pages/404.html
··· 1 - {{ define "404" }} 2 - <!DOCTYPE html> 3 - <html lang="en"> 4 - <head> 5 - <title>404 - Lost at Sea | ATCR</title> 6 - {{ template "head" . }} 7 - </head> 8 - <body> 9 - {{ template "nav-simple" . }} 10 - <main class="error-page"> 11 - <div class="error-content"> 12 - <i data-lucide="anchor" class="error-icon"></i> 13 - <div class="error-code">404</div> 14 - <h1>Lost at Sea</h1> 15 - <p>The page you're looking for has drifted into uncharted waters.</p> 16 - <a href="/" class="btn btn-primary">Return to Port</a> 17 - </div> 18 - </main> 19 - <script>lucide.createIcons();</script> 20 - </body> 21 - </html> 22 - {{ end }}
+5 -17
pkg/appview/templates/pages/repository.html
··· 27 27 <!-- Repository Header --> 28 28 <div class="repository-header"> 29 29 <div class="repo-hero"> 30 - <div class="repo-hero-icon-wrapper"> 31 - {{ if .Repository.IconURL }} 32 - <img src="{{ .Repository.IconURL }}" alt="{{ .Repository.Name }}" class="repo-hero-icon"> 33 - {{ else }} 34 - <div class="repo-hero-icon-placeholder">{{ firstChar .Repository.Name }}</div> 35 - {{ end }} 36 - {{ if $.IsOwner }} 37 - <label class="avatar-upload-overlay" for="avatar-upload"> 38 - <i data-lucide="plus"></i> 39 - </label> 40 - <input type="file" id="avatar-upload" accept="image/png,image/jpeg,image/webp" 41 - onchange="uploadAvatar(this, '{{ .Repository.Name }}')" hidden> 42 - {{ end }} 43 - </div> 30 + {{ if .Repository.IconURL }} 31 + <img src="{{ .Repository.IconURL }}" alt="{{ .Repository.Name }}" class="repo-hero-icon"> 32 + {{ else }} 33 + <div class="repo-hero-icon-placeholder">{{ firstChar .Repository.Name }}</div> 34 + {{ end }} 44 35 <div class="repo-hero-info"> 45 36 <h1> 46 37 <a href="/u/{{ .Owner.Handle }}" class="owner-link">{{ .Owner.Handle }}</a> ··· 138 129 <span class="tag-name-large">{{ .Tag.Tag }}</span> 139 130 {{ if .IsMultiArch }} 140 131 <span class="badge-multi">Multi-arch</span> 141 - {{ end }} 142 - {{ if .HasAttestations }} 143 - <span class="badge-attestation"><i data-lucide="shield-check"></i> Attestations</span> 144 132 {{ end }} 145 133 </div> 146 134 <div style="display: flex; gap: 1rem; align-items: center;">
+9
pkg/appview/templates/partials/push-list.html
··· 44 44 </div> 45 45 {{ end }} 46 46 47 + {{ if .HasMore }} 48 + <button class="load-more" 49 + hx-get="/api/recent-pushes?offset={{ .NextOffset }}" 50 + hx-target="#push-list" 51 + hx-swap="beforeend"> 52 + Load More 53 + </button> 54 + {{ end }} 55 + 47 56 {{ if eq (len .Pushes) 0 }} 48 57 <div class="empty-state"> 49 58 <p>No pushes yet. Start using ATCR by pushing your first image!</p>
+65
pkg/appview/utils_test.go
··· 1 + package appview 2 + 3 + import ( 4 + "testing" 5 + 6 + "atcr.io/pkg/atproto" 7 + ) 8 + 9 + func TestResolveHoldURL(t *testing.T) { 10 + tests := []struct { 11 + name string 12 + input string 13 + expected string 14 + }{ 15 + { 16 + name: "DID with HTTPS domain", 17 + input: "did:web:hold.example.com", 18 + expected: "https://hold.example.com", 19 + }, 20 + { 21 + name: "DID with HTTP and port (IP)", 22 + input: "did:web:172.28.0.3:8080", 23 + expected: "http://172.28.0.3:8080", 24 + }, 25 + { 26 + name: "DID with HTTP and port (localhost)", 27 + input: "did:web:127.0.0.1:8080", 28 + expected: "http://127.0.0.1:8080", 29 + }, 30 + { 31 + name: "DID with localhost", 32 + input: "did:web:localhost:8080", 33 + expected: "http://localhost:8080", 34 + }, 35 + { 36 + name: "Already HTTPS URL (passthrough)", 37 + input: "https://hold.example.com", 38 + expected: "https://hold.example.com", 39 + }, 40 + { 41 + name: "Already HTTP URL (passthrough)", 42 + input: "http://172.28.0.3:8080", 43 + expected: "http://172.28.0.3:8080", 44 + }, 45 + { 46 + name: "Plain hostname (fallback to HTTPS)", 47 + input: "hold.example.com", 48 + expected: "https://hold.example.com", 49 + }, 50 + { 51 + name: "DID with subdomain", 52 + input: "did:web:hold01.atcr.io", 53 + expected: "https://hold01.atcr.io", 54 + }, 55 + } 56 + 57 + for _, tt := range tests { 58 + t.Run(tt.name, func(t *testing.T) { 59 + result := atproto.ResolveHoldURL(tt.input) 60 + if result != tt.expected { 61 + t.Errorf("ResolveHoldURL(%q) = %q, want %q", tt.input, result, tt.expected) 62 + } 63 + }) 64 + } 65 + }
+2958 -126
pkg/atproto/cbor_gen.go
··· 8 8 "math" 9 9 "sort" 10 10 11 + util "github.com/bluesky-social/indigo/lex/util" 11 12 cid "github.com/ipfs/go-cid" 12 13 cbg "github.com/whyrusleeping/cbor-gen" 13 14 xerrors "golang.org/x/xerrors" ··· 18 19 var _ = math.E 19 20 var _ = sort.Sort 20 21 21 - func (t *CrewRecord) MarshalCBOR(w io.Writer) error { 22 + func (t *Manifest) MarshalCBOR(w io.Writer) error { 22 23 if t == nil { 23 24 _, err := w.Write(cbg.CborNull) 24 25 return err 25 26 } 26 27 27 28 cw := cbg.NewCborWriter(w) 29 + fieldCount := 14 28 30 29 - if _, err := cw.Write([]byte{165}); err != nil { 30 - return err 31 + if t.Annotations == nil { 32 + fieldCount-- 33 + } 34 + 35 + if t.Config == nil { 36 + fieldCount-- 37 + } 38 + 39 + if t.HoldDid == nil { 40 + fieldCount-- 31 41 } 32 42 33 - // t.Role (string) (string) 34 - if len("role") > 8192 { 35 - return xerrors.Errorf("Value in field \"role\" was too long") 43 + if t.HoldEndpoint == nil { 44 + fieldCount-- 36 45 } 37 46 38 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("role"))); err != nil { 39 - return err 47 + if t.Layers == nil { 48 + fieldCount-- 40 49 } 41 - if _, err := cw.WriteString(string("role")); err != nil { 42 - return err 50 + 51 + if t.ManifestBlob == nil { 52 + fieldCount-- 43 53 } 44 54 45 - if len(t.Role) > 8192 { 46 - return xerrors.Errorf("Value in field t.Role was too long") 55 + if t.Manifests == nil { 56 + fieldCount-- 47 57 } 48 58 49 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Role))); err != nil { 50 - return err 59 + if t.Subject == nil { 60 + fieldCount-- 51 61 } 52 - if _, err := cw.WriteString(string(t.Role)); err != nil { 62 + 63 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 53 64 return err 54 65 } 55 66 56 - // t.Type (string) (string) 67 + // t.LexiconTypeID (string) (string) 57 68 if len("$type") > 8192 { 58 69 return xerrors.Errorf("Value in field \"$type\" was too long") 59 70 } ··· 65 76 return err 66 77 } 67 78 68 - if len(t.Type) > 8192 { 69 - return xerrors.Errorf("Value in field t.Type was too long") 79 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.manifest"))); err != nil { 80 + return err 81 + } 82 + if _, err := cw.WriteString(string("io.atcr.manifest")); err != nil { 83 + return err 84 + } 85 + 86 + // t.Config (atproto.Manifest_BlobReference) (struct) 87 + if t.Config != nil { 88 + 89 + if len("config") > 8192 { 90 + return xerrors.Errorf("Value in field \"config\" was too long") 91 + } 92 + 93 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("config"))); err != nil { 94 + return err 95 + } 96 + if _, err := cw.WriteString(string("config")); err != nil { 97 + return err 98 + } 99 + 100 + if err := t.Config.MarshalCBOR(cw); err != nil { 101 + return err 102 + } 70 103 } 71 104 72 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Type))); err != nil { 105 + // t.Digest (string) (string) 106 + if len("digest") > 8192 { 107 + return xerrors.Errorf("Value in field \"digest\" was too long") 108 + } 109 + 110 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("digest"))); err != nil { 73 111 return err 74 112 } 75 - if _, err := cw.WriteString(string(t.Type)); err != nil { 113 + if _, err := cw.WriteString(string("digest")); err != nil { 76 114 return err 77 115 } 78 116 79 - // t.Member (string) (string) 80 - if len("member") > 8192 { 81 - return xerrors.Errorf("Value in field \"member\" was too long") 117 + if len(t.Digest) > 8192 { 118 + return xerrors.Errorf("Value in field t.Digest was too long") 82 119 } 83 120 84 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("member"))); err != nil { 121 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Digest))); err != nil { 85 122 return err 86 123 } 87 - if _, err := cw.WriteString(string("member")); err != nil { 124 + if _, err := cw.WriteString(string(t.Digest)); err != nil { 88 125 return err 89 126 } 90 127 91 - if len(t.Member) > 8192 { 92 - return xerrors.Errorf("Value in field t.Member was too long") 128 + // t.Layers ([]atproto.Manifest_BlobReference) (slice) 129 + if t.Layers != nil { 130 + 131 + if len("layers") > 8192 { 132 + return xerrors.Errorf("Value in field \"layers\" was too long") 133 + } 134 + 135 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("layers"))); err != nil { 136 + return err 137 + } 138 + if _, err := cw.WriteString(string("layers")); err != nil { 139 + return err 140 + } 141 + 142 + if len(t.Layers) > 8192 { 143 + return xerrors.Errorf("Slice value in field t.Layers was too long") 144 + } 145 + 146 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Layers))); err != nil { 147 + return err 148 + } 149 + for _, v := range t.Layers { 150 + if err := v.MarshalCBOR(cw); err != nil { 151 + return err 152 + } 153 + 154 + } 93 155 } 94 156 95 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Member))); err != nil { 157 + // t.HoldDid (string) (string) 158 + if t.HoldDid != nil { 159 + 160 + if len("holdDid") > 8192 { 161 + return xerrors.Errorf("Value in field \"holdDid\" was too long") 162 + } 163 + 164 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("holdDid"))); err != nil { 165 + return err 166 + } 167 + if _, err := cw.WriteString(string("holdDid")); err != nil { 168 + return err 169 + } 170 + 171 + if t.HoldDid == nil { 172 + if _, err := cw.Write(cbg.CborNull); err != nil { 173 + return err 174 + } 175 + } else { 176 + if len(*t.HoldDid) > 8192 { 177 + return xerrors.Errorf("Value in field t.HoldDid was too long") 178 + } 179 + 180 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.HoldDid))); err != nil { 181 + return err 182 + } 183 + if _, err := cw.WriteString(string(*t.HoldDid)); err != nil { 184 + return err 185 + } 186 + } 187 + } 188 + 189 + // t.Subject (atproto.Manifest_BlobReference) (struct) 190 + if t.Subject != nil { 191 + 192 + if len("subject") > 8192 { 193 + return xerrors.Errorf("Value in field \"subject\" was too long") 194 + } 195 + 196 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("subject"))); err != nil { 197 + return err 198 + } 199 + if _, err := cw.WriteString(string("subject")); err != nil { 200 + return err 201 + } 202 + 203 + if err := t.Subject.MarshalCBOR(cw); err != nil { 204 + return err 205 + } 206 + } 207 + 208 + // t.CreatedAt (string) (string) 209 + if len("createdAt") > 8192 { 210 + return xerrors.Errorf("Value in field \"createdAt\" was too long") 211 + } 212 + 213 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil { 96 214 return err 97 215 } 98 - if _, err := cw.WriteString(string(t.Member)); err != nil { 216 + if _, err := cw.WriteString(string("createdAt")); err != nil { 99 217 return err 100 218 } 101 219 102 - // t.AddedAt (string) (string) 103 - if len("addedAt") > 8192 { 104 - return xerrors.Errorf("Value in field \"addedAt\" was too long") 220 + if len(t.CreatedAt) > 8192 { 221 + return xerrors.Errorf("Value in field t.CreatedAt was too long") 105 222 } 106 223 107 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("addedAt"))); err != nil { 224 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil { 108 225 return err 109 226 } 110 - if _, err := cw.WriteString(string("addedAt")); err != nil { 227 + if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 111 228 return err 112 229 } 113 230 114 - if len(t.AddedAt) > 8192 { 115 - return xerrors.Errorf("Value in field t.AddedAt was too long") 231 + // t.Manifests ([]atproto.Manifest_ManifestReference) (slice) 232 + if t.Manifests != nil { 233 + 234 + if len("manifests") > 8192 { 235 + return xerrors.Errorf("Value in field \"manifests\" was too long") 236 + } 237 + 238 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("manifests"))); err != nil { 239 + return err 240 + } 241 + if _, err := cw.WriteString(string("manifests")); err != nil { 242 + return err 243 + } 244 + 245 + if len(t.Manifests) > 8192 { 246 + return xerrors.Errorf("Slice value in field t.Manifests was too long") 247 + } 248 + 249 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Manifests))); err != nil { 250 + return err 251 + } 252 + for _, v := range t.Manifests { 253 + if err := v.MarshalCBOR(cw); err != nil { 254 + return err 255 + } 256 + 257 + } 116 258 } 117 259 118 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.AddedAt))); err != nil { 260 + // t.MediaType (string) (string) 261 + if len("mediaType") > 8192 { 262 + return xerrors.Errorf("Value in field \"mediaType\" was too long") 263 + } 264 + 265 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mediaType"))); err != nil { 119 266 return err 120 267 } 121 - if _, err := cw.WriteString(string(t.AddedAt)); err != nil { 268 + if _, err := cw.WriteString(string("mediaType")); err != nil { 122 269 return err 123 270 } 124 271 125 - // t.Permissions ([]string) (slice) 126 - if len("permissions") > 8192 { 127 - return xerrors.Errorf("Value in field \"permissions\" was too long") 272 + if len(t.MediaType) > 8192 { 273 + return xerrors.Errorf("Value in field t.MediaType was too long") 128 274 } 129 275 130 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("permissions"))); err != nil { 276 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MediaType))); err != nil { 277 + return err 278 + } 279 + if _, err := cw.WriteString(string(t.MediaType)); err != nil { 280 + return err 281 + } 282 + 283 + // t.Repository (string) (string) 284 + if len("repository") > 8192 { 285 + return xerrors.Errorf("Value in field \"repository\" was too long") 286 + } 287 + 288 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repository"))); err != nil { 131 289 return err 132 290 } 133 - if _, err := cw.WriteString(string("permissions")); err != nil { 291 + if _, err := cw.WriteString(string("repository")); err != nil { 134 292 return err 135 293 } 136 294 137 - if len(t.Permissions) > 8192 { 138 - return xerrors.Errorf("Slice value in field t.Permissions was too long") 295 + if len(t.Repository) > 8192 { 296 + return xerrors.Errorf("Value in field t.Repository was too long") 139 297 } 140 298 141 - if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Permissions))); err != nil { 299 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repository))); err != nil { 142 300 return err 143 301 } 144 - for _, v := range t.Permissions { 145 - if len(v) > 8192 { 146 - return xerrors.Errorf("Value in field v was too long") 302 + if _, err := cw.WriteString(string(t.Repository)); err != nil { 303 + return err 304 + } 305 + 306 + // t.Annotations (atproto.Manifest_Annotations) (struct) 307 + if t.Annotations != nil { 308 + 309 + if len("annotations") > 8192 { 310 + return xerrors.Errorf("Value in field \"annotations\" was too long") 311 + } 312 + 313 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("annotations"))); err != nil { 314 + return err 315 + } 316 + if _, err := cw.WriteString(string("annotations")); err != nil { 317 + return err 318 + } 319 + 320 + if err := t.Annotations.MarshalCBOR(cw); err != nil { 321 + return err 322 + } 323 + } 324 + 325 + // t.HoldEndpoint (string) (string) 326 + if t.HoldEndpoint != nil { 327 + 328 + if len("holdEndpoint") > 8192 { 329 + return xerrors.Errorf("Value in field \"holdEndpoint\" was too long") 330 + } 331 + 332 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("holdEndpoint"))); err != nil { 333 + return err 334 + } 335 + if _, err := cw.WriteString(string("holdEndpoint")); err != nil { 336 + return err 337 + } 338 + 339 + if t.HoldEndpoint == nil { 340 + if _, err := cw.Write(cbg.CborNull); err != nil { 341 + return err 342 + } 343 + } else { 344 + if len(*t.HoldEndpoint) > 8192 { 345 + return xerrors.Errorf("Value in field t.HoldEndpoint was too long") 346 + } 347 + 348 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.HoldEndpoint))); err != nil { 349 + return err 350 + } 351 + if _, err := cw.WriteString(string(*t.HoldEndpoint)); err != nil { 352 + return err 353 + } 147 354 } 355 + } 148 356 149 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 357 + // t.ManifestBlob (util.LexBlob) (struct) 358 + if t.ManifestBlob != nil { 359 + 360 + if len("manifestBlob") > 8192 { 361 + return xerrors.Errorf("Value in field \"manifestBlob\" was too long") 362 + } 363 + 364 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("manifestBlob"))); err != nil { 150 365 return err 151 366 } 152 - if _, err := cw.WriteString(string(v)); err != nil { 367 + if _, err := cw.WriteString(string("manifestBlob")); err != nil { 153 368 return err 154 369 } 155 370 371 + if err := t.ManifestBlob.MarshalCBOR(cw); err != nil { 372 + return err 373 + } 156 374 } 375 + 376 + // t.SchemaVersion (int64) (int64) 377 + if len("schemaVersion") > 8192 { 378 + return xerrors.Errorf("Value in field \"schemaVersion\" was too long") 379 + } 380 + 381 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("schemaVersion"))); err != nil { 382 + return err 383 + } 384 + if _, err := cw.WriteString(string("schemaVersion")); err != nil { 385 + return err 386 + } 387 + 388 + if t.SchemaVersion >= 0 { 389 + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.SchemaVersion)); err != nil { 390 + return err 391 + } 392 + } else { 393 + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.SchemaVersion-1)); err != nil { 394 + return err 395 + } 396 + } 397 + 157 398 return nil 158 399 } 159 400 160 - func (t *CrewRecord) UnmarshalCBOR(r io.Reader) (err error) { 161 - *t = CrewRecord{} 401 + func (t *Manifest) UnmarshalCBOR(r io.Reader) (err error) { 402 + *t = Manifest{} 162 403 163 404 cr := cbg.NewCborReader(r) 164 405 ··· 177 418 } 178 419 179 420 if extra > cbg.MaxLength { 180 - return fmt.Errorf("CrewRecord: map struct too large (%d)", extra) 421 + return fmt.Errorf("Manifest: map struct too large (%d)", extra) 422 + } 423 + 424 + n := extra 425 + 426 + nameBuf := make([]byte, 13) 427 + for i := uint64(0); i < n; i++ { 428 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 429 + if err != nil { 430 + return err 431 + } 432 + 433 + if !ok { 434 + // Field doesn't exist on this type, so ignore it 435 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 436 + return err 437 + } 438 + continue 439 + } 440 + 441 + switch string(nameBuf[:nameLen]) { 442 + // t.LexiconTypeID (string) (string) 443 + case "$type": 444 + 445 + { 446 + sval, err := cbg.ReadStringWithMax(cr, 8192) 447 + if err != nil { 448 + return err 449 + } 450 + 451 + t.LexiconTypeID = string(sval) 452 + } 453 + // t.Config (atproto.Manifest_BlobReference) (struct) 454 + case "config": 455 + 456 + { 457 + 458 + b, err := cr.ReadByte() 459 + if err != nil { 460 + return err 461 + } 462 + if b != cbg.CborNull[0] { 463 + if err := cr.UnreadByte(); err != nil { 464 + return err 465 + } 466 + t.Config = new(Manifest_BlobReference) 467 + if err := t.Config.UnmarshalCBOR(cr); err != nil { 468 + return xerrors.Errorf("unmarshaling t.Config pointer: %w", err) 469 + } 470 + } 471 + 472 + } 473 + // t.Digest (string) (string) 474 + case "digest": 475 + 476 + { 477 + sval, err := cbg.ReadStringWithMax(cr, 8192) 478 + if err != nil { 479 + return err 480 + } 481 + 482 + t.Digest = string(sval) 483 + } 484 + // t.Layers ([]atproto.Manifest_BlobReference) (slice) 485 + case "layers": 486 + 487 + maj, extra, err = cr.ReadHeader() 488 + if err != nil { 489 + return err 490 + } 491 + 492 + if extra > 8192 { 493 + return fmt.Errorf("t.Layers: array too large (%d)", extra) 494 + } 495 + 496 + if maj != cbg.MajArray { 497 + return fmt.Errorf("expected cbor array") 498 + } 499 + 500 + if extra > 0 { 501 + t.Layers = make([]Manifest_BlobReference, extra) 502 + } 503 + 504 + for i := 0; i < int(extra); i++ { 505 + { 506 + var maj byte 507 + var extra uint64 508 + var err error 509 + _ = maj 510 + _ = extra 511 + _ = err 512 + 513 + { 514 + 515 + if err := t.Layers[i].UnmarshalCBOR(cr); err != nil { 516 + return xerrors.Errorf("unmarshaling t.Layers[i]: %w", err) 517 + } 518 + 519 + } 520 + 521 + } 522 + } 523 + // t.HoldDid (string) (string) 524 + case "holdDid": 525 + 526 + { 527 + b, err := cr.ReadByte() 528 + if err != nil { 529 + return err 530 + } 531 + if b != cbg.CborNull[0] { 532 + if err := cr.UnreadByte(); err != nil { 533 + return err 534 + } 535 + 536 + sval, err := cbg.ReadStringWithMax(cr, 8192) 537 + if err != nil { 538 + return err 539 + } 540 + 541 + t.HoldDid = (*string)(&sval) 542 + } 543 + } 544 + // t.Subject (atproto.Manifest_BlobReference) (struct) 545 + case "subject": 546 + 547 + { 548 + 549 + b, err := cr.ReadByte() 550 + if err != nil { 551 + return err 552 + } 553 + if b != cbg.CborNull[0] { 554 + if err := cr.UnreadByte(); err != nil { 555 + return err 556 + } 557 + t.Subject = new(Manifest_BlobReference) 558 + if err := t.Subject.UnmarshalCBOR(cr); err != nil { 559 + return xerrors.Errorf("unmarshaling t.Subject pointer: %w", err) 560 + } 561 + } 562 + 563 + } 564 + // t.CreatedAt (string) (string) 565 + case "createdAt": 566 + 567 + { 568 + sval, err := cbg.ReadStringWithMax(cr, 8192) 569 + if err != nil { 570 + return err 571 + } 572 + 573 + t.CreatedAt = string(sval) 574 + } 575 + // t.Manifests ([]atproto.Manifest_ManifestReference) (slice) 576 + case "manifests": 577 + 578 + maj, extra, err = cr.ReadHeader() 579 + if err != nil { 580 + return err 581 + } 582 + 583 + if extra > 8192 { 584 + return fmt.Errorf("t.Manifests: array too large (%d)", extra) 585 + } 586 + 587 + if maj != cbg.MajArray { 588 + return fmt.Errorf("expected cbor array") 589 + } 590 + 591 + if extra > 0 { 592 + t.Manifests = make([]Manifest_ManifestReference, extra) 593 + } 594 + 595 + for i := 0; i < int(extra); i++ { 596 + { 597 + var maj byte 598 + var extra uint64 599 + var err error 600 + _ = maj 601 + _ = extra 602 + _ = err 603 + 604 + { 605 + 606 + if err := t.Manifests[i].UnmarshalCBOR(cr); err != nil { 607 + return xerrors.Errorf("unmarshaling t.Manifests[i]: %w", err) 608 + } 609 + 610 + } 611 + 612 + } 613 + } 614 + // t.MediaType (string) (string) 615 + case "mediaType": 616 + 617 + { 618 + sval, err := cbg.ReadStringWithMax(cr, 8192) 619 + if err != nil { 620 + return err 621 + } 622 + 623 + t.MediaType = string(sval) 624 + } 625 + // t.Repository (string) (string) 626 + case "repository": 627 + 628 + { 629 + sval, err := cbg.ReadStringWithMax(cr, 8192) 630 + if err != nil { 631 + return err 632 + } 633 + 634 + t.Repository = string(sval) 635 + } 636 + // t.Annotations (atproto.Manifest_Annotations) (struct) 637 + case "annotations": 638 + 639 + { 640 + 641 + b, err := cr.ReadByte() 642 + if err != nil { 643 + return err 644 + } 645 + if b != cbg.CborNull[0] { 646 + if err := cr.UnreadByte(); err != nil { 647 + return err 648 + } 649 + t.Annotations = new(Manifest_Annotations) 650 + if err := t.Annotations.UnmarshalCBOR(cr); err != nil { 651 + return xerrors.Errorf("unmarshaling t.Annotations pointer: %w", err) 652 + } 653 + } 654 + 655 + } 656 + // t.HoldEndpoint (string) (string) 657 + case "holdEndpoint": 658 + 659 + { 660 + b, err := cr.ReadByte() 661 + if err != nil { 662 + return err 663 + } 664 + if b != cbg.CborNull[0] { 665 + if err := cr.UnreadByte(); err != nil { 666 + return err 667 + } 668 + 669 + sval, err := cbg.ReadStringWithMax(cr, 8192) 670 + if err != nil { 671 + return err 672 + } 673 + 674 + t.HoldEndpoint = (*string)(&sval) 675 + } 676 + } 677 + // t.ManifestBlob (util.LexBlob) (struct) 678 + case "manifestBlob": 679 + 680 + { 681 + 682 + b, err := cr.ReadByte() 683 + if err != nil { 684 + return err 685 + } 686 + if b != cbg.CborNull[0] { 687 + if err := cr.UnreadByte(); err != nil { 688 + return err 689 + } 690 + t.ManifestBlob = new(util.LexBlob) 691 + if err := t.ManifestBlob.UnmarshalCBOR(cr); err != nil { 692 + return xerrors.Errorf("unmarshaling t.ManifestBlob pointer: %w", err) 693 + } 694 + } 695 + 696 + } 697 + // t.SchemaVersion (int64) (int64) 698 + case "schemaVersion": 699 + { 700 + maj, extra, err := cr.ReadHeader() 701 + if err != nil { 702 + return err 703 + } 704 + var extraI int64 705 + switch maj { 706 + case cbg.MajUnsignedInt: 707 + extraI = int64(extra) 708 + if extraI < 0 { 709 + return fmt.Errorf("int64 positive overflow") 710 + } 711 + case cbg.MajNegativeInt: 712 + extraI = int64(extra) 713 + if extraI < 0 { 714 + return fmt.Errorf("int64 negative overflow") 715 + } 716 + extraI = -1 - extraI 717 + default: 718 + return fmt.Errorf("wrong type for int64 field: %d", maj) 719 + } 720 + 721 + t.SchemaVersion = int64(extraI) 722 + } 723 + 724 + default: 725 + // Field doesn't exist on this type, so ignore it 726 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 727 + return err 728 + } 729 + } 730 + } 731 + 732 + return nil 733 + } 734 + func (t *Manifest_BlobReference) MarshalCBOR(w io.Writer) error { 735 + if t == nil { 736 + _, err := w.Write(cbg.CborNull) 737 + return err 738 + } 739 + 740 + cw := cbg.NewCborWriter(w) 741 + fieldCount := 6 742 + 743 + if t.LexiconTypeID == "" { 744 + fieldCount-- 745 + } 746 + 747 + if t.Annotations == nil { 748 + fieldCount-- 749 + } 750 + 751 + if t.Urls == nil { 752 + fieldCount-- 753 + } 754 + 755 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 756 + return err 757 + } 758 + 759 + // t.Size (int64) (int64) 760 + if len("size") > 8192 { 761 + return xerrors.Errorf("Value in field \"size\" was too long") 762 + } 763 + 764 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("size"))); err != nil { 765 + return err 766 + } 767 + if _, err := cw.WriteString(string("size")); err != nil { 768 + return err 769 + } 770 + 771 + if t.Size >= 0 { 772 + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { 773 + return err 774 + } 775 + } else { 776 + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Size-1)); err != nil { 777 + return err 778 + } 779 + } 780 + 781 + // t.Urls ([]string) (slice) 782 + if t.Urls != nil { 783 + 784 + if len("urls") > 8192 { 785 + return xerrors.Errorf("Value in field \"urls\" was too long") 786 + } 787 + 788 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("urls"))); err != nil { 789 + return err 790 + } 791 + if _, err := cw.WriteString(string("urls")); err != nil { 792 + return err 793 + } 794 + 795 + if len(t.Urls) > 8192 { 796 + return xerrors.Errorf("Slice value in field t.Urls was too long") 797 + } 798 + 799 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Urls))); err != nil { 800 + return err 801 + } 802 + for _, v := range t.Urls { 803 + if len(v) > 8192 { 804 + return xerrors.Errorf("Value in field v was too long") 805 + } 806 + 807 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 808 + return err 809 + } 810 + if _, err := cw.WriteString(string(v)); err != nil { 811 + return err 812 + } 813 + 814 + } 815 + } 816 + 817 + // t.LexiconTypeID (string) (string) 818 + if t.LexiconTypeID != "" { 819 + 820 + if len("$type") > 8192 { 821 + return xerrors.Errorf("Value in field \"$type\" was too long") 822 + } 823 + 824 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 825 + return err 826 + } 827 + if _, err := cw.WriteString(string("$type")); err != nil { 828 + return err 829 + } 830 + 831 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.manifest#blobReference"))); err != nil { 832 + return err 833 + } 834 + if _, err := cw.WriteString(string("io.atcr.manifest#blobReference")); err != nil { 835 + return err 836 + } 837 + } 838 + 839 + // t.Digest (string) (string) 840 + if len("digest") > 8192 { 841 + return xerrors.Errorf("Value in field \"digest\" was too long") 842 + } 843 + 844 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("digest"))); err != nil { 845 + return err 846 + } 847 + if _, err := cw.WriteString(string("digest")); err != nil { 848 + return err 849 + } 850 + 851 + if len(t.Digest) > 8192 { 852 + return xerrors.Errorf("Value in field t.Digest was too long") 853 + } 854 + 855 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Digest))); err != nil { 856 + return err 857 + } 858 + if _, err := cw.WriteString(string(t.Digest)); err != nil { 859 + return err 860 + } 861 + 862 + // t.MediaType (string) (string) 863 + if len("mediaType") > 8192 { 864 + return xerrors.Errorf("Value in field \"mediaType\" was too long") 865 + } 866 + 867 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mediaType"))); err != nil { 868 + return err 869 + } 870 + if _, err := cw.WriteString(string("mediaType")); err != nil { 871 + return err 872 + } 873 + 874 + if len(t.MediaType) > 8192 { 875 + return xerrors.Errorf("Value in field t.MediaType was too long") 876 + } 877 + 878 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MediaType))); err != nil { 879 + return err 880 + } 881 + if _, err := cw.WriteString(string(t.MediaType)); err != nil { 882 + return err 883 + } 884 + 885 + // t.Annotations (atproto.Manifest_BlobReference_Annotations) (struct) 886 + if t.Annotations != nil { 887 + 888 + if len("annotations") > 8192 { 889 + return xerrors.Errorf("Value in field \"annotations\" was too long") 890 + } 891 + 892 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("annotations"))); err != nil { 893 + return err 894 + } 895 + if _, err := cw.WriteString(string("annotations")); err != nil { 896 + return err 897 + } 898 + 899 + if err := t.Annotations.MarshalCBOR(cw); err != nil { 900 + return err 901 + } 902 + } 903 + return nil 904 + } 905 + 906 + func (t *Manifest_BlobReference) UnmarshalCBOR(r io.Reader) (err error) { 907 + *t = Manifest_BlobReference{} 908 + 909 + cr := cbg.NewCborReader(r) 910 + 911 + maj, extra, err := cr.ReadHeader() 912 + if err != nil { 913 + return err 914 + } 915 + defer func() { 916 + if err == io.EOF { 917 + err = io.ErrUnexpectedEOF 918 + } 919 + }() 920 + 921 + if maj != cbg.MajMap { 922 + return fmt.Errorf("cbor input should be of type map") 923 + } 924 + 925 + if extra > cbg.MaxLength { 926 + return fmt.Errorf("Manifest_BlobReference: map struct too large (%d)", extra) 181 927 } 182 928 183 929 n := extra ··· 198 944 } 199 945 200 946 switch string(nameBuf[:nameLen]) { 201 - // t.Role (string) (string) 202 - case "role": 947 + // t.Size (int64) (int64) 948 + case "size": 949 + { 950 + maj, extra, err := cr.ReadHeader() 951 + if err != nil { 952 + return err 953 + } 954 + var extraI int64 955 + switch maj { 956 + case cbg.MajUnsignedInt: 957 + extraI = int64(extra) 958 + if extraI < 0 { 959 + return fmt.Errorf("int64 positive overflow") 960 + } 961 + case cbg.MajNegativeInt: 962 + extraI = int64(extra) 963 + if extraI < 0 { 964 + return fmt.Errorf("int64 negative overflow") 965 + } 966 + extraI = -1 - extraI 967 + default: 968 + return fmt.Errorf("wrong type for int64 field: %d", maj) 969 + } 970 + 971 + t.Size = int64(extraI) 972 + } 973 + // t.Urls ([]string) (slice) 974 + case "urls": 975 + 976 + maj, extra, err = cr.ReadHeader() 977 + if err != nil { 978 + return err 979 + } 980 + 981 + if extra > 8192 { 982 + return fmt.Errorf("t.Urls: array too large (%d)", extra) 983 + } 984 + 985 + if maj != cbg.MajArray { 986 + return fmt.Errorf("expected cbor array") 987 + } 988 + 989 + if extra > 0 { 990 + t.Urls = make([]string, extra) 991 + } 992 + 993 + for i := 0; i < int(extra); i++ { 994 + { 995 + var maj byte 996 + var extra uint64 997 + var err error 998 + _ = maj 999 + _ = extra 1000 + _ = err 1001 + 1002 + { 1003 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1004 + if err != nil { 1005 + return err 1006 + } 1007 + 1008 + t.Urls[i] = string(sval) 1009 + } 1010 + 1011 + } 1012 + } 1013 + // t.LexiconTypeID (string) (string) 1014 + case "$type": 203 1015 204 1016 { 205 1017 sval, err := cbg.ReadStringWithMax(cr, 8192) ··· 207 1019 return err 208 1020 } 209 1021 210 - t.Role = string(sval) 1022 + t.LexiconTypeID = string(sval) 1023 + } 1024 + // t.Digest (string) (string) 1025 + case "digest": 1026 + 1027 + { 1028 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1029 + if err != nil { 1030 + return err 1031 + } 1032 + 1033 + t.Digest = string(sval) 1034 + } 1035 + // t.MediaType (string) (string) 1036 + case "mediaType": 1037 + 1038 + { 1039 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1040 + if err != nil { 1041 + return err 1042 + } 1043 + 1044 + t.MediaType = string(sval) 1045 + } 1046 + // t.Annotations (atproto.Manifest_BlobReference_Annotations) (struct) 1047 + case "annotations": 1048 + 1049 + { 1050 + 1051 + b, err := cr.ReadByte() 1052 + if err != nil { 1053 + return err 1054 + } 1055 + if b != cbg.CborNull[0] { 1056 + if err := cr.UnreadByte(); err != nil { 1057 + return err 1058 + } 1059 + t.Annotations = new(Manifest_BlobReference_Annotations) 1060 + if err := t.Annotations.UnmarshalCBOR(cr); err != nil { 1061 + return xerrors.Errorf("unmarshaling t.Annotations pointer: %w", err) 1062 + } 1063 + } 1064 + 1065 + } 1066 + 1067 + default: 1068 + // Field doesn't exist on this type, so ignore it 1069 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 1070 + return err 1071 + } 1072 + } 1073 + } 1074 + 1075 + return nil 1076 + } 1077 + func (t *Manifest_ManifestReference) MarshalCBOR(w io.Writer) error { 1078 + if t == nil { 1079 + _, err := w.Write(cbg.CborNull) 1080 + return err 1081 + } 1082 + 1083 + cw := cbg.NewCborWriter(w) 1084 + fieldCount := 6 1085 + 1086 + if t.LexiconTypeID == "" { 1087 + fieldCount-- 1088 + } 1089 + 1090 + if t.Annotations == nil { 1091 + fieldCount-- 1092 + } 1093 + 1094 + if t.Platform == nil { 1095 + fieldCount-- 1096 + } 1097 + 1098 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 1099 + return err 1100 + } 1101 + 1102 + // t.Size (int64) (int64) 1103 + if len("size") > 8192 { 1104 + return xerrors.Errorf("Value in field \"size\" was too long") 1105 + } 1106 + 1107 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("size"))); err != nil { 1108 + return err 1109 + } 1110 + if _, err := cw.WriteString(string("size")); err != nil { 1111 + return err 1112 + } 1113 + 1114 + if t.Size >= 0 { 1115 + if err := cw.WriteMajorTypeHeader(cbg.MajUnsignedInt, uint64(t.Size)); err != nil { 1116 + return err 1117 + } 1118 + } else { 1119 + if err := cw.WriteMajorTypeHeader(cbg.MajNegativeInt, uint64(-t.Size-1)); err != nil { 1120 + return err 1121 + } 1122 + } 1123 + 1124 + // t.LexiconTypeID (string) (string) 1125 + if t.LexiconTypeID != "" { 1126 + 1127 + if len("$type") > 8192 { 1128 + return xerrors.Errorf("Value in field \"$type\" was too long") 1129 + } 1130 + 1131 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 1132 + return err 1133 + } 1134 + if _, err := cw.WriteString(string("$type")); err != nil { 1135 + return err 1136 + } 1137 + 1138 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.manifest#manifestReference"))); err != nil { 1139 + return err 1140 + } 1141 + if _, err := cw.WriteString(string("io.atcr.manifest#manifestReference")); err != nil { 1142 + return err 1143 + } 1144 + } 1145 + 1146 + // t.Digest (string) (string) 1147 + if len("digest") > 8192 { 1148 + return xerrors.Errorf("Value in field \"digest\" was too long") 1149 + } 1150 + 1151 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("digest"))); err != nil { 1152 + return err 1153 + } 1154 + if _, err := cw.WriteString(string("digest")); err != nil { 1155 + return err 1156 + } 1157 + 1158 + if len(t.Digest) > 8192 { 1159 + return xerrors.Errorf("Value in field t.Digest was too long") 1160 + } 1161 + 1162 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Digest))); err != nil { 1163 + return err 1164 + } 1165 + if _, err := cw.WriteString(string(t.Digest)); err != nil { 1166 + return err 1167 + } 1168 + 1169 + // t.Platform (atproto.Manifest_Platform) (struct) 1170 + if t.Platform != nil { 1171 + 1172 + if len("platform") > 8192 { 1173 + return xerrors.Errorf("Value in field \"platform\" was too long") 1174 + } 1175 + 1176 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("platform"))); err != nil { 1177 + return err 1178 + } 1179 + if _, err := cw.WriteString(string("platform")); err != nil { 1180 + return err 1181 + } 1182 + 1183 + if err := t.Platform.MarshalCBOR(cw); err != nil { 1184 + return err 1185 + } 1186 + } 1187 + 1188 + // t.MediaType (string) (string) 1189 + if len("mediaType") > 8192 { 1190 + return xerrors.Errorf("Value in field \"mediaType\" was too long") 1191 + } 1192 + 1193 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("mediaType"))); err != nil { 1194 + return err 1195 + } 1196 + if _, err := cw.WriteString(string("mediaType")); err != nil { 1197 + return err 1198 + } 1199 + 1200 + if len(t.MediaType) > 8192 { 1201 + return xerrors.Errorf("Value in field t.MediaType was too long") 1202 + } 1203 + 1204 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.MediaType))); err != nil { 1205 + return err 1206 + } 1207 + if _, err := cw.WriteString(string(t.MediaType)); err != nil { 1208 + return err 1209 + } 1210 + 1211 + // t.Annotations (atproto.Manifest_ManifestReference_Annotations) (struct) 1212 + if t.Annotations != nil { 1213 + 1214 + if len("annotations") > 8192 { 1215 + return xerrors.Errorf("Value in field \"annotations\" was too long") 1216 + } 1217 + 1218 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("annotations"))); err != nil { 1219 + return err 1220 + } 1221 + if _, err := cw.WriteString(string("annotations")); err != nil { 1222 + return err 1223 + } 1224 + 1225 + if err := t.Annotations.MarshalCBOR(cw); err != nil { 1226 + return err 1227 + } 1228 + } 1229 + return nil 1230 + } 1231 + 1232 + func (t *Manifest_ManifestReference) UnmarshalCBOR(r io.Reader) (err error) { 1233 + *t = Manifest_ManifestReference{} 1234 + 1235 + cr := cbg.NewCborReader(r) 1236 + 1237 + maj, extra, err := cr.ReadHeader() 1238 + if err != nil { 1239 + return err 1240 + } 1241 + defer func() { 1242 + if err == io.EOF { 1243 + err = io.ErrUnexpectedEOF 1244 + } 1245 + }() 1246 + 1247 + if maj != cbg.MajMap { 1248 + return fmt.Errorf("cbor input should be of type map") 1249 + } 1250 + 1251 + if extra > cbg.MaxLength { 1252 + return fmt.Errorf("Manifest_ManifestReference: map struct too large (%d)", extra) 1253 + } 1254 + 1255 + n := extra 1256 + 1257 + nameBuf := make([]byte, 11) 1258 + for i := uint64(0); i < n; i++ { 1259 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 1260 + if err != nil { 1261 + return err 1262 + } 1263 + 1264 + if !ok { 1265 + // Field doesn't exist on this type, so ignore it 1266 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 1267 + return err 1268 + } 1269 + continue 1270 + } 1271 + 1272 + switch string(nameBuf[:nameLen]) { 1273 + // t.Size (int64) (int64) 1274 + case "size": 1275 + { 1276 + maj, extra, err := cr.ReadHeader() 1277 + if err != nil { 1278 + return err 1279 + } 1280 + var extraI int64 1281 + switch maj { 1282 + case cbg.MajUnsignedInt: 1283 + extraI = int64(extra) 1284 + if extraI < 0 { 1285 + return fmt.Errorf("int64 positive overflow") 1286 + } 1287 + case cbg.MajNegativeInt: 1288 + extraI = int64(extra) 1289 + if extraI < 0 { 1290 + return fmt.Errorf("int64 negative overflow") 1291 + } 1292 + extraI = -1 - extraI 1293 + default: 1294 + return fmt.Errorf("wrong type for int64 field: %d", maj) 1295 + } 1296 + 1297 + t.Size = int64(extraI) 211 1298 } 212 - // t.Type (string) (string) 1299 + // t.LexiconTypeID (string) (string) 213 1300 case "$type": 214 1301 215 1302 { ··· 218 1305 return err 219 1306 } 220 1307 221 - t.Type = string(sval) 1308 + t.LexiconTypeID = string(sval) 1309 + } 1310 + // t.Digest (string) (string) 1311 + case "digest": 1312 + 1313 + { 1314 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1315 + if err != nil { 1316 + return err 1317 + } 1318 + 1319 + t.Digest = string(sval) 1320 + } 1321 + // t.Platform (atproto.Manifest_Platform) (struct) 1322 + case "platform": 1323 + 1324 + { 1325 + 1326 + b, err := cr.ReadByte() 1327 + if err != nil { 1328 + return err 1329 + } 1330 + if b != cbg.CborNull[0] { 1331 + if err := cr.UnreadByte(); err != nil { 1332 + return err 1333 + } 1334 + t.Platform = new(Manifest_Platform) 1335 + if err := t.Platform.UnmarshalCBOR(cr); err != nil { 1336 + return xerrors.Errorf("unmarshaling t.Platform pointer: %w", err) 1337 + } 1338 + } 1339 + 1340 + } 1341 + // t.MediaType (string) (string) 1342 + case "mediaType": 1343 + 1344 + { 1345 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1346 + if err != nil { 1347 + return err 1348 + } 1349 + 1350 + t.MediaType = string(sval) 1351 + } 1352 + // t.Annotations (atproto.Manifest_ManifestReference_Annotations) (struct) 1353 + case "annotations": 1354 + 1355 + { 1356 + 1357 + b, err := cr.ReadByte() 1358 + if err != nil { 1359 + return err 1360 + } 1361 + if b != cbg.CborNull[0] { 1362 + if err := cr.UnreadByte(); err != nil { 1363 + return err 1364 + } 1365 + t.Annotations = new(Manifest_ManifestReference_Annotations) 1366 + if err := t.Annotations.UnmarshalCBOR(cr); err != nil { 1367 + return xerrors.Errorf("unmarshaling t.Annotations pointer: %w", err) 1368 + } 1369 + } 1370 + 1371 + } 1372 + 1373 + default: 1374 + // Field doesn't exist on this type, so ignore it 1375 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 1376 + return err 1377 + } 1378 + } 1379 + } 1380 + 1381 + return nil 1382 + } 1383 + func (t *Manifest_Platform) MarshalCBOR(w io.Writer) error { 1384 + if t == nil { 1385 + _, err := w.Write(cbg.CborNull) 1386 + return err 1387 + } 1388 + 1389 + cw := cbg.NewCborWriter(w) 1390 + fieldCount := 6 1391 + 1392 + if t.LexiconTypeID == "" { 1393 + fieldCount-- 1394 + } 1395 + 1396 + if t.OsFeatures == nil { 1397 + fieldCount-- 1398 + } 1399 + 1400 + if t.OsVersion == nil { 1401 + fieldCount-- 1402 + } 1403 + 1404 + if t.Variant == nil { 1405 + fieldCount-- 1406 + } 1407 + 1408 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 1409 + return err 1410 + } 1411 + 1412 + // t.Os (string) (string) 1413 + if len("os") > 8192 { 1414 + return xerrors.Errorf("Value in field \"os\" was too long") 1415 + } 1416 + 1417 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("os"))); err != nil { 1418 + return err 1419 + } 1420 + if _, err := cw.WriteString(string("os")); err != nil { 1421 + return err 1422 + } 1423 + 1424 + if len(t.Os) > 8192 { 1425 + return xerrors.Errorf("Value in field t.Os was too long") 1426 + } 1427 + 1428 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Os))); err != nil { 1429 + return err 1430 + } 1431 + if _, err := cw.WriteString(string(t.Os)); err != nil { 1432 + return err 1433 + } 1434 + 1435 + // t.LexiconTypeID (string) (string) 1436 + if t.LexiconTypeID != "" { 1437 + 1438 + if len("$type") > 8192 { 1439 + return xerrors.Errorf("Value in field \"$type\" was too long") 1440 + } 1441 + 1442 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 1443 + return err 1444 + } 1445 + if _, err := cw.WriteString(string("$type")); err != nil { 1446 + return err 1447 + } 1448 + 1449 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.manifest#platform"))); err != nil { 1450 + return err 1451 + } 1452 + if _, err := cw.WriteString(string("io.atcr.manifest#platform")); err != nil { 1453 + return err 1454 + } 1455 + } 1456 + 1457 + // t.Variant (string) (string) 1458 + if t.Variant != nil { 1459 + 1460 + if len("variant") > 8192 { 1461 + return xerrors.Errorf("Value in field \"variant\" was too long") 1462 + } 1463 + 1464 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("variant"))); err != nil { 1465 + return err 1466 + } 1467 + if _, err := cw.WriteString(string("variant")); err != nil { 1468 + return err 1469 + } 1470 + 1471 + if t.Variant == nil { 1472 + if _, err := cw.Write(cbg.CborNull); err != nil { 1473 + return err 222 1474 } 223 - // t.Member (string) (string) 224 - case "member": 1475 + } else { 1476 + if len(*t.Variant) > 8192 { 1477 + return xerrors.Errorf("Value in field t.Variant was too long") 1478 + } 1479 + 1480 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Variant))); err != nil { 1481 + return err 1482 + } 1483 + if _, err := cw.WriteString(string(*t.Variant)); err != nil { 1484 + return err 1485 + } 1486 + } 1487 + } 1488 + 1489 + // t.OsVersion (string) (string) 1490 + if t.OsVersion != nil { 1491 + 1492 + if len("osVersion") > 8192 { 1493 + return xerrors.Errorf("Value in field \"osVersion\" was too long") 1494 + } 1495 + 1496 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("osVersion"))); err != nil { 1497 + return err 1498 + } 1499 + if _, err := cw.WriteString(string("osVersion")); err != nil { 1500 + return err 1501 + } 1502 + 1503 + if t.OsVersion == nil { 1504 + if _, err := cw.Write(cbg.CborNull); err != nil { 1505 + return err 1506 + } 1507 + } else { 1508 + if len(*t.OsVersion) > 8192 { 1509 + return xerrors.Errorf("Value in field t.OsVersion was too long") 1510 + } 1511 + 1512 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.OsVersion))); err != nil { 1513 + return err 1514 + } 1515 + if _, err := cw.WriteString(string(*t.OsVersion)); err != nil { 1516 + return err 1517 + } 1518 + } 1519 + } 1520 + 1521 + // t.OsFeatures ([]string) (slice) 1522 + if t.OsFeatures != nil { 1523 + 1524 + if len("osFeatures") > 8192 { 1525 + return xerrors.Errorf("Value in field \"osFeatures\" was too long") 1526 + } 1527 + 1528 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("osFeatures"))); err != nil { 1529 + return err 1530 + } 1531 + if _, err := cw.WriteString(string("osFeatures")); err != nil { 1532 + return err 1533 + } 1534 + 1535 + if len(t.OsFeatures) > 8192 { 1536 + return xerrors.Errorf("Slice value in field t.OsFeatures was too long") 1537 + } 1538 + 1539 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.OsFeatures))); err != nil { 1540 + return err 1541 + } 1542 + for _, v := range t.OsFeatures { 1543 + if len(v) > 8192 { 1544 + return xerrors.Errorf("Value in field v was too long") 1545 + } 1546 + 1547 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 1548 + return err 1549 + } 1550 + if _, err := cw.WriteString(string(v)); err != nil { 1551 + return err 1552 + } 1553 + 1554 + } 1555 + } 1556 + 1557 + // t.Architecture (string) (string) 1558 + if len("architecture") > 8192 { 1559 + return xerrors.Errorf("Value in field \"architecture\" was too long") 1560 + } 1561 + 1562 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("architecture"))); err != nil { 1563 + return err 1564 + } 1565 + if _, err := cw.WriteString(string("architecture")); err != nil { 1566 + return err 1567 + } 1568 + 1569 + if len(t.Architecture) > 8192 { 1570 + return xerrors.Errorf("Value in field t.Architecture was too long") 1571 + } 1572 + 1573 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Architecture))); err != nil { 1574 + return err 1575 + } 1576 + if _, err := cw.WriteString(string(t.Architecture)); err != nil { 1577 + return err 1578 + } 1579 + return nil 1580 + } 1581 + 1582 + func (t *Manifest_Platform) UnmarshalCBOR(r io.Reader) (err error) { 1583 + *t = Manifest_Platform{} 1584 + 1585 + cr := cbg.NewCborReader(r) 1586 + 1587 + maj, extra, err := cr.ReadHeader() 1588 + if err != nil { 1589 + return err 1590 + } 1591 + defer func() { 1592 + if err == io.EOF { 1593 + err = io.ErrUnexpectedEOF 1594 + } 1595 + }() 1596 + 1597 + if maj != cbg.MajMap { 1598 + return fmt.Errorf("cbor input should be of type map") 1599 + } 1600 + 1601 + if extra > cbg.MaxLength { 1602 + return fmt.Errorf("Manifest_Platform: map struct too large (%d)", extra) 1603 + } 1604 + 1605 + n := extra 1606 + 1607 + nameBuf := make([]byte, 12) 1608 + for i := uint64(0); i < n; i++ { 1609 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 1610 + if err != nil { 1611 + return err 1612 + } 1613 + 1614 + if !ok { 1615 + // Field doesn't exist on this type, so ignore it 1616 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 1617 + return err 1618 + } 1619 + continue 1620 + } 1621 + 1622 + switch string(nameBuf[:nameLen]) { 1623 + // t.Os (string) (string) 1624 + case "os": 225 1625 226 1626 { 227 1627 sval, err := cbg.ReadStringWithMax(cr, 8192) ··· 229 1629 return err 230 1630 } 231 1631 232 - t.Member = string(sval) 1632 + t.Os = string(sval) 233 1633 } 234 - // t.AddedAt (string) (string) 235 - case "addedAt": 1634 + // t.LexiconTypeID (string) (string) 1635 + case "$type": 236 1636 237 1637 { 238 1638 sval, err := cbg.ReadStringWithMax(cr, 8192) ··· 240 1640 return err 241 1641 } 242 1642 243 - t.AddedAt = string(sval) 1643 + t.LexiconTypeID = string(sval) 244 1644 } 245 - // t.Permissions ([]string) (slice) 246 - case "permissions": 1645 + // t.Variant (string) (string) 1646 + case "variant": 1647 + 1648 + { 1649 + b, err := cr.ReadByte() 1650 + if err != nil { 1651 + return err 1652 + } 1653 + if b != cbg.CborNull[0] { 1654 + if err := cr.UnreadByte(); err != nil { 1655 + return err 1656 + } 1657 + 1658 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1659 + if err != nil { 1660 + return err 1661 + } 1662 + 1663 + t.Variant = (*string)(&sval) 1664 + } 1665 + } 1666 + // t.OsVersion (string) (string) 1667 + case "osVersion": 1668 + 1669 + { 1670 + b, err := cr.ReadByte() 1671 + if err != nil { 1672 + return err 1673 + } 1674 + if b != cbg.CborNull[0] { 1675 + if err := cr.UnreadByte(); err != nil { 1676 + return err 1677 + } 1678 + 1679 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1680 + if err != nil { 1681 + return err 1682 + } 1683 + 1684 + t.OsVersion = (*string)(&sval) 1685 + } 1686 + } 1687 + // t.OsFeatures ([]string) (slice) 1688 + case "osFeatures": 247 1689 248 1690 maj, extra, err = cr.ReadHeader() 249 1691 if err != nil { ··· 251 1693 } 252 1694 253 1695 if extra > 8192 { 254 - return fmt.Errorf("t.Permissions: array too large (%d)", extra) 1696 + return fmt.Errorf("t.OsFeatures: array too large (%d)", extra) 255 1697 } 256 1698 257 1699 if maj != cbg.MajArray { ··· 259 1701 } 260 1702 261 1703 if extra > 0 { 262 - t.Permissions = make([]string, extra) 1704 + t.OsFeatures = make([]string, extra) 263 1705 } 264 1706 265 1707 for i := 0; i < int(extra); i++ { ··· 277 1719 return err 278 1720 } 279 1721 280 - t.Permissions[i] = string(sval) 1722 + t.OsFeatures[i] = string(sval) 1723 + } 1724 + 1725 + } 1726 + } 1727 + // t.Architecture (string) (string) 1728 + case "architecture": 1729 + 1730 + { 1731 + sval, err := cbg.ReadStringWithMax(cr, 8192) 1732 + if err != nil { 1733 + return err 1734 + } 1735 + 1736 + t.Architecture = string(sval) 1737 + } 1738 + 1739 + default: 1740 + // Field doesn't exist on this type, so ignore it 1741 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 1742 + return err 1743 + } 1744 + } 1745 + } 1746 + 1747 + return nil 1748 + } 1749 + func (t *Manifest_Annotations) MarshalCBOR(w io.Writer) error { 1750 + if t == nil { 1751 + _, err := w.Write(cbg.CborNull) 1752 + return err 1753 + } 1754 + 1755 + cw := cbg.NewCborWriter(w) 1756 + 1757 + if _, err := cw.Write([]byte{160}); err != nil { 1758 + return err 1759 + } 1760 + return nil 1761 + } 1762 + 1763 + func (t *Manifest_Annotations) UnmarshalCBOR(r io.Reader) (err error) { 1764 + *t = Manifest_Annotations{} 1765 + 1766 + cr := cbg.NewCborReader(r) 1767 + 1768 + maj, extra, err := cr.ReadHeader() 1769 + if err != nil { 1770 + return err 1771 + } 1772 + defer func() { 1773 + if err == io.EOF { 1774 + err = io.ErrUnexpectedEOF 1775 + } 1776 + }() 1777 + 1778 + if maj != cbg.MajMap { 1779 + return fmt.Errorf("cbor input should be of type map") 1780 + } 1781 + 1782 + if extra > cbg.MaxLength { 1783 + return fmt.Errorf("Manifest_Annotations: map struct too large (%d)", extra) 1784 + } 1785 + 1786 + n := extra 1787 + 1788 + nameBuf := make([]byte, 0) 1789 + for i := uint64(0); i < n; i++ { 1790 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 1791 + if err != nil { 1792 + return err 1793 + } 1794 + 1795 + if !ok { 1796 + // Field doesn't exist on this type, so ignore it 1797 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 1798 + return err 1799 + } 1800 + continue 1801 + } 1802 + 1803 + switch string(nameBuf[:nameLen]) { 1804 + 1805 + default: 1806 + // Field doesn't exist on this type, so ignore it 1807 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 1808 + return err 1809 + } 1810 + } 1811 + } 1812 + 1813 + return nil 1814 + } 1815 + func (t *Manifest_BlobReference_Annotations) MarshalCBOR(w io.Writer) error { 1816 + if t == nil { 1817 + _, err := w.Write(cbg.CborNull) 1818 + return err 1819 + } 1820 + 1821 + cw := cbg.NewCborWriter(w) 1822 + 1823 + if _, err := cw.Write([]byte{160}); err != nil { 1824 + return err 1825 + } 1826 + return nil 1827 + } 1828 + 1829 + func (t *Manifest_BlobReference_Annotations) UnmarshalCBOR(r io.Reader) (err error) { 1830 + *t = Manifest_BlobReference_Annotations{} 1831 + 1832 + cr := cbg.NewCborReader(r) 1833 + 1834 + maj, extra, err := cr.ReadHeader() 1835 + if err != nil { 1836 + return err 1837 + } 1838 + defer func() { 1839 + if err == io.EOF { 1840 + err = io.ErrUnexpectedEOF 1841 + } 1842 + }() 1843 + 1844 + if maj != cbg.MajMap { 1845 + return fmt.Errorf("cbor input should be of type map") 1846 + } 1847 + 1848 + if extra > cbg.MaxLength { 1849 + return fmt.Errorf("Manifest_BlobReference_Annotations: map struct too large (%d)", extra) 1850 + } 1851 + 1852 + n := extra 1853 + 1854 + nameBuf := make([]byte, 0) 1855 + for i := uint64(0); i < n; i++ { 1856 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 1857 + if err != nil { 1858 + return err 1859 + } 1860 + 1861 + if !ok { 1862 + // Field doesn't exist on this type, so ignore it 1863 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 1864 + return err 1865 + } 1866 + continue 1867 + } 1868 + 1869 + switch string(nameBuf[:nameLen]) { 1870 + 1871 + default: 1872 + // Field doesn't exist on this type, so ignore it 1873 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 1874 + return err 1875 + } 1876 + } 1877 + } 1878 + 1879 + return nil 1880 + } 1881 + func (t *Manifest_ManifestReference_Annotations) MarshalCBOR(w io.Writer) error { 1882 + if t == nil { 1883 + _, err := w.Write(cbg.CborNull) 1884 + return err 1885 + } 1886 + 1887 + cw := cbg.NewCborWriter(w) 1888 + 1889 + if _, err := cw.Write([]byte{160}); err != nil { 1890 + return err 1891 + } 1892 + return nil 1893 + } 1894 + 1895 + func (t *Manifest_ManifestReference_Annotations) UnmarshalCBOR(r io.Reader) (err error) { 1896 + *t = Manifest_ManifestReference_Annotations{} 1897 + 1898 + cr := cbg.NewCborReader(r) 1899 + 1900 + maj, extra, err := cr.ReadHeader() 1901 + if err != nil { 1902 + return err 1903 + } 1904 + defer func() { 1905 + if err == io.EOF { 1906 + err = io.ErrUnexpectedEOF 1907 + } 1908 + }() 1909 + 1910 + if maj != cbg.MajMap { 1911 + return fmt.Errorf("cbor input should be of type map") 1912 + } 1913 + 1914 + if extra > cbg.MaxLength { 1915 + return fmt.Errorf("Manifest_ManifestReference_Annotations: map struct too large (%d)", extra) 1916 + } 1917 + 1918 + n := extra 1919 + 1920 + nameBuf := make([]byte, 0) 1921 + for i := uint64(0); i < n; i++ { 1922 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 1923 + if err != nil { 1924 + return err 1925 + } 1926 + 1927 + if !ok { 1928 + // Field doesn't exist on this type, so ignore it 1929 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 1930 + return err 1931 + } 1932 + continue 1933 + } 1934 + 1935 + switch string(nameBuf[:nameLen]) { 1936 + 1937 + default: 1938 + // Field doesn't exist on this type, so ignore it 1939 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 1940 + return err 1941 + } 1942 + } 1943 + } 1944 + 1945 + return nil 1946 + } 1947 + func (t *Tag) MarshalCBOR(w io.Writer) error { 1948 + if t == nil { 1949 + _, err := w.Write(cbg.CborNull) 1950 + return err 1951 + } 1952 + 1953 + cw := cbg.NewCborWriter(w) 1954 + fieldCount := 6 1955 + 1956 + if t.Manifest == nil { 1957 + fieldCount-- 1958 + } 1959 + 1960 + if t.ManifestDigest == nil { 1961 + fieldCount-- 1962 + } 1963 + 1964 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 1965 + return err 1966 + } 1967 + 1968 + // t.Tag (string) (string) 1969 + if len("tag") > 8192 { 1970 + return xerrors.Errorf("Value in field \"tag\" was too long") 1971 + } 1972 + 1973 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("tag"))); err != nil { 1974 + return err 1975 + } 1976 + if _, err := cw.WriteString(string("tag")); err != nil { 1977 + return err 1978 + } 1979 + 1980 + if len(t.Tag) > 8192 { 1981 + return xerrors.Errorf("Value in field t.Tag was too long") 1982 + } 1983 + 1984 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Tag))); err != nil { 1985 + return err 1986 + } 1987 + if _, err := cw.WriteString(string(t.Tag)); err != nil { 1988 + return err 1989 + } 1990 + 1991 + // t.LexiconTypeID (string) (string) 1992 + if len("$type") > 8192 { 1993 + return xerrors.Errorf("Value in field \"$type\" was too long") 1994 + } 1995 + 1996 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 1997 + return err 1998 + } 1999 + if _, err := cw.WriteString(string("$type")); err != nil { 2000 + return err 2001 + } 2002 + 2003 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.tag"))); err != nil { 2004 + return err 2005 + } 2006 + if _, err := cw.WriteString(string("io.atcr.tag")); err != nil { 2007 + return err 2008 + } 2009 + 2010 + // t.Manifest (string) (string) 2011 + if t.Manifest != nil { 2012 + 2013 + if len("manifest") > 8192 { 2014 + return xerrors.Errorf("Value in field \"manifest\" was too long") 2015 + } 2016 + 2017 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("manifest"))); err != nil { 2018 + return err 2019 + } 2020 + if _, err := cw.WriteString(string("manifest")); err != nil { 2021 + return err 2022 + } 2023 + 2024 + if t.Manifest == nil { 2025 + if _, err := cw.Write(cbg.CborNull); err != nil { 2026 + return err 2027 + } 2028 + } else { 2029 + if len(*t.Manifest) > 8192 { 2030 + return xerrors.Errorf("Value in field t.Manifest was too long") 2031 + } 2032 + 2033 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Manifest))); err != nil { 2034 + return err 2035 + } 2036 + if _, err := cw.WriteString(string(*t.Manifest)); err != nil { 2037 + return err 2038 + } 2039 + } 2040 + } 2041 + 2042 + // t.CreatedAt (string) (string) 2043 + if len("createdAt") > 8192 { 2044 + return xerrors.Errorf("Value in field \"createdAt\" was too long") 2045 + } 2046 + 2047 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil { 2048 + return err 2049 + } 2050 + if _, err := cw.WriteString(string("createdAt")); err != nil { 2051 + return err 2052 + } 2053 + 2054 + if len(t.CreatedAt) > 8192 { 2055 + return xerrors.Errorf("Value in field t.CreatedAt was too long") 2056 + } 2057 + 2058 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil { 2059 + return err 2060 + } 2061 + if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 2062 + return err 2063 + } 2064 + 2065 + // t.Repository (string) (string) 2066 + if len("repository") > 8192 { 2067 + return xerrors.Errorf("Value in field \"repository\" was too long") 2068 + } 2069 + 2070 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repository"))); err != nil { 2071 + return err 2072 + } 2073 + if _, err := cw.WriteString(string("repository")); err != nil { 2074 + return err 2075 + } 2076 + 2077 + if len(t.Repository) > 8192 { 2078 + return xerrors.Errorf("Value in field t.Repository was too long") 2079 + } 2080 + 2081 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repository))); err != nil { 2082 + return err 2083 + } 2084 + if _, err := cw.WriteString(string(t.Repository)); err != nil { 2085 + return err 2086 + } 2087 + 2088 + // t.ManifestDigest (string) (string) 2089 + if t.ManifestDigest != nil { 2090 + 2091 + if len("manifestDigest") > 8192 { 2092 + return xerrors.Errorf("Value in field \"manifestDigest\" was too long") 2093 + } 2094 + 2095 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("manifestDigest"))); err != nil { 2096 + return err 2097 + } 2098 + if _, err := cw.WriteString(string("manifestDigest")); err != nil { 2099 + return err 2100 + } 2101 + 2102 + if t.ManifestDigest == nil { 2103 + if _, err := cw.Write(cbg.CborNull); err != nil { 2104 + return err 2105 + } 2106 + } else { 2107 + if len(*t.ManifestDigest) > 8192 { 2108 + return xerrors.Errorf("Value in field t.ManifestDigest was too long") 2109 + } 2110 + 2111 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.ManifestDigest))); err != nil { 2112 + return err 2113 + } 2114 + if _, err := cw.WriteString(string(*t.ManifestDigest)); err != nil { 2115 + return err 2116 + } 2117 + } 2118 + } 2119 + return nil 2120 + } 2121 + 2122 + func (t *Tag) UnmarshalCBOR(r io.Reader) (err error) { 2123 + *t = Tag{} 2124 + 2125 + cr := cbg.NewCborReader(r) 2126 + 2127 + maj, extra, err := cr.ReadHeader() 2128 + if err != nil { 2129 + return err 2130 + } 2131 + defer func() { 2132 + if err == io.EOF { 2133 + err = io.ErrUnexpectedEOF 2134 + } 2135 + }() 2136 + 2137 + if maj != cbg.MajMap { 2138 + return fmt.Errorf("cbor input should be of type map") 2139 + } 2140 + 2141 + if extra > cbg.MaxLength { 2142 + return fmt.Errorf("Tag: map struct too large (%d)", extra) 2143 + } 2144 + 2145 + n := extra 2146 + 2147 + nameBuf := make([]byte, 14) 2148 + for i := uint64(0); i < n; i++ { 2149 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 2150 + if err != nil { 2151 + return err 2152 + } 2153 + 2154 + if !ok { 2155 + // Field doesn't exist on this type, so ignore it 2156 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 2157 + return err 2158 + } 2159 + continue 2160 + } 2161 + 2162 + switch string(nameBuf[:nameLen]) { 2163 + // t.Tag (string) (string) 2164 + case "tag": 2165 + 2166 + { 2167 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2168 + if err != nil { 2169 + return err 2170 + } 2171 + 2172 + t.Tag = string(sval) 2173 + } 2174 + // t.LexiconTypeID (string) (string) 2175 + case "$type": 2176 + 2177 + { 2178 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2179 + if err != nil { 2180 + return err 2181 + } 2182 + 2183 + t.LexiconTypeID = string(sval) 2184 + } 2185 + // t.Manifest (string) (string) 2186 + case "manifest": 2187 + 2188 + { 2189 + b, err := cr.ReadByte() 2190 + if err != nil { 2191 + return err 2192 + } 2193 + if b != cbg.CborNull[0] { 2194 + if err := cr.UnreadByte(); err != nil { 2195 + return err 281 2196 } 282 2197 2198 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2199 + if err != nil { 2200 + return err 2201 + } 2202 + 2203 + t.Manifest = (*string)(&sval) 2204 + } 2205 + } 2206 + // t.CreatedAt (string) (string) 2207 + case "createdAt": 2208 + 2209 + { 2210 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2211 + if err != nil { 2212 + return err 2213 + } 2214 + 2215 + t.CreatedAt = string(sval) 2216 + } 2217 + // t.Repository (string) (string) 2218 + case "repository": 2219 + 2220 + { 2221 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2222 + if err != nil { 2223 + return err 2224 + } 2225 + 2226 + t.Repository = string(sval) 2227 + } 2228 + // t.ManifestDigest (string) (string) 2229 + case "manifestDigest": 2230 + 2231 + { 2232 + b, err := cr.ReadByte() 2233 + if err != nil { 2234 + return err 2235 + } 2236 + if b != cbg.CborNull[0] { 2237 + if err := cr.UnreadByte(); err != nil { 2238 + return err 2239 + } 2240 + 2241 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2242 + if err != nil { 2243 + return err 2244 + } 2245 + 2246 + t.ManifestDigest = (*string)(&sval) 283 2247 } 284 2248 } 285 2249 ··· 293 2257 294 2258 return nil 295 2259 } 296 - func (t *CaptainRecord) MarshalCBOR(w io.Writer) error { 2260 + func (t *SailorProfile) MarshalCBOR(w io.Writer) error { 297 2261 if t == nil { 298 2262 _, err := w.Write(cbg.CborNull) 299 2263 return err 300 2264 } 301 2265 302 2266 cw := cbg.NewCborWriter(w) 303 - fieldCount := 8 2267 + fieldCount := 4 304 2268 305 - if t.Region == "" { 2269 + if t.DefaultHold == nil { 306 2270 fieldCount-- 307 2271 } 308 2272 309 - if t.Provider == "" { 2273 + if t.UpdatedAt == nil { 310 2274 fieldCount-- 311 2275 } 312 2276 ··· 314 2278 return err 315 2279 } 316 2280 317 - // t.Type (string) (string) 2281 + // t.LexiconTypeID (string) (string) 318 2282 if len("$type") > 8192 { 319 2283 return xerrors.Errorf("Value in field \"$type\" was too long") 320 2284 } ··· 326 2290 return err 327 2291 } 328 2292 329 - if len(t.Type) > 8192 { 330 - return xerrors.Errorf("Value in field t.Type was too long") 2293 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.sailor.profile"))); err != nil { 2294 + return err 2295 + } 2296 + if _, err := cw.WriteString(string("io.atcr.sailor.profile")); err != nil { 2297 + return err 2298 + } 2299 + 2300 + // t.CreatedAt (string) (string) 2301 + if len("createdAt") > 8192 { 2302 + return xerrors.Errorf("Value in field \"createdAt\" was too long") 331 2303 } 332 2304 333 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Type))); err != nil { 2305 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil { 334 2306 return err 335 2307 } 336 - if _, err := cw.WriteString(string(t.Type)); err != nil { 2308 + if _, err := cw.WriteString(string("createdAt")); err != nil { 2309 + return err 2310 + } 2311 + 2312 + if len(t.CreatedAt) > 8192 { 2313 + return xerrors.Errorf("Value in field t.CreatedAt was too long") 2314 + } 2315 + 2316 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil { 2317 + return err 2318 + } 2319 + if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 2320 + return err 2321 + } 2322 + 2323 + // t.UpdatedAt (string) (string) 2324 + if t.UpdatedAt != nil { 2325 + 2326 + if len("updatedAt") > 8192 { 2327 + return xerrors.Errorf("Value in field \"updatedAt\" was too long") 2328 + } 2329 + 2330 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("updatedAt"))); err != nil { 2331 + return err 2332 + } 2333 + if _, err := cw.WriteString(string("updatedAt")); err != nil { 2334 + return err 2335 + } 2336 + 2337 + if t.UpdatedAt == nil { 2338 + if _, err := cw.Write(cbg.CborNull); err != nil { 2339 + return err 2340 + } 2341 + } else { 2342 + if len(*t.UpdatedAt) > 8192 { 2343 + return xerrors.Errorf("Value in field t.UpdatedAt was too long") 2344 + } 2345 + 2346 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.UpdatedAt))); err != nil { 2347 + return err 2348 + } 2349 + if _, err := cw.WriteString(string(*t.UpdatedAt)); err != nil { 2350 + return err 2351 + } 2352 + } 2353 + } 2354 + 2355 + // t.DefaultHold (string) (string) 2356 + if t.DefaultHold != nil { 2357 + 2358 + if len("defaultHold") > 8192 { 2359 + return xerrors.Errorf("Value in field \"defaultHold\" was too long") 2360 + } 2361 + 2362 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("defaultHold"))); err != nil { 2363 + return err 2364 + } 2365 + if _, err := cw.WriteString(string("defaultHold")); err != nil { 2366 + return err 2367 + } 2368 + 2369 + if t.DefaultHold == nil { 2370 + if _, err := cw.Write(cbg.CborNull); err != nil { 2371 + return err 2372 + } 2373 + } else { 2374 + if len(*t.DefaultHold) > 8192 { 2375 + return xerrors.Errorf("Value in field t.DefaultHold was too long") 2376 + } 2377 + 2378 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.DefaultHold))); err != nil { 2379 + return err 2380 + } 2381 + if _, err := cw.WriteString(string(*t.DefaultHold)); err != nil { 2382 + return err 2383 + } 2384 + } 2385 + } 2386 + return nil 2387 + } 2388 + 2389 + func (t *SailorProfile) UnmarshalCBOR(r io.Reader) (err error) { 2390 + *t = SailorProfile{} 2391 + 2392 + cr := cbg.NewCborReader(r) 2393 + 2394 + maj, extra, err := cr.ReadHeader() 2395 + if err != nil { 2396 + return err 2397 + } 2398 + defer func() { 2399 + if err == io.EOF { 2400 + err = io.ErrUnexpectedEOF 2401 + } 2402 + }() 2403 + 2404 + if maj != cbg.MajMap { 2405 + return fmt.Errorf("cbor input should be of type map") 2406 + } 2407 + 2408 + if extra > cbg.MaxLength { 2409 + return fmt.Errorf("SailorProfile: map struct too large (%d)", extra) 2410 + } 2411 + 2412 + n := extra 2413 + 2414 + nameBuf := make([]byte, 11) 2415 + for i := uint64(0); i < n; i++ { 2416 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 2417 + if err != nil { 2418 + return err 2419 + } 2420 + 2421 + if !ok { 2422 + // Field doesn't exist on this type, so ignore it 2423 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 2424 + return err 2425 + } 2426 + continue 2427 + } 2428 + 2429 + switch string(nameBuf[:nameLen]) { 2430 + // t.LexiconTypeID (string) (string) 2431 + case "$type": 2432 + 2433 + { 2434 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2435 + if err != nil { 2436 + return err 2437 + } 2438 + 2439 + t.LexiconTypeID = string(sval) 2440 + } 2441 + // t.CreatedAt (string) (string) 2442 + case "createdAt": 2443 + 2444 + { 2445 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2446 + if err != nil { 2447 + return err 2448 + } 2449 + 2450 + t.CreatedAt = string(sval) 2451 + } 2452 + // t.UpdatedAt (string) (string) 2453 + case "updatedAt": 2454 + 2455 + { 2456 + b, err := cr.ReadByte() 2457 + if err != nil { 2458 + return err 2459 + } 2460 + if b != cbg.CborNull[0] { 2461 + if err := cr.UnreadByte(); err != nil { 2462 + return err 2463 + } 2464 + 2465 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2466 + if err != nil { 2467 + return err 2468 + } 2469 + 2470 + t.UpdatedAt = (*string)(&sval) 2471 + } 2472 + } 2473 + // t.DefaultHold (string) (string) 2474 + case "defaultHold": 2475 + 2476 + { 2477 + b, err := cr.ReadByte() 2478 + if err != nil { 2479 + return err 2480 + } 2481 + if b != cbg.CborNull[0] { 2482 + if err := cr.UnreadByte(); err != nil { 2483 + return err 2484 + } 2485 + 2486 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2487 + if err != nil { 2488 + return err 2489 + } 2490 + 2491 + t.DefaultHold = (*string)(&sval) 2492 + } 2493 + } 2494 + 2495 + default: 2496 + // Field doesn't exist on this type, so ignore it 2497 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 2498 + return err 2499 + } 2500 + } 2501 + } 2502 + 2503 + return nil 2504 + } 2505 + func (t *SailorStar) MarshalCBOR(w io.Writer) error { 2506 + if t == nil { 2507 + _, err := w.Write(cbg.CborNull) 2508 + return err 2509 + } 2510 + 2511 + cw := cbg.NewCborWriter(w) 2512 + 2513 + if _, err := cw.Write([]byte{163}); err != nil { 2514 + return err 2515 + } 2516 + 2517 + // t.LexiconTypeID (string) (string) 2518 + if len("$type") > 8192 { 2519 + return xerrors.Errorf("Value in field \"$type\" was too long") 2520 + } 2521 + 2522 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 2523 + return err 2524 + } 2525 + if _, err := cw.WriteString(string("$type")); err != nil { 2526 + return err 2527 + } 2528 + 2529 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.sailor.star"))); err != nil { 2530 + return err 2531 + } 2532 + if _, err := cw.WriteString(string("io.atcr.sailor.star")); err != nil { 2533 + return err 2534 + } 2535 + 2536 + // t.Subject (atproto.SailorStar_Subject) (struct) 2537 + if len("subject") > 8192 { 2538 + return xerrors.Errorf("Value in field \"subject\" was too long") 2539 + } 2540 + 2541 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("subject"))); err != nil { 2542 + return err 2543 + } 2544 + if _, err := cw.WriteString(string("subject")); err != nil { 2545 + return err 2546 + } 2547 + 2548 + if err := t.Subject.MarshalCBOR(cw); err != nil { 2549 + return err 2550 + } 2551 + 2552 + // t.CreatedAt (string) (string) 2553 + if len("createdAt") > 8192 { 2554 + return xerrors.Errorf("Value in field \"createdAt\" was too long") 2555 + } 2556 + 2557 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("createdAt"))); err != nil { 2558 + return err 2559 + } 2560 + if _, err := cw.WriteString(string("createdAt")); err != nil { 2561 + return err 2562 + } 2563 + 2564 + if len(t.CreatedAt) > 8192 { 2565 + return xerrors.Errorf("Value in field t.CreatedAt was too long") 2566 + } 2567 + 2568 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.CreatedAt))); err != nil { 2569 + return err 2570 + } 2571 + if _, err := cw.WriteString(string(t.CreatedAt)); err != nil { 2572 + return err 2573 + } 2574 + return nil 2575 + } 2576 + 2577 + func (t *SailorStar) UnmarshalCBOR(r io.Reader) (err error) { 2578 + *t = SailorStar{} 2579 + 2580 + cr := cbg.NewCborReader(r) 2581 + 2582 + maj, extra, err := cr.ReadHeader() 2583 + if err != nil { 2584 + return err 2585 + } 2586 + defer func() { 2587 + if err == io.EOF { 2588 + err = io.ErrUnexpectedEOF 2589 + } 2590 + }() 2591 + 2592 + if maj != cbg.MajMap { 2593 + return fmt.Errorf("cbor input should be of type map") 2594 + } 2595 + 2596 + if extra > cbg.MaxLength { 2597 + return fmt.Errorf("SailorStar: map struct too large (%d)", extra) 2598 + } 2599 + 2600 + n := extra 2601 + 2602 + nameBuf := make([]byte, 9) 2603 + for i := uint64(0); i < n; i++ { 2604 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 2605 + if err != nil { 2606 + return err 2607 + } 2608 + 2609 + if !ok { 2610 + // Field doesn't exist on this type, so ignore it 2611 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 2612 + return err 2613 + } 2614 + continue 2615 + } 2616 + 2617 + switch string(nameBuf[:nameLen]) { 2618 + // t.LexiconTypeID (string) (string) 2619 + case "$type": 2620 + 2621 + { 2622 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2623 + if err != nil { 2624 + return err 2625 + } 2626 + 2627 + t.LexiconTypeID = string(sval) 2628 + } 2629 + // t.Subject (atproto.SailorStar_Subject) (struct) 2630 + case "subject": 2631 + 2632 + { 2633 + 2634 + if err := t.Subject.UnmarshalCBOR(cr); err != nil { 2635 + return xerrors.Errorf("unmarshaling t.Subject: %w", err) 2636 + } 2637 + 2638 + } 2639 + // t.CreatedAt (string) (string) 2640 + case "createdAt": 2641 + 2642 + { 2643 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2644 + if err != nil { 2645 + return err 2646 + } 2647 + 2648 + t.CreatedAt = string(sval) 2649 + } 2650 + 2651 + default: 2652 + // Field doesn't exist on this type, so ignore it 2653 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 2654 + return err 2655 + } 2656 + } 2657 + } 2658 + 2659 + return nil 2660 + } 2661 + func (t *SailorStar_Subject) MarshalCBOR(w io.Writer) error { 2662 + if t == nil { 2663 + _, err := w.Write(cbg.CborNull) 2664 + return err 2665 + } 2666 + 2667 + cw := cbg.NewCborWriter(w) 2668 + fieldCount := 3 2669 + 2670 + if t.LexiconTypeID == "" { 2671 + fieldCount-- 2672 + } 2673 + 2674 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 2675 + return err 2676 + } 2677 + 2678 + // t.Did (string) (string) 2679 + if len("did") > 8192 { 2680 + return xerrors.Errorf("Value in field \"did\" was too long") 2681 + } 2682 + 2683 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("did"))); err != nil { 2684 + return err 2685 + } 2686 + if _, err := cw.WriteString(string("did")); err != nil { 2687 + return err 2688 + } 2689 + 2690 + if len(t.Did) > 8192 { 2691 + return xerrors.Errorf("Value in field t.Did was too long") 2692 + } 2693 + 2694 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Did))); err != nil { 2695 + return err 2696 + } 2697 + if _, err := cw.WriteString(string(t.Did)); err != nil { 2698 + return err 2699 + } 2700 + 2701 + // t.LexiconTypeID (string) (string) 2702 + if t.LexiconTypeID != "" { 2703 + 2704 + if len("$type") > 8192 { 2705 + return xerrors.Errorf("Value in field \"$type\" was too long") 2706 + } 2707 + 2708 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 2709 + return err 2710 + } 2711 + if _, err := cw.WriteString(string("$type")); err != nil { 2712 + return err 2713 + } 2714 + 2715 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.sailor.star#subject"))); err != nil { 2716 + return err 2717 + } 2718 + if _, err := cw.WriteString(string("io.atcr.sailor.star#subject")); err != nil { 2719 + return err 2720 + } 2721 + } 2722 + 2723 + // t.Repository (string) (string) 2724 + if len("repository") > 8192 { 2725 + return xerrors.Errorf("Value in field \"repository\" was too long") 2726 + } 2727 + 2728 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("repository"))); err != nil { 2729 + return err 2730 + } 2731 + if _, err := cw.WriteString(string("repository")); err != nil { 2732 + return err 2733 + } 2734 + 2735 + if len(t.Repository) > 8192 { 2736 + return xerrors.Errorf("Value in field t.Repository was too long") 2737 + } 2738 + 2739 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Repository))); err != nil { 2740 + return err 2741 + } 2742 + if _, err := cw.WriteString(string(t.Repository)); err != nil { 2743 + return err 2744 + } 2745 + return nil 2746 + } 2747 + 2748 + func (t *SailorStar_Subject) UnmarshalCBOR(r io.Reader) (err error) { 2749 + *t = SailorStar_Subject{} 2750 + 2751 + cr := cbg.NewCborReader(r) 2752 + 2753 + maj, extra, err := cr.ReadHeader() 2754 + if err != nil { 2755 + return err 2756 + } 2757 + defer func() { 2758 + if err == io.EOF { 2759 + err = io.ErrUnexpectedEOF 2760 + } 2761 + }() 2762 + 2763 + if maj != cbg.MajMap { 2764 + return fmt.Errorf("cbor input should be of type map") 2765 + } 2766 + 2767 + if extra > cbg.MaxLength { 2768 + return fmt.Errorf("SailorStar_Subject: map struct too large (%d)", extra) 2769 + } 2770 + 2771 + n := extra 2772 + 2773 + nameBuf := make([]byte, 10) 2774 + for i := uint64(0); i < n; i++ { 2775 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 2776 + if err != nil { 2777 + return err 2778 + } 2779 + 2780 + if !ok { 2781 + // Field doesn't exist on this type, so ignore it 2782 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 2783 + return err 2784 + } 2785 + continue 2786 + } 2787 + 2788 + switch string(nameBuf[:nameLen]) { 2789 + // t.Did (string) (string) 2790 + case "did": 2791 + 2792 + { 2793 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2794 + if err != nil { 2795 + return err 2796 + } 2797 + 2798 + t.Did = string(sval) 2799 + } 2800 + // t.LexiconTypeID (string) (string) 2801 + case "$type": 2802 + 2803 + { 2804 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2805 + if err != nil { 2806 + return err 2807 + } 2808 + 2809 + t.LexiconTypeID = string(sval) 2810 + } 2811 + // t.Repository (string) (string) 2812 + case "repository": 2813 + 2814 + { 2815 + sval, err := cbg.ReadStringWithMax(cr, 8192) 2816 + if err != nil { 2817 + return err 2818 + } 2819 + 2820 + t.Repository = string(sval) 2821 + } 2822 + 2823 + default: 2824 + // Field doesn't exist on this type, so ignore it 2825 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 2826 + return err 2827 + } 2828 + } 2829 + } 2830 + 2831 + return nil 2832 + } 2833 + func (t *HoldCaptain) MarshalCBOR(w io.Writer) error { 2834 + if t == nil { 2835 + _, err := w.Write(cbg.CborNull) 2836 + return err 2837 + } 2838 + 2839 + cw := cbg.NewCborWriter(w) 2840 + fieldCount := 8 2841 + 2842 + if t.Provider == nil { 2843 + fieldCount-- 2844 + } 2845 + 2846 + if t.Region == nil { 2847 + fieldCount-- 2848 + } 2849 + 2850 + if _, err := cw.Write(cbg.CborEncodeMajorType(cbg.MajMap, uint64(fieldCount))); err != nil { 2851 + return err 2852 + } 2853 + 2854 + // t.LexiconTypeID (string) (string) 2855 + if len("$type") > 8192 { 2856 + return xerrors.Errorf("Value in field \"$type\" was too long") 2857 + } 2858 + 2859 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 2860 + return err 2861 + } 2862 + if _, err := cw.WriteString(string("$type")); err != nil { 2863 + return err 2864 + } 2865 + 2866 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.hold.captain"))); err != nil { 2867 + return err 2868 + } 2869 + if _, err := cw.WriteString(string("io.atcr.hold.captain")); err != nil { 337 2870 return err 338 2871 } 339 2872 ··· 377 2910 } 378 2911 379 2912 // t.Region (string) (string) 380 - if t.Region != "" { 2913 + if t.Region != nil { 381 2914 382 2915 if len("region") > 8192 { 383 2916 return xerrors.Errorf("Value in field \"region\" was too long") ··· 390 2923 return err 391 2924 } 392 2925 393 - if len(t.Region) > 8192 { 394 - return xerrors.Errorf("Value in field t.Region was too long") 395 - } 2926 + if t.Region == nil { 2927 + if _, err := cw.Write(cbg.CborNull); err != nil { 2928 + return err 2929 + } 2930 + } else { 2931 + if len(*t.Region) > 8192 { 2932 + return xerrors.Errorf("Value in field t.Region was too long") 2933 + } 396 2934 397 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Region))); err != nil { 398 - return err 399 - } 400 - if _, err := cw.WriteString(string(t.Region)); err != nil { 401 - return err 2935 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Region))); err != nil { 2936 + return err 2937 + } 2938 + if _, err := cw.WriteString(string(*t.Region)); err != nil { 2939 + return err 2940 + } 402 2941 } 403 2942 } 404 2943 405 2944 // t.Provider (string) (string) 406 - if t.Provider != "" { 2945 + if t.Provider != nil { 407 2946 408 2947 if len("provider") > 8192 { 409 2948 return xerrors.Errorf("Value in field \"provider\" was too long") ··· 416 2955 return err 417 2956 } 418 2957 419 - if len(t.Provider) > 8192 { 420 - return xerrors.Errorf("Value in field t.Provider was too long") 421 - } 2958 + if t.Provider == nil { 2959 + if _, err := cw.Write(cbg.CborNull); err != nil { 2960 + return err 2961 + } 2962 + } else { 2963 + if len(*t.Provider) > 8192 { 2964 + return xerrors.Errorf("Value in field t.Provider was too long") 2965 + } 422 2966 423 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Provider))); err != nil { 424 - return err 425 - } 426 - if _, err := cw.WriteString(string(t.Provider)); err != nil { 427 - return err 2967 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Provider))); err != nil { 2968 + return err 2969 + } 2970 + if _, err := cw.WriteString(string(*t.Provider)); err != nil { 2971 + return err 2972 + } 428 2973 } 429 2974 } 430 2975 ··· 485 3030 return nil 486 3031 } 487 3032 488 - func (t *CaptainRecord) UnmarshalCBOR(r io.Reader) (err error) { 489 - *t = CaptainRecord{} 3033 + func (t *HoldCaptain) UnmarshalCBOR(r io.Reader) (err error) { 3034 + *t = HoldCaptain{} 490 3035 491 3036 cr := cbg.NewCborReader(r) 492 3037 ··· 505 3050 } 506 3051 507 3052 if extra > cbg.MaxLength { 508 - return fmt.Errorf("CaptainRecord: map struct too large (%d)", extra) 3053 + return fmt.Errorf("HoldCaptain: map struct too large (%d)", extra) 509 3054 } 510 3055 511 3056 n := extra ··· 526 3071 } 527 3072 528 3073 switch string(nameBuf[:nameLen]) { 529 - // t.Type (string) (string) 3074 + // t.LexiconTypeID (string) (string) 530 3075 case "$type": 531 3076 532 3077 { ··· 535 3080 return err 536 3081 } 537 3082 538 - t.Type = string(sval) 3083 + t.LexiconTypeID = string(sval) 539 3084 } 540 3085 // t.Owner (string) (string) 541 3086 case "owner": ··· 570 3115 case "region": 571 3116 572 3117 { 573 - sval, err := cbg.ReadStringWithMax(cr, 8192) 3118 + b, err := cr.ReadByte() 574 3119 if err != nil { 575 3120 return err 576 3121 } 3122 + if b != cbg.CborNull[0] { 3123 + if err := cr.UnreadByte(); err != nil { 3124 + return err 3125 + } 577 3126 578 - t.Region = string(sval) 3127 + sval, err := cbg.ReadStringWithMax(cr, 8192) 3128 + if err != nil { 3129 + return err 3130 + } 3131 + 3132 + t.Region = (*string)(&sval) 3133 + } 579 3134 } 580 3135 // t.Provider (string) (string) 581 3136 case "provider": 582 3137 583 3138 { 584 - sval, err := cbg.ReadStringWithMax(cr, 8192) 3139 + b, err := cr.ReadByte() 585 3140 if err != nil { 586 3141 return err 587 3142 } 3143 + if b != cbg.CborNull[0] { 3144 + if err := cr.UnreadByte(); err != nil { 3145 + return err 3146 + } 588 3147 589 - t.Provider = string(sval) 3148 + sval, err := cbg.ReadStringWithMax(cr, 8192) 3149 + if err != nil { 3150 + return err 3151 + } 3152 + 3153 + t.Provider = (*string)(&sval) 3154 + } 590 3155 } 591 3156 // t.DeployedAt (string) (string) 592 3157 case "deployedAt": ··· 646 3211 647 3212 return nil 648 3213 } 649 - func (t *LayerRecord) MarshalCBOR(w io.Writer) error { 3214 + func (t *HoldCrew) MarshalCBOR(w io.Writer) error { 3215 + if t == nil { 3216 + _, err := w.Write(cbg.CborNull) 3217 + return err 3218 + } 3219 + 3220 + cw := cbg.NewCborWriter(w) 3221 + 3222 + if _, err := cw.Write([]byte{165}); err != nil { 3223 + return err 3224 + } 3225 + 3226 + // t.Role (string) (string) 3227 + if len("role") > 8192 { 3228 + return xerrors.Errorf("Value in field \"role\" was too long") 3229 + } 3230 + 3231 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("role"))); err != nil { 3232 + return err 3233 + } 3234 + if _, err := cw.WriteString(string("role")); err != nil { 3235 + return err 3236 + } 3237 + 3238 + if len(t.Role) > 8192 { 3239 + return xerrors.Errorf("Value in field t.Role was too long") 3240 + } 3241 + 3242 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Role))); err != nil { 3243 + return err 3244 + } 3245 + if _, err := cw.WriteString(string(t.Role)); err != nil { 3246 + return err 3247 + } 3248 + 3249 + // t.LexiconTypeID (string) (string) 3250 + if len("$type") > 8192 { 3251 + return xerrors.Errorf("Value in field \"$type\" was too long") 3252 + } 3253 + 3254 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("$type"))); err != nil { 3255 + return err 3256 + } 3257 + if _, err := cw.WriteString(string("$type")); err != nil { 3258 + return err 3259 + } 3260 + 3261 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.hold.crew"))); err != nil { 3262 + return err 3263 + } 3264 + if _, err := cw.WriteString(string("io.atcr.hold.crew")); err != nil { 3265 + return err 3266 + } 3267 + 3268 + // t.Member (string) (string) 3269 + if len("member") > 8192 { 3270 + return xerrors.Errorf("Value in field \"member\" was too long") 3271 + } 3272 + 3273 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("member"))); err != nil { 3274 + return err 3275 + } 3276 + if _, err := cw.WriteString(string("member")); err != nil { 3277 + return err 3278 + } 3279 + 3280 + if len(t.Member) > 8192 { 3281 + return xerrors.Errorf("Value in field t.Member was too long") 3282 + } 3283 + 3284 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Member))); err != nil { 3285 + return err 3286 + } 3287 + if _, err := cw.WriteString(string(t.Member)); err != nil { 3288 + return err 3289 + } 3290 + 3291 + // t.AddedAt (string) (string) 3292 + if len("addedAt") > 8192 { 3293 + return xerrors.Errorf("Value in field \"addedAt\" was too long") 3294 + } 3295 + 3296 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("addedAt"))); err != nil { 3297 + return err 3298 + } 3299 + if _, err := cw.WriteString(string("addedAt")); err != nil { 3300 + return err 3301 + } 3302 + 3303 + if len(t.AddedAt) > 8192 { 3304 + return xerrors.Errorf("Value in field t.AddedAt was too long") 3305 + } 3306 + 3307 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.AddedAt))); err != nil { 3308 + return err 3309 + } 3310 + if _, err := cw.WriteString(string(t.AddedAt)); err != nil { 3311 + return err 3312 + } 3313 + 3314 + // t.Permissions ([]string) (slice) 3315 + if len("permissions") > 8192 { 3316 + return xerrors.Errorf("Value in field \"permissions\" was too long") 3317 + } 3318 + 3319 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("permissions"))); err != nil { 3320 + return err 3321 + } 3322 + if _, err := cw.WriteString(string("permissions")); err != nil { 3323 + return err 3324 + } 3325 + 3326 + if len(t.Permissions) > 8192 { 3327 + return xerrors.Errorf("Slice value in field t.Permissions was too long") 3328 + } 3329 + 3330 + if err := cw.WriteMajorTypeHeader(cbg.MajArray, uint64(len(t.Permissions))); err != nil { 3331 + return err 3332 + } 3333 + for _, v := range t.Permissions { 3334 + if len(v) > 8192 { 3335 + return xerrors.Errorf("Value in field v was too long") 3336 + } 3337 + 3338 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(v))); err != nil { 3339 + return err 3340 + } 3341 + if _, err := cw.WriteString(string(v)); err != nil { 3342 + return err 3343 + } 3344 + 3345 + } 3346 + return nil 3347 + } 3348 + 3349 + func (t *HoldCrew) UnmarshalCBOR(r io.Reader) (err error) { 3350 + *t = HoldCrew{} 3351 + 3352 + cr := cbg.NewCborReader(r) 3353 + 3354 + maj, extra, err := cr.ReadHeader() 3355 + if err != nil { 3356 + return err 3357 + } 3358 + defer func() { 3359 + if err == io.EOF { 3360 + err = io.ErrUnexpectedEOF 3361 + } 3362 + }() 3363 + 3364 + if maj != cbg.MajMap { 3365 + return fmt.Errorf("cbor input should be of type map") 3366 + } 3367 + 3368 + if extra > cbg.MaxLength { 3369 + return fmt.Errorf("HoldCrew: map struct too large (%d)", extra) 3370 + } 3371 + 3372 + n := extra 3373 + 3374 + nameBuf := make([]byte, 11) 3375 + for i := uint64(0); i < n; i++ { 3376 + nameLen, ok, err := cbg.ReadFullStringIntoBuf(cr, nameBuf, 8192) 3377 + if err != nil { 3378 + return err 3379 + } 3380 + 3381 + if !ok { 3382 + // Field doesn't exist on this type, so ignore it 3383 + if err := cbg.ScanForLinks(cr, func(cid.Cid) {}); err != nil { 3384 + return err 3385 + } 3386 + continue 3387 + } 3388 + 3389 + switch string(nameBuf[:nameLen]) { 3390 + // t.Role (string) (string) 3391 + case "role": 3392 + 3393 + { 3394 + sval, err := cbg.ReadStringWithMax(cr, 8192) 3395 + if err != nil { 3396 + return err 3397 + } 3398 + 3399 + t.Role = string(sval) 3400 + } 3401 + // t.LexiconTypeID (string) (string) 3402 + case "$type": 3403 + 3404 + { 3405 + sval, err := cbg.ReadStringWithMax(cr, 8192) 3406 + if err != nil { 3407 + return err 3408 + } 3409 + 3410 + t.LexiconTypeID = string(sval) 3411 + } 3412 + // t.Member (string) (string) 3413 + case "member": 3414 + 3415 + { 3416 + sval, err := cbg.ReadStringWithMax(cr, 8192) 3417 + if err != nil { 3418 + return err 3419 + } 3420 + 3421 + t.Member = string(sval) 3422 + } 3423 + // t.AddedAt (string) (string) 3424 + case "addedAt": 3425 + 3426 + { 3427 + sval, err := cbg.ReadStringWithMax(cr, 8192) 3428 + if err != nil { 3429 + return err 3430 + } 3431 + 3432 + t.AddedAt = string(sval) 3433 + } 3434 + // t.Permissions ([]string) (slice) 3435 + case "permissions": 3436 + 3437 + maj, extra, err = cr.ReadHeader() 3438 + if err != nil { 3439 + return err 3440 + } 3441 + 3442 + if extra > 8192 { 3443 + return fmt.Errorf("t.Permissions: array too large (%d)", extra) 3444 + } 3445 + 3446 + if maj != cbg.MajArray { 3447 + return fmt.Errorf("expected cbor array") 3448 + } 3449 + 3450 + if extra > 0 { 3451 + t.Permissions = make([]string, extra) 3452 + } 3453 + 3454 + for i := 0; i < int(extra); i++ { 3455 + { 3456 + var maj byte 3457 + var extra uint64 3458 + var err error 3459 + _ = maj 3460 + _ = extra 3461 + _ = err 3462 + 3463 + { 3464 + sval, err := cbg.ReadStringWithMax(cr, 8192) 3465 + if err != nil { 3466 + return err 3467 + } 3468 + 3469 + t.Permissions[i] = string(sval) 3470 + } 3471 + 3472 + } 3473 + } 3474 + 3475 + default: 3476 + // Field doesn't exist on this type, so ignore it 3477 + if err := cbg.ScanForLinks(r, func(cid.Cid) {}); err != nil { 3478 + return err 3479 + } 3480 + } 3481 + } 3482 + 3483 + return nil 3484 + } 3485 + func (t *HoldLayer) MarshalCBOR(w io.Writer) error { 650 3486 if t == nil { 651 3487 _, err := w.Write(cbg.CborNull) 652 3488 return err ··· 680 3516 } 681 3517 } 682 3518 683 - // t.Type (string) (string) 3519 + // t.LexiconTypeID (string) (string) 684 3520 if len("$type") > 8192 { 685 3521 return xerrors.Errorf("Value in field \"$type\" was too long") 686 3522 } ··· 692 3528 return err 693 3529 } 694 3530 695 - if len(t.Type) > 8192 { 696 - return xerrors.Errorf("Value in field t.Type was too long") 697 - } 698 - 699 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Type))); err != nil { 3531 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("io.atcr.hold.layer"))); err != nil { 700 3532 return err 701 3533 } 702 - if _, err := cw.WriteString(string(t.Type)); err != nil { 3534 + if _, err := cw.WriteString(string("io.atcr.hold.layer")); err != nil { 703 3535 return err 704 3536 } 705 3537 ··· 726 3558 return err 727 3559 } 728 3560 729 - // t.UserDID (string) (string) 3561 + // t.UserDid (string) (string) 730 3562 if len("userDid") > 8192 { 731 3563 return xerrors.Errorf("Value in field \"userDid\" was too long") 732 3564 } ··· 738 3570 return err 739 3571 } 740 3572 741 - if len(t.UserDID) > 8192 { 742 - return xerrors.Errorf("Value in field t.UserDID was too long") 3573 + if len(t.UserDid) > 8192 { 3574 + return xerrors.Errorf("Value in field t.UserDid was too long") 743 3575 } 744 3576 745 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.UserDID))); err != nil { 3577 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.UserDid))); err != nil { 746 3578 return err 747 3579 } 748 - if _, err := cw.WriteString(string(t.UserDID)); err != nil { 3580 + if _, err := cw.WriteString(string(t.UserDid)); err != nil { 749 3581 return err 750 3582 } 751 3583 ··· 843 3675 return nil 844 3676 } 845 3677 846 - func (t *LayerRecord) UnmarshalCBOR(r io.Reader) (err error) { 847 - *t = LayerRecord{} 3678 + func (t *HoldLayer) UnmarshalCBOR(r io.Reader) (err error) { 3679 + *t = HoldLayer{} 848 3680 849 3681 cr := cbg.NewCborReader(r) 850 3682 ··· 863 3695 } 864 3696 865 3697 if extra > cbg.MaxLength { 866 - return fmt.Errorf("LayerRecord: map struct too large (%d)", extra) 3698 + return fmt.Errorf("HoldLayer: map struct too large (%d)", extra) 867 3699 } 868 3700 869 3701 n := extra ··· 910 3742 911 3743 t.Size = int64(extraI) 912 3744 } 913 - // t.Type (string) (string) 3745 + // t.LexiconTypeID (string) (string) 914 3746 case "$type": 915 3747 916 3748 { ··· 919 3751 return err 920 3752 } 921 3753 922 - t.Type = string(sval) 3754 + t.LexiconTypeID = string(sval) 923 3755 } 924 3756 // t.Digest (string) (string) 925 3757 case "digest": ··· 932 3764 933 3765 t.Digest = string(sval) 934 3766 } 935 - // t.UserDID (string) (string) 3767 + // t.UserDid (string) (string) 936 3768 case "userDid": 937 3769 938 3770 { ··· 941 3773 return err 942 3774 } 943 3775 944 - t.UserDID = string(sval) 3776 + t.UserDid = string(sval) 945 3777 } 946 3778 // t.CreatedAt (string) (string) 947 3779 case "createdAt":
+21 -7
pkg/atproto/client.go
··· 13 13 14 14 "github.com/bluesky-social/indigo/atproto/atclient" 15 15 indigo_oauth "github.com/bluesky-social/indigo/atproto/auth/oauth" 16 + lexutil "github.com/bluesky-social/indigo/lex/util" 17 + "github.com/ipfs/go-cid" 16 18 ) 17 19 18 20 // Sentinel errors ··· 301 303 } 302 304 303 305 // UploadBlob uploads binary data to the PDS and returns a blob reference 304 - func (c *Client) UploadBlob(ctx context.Context, data []byte, mimeType string) (*ATProtoBlobRef, error) { 306 + func (c *Client) UploadBlob(ctx context.Context, data []byte, mimeType string) (*lexutil.LexBlob, error) { 305 307 // Use session provider (locked OAuth with DPoP) - prevents nonce races 306 308 if c.sessionProvider != nil { 307 309 var result struct { ··· 310 312 311 313 err := c.sessionProvider.DoWithSession(ctx, c.did, func(session *indigo_oauth.ClientSession) error { 312 314 apiClient := session.APIClient() 313 - // IMPORTANT: Use io.Reader for blob uploads 314 - // LexDo JSON-encodes []byte (base64), but streams io.Reader as raw bytes 315 - // Use the actual MIME type so PDS can validate against blob:image/* scope 316 315 return apiClient.LexDo(ctx, 317 316 "POST", 318 317 mimeType, 319 318 "com.atproto.repo.uploadBlob", 320 319 nil, 321 - bytes.NewReader(data), 320 + data, 322 321 &result, 323 322 ) 324 323 }) ··· 326 325 return nil, fmt.Errorf("uploadBlob failed: %w", err) 327 326 } 328 327 329 - return &result.Blob, nil 328 + return atProtoBlobRefToLexBlob(&result.Blob) 330 329 } 331 330 332 331 // Basic Auth (app passwords) ··· 357 356 return nil, fmt.Errorf("failed to decode response: %w", err) 358 357 } 359 358 360 - return &result.Blob, nil 359 + return atProtoBlobRefToLexBlob(&result.Blob) 360 + } 361 + 362 + // atProtoBlobRefToLexBlob converts an ATProtoBlobRef to a lexutil.LexBlob 363 + func atProtoBlobRefToLexBlob(ref *ATProtoBlobRef) (*lexutil.LexBlob, error) { 364 + // Parse the CID string from the $link field 365 + c, err := cid.Decode(ref.Ref.Link) 366 + if err != nil { 367 + return nil, fmt.Errorf("failed to parse blob CID %q: %w", ref.Ref.Link, err) 368 + } 369 + 370 + return &lexutil.LexBlob{ 371 + Ref: lexutil.LexLink(c), 372 + MimeType: ref.MimeType, 373 + Size: ref.Size, 374 + }, nil 361 375 } 362 376 363 377 // GetBlob downloads a blob by its CID from the PDS
+8 -6
pkg/atproto/client_test.go
··· 386 386 t.Errorf("Content-Type = %v, want %v", r.Header.Get("Content-Type"), mimeType) 387 387 } 388 388 389 - // Send response 389 + // Send response - use a valid CIDv1 in base32 format 390 390 response := `{ 391 391 "blob": { 392 392 "$type": "blob", 393 - "ref": {"$link": "bafytest123"}, 393 + "ref": {"$link": "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"}, 394 394 "mimeType": "application/octet-stream", 395 395 "size": 17 396 396 } ··· 406 406 t.Fatalf("UploadBlob() error = %v", err) 407 407 } 408 408 409 - if blobRef.Type != "blob" { 410 - t.Errorf("Type = %v, want blob", blobRef.Type) 409 + if blobRef.MimeType != mimeType { 410 + t.Errorf("MimeType = %v, want %v", blobRef.MimeType, mimeType) 411 411 } 412 412 413 - if blobRef.Ref.Link != "bafytest123" { 414 - t.Errorf("Ref.Link = %v, want bafytest123", blobRef.Ref.Link) 413 + // LexBlob.Ref is a LexLink (cid.Cid alias), use .String() to get the CID string 414 + expectedCID := "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku" 415 + if blobRef.Ref.String() != expectedCID { 416 + t.Errorf("Ref.String() = %v, want %v", blobRef.Ref.String(), expectedCID) 415 417 } 416 418 417 419 if blobRef.Size != 17 {
+255 -11
pkg/atproto/generate.go
··· 3 3 4 4 package main 5 5 6 - // CBOR Code Generator 6 + // Lexicon and CBOR Code Generator 7 7 // 8 - // This generates optimized CBOR marshaling code for ATProto records. 8 + // This generates: 9 + // 1. Go types from lexicon JSON files (via lex/lexgen library) 10 + // 2. CBOR marshaling code for ATProto records (via cbor-gen) 11 + // 3. Type registration for lexutil (register.go) 9 12 // 10 13 // Usage: 11 14 // go generate ./pkg/atproto/... 12 15 // 13 - // This creates pkg/atproto/cbor_gen.go which should be committed to git. 14 - // Only re-run when you modify types in pkg/atproto/types.go 15 - // 16 - // The //go:generate directive is in lexicon.go 16 + // Key insight: We use RegisterLexiconTypeID: false to avoid generating init() 17 + // blocks that require CBORMarshaler. This breaks the circular dependency between 18 + // lexgen and cbor-gen. See: https://github.com/bluesky-social/indigo/issues/931 19 + 20 + import ( 21 + "bytes" 22 + "encoding/json" 23 + "fmt" 24 + "os" 25 + "os/exec" 26 + "path/filepath" 27 + "strings" 28 + 29 + "github.com/bluesky-social/indigo/atproto/lexicon" 30 + "github.com/bluesky-social/indigo/lex/lexgen" 31 + "golang.org/x/tools/imports" 32 + ) 33 + 34 + func main() { 35 + // Find repo root 36 + repoRoot, err := findRepoRoot() 37 + if err != nil { 38 + fmt.Printf("failed to find repo root: %v\n", err) 39 + os.Exit(1) 40 + } 41 + 42 + pkgDir := filepath.Join(repoRoot, "pkg/atproto") 43 + lexDir := filepath.Join(repoRoot, "lexicons") 44 + 45 + // Step 0: Clean up old register.go to avoid conflicts 46 + // (It will be regenerated at the end) 47 + os.Remove(filepath.Join(pkgDir, "register.go")) 48 + 49 + // Step 1: Load all lexicon schemas into catalog (for cross-references) 50 + fmt.Println("Loading lexicons...") 51 + cat := lexicon.NewBaseCatalog() 52 + if err := cat.LoadDirectory(lexDir); err != nil { 53 + fmt.Printf("failed to load lexicons: %v\n", err) 54 + os.Exit(1) 55 + } 56 + 57 + // Step 2: Generate Go code for each lexicon file 58 + fmt.Println("Running lexgen...") 59 + config := &lexgen.GenConfig{ 60 + RegisterLexiconTypeID: false, // KEY: no init() blocks generated 61 + UnknownType: "map-string-any", 62 + WarningText: "Code generated by generate.go; DO NOT EDIT.", 63 + } 64 + 65 + // Track generated types for register.go 66 + var registeredTypes []typeInfo 67 + 68 + // Walk lexicon directory and generate code for each file 69 + err = filepath.Walk(lexDir, func(path string, info os.FileInfo, err error) error { 70 + if err != nil { 71 + return err 72 + } 73 + if info.IsDir() || !strings.HasSuffix(path, ".json") { 74 + return nil 75 + } 76 + 77 + // Load and parse the schema file 78 + data, err := os.ReadFile(path) 79 + if err != nil { 80 + return fmt.Errorf("failed to read %s: %w", path, err) 81 + } 82 + 83 + var sf lexicon.SchemaFile 84 + if err := json.Unmarshal(data, &sf); err != nil { 85 + return fmt.Errorf("failed to parse %s: %w", path, err) 86 + } 87 + 88 + if err := sf.FinishParse(); err != nil { 89 + return fmt.Errorf("failed to finish parse %s: %w", path, err) 90 + } 91 + 92 + // Flatten the schema 93 + flat, err := lexgen.FlattenSchemaFile(&sf) 94 + if err != nil { 95 + return fmt.Errorf("failed to flatten schema %s: %w", path, err) 96 + } 97 + 98 + // Generate code 99 + var buf bytes.Buffer 100 + gen := &lexgen.CodeGenerator{ 101 + Config: config, 102 + Lex: flat, 103 + Cat: &cat, 104 + Out: &buf, 105 + } 106 + 107 + if err := gen.WriteLexicon(); err != nil { 108 + return fmt.Errorf("failed to generate code for %s: %w", path, err) 109 + } 110 + 111 + // Fix package name: lexgen generates "ioatcr" but we want "atproto" 112 + code := bytes.Replace(buf.Bytes(), []byte("package ioatcr"), []byte("package atproto"), 1) 113 + 114 + // Format with goimports 115 + fileName := gen.FileName() 116 + formatted, err := imports.Process(fileName, code, nil) 117 + if err != nil { 118 + // Write unformatted for debugging 119 + outPath := filepath.Join(pkgDir, fileName) 120 + os.WriteFile(outPath+".broken", code, 0644) 121 + return fmt.Errorf("failed to format %s: %w (wrote to %s.broken)", fileName, err, outPath) 122 + } 123 + 124 + // Write output file 125 + outPath := filepath.Join(pkgDir, fileName) 126 + if err := os.WriteFile(outPath, formatted, 0644); err != nil { 127 + return fmt.Errorf("failed to write %s: %w", outPath, err) 128 + } 129 + 130 + fmt.Printf(" Generated %s\n", fileName) 131 + 132 + // Track type for registration - compute type name from NSID 133 + typeName := nsidToTypeName(sf.ID) 134 + registeredTypes = append(registeredTypes, typeInfo{ 135 + NSID: sf.ID, 136 + TypeName: typeName, 137 + }) 138 + 139 + return nil 140 + }) 141 + if err != nil { 142 + fmt.Printf("lexgen failed: %v\n", err) 143 + os.Exit(1) 144 + } 145 + 146 + // Step 3: Run cbor-gen via exec.Command 147 + // This must be a separate process so it can compile the freshly generated types 148 + fmt.Println("Running cbor-gen...") 149 + if err := runCborGen(repoRoot, pkgDir); err != nil { 150 + fmt.Printf("cbor-gen failed: %v\n", err) 151 + os.Exit(1) 152 + } 153 + 154 + // Step 4: Generate register.go 155 + fmt.Println("Generating register.go...") 156 + if err := generateRegisterFile(pkgDir, registeredTypes); err != nil { 157 + fmt.Printf("failed to generate register.go: %v\n", err) 158 + os.Exit(1) 159 + } 160 + 161 + fmt.Println("Code generation complete!") 162 + } 163 + 164 + type typeInfo struct { 165 + NSID string 166 + TypeName string 167 + } 168 + 169 + // nsidToTypeName converts an NSID to a Go type name 170 + // io.atcr.manifest โ†’ Manifest 171 + // io.atcr.hold.captain โ†’ HoldCaptain 172 + // io.atcr.sailor.profile โ†’ SailorProfile 173 + func nsidToTypeName(nsid string) string { 174 + parts := strings.Split(nsid, ".") 175 + if len(parts) < 3 { 176 + return "" 177 + } 178 + // Skip the first two parts (authority, e.g., "io.atcr") 179 + // and capitalize each remaining part 180 + var result string 181 + for _, part := range parts[2:] { 182 + if len(part) > 0 { 183 + result += strings.ToUpper(part[:1]) + part[1:] 184 + } 185 + } 186 + return result 187 + } 188 + 189 + func runCborGen(repoRoot, pkgDir string) error { 190 + // Create a temporary Go file that runs cbor-gen 191 + cborGenCode := `//go:build ignore 192 + 193 + package main 17 194 18 195 import ( 19 196 "fmt" ··· 25 202 ) 26 203 27 204 func main() { 28 - // Generate map-style encoders for CrewRecord, CaptainRecord, LayerRecord, and TangledProfileRecord 29 205 if err := cbg.WriteMapEncodersToFile("cbor_gen.go", "atproto", 30 - atproto.CrewRecord{}, 31 - atproto.CaptainRecord{}, 32 - atproto.LayerRecord{}, 206 + // Manifest types 207 + atproto.Manifest{}, 208 + atproto.Manifest_BlobReference{}, 209 + atproto.Manifest_ManifestReference{}, 210 + atproto.Manifest_Platform{}, 211 + atproto.Manifest_Annotations{}, 212 + atproto.Manifest_BlobReference_Annotations{}, 213 + atproto.Manifest_ManifestReference_Annotations{}, 214 + // Tag 215 + atproto.Tag{}, 216 + // Sailor types 217 + atproto.SailorProfile{}, 218 + atproto.SailorStar{}, 219 + atproto.SailorStar_Subject{}, 220 + // Hold types 221 + atproto.HoldCaptain{}, 222 + atproto.HoldCrew{}, 223 + atproto.HoldLayer{}, 224 + // External types 33 225 atproto.TangledProfileRecord{}, 34 226 ); err != nil { 35 - fmt.Printf("Failed to generate CBOR encoders: %v\n", err) 227 + fmt.Printf("cbor-gen failed: %v\n", err) 36 228 os.Exit(1) 37 229 } 38 230 } 231 + ` 232 + 233 + // Write temp file 234 + tmpFile := filepath.Join(pkgDir, "cborgen_tmp.go") 235 + if err := os.WriteFile(tmpFile, []byte(cborGenCode), 0644); err != nil { 236 + return fmt.Errorf("failed to write temp cbor-gen file: %w", err) 237 + } 238 + defer os.Remove(tmpFile) 239 + 240 + // Run it 241 + cmd := exec.Command("go", "run", tmpFile) 242 + cmd.Dir = pkgDir 243 + cmd.Stdout = os.Stdout 244 + cmd.Stderr = os.Stderr 245 + return cmd.Run() 246 + } 247 + 248 + func generateRegisterFile(pkgDir string, types []typeInfo) error { 249 + var buf bytes.Buffer 250 + 251 + buf.WriteString("// Code generated by generate.go; DO NOT EDIT.\n\n") 252 + buf.WriteString("package atproto\n\n") 253 + buf.WriteString("import lexutil \"github.com/bluesky-social/indigo/lex/util\"\n\n") 254 + buf.WriteString("func init() {\n") 255 + 256 + for _, t := range types { 257 + fmt.Fprintf(&buf, "\tlexutil.RegisterType(%q, &%s{})\n", t.NSID, t.TypeName) 258 + } 259 + 260 + buf.WriteString("}\n") 261 + 262 + outPath := filepath.Join(pkgDir, "register.go") 263 + return os.WriteFile(outPath, buf.Bytes(), 0644) 264 + } 265 + 266 + func findRepoRoot() (string, error) { 267 + dir, err := os.Getwd() 268 + if err != nil { 269 + return "", err 270 + } 271 + 272 + for { 273 + if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil { 274 + return dir, nil 275 + } 276 + parent := filepath.Dir(dir) 277 + if parent == dir { 278 + return "", fmt.Errorf("go.mod not found") 279 + } 280 + dir = parent 281 + } 282 + }
+24
pkg/atproto/holdcaptain.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + // Lexicon schema: io.atcr.hold.captain 4 + 5 + package atproto 6 + 7 + // Represents the hold's ownership and metadata. Stored as a singleton record at rkey 'self' in the hold's embedded PDS. 8 + type HoldCaptain struct { 9 + LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.hold.captain"` 10 + // allowAllCrew: Allow any authenticated user to register as crew 11 + AllowAllCrew bool `json:"allowAllCrew" cborgen:"allowAllCrew"` 12 + // deployedAt: RFC3339 timestamp of when the hold was deployed 13 + DeployedAt string `json:"deployedAt" cborgen:"deployedAt"` 14 + // enableBlueskyPosts: Enable Bluesky posts when manifests are pushed 15 + EnableBlueskyPosts bool `json:"enableBlueskyPosts" cborgen:"enableBlueskyPosts"` 16 + // owner: DID of the hold owner 17 + Owner string `json:"owner" cborgen:"owner"` 18 + // provider: Deployment provider (e.g., fly.io, aws, etc.) 19 + Provider *string `json:"provider,omitempty" cborgen:"provider,omitempty"` 20 + // public: Whether this hold allows public blob reads (pulls) without authentication 21 + Public bool `json:"public" cborgen:"public"` 22 + // region: S3 region where blobs are stored 23 + Region *string `json:"region,omitempty" cborgen:"region,omitempty"` 24 + }
+18
pkg/atproto/holdcrew.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + // Lexicon schema: io.atcr.hold.crew 4 + 5 + package atproto 6 + 7 + // Crew member in a hold's embedded PDS. Grants access permissions to push blobs to the hold. Stored in the hold's embedded PDS (one record per member). 8 + type HoldCrew struct { 9 + LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.hold.crew"` 10 + // addedAt: RFC3339 timestamp of when the member was added 11 + AddedAt string `json:"addedAt" cborgen:"addedAt"` 12 + // member: DID of the crew member 13 + Member string `json:"member" cborgen:"member"` 14 + // permissions: Specific permissions granted to this member 15 + Permissions []string `json:"permissions" cborgen:"permissions"` 16 + // role: Member's role in the hold 17 + Role string `json:"role" cborgen:"role"` 18 + }
+24
pkg/atproto/holdlayer.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + // Lexicon schema: io.atcr.hold.layer 4 + 5 + package atproto 6 + 7 + // Represents metadata about a container layer stored in the hold. Stored in the hold's embedded PDS for tracking and analytics. 8 + type HoldLayer struct { 9 + LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.hold.layer"` 10 + // createdAt: RFC3339 timestamp of when the layer was uploaded 11 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 12 + // digest: Layer digest (e.g., sha256:abc123...) 13 + Digest string `json:"digest" cborgen:"digest"` 14 + // mediaType: Media type (e.g., application/vnd.oci.image.layer.v1.tar+gzip) 15 + MediaType string `json:"mediaType" cborgen:"mediaType"` 16 + // repository: Repository this layer belongs to 17 + Repository string `json:"repository" cborgen:"repository"` 18 + // size: Size in bytes 19 + Size int64 `json:"size" cborgen:"size"` 20 + // userDid: DID of user who uploaded this layer 21 + UserDid string `json:"userDid" cborgen:"userDid"` 22 + // userHandle: Handle of user (for display purposes) 23 + UserHandle string `json:"userHandle" cborgen:"userHandle"` 24 + }
+17 -40
pkg/atproto/lexicon.go
··· 18 18 // TagCollection is the collection name for image tags 19 19 TagCollection = "io.atcr.tag" 20 20 21 + // HoldCollection is the collection name for storage holds (BYOS) 22 + HoldCollection = "io.atcr.hold" 23 + 21 24 // HoldCrewCollection is the collection name for hold crew (membership) - LEGACY BYOS model 22 25 // Stored in owner's PDS for BYOS holds 23 26 HoldCrewCollection = "io.atcr.hold.crew" ··· 38 41 // TangledProfileCollection is the collection name for tangled profiles 39 42 // Stored in hold's embedded PDS (singleton record at rkey "self") 40 43 TangledProfileCollection = "sh.tangled.actor.profile" 44 + 45 + // BskyPostCollection is the collection name for Bluesky posts 46 + BskyPostCollection = "app.bsky.feed.post" 41 47 42 48 // BskyPostCollection is the collection name for Bluesky posts 43 49 BskyPostCollection = "app.bsky.feed.post" ··· 47 53 48 54 // StarCollection is the collection name for repository stars 49 55 StarCollection = "io.atcr.sailor.star" 50 - 51 - // RepoPageCollection is the collection name for repository page metadata 52 - // Stored in user's PDS with rkey = repository name 53 - RepoPageCollection = "io.atcr.repo.page" 54 56 ) 55 57 56 58 // ManifestRecord represents a container image manifest stored in ATProto ··· 310 312 CreatedAt time.Time `json:"createdAt"` 311 313 } 312 314 315 + // NewHoldRecord creates a new hold record 316 + func NewHoldRecord(endpoint, owner string, public bool) *HoldRecord { 317 + return &HoldRecord{ 318 + Type: HoldCollection, 319 + Endpoint: endpoint, 320 + Owner: owner, 321 + Public: public, 322 + CreatedAt: time.Now(), 323 + } 324 + } 325 + 313 326 // SailorProfileRecord represents a user's profile with registry preferences 314 327 // Stored in the user's PDS to configure default hold and other settings 315 328 type SailorProfileRecord struct { ··· 335 348 return &SailorProfileRecord{ 336 349 Type: SailorProfileCollection, 337 350 DefaultHold: defaultHold, 338 - CreatedAt: now, 339 - UpdatedAt: now, 340 - } 341 - } 342 - 343 - // RepoPageRecord represents repository page metadata (description + avatar) 344 - // Stored in the user's PDS with rkey = repository name 345 - // Users can edit this directly in their PDS to customize their repository page 346 - type RepoPageRecord struct { 347 - // Type should be "io.atcr.repo.page" 348 - Type string `json:"$type"` 349 - 350 - // Repository is the name of the repository (e.g., "myapp") 351 - Repository string `json:"repository"` 352 - 353 - // Description is the markdown README/description content 354 - Description string `json:"description,omitempty"` 355 - 356 - // Avatar is the repository avatar/icon blob reference 357 - Avatar *ATProtoBlobRef `json:"avatar,omitempty"` 358 - 359 - // CreatedAt timestamp 360 - CreatedAt time.Time `json:"createdAt"` 361 - 362 - // UpdatedAt timestamp 363 - UpdatedAt time.Time `json:"updatedAt"` 364 - } 365 - 366 - // NewRepoPageRecord creates a new repo page record 367 - func NewRepoPageRecord(repository, description string, avatar *ATProtoBlobRef) *RepoPageRecord { 368 - now := time.Now() 369 - return &RepoPageRecord{ 370 - Type: RepoPageCollection, 371 - Repository: repository, 372 - Description: description, 373 - Avatar: avatar, 374 351 CreatedAt: now, 375 352 UpdatedAt: now, 376 353 }
+18
pkg/atproto/lexicon_embedded.go
··· 1 + package atproto 2 + 3 + // This file contains ATProto record types that are NOT generated from our lexicons. 4 + // These are either external schemas or special types that require manual definition. 5 + 6 + // TangledProfileRecord represents a Tangled profile for the hold 7 + // Collection: sh.tangled.actor.profile (external schema - not controlled by ATCR) 8 + // Stored in hold's embedded PDS (singleton record at rkey "self") 9 + // Uses CBOR encoding for efficient storage in hold's carstore 10 + type TangledProfileRecord struct { 11 + Type string `json:"$type" cborgen:"$type"` 12 + Links []string `json:"links" cborgen:"links"` 13 + Stats []string `json:"stats" cborgen:"stats"` 14 + Bluesky bool `json:"bluesky" cborgen:"bluesky"` 15 + Location string `json:"location" cborgen:"location"` 16 + Description string `json:"description" cborgen:"description"` 17 + PinnedRepositories []string `json:"pinnedRepositories" cborgen:"pinnedRepositories"` 18 + }
+360
pkg/atproto/lexicon_helpers.go
··· 1 + package atproto 2 + 3 + //go:generate go run generate.go 4 + 5 + import ( 6 + "encoding/base64" 7 + "encoding/json" 8 + "fmt" 9 + "strings" 10 + "time" 11 + ) 12 + 13 + // Collection names for ATProto records 14 + const ( 15 + // ManifestCollection is the collection name for container manifests 16 + ManifestCollection = "io.atcr.manifest" 17 + 18 + // TagCollection is the collection name for image tags 19 + TagCollection = "io.atcr.tag" 20 + 21 + // HoldCollection is the collection name for storage holds (BYOS) - LEGACY 22 + HoldCollection = "io.atcr.hold" 23 + 24 + // HoldCrewCollection is the collection name for hold crew (membership) - LEGACY BYOS model 25 + // Stored in owner's PDS for BYOS holds 26 + HoldCrewCollection = "io.atcr.hold.crew" 27 + 28 + // CaptainCollection is the collection name for captain records (hold ownership) - EMBEDDED PDS model 29 + // Stored in hold's embedded PDS (singleton record at rkey "self") 30 + CaptainCollection = "io.atcr.hold.captain" 31 + 32 + // CrewCollection is the collection name for crew records (access control) - EMBEDDED PDS model 33 + // Stored in hold's embedded PDS (one record per member) 34 + // Note: Uses same collection name as HoldCrewCollection but stored in different PDS (hold's PDS vs owner's PDS) 35 + CrewCollection = "io.atcr.hold.crew" 36 + 37 + // LayerCollection is the collection name for container layer metadata 38 + // Stored in hold's embedded PDS to track which layers are stored 39 + LayerCollection = "io.atcr.hold.layer" 40 + 41 + // TangledProfileCollection is the collection name for tangled profiles 42 + // Stored in hold's embedded PDS (singleton record at rkey "self") 43 + TangledProfileCollection = "sh.tangled.actor.profile" 44 + 45 + // BskyPostCollection is the collection name for Bluesky posts 46 + BskyPostCollection = "app.bsky.feed.post" 47 + 48 + // SailorProfileCollection is the collection name for user profiles 49 + SailorProfileCollection = "io.atcr.sailor.profile" 50 + 51 + // StarCollection is the collection name for repository stars 52 + StarCollection = "io.atcr.sailor.star" 53 + ) 54 + 55 + // NewManifestRecord creates a new manifest record from OCI manifest JSON 56 + func NewManifestRecord(repository, digest string, ociManifest []byte) (*Manifest, error) { 57 + // Parse the OCI manifest 58 + var ociData struct { 59 + SchemaVersion int `json:"schemaVersion"` 60 + MediaType string `json:"mediaType"` 61 + Config json.RawMessage `json:"config,omitempty"` 62 + Layers []json.RawMessage `json:"layers,omitempty"` 63 + Manifests []json.RawMessage `json:"manifests,omitempty"` 64 + Subject json.RawMessage `json:"subject,omitempty"` 65 + Annotations map[string]string `json:"annotations,omitempty"` 66 + } 67 + 68 + if err := json.Unmarshal(ociManifest, &ociData); err != nil { 69 + return nil, err 70 + } 71 + 72 + // Detect manifest type based on media type 73 + isManifestList := strings.Contains(ociData.MediaType, "manifest.list") || 74 + strings.Contains(ociData.MediaType, "image.index") 75 + 76 + // Validate: must have either (config+layers) OR (manifests), never both 77 + hasImageFields := len(ociData.Config) > 0 || len(ociData.Layers) > 0 78 + hasIndexFields := len(ociData.Manifests) > 0 79 + 80 + if hasImageFields && hasIndexFields { 81 + return nil, fmt.Errorf("manifest cannot have both image fields (config/layers) and index fields (manifests)") 82 + } 83 + if !hasImageFields && !hasIndexFields { 84 + return nil, fmt.Errorf("manifest must have either image fields (config/layers) or index fields (manifests)") 85 + } 86 + 87 + record := &Manifest{ 88 + LexiconTypeID: ManifestCollection, 89 + Repository: repository, 90 + Digest: digest, 91 + MediaType: ociData.MediaType, 92 + SchemaVersion: int64(ociData.SchemaVersion), 93 + // ManifestBlob will be set by the caller after uploading to blob storage 94 + CreatedAt: time.Now().Format(time.RFC3339), 95 + } 96 + 97 + // Handle annotations - Manifest_Annotations is an empty struct in generated code 98 + // We don't copy ociData.Annotations since the generated type doesn't support arbitrary keys 99 + 100 + if isManifestList { 101 + // Parse manifest list/index 102 + record.Manifests = make([]Manifest_ManifestReference, len(ociData.Manifests)) 103 + for i, m := range ociData.Manifests { 104 + var ref struct { 105 + MediaType string `json:"mediaType"` 106 + Digest string `json:"digest"` 107 + Size int64 `json:"size"` 108 + Platform *Manifest_Platform `json:"platform,omitempty"` 109 + Annotations map[string]string `json:"annotations,omitempty"` 110 + } 111 + if err := json.Unmarshal(m, &ref); err != nil { 112 + return nil, fmt.Errorf("failed to parse manifest reference %d: %w", i, err) 113 + } 114 + record.Manifests[i] = Manifest_ManifestReference{ 115 + MediaType: ref.MediaType, 116 + Digest: ref.Digest, 117 + Size: ref.Size, 118 + Platform: ref.Platform, 119 + } 120 + } 121 + } else { 122 + // Parse image manifest 123 + if len(ociData.Config) > 0 { 124 + var config Manifest_BlobReference 125 + if err := json.Unmarshal(ociData.Config, &config); err != nil { 126 + return nil, fmt.Errorf("failed to parse config: %w", err) 127 + } 128 + record.Config = &config 129 + } 130 + 131 + // Parse layers 132 + record.Layers = make([]Manifest_BlobReference, len(ociData.Layers)) 133 + for i, layer := range ociData.Layers { 134 + if err := json.Unmarshal(layer, &record.Layers[i]); err != nil { 135 + return nil, fmt.Errorf("failed to parse layer %d: %w", i, err) 136 + } 137 + } 138 + } 139 + 140 + // Parse subject if present (works for both types) 141 + if len(ociData.Subject) > 0 { 142 + var subject Manifest_BlobReference 143 + if err := json.Unmarshal(ociData.Subject, &subject); err != nil { 144 + return nil, err 145 + } 146 + record.Subject = &subject 147 + } 148 + 149 + return record, nil 150 + } 151 + 152 + // NewTagRecord creates a new tag record with manifest AT-URI 153 + // did: The DID of the user (e.g., "did:plc:xyz123") 154 + // repository: The repository name (e.g., "myapp") 155 + // tag: The tag name (e.g., "latest", "v1.0.0") 156 + // manifestDigest: The manifest digest (e.g., "sha256:abc123...") 157 + func NewTagRecord(did, repository, tag, manifestDigest string) *Tag { 158 + // Build AT-URI for the manifest 159 + // Format: at://did:plc:xyz/io.atcr.manifest/<digest-without-sha256-prefix> 160 + manifestURI := BuildManifestURI(did, manifestDigest) 161 + 162 + return &Tag{ 163 + LexiconTypeID: TagCollection, 164 + Repository: repository, 165 + Tag: tag, 166 + Manifest: &manifestURI, 167 + // Note: ManifestDigest is not set for new records (only for backward compat with old records) 168 + CreatedAt: time.Now().Format(time.RFC3339), 169 + } 170 + } 171 + 172 + // NewSailorProfileRecord creates a new sailor profile record 173 + func NewSailorProfileRecord(defaultHold string) *SailorProfile { 174 + now := time.Now().Format(time.RFC3339) 175 + var holdPtr *string 176 + if defaultHold != "" { 177 + holdPtr = &defaultHold 178 + } 179 + return &SailorProfile{ 180 + LexiconTypeID: SailorProfileCollection, 181 + DefaultHold: holdPtr, 182 + CreatedAt: now, 183 + UpdatedAt: &now, 184 + } 185 + } 186 + 187 + // NewStarRecord creates a new star record 188 + func NewStarRecord(ownerDID, repository string) *SailorStar { 189 + return &SailorStar{ 190 + LexiconTypeID: StarCollection, 191 + Subject: SailorStar_Subject{ 192 + Did: ownerDID, 193 + Repository: repository, 194 + }, 195 + CreatedAt: time.Now().Format(time.RFC3339), 196 + } 197 + } 198 + 199 + // NewLayerRecord creates a new layer record 200 + func NewLayerRecord(digest string, size int64, mediaType, repository, userDID, userHandle string) *HoldLayer { 201 + return &HoldLayer{ 202 + LexiconTypeID: LayerCollection, 203 + Digest: digest, 204 + Size: size, 205 + MediaType: mediaType, 206 + Repository: repository, 207 + UserDid: userDID, 208 + UserHandle: userHandle, 209 + CreatedAt: time.Now().Format(time.RFC3339), 210 + } 211 + } 212 + 213 + // StarRecordKey generates a record key for a star 214 + // Uses a simple hash to ensure uniqueness and prevent duplicate stars 215 + func StarRecordKey(ownerDID, repository string) string { 216 + // Use base64 encoding of "ownerDID/repository" as the record key 217 + // This is deterministic and prevents duplicate stars 218 + combined := ownerDID + "/" + repository 219 + return base64.RawURLEncoding.EncodeToString([]byte(combined)) 220 + } 221 + 222 + // ParseStarRecordKey decodes a star record key back to ownerDID and repository 223 + func ParseStarRecordKey(rkey string) (ownerDID, repository string, err error) { 224 + decoded, err := base64.RawURLEncoding.DecodeString(rkey) 225 + if err != nil { 226 + return "", "", fmt.Errorf("failed to decode star rkey: %w", err) 227 + } 228 + 229 + parts := strings.SplitN(string(decoded), "/", 2) 230 + if len(parts) != 2 { 231 + return "", "", fmt.Errorf("invalid star rkey format: %s", string(decoded)) 232 + } 233 + 234 + return parts[0], parts[1], nil 235 + } 236 + 237 + // ResolveHoldDIDFromURL converts a hold endpoint URL to a did:web DID 238 + // This ensures that different representations of the same hold are deduplicated: 239 + // - http://172.28.0.3:8080 โ†’ did:web:172.28.0.3:8080 240 + // - http://hold01.atcr.io โ†’ did:web:hold01.atcr.io 241 + // - https://hold01.atcr.io โ†’ did:web:hold01.atcr.io 242 + // - did:web:hold01.atcr.io โ†’ did:web:hold01.atcr.io (passthrough) 243 + func ResolveHoldDIDFromURL(holdURL string) string { 244 + // Handle empty URLs 245 + if holdURL == "" { 246 + return "" 247 + } 248 + 249 + // If already a DID, return as-is 250 + if IsDID(holdURL) { 251 + return holdURL 252 + } 253 + 254 + // Parse URL to get hostname 255 + holdURL = strings.TrimPrefix(holdURL, "http://") 256 + holdURL = strings.TrimPrefix(holdURL, "https://") 257 + holdURL = strings.TrimSuffix(holdURL, "/") 258 + 259 + // Extract hostname (remove path if present) 260 + parts := strings.Split(holdURL, "/") 261 + hostname := parts[0] 262 + 263 + // Convert to did:web 264 + // did:web uses hostname directly (port included if non-standard) 265 + return "did:web:" + hostname 266 + } 267 + 268 + // IsDID checks if a string is a DID (starts with "did:") 269 + func IsDID(s string) bool { 270 + return len(s) > 4 && s[:4] == "did:" 271 + } 272 + 273 + // RepositoryTagToRKey converts a repository and tag to an ATProto record key 274 + // ATProto record keys must match: ^[a-zA-Z0-9._~-]{1,512}$ 275 + func RepositoryTagToRKey(repository, tag string) string { 276 + // Combine repository and tag to create a unique key 277 + // Replace invalid characters: slashes become tildes (~) 278 + // We use tilde instead of dash to avoid ambiguity with repository names that contain hyphens 279 + key := fmt.Sprintf("%s_%s", repository, tag) 280 + 281 + // Replace / with ~ (slash not allowed in rkeys, tilde is allowed and unlikely in repo names) 282 + key = strings.ReplaceAll(key, "/", "~") 283 + 284 + return key 285 + } 286 + 287 + // RKeyToRepositoryTag converts an ATProto record key back to repository and tag 288 + // This is the inverse of RepositoryTagToRKey 289 + // Note: If the tag contains underscores, this will split on the LAST underscore 290 + func RKeyToRepositoryTag(rkey string) (repository, tag string) { 291 + // Find the last underscore to split repository and tag 292 + lastUnderscore := strings.LastIndex(rkey, "_") 293 + if lastUnderscore == -1 { 294 + // No underscore found - treat entire string as tag with empty repository 295 + return "", rkey 296 + } 297 + 298 + repository = rkey[:lastUnderscore] 299 + tag = rkey[lastUnderscore+1:] 300 + 301 + // Convert tildes back to slashes in repository (tilde was used to encode slashes) 302 + repository = strings.ReplaceAll(repository, "~", "/") 303 + 304 + return repository, tag 305 + } 306 + 307 + // BuildManifestURI creates an AT-URI for a manifest record 308 + // did: The DID of the user (e.g., "did:plc:xyz123") 309 + // manifestDigest: The manifest digest (e.g., "sha256:abc123...") 310 + // Returns: AT-URI in format "at://did:plc:xyz/io.atcr.manifest/<digest-without-sha256-prefix>" 311 + func BuildManifestURI(did, manifestDigest string) string { 312 + // Remove the "sha256:" prefix from the digest to get the rkey 313 + rkey := strings.TrimPrefix(manifestDigest, "sha256:") 314 + return fmt.Sprintf("at://%s/%s/%s", did, ManifestCollection, rkey) 315 + } 316 + 317 + // ParseManifestURI extracts the digest from a manifest AT-URI 318 + // manifestURI: AT-URI in format "at://did:plc:xyz/io.atcr.manifest/<digest-without-sha256-prefix>" 319 + // Returns: Full digest with "sha256:" prefix (e.g., "sha256:abc123...") 320 + func ParseManifestURI(manifestURI string) (string, error) { 321 + // Expected format: at://did:plc:xyz/io.atcr.manifest/<rkey> 322 + if !strings.HasPrefix(manifestURI, "at://") { 323 + return "", fmt.Errorf("invalid AT-URI format: must start with 'at://'") 324 + } 325 + 326 + // Remove "at://" prefix 327 + remainder := strings.TrimPrefix(manifestURI, "at://") 328 + 329 + // Split by "/" 330 + parts := strings.Split(remainder, "/") 331 + if len(parts) != 3 { 332 + return "", fmt.Errorf("invalid AT-URI format: expected 3 parts (did/collection/rkey), got %d", len(parts)) 333 + } 334 + 335 + // Validate collection 336 + if parts[1] != ManifestCollection { 337 + return "", fmt.Errorf("invalid AT-URI: expected collection %s, got %s", ManifestCollection, parts[1]) 338 + } 339 + 340 + // The rkey is the digest without the "sha256:" prefix 341 + // Add it back to get the full digest 342 + rkey := parts[2] 343 + return "sha256:" + rkey, nil 344 + } 345 + 346 + // GetManifestDigest extracts the digest from a Tag, preferring the manifest field 347 + // Returns the digest with "sha256:" prefix (e.g., "sha256:abc123...") 348 + func (t *Tag) GetManifestDigest() (string, error) { 349 + // Prefer the new manifest field 350 + if t.Manifest != nil && *t.Manifest != "" { 351 + return ParseManifestURI(*t.Manifest) 352 + } 353 + 354 + // Fall back to the legacy manifestDigest field 355 + if t.ManifestDigest != nil && *t.ManifestDigest != "" { 356 + return *t.ManifestDigest, nil 357 + } 358 + 359 + return "", fmt.Errorf("tag record has neither manifest nor manifestDigest field") 360 + }
+109 -215
pkg/atproto/lexicon_test.go
··· 104 104 digest string 105 105 ociManifest string 106 106 wantErr bool 107 - checkFunc func(*testing.T, *ManifestRecord) 107 + checkFunc func(*testing.T, *Manifest) 108 108 }{ 109 109 { 110 110 name: "valid OCI manifest", ··· 112 112 digest: "sha256:abc123", 113 113 ociManifest: validOCIManifest, 114 114 wantErr: false, 115 - checkFunc: func(t *testing.T, record *ManifestRecord) { 116 - if record.Type != ManifestCollection { 117 - t.Errorf("Type = %v, want %v", record.Type, ManifestCollection) 115 + checkFunc: func(t *testing.T, record *Manifest) { 116 + if record.LexiconTypeID != ManifestCollection { 117 + t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, ManifestCollection) 118 118 } 119 119 if record.Repository != "myapp" { 120 120 t.Errorf("Repository = %v, want myapp", record.Repository) ··· 143 143 if record.Layers[1].Digest != "sha256:layer2" { 144 144 t.Errorf("Layers[1].Digest = %v, want sha256:layer2", record.Layers[1].Digest) 145 145 } 146 - if record.Annotations["org.opencontainers.image.created"] != "2025-01-01T00:00:00Z" { 147 - t.Errorf("Annotations missing expected key") 148 - } 149 - if record.CreatedAt.IsZero() { 150 - t.Error("CreatedAt should not be zero") 146 + // Note: Annotations are not copied to generated type (empty struct) 147 + if record.CreatedAt == "" { 148 + t.Error("CreatedAt should not be empty") 151 149 } 152 150 if record.Subject != nil { 153 151 t.Error("Subject should be nil") ··· 160 158 digest: "sha256:abc123", 161 159 ociManifest: manifestWithSubject, 162 160 wantErr: false, 163 - checkFunc: func(t *testing.T, record *ManifestRecord) { 161 + checkFunc: func(t *testing.T, record *Manifest) { 164 162 if record.Subject == nil { 165 163 t.Fatal("Subject should not be nil") 166 164 } ··· 192 190 digest: "sha256:multiarch", 193 191 ociManifest: manifestList, 194 192 wantErr: false, 195 - checkFunc: func(t *testing.T, record *ManifestRecord) { 193 + checkFunc: func(t *testing.T, record *Manifest) { 196 194 if record.MediaType != "application/vnd.oci.image.index.v1+json" { 197 195 t.Errorf("MediaType = %v, want application/vnd.oci.image.index.v1+json", record.MediaType) 198 196 } ··· 219 217 if record.Manifests[0].Platform.Architecture != "amd64" { 220 218 t.Errorf("Platform.Architecture = %v, want amd64", record.Manifests[0].Platform.Architecture) 221 219 } 222 - if record.Manifests[0].Platform.OS != "linux" { 223 - t.Errorf("Platform.OS = %v, want linux", record.Manifests[0].Platform.OS) 220 + if record.Manifests[0].Platform.Os != "linux" { 221 + t.Errorf("Platform.Os = %v, want linux", record.Manifests[0].Platform.Os) 224 222 } 225 223 226 224 // Check second manifest (arm64) ··· 230 228 if record.Manifests[1].Platform.Architecture != "arm64" { 231 229 t.Errorf("Platform.Architecture = %v, want arm64", record.Manifests[1].Platform.Architecture) 232 230 } 233 - if record.Manifests[1].Platform.Variant != "v8" { 231 + if record.Manifests[1].Platform.Variant == nil || *record.Manifests[1].Platform.Variant != "v8" { 234 232 t.Errorf("Platform.Variant = %v, want v8", record.Manifests[1].Platform.Variant) 235 233 } 236 234 }, ··· 268 266 269 267 func TestNewTagRecord(t *testing.T) { 270 268 did := "did:plc:test123" 271 - before := time.Now() 269 + // Truncate to second precision since RFC3339 doesn't have sub-second precision 270 + before := time.Now().Truncate(time.Second) 272 271 record := NewTagRecord(did, "myapp", "latest", "sha256:abc123") 273 - after := time.Now() 272 + after := time.Now().Truncate(time.Second).Add(time.Second) 274 273 275 - if record.Type != TagCollection { 276 - t.Errorf("Type = %v, want %v", record.Type, TagCollection) 274 + if record.LexiconTypeID != TagCollection { 275 + t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, TagCollection) 277 276 } 278 277 279 278 if record.Repository != "myapp" { ··· 286 285 287 286 // New records should have manifest field (AT-URI) 288 287 expectedURI := "at://did:plc:test123/io.atcr.manifest/abc123" 289 - if record.Manifest != expectedURI { 288 + if record.Manifest == nil || *record.Manifest != expectedURI { 290 289 t.Errorf("Manifest = %v, want %v", record.Manifest, expectedURI) 291 290 } 292 291 293 292 // New records should NOT have manifestDigest field 294 - if record.ManifestDigest != "" { 295 - t.Errorf("ManifestDigest should be empty for new records, got %v", record.ManifestDigest) 293 + if record.ManifestDigest != nil && *record.ManifestDigest != "" { 294 + t.Errorf("ManifestDigest should be nil for new records, got %v", record.ManifestDigest) 296 295 } 297 296 298 - if record.UpdatedAt.Before(before) || record.UpdatedAt.After(after) { 299 - t.Errorf("UpdatedAt = %v, want between %v and %v", record.UpdatedAt, before, after) 297 + createdAt, err := time.Parse(time.RFC3339, record.CreatedAt) 298 + if err != nil { 299 + t.Errorf("CreatedAt is not valid RFC3339: %v", err) 300 + } 301 + if createdAt.Before(before) || createdAt.After(after) { 302 + t.Errorf("CreatedAt = %v, want between %v and %v", createdAt, before, after) 300 303 } 301 304 } 302 305 ··· 391 394 } 392 395 393 396 func TestTagRecord_GetManifestDigest(t *testing.T) { 397 + manifestURI := "at://did:plc:test123/io.atcr.manifest/abc123" 398 + digestValue := "sha256:def456" 399 + 394 400 tests := []struct { 395 401 name string 396 - record TagRecord 402 + record Tag 397 403 want string 398 404 wantErr bool 399 405 }{ 400 406 { 401 407 name: "new record with manifest field", 402 - record: TagRecord{ 403 - Manifest: "at://did:plc:test123/io.atcr.manifest/abc123", 408 + record: Tag{ 409 + Manifest: &manifestURI, 404 410 }, 405 411 want: "sha256:abc123", 406 412 wantErr: false, 407 413 }, 408 414 { 409 415 name: "old record with manifestDigest field", 410 - record: TagRecord{ 411 - ManifestDigest: "sha256:def456", 416 + record: Tag{ 417 + ManifestDigest: &digestValue, 412 418 }, 413 419 want: "sha256:def456", 414 420 wantErr: false, 415 421 }, 416 422 { 417 423 name: "prefers manifest over manifestDigest", 418 - record: TagRecord{ 419 - Manifest: "at://did:plc:test123/io.atcr.manifest/abc123", 420 - ManifestDigest: "sha256:def456", 424 + record: Tag{ 425 + Manifest: &manifestURI, 426 + ManifestDigest: &digestValue, 421 427 }, 422 428 want: "sha256:abc123", 423 429 wantErr: false, 424 430 }, 425 431 { 426 432 name: "no fields set", 427 - record: TagRecord{}, 433 + record: Tag{}, 428 434 want: "", 429 435 wantErr: true, 430 436 }, 431 437 { 432 438 name: "invalid manifest URI", 433 - record: TagRecord{ 434 - Manifest: "invalid-uri", 439 + record: Tag{ 440 + Manifest: func() *string { s := "invalid-uri"; return &s }(), 435 441 }, 436 442 want: "", 437 443 wantErr: true, ··· 451 457 }) 452 458 } 453 459 } 460 + 461 + // TestNewHoldRecord is removed - HoldRecord is no longer supported (legacy BYOS) 454 462 455 463 func TestNewSailorProfileRecord(t *testing.T) { 456 464 tests := []struct { ··· 473 481 474 482 for _, tt := range tests { 475 483 t.Run(tt.name, func(t *testing.T) { 476 - before := time.Now() 484 + // Truncate to second precision since RFC3339 doesn't have sub-second precision 485 + before := time.Now().Truncate(time.Second) 477 486 record := NewSailorProfileRecord(tt.defaultHold) 478 - after := time.Now() 487 + after := time.Now().Truncate(time.Second).Add(time.Second) 479 488 480 - if record.Type != SailorProfileCollection { 481 - t.Errorf("Type = %v, want %v", record.Type, SailorProfileCollection) 489 + if record.LexiconTypeID != SailorProfileCollection { 490 + t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, SailorProfileCollection) 482 491 } 483 492 484 - if record.DefaultHold != tt.defaultHold { 485 - t.Errorf("DefaultHold = %v, want %v", record.DefaultHold, tt.defaultHold) 493 + if tt.defaultHold == "" { 494 + if record.DefaultHold != nil { 495 + t.Errorf("DefaultHold = %v, want nil", record.DefaultHold) 496 + } 497 + } else { 498 + if record.DefaultHold == nil || *record.DefaultHold != tt.defaultHold { 499 + t.Errorf("DefaultHold = %v, want %v", record.DefaultHold, tt.defaultHold) 500 + } 486 501 } 487 502 488 - if record.CreatedAt.Before(before) || record.CreatedAt.After(after) { 489 - t.Errorf("CreatedAt = %v, want between %v and %v", record.CreatedAt, before, after) 503 + createdAt, err := time.Parse(time.RFC3339, record.CreatedAt) 504 + if err != nil { 505 + t.Errorf("CreatedAt is not valid RFC3339: %v", err) 490 506 } 491 - 492 - if record.UpdatedAt.Before(before) || record.UpdatedAt.After(after) { 493 - t.Errorf("UpdatedAt = %v, want between %v and %v", record.UpdatedAt, before, after) 507 + if createdAt.Before(before) || createdAt.After(after) { 508 + t.Errorf("CreatedAt = %v, want between %v and %v", createdAt, before, after) 494 509 } 495 510 496 - // CreatedAt and UpdatedAt should be equal for new records 497 - if !record.CreatedAt.Equal(record.UpdatedAt) { 498 - t.Errorf("CreatedAt (%v) != UpdatedAt (%v)", record.CreatedAt, record.UpdatedAt) 511 + if record.UpdatedAt == nil { 512 + t.Error("UpdatedAt should not be nil") 513 + } else { 514 + updatedAt, err := time.Parse(time.RFC3339, *record.UpdatedAt) 515 + if err != nil { 516 + t.Errorf("UpdatedAt is not valid RFC3339: %v", err) 517 + } 518 + if updatedAt.Before(before) || updatedAt.After(after) { 519 + t.Errorf("UpdatedAt = %v, want between %v and %v", updatedAt, before, after) 520 + } 499 521 } 500 522 }) 501 523 } 502 524 } 503 525 504 526 func TestNewStarRecord(t *testing.T) { 505 - before := time.Now() 527 + // Truncate to second precision since RFC3339 doesn't have sub-second precision 528 + before := time.Now().Truncate(time.Second) 506 529 record := NewStarRecord("did:plc:alice123", "myapp") 507 - after := time.Now() 530 + after := time.Now().Truncate(time.Second).Add(time.Second) 508 531 509 - if record.Type != StarCollection { 510 - t.Errorf("Type = %v, want %v", record.Type, StarCollection) 532 + if record.LexiconTypeID != StarCollection { 533 + t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, StarCollection) 511 534 } 512 535 513 - if record.Subject.DID != "did:plc:alice123" { 514 - t.Errorf("Subject.DID = %v, want did:plc:alice123", record.Subject.DID) 536 + if record.Subject.Did != "did:plc:alice123" { 537 + t.Errorf("Subject.Did = %v, want did:plc:alice123", record.Subject.Did) 515 538 } 516 539 517 540 if record.Subject.Repository != "myapp" { 518 541 t.Errorf("Subject.Repository = %v, want myapp", record.Subject.Repository) 519 542 } 520 543 521 - if record.CreatedAt.Before(before) || record.CreatedAt.After(after) { 522 - t.Errorf("CreatedAt = %v, want between %v and %v", record.CreatedAt, before, after) 544 + createdAt, err := time.Parse(time.RFC3339, record.CreatedAt) 545 + if err != nil { 546 + t.Errorf("CreatedAt is not valid RFC3339: %v", err) 547 + } 548 + if createdAt.Before(before) || createdAt.After(after) { 549 + t.Errorf("CreatedAt = %v, want between %v and %v", createdAt, before, after) 523 550 } 524 551 } 525 552 ··· 807 834 } 808 835 809 836 // Add hold DID 810 - record.HoldDID = "did:web:hold01.atcr.io" 837 + holdDID := "did:web:hold01.atcr.io" 838 + record.HoldDid = &holdDID 811 839 812 840 // Serialize to JSON 813 841 jsonData, err := json.Marshal(record) ··· 816 844 } 817 845 818 846 // Deserialize from JSON 819 - var decoded ManifestRecord 847 + var decoded Manifest 820 848 if err := json.Unmarshal(jsonData, &decoded); err != nil { 821 849 t.Fatalf("json.Unmarshal() error = %v", err) 822 850 } 823 851 824 852 // Verify fields 825 - if decoded.Type != record.Type { 826 - t.Errorf("Type = %v, want %v", decoded.Type, record.Type) 853 + if decoded.LexiconTypeID != record.LexiconTypeID { 854 + t.Errorf("LexiconTypeID = %v, want %v", decoded.LexiconTypeID, record.LexiconTypeID) 827 855 } 828 856 if decoded.Repository != record.Repository { 829 857 t.Errorf("Repository = %v, want %v", decoded.Repository, record.Repository) ··· 831 859 if decoded.Digest != record.Digest { 832 860 t.Errorf("Digest = %v, want %v", decoded.Digest, record.Digest) 833 861 } 834 - if decoded.HoldDID != record.HoldDID { 835 - t.Errorf("HoldDID = %v, want %v", decoded.HoldDID, record.HoldDID) 862 + if decoded.HoldDid == nil || *decoded.HoldDid != *record.HoldDid { 863 + t.Errorf("HoldDid = %v, want %v", decoded.HoldDid, record.HoldDid) 836 864 } 837 865 if decoded.Config.Digest != record.Config.Digest { 838 866 t.Errorf("Config.Digest = %v, want %v", decoded.Config.Digest, record.Config.Digest) ··· 843 871 } 844 872 845 873 func TestBlobReference_JSONSerialization(t *testing.T) { 846 - blob := BlobReference{ 874 + blob := Manifest_BlobReference{ 847 875 MediaType: "application/vnd.oci.image.layer.v1.tar+gzip", 848 876 Digest: "sha256:abc123", 849 877 Size: 12345, 850 - URLs: []string{"https://s3.example.com/blob"}, 851 - Annotations: map[string]string{ 852 - "key": "value", 853 - }, 878 + Urls: []string{"https://s3.example.com/blob"}, 879 + // Note: Annotations is now an empty struct, not a map 854 880 } 855 881 856 882 // Serialize ··· 860 886 } 861 887 862 888 // Deserialize 863 - var decoded BlobReference 889 + var decoded Manifest_BlobReference 864 890 if err := json.Unmarshal(jsonData, &decoded); err != nil { 865 891 t.Fatalf("json.Unmarshal() error = %v", err) 866 892 } ··· 878 904 } 879 905 880 906 func TestStarSubject_JSONSerialization(t *testing.T) { 881 - subject := StarSubject{ 882 - DID: "did:plc:alice123", 907 + subject := SailorStar_Subject{ 908 + Did: "did:plc:alice123", 883 909 Repository: "myapp", 884 910 } 885 911 ··· 890 916 } 891 917 892 918 // Deserialize 893 - var decoded StarSubject 919 + var decoded SailorStar_Subject 894 920 if err := json.Unmarshal(jsonData, &decoded); err != nil { 895 921 t.Fatalf("json.Unmarshal() error = %v", err) 896 922 } 897 923 898 924 // Verify 899 - if decoded.DID != subject.DID { 900 - t.Errorf("DID = %v, want %v", decoded.DID, subject.DID) 925 + if decoded.Did != subject.Did { 926 + t.Errorf("Did = %v, want %v", decoded.Did, subject.Did) 901 927 } 902 928 if decoded.Repository != subject.Repository { 903 929 t.Errorf("Repository = %v, want %v", decoded.Repository, subject.Repository) ··· 1144 1170 t.Fatal("NewLayerRecord() returned nil") 1145 1171 } 1146 1172 1147 - if record.Type != LayerCollection { 1148 - t.Errorf("Type = %q, want %q", record.Type, LayerCollection) 1173 + if record.LexiconTypeID != LayerCollection { 1174 + t.Errorf("LexiconTypeID = %q, want %q", record.LexiconTypeID, LayerCollection) 1149 1175 } 1150 1176 1151 1177 if record.Digest != tt.digest { ··· 1164 1190 t.Errorf("Repository = %q, want %q", record.Repository, tt.repository) 1165 1191 } 1166 1192 1167 - if record.UserDID != tt.userDID { 1168 - t.Errorf("UserDID = %q, want %q", record.UserDID, tt.userDID) 1193 + if record.UserDid != tt.userDID { 1194 + t.Errorf("UserDid = %q, want %q", record.UserDid, tt.userDID) 1169 1195 } 1170 1196 1171 1197 if record.UserHandle != tt.userHandle { ··· 1187 1213 } 1188 1214 1189 1215 func TestNewLayerRecordJSON(t *testing.T) { 1190 - // Test that LayerRecord can be marshaled/unmarshaled to/from JSON 1216 + // Test that HoldLayer can be marshaled/unmarshaled to/from JSON 1191 1217 record := NewLayerRecord( 1192 1218 "sha256:abc123", 1193 1219 1024, ··· 1204 1230 } 1205 1231 1206 1232 // Unmarshal back 1207 - var decoded LayerRecord 1233 + var decoded HoldLayer 1208 1234 if err := json.Unmarshal(jsonData, &decoded); err != nil { 1209 1235 t.Fatalf("json.Unmarshal() error = %v", err) 1210 1236 } 1211 1237 1212 1238 // Verify fields match 1213 - if decoded.Type != record.Type { 1214 - t.Errorf("Type = %q, want %q", decoded.Type, record.Type) 1239 + if decoded.LexiconTypeID != record.LexiconTypeID { 1240 + t.Errorf("LexiconTypeID = %q, want %q", decoded.LexiconTypeID, record.LexiconTypeID) 1215 1241 } 1216 1242 if decoded.Digest != record.Digest { 1217 1243 t.Errorf("Digest = %q, want %q", decoded.Digest, record.Digest) ··· 1225 1251 if decoded.Repository != record.Repository { 1226 1252 t.Errorf("Repository = %q, want %q", decoded.Repository, record.Repository) 1227 1253 } 1228 - if decoded.UserDID != record.UserDID { 1229 - t.Errorf("UserDID = %q, want %q", decoded.UserDID, record.UserDID) 1254 + if decoded.UserDid != record.UserDid { 1255 + t.Errorf("UserDid = %q, want %q", decoded.UserDid, record.UserDid) 1230 1256 } 1231 1257 if decoded.UserHandle != record.UserHandle { 1232 1258 t.Errorf("UserHandle = %q, want %q", decoded.UserHandle, record.UserHandle) ··· 1235 1261 t.Errorf("CreatedAt = %q, want %q", decoded.CreatedAt, record.CreatedAt) 1236 1262 } 1237 1263 } 1238 - 1239 - func TestNewRepoPageRecord(t *testing.T) { 1240 - tests := []struct { 1241 - name string 1242 - repository string 1243 - description string 1244 - avatar *ATProtoBlobRef 1245 - }{ 1246 - { 1247 - name: "with description only", 1248 - repository: "myapp", 1249 - description: "# My App\n\nA cool container image.", 1250 - avatar: nil, 1251 - }, 1252 - { 1253 - name: "with avatar only", 1254 - repository: "another-app", 1255 - description: "", 1256 - avatar: &ATProtoBlobRef{ 1257 - Type: "blob", 1258 - Ref: Link{Link: "bafyreiabc123"}, 1259 - MimeType: "image/png", 1260 - Size: 1024, 1261 - }, 1262 - }, 1263 - { 1264 - name: "with both description and avatar", 1265 - repository: "full-app", 1266 - description: "This is a full description.", 1267 - avatar: &ATProtoBlobRef{ 1268 - Type: "blob", 1269 - Ref: Link{Link: "bafyreiabc456"}, 1270 - MimeType: "image/jpeg", 1271 - Size: 2048, 1272 - }, 1273 - }, 1274 - { 1275 - name: "empty values", 1276 - repository: "", 1277 - description: "", 1278 - avatar: nil, 1279 - }, 1280 - } 1281 - 1282 - for _, tt := range tests { 1283 - t.Run(tt.name, func(t *testing.T) { 1284 - before := time.Now() 1285 - record := NewRepoPageRecord(tt.repository, tt.description, tt.avatar) 1286 - after := time.Now() 1287 - 1288 - if record.Type != RepoPageCollection { 1289 - t.Errorf("Type = %v, want %v", record.Type, RepoPageCollection) 1290 - } 1291 - 1292 - if record.Repository != tt.repository { 1293 - t.Errorf("Repository = %v, want %v", record.Repository, tt.repository) 1294 - } 1295 - 1296 - if record.Description != tt.description { 1297 - t.Errorf("Description = %v, want %v", record.Description, tt.description) 1298 - } 1299 - 1300 - if tt.avatar == nil && record.Avatar != nil { 1301 - t.Error("Avatar should be nil") 1302 - } 1303 - 1304 - if tt.avatar != nil { 1305 - if record.Avatar == nil { 1306 - t.Fatal("Avatar should not be nil") 1307 - } 1308 - if record.Avatar.Ref.Link != tt.avatar.Ref.Link { 1309 - t.Errorf("Avatar.Ref.Link = %v, want %v", record.Avatar.Ref.Link, tt.avatar.Ref.Link) 1310 - } 1311 - } 1312 - 1313 - if record.CreatedAt.Before(before) || record.CreatedAt.After(after) { 1314 - t.Errorf("CreatedAt = %v, want between %v and %v", record.CreatedAt, before, after) 1315 - } 1316 - 1317 - if record.UpdatedAt.Before(before) || record.UpdatedAt.After(after) { 1318 - t.Errorf("UpdatedAt = %v, want between %v and %v", record.UpdatedAt, before, after) 1319 - } 1320 - 1321 - // CreatedAt and UpdatedAt should be equal for new records 1322 - if !record.CreatedAt.Equal(record.UpdatedAt) { 1323 - t.Errorf("CreatedAt (%v) != UpdatedAt (%v)", record.CreatedAt, record.UpdatedAt) 1324 - } 1325 - }) 1326 - } 1327 - } 1328 - 1329 - func TestRepoPageRecord_JSONSerialization(t *testing.T) { 1330 - record := NewRepoPageRecord( 1331 - "myapp", 1332 - "# My App\n\nA description with **markdown**.", 1333 - &ATProtoBlobRef{ 1334 - Type: "blob", 1335 - Ref: Link{Link: "bafyreiabc123"}, 1336 - MimeType: "image/png", 1337 - Size: 1024, 1338 - }, 1339 - ) 1340 - 1341 - // Serialize to JSON 1342 - jsonData, err := json.Marshal(record) 1343 - if err != nil { 1344 - t.Fatalf("json.Marshal() error = %v", err) 1345 - } 1346 - 1347 - // Deserialize from JSON 1348 - var decoded RepoPageRecord 1349 - if err := json.Unmarshal(jsonData, &decoded); err != nil { 1350 - t.Fatalf("json.Unmarshal() error = %v", err) 1351 - } 1352 - 1353 - // Verify fields 1354 - if decoded.Type != record.Type { 1355 - t.Errorf("Type = %v, want %v", decoded.Type, record.Type) 1356 - } 1357 - if decoded.Repository != record.Repository { 1358 - t.Errorf("Repository = %v, want %v", decoded.Repository, record.Repository) 1359 - } 1360 - if decoded.Description != record.Description { 1361 - t.Errorf("Description = %v, want %v", decoded.Description, record.Description) 1362 - } 1363 - if decoded.Avatar == nil { 1364 - t.Fatal("Avatar should not be nil") 1365 - } 1366 - if decoded.Avatar.Ref.Link != record.Avatar.Ref.Link { 1367 - t.Errorf("Avatar.Ref.Link = %v, want %v", decoded.Avatar.Ref.Link, record.Avatar.Ref.Link) 1368 - } 1369 - }
+103
pkg/atproto/manifest.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + // Lexicon schema: io.atcr.manifest 4 + 5 + package atproto 6 + 7 + import ( 8 + lexutil "github.com/bluesky-social/indigo/lex/util" 9 + ) 10 + 11 + // A container image manifest following OCI specification, stored in ATProto 12 + type Manifest struct { 13 + LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.manifest"` 14 + // annotations: Optional metadata annotations 15 + Annotations *Manifest_Annotations `json:"annotations,omitempty" cborgen:"annotations,omitempty"` 16 + // config: Reference to image configuration blob 17 + Config *Manifest_BlobReference `json:"config,omitempty" cborgen:"config,omitempty"` 18 + // createdAt: Record creation timestamp 19 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 20 + // digest: Content digest (e.g., 'sha256:abc123...') 21 + Digest string `json:"digest" cborgen:"digest"` 22 + // holdDid: DID of the hold service where blobs are stored (e.g., 'did:web:hold01.atcr.io'). Primary reference for hold resolution. 23 + HoldDid *string `json:"holdDid,omitempty" cborgen:"holdDid,omitempty"` 24 + // holdEndpoint: Hold service endpoint URL where blobs are stored. DEPRECATED: Use holdDid instead. Kept for backward compatibility. 25 + HoldEndpoint *string `json:"holdEndpoint,omitempty" cborgen:"holdEndpoint,omitempty"` 26 + // layers: Filesystem layers (for image manifests) 27 + Layers []Manifest_BlobReference `json:"layers,omitempty" cborgen:"layers,omitempty"` 28 + // manifestBlob: The full OCI manifest stored as a blob in ATProto. 29 + ManifestBlob *lexutil.LexBlob `json:"manifestBlob,omitempty" cborgen:"manifestBlob,omitempty"` 30 + // manifests: Referenced manifests (for manifest lists/indexes) 31 + Manifests []Manifest_ManifestReference `json:"manifests,omitempty" cborgen:"manifests,omitempty"` 32 + // mediaType: OCI media type 33 + MediaType string `json:"mediaType" cborgen:"mediaType"` 34 + // repository: Repository name (e.g., 'myapp'). Scoped to user's DID. 35 + Repository string `json:"repository" cborgen:"repository"` 36 + // schemaVersion: OCI schema version (typically 2) 37 + SchemaVersion int64 `json:"schemaVersion" cborgen:"schemaVersion"` 38 + // subject: Optional reference to another manifest (for attestations, signatures) 39 + Subject *Manifest_BlobReference `json:"subject,omitempty" cborgen:"subject,omitempty"` 40 + } 41 + 42 + // Optional metadata annotations 43 + type Manifest_Annotations struct { 44 + } 45 + 46 + // Manifest_BlobReference is a "blobReference" in the io.atcr.manifest schema. 47 + // 48 + // Reference to a blob stored in S3 or external storage 49 + type Manifest_BlobReference struct { 50 + LexiconTypeID string `json:"$type,omitempty" cborgen:"$type,const=io.atcr.manifest#blobReference,omitempty"` 51 + // annotations: Optional metadata 52 + Annotations *Manifest_BlobReference_Annotations `json:"annotations,omitempty" cborgen:"annotations,omitempty"` 53 + // digest: Content digest (e.g., 'sha256:...') 54 + Digest string `json:"digest" cborgen:"digest"` 55 + // mediaType: MIME type of the blob 56 + MediaType string `json:"mediaType" cborgen:"mediaType"` 57 + // size: Size in bytes 58 + Size int64 `json:"size" cborgen:"size"` 59 + // urls: Optional direct URLs to blob (for BYOS) 60 + Urls []string `json:"urls,omitempty" cborgen:"urls,omitempty"` 61 + } 62 + 63 + // Optional metadata 64 + type Manifest_BlobReference_Annotations struct { 65 + } 66 + 67 + // Manifest_ManifestReference is a "manifestReference" in the io.atcr.manifest schema. 68 + // 69 + // Reference to a manifest in a manifest list/index 70 + type Manifest_ManifestReference struct { 71 + LexiconTypeID string `json:"$type,omitempty" cborgen:"$type,const=io.atcr.manifest#manifestReference,omitempty"` 72 + // annotations: Optional metadata 73 + Annotations *Manifest_ManifestReference_Annotations `json:"annotations,omitempty" cborgen:"annotations,omitempty"` 74 + // digest: Content digest (e.g., 'sha256:...') 75 + Digest string `json:"digest" cborgen:"digest"` 76 + // mediaType: Media type of the referenced manifest 77 + MediaType string `json:"mediaType" cborgen:"mediaType"` 78 + // platform: Platform information for this manifest 79 + Platform *Manifest_Platform `json:"platform,omitempty" cborgen:"platform,omitempty"` 80 + // size: Size in bytes 81 + Size int64 `json:"size" cborgen:"size"` 82 + } 83 + 84 + // Optional metadata 85 + type Manifest_ManifestReference_Annotations struct { 86 + } 87 + 88 + // Manifest_Platform is a "platform" in the io.atcr.manifest schema. 89 + // 90 + // Platform information describing OS and architecture 91 + type Manifest_Platform struct { 92 + LexiconTypeID string `json:"$type,omitempty" cborgen:"$type,const=io.atcr.manifest#platform,omitempty"` 93 + // architecture: CPU architecture (e.g., 'amd64', 'arm64', 'arm') 94 + Architecture string `json:"architecture" cborgen:"architecture"` 95 + // os: Operating system (e.g., 'linux', 'windows', 'darwin') 96 + Os string `json:"os" cborgen:"os"` 97 + // osFeatures: Optional OS features 98 + OsFeatures []string `json:"osFeatures,omitempty" cborgen:"osFeatures,omitempty"` 99 + // osVersion: Optional OS version 100 + OsVersion *string `json:"osVersion,omitempty" cborgen:"osVersion,omitempty"` 101 + // variant: Optional CPU variant (e.g., 'v7' for ARM) 102 + Variant *string `json:"variant,omitempty" cborgen:"variant,omitempty"` 103 + }
+15
pkg/atproto/register.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + package atproto 4 + 5 + import lexutil "github.com/bluesky-social/indigo/lex/util" 6 + 7 + func init() { 8 + lexutil.RegisterType("io.atcr.hold.captain", &HoldCaptain{}) 9 + lexutil.RegisterType("io.atcr.hold.crew", &HoldCrew{}) 10 + lexutil.RegisterType("io.atcr.hold.layer", &HoldLayer{}) 11 + lexutil.RegisterType("io.atcr.manifest", &Manifest{}) 12 + lexutil.RegisterType("io.atcr.sailor.profile", &SailorProfile{}) 13 + lexutil.RegisterType("io.atcr.sailor.star", &SailorStar{}) 14 + lexutil.RegisterType("io.atcr.tag", &Tag{}) 15 + }
+16
pkg/atproto/sailorprofile.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + // Lexicon schema: io.atcr.sailor.profile 4 + 5 + package atproto 6 + 7 + // User profile for ATCR registry. Stores preferences like default hold for blob storage. 8 + type SailorProfile struct { 9 + LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.sailor.profile"` 10 + // createdAt: Profile creation timestamp 11 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 12 + // defaultHold: Default hold endpoint for blob storage. If null, user has opted out of defaults. 13 + DefaultHold *string `json:"defaultHold,omitempty" cborgen:"defaultHold,omitempty"` 14 + // updatedAt: Profile last updated timestamp 15 + UpdatedAt *string `json:"updatedAt,omitempty" cborgen:"updatedAt,omitempty"` 16 + }
+25
pkg/atproto/sailorstar.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + // Lexicon schema: io.atcr.sailor.star 4 + 5 + package atproto 6 + 7 + // A star (like) on a container image repository. Stored in the starrer's PDS, similar to Bluesky likes. 8 + type SailorStar struct { 9 + LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.sailor.star"` 10 + // createdAt: Star creation timestamp 11 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 12 + // subject: The repository being starred 13 + Subject SailorStar_Subject `json:"subject" cborgen:"subject"` 14 + } 15 + 16 + // SailorStar_Subject is a "subject" in the io.atcr.sailor.star schema. 17 + // 18 + // Reference to a repository owned by a user 19 + type SailorStar_Subject struct { 20 + LexiconTypeID string `json:"$type,omitempty" cborgen:"$type,const=io.atcr.sailor.star#subject,omitempty"` 21 + // did: DID of the repository owner 22 + Did string `json:"did" cborgen:"did"` 23 + // repository: Repository name (e.g., 'myapp') 24 + Repository string `json:"repository" cborgen:"repository"` 25 + }
+20
pkg/atproto/tag.go
··· 1 + // Code generated by generate.go; DO NOT EDIT. 2 + 3 + // Lexicon schema: io.atcr.tag 4 + 5 + package atproto 6 + 7 + // A named tag pointing to a specific manifest digest 8 + type Tag struct { 9 + LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.tag"` 10 + // createdAt: Tag creation timestamp 11 + CreatedAt string `json:"createdAt" cborgen:"createdAt"` 12 + // manifest: AT-URI of the manifest this tag points to (e.g., 'at://did:plc:xyz/io.atcr.manifest/abc123'). Preferred over manifestDigest for new records. 13 + Manifest *string `json:"manifest,omitempty" cborgen:"manifest,omitempty"` 14 + // manifestDigest: DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead. 15 + ManifestDigest *string `json:"manifestDigest,omitempty" cborgen:"manifestDigest,omitempty"` 16 + // repository: Repository name (e.g., 'myapp'). Scoped to user's DID. 17 + Repository string `json:"repository" cborgen:"repository"` 18 + // tag: Tag name (e.g., 'latest', 'v1.0.0', '12-slim') 19 + Tag string `json:"tag" cborgen:"tag"` 20 + }
-142
pkg/auth/cache.go
··· 1 - // Package token provides service token caching and management for AppView. 2 - // Service tokens are JWTs issued by a user's PDS to authorize AppView to 3 - // act on their behalf when communicating with hold services. Tokens are 4 - // cached with automatic expiry parsing and 10-second safety margins. 5 - package auth 6 - 7 - import ( 8 - "log/slog" 9 - "sync" 10 - "time" 11 - ) 12 - 13 - // serviceTokenEntry represents a cached service token 14 - type serviceTokenEntry struct { 15 - token string 16 - expiresAt time.Time 17 - err error 18 - once sync.Once 19 - } 20 - 21 - // Global cache for service tokens (DID:HoldDID -> token) 22 - // Service tokens are JWTs issued by a user's PDS to authorize AppView to act on their behalf 23 - // when communicating with hold services. These tokens are scoped to specific holds and have 24 - // limited lifetime (typically 60s, can request up to 5min). 25 - var ( 26 - globalServiceTokens = make(map[string]*serviceTokenEntry) 27 - globalServiceTokensMu sync.RWMutex 28 - ) 29 - 30 - // GetServiceToken retrieves a cached service token for the given DID and hold DID 31 - // Returns empty string if no valid cached token exists 32 - func GetServiceToken(did, holdDID string) (token string, expiresAt time.Time) { 33 - cacheKey := did + ":" + holdDID 34 - 35 - globalServiceTokensMu.RLock() 36 - entry, exists := globalServiceTokens[cacheKey] 37 - globalServiceTokensMu.RUnlock() 38 - 39 - if !exists { 40 - return "", time.Time{} 41 - } 42 - 43 - // Check if token is still valid 44 - if time.Now().After(entry.expiresAt) { 45 - // Token expired, remove from cache 46 - globalServiceTokensMu.Lock() 47 - delete(globalServiceTokens, cacheKey) 48 - globalServiceTokensMu.Unlock() 49 - return "", time.Time{} 50 - } 51 - 52 - return entry.token, entry.expiresAt 53 - } 54 - 55 - // SetServiceToken stores a service token in the cache 56 - // Automatically parses the JWT to extract the expiry time 57 - // Applies a 10-second safety margin (cache expires 10s before actual JWT expiry) 58 - func SetServiceToken(did, holdDID, token string) error { 59 - cacheKey := did + ":" + holdDID 60 - 61 - // Parse JWT to extract expiry (don't verify signature - we trust the PDS) 62 - expiry, err := ParseJWTExpiry(token) 63 - if err != nil { 64 - // If parsing fails, use default 50s TTL (conservative fallback) 65 - slog.Warn("Failed to parse JWT expiry, using default 50s", "error", err, "cacheKey", cacheKey) 66 - expiry = time.Now().Add(50 * time.Second) 67 - } else { 68 - // Apply 10s safety margin to avoid using nearly-expired tokens 69 - expiry = expiry.Add(-10 * time.Second) 70 - } 71 - 72 - globalServiceTokensMu.Lock() 73 - globalServiceTokens[cacheKey] = &serviceTokenEntry{ 74 - token: token, 75 - expiresAt: expiry, 76 - } 77 - globalServiceTokensMu.Unlock() 78 - 79 - slog.Debug("Cached service token", 80 - "cacheKey", cacheKey, 81 - "expiresIn", time.Until(expiry).Round(time.Second)) 82 - 83 - return nil 84 - } 85 - 86 - // InvalidateServiceToken removes a service token from the cache 87 - // Used when we detect that a token is invalid or the user's session has expired 88 - func InvalidateServiceToken(did, holdDID string) { 89 - cacheKey := did + ":" + holdDID 90 - 91 - globalServiceTokensMu.Lock() 92 - delete(globalServiceTokens, cacheKey) 93 - globalServiceTokensMu.Unlock() 94 - 95 - slog.Debug("Invalidated service token", "cacheKey", cacheKey) 96 - } 97 - 98 - // GetCacheStats returns statistics about the service token cache for debugging 99 - func GetCacheStats() map[string]any { 100 - globalServiceTokensMu.RLock() 101 - defer globalServiceTokensMu.RUnlock() 102 - 103 - validCount := 0 104 - expiredCount := 0 105 - now := time.Now() 106 - 107 - for _, entry := range globalServiceTokens { 108 - if now.Before(entry.expiresAt) { 109 - validCount++ 110 - } else { 111 - expiredCount++ 112 - } 113 - } 114 - 115 - return map[string]any{ 116 - "total_entries": len(globalServiceTokens), 117 - "valid_tokens": validCount, 118 - "expired_tokens": expiredCount, 119 - } 120 - } 121 - 122 - // CleanExpiredTokens removes expired tokens from the cache 123 - // Can be called periodically to prevent unbounded growth (though expired tokens 124 - // are also removed lazily on access) 125 - func CleanExpiredTokens() { 126 - globalServiceTokensMu.Lock() 127 - defer globalServiceTokensMu.Unlock() 128 - 129 - now := time.Now() 130 - removed := 0 131 - 132 - for key, entry := range globalServiceTokens { 133 - if now.After(entry.expiresAt) { 134 - delete(globalServiceTokens, key) 135 - removed++ 136 - } 137 - } 138 - 139 - if removed > 0 { 140 - slog.Debug("Cleaned expired service tokens", "count", removed) 141 - } 142 - }
-195
pkg/auth/cache_test.go
··· 1 - package auth 2 - 3 - import ( 4 - "testing" 5 - "time" 6 - ) 7 - 8 - func TestGetServiceToken_NotCached(t *testing.T) { 9 - // Clear cache first 10 - globalServiceTokensMu.Lock() 11 - globalServiceTokens = make(map[string]*serviceTokenEntry) 12 - globalServiceTokensMu.Unlock() 13 - 14 - did := "did:plc:test123" 15 - holdDID := "did:web:hold.example.com" 16 - 17 - token, expiresAt := GetServiceToken(did, holdDID) 18 - if token != "" { 19 - t.Errorf("Expected empty token for uncached entry, got %q", token) 20 - } 21 - if !expiresAt.IsZero() { 22 - t.Error("Expected zero time for uncached entry") 23 - } 24 - } 25 - 26 - func TestSetServiceToken_ManualExpiry(t *testing.T) { 27 - // Clear cache first 28 - globalServiceTokensMu.Lock() 29 - globalServiceTokens = make(map[string]*serviceTokenEntry) 30 - globalServiceTokensMu.Unlock() 31 - 32 - did := "did:plc:test123" 33 - holdDID := "did:web:hold.example.com" 34 - token := "invalid_jwt_token" // Will fall back to 50s default 35 - 36 - // This should succeed with default 50s TTL since JWT parsing will fail 37 - err := SetServiceToken(did, holdDID, token) 38 - if err != nil { 39 - t.Fatalf("SetServiceToken() error = %v", err) 40 - } 41 - 42 - // Verify token was cached 43 - cachedToken, expiresAt := GetServiceToken(did, holdDID) 44 - if cachedToken != token { 45 - t.Errorf("Expected token %q, got %q", token, cachedToken) 46 - } 47 - if expiresAt.IsZero() { 48 - t.Error("Expected non-zero expiry time") 49 - } 50 - 51 - // Expiry should be approximately 50s from now (with 10s margin subtracted in some cases) 52 - expectedExpiry := time.Now().Add(50 * time.Second) 53 - diff := expiresAt.Sub(expectedExpiry) 54 - if diff < -5*time.Second || diff > 5*time.Second { 55 - t.Errorf("Expiry time off by %v (expected ~50s from now)", diff) 56 - } 57 - } 58 - 59 - func TestGetServiceToken_Expired(t *testing.T) { 60 - // Manually insert an expired token 61 - did := "did:plc:test123" 62 - holdDID := "did:web:hold.example.com" 63 - cacheKey := did + ":" + holdDID 64 - 65 - globalServiceTokensMu.Lock() 66 - globalServiceTokens[cacheKey] = &serviceTokenEntry{ 67 - token: "expired_token", 68 - expiresAt: time.Now().Add(-1 * time.Hour), // 1 hour ago 69 - } 70 - globalServiceTokensMu.Unlock() 71 - 72 - // Try to get - should return empty since expired 73 - token, expiresAt := GetServiceToken(did, holdDID) 74 - if token != "" { 75 - t.Errorf("Expected empty token for expired entry, got %q", token) 76 - } 77 - if !expiresAt.IsZero() { 78 - t.Error("Expected zero time for expired entry") 79 - } 80 - 81 - // Verify token was removed from cache 82 - globalServiceTokensMu.RLock() 83 - _, exists := globalServiceTokens[cacheKey] 84 - globalServiceTokensMu.RUnlock() 85 - 86 - if exists { 87 - t.Error("Expected expired token to be removed from cache") 88 - } 89 - } 90 - 91 - func TestInvalidateServiceToken(t *testing.T) { 92 - // Set a token 93 - did := "did:plc:test123" 94 - holdDID := "did:web:hold.example.com" 95 - token := "test_token" 96 - 97 - err := SetServiceToken(did, holdDID, token) 98 - if err != nil { 99 - t.Fatalf("SetServiceToken() error = %v", err) 100 - } 101 - 102 - // Verify it's cached 103 - cachedToken, _ := GetServiceToken(did, holdDID) 104 - if cachedToken != token { 105 - t.Fatal("Token should be cached") 106 - } 107 - 108 - // Invalidate 109 - InvalidateServiceToken(did, holdDID) 110 - 111 - // Verify it's gone 112 - cachedToken, _ = GetServiceToken(did, holdDID) 113 - if cachedToken != "" { 114 - t.Error("Expected token to be invalidated") 115 - } 116 - } 117 - 118 - func TestCleanExpiredTokens(t *testing.T) { 119 - // Clear cache first 120 - globalServiceTokensMu.Lock() 121 - globalServiceTokens = make(map[string]*serviceTokenEntry) 122 - globalServiceTokensMu.Unlock() 123 - 124 - // Add expired and valid tokens 125 - globalServiceTokensMu.Lock() 126 - globalServiceTokens["expired:hold1"] = &serviceTokenEntry{ 127 - token: "expired1", 128 - expiresAt: time.Now().Add(-1 * time.Hour), 129 - } 130 - globalServiceTokens["valid:hold2"] = &serviceTokenEntry{ 131 - token: "valid1", 132 - expiresAt: time.Now().Add(1 * time.Hour), 133 - } 134 - globalServiceTokensMu.Unlock() 135 - 136 - // Clean expired 137 - CleanExpiredTokens() 138 - 139 - // Verify only valid token remains 140 - globalServiceTokensMu.RLock() 141 - _, expiredExists := globalServiceTokens["expired:hold1"] 142 - _, validExists := globalServiceTokens["valid:hold2"] 143 - globalServiceTokensMu.RUnlock() 144 - 145 - if expiredExists { 146 - t.Error("Expected expired token to be removed") 147 - } 148 - if !validExists { 149 - t.Error("Expected valid token to remain") 150 - } 151 - } 152 - 153 - func TestGetCacheStats(t *testing.T) { 154 - // Clear cache first 155 - globalServiceTokensMu.Lock() 156 - globalServiceTokens = make(map[string]*serviceTokenEntry) 157 - globalServiceTokensMu.Unlock() 158 - 159 - // Add some tokens 160 - globalServiceTokensMu.Lock() 161 - globalServiceTokens["did1:hold1"] = &serviceTokenEntry{ 162 - token: "token1", 163 - expiresAt: time.Now().Add(1 * time.Hour), 164 - } 165 - globalServiceTokens["did2:hold2"] = &serviceTokenEntry{ 166 - token: "token2", 167 - expiresAt: time.Now().Add(1 * time.Hour), 168 - } 169 - globalServiceTokensMu.Unlock() 170 - 171 - stats := GetCacheStats() 172 - if stats == nil { 173 - t.Fatal("Expected non-nil stats") 174 - } 175 - 176 - // GetCacheStats returns map[string]any with "total_entries" key 177 - totalEntries, ok := stats["total_entries"].(int) 178 - if !ok { 179 - t.Fatalf("Expected total_entries in stats map, got: %v", stats) 180 - } 181 - 182 - if totalEntries != 2 { 183 - t.Errorf("Expected 2 entries, got %d", totalEntries) 184 - } 185 - 186 - // Also check valid_tokens 187 - validTokens, ok := stats["valid_tokens"].(int) 188 - if !ok { 189 - t.Fatal("Expected valid_tokens in stats map") 190 - } 191 - 192 - if validTokens != 2 { 193 - t.Errorf("Expected 2 valid tokens, got %d", validTokens) 194 - } 195 - }
+3 -3
pkg/auth/hold_authorizer.go
··· 21 21 22 22 // GetCaptainRecord retrieves the captain record for a hold 23 23 // Used to check public flag and allowAllCrew settings 24 - GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) 24 + GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error) 25 25 26 26 // IsCrewMember checks if userDID is a crew member of holdDID 27 27 IsCrewMember(ctx context.Context, holdDID, userDID string) (bool, error) ··· 32 32 // Read access rules: 33 33 // - Public hold: allow anyone (even anonymous) 34 34 // - Private hold: require authentication (any authenticated user) 35 - func CheckReadAccessWithCaptain(captain *atproto.CaptainRecord, userDID string) bool { 35 + func CheckReadAccessWithCaptain(captain *atproto.HoldCaptain, userDID string) bool { 36 36 if captain.Public { 37 37 // Public hold - allow anyone (even anonymous) 38 38 return true ··· 55 55 // Write access rules: 56 56 // - Must be authenticated 57 57 // - Must be hold owner OR crew member 58 - func CheckWriteAccessWithCaptain(captain *atproto.CaptainRecord, userDID string, isCrew bool) bool { 58 + func CheckWriteAccessWithCaptain(captain *atproto.HoldCaptain, userDID string, isCrew bool) bool { 59 59 slog.Debug("Checking write access", "userDID", userDID, "owner", captain.Owner, "isCrew", isCrew) 60 60 61 61 if userDID == "" {
+5 -5
pkg/auth/hold_authorizer_test.go
··· 7 7 ) 8 8 9 9 func TestCheckReadAccessWithCaptain_PublicHold(t *testing.T) { 10 - captain := &atproto.CaptainRecord{ 10 + captain := &atproto.HoldCaptain{ 11 11 Public: true, 12 12 Owner: "did:plc:owner123", 13 13 } ··· 26 26 } 27 27 28 28 func TestCheckReadAccessWithCaptain_PrivateHold(t *testing.T) { 29 - captain := &atproto.CaptainRecord{ 29 + captain := &atproto.HoldCaptain{ 30 30 Public: false, 31 31 Owner: "did:plc:owner123", 32 32 } ··· 45 45 } 46 46 47 47 func TestCheckWriteAccessWithCaptain_Owner(t *testing.T) { 48 - captain := &atproto.CaptainRecord{ 48 + captain := &atproto.HoldCaptain{ 49 49 Public: false, 50 50 Owner: "did:plc:owner123", 51 51 } ··· 58 58 } 59 59 60 60 func TestCheckWriteAccessWithCaptain_Crew(t *testing.T) { 61 - captain := &atproto.CaptainRecord{ 61 + captain := &atproto.HoldCaptain{ 62 62 Public: false, 63 63 Owner: "did:plc:owner123", 64 64 } ··· 77 77 } 78 78 79 79 func TestCheckWriteAccessWithCaptain_Anonymous(t *testing.T) { 80 - captain := &atproto.CaptainRecord{ 80 + captain := &atproto.HoldCaptain{ 81 81 Public: false, 82 82 Owner: "did:plc:owner123", 83 83 }
+2 -2
pkg/auth/hold_local.go
··· 35 35 } 36 36 37 37 // GetCaptainRecord retrieves the captain record from the hold's PDS 38 - func (a *LocalHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) { 38 + func (a *LocalHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error) { 39 39 // Verify that the requested holdDID matches this hold 40 40 if holdDID != a.pds.DID() { 41 41 return nil, fmt.Errorf("holdDID mismatch: requested %s, this hold is %s", holdDID, a.pds.DID()) ··· 47 47 return nil, fmt.Errorf("failed to get captain record: %w", err) 48 48 } 49 49 50 - // The PDS returns *atproto.CaptainRecord directly now (after we update pds to use atproto types) 50 + // The PDS returns *atproto.HoldCaptain directly 51 51 return pdsCaptain, nil 52 52 } 53 53
+34 -20
pkg/auth/hold_remote.go
··· 101 101 // 1. Check database cache 102 102 // 2. If cache miss or expired, query hold's XRPC endpoint 103 103 // 3. Update cache 104 - func (a *RemoteHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) { 104 + func (a *RemoteHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error) { 105 105 // Try cache first 106 106 if a.db != nil { 107 107 cached, err := a.getCachedCaptainRecord(holdDID) 108 108 if err == nil && cached != nil { 109 109 // Cache hit - check if still valid 110 110 if time.Since(cached.UpdatedAt) < a.cacheTTL { 111 - return cached.CaptainRecord, nil 111 + return cached.HoldCaptain, nil 112 112 } 113 113 // Cache expired - continue to fetch fresh data 114 114 } ··· 133 133 134 134 // captainRecordWithMeta includes UpdatedAt for cache management 135 135 type captainRecordWithMeta struct { 136 - *atproto.CaptainRecord 136 + *atproto.HoldCaptain 137 137 UpdatedAt time.Time 138 138 } 139 139 ··· 145 145 WHERE hold_did = ? 146 146 ` 147 147 148 - var record atproto.CaptainRecord 148 + var record atproto.HoldCaptain 149 149 var deployedAt, region, provider sql.NullString 150 150 var updatedAt time.Time 151 151 ··· 172 172 record.DeployedAt = deployedAt.String 173 173 } 174 174 if region.Valid { 175 - record.Region = region.String 175 + record.Region = &region.String 176 176 } 177 177 if provider.Valid { 178 - record.Provider = provider.String 178 + record.Provider = &provider.String 179 179 } 180 180 181 181 return &captainRecordWithMeta{ 182 - CaptainRecord: &record, 183 - UpdatedAt: updatedAt, 182 + HoldCaptain: &record, 183 + UpdatedAt: updatedAt, 184 184 }, nil 185 185 } 186 186 187 187 // setCachedCaptainRecord stores a captain record in database cache 188 - func (a *RemoteHoldAuthorizer) setCachedCaptainRecord(holdDID string, record *atproto.CaptainRecord) error { 188 + func (a *RemoteHoldAuthorizer) setCachedCaptainRecord(holdDID string, record *atproto.HoldCaptain) error { 189 189 query := ` 190 190 INSERT INTO hold_captain_records ( 191 191 hold_did, owner_did, public, allow_all_crew, ··· 207 207 record.Public, 208 208 record.AllowAllCrew, 209 209 nullString(record.DeployedAt), 210 - nullString(record.Region), 211 - nullString(record.Provider), 210 + nullStringPtr(record.Region), 211 + nullStringPtr(record.Provider), 212 212 time.Now(), 213 213 ) 214 214 ··· 216 216 } 217 217 218 218 // fetchCaptainRecordFromXRPC queries the hold's XRPC endpoint for captain record 219 - func (a *RemoteHoldAuthorizer) fetchCaptainRecordFromXRPC(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) { 219 + func (a *RemoteHoldAuthorizer) fetchCaptainRecordFromXRPC(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error) { 220 220 // Resolve DID to URL 221 221 holdURL := atproto.ResolveHoldURL(holdDID) 222 222 ··· 261 261 } 262 262 263 263 // Convert to our type 264 - record := &atproto.CaptainRecord{ 265 - Type: atproto.CaptainCollection, 266 - Owner: xrpcResp.Value.Owner, 267 - Public: xrpcResp.Value.Public, 268 - AllowAllCrew: xrpcResp.Value.AllowAllCrew, 269 - DeployedAt: xrpcResp.Value.DeployedAt, 270 - Region: xrpcResp.Value.Region, 271 - Provider: xrpcResp.Value.Provider, 264 + record := &atproto.HoldCaptain{ 265 + LexiconTypeID: atproto.CaptainCollection, 266 + Owner: xrpcResp.Value.Owner, 267 + Public: xrpcResp.Value.Public, 268 + AllowAllCrew: xrpcResp.Value.AllowAllCrew, 269 + DeployedAt: xrpcResp.Value.DeployedAt, 270 + } 271 + 272 + // Handle optional pointer fields 273 + if xrpcResp.Value.Region != "" { 274 + record.Region = &xrpcResp.Value.Region 275 + } 276 + if xrpcResp.Value.Provider != "" { 277 + record.Provider = &xrpcResp.Value.Provider 272 278 } 273 279 274 280 return record, nil ··· 406 412 return sql.NullString{Valid: false} 407 413 } 408 414 return sql.NullString{String: s, Valid: true} 415 + } 416 + 417 + // nullStringPtr converts a *string to sql.NullString 418 + func nullStringPtr(s *string) sql.NullString { 419 + if s == nil || *s == "" { 420 + return sql.NullString{Valid: false} 421 + } 422 + return sql.NullString{String: *s, Valid: true} 409 423 } 410 424 411 425 // getCachedApproval checks if user has a cached crew approval
+13 -8
pkg/auth/hold_remote_test.go
··· 14 14 "atcr.io/pkg/atproto" 15 15 ) 16 16 17 + // ptrString returns a pointer to the given string 18 + func ptrString(s string) *string { 19 + return &s 20 + } 21 + 17 22 func TestNewRemoteHoldAuthorizer(t *testing.T) { 18 23 // Test with nil database (should still work) 19 24 authorizer := NewRemoteHoldAuthorizer(nil, false) ··· 133 138 holdDID := "did:web:hold01.atcr.io" 134 139 135 140 // Pre-populate cache with a captain record 136 - captainRecord := &atproto.CaptainRecord{ 137 - Type: atproto.CaptainCollection, 138 - Owner: "did:plc:owner123", 139 - Public: true, 140 - AllowAllCrew: false, 141 - DeployedAt: "2025-10-28T00:00:00Z", 142 - Region: "us-east-1", 143 - Provider: "fly.io", 141 + captainRecord := &atproto.HoldCaptain{ 142 + LexiconTypeID: atproto.CaptainCollection, 143 + Owner: "did:plc:owner123", 144 + Public: true, 145 + AllowAllCrew: false, 146 + DeployedAt: "2025-10-28T00:00:00Z", 147 + Region: ptrString("us-east-1"), 148 + Provider: ptrString("fly.io"), 144 149 } 145 150 146 151 err := remote.setCachedCaptainRecord(holdDID, captainRecord)
-80
pkg/auth/mock_authorizer.go
··· 1 - package auth 2 - 3 - import ( 4 - "context" 5 - 6 - "atcr.io/pkg/atproto" 7 - ) 8 - 9 - // MockHoldAuthorizer is a test double for HoldAuthorizer. 10 - // It allows tests to control the return values of authorization checks 11 - // without making network calls or querying a real PDS. 12 - type MockHoldAuthorizer struct { 13 - // Direct result control 14 - CanReadResult bool 15 - CanWriteResult bool 16 - CanAdminResult bool 17 - Error error 18 - 19 - // Captain record to return (optional, for GetCaptainRecord) 20 - CaptainRecord *atproto.CaptainRecord 21 - 22 - // Crew membership (optional, for IsCrewMember) 23 - IsCrewResult bool 24 - } 25 - 26 - // NewMockHoldAuthorizer creates a MockHoldAuthorizer with sensible defaults. 27 - // By default, it allows all access (public hold, user is owner). 28 - func NewMockHoldAuthorizer() *MockHoldAuthorizer { 29 - return &MockHoldAuthorizer{ 30 - CanReadResult: true, 31 - CanWriteResult: true, 32 - CanAdminResult: false, 33 - IsCrewResult: false, 34 - CaptainRecord: &atproto.CaptainRecord{ 35 - Type: "io.atcr.hold.captain", 36 - Owner: "did:plc:mock-owner", 37 - Public: true, 38 - }, 39 - } 40 - } 41 - 42 - // CheckReadAccess returns the configured CanReadResult. 43 - func (m *MockHoldAuthorizer) CheckReadAccess(ctx context.Context, holdDID, userDID string) (bool, error) { 44 - if m.Error != nil { 45 - return false, m.Error 46 - } 47 - return m.CanReadResult, nil 48 - } 49 - 50 - // CheckWriteAccess returns the configured CanWriteResult. 51 - func (m *MockHoldAuthorizer) CheckWriteAccess(ctx context.Context, holdDID, userDID string) (bool, error) { 52 - if m.Error != nil { 53 - return false, m.Error 54 - } 55 - return m.CanWriteResult, nil 56 - } 57 - 58 - // GetCaptainRecord returns the configured CaptainRecord or a default. 59 - func (m *MockHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) { 60 - if m.Error != nil { 61 - return nil, m.Error 62 - } 63 - if m.CaptainRecord != nil { 64 - return m.CaptainRecord, nil 65 - } 66 - // Return a default captain record 67 - return &atproto.CaptainRecord{ 68 - Type: "io.atcr.hold.captain", 69 - Owner: "did:plc:mock-owner", 70 - Public: true, 71 - }, nil 72 - } 73 - 74 - // IsCrewMember returns the configured IsCrewResult. 75 - func (m *MockHoldAuthorizer) IsCrewMember(ctx context.Context, holdDID, userDID string) (bool, error) { 76 - if m.Error != nil { 77 - return false, m.Error 78 - } 79 - return m.IsCrewResult, nil 80 - }
+32 -42
pkg/auth/oauth/client.go
··· 72 72 return baseURL + "/auth/oauth/callback" 73 73 } 74 74 75 - // GetDefaultScopes returns the default OAuth scopes for ATCR registry operations. 76 - // Includes io.atcr.authFullApp permission-set plus individual scopes for PDS compatibility. 77 - // Blob scopes are listed explicitly (not supported in Lexicon permission-sets). 75 + // GetDefaultScopes returns the default OAuth scopes for ATCR registry operations 76 + // testMode determines whether to use transition:generic (test) or rpc scopes (production) 78 77 func GetDefaultScopes(did string) []string { 79 - return []string{ 78 + scopes := []string{ 80 79 "atproto", 81 - // Permission-set (for future PDS support) 82 - // See lexicons/io/atcr/authFullApp.json for definition 83 - // Uses "include:" prefix per ATProto permission spec 84 - "include:io.atcr.authFullApp", 85 - // com.atproto scopes must be separate (permission-sets are namespace-limited) 86 - "rpc:com.atproto.repo.getRecord?aud=*", 87 - // Blob scopes (not supported in Lexicon permission-sets) 88 80 // Image manifest types (single-arch) 89 81 "blob:application/vnd.oci.image.manifest.v1+json", 90 82 "blob:application/vnd.docker.distribution.manifest.v2+json", ··· 93 85 "blob:application/vnd.docker.distribution.manifest.list.v2+json", 94 86 // OCI artifact manifests (for cosign signatures, SBOMs, attestations) 95 87 "blob:application/vnd.cncf.oras.artifact.manifest.v1+json", 96 - // Image avatars 97 - "blob:image/*", 88 + // Used for service token validation on holds 89 + "rpc:com.atproto.repo.getRecord?aud=*", 98 90 } 91 + 92 + // Add repo scopes 93 + scopes = append(scopes, 94 + fmt.Sprintf("repo:%s", atproto.ManifestCollection), 95 + fmt.Sprintf("repo:%s", atproto.TagCollection), 96 + fmt.Sprintf("repo:%s", atproto.StarCollection), 97 + fmt.Sprintf("repo:%s", atproto.SailorProfileCollection), 98 + ) 99 + 100 + return scopes 99 101 } 100 102 101 103 // ScopesMatch checks if two scope lists are equivalent (order-independent) ··· 223 225 // The session's PersistSessionCallback will save nonce updates to DB 224 226 err = fn(session) 225 227 226 - // If request failed with auth error, delete session to force re-auth 227 - if err != nil && isAuthError(err) { 228 - slog.Warn("Auth error detected, deleting session to force re-auth", 229 - "component", "oauth/refresher", 230 - "did", did, 231 - "error", err) 232 - // Don't hold the lock while deleting - release first 233 - mutex.Unlock() 234 - _ = r.DeleteSession(ctx, did) 235 - mutex.Lock() // Re-acquire for the deferred unlock 236 - } 237 - 238 228 slog.Debug("Released session lock for DoWithSession", 239 229 "component", "oauth/refresher", 240 230 "did", did, ··· 243 233 return err 244 234 } 245 235 246 - // isAuthError checks if an error looks like an OAuth/auth failure 247 - func isAuthError(err error) bool { 248 - if err == nil { 249 - return false 250 - } 251 - errStr := strings.ToLower(err.Error()) 252 - return strings.Contains(errStr, "unauthorized") || 253 - strings.Contains(errStr, "invalid_token") || 254 - strings.Contains(errStr, "insufficient_scope") || 255 - strings.Contains(errStr, "token expired") || 256 - strings.Contains(errStr, "401") 257 - } 258 - 259 236 // resumeSession loads a session from storage 260 237 func (r *Refresher) resumeSession(ctx context.Context, did string) (*oauth.ClientSession, error) { 261 238 // Parse DID ··· 280 257 return nil, fmt.Errorf("no session found for DID: %s", did) 281 258 } 282 259 283 - // Log scope differences for debugging, but don't delete session 284 - // The PDS will reject requests if scopes are insufficient 285 - // (Permission-sets get expanded by PDS, so exact matching doesn't work) 260 + // Validate that session scopes match current desired scopes 286 261 desiredScopes := r.clientApp.Config.Scopes 287 262 if !ScopesMatch(sessionData.Scopes, desiredScopes) { 288 - slog.Debug("Session scopes differ from desired (may be permission-set expansion)", 263 + slog.Debug("Scope mismatch, deleting session", 289 264 "did", did, 290 265 "storedScopes", sessionData.Scopes, 291 266 "desiredScopes", desiredScopes) 267 + 268 + // Delete the session from database since scopes have changed 269 + if err := r.clientApp.Store.DeleteSession(ctx, accountDID, sessionID); err != nil { 270 + slog.Warn("Failed to delete session with mismatched scopes", "error", err, "did", did) 271 + } 272 + 273 + // Also invalidate UI sessions since OAuth is now invalid 274 + if r.uiSessionStore != nil { 275 + r.uiSessionStore.DeleteByDID(did) 276 + slog.Info("Invalidated UI sessions due to scope mismatch", 277 + "component", "oauth/refresher", 278 + "did", did) 279 + } 280 + 281 + return nil, fmt.Errorf("OAuth scopes changed, re-authentication required") 292 282 } 293 283 294 284 // Resume session
+30 -7
pkg/auth/oauth/client_test.go
··· 1 1 package oauth 2 2 3 3 import ( 4 - "github.com/bluesky-social/indigo/atproto/auth/oauth" 5 4 "testing" 6 5 ) 7 6 8 7 func TestNewClientApp(t *testing.T) { 9 - keyPath := t.TempDir() + "/oauth-key.bin" 10 - store := oauth.NewMemStore() 8 + tmpDir := t.TempDir() 9 + storePath := tmpDir + "/oauth-test.json" 10 + keyPath := tmpDir + "/oauth-key.bin" 11 + 12 + store, err := NewFileStore(storePath) 13 + if err != nil { 14 + t.Fatalf("NewFileStore() error = %v", err) 15 + } 11 16 12 17 baseURL := "http://localhost:5000" 13 18 scopes := GetDefaultScopes("*") ··· 27 32 } 28 33 29 34 func TestNewClientAppWithCustomScopes(t *testing.T) { 30 - keyPath := t.TempDir() + "/oauth-key.bin" 31 - store := oauth.NewMemStore() 35 + tmpDir := t.TempDir() 36 + storePath := tmpDir + "/oauth-test.json" 37 + keyPath := tmpDir + "/oauth-key.bin" 38 + 39 + store, err := NewFileStore(storePath) 40 + if err != nil { 41 + t.Fatalf("NewFileStore() error = %v", err) 42 + } 32 43 33 44 baseURL := "http://localhost:5000" 34 45 scopes := []string{"atproto", "custom:scope"} ··· 117 128 // ---------------------------------------------------------------------------- 118 129 119 130 func TestNewRefresher(t *testing.T) { 120 - store := oauth.NewMemStore() 131 + tmpDir := t.TempDir() 132 + storePath := tmpDir + "/oauth-test.json" 133 + 134 + store, err := NewFileStore(storePath) 135 + if err != nil { 136 + t.Fatalf("NewFileStore() error = %v", err) 137 + } 121 138 122 139 scopes := GetDefaultScopes("*") 123 140 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 136 153 } 137 154 138 155 func TestRefresher_SetUISessionStore(t *testing.T) { 139 - store := oauth.NewMemStore() 156 + tmpDir := t.TempDir() 157 + storePath := tmpDir + "/oauth-test.json" 158 + 159 + store, err := NewFileStore(storePath) 160 + if err != nil { 161 + t.Fatalf("NewFileStore() error = %v", err) 162 + } 140 163 141 164 scopes := GetDefaultScopes("*") 142 165 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry")
+5 -1
pkg/auth/oauth/interactive.go
··· 26 26 registerCallback func(handler http.HandlerFunc) error, 27 27 displayAuthURL func(string) error, 28 28 ) (*InteractiveResult, error) { 29 - store := oauth.NewMemStore() 29 + // Create temporary file store for this flow 30 + store, err := NewFileStore("/tmp/atcr-oauth-temp.json") 31 + if err != nil { 32 + return nil, fmt.Errorf("failed to create OAuth store: %w", err) 33 + } 30 34 31 35 // Create OAuth client app with custom scopes (or defaults if nil) 32 36 // Interactive flows are typically for production use (credential helper, etc.)
+84 -13
pkg/auth/oauth/server_test.go
··· 2 2 3 3 import ( 4 4 "context" 5 - "github.com/bluesky-social/indigo/atproto/auth/oauth" 6 5 "net/http" 7 6 "net/http/httptest" 8 7 "strings" ··· 12 11 13 12 func TestNewServer(t *testing.T) { 14 13 // Create a basic OAuth app for testing 15 - store := oauth.NewMemStore() 14 + tmpDir := t.TempDir() 15 + storePath := tmpDir + "/oauth-test.json" 16 + 17 + store, err := NewFileStore(storePath) 18 + if err != nil { 19 + t.Fatalf("NewFileStore() error = %v", err) 20 + } 16 21 17 22 scopes := GetDefaultScopes("*") 18 23 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 31 36 } 32 37 33 38 func TestServer_SetRefresher(t *testing.T) { 34 - store := oauth.NewMemStore() 39 + tmpDir := t.TempDir() 40 + storePath := tmpDir + "/oauth-test.json" 41 + 42 + store, err := NewFileStore(storePath) 43 + if err != nil { 44 + t.Fatalf("NewFileStore() error = %v", err) 45 + } 35 46 36 47 scopes := GetDefaultScopes("*") 37 48 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 49 60 } 50 61 51 62 func TestServer_SetPostAuthCallback(t *testing.T) { 52 - store := oauth.NewMemStore() 63 + tmpDir := t.TempDir() 64 + storePath := tmpDir + "/oauth-test.json" 65 + 66 + store, err := NewFileStore(storePath) 67 + if err != nil { 68 + t.Fatalf("NewFileStore() error = %v", err) 69 + } 53 70 54 71 scopes := GetDefaultScopes("*") 55 72 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 70 87 } 71 88 72 89 func TestServer_SetUISessionStore(t *testing.T) { 73 - store := oauth.NewMemStore() 90 + tmpDir := t.TempDir() 91 + storePath := tmpDir + "/oauth-test.json" 92 + 93 + store, err := NewFileStore(storePath) 94 + if err != nil { 95 + t.Fatalf("NewFileStore() error = %v", err) 96 + } 74 97 75 98 scopes := GetDefaultScopes("*") 76 99 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 128 151 // ServeAuthorize tests 129 152 130 153 func TestServer_ServeAuthorize_MissingHandle(t *testing.T) { 131 - store := oauth.NewMemStore() 154 + tmpDir := t.TempDir() 155 + storePath := tmpDir + "/oauth-test.json" 156 + 157 + store, err := NewFileStore(storePath) 158 + if err != nil { 159 + t.Fatalf("NewFileStore() error = %v", err) 160 + } 132 161 133 162 scopes := GetDefaultScopes("*") 134 163 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 150 179 } 151 180 152 181 func TestServer_ServeAuthorize_InvalidMethod(t *testing.T) { 153 - store := oauth.NewMemStore() 182 + tmpDir := t.TempDir() 183 + storePath := tmpDir + "/oauth-test.json" 184 + 185 + store, err := NewFileStore(storePath) 186 + if err != nil { 187 + t.Fatalf("NewFileStore() error = %v", err) 188 + } 154 189 155 190 scopes := GetDefaultScopes("*") 156 191 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 174 209 // ServeCallback tests 175 210 176 211 func TestServer_ServeCallback_InvalidMethod(t *testing.T) { 177 - store := oauth.NewMemStore() 212 + tmpDir := t.TempDir() 213 + storePath := tmpDir + "/oauth-test.json" 214 + 215 + store, err := NewFileStore(storePath) 216 + if err != nil { 217 + t.Fatalf("NewFileStore() error = %v", err) 218 + } 178 219 179 220 scopes := GetDefaultScopes("*") 180 221 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 196 237 } 197 238 198 239 func TestServer_ServeCallback_OAuthError(t *testing.T) { 199 - store := oauth.NewMemStore() 240 + tmpDir := t.TempDir() 241 + storePath := tmpDir + "/oauth-test.json" 242 + 243 + store, err := NewFileStore(storePath) 244 + if err != nil { 245 + t.Fatalf("NewFileStore() error = %v", err) 246 + } 200 247 201 248 scopes := GetDefaultScopes("*") 202 249 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 223 270 } 224 271 225 272 func TestServer_ServeCallback_WithPostAuthCallback(t *testing.T) { 226 - store := oauth.NewMemStore() 273 + tmpDir := t.TempDir() 274 + storePath := tmpDir + "/oauth-test.json" 275 + 276 + store, err := NewFileStore(storePath) 277 + if err != nil { 278 + t.Fatalf("NewFileStore() error = %v", err) 279 + } 227 280 228 281 scopes := GetDefaultScopes("*") 229 282 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 262 315 }, 263 316 } 264 317 265 - store := oauth.NewMemStore() 318 + tmpDir := t.TempDir() 319 + storePath := tmpDir + "/oauth-test.json" 320 + 321 + store, err := NewFileStore(storePath) 322 + if err != nil { 323 + t.Fatalf("NewFileStore() error = %v", err) 324 + } 266 325 267 326 scopes := GetDefaultScopes("*") 268 327 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 286 345 } 287 346 288 347 func TestServer_RenderError(t *testing.T) { 289 - store := oauth.NewMemStore() 348 + tmpDir := t.TempDir() 349 + storePath := tmpDir + "/oauth-test.json" 350 + 351 + store, err := NewFileStore(storePath) 352 + if err != nil { 353 + t.Fatalf("NewFileStore() error = %v", err) 354 + } 290 355 291 356 scopes := GetDefaultScopes("*") 292 357 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry") ··· 315 380 } 316 381 317 382 func TestServer_RenderRedirectToSettings(t *testing.T) { 318 - store := oauth.NewMemStore() 383 + tmpDir := t.TempDir() 384 + storePath := tmpDir + "/oauth-test.json" 385 + 386 + store, err := NewFileStore(storePath) 387 + if err != nil { 388 + t.Fatalf("NewFileStore() error = %v", err) 389 + } 319 390 320 391 scopes := GetDefaultScopes("*") 321 392 clientApp, err := NewClientApp("http://localhost:5000", store, scopes, "", "AT Container Registry")
+236
pkg/auth/oauth/store.go
··· 1 + package oauth 2 + 3 + import ( 4 + "context" 5 + "encoding/json" 6 + "fmt" 7 + "maps" 8 + "os" 9 + "path/filepath" 10 + "sync" 11 + "time" 12 + 13 + "github.com/bluesky-social/indigo/atproto/auth/oauth" 14 + "github.com/bluesky-social/indigo/atproto/syntax" 15 + ) 16 + 17 + // FileStore implements oauth.ClientAuthStore with file-based persistence 18 + type FileStore struct { 19 + path string 20 + sessions map[string]*oauth.ClientSessionData // Key: "did:sessionID" 21 + requests map[string]*oauth.AuthRequestData // Key: state 22 + mu sync.RWMutex 23 + } 24 + 25 + // FileStoreData represents the JSON structure stored on disk 26 + type FileStoreData struct { 27 + Sessions map[string]*oauth.ClientSessionData `json:"sessions"` 28 + Requests map[string]*oauth.AuthRequestData `json:"requests"` 29 + } 30 + 31 + // NewFileStore creates a new file-based OAuth store 32 + func NewFileStore(path string) (*FileStore, error) { 33 + store := &FileStore{ 34 + path: path, 35 + sessions: make(map[string]*oauth.ClientSessionData), 36 + requests: make(map[string]*oauth.AuthRequestData), 37 + } 38 + 39 + // Load existing data if file exists 40 + if err := store.load(); err != nil { 41 + if !os.IsNotExist(err) { 42 + return nil, fmt.Errorf("failed to load store: %w", err) 43 + } 44 + // File doesn't exist yet, that's ok 45 + } 46 + 47 + return store, nil 48 + } 49 + 50 + // GetDefaultStorePath returns the default storage path for OAuth data 51 + func GetDefaultStorePath() (string, error) { 52 + // For AppView: /var/lib/atcr/oauth-sessions.json 53 + // For CLI tools: ~/.atcr/oauth-sessions.json 54 + 55 + // Check if running as a service (has write access to /var/lib) 56 + servicePath := "/var/lib/atcr/oauth-sessions.json" 57 + if err := os.MkdirAll(filepath.Dir(servicePath), 0700); err == nil { 58 + // Can write to /var/lib, use service path 59 + return servicePath, nil 60 + } 61 + 62 + // Fall back to user home directory 63 + homeDir, err := os.UserHomeDir() 64 + if err != nil { 65 + return "", fmt.Errorf("failed to get home directory: %w", err) 66 + } 67 + 68 + atcrDir := filepath.Join(homeDir, ".atcr") 69 + if err := os.MkdirAll(atcrDir, 0700); err != nil { 70 + return "", fmt.Errorf("failed to create .atcr directory: %w", err) 71 + } 72 + 73 + return filepath.Join(atcrDir, "oauth-sessions.json"), nil 74 + } 75 + 76 + // GetSession retrieves a session by DID and session ID 77 + func (s *FileStore) GetSession(ctx context.Context, did syntax.DID, sessionID string) (*oauth.ClientSessionData, error) { 78 + s.mu.RLock() 79 + defer s.mu.RUnlock() 80 + 81 + key := makeSessionKey(did.String(), sessionID) 82 + session, ok := s.sessions[key] 83 + if !ok { 84 + return nil, fmt.Errorf("session not found: %s/%s", did, sessionID) 85 + } 86 + 87 + return session, nil 88 + } 89 + 90 + // SaveSession saves or updates a session (upsert) 91 + func (s *FileStore) SaveSession(ctx context.Context, sess oauth.ClientSessionData) error { 92 + s.mu.Lock() 93 + defer s.mu.Unlock() 94 + 95 + key := makeSessionKey(sess.AccountDID.String(), sess.SessionID) 96 + s.sessions[key] = &sess 97 + 98 + return s.save() 99 + } 100 + 101 + // DeleteSession removes a session 102 + func (s *FileStore) DeleteSession(ctx context.Context, did syntax.DID, sessionID string) error { 103 + s.mu.Lock() 104 + defer s.mu.Unlock() 105 + 106 + key := makeSessionKey(did.String(), sessionID) 107 + delete(s.sessions, key) 108 + 109 + return s.save() 110 + } 111 + 112 + // GetAuthRequestInfo retrieves authentication request data by state 113 + func (s *FileStore) GetAuthRequestInfo(ctx context.Context, state string) (*oauth.AuthRequestData, error) { 114 + s.mu.RLock() 115 + defer s.mu.RUnlock() 116 + 117 + request, ok := s.requests[state] 118 + if !ok { 119 + return nil, fmt.Errorf("auth request not found: %s", state) 120 + } 121 + 122 + return request, nil 123 + } 124 + 125 + // SaveAuthRequestInfo saves authentication request data 126 + func (s *FileStore) SaveAuthRequestInfo(ctx context.Context, info oauth.AuthRequestData) error { 127 + s.mu.Lock() 128 + defer s.mu.Unlock() 129 + 130 + s.requests[info.State] = &info 131 + 132 + return s.save() 133 + } 134 + 135 + // DeleteAuthRequestInfo removes authentication request data 136 + func (s *FileStore) DeleteAuthRequestInfo(ctx context.Context, state string) error { 137 + s.mu.Lock() 138 + defer s.mu.Unlock() 139 + 140 + delete(s.requests, state) 141 + 142 + return s.save() 143 + } 144 + 145 + // CleanupExpired removes expired sessions and auth requests 146 + // Should be called periodically (e.g., every hour) 147 + func (s *FileStore) CleanupExpired() error { 148 + s.mu.Lock() 149 + defer s.mu.Unlock() 150 + 151 + now := time.Now() 152 + modified := false 153 + 154 + // Clean up auth requests older than 10 minutes 155 + // (OAuth flows should complete quickly) 156 + for state := range s.requests { 157 + // Note: AuthRequestData doesn't have a timestamp in indigo's implementation 158 + // For now, we'll rely on the OAuth server's cleanup routine 159 + // or we could extend AuthRequestData with metadata 160 + _ = state // Placeholder for future expiration logic 161 + } 162 + 163 + // Sessions don't have expiry in the data structure 164 + // Cleanup would need to be token-based (check token expiry) 165 + // For now, manual cleanup via DeleteSession 166 + _ = now 167 + 168 + if modified { 169 + return s.save() 170 + } 171 + 172 + return nil 173 + } 174 + 175 + // ListSessions returns all stored sessions for debugging/management 176 + func (s *FileStore) ListSessions() map[string]*oauth.ClientSessionData { 177 + s.mu.RLock() 178 + defer s.mu.RUnlock() 179 + 180 + // Return a copy to prevent external modification 181 + result := make(map[string]*oauth.ClientSessionData) 182 + maps.Copy(result, s.sessions) 183 + return result 184 + } 185 + 186 + // load reads data from disk 187 + func (s *FileStore) load() error { 188 + data, err := os.ReadFile(s.path) 189 + if err != nil { 190 + return err 191 + } 192 + 193 + var storeData FileStoreData 194 + if err := json.Unmarshal(data, &storeData); err != nil { 195 + return fmt.Errorf("failed to parse store: %w", err) 196 + } 197 + 198 + if storeData.Sessions != nil { 199 + s.sessions = storeData.Sessions 200 + } 201 + if storeData.Requests != nil { 202 + s.requests = storeData.Requests 203 + } 204 + 205 + return nil 206 + } 207 + 208 + // save writes data to disk 209 + func (s *FileStore) save() error { 210 + storeData := FileStoreData{ 211 + Sessions: s.sessions, 212 + Requests: s.requests, 213 + } 214 + 215 + data, err := json.MarshalIndent(storeData, "", " ") 216 + if err != nil { 217 + return fmt.Errorf("failed to marshal store: %w", err) 218 + } 219 + 220 + // Ensure directory exists 221 + if err := os.MkdirAll(filepath.Dir(s.path), 0700); err != nil { 222 + return fmt.Errorf("failed to create directory: %w", err) 223 + } 224 + 225 + // Write with restrictive permissions 226 + if err := os.WriteFile(s.path, data, 0600); err != nil { 227 + return fmt.Errorf("failed to write store: %w", err) 228 + } 229 + 230 + return nil 231 + } 232 + 233 + // makeSessionKey creates a composite key for session storage 234 + func makeSessionKey(did, sessionID string) string { 235 + return fmt.Sprintf("%s:%s", did, sessionID) 236 + }
+631
pkg/auth/oauth/store_test.go
··· 1 + package oauth 2 + 3 + import ( 4 + "context" 5 + "encoding/json" 6 + "os" 7 + "testing" 8 + "time" 9 + 10 + "github.com/bluesky-social/indigo/atproto/auth/oauth" 11 + "github.com/bluesky-social/indigo/atproto/syntax" 12 + ) 13 + 14 + func TestNewFileStore(t *testing.T) { 15 + tmpDir := t.TempDir() 16 + storePath := tmpDir + "/oauth-test.json" 17 + 18 + store, err := NewFileStore(storePath) 19 + if err != nil { 20 + t.Fatalf("NewFileStore() error = %v", err) 21 + } 22 + 23 + if store == nil { 24 + t.Fatal("Expected non-nil store") 25 + } 26 + 27 + if store.path != storePath { 28 + t.Errorf("Expected path %q, got %q", storePath, store.path) 29 + } 30 + 31 + if store.sessions == nil { 32 + t.Error("Expected sessions map to be initialized") 33 + } 34 + 35 + if store.requests == nil { 36 + t.Error("Expected requests map to be initialized") 37 + } 38 + } 39 + 40 + func TestFileStore_LoadNonExistent(t *testing.T) { 41 + tmpDir := t.TempDir() 42 + storePath := tmpDir + "/nonexistent.json" 43 + 44 + // Should succeed even if file doesn't exist 45 + store, err := NewFileStore(storePath) 46 + if err != nil { 47 + t.Fatalf("NewFileStore() should succeed with non-existent file, got error: %v", err) 48 + } 49 + 50 + if store == nil { 51 + t.Fatal("Expected non-nil store") 52 + } 53 + } 54 + 55 + func TestFileStore_LoadCorruptedFile(t *testing.T) { 56 + tmpDir := t.TempDir() 57 + storePath := tmpDir + "/corrupted.json" 58 + 59 + // Create corrupted JSON file 60 + if err := os.WriteFile(storePath, []byte("invalid json {{{"), 0600); err != nil { 61 + t.Fatalf("Failed to create corrupted file: %v", err) 62 + } 63 + 64 + // Should fail to load corrupted file 65 + _, err := NewFileStore(storePath) 66 + if err == nil { 67 + t.Error("Expected error when loading corrupted file") 68 + } 69 + } 70 + 71 + func TestFileStore_GetSession_NotFound(t *testing.T) { 72 + tmpDir := t.TempDir() 73 + storePath := tmpDir + "/oauth-test.json" 74 + 75 + store, err := NewFileStore(storePath) 76 + if err != nil { 77 + t.Fatalf("NewFileStore() error = %v", err) 78 + } 79 + 80 + ctx := context.Background() 81 + did, _ := syntax.ParseDID("did:plc:test123") 82 + sessionID := "session123" 83 + 84 + // Should return error for non-existent session 85 + session, err := store.GetSession(ctx, did, sessionID) 86 + if err == nil { 87 + t.Error("Expected error for non-existent session") 88 + } 89 + if session != nil { 90 + t.Error("Expected nil session for non-existent entry") 91 + } 92 + } 93 + 94 + func TestFileStore_SaveAndGetSession(t *testing.T) { 95 + tmpDir := t.TempDir() 96 + storePath := tmpDir + "/oauth-test.json" 97 + 98 + store, err := NewFileStore(storePath) 99 + if err != nil { 100 + t.Fatalf("NewFileStore() error = %v", err) 101 + } 102 + 103 + ctx := context.Background() 104 + did, _ := syntax.ParseDID("did:plc:alice123") 105 + 106 + // Create test session 107 + sessionData := oauth.ClientSessionData{ 108 + AccountDID: did, 109 + SessionID: "test-session-123", 110 + HostURL: "https://pds.example.com", 111 + Scopes: []string{"atproto", "blob:read"}, 112 + } 113 + 114 + // Save session 115 + if err := store.SaveSession(ctx, sessionData); err != nil { 116 + t.Fatalf("SaveSession() error = %v", err) 117 + } 118 + 119 + // Retrieve session 120 + retrieved, err := store.GetSession(ctx, did, "test-session-123") 121 + if err != nil { 122 + t.Fatalf("GetSession() error = %v", err) 123 + } 124 + 125 + if retrieved == nil { 126 + t.Fatal("Expected non-nil session") 127 + } 128 + 129 + if retrieved.SessionID != sessionData.SessionID { 130 + t.Errorf("Expected sessionID %q, got %q", sessionData.SessionID, retrieved.SessionID) 131 + } 132 + 133 + if retrieved.AccountDID.String() != did.String() { 134 + t.Errorf("Expected DID %q, got %q", did.String(), retrieved.AccountDID.String()) 135 + } 136 + 137 + if retrieved.HostURL != sessionData.HostURL { 138 + t.Errorf("Expected hostURL %q, got %q", sessionData.HostURL, retrieved.HostURL) 139 + } 140 + } 141 + 142 + func TestFileStore_UpdateSession(t *testing.T) { 143 + tmpDir := t.TempDir() 144 + storePath := tmpDir + "/oauth-test.json" 145 + 146 + store, err := NewFileStore(storePath) 147 + if err != nil { 148 + t.Fatalf("NewFileStore() error = %v", err) 149 + } 150 + 151 + ctx := context.Background() 152 + did, _ := syntax.ParseDID("did:plc:alice123") 153 + 154 + // Save initial session 155 + sessionData := oauth.ClientSessionData{ 156 + AccountDID: did, 157 + SessionID: "test-session-123", 158 + HostURL: "https://pds.example.com", 159 + Scopes: []string{"atproto"}, 160 + } 161 + 162 + if err := store.SaveSession(ctx, sessionData); err != nil { 163 + t.Fatalf("SaveSession() error = %v", err) 164 + } 165 + 166 + // Update session with new scopes 167 + sessionData.Scopes = []string{"atproto", "blob:read", "blob:write"} 168 + if err := store.SaveSession(ctx, sessionData); err != nil { 169 + t.Fatalf("SaveSession() (update) error = %v", err) 170 + } 171 + 172 + // Retrieve updated session 173 + retrieved, err := store.GetSession(ctx, did, "test-session-123") 174 + if err != nil { 175 + t.Fatalf("GetSession() error = %v", err) 176 + } 177 + 178 + if len(retrieved.Scopes) != 3 { 179 + t.Errorf("Expected 3 scopes, got %d", len(retrieved.Scopes)) 180 + } 181 + } 182 + 183 + func TestFileStore_DeleteSession(t *testing.T) { 184 + tmpDir := t.TempDir() 185 + storePath := tmpDir + "/oauth-test.json" 186 + 187 + store, err := NewFileStore(storePath) 188 + if err != nil { 189 + t.Fatalf("NewFileStore() error = %v", err) 190 + } 191 + 192 + ctx := context.Background() 193 + did, _ := syntax.ParseDID("did:plc:alice123") 194 + 195 + // Save session 196 + sessionData := oauth.ClientSessionData{ 197 + AccountDID: did, 198 + SessionID: "test-session-123", 199 + HostURL: "https://pds.example.com", 200 + } 201 + 202 + if err := store.SaveSession(ctx, sessionData); err != nil { 203 + t.Fatalf("SaveSession() error = %v", err) 204 + } 205 + 206 + // Verify it exists 207 + if _, err := store.GetSession(ctx, did, "test-session-123"); err != nil { 208 + t.Fatalf("GetSession() should succeed before delete, got error: %v", err) 209 + } 210 + 211 + // Delete session 212 + if err := store.DeleteSession(ctx, did, "test-session-123"); err != nil { 213 + t.Fatalf("DeleteSession() error = %v", err) 214 + } 215 + 216 + // Verify it's gone 217 + _, err = store.GetSession(ctx, did, "test-session-123") 218 + if err == nil { 219 + t.Error("Expected error after deleting session") 220 + } 221 + } 222 + 223 + func TestFileStore_DeleteNonExistentSession(t *testing.T) { 224 + tmpDir := t.TempDir() 225 + storePath := tmpDir + "/oauth-test.json" 226 + 227 + store, err := NewFileStore(storePath) 228 + if err != nil { 229 + t.Fatalf("NewFileStore() error = %v", err) 230 + } 231 + 232 + ctx := context.Background() 233 + did, _ := syntax.ParseDID("did:plc:alice123") 234 + 235 + // Delete non-existent session should not error 236 + if err := store.DeleteSession(ctx, did, "nonexistent"); err != nil { 237 + t.Errorf("DeleteSession() on non-existent session should not error, got: %v", err) 238 + } 239 + } 240 + 241 + func TestFileStore_SaveAndGetAuthRequestInfo(t *testing.T) { 242 + tmpDir := t.TempDir() 243 + storePath := tmpDir + "/oauth-test.json" 244 + 245 + store, err := NewFileStore(storePath) 246 + if err != nil { 247 + t.Fatalf("NewFileStore() error = %v", err) 248 + } 249 + 250 + ctx := context.Background() 251 + 252 + // Create test auth request 253 + did, _ := syntax.ParseDID("did:plc:alice123") 254 + authRequest := oauth.AuthRequestData{ 255 + State: "test-state-123", 256 + AuthServerURL: "https://pds.example.com", 257 + AccountDID: &did, 258 + Scopes: []string{"atproto", "blob:read"}, 259 + RequestURI: "urn:ietf:params:oauth:request_uri:test123", 260 + AuthServerTokenEndpoint: "https://pds.example.com/oauth/token", 261 + } 262 + 263 + // Save auth request 264 + if err := store.SaveAuthRequestInfo(ctx, authRequest); err != nil { 265 + t.Fatalf("SaveAuthRequestInfo() error = %v", err) 266 + } 267 + 268 + // Retrieve auth request 269 + retrieved, err := store.GetAuthRequestInfo(ctx, "test-state-123") 270 + if err != nil { 271 + t.Fatalf("GetAuthRequestInfo() error = %v", err) 272 + } 273 + 274 + if retrieved == nil { 275 + t.Fatal("Expected non-nil auth request") 276 + } 277 + 278 + if retrieved.State != authRequest.State { 279 + t.Errorf("Expected state %q, got %q", authRequest.State, retrieved.State) 280 + } 281 + 282 + if retrieved.AuthServerURL != authRequest.AuthServerURL { 283 + t.Errorf("Expected authServerURL %q, got %q", authRequest.AuthServerURL, retrieved.AuthServerURL) 284 + } 285 + } 286 + 287 + func TestFileStore_GetAuthRequestInfo_NotFound(t *testing.T) { 288 + tmpDir := t.TempDir() 289 + storePath := tmpDir + "/oauth-test.json" 290 + 291 + store, err := NewFileStore(storePath) 292 + if err != nil { 293 + t.Fatalf("NewFileStore() error = %v", err) 294 + } 295 + 296 + ctx := context.Background() 297 + 298 + // Should return error for non-existent request 299 + _, err = store.GetAuthRequestInfo(ctx, "nonexistent-state") 300 + if err == nil { 301 + t.Error("Expected error for non-existent auth request") 302 + } 303 + } 304 + 305 + func TestFileStore_DeleteAuthRequestInfo(t *testing.T) { 306 + tmpDir := t.TempDir() 307 + storePath := tmpDir + "/oauth-test.json" 308 + 309 + store, err := NewFileStore(storePath) 310 + if err != nil { 311 + t.Fatalf("NewFileStore() error = %v", err) 312 + } 313 + 314 + ctx := context.Background() 315 + 316 + // Save auth request 317 + authRequest := oauth.AuthRequestData{ 318 + State: "test-state-123", 319 + AuthServerURL: "https://pds.example.com", 320 + } 321 + 322 + if err := store.SaveAuthRequestInfo(ctx, authRequest); err != nil { 323 + t.Fatalf("SaveAuthRequestInfo() error = %v", err) 324 + } 325 + 326 + // Verify it exists 327 + if _, err := store.GetAuthRequestInfo(ctx, "test-state-123"); err != nil { 328 + t.Fatalf("GetAuthRequestInfo() should succeed before delete, got error: %v", err) 329 + } 330 + 331 + // Delete auth request 332 + if err := store.DeleteAuthRequestInfo(ctx, "test-state-123"); err != nil { 333 + t.Fatalf("DeleteAuthRequestInfo() error = %v", err) 334 + } 335 + 336 + // Verify it's gone 337 + _, err = store.GetAuthRequestInfo(ctx, "test-state-123") 338 + if err == nil { 339 + t.Error("Expected error after deleting auth request") 340 + } 341 + } 342 + 343 + func TestFileStore_ListSessions(t *testing.T) { 344 + tmpDir := t.TempDir() 345 + storePath := tmpDir + "/oauth-test.json" 346 + 347 + store, err := NewFileStore(storePath) 348 + if err != nil { 349 + t.Fatalf("NewFileStore() error = %v", err) 350 + } 351 + 352 + ctx := context.Background() 353 + 354 + // Initially empty 355 + sessions := store.ListSessions() 356 + if len(sessions) != 0 { 357 + t.Errorf("Expected 0 sessions, got %d", len(sessions)) 358 + } 359 + 360 + // Add multiple sessions 361 + did1, _ := syntax.ParseDID("did:plc:alice123") 362 + did2, _ := syntax.ParseDID("did:plc:bob456") 363 + 364 + session1 := oauth.ClientSessionData{ 365 + AccountDID: did1, 366 + SessionID: "session-1", 367 + HostURL: "https://pds1.example.com", 368 + } 369 + 370 + session2 := oauth.ClientSessionData{ 371 + AccountDID: did2, 372 + SessionID: "session-2", 373 + HostURL: "https://pds2.example.com", 374 + } 375 + 376 + if err := store.SaveSession(ctx, session1); err != nil { 377 + t.Fatalf("SaveSession() error = %v", err) 378 + } 379 + 380 + if err := store.SaveSession(ctx, session2); err != nil { 381 + t.Fatalf("SaveSession() error = %v", err) 382 + } 383 + 384 + // List sessions 385 + sessions = store.ListSessions() 386 + if len(sessions) != 2 { 387 + t.Errorf("Expected 2 sessions, got %d", len(sessions)) 388 + } 389 + 390 + // Verify we got both sessions 391 + key1 := makeSessionKey(did1.String(), "session-1") 392 + key2 := makeSessionKey(did2.String(), "session-2") 393 + 394 + if sessions[key1] == nil { 395 + t.Error("Expected session1 in list") 396 + } 397 + 398 + if sessions[key2] == nil { 399 + t.Error("Expected session2 in list") 400 + } 401 + } 402 + 403 + func TestFileStore_Persistence_Across_Instances(t *testing.T) { 404 + tmpDir := t.TempDir() 405 + storePath := tmpDir + "/oauth-test.json" 406 + 407 + ctx := context.Background() 408 + did, _ := syntax.ParseDID("did:plc:alice123") 409 + 410 + // Create first store and save data 411 + store1, err := NewFileStore(storePath) 412 + if err != nil { 413 + t.Fatalf("NewFileStore() error = %v", err) 414 + } 415 + 416 + sessionData := oauth.ClientSessionData{ 417 + AccountDID: did, 418 + SessionID: "persistent-session", 419 + HostURL: "https://pds.example.com", 420 + } 421 + 422 + if err := store1.SaveSession(ctx, sessionData); err != nil { 423 + t.Fatalf("SaveSession() error = %v", err) 424 + } 425 + 426 + authRequest := oauth.AuthRequestData{ 427 + State: "persistent-state", 428 + AuthServerURL: "https://pds.example.com", 429 + } 430 + 431 + if err := store1.SaveAuthRequestInfo(ctx, authRequest); err != nil { 432 + t.Fatalf("SaveAuthRequestInfo() error = %v", err) 433 + } 434 + 435 + // Create second store from same file 436 + store2, err := NewFileStore(storePath) 437 + if err != nil { 438 + t.Fatalf("Second NewFileStore() error = %v", err) 439 + } 440 + 441 + // Verify session persisted 442 + retrievedSession, err := store2.GetSession(ctx, did, "persistent-session") 443 + if err != nil { 444 + t.Fatalf("GetSession() from second store error = %v", err) 445 + } 446 + 447 + if retrievedSession.SessionID != "persistent-session" { 448 + t.Errorf("Expected persistent session ID, got %q", retrievedSession.SessionID) 449 + } 450 + 451 + // Verify auth request persisted 452 + retrievedAuth, err := store2.GetAuthRequestInfo(ctx, "persistent-state") 453 + if err != nil { 454 + t.Fatalf("GetAuthRequestInfo() from second store error = %v", err) 455 + } 456 + 457 + if retrievedAuth.State != "persistent-state" { 458 + t.Errorf("Expected persistent state, got %q", retrievedAuth.State) 459 + } 460 + } 461 + 462 + func TestFileStore_FileSecurity(t *testing.T) { 463 + tmpDir := t.TempDir() 464 + storePath := tmpDir + "/oauth-test.json" 465 + 466 + store, err := NewFileStore(storePath) 467 + if err != nil { 468 + t.Fatalf("NewFileStore() error = %v", err) 469 + } 470 + 471 + ctx := context.Background() 472 + did, _ := syntax.ParseDID("did:plc:alice123") 473 + 474 + // Save some data to trigger file creation 475 + sessionData := oauth.ClientSessionData{ 476 + AccountDID: did, 477 + SessionID: "test-session", 478 + HostURL: "https://pds.example.com", 479 + } 480 + 481 + if err := store.SaveSession(ctx, sessionData); err != nil { 482 + t.Fatalf("SaveSession() error = %v", err) 483 + } 484 + 485 + // Check file permissions (should be 0600) 486 + info, err := os.Stat(storePath) 487 + if err != nil { 488 + t.Fatalf("Failed to stat file: %v", err) 489 + } 490 + 491 + mode := info.Mode() 492 + if mode.Perm() != 0600 { 493 + t.Errorf("Expected file permissions 0600, got %o", mode.Perm()) 494 + } 495 + } 496 + 497 + func TestFileStore_JSONFormat(t *testing.T) { 498 + tmpDir := t.TempDir() 499 + storePath := tmpDir + "/oauth-test.json" 500 + 501 + store, err := NewFileStore(storePath) 502 + if err != nil { 503 + t.Fatalf("NewFileStore() error = %v", err) 504 + } 505 + 506 + ctx := context.Background() 507 + did, _ := syntax.ParseDID("did:plc:alice123") 508 + 509 + // Save data 510 + sessionData := oauth.ClientSessionData{ 511 + AccountDID: did, 512 + SessionID: "test-session", 513 + HostURL: "https://pds.example.com", 514 + } 515 + 516 + if err := store.SaveSession(ctx, sessionData); err != nil { 517 + t.Fatalf("SaveSession() error = %v", err) 518 + } 519 + 520 + // Read and verify JSON format 521 + data, err := os.ReadFile(storePath) 522 + if err != nil { 523 + t.Fatalf("Failed to read file: %v", err) 524 + } 525 + 526 + var storeData FileStoreData 527 + if err := json.Unmarshal(data, &storeData); err != nil { 528 + t.Fatalf("Failed to parse JSON: %v", err) 529 + } 530 + 531 + if storeData.Sessions == nil { 532 + t.Error("Expected sessions in JSON") 533 + } 534 + 535 + if storeData.Requests == nil { 536 + t.Error("Expected requests in JSON") 537 + } 538 + } 539 + 540 + func TestFileStore_CleanupExpired(t *testing.T) { 541 + tmpDir := t.TempDir() 542 + storePath := tmpDir + "/oauth-test.json" 543 + 544 + store, err := NewFileStore(storePath) 545 + if err != nil { 546 + t.Fatalf("NewFileStore() error = %v", err) 547 + } 548 + 549 + // CleanupExpired should not error even with no data 550 + if err := store.CleanupExpired(); err != nil { 551 + t.Errorf("CleanupExpired() error = %v", err) 552 + } 553 + 554 + // Note: Current implementation doesn't actually clean anything 555 + // since AuthRequestData and ClientSessionData don't have expiry timestamps 556 + // This test verifies the method doesn't panic 557 + } 558 + 559 + func TestGetDefaultStorePath(t *testing.T) { 560 + path, err := GetDefaultStorePath() 561 + if err != nil { 562 + t.Fatalf("GetDefaultStorePath() error = %v", err) 563 + } 564 + 565 + if path == "" { 566 + t.Fatal("Expected non-empty path") 567 + } 568 + 569 + // Path should either be /var/lib/atcr or ~/.atcr 570 + // We can't assert exact path since it depends on permissions 571 + t.Logf("Default store path: %s", path) 572 + } 573 + 574 + func TestMakeSessionKey(t *testing.T) { 575 + did := "did:plc:alice123" 576 + sessionID := "session-456" 577 + 578 + key := makeSessionKey(did, sessionID) 579 + expected := "did:plc:alice123:session-456" 580 + 581 + if key != expected { 582 + t.Errorf("Expected key %q, got %q", expected, key) 583 + } 584 + } 585 + 586 + func TestFileStore_ConcurrentAccess(t *testing.T) { 587 + tmpDir := t.TempDir() 588 + storePath := tmpDir + "/oauth-test.json" 589 + 590 + store, err := NewFileStore(storePath) 591 + if err != nil { 592 + t.Fatalf("NewFileStore() error = %v", err) 593 + } 594 + 595 + ctx := context.Background() 596 + 597 + // Run concurrent operations 598 + done := make(chan bool) 599 + 600 + // Writer goroutine 601 + go func() { 602 + for i := 0; i < 10; i++ { 603 + did, _ := syntax.ParseDID("did:plc:alice123") 604 + sessionData := oauth.ClientSessionData{ 605 + AccountDID: did, 606 + SessionID: "session-1", 607 + HostURL: "https://pds.example.com", 608 + } 609 + store.SaveSession(ctx, sessionData) 610 + time.Sleep(1 * time.Millisecond) 611 + } 612 + done <- true 613 + }() 614 + 615 + // Reader goroutine 616 + go func() { 617 + for i := 0; i < 10; i++ { 618 + did, _ := syntax.ParseDID("did:plc:alice123") 619 + store.GetSession(ctx, did, "session-1") 620 + time.Sleep(1 * time.Millisecond) 621 + } 622 + done <- true 623 + }() 624 + 625 + // Wait for both goroutines 626 + <-done 627 + <-done 628 + 629 + // If we got here without panicking, the locking works 630 + t.Log("Concurrent access test passed") 631 + }
-300
pkg/auth/servicetoken.go
··· 1 - package auth 2 - 3 - import ( 4 - "context" 5 - "encoding/base64" 6 - "encoding/json" 7 - "errors" 8 - "fmt" 9 - "io" 10 - "log/slog" 11 - "net/http" 12 - "net/url" 13 - "strings" 14 - "time" 15 - 16 - "atcr.io/pkg/atproto" 17 - "atcr.io/pkg/auth/oauth" 18 - "github.com/bluesky-social/indigo/atproto/atclient" 19 - indigo_oauth "github.com/bluesky-social/indigo/atproto/auth/oauth" 20 - ) 21 - 22 - // getErrorHint provides context-specific troubleshooting hints based on API error type 23 - func getErrorHint(apiErr *atclient.APIError) string { 24 - switch apiErr.Name { 25 - case "use_dpop_nonce": 26 - return "DPoP nonce mismatch - indigo library should automatically retry with new nonce. If this persists, check for concurrent request issues or PDS session corruption." 27 - case "invalid_client": 28 - if apiErr.Message != "" && apiErr.Message == "Validation of \"client_assertion\" failed: \"iat\" claim timestamp check failed (it should be in the past)" { 29 - return "JWT timestamp validation failed - system clock on AppView may be ahead of PDS clock. Check NTP sync with: timedatectl status" 30 - } 31 - return "OAuth client authentication failed - check client key configuration and PDS OAuth server status" 32 - case "invalid_token", "invalid_grant": 33 - return "OAuth tokens expired or invalidated - user will need to re-authenticate via OAuth flow" 34 - case "server_error": 35 - if apiErr.StatusCode == 500 { 36 - return "PDS returned internal server error - this may occur after repeated DPoP nonce failures or other PDS-side issues. Check PDS logs for root cause." 37 - } 38 - return "PDS server error - check PDS health and logs" 39 - case "invalid_dpop_proof": 40 - return "DPoP proof validation failed - check system clock sync and DPoP key configuration" 41 - default: 42 - if apiErr.StatusCode == 401 || apiErr.StatusCode == 403 { 43 - return "Authentication/authorization failed - OAuth session may be expired or revoked" 44 - } 45 - return "PDS rejected the request - see errorName and errorMessage for details" 46 - } 47 - } 48 - 49 - // ParseJWTExpiry extracts the expiry time from a JWT without verifying the signature 50 - // We trust tokens from the user's PDS, so signature verification isn't needed here 51 - // Manually decodes the JWT payload to avoid algorithm compatibility issues 52 - func ParseJWTExpiry(tokenString string) (time.Time, error) { 53 - // JWT format: header.payload.signature 54 - parts := strings.Split(tokenString, ".") 55 - if len(parts) != 3 { 56 - return time.Time{}, fmt.Errorf("invalid JWT format: expected 3 parts, got %d", len(parts)) 57 - } 58 - 59 - // Decode the payload (second part) 60 - payload, err := base64.RawURLEncoding.DecodeString(parts[1]) 61 - if err != nil { 62 - return time.Time{}, fmt.Errorf("failed to decode JWT payload: %w", err) 63 - } 64 - 65 - // Parse the JSON payload 66 - var claims struct { 67 - Exp int64 `json:"exp"` 68 - } 69 - if err := json.Unmarshal(payload, &claims); err != nil { 70 - return time.Time{}, fmt.Errorf("failed to parse JWT claims: %w", err) 71 - } 72 - 73 - if claims.Exp == 0 { 74 - return time.Time{}, fmt.Errorf("JWT missing exp claim") 75 - } 76 - 77 - return time.Unix(claims.Exp, 0), nil 78 - } 79 - 80 - // buildServiceAuthURL constructs the URL for com.atproto.server.getServiceAuth 81 - func buildServiceAuthURL(pdsEndpoint, holdDID string) string { 82 - // Request 5-minute expiry (PDS may grant less) 83 - // exp must be absolute Unix timestamp, not relative duration 84 - expiryTime := time.Now().Unix() + 300 // 5 minutes from now 85 - return fmt.Sprintf("%s%s?aud=%s&lxm=%s&exp=%d", 86 - pdsEndpoint, 87 - atproto.ServerGetServiceAuth, 88 - url.QueryEscape(holdDID), 89 - url.QueryEscape("com.atproto.repo.getRecord"), 90 - expiryTime, 91 - ) 92 - } 93 - 94 - // parseServiceTokenResponse extracts the token from a service auth response 95 - func parseServiceTokenResponse(resp *http.Response) (string, error) { 96 - defer resp.Body.Close() 97 - 98 - if resp.StatusCode != http.StatusOK { 99 - bodyBytes, _ := io.ReadAll(resp.Body) 100 - return "", fmt.Errorf("service auth failed with status %d: %s", resp.StatusCode, string(bodyBytes)) 101 - } 102 - 103 - var result struct { 104 - Token string `json:"token"` 105 - } 106 - if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { 107 - return "", fmt.Errorf("failed to decode service auth response: %w", err) 108 - } 109 - 110 - if result.Token == "" { 111 - return "", fmt.Errorf("empty token in service auth response") 112 - } 113 - 114 - return result.Token, nil 115 - } 116 - 117 - // GetOrFetchServiceToken gets a service token for hold authentication. 118 - // Handles both OAuth/DPoP and app-password authentication based on authMethod. 119 - // Checks cache first, then fetches from PDS if needed. 120 - // 121 - // For OAuth: Uses DoWithSession() to hold a per-DID lock through the entire PDS interaction. 122 - // This prevents DPoP nonce race conditions when multiple Docker layers upload concurrently. 123 - // 124 - // For app-password: Uses Bearer token authentication without locking (no DPoP complexity). 125 - func GetOrFetchServiceToken( 126 - ctx context.Context, 127 - authMethod string, 128 - refresher *oauth.Refresher, // Required for OAuth, nil for app-password 129 - did, holdDID, pdsEndpoint string, 130 - ) (string, error) { 131 - // Check cache first to avoid unnecessary PDS calls on every request 132 - cachedToken, expiresAt := GetServiceToken(did, holdDID) 133 - 134 - // Use cached token if it exists and has > 10s remaining 135 - if cachedToken != "" && time.Until(expiresAt) > 10*time.Second { 136 - slog.Debug("Using cached service token", 137 - "did", did, 138 - "authMethod", authMethod, 139 - "expiresIn", time.Until(expiresAt).Round(time.Second)) 140 - return cachedToken, nil 141 - } 142 - 143 - // Cache miss or expiring soon - fetch new service token 144 - if cachedToken == "" { 145 - slog.Debug("Service token cache miss, fetching new token", "did", did, "authMethod", authMethod) 146 - } else { 147 - slog.Debug("Service token expiring soon, proactively renewing", "did", did, "authMethod", authMethod) 148 - } 149 - 150 - var serviceToken string 151 - var err error 152 - 153 - // Branch based on auth method 154 - if authMethod == AuthMethodOAuth { 155 - serviceToken, err = doOAuthFetch(ctx, refresher, did, holdDID, pdsEndpoint) 156 - // OAuth-specific cleanup: delete stale session on error 157 - if err != nil && refresher != nil { 158 - if delErr := refresher.DeleteSession(ctx, did); delErr != nil { 159 - slog.Warn("Failed to delete stale OAuth session", 160 - "component", "auth/servicetoken", 161 - "did", did, 162 - "error", delErr) 163 - } 164 - } 165 - } else { 166 - serviceToken, err = doAppPasswordFetch(ctx, did, holdDID, pdsEndpoint) 167 - } 168 - 169 - // Unified error handling 170 - if err != nil { 171 - InvalidateServiceToken(did, holdDID) 172 - 173 - var apiErr *atclient.APIError 174 - if errors.As(err, &apiErr) { 175 - slog.Error("Service token request failed", 176 - "component", "auth/servicetoken", 177 - "authMethod", authMethod, 178 - "did", did, 179 - "holdDID", holdDID, 180 - "pdsEndpoint", pdsEndpoint, 181 - "error", err, 182 - "httpStatus", apiErr.StatusCode, 183 - "errorName", apiErr.Name, 184 - "errorMessage", apiErr.Message, 185 - "hint", getErrorHint(apiErr)) 186 - } else { 187 - slog.Error("Service token request failed", 188 - "component", "auth/servicetoken", 189 - "authMethod", authMethod, 190 - "did", did, 191 - "holdDID", holdDID, 192 - "pdsEndpoint", pdsEndpoint, 193 - "error", err) 194 - } 195 - return "", err 196 - } 197 - 198 - // Cache the token (parses JWT to extract actual expiry) 199 - if cacheErr := SetServiceToken(did, holdDID, serviceToken); cacheErr != nil { 200 - slog.Warn("Failed to cache service token", "error", cacheErr, "did", did, "holdDID", holdDID) 201 - } 202 - 203 - slog.Debug("Service token obtained", "did", did, "authMethod", authMethod) 204 - return serviceToken, nil 205 - } 206 - 207 - // doOAuthFetch fetches a service token using OAuth/DPoP authentication. 208 - // Uses DoWithSession() for per-DID locking to prevent DPoP nonce races. 209 - // Returns (token, error) without logging - caller handles error logging. 210 - func doOAuthFetch( 211 - ctx context.Context, 212 - refresher *oauth.Refresher, 213 - did, holdDID, pdsEndpoint string, 214 - ) (string, error) { 215 - if refresher == nil { 216 - return "", fmt.Errorf("refresher is nil (OAuth session required)") 217 - } 218 - 219 - var serviceToken string 220 - var fetchErr error 221 - 222 - err := refresher.DoWithSession(ctx, did, func(session *indigo_oauth.ClientSession) error { 223 - // Double-check cache after acquiring lock (double-checked locking pattern) 224 - cachedToken, expiresAt := GetServiceToken(did, holdDID) 225 - if cachedToken != "" && time.Until(expiresAt) > 10*time.Second { 226 - slog.Debug("Service token cache hit after lock acquisition", 227 - "did", did, 228 - "expiresIn", time.Until(expiresAt).Round(time.Second)) 229 - serviceToken = cachedToken 230 - return nil 231 - } 232 - 233 - serviceAuthURL := buildServiceAuthURL(pdsEndpoint, holdDID) 234 - 235 - req, err := http.NewRequestWithContext(ctx, "GET", serviceAuthURL, nil) 236 - if err != nil { 237 - fetchErr = fmt.Errorf("failed to create request: %w", err) 238 - return fetchErr 239 - } 240 - 241 - resp, err := session.DoWithAuth(session.Client, req, "com.atproto.server.getServiceAuth") 242 - if err != nil { 243 - fetchErr = fmt.Errorf("OAuth request failed: %w", err) 244 - return fetchErr 245 - } 246 - 247 - token, parseErr := parseServiceTokenResponse(resp) 248 - if parseErr != nil { 249 - fetchErr = parseErr 250 - return fetchErr 251 - } 252 - 253 - serviceToken = token 254 - return nil 255 - }) 256 - 257 - if err != nil { 258 - if fetchErr != nil { 259 - return "", fetchErr 260 - } 261 - return "", fmt.Errorf("failed to get OAuth session: %w", err) 262 - } 263 - 264 - return serviceToken, nil 265 - } 266 - 267 - // doAppPasswordFetch fetches a service token using Bearer token authentication. 268 - // Returns (token, error) without logging - caller handles error logging. 269 - func doAppPasswordFetch( 270 - ctx context.Context, 271 - did, holdDID, pdsEndpoint string, 272 - ) (string, error) { 273 - accessToken, ok := GetGlobalTokenCache().Get(did) 274 - if !ok { 275 - return "", fmt.Errorf("no app-password access token available for DID %s", did) 276 - } 277 - 278 - serviceAuthURL := buildServiceAuthURL(pdsEndpoint, holdDID) 279 - 280 - req, err := http.NewRequestWithContext(ctx, "GET", serviceAuthURL, nil) 281 - if err != nil { 282 - return "", fmt.Errorf("failed to create request: %w", err) 283 - } 284 - 285 - req.Header.Set("Authorization", "Bearer "+accessToken) 286 - 287 - resp, err := http.DefaultClient.Do(req) 288 - if err != nil { 289 - return "", fmt.Errorf("request failed: %w", err) 290 - } 291 - 292 - if resp.StatusCode == http.StatusUnauthorized { 293 - resp.Body.Close() 294 - // Clear stale app-password token 295 - GetGlobalTokenCache().Delete(did) 296 - return "", fmt.Errorf("app-password authentication failed: token expired or invalid") 297 - } 298 - 299 - return parseServiceTokenResponse(resp) 300 - }
-27
pkg/auth/servicetoken_test.go
··· 1 - package auth 2 - 3 - import ( 4 - "context" 5 - "testing" 6 - ) 7 - 8 - func TestGetOrFetchServiceToken_NilRefresher(t *testing.T) { 9 - ctx := context.Background() 10 - did := "did:plc:test123" 11 - holdDID := "did:web:hold.example.com" 12 - pdsEndpoint := "https://pds.example.com" 13 - 14 - // Test with nil refresher and OAuth auth method - should return error 15 - _, err := GetOrFetchServiceToken(ctx, AuthMethodOAuth, nil, did, holdDID, pdsEndpoint) 16 - if err == nil { 17 - t.Error("Expected error when refresher is nil for OAuth") 18 - } 19 - 20 - expectedErrMsg := "refresher is nil (OAuth session required)" 21 - if err.Error() != expectedErrMsg { 22 - t.Errorf("Expected error message %q, got %q", expectedErrMsg, err.Error()) 23 - } 24 - } 25 - 26 - // Note: Full tests with mocked OAuth refresher and HTTP client will be added 27 - // in the comprehensive test implementation phase
+175
pkg/auth/token/cache.go
··· 1 + // Package token provides service token caching and management for AppView. 2 + // Service tokens are JWTs issued by a user's PDS to authorize AppView to 3 + // act on their behalf when communicating with hold services. Tokens are 4 + // cached with automatic expiry parsing and 10-second safety margins. 5 + package token 6 + 7 + import ( 8 + "encoding/base64" 9 + "encoding/json" 10 + "fmt" 11 + "log/slog" 12 + "strings" 13 + "sync" 14 + "time" 15 + ) 16 + 17 + // serviceTokenEntry represents a cached service token 18 + type serviceTokenEntry struct { 19 + token string 20 + expiresAt time.Time 21 + } 22 + 23 + // Global cache for service tokens (DID:HoldDID -> token) 24 + // Service tokens are JWTs issued by a user's PDS to authorize AppView to act on their behalf 25 + // when communicating with hold services. These tokens are scoped to specific holds and have 26 + // limited lifetime (typically 60s, can request up to 5min). 27 + var ( 28 + globalServiceTokens = make(map[string]*serviceTokenEntry) 29 + globalServiceTokensMu sync.RWMutex 30 + ) 31 + 32 + // GetServiceToken retrieves a cached service token for the given DID and hold DID 33 + // Returns empty string if no valid cached token exists 34 + func GetServiceToken(did, holdDID string) (token string, expiresAt time.Time) { 35 + cacheKey := did + ":" + holdDID 36 + 37 + globalServiceTokensMu.RLock() 38 + entry, exists := globalServiceTokens[cacheKey] 39 + globalServiceTokensMu.RUnlock() 40 + 41 + if !exists { 42 + return "", time.Time{} 43 + } 44 + 45 + // Check if token is still valid 46 + if time.Now().After(entry.expiresAt) { 47 + // Token expired, remove from cache 48 + globalServiceTokensMu.Lock() 49 + delete(globalServiceTokens, cacheKey) 50 + globalServiceTokensMu.Unlock() 51 + return "", time.Time{} 52 + } 53 + 54 + return entry.token, entry.expiresAt 55 + } 56 + 57 + // SetServiceToken stores a service token in the cache 58 + // Automatically parses the JWT to extract the expiry time 59 + // Applies a 10-second safety margin (cache expires 10s before actual JWT expiry) 60 + func SetServiceToken(did, holdDID, token string) error { 61 + cacheKey := did + ":" + holdDID 62 + 63 + // Parse JWT to extract expiry (don't verify signature - we trust the PDS) 64 + expiry, err := parseJWTExpiry(token) 65 + if err != nil { 66 + // If parsing fails, use default 50s TTL (conservative fallback) 67 + slog.Warn("Failed to parse JWT expiry, using default 50s", "error", err, "cacheKey", cacheKey) 68 + expiry = time.Now().Add(50 * time.Second) 69 + } else { 70 + // Apply 10s safety margin to avoid using nearly-expired tokens 71 + expiry = expiry.Add(-10 * time.Second) 72 + } 73 + 74 + globalServiceTokensMu.Lock() 75 + globalServiceTokens[cacheKey] = &serviceTokenEntry{ 76 + token: token, 77 + expiresAt: expiry, 78 + } 79 + globalServiceTokensMu.Unlock() 80 + 81 + slog.Debug("Cached service token", 82 + "cacheKey", cacheKey, 83 + "expiresIn", time.Until(expiry).Round(time.Second)) 84 + 85 + return nil 86 + } 87 + 88 + // parseJWTExpiry extracts the expiry time from a JWT without verifying the signature 89 + // We trust tokens from the user's PDS, so signature verification isn't needed here 90 + // Manually decodes the JWT payload to avoid algorithm compatibility issues 91 + func parseJWTExpiry(tokenString string) (time.Time, error) { 92 + // JWT format: header.payload.signature 93 + parts := strings.Split(tokenString, ".") 94 + if len(parts) != 3 { 95 + return time.Time{}, fmt.Errorf("invalid JWT format: expected 3 parts, got %d", len(parts)) 96 + } 97 + 98 + // Decode the payload (second part) 99 + payload, err := base64.RawURLEncoding.DecodeString(parts[1]) 100 + if err != nil { 101 + return time.Time{}, fmt.Errorf("failed to decode JWT payload: %w", err) 102 + } 103 + 104 + // Parse the JSON payload 105 + var claims struct { 106 + Exp int64 `json:"exp"` 107 + } 108 + if err := json.Unmarshal(payload, &claims); err != nil { 109 + return time.Time{}, fmt.Errorf("failed to parse JWT claims: %w", err) 110 + } 111 + 112 + if claims.Exp == 0 { 113 + return time.Time{}, fmt.Errorf("JWT missing exp claim") 114 + } 115 + 116 + return time.Unix(claims.Exp, 0), nil 117 + } 118 + 119 + // InvalidateServiceToken removes a service token from the cache 120 + // Used when we detect that a token is invalid or the user's session has expired 121 + func InvalidateServiceToken(did, holdDID string) { 122 + cacheKey := did + ":" + holdDID 123 + 124 + globalServiceTokensMu.Lock() 125 + delete(globalServiceTokens, cacheKey) 126 + globalServiceTokensMu.Unlock() 127 + 128 + slog.Debug("Invalidated service token", "cacheKey", cacheKey) 129 + } 130 + 131 + // GetCacheStats returns statistics about the service token cache for debugging 132 + func GetCacheStats() map[string]any { 133 + globalServiceTokensMu.RLock() 134 + defer globalServiceTokensMu.RUnlock() 135 + 136 + validCount := 0 137 + expiredCount := 0 138 + now := time.Now() 139 + 140 + for _, entry := range globalServiceTokens { 141 + if now.Before(entry.expiresAt) { 142 + validCount++ 143 + } else { 144 + expiredCount++ 145 + } 146 + } 147 + 148 + return map[string]any{ 149 + "total_entries": len(globalServiceTokens), 150 + "valid_tokens": validCount, 151 + "expired_tokens": expiredCount, 152 + } 153 + } 154 + 155 + // CleanExpiredTokens removes expired tokens from the cache 156 + // Can be called periodically to prevent unbounded growth (though expired tokens 157 + // are also removed lazily on access) 158 + func CleanExpiredTokens() { 159 + globalServiceTokensMu.Lock() 160 + defer globalServiceTokensMu.Unlock() 161 + 162 + now := time.Now() 163 + removed := 0 164 + 165 + for key, entry := range globalServiceTokens { 166 + if now.After(entry.expiresAt) { 167 + delete(globalServiceTokens, key) 168 + removed++ 169 + } 170 + } 171 + 172 + if removed > 0 { 173 + slog.Debug("Cleaned expired service tokens", "count", removed) 174 + } 175 + }
+195
pkg/auth/token/cache_test.go
··· 1 + package token 2 + 3 + import ( 4 + "testing" 5 + "time" 6 + ) 7 + 8 + func TestGetServiceToken_NotCached(t *testing.T) { 9 + // Clear cache first 10 + globalServiceTokensMu.Lock() 11 + globalServiceTokens = make(map[string]*serviceTokenEntry) 12 + globalServiceTokensMu.Unlock() 13 + 14 + did := "did:plc:test123" 15 + holdDID := "did:web:hold.example.com" 16 + 17 + token, expiresAt := GetServiceToken(did, holdDID) 18 + if token != "" { 19 + t.Errorf("Expected empty token for uncached entry, got %q", token) 20 + } 21 + if !expiresAt.IsZero() { 22 + t.Error("Expected zero time for uncached entry") 23 + } 24 + } 25 + 26 + func TestSetServiceToken_ManualExpiry(t *testing.T) { 27 + // Clear cache first 28 + globalServiceTokensMu.Lock() 29 + globalServiceTokens = make(map[string]*serviceTokenEntry) 30 + globalServiceTokensMu.Unlock() 31 + 32 + did := "did:plc:test123" 33 + holdDID := "did:web:hold.example.com" 34 + token := "invalid_jwt_token" // Will fall back to 50s default 35 + 36 + // This should succeed with default 50s TTL since JWT parsing will fail 37 + err := SetServiceToken(did, holdDID, token) 38 + if err != nil { 39 + t.Fatalf("SetServiceToken() error = %v", err) 40 + } 41 + 42 + // Verify token was cached 43 + cachedToken, expiresAt := GetServiceToken(did, holdDID) 44 + if cachedToken != token { 45 + t.Errorf("Expected token %q, got %q", token, cachedToken) 46 + } 47 + if expiresAt.IsZero() { 48 + t.Error("Expected non-zero expiry time") 49 + } 50 + 51 + // Expiry should be approximately 50s from now (with 10s margin subtracted in some cases) 52 + expectedExpiry := time.Now().Add(50 * time.Second) 53 + diff := expiresAt.Sub(expectedExpiry) 54 + if diff < -5*time.Second || diff > 5*time.Second { 55 + t.Errorf("Expiry time off by %v (expected ~50s from now)", diff) 56 + } 57 + } 58 + 59 + func TestGetServiceToken_Expired(t *testing.T) { 60 + // Manually insert an expired token 61 + did := "did:plc:test123" 62 + holdDID := "did:web:hold.example.com" 63 + cacheKey := did + ":" + holdDID 64 + 65 + globalServiceTokensMu.Lock() 66 + globalServiceTokens[cacheKey] = &serviceTokenEntry{ 67 + token: "expired_token", 68 + expiresAt: time.Now().Add(-1 * time.Hour), // 1 hour ago 69 + } 70 + globalServiceTokensMu.Unlock() 71 + 72 + // Try to get - should return empty since expired 73 + token, expiresAt := GetServiceToken(did, holdDID) 74 + if token != "" { 75 + t.Errorf("Expected empty token for expired entry, got %q", token) 76 + } 77 + if !expiresAt.IsZero() { 78 + t.Error("Expected zero time for expired entry") 79 + } 80 + 81 + // Verify token was removed from cache 82 + globalServiceTokensMu.RLock() 83 + _, exists := globalServiceTokens[cacheKey] 84 + globalServiceTokensMu.RUnlock() 85 + 86 + if exists { 87 + t.Error("Expected expired token to be removed from cache") 88 + } 89 + } 90 + 91 + func TestInvalidateServiceToken(t *testing.T) { 92 + // Set a token 93 + did := "did:plc:test123" 94 + holdDID := "did:web:hold.example.com" 95 + token := "test_token" 96 + 97 + err := SetServiceToken(did, holdDID, token) 98 + if err != nil { 99 + t.Fatalf("SetServiceToken() error = %v", err) 100 + } 101 + 102 + // Verify it's cached 103 + cachedToken, _ := GetServiceToken(did, holdDID) 104 + if cachedToken != token { 105 + t.Fatal("Token should be cached") 106 + } 107 + 108 + // Invalidate 109 + InvalidateServiceToken(did, holdDID) 110 + 111 + // Verify it's gone 112 + cachedToken, _ = GetServiceToken(did, holdDID) 113 + if cachedToken != "" { 114 + t.Error("Expected token to be invalidated") 115 + } 116 + } 117 + 118 + func TestCleanExpiredTokens(t *testing.T) { 119 + // Clear cache first 120 + globalServiceTokensMu.Lock() 121 + globalServiceTokens = make(map[string]*serviceTokenEntry) 122 + globalServiceTokensMu.Unlock() 123 + 124 + // Add expired and valid tokens 125 + globalServiceTokensMu.Lock() 126 + globalServiceTokens["expired:hold1"] = &serviceTokenEntry{ 127 + token: "expired1", 128 + expiresAt: time.Now().Add(-1 * time.Hour), 129 + } 130 + globalServiceTokens["valid:hold2"] = &serviceTokenEntry{ 131 + token: "valid1", 132 + expiresAt: time.Now().Add(1 * time.Hour), 133 + } 134 + globalServiceTokensMu.Unlock() 135 + 136 + // Clean expired 137 + CleanExpiredTokens() 138 + 139 + // Verify only valid token remains 140 + globalServiceTokensMu.RLock() 141 + _, expiredExists := globalServiceTokens["expired:hold1"] 142 + _, validExists := globalServiceTokens["valid:hold2"] 143 + globalServiceTokensMu.RUnlock() 144 + 145 + if expiredExists { 146 + t.Error("Expected expired token to be removed") 147 + } 148 + if !validExists { 149 + t.Error("Expected valid token to remain") 150 + } 151 + } 152 + 153 + func TestGetCacheStats(t *testing.T) { 154 + // Clear cache first 155 + globalServiceTokensMu.Lock() 156 + globalServiceTokens = make(map[string]*serviceTokenEntry) 157 + globalServiceTokensMu.Unlock() 158 + 159 + // Add some tokens 160 + globalServiceTokensMu.Lock() 161 + globalServiceTokens["did1:hold1"] = &serviceTokenEntry{ 162 + token: "token1", 163 + expiresAt: time.Now().Add(1 * time.Hour), 164 + } 165 + globalServiceTokens["did2:hold2"] = &serviceTokenEntry{ 166 + token: "token2", 167 + expiresAt: time.Now().Add(1 * time.Hour), 168 + } 169 + globalServiceTokensMu.Unlock() 170 + 171 + stats := GetCacheStats() 172 + if stats == nil { 173 + t.Fatal("Expected non-nil stats") 174 + } 175 + 176 + // GetCacheStats returns map[string]any with "total_entries" key 177 + totalEntries, ok := stats["total_entries"].(int) 178 + if !ok { 179 + t.Fatalf("Expected total_entries in stats map, got: %v", stats) 180 + } 181 + 182 + if totalEntries != 2 { 183 + t.Errorf("Expected 2 entries, got %d", totalEntries) 184 + } 185 + 186 + // Also check valid_tokens 187 + validTokens, ok := stats["valid_tokens"].(int) 188 + if !ok { 189 + t.Fatal("Expected valid_tokens in stats map") 190 + } 191 + 192 + if validTokens != 2 { 193 + t.Errorf("Expected 2 valid tokens, got %d", validTokens) 194 + } 195 + }
-19
pkg/auth/token/claims.go
··· 56 56 57 57 return claims.AuthMethod 58 58 } 59 - 60 - // ExtractSubject parses a JWT token string and extracts the Subject claim (the user's DID) 61 - // Returns the subject or empty string if not found or token is invalid 62 - // This does NOT validate the token - it only parses it to extract the claim 63 - func ExtractSubject(tokenString string) string { 64 - // Parse token without validation (we only need the claims, validation is done by distribution library) 65 - parser := jwt.NewParser(jwt.WithoutClaimsValidation()) 66 - token, _, err := parser.ParseUnverified(tokenString, &Claims{}) 67 - if err != nil { 68 - return "" // Invalid token format 69 - } 70 - 71 - claims, ok := token.Claims.(*Claims) 72 - if !ok { 73 - return "" // Wrong claims type 74 - } 75 - 76 - return claims.Subject 77 - }
+362
pkg/auth/token/servicetoken.go
··· 1 + package token 2 + 3 + import ( 4 + "context" 5 + "encoding/json" 6 + "errors" 7 + "fmt" 8 + "io" 9 + "log/slog" 10 + "net/http" 11 + "net/url" 12 + "time" 13 + 14 + "atcr.io/pkg/atproto" 15 + "atcr.io/pkg/auth" 16 + "atcr.io/pkg/auth/oauth" 17 + "github.com/bluesky-social/indigo/atproto/atclient" 18 + indigo_oauth "github.com/bluesky-social/indigo/atproto/auth/oauth" 19 + ) 20 + 21 + // getErrorHint provides context-specific troubleshooting hints based on API error type 22 + func getErrorHint(apiErr *atclient.APIError) string { 23 + switch apiErr.Name { 24 + case "use_dpop_nonce": 25 + return "DPoP nonce mismatch - indigo library should automatically retry with new nonce. If this persists, check for concurrent request issues or PDS session corruption." 26 + case "invalid_client": 27 + if apiErr.Message != "" && apiErr.Message == "Validation of \"client_assertion\" failed: \"iat\" claim timestamp check failed (it should be in the past)" { 28 + return "JWT timestamp validation failed - system clock on AppView may be ahead of PDS clock. Check NTP sync with: timedatectl status" 29 + } 30 + return "OAuth client authentication failed - check client key configuration and PDS OAuth server status" 31 + case "invalid_token", "invalid_grant": 32 + return "OAuth tokens expired or invalidated - user will need to re-authenticate via OAuth flow" 33 + case "server_error": 34 + if apiErr.StatusCode == 500 { 35 + return "PDS returned internal server error - this may occur after repeated DPoP nonce failures or other PDS-side issues. Check PDS logs for root cause." 36 + } 37 + return "PDS server error - check PDS health and logs" 38 + case "invalid_dpop_proof": 39 + return "DPoP proof validation failed - check system clock sync and DPoP key configuration" 40 + default: 41 + if apiErr.StatusCode == 401 || apiErr.StatusCode == 403 { 42 + return "Authentication/authorization failed - OAuth session may be expired or revoked" 43 + } 44 + return "PDS rejected the request - see errorName and errorMessage for details" 45 + } 46 + } 47 + 48 + // GetOrFetchServiceToken gets a service token for hold authentication. 49 + // Checks cache first, then fetches from PDS with OAuth/DPoP if needed. 50 + // This is the canonical implementation used by both middleware and crew registration. 51 + // 52 + // IMPORTANT: Uses DoWithSession() to hold a per-DID lock through the entire PDS interaction. 53 + // This prevents DPoP nonce race conditions when multiple Docker layers upload concurrently. 54 + func GetOrFetchServiceToken( 55 + ctx context.Context, 56 + refresher *oauth.Refresher, 57 + did, holdDID, pdsEndpoint string, 58 + ) (string, error) { 59 + if refresher == nil { 60 + return "", fmt.Errorf("refresher is nil (OAuth session required for service tokens)") 61 + } 62 + 63 + // Check cache first to avoid unnecessary PDS calls on every request 64 + cachedToken, expiresAt := GetServiceToken(did, holdDID) 65 + 66 + // Use cached token if it exists and has > 10s remaining 67 + if cachedToken != "" && time.Until(expiresAt) > 10*time.Second { 68 + slog.Debug("Using cached service token", 69 + "did", did, 70 + "expiresIn", time.Until(expiresAt).Round(time.Second)) 71 + return cachedToken, nil 72 + } 73 + 74 + // Cache miss or expiring soon - validate OAuth and get new service token 75 + if cachedToken == "" { 76 + slog.Debug("Service token cache miss, fetching new token", "did", did) 77 + } else { 78 + slog.Debug("Service token expiring soon, proactively renewing", "did", did) 79 + } 80 + 81 + // Use DoWithSession to hold the lock through the entire PDS interaction. 82 + // This prevents DPoP nonce races when multiple goroutines try to fetch service tokens. 83 + var serviceToken string 84 + var fetchErr error 85 + 86 + err := refresher.DoWithSession(ctx, did, func(session *indigo_oauth.ClientSession) error { 87 + // Double-check cache after acquiring lock - another goroutine may have 88 + // populated it while we were waiting (classic double-checked locking pattern) 89 + cachedToken, expiresAt := GetServiceToken(did, holdDID) 90 + if cachedToken != "" && time.Until(expiresAt) > 10*time.Second { 91 + slog.Debug("Service token cache hit after lock acquisition", 92 + "did", did, 93 + "expiresIn", time.Until(expiresAt).Round(time.Second)) 94 + serviceToken = cachedToken 95 + return nil 96 + } 97 + 98 + // Cache still empty/expired - proceed with PDS call 99 + // Request 5-minute expiry (PDS may grant less) 100 + // exp must be absolute Unix timestamp, not relative duration 101 + // Note: OAuth scope includes #atcr_hold fragment, but service auth aud must be bare DID 102 + expiryTime := time.Now().Unix() + 300 // 5 minutes from now 103 + serviceAuthURL := fmt.Sprintf("%s%s?aud=%s&lxm=%s&exp=%d", 104 + pdsEndpoint, 105 + atproto.ServerGetServiceAuth, 106 + url.QueryEscape(holdDID), 107 + url.QueryEscape("com.atproto.repo.getRecord"), 108 + expiryTime, 109 + ) 110 + 111 + req, err := http.NewRequestWithContext(ctx, "GET", serviceAuthURL, nil) 112 + if err != nil { 113 + fetchErr = fmt.Errorf("failed to create service auth request: %w", err) 114 + return fetchErr 115 + } 116 + 117 + // Use OAuth session to authenticate to PDS (with DPoP) 118 + // The lock is held, so DPoP nonce negotiation is serialized per-DID 119 + resp, err := session.DoWithAuth(session.Client, req, "com.atproto.server.getServiceAuth") 120 + if err != nil { 121 + // Auth error - may indicate expired tokens or corrupted session 122 + InvalidateServiceToken(did, holdDID) 123 + 124 + // Inspect the error to extract detailed information from indigo's APIError 125 + var apiErr *atclient.APIError 126 + if errors.As(err, &apiErr) { 127 + // Log detailed API error information 128 + slog.Error("OAuth authentication failed during service token request", 129 + "component", "token/servicetoken", 130 + "did", did, 131 + "holdDID", holdDID, 132 + "pdsEndpoint", pdsEndpoint, 133 + "url", serviceAuthURL, 134 + "error", err, 135 + "httpStatus", apiErr.StatusCode, 136 + "errorName", apiErr.Name, 137 + "errorMessage", apiErr.Message, 138 + "hint", getErrorHint(apiErr)) 139 + } else { 140 + // Fallback for non-API errors (network errors, etc.) 141 + slog.Error("OAuth authentication failed during service token request", 142 + "component", "token/servicetoken", 143 + "did", did, 144 + "holdDID", holdDID, 145 + "pdsEndpoint", pdsEndpoint, 146 + "url", serviceAuthURL, 147 + "error", err, 148 + "errorType", fmt.Sprintf("%T", err), 149 + "hint", "Network error or unexpected failure during OAuth request") 150 + } 151 + 152 + fetchErr = fmt.Errorf("OAuth validation failed: %w", err) 153 + return fetchErr 154 + } 155 + defer resp.Body.Close() 156 + 157 + if resp.StatusCode != http.StatusOK { 158 + // Service auth failed 159 + bodyBytes, _ := io.ReadAll(resp.Body) 160 + InvalidateServiceToken(did, holdDID) 161 + slog.Error("Service token request returned non-200 status", 162 + "component", "token/servicetoken", 163 + "did", did, 164 + "holdDID", holdDID, 165 + "pdsEndpoint", pdsEndpoint, 166 + "statusCode", resp.StatusCode, 167 + "responseBody", string(bodyBytes), 168 + "hint", "PDS rejected the service token request - check PDS logs for details") 169 + fetchErr = fmt.Errorf("service auth failed with status %d: %s", resp.StatusCode, string(bodyBytes)) 170 + return fetchErr 171 + } 172 + 173 + // Parse response to get service token 174 + var result struct { 175 + Token string `json:"token"` 176 + } 177 + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { 178 + fetchErr = fmt.Errorf("failed to decode service auth response: %w", err) 179 + return fetchErr 180 + } 181 + 182 + if result.Token == "" { 183 + fetchErr = fmt.Errorf("empty token in service auth response") 184 + return fetchErr 185 + } 186 + 187 + serviceToken = result.Token 188 + return nil 189 + }) 190 + 191 + if err != nil { 192 + // DoWithSession failed (session load or callback error) 193 + InvalidateServiceToken(did, holdDID) 194 + 195 + // Try to extract detailed error information 196 + var apiErr *atclient.APIError 197 + if errors.As(err, &apiErr) { 198 + slog.Error("Failed to get OAuth session for service token", 199 + "component", "token/servicetoken", 200 + "did", did, 201 + "holdDID", holdDID, 202 + "pdsEndpoint", pdsEndpoint, 203 + "error", err, 204 + "httpStatus", apiErr.StatusCode, 205 + "errorName", apiErr.Name, 206 + "errorMessage", apiErr.Message, 207 + "hint", getErrorHint(apiErr)) 208 + } else if fetchErr == nil { 209 + // Session load failed (not a fetch error) 210 + slog.Error("Failed to get OAuth session for service token", 211 + "component", "token/servicetoken", 212 + "did", did, 213 + "holdDID", holdDID, 214 + "pdsEndpoint", pdsEndpoint, 215 + "error", err, 216 + "errorType", fmt.Sprintf("%T", err), 217 + "hint", "OAuth session not found in database or token refresh failed") 218 + } 219 + 220 + // Delete the stale OAuth session to force re-authentication 221 + // This also invalidates the UI session automatically 222 + if delErr := refresher.DeleteSession(ctx, did); delErr != nil { 223 + slog.Warn("Failed to delete stale OAuth session", 224 + "component", "token/servicetoken", 225 + "did", did, 226 + "error", delErr) 227 + } 228 + 229 + if fetchErr != nil { 230 + return "", fetchErr 231 + } 232 + return "", fmt.Errorf("failed to get OAuth session: %w", err) 233 + } 234 + 235 + // Cache the token (parses JWT to extract actual expiry) 236 + if err := SetServiceToken(did, holdDID, serviceToken); err != nil { 237 + slog.Warn("Failed to cache service token", "error", err, "did", did, "holdDID", holdDID) 238 + // Non-fatal - we have the token, just won't be cached 239 + } 240 + 241 + slog.Debug("OAuth validation succeeded, service token obtained", "did", did) 242 + return serviceToken, nil 243 + } 244 + 245 + // GetOrFetchServiceTokenWithAppPassword gets a service token using app-password Bearer authentication. 246 + // Used when auth method is app_password instead of OAuth. 247 + func GetOrFetchServiceTokenWithAppPassword( 248 + ctx context.Context, 249 + did, holdDID, pdsEndpoint string, 250 + ) (string, error) { 251 + // Check cache first to avoid unnecessary PDS calls on every request 252 + cachedToken, expiresAt := GetServiceToken(did, holdDID) 253 + 254 + // Use cached token if it exists and has > 10s remaining 255 + if cachedToken != "" && time.Until(expiresAt) > 10*time.Second { 256 + slog.Debug("Using cached service token (app-password)", 257 + "did", did, 258 + "expiresIn", time.Until(expiresAt).Round(time.Second)) 259 + return cachedToken, nil 260 + } 261 + 262 + // Cache miss or expiring soon - get app-password token and fetch new service token 263 + if cachedToken == "" { 264 + slog.Debug("Service token cache miss, fetching new token with app-password", "did", did) 265 + } else { 266 + slog.Debug("Service token expiring soon, proactively renewing with app-password", "did", did) 267 + } 268 + 269 + // Get app-password access token from cache 270 + accessToken, ok := auth.GetGlobalTokenCache().Get(did) 271 + if !ok { 272 + InvalidateServiceToken(did, holdDID) 273 + slog.Error("No app-password access token found in cache", 274 + "component", "token/servicetoken", 275 + "did", did, 276 + "holdDID", holdDID, 277 + "hint", "User must re-authenticate with docker login") 278 + return "", fmt.Errorf("no app-password access token available for DID %s", did) 279 + } 280 + 281 + // Call com.atproto.server.getServiceAuth on the user's PDS with Bearer token 282 + // Request 5-minute expiry (PDS may grant less) 283 + // exp must be absolute Unix timestamp, not relative duration 284 + expiryTime := time.Now().Unix() + 300 // 5 minutes from now 285 + serviceAuthURL := fmt.Sprintf("%s%s?aud=%s&lxm=%s&exp=%d", 286 + pdsEndpoint, 287 + atproto.ServerGetServiceAuth, 288 + url.QueryEscape(holdDID), 289 + url.QueryEscape("com.atproto.repo.getRecord"), 290 + expiryTime, 291 + ) 292 + 293 + req, err := http.NewRequestWithContext(ctx, "GET", serviceAuthURL, nil) 294 + if err != nil { 295 + return "", fmt.Errorf("failed to create service auth request: %w", err) 296 + } 297 + 298 + // Set Bearer token authentication (app-password) 299 + req.Header.Set("Authorization", "Bearer "+accessToken) 300 + 301 + // Make request with standard HTTP client 302 + resp, err := http.DefaultClient.Do(req) 303 + if err != nil { 304 + InvalidateServiceToken(did, holdDID) 305 + slog.Error("App-password service token request failed", 306 + "component", "token/servicetoken", 307 + "did", did, 308 + "holdDID", holdDID, 309 + "pdsEndpoint", pdsEndpoint, 310 + "error", err) 311 + return "", fmt.Errorf("failed to request service token: %w", err) 312 + } 313 + defer resp.Body.Close() 314 + 315 + if resp.StatusCode == http.StatusUnauthorized { 316 + // App-password token is invalid or expired - clear from cache 317 + auth.GetGlobalTokenCache().Delete(did) 318 + InvalidateServiceToken(did, holdDID) 319 + slog.Error("App-password token rejected by PDS", 320 + "component", "token/servicetoken", 321 + "did", did, 322 + "hint", "User must re-authenticate with docker login") 323 + return "", fmt.Errorf("app-password authentication failed: token expired or invalid") 324 + } 325 + 326 + if resp.StatusCode != http.StatusOK { 327 + // Service auth failed 328 + bodyBytes, _ := io.ReadAll(resp.Body) 329 + InvalidateServiceToken(did, holdDID) 330 + slog.Error("Service token request returned non-200 status (app-password)", 331 + "component", "token/servicetoken", 332 + "did", did, 333 + "holdDID", holdDID, 334 + "pdsEndpoint", pdsEndpoint, 335 + "statusCode", resp.StatusCode, 336 + "responseBody", string(bodyBytes)) 337 + return "", fmt.Errorf("service auth failed with status %d: %s", resp.StatusCode, string(bodyBytes)) 338 + } 339 + 340 + // Parse response to get service token 341 + var result struct { 342 + Token string `json:"token"` 343 + } 344 + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { 345 + return "", fmt.Errorf("failed to decode service auth response: %w", err) 346 + } 347 + 348 + if result.Token == "" { 349 + return "", fmt.Errorf("empty token in service auth response") 350 + } 351 + 352 + serviceToken := result.Token 353 + 354 + // Cache the token (parses JWT to extract actual expiry) 355 + if err := SetServiceToken(did, holdDID, serviceToken); err != nil { 356 + slog.Warn("Failed to cache service token", "error", err, "did", did, "holdDID", holdDID) 357 + // Non-fatal - we have the token, just won't be cached 358 + } 359 + 360 + slog.Debug("App-password validation succeeded, service token obtained", "did", did) 361 + return serviceToken, nil 362 + }
+27
pkg/auth/token/servicetoken_test.go
··· 1 + package token 2 + 3 + import ( 4 + "context" 5 + "testing" 6 + ) 7 + 8 + func TestGetOrFetchServiceToken_NilRefresher(t *testing.T) { 9 + ctx := context.Background() 10 + did := "did:plc:test123" 11 + holdDID := "did:web:hold.example.com" 12 + pdsEndpoint := "https://pds.example.com" 13 + 14 + // Test with nil refresher - should return error 15 + _, err := GetOrFetchServiceToken(ctx, nil, did, holdDID, pdsEndpoint) 16 + if err == nil { 17 + t.Error("Expected error when refresher is nil") 18 + } 19 + 20 + expectedErrMsg := "refresher is nil" 21 + if err.Error() != "refresher is nil (OAuth session required for service tokens)" { 22 + t.Errorf("Expected error message to contain %q, got %q", expectedErrMsg, err.Error()) 23 + } 24 + } 25 + 26 + // Note: Full tests with mocked OAuth refresher and HTTP client will be added 27 + // in the comprehensive test implementation phase
-784
pkg/auth/usercontext.go
··· 1 - // Package auth provides UserContext for managing authenticated user state 2 - // throughout request handling in the AppView. 3 - package auth 4 - 5 - import ( 6 - "context" 7 - "database/sql" 8 - "encoding/json" 9 - "fmt" 10 - "io" 11 - "log/slog" 12 - "net/http" 13 - "sync" 14 - "time" 15 - 16 - "atcr.io/pkg/appview/db" 17 - "atcr.io/pkg/atproto" 18 - "atcr.io/pkg/auth/oauth" 19 - ) 20 - 21 - // Auth method constants (duplicated from token package to avoid import cycle) 22 - const ( 23 - AuthMethodOAuth = "oauth" 24 - AuthMethodAppPassword = "app_password" 25 - ) 26 - 27 - // RequestAction represents the type of registry operation 28 - type RequestAction int 29 - 30 - const ( 31 - ActionUnknown RequestAction = iota 32 - ActionPull // GET/HEAD - reading from registry 33 - ActionPush // PUT/POST/DELETE - writing to registry 34 - ActionInspect // Metadata operations only 35 - ) 36 - 37 - func (a RequestAction) String() string { 38 - switch a { 39 - case ActionPull: 40 - return "pull" 41 - case ActionPush: 42 - return "push" 43 - case ActionInspect: 44 - return "inspect" 45 - default: 46 - return "unknown" 47 - } 48 - } 49 - 50 - // HoldPermissions describes what the user can do on a specific hold 51 - type HoldPermissions struct { 52 - HoldDID string // Hold being checked 53 - IsOwner bool // User is captain of this hold 54 - IsCrew bool // User is a crew member 55 - IsPublic bool // Hold allows public reads 56 - CanRead bool // Computed: can user read blobs? 57 - CanWrite bool // Computed: can user write blobs? 58 - CanAdmin bool // Computed: can user manage crew? 59 - Permissions []string // Raw permissions from crew record 60 - } 61 - 62 - // contextKey is unexported to prevent collisions 63 - type contextKey struct{} 64 - 65 - // userContextKey is the context key for UserContext 66 - var userContextKey = contextKey{} 67 - 68 - // userSetupCache tracks which users have had their profile/crew setup ensured 69 - var userSetupCache sync.Map // did -> time.Time 70 - 71 - // userSetupTTL is how long to cache user setup status (1 hour) 72 - const userSetupTTL = 1 * time.Hour 73 - 74 - // Dependencies bundles services needed by UserContext 75 - type Dependencies struct { 76 - Refresher *oauth.Refresher 77 - Authorizer HoldAuthorizer 78 - DefaultHoldDID string // AppView's default hold DID 79 - } 80 - 81 - // UserContext encapsulates authenticated user state for a request. 82 - // Built early in the middleware chain and available throughout request processing. 83 - // 84 - // Two-phase initialization: 85 - // 1. Middleware phase: Identity is set (DID, authMethod, action) 86 - // 2. Repository() phase: Target is set via SetTarget() (owner, repo, holdDID) 87 - type UserContext struct { 88 - // === User Identity (set in middleware) === 89 - DID string // User's DID (empty if unauthenticated) 90 - Handle string // User's handle (may be empty) 91 - PDSEndpoint string // User's PDS endpoint 92 - AuthMethod string // "oauth", "app_password", or "" 93 - IsAuthenticated bool 94 - 95 - // === Request Info === 96 - Action RequestAction 97 - HTTPMethod string 98 - 99 - // === Target Info (set by SetTarget) === 100 - TargetOwnerDID string // whose repo is being accessed 101 - TargetOwnerHandle string 102 - TargetOwnerPDS string 103 - TargetRepo string // image name (e.g., "quickslice") 104 - TargetHoldDID string // hold where blobs live/will live 105 - 106 - // === Dependencies (injected) === 107 - refresher *oauth.Refresher 108 - authorizer HoldAuthorizer 109 - defaultHoldDID string 110 - 111 - // === Cached State (lazy-loaded) === 112 - serviceTokens sync.Map // holdDID -> *serviceTokenEntry 113 - permissions sync.Map // holdDID -> *HoldPermissions 114 - pdsResolved bool 115 - pdsResolveErr error 116 - mu sync.Mutex // protects PDS resolution 117 - atprotoClient *atproto.Client 118 - atprotoClientOnce sync.Once 119 - } 120 - 121 - // FromContext retrieves UserContext from context. 122 - // Returns nil if not present (unauthenticated or before middleware). 123 - func FromContext(ctx context.Context) *UserContext { 124 - uc, _ := ctx.Value(userContextKey).(*UserContext) 125 - return uc 126 - } 127 - 128 - // WithUserContext adds UserContext to context 129 - func WithUserContext(ctx context.Context, uc *UserContext) context.Context { 130 - return context.WithValue(ctx, userContextKey, uc) 131 - } 132 - 133 - // NewUserContext creates a UserContext from extracted JWT claims. 134 - // The deps parameter provides access to services needed for lazy operations. 135 - func NewUserContext(did, authMethod, httpMethod string, deps *Dependencies) *UserContext { 136 - action := ActionUnknown 137 - switch httpMethod { 138 - case "GET", "HEAD": 139 - action = ActionPull 140 - case "PUT", "POST", "PATCH", "DELETE": 141 - action = ActionPush 142 - } 143 - 144 - var refresher *oauth.Refresher 145 - var authorizer HoldAuthorizer 146 - var defaultHoldDID string 147 - 148 - if deps != nil { 149 - refresher = deps.Refresher 150 - authorizer = deps.Authorizer 151 - defaultHoldDID = deps.DefaultHoldDID 152 - } 153 - 154 - return &UserContext{ 155 - DID: did, 156 - AuthMethod: authMethod, 157 - IsAuthenticated: did != "", 158 - Action: action, 159 - HTTPMethod: httpMethod, 160 - refresher: refresher, 161 - authorizer: authorizer, 162 - defaultHoldDID: defaultHoldDID, 163 - } 164 - } 165 - 166 - // SetPDS sets the user's PDS endpoint directly, bypassing network resolution. 167 - // Use when PDS is already known (e.g., from previous resolution or client). 168 - func (uc *UserContext) SetPDS(handle, pdsEndpoint string) { 169 - uc.mu.Lock() 170 - defer uc.mu.Unlock() 171 - uc.Handle = handle 172 - uc.PDSEndpoint = pdsEndpoint 173 - uc.pdsResolved = true 174 - uc.pdsResolveErr = nil 175 - } 176 - 177 - // SetTarget sets the target repository information. 178 - // Called in Repository() after resolving the owner identity. 179 - func (uc *UserContext) SetTarget(ownerDID, ownerHandle, ownerPDS, repo, holdDID string) { 180 - uc.TargetOwnerDID = ownerDID 181 - uc.TargetOwnerHandle = ownerHandle 182 - uc.TargetOwnerPDS = ownerPDS 183 - uc.TargetRepo = repo 184 - uc.TargetHoldDID = holdDID 185 - } 186 - 187 - // ResolvePDS resolves the user's PDS endpoint (lazy, cached). 188 - // Safe to call multiple times; resolution happens once. 189 - func (uc *UserContext) ResolvePDS(ctx context.Context) error { 190 - if !uc.IsAuthenticated { 191 - return nil // Nothing to resolve for anonymous users 192 - } 193 - 194 - uc.mu.Lock() 195 - defer uc.mu.Unlock() 196 - 197 - if uc.pdsResolved { 198 - return uc.pdsResolveErr 199 - } 200 - 201 - _, handle, pds, err := atproto.ResolveIdentity(ctx, uc.DID) 202 - if err != nil { 203 - uc.pdsResolveErr = err 204 - uc.pdsResolved = true 205 - return err 206 - } 207 - 208 - uc.Handle = handle 209 - uc.PDSEndpoint = pds 210 - uc.pdsResolved = true 211 - return nil 212 - } 213 - 214 - // GetServiceToken returns a service token for the target hold. 215 - // Uses internal caching with sync.Once per holdDID. 216 - // Requires target to be set via SetTarget(). 217 - func (uc *UserContext) GetServiceToken(ctx context.Context) (string, error) { 218 - if uc.TargetHoldDID == "" { 219 - return "", fmt.Errorf("target hold not set (call SetTarget first)") 220 - } 221 - return uc.GetServiceTokenForHold(ctx, uc.TargetHoldDID) 222 - } 223 - 224 - // GetServiceTokenForHold returns a service token for an arbitrary hold. 225 - // Uses internal caching with sync.Once per holdDID. 226 - func (uc *UserContext) GetServiceTokenForHold(ctx context.Context, holdDID string) (string, error) { 227 - if !uc.IsAuthenticated { 228 - return "", fmt.Errorf("cannot get service token: user not authenticated") 229 - } 230 - 231 - // Ensure PDS is resolved 232 - if err := uc.ResolvePDS(ctx); err != nil { 233 - return "", fmt.Errorf("failed to resolve PDS: %w", err) 234 - } 235 - 236 - // Load or create cache entry 237 - entryVal, _ := uc.serviceTokens.LoadOrStore(holdDID, &serviceTokenEntry{}) 238 - entry := entryVal.(*serviceTokenEntry) 239 - 240 - entry.once.Do(func() { 241 - slog.Debug("Fetching service token", 242 - "component", "auth/context", 243 - "userDID", uc.DID, 244 - "holdDID", holdDID, 245 - "authMethod", uc.AuthMethod) 246 - 247 - // Use unified service token function (handles both OAuth and app-password) 248 - serviceToken, err := GetOrFetchServiceToken( 249 - ctx, uc.AuthMethod, uc.refresher, uc.DID, holdDID, uc.PDSEndpoint, 250 - ) 251 - 252 - entry.token = serviceToken 253 - entry.err = err 254 - if err == nil { 255 - // Parse JWT to get expiry 256 - expiry, parseErr := ParseJWTExpiry(serviceToken) 257 - if parseErr == nil { 258 - entry.expiresAt = expiry.Add(-10 * time.Second) // Safety margin 259 - } else { 260 - entry.expiresAt = time.Now().Add(45 * time.Second) // Default fallback 261 - } 262 - } 263 - }) 264 - 265 - return entry.token, entry.err 266 - } 267 - 268 - // CanRead checks if user can read blobs from target hold. 269 - // - Public hold: any user (even anonymous) 270 - // - Private hold: owner OR crew with blob:read/blob:write 271 - func (uc *UserContext) CanRead(ctx context.Context) (bool, error) { 272 - if uc.TargetHoldDID == "" { 273 - return false, fmt.Errorf("target hold not set (call SetTarget first)") 274 - } 275 - 276 - if uc.authorizer == nil { 277 - return false, fmt.Errorf("authorizer not configured") 278 - } 279 - 280 - return uc.authorizer.CheckReadAccess(ctx, uc.TargetHoldDID, uc.DID) 281 - } 282 - 283 - // CanWrite checks if user can write blobs to target hold. 284 - // - Must be authenticated 285 - // - Must be owner OR crew with blob:write 286 - func (uc *UserContext) CanWrite(ctx context.Context) (bool, error) { 287 - if uc.TargetHoldDID == "" { 288 - return false, fmt.Errorf("target hold not set (call SetTarget first)") 289 - } 290 - 291 - if !uc.IsAuthenticated { 292 - return false, nil // Anonymous writes never allowed 293 - } 294 - 295 - if uc.authorizer == nil { 296 - return false, fmt.Errorf("authorizer not configured") 297 - } 298 - 299 - return uc.authorizer.CheckWriteAccess(ctx, uc.TargetHoldDID, uc.DID) 300 - } 301 - 302 - // GetPermissions returns detailed permissions for target hold. 303 - // Lazy-loaded and cached per holdDID. 304 - func (uc *UserContext) GetPermissions(ctx context.Context) (*HoldPermissions, error) { 305 - if uc.TargetHoldDID == "" { 306 - return nil, fmt.Errorf("target hold not set (call SetTarget first)") 307 - } 308 - return uc.GetPermissionsForHold(ctx, uc.TargetHoldDID) 309 - } 310 - 311 - // GetPermissionsForHold returns detailed permissions for an arbitrary hold. 312 - // Lazy-loaded and cached per holdDID. 313 - func (uc *UserContext) GetPermissionsForHold(ctx context.Context, holdDID string) (*HoldPermissions, error) { 314 - // Check cache first 315 - if cached, ok := uc.permissions.Load(holdDID); ok { 316 - return cached.(*HoldPermissions), nil 317 - } 318 - 319 - if uc.authorizer == nil { 320 - return nil, fmt.Errorf("authorizer not configured") 321 - } 322 - 323 - // Build permissions by querying authorizer 324 - captain, err := uc.authorizer.GetCaptainRecord(ctx, holdDID) 325 - if err != nil { 326 - return nil, fmt.Errorf("failed to get captain record: %w", err) 327 - } 328 - 329 - perms := &HoldPermissions{ 330 - HoldDID: holdDID, 331 - IsPublic: captain.Public, 332 - IsOwner: uc.DID != "" && uc.DID == captain.Owner, 333 - } 334 - 335 - // Check crew membership if authenticated and not owner 336 - if uc.IsAuthenticated && !perms.IsOwner { 337 - isCrew, crewErr := uc.authorizer.IsCrewMember(ctx, holdDID, uc.DID) 338 - if crewErr != nil { 339 - slog.Warn("Failed to check crew membership", 340 - "component", "auth/context", 341 - "holdDID", holdDID, 342 - "userDID", uc.DID, 343 - "error", crewErr) 344 - } 345 - perms.IsCrew = isCrew 346 - } 347 - 348 - // Compute permissions based on role 349 - if perms.IsOwner { 350 - perms.CanRead = true 351 - perms.CanWrite = true 352 - perms.CanAdmin = true 353 - } else if perms.IsCrew { 354 - // Crew members can read and write (for now, all crew have blob:write) 355 - // TODO: Check specific permissions from crew record 356 - perms.CanRead = true 357 - perms.CanWrite = true 358 - perms.CanAdmin = false 359 - } else if perms.IsPublic { 360 - // Public hold - anyone can read 361 - perms.CanRead = true 362 - perms.CanWrite = false 363 - perms.CanAdmin = false 364 - } else if uc.IsAuthenticated { 365 - // Private hold, authenticated non-crew 366 - // Per permission matrix: cannot read private holds 367 - perms.CanRead = false 368 - perms.CanWrite = false 369 - perms.CanAdmin = false 370 - } else { 371 - // Anonymous on private hold 372 - perms.CanRead = false 373 - perms.CanWrite = false 374 - perms.CanAdmin = false 375 - } 376 - 377 - // Cache and return 378 - uc.permissions.Store(holdDID, perms) 379 - return perms, nil 380 - } 381 - 382 - // IsCrewMember checks if user is crew of target hold. 383 - func (uc *UserContext) IsCrewMember(ctx context.Context) (bool, error) { 384 - if uc.TargetHoldDID == "" { 385 - return false, fmt.Errorf("target hold not set (call SetTarget first)") 386 - } 387 - 388 - if !uc.IsAuthenticated { 389 - return false, nil 390 - } 391 - 392 - if uc.authorizer == nil { 393 - return false, fmt.Errorf("authorizer not configured") 394 - } 395 - 396 - return uc.authorizer.IsCrewMember(ctx, uc.TargetHoldDID, uc.DID) 397 - } 398 - 399 - // EnsureCrewMembership is a standalone function to register as crew on a hold. 400 - // Use this when you don't have a UserContext (e.g., OAuth callback). 401 - // This is best-effort and logs errors without failing. 402 - func EnsureCrewMembership(ctx context.Context, did, pdsEndpoint string, refresher *oauth.Refresher, holdDID string) { 403 - if holdDID == "" { 404 - return 405 - } 406 - 407 - // Only works with OAuth (refresher required) - app passwords can't get service tokens 408 - if refresher == nil { 409 - slog.Debug("skipping crew registration - no OAuth refresher (app password flow)", "holdDID", holdDID) 410 - return 411 - } 412 - 413 - // Normalize URL to DID if needed 414 - if !atproto.IsDID(holdDID) { 415 - holdDID = atproto.ResolveHoldDIDFromURL(holdDID) 416 - if holdDID == "" { 417 - slog.Warn("failed to resolve hold DID", "defaultHold", holdDID) 418 - return 419 - } 420 - } 421 - 422 - // Get service token for the hold (OAuth only at this point) 423 - serviceToken, err := GetOrFetchServiceToken(ctx, AuthMethodOAuth, refresher, did, holdDID, pdsEndpoint) 424 - if err != nil { 425 - slog.Warn("failed to get service token", "holdDID", holdDID, "error", err) 426 - return 427 - } 428 - 429 - // Resolve hold DID to HTTP endpoint 430 - holdEndpoint := atproto.ResolveHoldURL(holdDID) 431 - if holdEndpoint == "" { 432 - slog.Warn("failed to resolve hold endpoint", "holdDID", holdDID) 433 - return 434 - } 435 - 436 - // Call requestCrew endpoint 437 - if err := requestCrewMembership(ctx, holdEndpoint, serviceToken); err != nil { 438 - slog.Warn("failed to request crew membership", "holdDID", holdDID, "error", err) 439 - return 440 - } 441 - 442 - slog.Info("successfully registered as crew member", "holdDID", holdDID, "userDID", did) 443 - } 444 - 445 - // ensureCrewMembership attempts to register as crew on target hold (UserContext method). 446 - // Called automatically during first push; idempotent. 447 - // This is a best-effort operation and logs errors without failing. 448 - // Requires SetTarget() to be called first. 449 - func (uc *UserContext) ensureCrewMembership(ctx context.Context) error { 450 - if uc.TargetHoldDID == "" { 451 - return fmt.Errorf("target hold not set (call SetTarget first)") 452 - } 453 - return uc.EnsureCrewMembershipForHold(ctx, uc.TargetHoldDID) 454 - } 455 - 456 - // EnsureCrewMembershipForHold attempts to register as crew on the specified hold. 457 - // This is the core implementation that can be called with any holdDID. 458 - // Called automatically during first push; idempotent. 459 - // This is a best-effort operation and logs errors without failing. 460 - func (uc *UserContext) EnsureCrewMembershipForHold(ctx context.Context, holdDID string) error { 461 - if holdDID == "" { 462 - return nil // Nothing to do 463 - } 464 - 465 - // Normalize URL to DID if needed 466 - if !atproto.IsDID(holdDID) { 467 - holdDID = atproto.ResolveHoldDIDFromURL(holdDID) 468 - if holdDID == "" { 469 - return fmt.Errorf("failed to resolve hold DID from URL") 470 - } 471 - } 472 - 473 - if !uc.IsAuthenticated { 474 - return fmt.Errorf("cannot register as crew: user not authenticated") 475 - } 476 - 477 - if uc.refresher == nil { 478 - return fmt.Errorf("cannot register as crew: OAuth session required") 479 - } 480 - 481 - // Get service token for the hold 482 - serviceToken, err := uc.GetServiceTokenForHold(ctx, holdDID) 483 - if err != nil { 484 - return fmt.Errorf("failed to get service token: %w", err) 485 - } 486 - 487 - // Resolve hold DID to HTTP endpoint 488 - holdEndpoint := atproto.ResolveHoldURL(holdDID) 489 - if holdEndpoint == "" { 490 - return fmt.Errorf("failed to resolve hold endpoint for %s", holdDID) 491 - } 492 - 493 - // Call requestCrew endpoint 494 - return requestCrewMembership(ctx, holdEndpoint, serviceToken) 495 - } 496 - 497 - // requestCrewMembership calls the hold's requestCrew endpoint 498 - // The endpoint handles all authorization and duplicate checking internally 499 - func requestCrewMembership(ctx context.Context, holdEndpoint, serviceToken string) error { 500 - // Add 5 second timeout to prevent hanging on offline holds 501 - ctx, cancel := context.WithTimeout(ctx, 5*time.Second) 502 - defer cancel() 503 - 504 - url := fmt.Sprintf("%s%s", holdEndpoint, atproto.HoldRequestCrew) 505 - 506 - req, err := http.NewRequestWithContext(ctx, "POST", url, nil) 507 - if err != nil { 508 - return err 509 - } 510 - 511 - req.Header.Set("Authorization", "Bearer "+serviceToken) 512 - req.Header.Set("Content-Type", "application/json") 513 - 514 - resp, err := http.DefaultClient.Do(req) 515 - if err != nil { 516 - return err 517 - } 518 - defer resp.Body.Close() 519 - 520 - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { 521 - // Read response body to capture actual error message from hold 522 - body, readErr := io.ReadAll(resp.Body) 523 - if readErr != nil { 524 - return fmt.Errorf("requestCrew failed with status %d (failed to read error body: %w)", resp.StatusCode, readErr) 525 - } 526 - return fmt.Errorf("requestCrew failed with status %d: %s", resp.StatusCode, string(body)) 527 - } 528 - 529 - return nil 530 - } 531 - 532 - // GetUserClient returns an authenticated ATProto client for the user's own PDS. 533 - // Used for profile operations (reading/writing to user's own repo). 534 - // Returns nil if not authenticated or PDS not resolved. 535 - func (uc *UserContext) GetUserClient() *atproto.Client { 536 - if !uc.IsAuthenticated || uc.PDSEndpoint == "" { 537 - return nil 538 - } 539 - 540 - if uc.AuthMethod == AuthMethodOAuth && uc.refresher != nil { 541 - return atproto.NewClientWithSessionProvider(uc.PDSEndpoint, uc.DID, uc.refresher) 542 - } else if uc.AuthMethod == AuthMethodAppPassword { 543 - accessToken, _ := GetGlobalTokenCache().Get(uc.DID) 544 - return atproto.NewClient(uc.PDSEndpoint, uc.DID, accessToken) 545 - } 546 - 547 - return nil 548 - } 549 - 550 - // EnsureUserSetup ensures the user has a profile and crew membership. 551 - // Called once per user (cached for userSetupTTL). Runs in background - does not block. 552 - // Safe to call on every request. 553 - func (uc *UserContext) EnsureUserSetup() { 554 - if !uc.IsAuthenticated || uc.DID == "" { 555 - return 556 - } 557 - 558 - // Check cache - skip if recently set up 559 - if lastSetup, ok := userSetupCache.Load(uc.DID); ok { 560 - if time.Since(lastSetup.(time.Time)) < userSetupTTL { 561 - return 562 - } 563 - } 564 - 565 - // Run in background to avoid blocking requests 566 - go func() { 567 - bgCtx := context.Background() 568 - 569 - // 1. Ensure profile exists 570 - if client := uc.GetUserClient(); client != nil { 571 - uc.ensureProfile(bgCtx, client) 572 - } 573 - 574 - // 2. Ensure crew membership on default hold 575 - if uc.defaultHoldDID != "" { 576 - EnsureCrewMembership(bgCtx, uc.DID, uc.PDSEndpoint, uc.refresher, uc.defaultHoldDID) 577 - } 578 - 579 - // Mark as set up 580 - userSetupCache.Store(uc.DID, time.Now()) 581 - slog.Debug("User setup complete", 582 - "component", "auth/usercontext", 583 - "did", uc.DID, 584 - "defaultHoldDID", uc.defaultHoldDID) 585 - }() 586 - } 587 - 588 - // ensureProfile creates sailor profile if it doesn't exist. 589 - // Inline implementation to avoid circular import with storage package. 590 - func (uc *UserContext) ensureProfile(ctx context.Context, client *atproto.Client) { 591 - // Check if profile already exists 592 - profile, err := client.GetRecord(ctx, atproto.SailorProfileCollection, "self") 593 - if err == nil && profile != nil { 594 - return // Already exists 595 - } 596 - 597 - // Create profile with default hold 598 - normalizedDID := "" 599 - if uc.defaultHoldDID != "" { 600 - normalizedDID = atproto.ResolveHoldDIDFromURL(uc.defaultHoldDID) 601 - } 602 - 603 - newProfile := atproto.NewSailorProfileRecord(normalizedDID) 604 - if _, err := client.PutRecord(ctx, atproto.SailorProfileCollection, "self", newProfile); err != nil { 605 - slog.Warn("Failed to create sailor profile", 606 - "component", "auth/usercontext", 607 - "did", uc.DID, 608 - "error", err) 609 - return 610 - } 611 - 612 - slog.Debug("Created sailor profile", 613 - "component", "auth/usercontext", 614 - "did", uc.DID, 615 - "defaultHold", normalizedDID) 616 - } 617 - 618 - // GetATProtoClient returns a cached ATProto client for the target owner's PDS. 619 - // Authenticated if user is owner, otherwise anonymous. 620 - // Cached per-request (uses sync.Once). 621 - func (uc *UserContext) GetATProtoClient() *atproto.Client { 622 - uc.atprotoClientOnce.Do(func() { 623 - if uc.TargetOwnerPDS == "" { 624 - return 625 - } 626 - 627 - // If puller is owner and authenticated, use authenticated client 628 - if uc.DID == uc.TargetOwnerDID && uc.IsAuthenticated { 629 - if uc.AuthMethod == AuthMethodOAuth && uc.refresher != nil { 630 - uc.atprotoClient = atproto.NewClientWithSessionProvider(uc.TargetOwnerPDS, uc.TargetOwnerDID, uc.refresher) 631 - return 632 - } else if uc.AuthMethod == AuthMethodAppPassword { 633 - accessToken, _ := GetGlobalTokenCache().Get(uc.TargetOwnerDID) 634 - uc.atprotoClient = atproto.NewClient(uc.TargetOwnerPDS, uc.TargetOwnerDID, accessToken) 635 - return 636 - } 637 - } 638 - 639 - // Anonymous client for reads 640 - uc.atprotoClient = atproto.NewClient(uc.TargetOwnerPDS, uc.TargetOwnerDID, "") 641 - }) 642 - return uc.atprotoClient 643 - } 644 - 645 - // ResolveHoldDID finds the hold for the target repository. 646 - // - Pull: uses database lookup (historical from manifest) 647 - // - Push: uses discovery (sailor profile โ†’ default) 648 - // 649 - // Must be called after SetTarget() is called with at least TargetOwnerDID and TargetRepo set. 650 - // Updates TargetHoldDID on success. 651 - func (uc *UserContext) ResolveHoldDID(ctx context.Context, sqlDB *sql.DB) (string, error) { 652 - if uc.TargetOwnerDID == "" { 653 - return "", fmt.Errorf("target owner not set") 654 - } 655 - 656 - var holdDID string 657 - var err error 658 - 659 - switch uc.Action { 660 - case ActionPull: 661 - // For pulls, look up historical hold from database 662 - holdDID, err = uc.resolveHoldForPull(ctx, sqlDB) 663 - case ActionPush: 664 - // For pushes, discover hold from owner's profile 665 - holdDID, err = uc.resolveHoldForPush(ctx) 666 - default: 667 - // Default to push discovery 668 - holdDID, err = uc.resolveHoldForPush(ctx) 669 - } 670 - 671 - if err != nil { 672 - return "", err 673 - } 674 - 675 - if holdDID == "" { 676 - return "", fmt.Errorf("no hold DID found for %s/%s", uc.TargetOwnerDID, uc.TargetRepo) 677 - } 678 - 679 - uc.TargetHoldDID = holdDID 680 - return holdDID, nil 681 - } 682 - 683 - // resolveHoldForPull looks up the hold from the database (historical reference) 684 - func (uc *UserContext) resolveHoldForPull(ctx context.Context, sqlDB *sql.DB) (string, error) { 685 - // If no database is available, fall back to discovery 686 - if sqlDB == nil { 687 - return uc.resolveHoldForPush(ctx) 688 - } 689 - 690 - // Try database lookup first 691 - holdDID, err := db.GetLatestHoldDIDForRepo(sqlDB, uc.TargetOwnerDID, uc.TargetRepo) 692 - if err != nil { 693 - slog.Debug("Database lookup failed, falling back to discovery", 694 - "component", "auth/context", 695 - "ownerDID", uc.TargetOwnerDID, 696 - "repo", uc.TargetRepo, 697 - "error", err) 698 - return uc.resolveHoldForPush(ctx) 699 - } 700 - 701 - if holdDID != "" { 702 - return holdDID, nil 703 - } 704 - 705 - // No historical hold found, fall back to discovery 706 - return uc.resolveHoldForPush(ctx) 707 - } 708 - 709 - // resolveHoldForPush discovers hold from owner's sailor profile or default 710 - func (uc *UserContext) resolveHoldForPush(ctx context.Context) (string, error) { 711 - // Create anonymous client to query owner's profile 712 - client := atproto.NewClient(uc.TargetOwnerPDS, uc.TargetOwnerDID, "") 713 - 714 - // Try to get owner's sailor profile 715 - record, err := client.GetRecord(ctx, atproto.SailorProfileCollection, "self") 716 - if err == nil && record != nil { 717 - var profile atproto.SailorProfileRecord 718 - if jsonErr := json.Unmarshal(record.Value, &profile); jsonErr == nil { 719 - if profile.DefaultHold != "" { 720 - // Normalize to DID if needed 721 - holdDID := profile.DefaultHold 722 - if !atproto.IsDID(holdDID) { 723 - holdDID = atproto.ResolveHoldDIDFromURL(holdDID) 724 - } 725 - slog.Debug("Found hold from owner's profile", 726 - "component", "auth/context", 727 - "ownerDID", uc.TargetOwnerDID, 728 - "holdDID", holdDID) 729 - return holdDID, nil 730 - } 731 - } 732 - } 733 - 734 - // Fall back to default hold 735 - if uc.defaultHoldDID != "" { 736 - slog.Debug("Using default hold", 737 - "component", "auth/context", 738 - "ownerDID", uc.TargetOwnerDID, 739 - "defaultHoldDID", uc.defaultHoldDID) 740 - return uc.defaultHoldDID, nil 741 - } 742 - 743 - return "", fmt.Errorf("no hold configured for %s and no default hold set", uc.TargetOwnerDID) 744 - } 745 - 746 - // ============================================================================= 747 - // Test Helper Methods 748 - // ============================================================================= 749 - // These methods are designed to make UserContext testable by allowing tests 750 - // to bypass network-dependent code paths (PDS resolution, OAuth token fetching). 751 - // Only use these in tests - they are not intended for production use. 752 - 753 - // SetPDSForTest sets the PDS endpoint directly, bypassing ResolvePDS network calls. 754 - // This allows tests to skip DID resolution which would make network requests. 755 - // Deprecated: Use SetPDS instead. 756 - func (uc *UserContext) SetPDSForTest(handle, pdsEndpoint string) { 757 - uc.SetPDS(handle, pdsEndpoint) 758 - } 759 - 760 - // SetServiceTokenForTest pre-populates a service token for the given holdDID, 761 - // bypassing the sync.Once and OAuth/app-password fetching logic. 762 - // The token will appear as if it was already fetched and cached. 763 - func (uc *UserContext) SetServiceTokenForTest(holdDID, token string) { 764 - entry := &serviceTokenEntry{ 765 - token: token, 766 - expiresAt: time.Now().Add(5 * time.Minute), 767 - err: nil, 768 - } 769 - // Mark the sync.Once as done so real fetch won't happen 770 - entry.once.Do(func() {}) 771 - uc.serviceTokens.Store(holdDID, entry) 772 - } 773 - 774 - // SetAuthorizerForTest sets the authorizer for permission checks. 775 - // Use with MockHoldAuthorizer to control CanRead/CanWrite behavior in tests. 776 - func (uc *UserContext) SetAuthorizerForTest(authorizer HoldAuthorizer) { 777 - uc.authorizer = authorizer 778 - } 779 - 780 - // SetDefaultHoldDIDForTest sets the default hold DID for tests. 781 - // This is used as fallback when resolving hold for push operations. 782 - func (uc *UserContext) SetDefaultHoldDIDForTest(holdDID string) { 783 - uc.defaultHoldDID = holdDID 784 - }
+27 -70
pkg/hold/pds/auth.go
··· 4 4 "context" 5 5 "encoding/base64" 6 6 "encoding/json" 7 - "errors" 8 7 "fmt" 9 8 "io" 10 9 "log/slog" ··· 19 18 "github.com/golang-jwt/jwt/v5" 20 19 ) 21 20 22 - // Authentication errors 23 - var ( 24 - ErrMissingAuthHeader = errors.New("missing Authorization header") 25 - ErrInvalidAuthFormat = errors.New("invalid Authorization header format") 26 - ErrInvalidAuthScheme = errors.New("invalid authorization scheme: expected 'Bearer' or 'DPoP'") 27 - ErrMissingToken = errors.New("missing token") 28 - ErrMissingDPoPHeader = errors.New("missing DPoP header") 29 - ) 30 - 31 - // JWT validation errors 32 - var ( 33 - ErrInvalidJWTFormat = errors.New("invalid JWT format: expected header.payload.signature") 34 - ErrMissingISSClaim = errors.New("missing 'iss' claim in token") 35 - ErrMissingSubClaim = errors.New("missing 'sub' claim in token") 36 - ErrTokenExpired = errors.New("token has expired") 37 - ) 38 - 39 - // AuthError provides structured authorization error information 40 - type AuthError struct { 41 - Action string // The action being attempted: "blob:read", "blob:write", "crew:admin" 42 - Reason string // Why access was denied 43 - Required []string // What permission(s) would grant access 44 - } 45 - 46 - func (e *AuthError) Error() string { 47 - return fmt.Sprintf("access denied for %s: %s (required: %s)", 48 - e.Action, e.Reason, strings.Join(e.Required, " or ")) 49 - } 50 - 51 - // NewAuthError creates a new AuthError 52 - func NewAuthError(action, reason string, required ...string) *AuthError { 53 - return &AuthError{ 54 - Action: action, 55 - Reason: reason, 56 - Required: required, 57 - } 58 - } 59 - 60 21 // HTTPClient interface allows injecting a custom HTTP client for testing 61 22 type HTTPClient interface { 62 23 Do(*http.Request) (*http.Response, error) ··· 83 44 // Extract Authorization header 84 45 authHeader := r.Header.Get("Authorization") 85 46 if authHeader == "" { 86 - return nil, ErrMissingAuthHeader 47 + return nil, fmt.Errorf("missing Authorization header") 87 48 } 88 49 89 50 // Check for DPoP authorization scheme 90 51 parts := strings.SplitN(authHeader, " ", 2) 91 52 if len(parts) != 2 { 92 - return nil, ErrInvalidAuthFormat 53 + return nil, fmt.Errorf("invalid Authorization header format") 93 54 } 94 55 95 56 if parts[0] != "DPoP" { ··· 98 59 99 60 accessToken := parts[1] 100 61 if accessToken == "" { 101 - return nil, ErrMissingToken 62 + return nil, fmt.Errorf("missing access token") 102 63 } 103 64 104 65 // Extract DPoP header 105 66 dpopProof := r.Header.Get("DPoP") 106 67 if dpopProof == "" { 107 - return nil, ErrMissingDPoPHeader 68 + return nil, fmt.Errorf("missing DPoP header") 108 69 } 109 70 110 71 // TODO: We could verify the DPoP proof locally (signature, HTM, HTU, etc.) ··· 148 109 // JWT format: header.payload.signature 149 110 parts := strings.Split(token, ".") 150 111 if len(parts) != 3 { 151 - return "", "", ErrInvalidJWTFormat 112 + return "", "", fmt.Errorf("invalid JWT format") 152 113 } 153 114 154 115 // Decode payload (base64url) ··· 168 129 } 169 130 170 131 if claims.Sub == "" { 171 - return "", "", ErrMissingSubClaim 132 + return "", "", fmt.Errorf("missing sub claim (DID)") 172 133 } 173 134 174 135 if claims.Iss == "" { 175 - return "", "", ErrMissingISSClaim 136 + return "", "", fmt.Errorf("missing iss claim (PDS)") 176 137 } 177 138 178 139 return claims.Sub, claims.Iss, nil ··· 255 216 return nil, fmt.Errorf("DPoP authentication failed: %w", err) 256 217 } 257 218 } else { 258 - return nil, ErrInvalidAuthScheme 219 + return nil, fmt.Errorf("missing or invalid Authorization header (expected Bearer or DPoP)") 259 220 } 260 221 261 222 // Get captain record to check owner ··· 282 243 return user, nil 283 244 } 284 245 // User is crew but doesn't have admin permission 285 - return nil, NewAuthError("crew:admin", "crew member lacks permission", "crew:admin") 246 + return nil, fmt.Errorf("crew member lacks required 'crew:admin' permission") 286 247 } 287 248 } 288 249 289 250 // User is neither owner nor authorized crew 290 - return nil, NewAuthError("crew:admin", "user is not a crew member", "crew:admin") 251 + return nil, fmt.Errorf("user is not authorized (must be hold owner or crew admin)") 291 252 } 292 253 293 254 // ValidateBlobWriteAccess validates that the request has valid authentication ··· 315 276 return nil, fmt.Errorf("DPoP authentication failed: %w", err) 316 277 } 317 278 } else { 318 - return nil, ErrInvalidAuthScheme 279 + return nil, fmt.Errorf("missing or invalid Authorization header (expected Bearer or DPoP)") 319 280 } 320 281 321 282 // Get captain record to check owner and public settings ··· 342 303 return user, nil 343 304 } 344 305 // User is crew but doesn't have write permission 345 - return nil, NewAuthError("blob:write", "crew member lacks permission", "blob:write") 306 + return nil, fmt.Errorf("crew member lacks required 'blob:write' permission") 346 307 } 347 308 } 348 309 349 310 // User is neither owner nor authorized crew 350 - return nil, NewAuthError("blob:write", "user is not a crew member", "blob:write") 311 + return nil, fmt.Errorf("user is not authorized for blob write (must be hold owner or crew with blob:write permission)") 351 312 } 352 313 353 314 // ValidateBlobReadAccess validates that the request has read access to blobs 354 315 // If captain.public = true: No auth required (returns nil user to indicate public access) 355 - // If captain.public = false: Requires valid DPoP + OAuth and (captain OR crew with blob:read or blob:write permission). 356 - // Note: blob:write implicitly grants blob:read access. 316 + // If captain.public = false: Requires valid DPoP + OAuth and (captain OR crew with blob:read permission). 357 317 // The httpClient parameter is optional and defaults to http.DefaultClient if nil. 358 318 func ValidateBlobReadAccess(r *http.Request, pds *HoldPDS, httpClient HTTPClient) (*ValidatedUser, error) { 359 319 // Get captain record to check public setting ··· 384 344 return nil, fmt.Errorf("DPoP authentication failed: %w", err) 385 345 } 386 346 } else { 387 - return nil, ErrInvalidAuthScheme 347 + return nil, fmt.Errorf("missing or invalid Authorization header (expected Bearer or DPoP)") 388 348 } 389 349 390 350 // Check if user is the owner (always has read access) ··· 392 352 return user, nil 393 353 } 394 354 395 - // Check if user is crew with blob:read or blob:write permission 396 - // Note: blob:write implicitly grants blob:read access 355 + // Check if user is crew with blob:read permission 397 356 crew, err := pds.ListCrewMembers(r.Context()) 398 357 if err != nil { 399 358 return nil, fmt.Errorf("failed to check crew membership: %w", err) ··· 401 360 402 361 for _, member := range crew { 403 362 if member.Record.Member == user.DID { 404 - // Check if this crew member has blob:read or blob:write permission 405 - // blob:write implicitly grants read access (can't push without pulling) 406 - if slices.Contains(member.Record.Permissions, "blob:read") || 407 - slices.Contains(member.Record.Permissions, "blob:write") { 363 + // Check if this crew member has blob:read permission 364 + if slices.Contains(member.Record.Permissions, "blob:read") { 408 365 return user, nil 409 366 } 410 - // User is crew but doesn't have read or write permission 411 - return nil, NewAuthError("blob:read", "crew member lacks permission", "blob:read", "blob:write") 367 + // User is crew but doesn't have read permission 368 + return nil, fmt.Errorf("crew member lacks required 'blob:read' permission") 412 369 } 413 370 } 414 371 415 372 // User is neither owner nor authorized crew 416 - return nil, NewAuthError("blob:read", "user is not a crew member", "blob:read", "blob:write") 373 + return nil, fmt.Errorf("user is not authorized for blob read (must be hold owner or crew with blob:read permission)") 417 374 } 418 375 419 376 // ServiceTokenClaims represents the claims in a service token JWT ··· 428 385 // Extract Authorization header 429 386 authHeader := r.Header.Get("Authorization") 430 387 if authHeader == "" { 431 - return nil, ErrMissingAuthHeader 388 + return nil, fmt.Errorf("missing Authorization header") 432 389 } 433 390 434 391 // Check for Bearer authorization scheme 435 392 parts := strings.SplitN(authHeader, " ", 2) 436 393 if len(parts) != 2 { 437 - return nil, ErrInvalidAuthFormat 394 + return nil, fmt.Errorf("invalid Authorization header format") 438 395 } 439 396 440 397 if parts[0] != "Bearer" { ··· 443 400 444 401 tokenString := parts[1] 445 402 if tokenString == "" { 446 - return nil, ErrMissingToken 403 + return nil, fmt.Errorf("missing token") 447 404 } 448 405 449 406 slog.Debug("Validating service token", "holdDID", holdDID) ··· 452 409 // Split token: header.payload.signature 453 410 tokenParts := strings.Split(tokenString, ".") 454 411 if len(tokenParts) != 3 { 455 - return nil, ErrInvalidJWTFormat 412 + return nil, fmt.Errorf("invalid JWT format") 456 413 } 457 414 458 415 // Decode payload (second part) to extract claims ··· 470 427 // Get issuer (user DID) 471 428 issuerDID := claims.Issuer 472 429 if issuerDID == "" { 473 - return nil, ErrMissingISSClaim 430 + return nil, fmt.Errorf("missing iss claim") 474 431 } 475 432 476 433 // Verify audience matches this hold service ··· 488 445 return nil, fmt.Errorf("failed to get expiration: %w", err) 489 446 } 490 447 if exp != nil && time.Now().After(exp.Time) { 491 - return nil, ErrTokenExpired 448 + return nil, fmt.Errorf("token has expired") 492 449 } 493 450 494 451 // Verify JWT signature using ATProto's secp256k1 crypto
-110
pkg/hold/pds/auth_test.go
··· 771 771 } 772 772 } 773 773 774 - // TestValidateBlobReadAccess_BlobWriteImpliesRead tests that blob:write grants read access 775 - func TestValidateBlobReadAccess_BlobWriteImpliesRead(t *testing.T) { 776 - ownerDID := "did:plc:owner123" 777 - 778 - pds, ctx := setupTestPDSWithBootstrap(t, ownerDID, false, false) 779 - 780 - // Verify captain record has public=false (private hold) 781 - _, captain, err := pds.GetCaptainRecord(ctx) 782 - if err != nil { 783 - t.Fatalf("Failed to get captain record: %v", err) 784 - } 785 - 786 - if captain.Public { 787 - t.Error("Expected public=false for captain record") 788 - } 789 - 790 - // Add crew member with ONLY blob:write permission (no blob:read) 791 - writerDID := "did:plc:writer123" 792 - _, err = pds.AddCrewMember(ctx, writerDID, "writer", []string{"blob:write"}) 793 - if err != nil { 794 - t.Fatalf("Failed to add crew writer: %v", err) 795 - } 796 - 797 - mockClient := &mockPDSClient{} 798 - 799 - // Test writer (has only blob:write permission) can read 800 - t.Run("crew with blob:write can read", func(t *testing.T) { 801 - dpopHelper, err := NewDPoPTestHelper(writerDID, "https://test-pds.example.com") 802 - if err != nil { 803 - t.Fatalf("Failed to create DPoP helper: %v", err) 804 - } 805 - 806 - req := httptest.NewRequest(http.MethodGet, "/test", nil) 807 - if err := dpopHelper.AddDPoPToRequest(req); err != nil { 808 - t.Fatalf("Failed to add DPoP to request: %v", err) 809 - } 810 - 811 - // This should SUCCEED because blob:write implies blob:read 812 - user, err := ValidateBlobReadAccess(req, pds, mockClient) 813 - if err != nil { 814 - t.Errorf("Expected blob:write to grant read access, got error: %v", err) 815 - } 816 - 817 - if user == nil { 818 - t.Error("Expected user to be returned for valid read access") 819 - } else if user.DID != writerDID { 820 - t.Errorf("Expected user DID %s, got %s", writerDID, user.DID) 821 - } 822 - }) 823 - 824 - // Also verify that crew with only blob:read still works 825 - t.Run("crew with blob:read can read", func(t *testing.T) { 826 - readerDID := "did:plc:reader123" 827 - _, err = pds.AddCrewMember(ctx, readerDID, "reader", []string{"blob:read"}) 828 - if err != nil { 829 - t.Fatalf("Failed to add crew reader: %v", err) 830 - } 831 - 832 - dpopHelper, err := NewDPoPTestHelper(readerDID, "https://test-pds.example.com") 833 - if err != nil { 834 - t.Fatalf("Failed to create DPoP helper: %v", err) 835 - } 836 - 837 - req := httptest.NewRequest(http.MethodGet, "/test", nil) 838 - if err := dpopHelper.AddDPoPToRequest(req); err != nil { 839 - t.Fatalf("Failed to add DPoP to request: %v", err) 840 - } 841 - 842 - user, err := ValidateBlobReadAccess(req, pds, mockClient) 843 - if err != nil { 844 - t.Errorf("Expected blob:read to grant read access, got error: %v", err) 845 - } 846 - 847 - if user == nil { 848 - t.Error("Expected user to be returned for valid read access") 849 - } else if user.DID != readerDID { 850 - t.Errorf("Expected user DID %s, got %s", readerDID, user.DID) 851 - } 852 - }) 853 - 854 - // Verify crew with neither permission cannot read 855 - t.Run("crew without read or write cannot read", func(t *testing.T) { 856 - noPermDID := "did:plc:noperm123" 857 - _, err = pds.AddCrewMember(ctx, noPermDID, "noperm", []string{"crew:admin"}) 858 - if err != nil { 859 - t.Fatalf("Failed to add crew member: %v", err) 860 - } 861 - 862 - dpopHelper, err := NewDPoPTestHelper(noPermDID, "https://test-pds.example.com") 863 - if err != nil { 864 - t.Fatalf("Failed to create DPoP helper: %v", err) 865 - } 866 - 867 - req := httptest.NewRequest(http.MethodGet, "/test", nil) 868 - if err := dpopHelper.AddDPoPToRequest(req); err != nil { 869 - t.Fatalf("Failed to add DPoP to request: %v", err) 870 - } 871 - 872 - _, err = ValidateBlobReadAccess(req, pds, mockClient) 873 - if err == nil { 874 - t.Error("Expected error for crew without read or write permission") 875 - } 876 - 877 - // Verify error message format 878 - if !strings.Contains(err.Error(), "access denied for blob:read") { 879 - t.Errorf("Expected structured error message, got: %v", err) 880 - } 881 - }) 882 - } 883 - 884 774 // TestValidateOwnerOrCrewAdmin tests admin permission checking 885 775 func TestValidateOwnerOrCrewAdmin(t *testing.T) { 886 776 ownerDID := "did:plc:owner123"
+4 -4
pkg/hold/pds/captain.go
··· 18 18 // CreateCaptainRecord creates the captain record for the hold (first-time only). 19 19 // This will FAIL if the captain record already exists. Use UpdateCaptainRecord to modify. 20 20 func (p *HoldPDS) CreateCaptainRecord(ctx context.Context, ownerDID string, public bool, allowAllCrew bool, enableBlueskyPosts bool) (cid.Cid, error) { 21 - captainRecord := &atproto.CaptainRecord{ 22 - Type: atproto.CaptainCollection, 21 + captainRecord := &atproto.HoldCaptain{ 22 + LexiconTypeID: atproto.CaptainCollection, 23 23 Owner: ownerDID, 24 24 Public: public, 25 25 AllowAllCrew: allowAllCrew, ··· 40 40 } 41 41 42 42 // GetCaptainRecord retrieves the captain record 43 - func (p *HoldPDS) GetCaptainRecord(ctx context.Context) (cid.Cid, *atproto.CaptainRecord, error) { 43 + func (p *HoldPDS) GetCaptainRecord(ctx context.Context) (cid.Cid, *atproto.HoldCaptain, error) { 44 44 // Use repomgr.GetRecord - our types are registered in init() 45 45 // so it will automatically unmarshal to the concrete type 46 46 recordCID, val, err := p.repomgr.GetRecord(ctx, p.uid, atproto.CaptainCollection, CaptainRkey, cid.Undef) ··· 49 49 } 50 50 51 51 // Type assert to our concrete type 52 - captainRecord, ok := val.(*atproto.CaptainRecord) 52 + captainRecord, ok := val.(*atproto.HoldCaptain) 53 53 if !ok { 54 54 return cid.Undef, nil, fmt.Errorf("unexpected type for captain record: %T", val) 55 55 }
+43 -32
pkg/hold/pds/captain_test.go
··· 12 12 "atcr.io/pkg/atproto" 13 13 ) 14 14 15 + // ptrString returns a pointer to the given string 16 + func ptrString(s string) *string { 17 + return &s 18 + } 19 + 15 20 // setupTestPDS creates a test PDS instance in a temporary directory 16 21 // It initializes the repo but does NOT create captain/crew records 17 22 // Tests should call Bootstrap or create records as needed ··· 146 151 if captain.EnableBlueskyPosts != tt.enableBlueskyPosts { 147 152 t.Errorf("Expected enableBlueskyPosts=%v, got %v", tt.enableBlueskyPosts, captain.EnableBlueskyPosts) 148 153 } 149 - if captain.Type != atproto.CaptainCollection { 150 - t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.Type) 154 + if captain.LexiconTypeID != atproto.CaptainCollection { 155 + t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.LexiconTypeID) 151 156 } 152 157 if captain.DeployedAt == "" { 153 158 t.Error("Expected deployedAt to be set") ··· 322 327 func TestCaptainRecord_CBORRoundtrip(t *testing.T) { 323 328 tests := []struct { 324 329 name string 325 - record *atproto.CaptainRecord 330 + record *atproto.HoldCaptain 326 331 }{ 327 332 { 328 333 name: "Basic captain", 329 - record: &atproto.CaptainRecord{ 330 - Type: atproto.CaptainCollection, 331 - Owner: "did:plc:alice123", 332 - Public: true, 333 - AllowAllCrew: false, 334 - DeployedAt: "2025-10-16T12:00:00Z", 334 + record: &atproto.HoldCaptain{ 335 + LexiconTypeID: atproto.CaptainCollection, 336 + Owner: "did:plc:alice123", 337 + Public: true, 338 + AllowAllCrew: false, 339 + DeployedAt: "2025-10-16T12:00:00Z", 335 340 }, 336 341 }, 337 342 { 338 343 name: "Captain with optional fields", 339 - record: &atproto.CaptainRecord{ 340 - Type: atproto.CaptainCollection, 341 - Owner: "did:plc:bob456", 342 - Public: false, 343 - AllowAllCrew: true, 344 - DeployedAt: "2025-10-16T12:00:00Z", 345 - Region: "us-west-2", 346 - Provider: "fly.io", 344 + record: &atproto.HoldCaptain{ 345 + LexiconTypeID: atproto.CaptainCollection, 346 + Owner: "did:plc:bob456", 347 + Public: false, 348 + AllowAllCrew: true, 349 + DeployedAt: "2025-10-16T12:00:00Z", 350 + Region: ptrString("us-west-2"), 351 + Provider: ptrString("fly.io"), 347 352 }, 348 353 }, 349 354 { 350 355 name: "Captain with empty optional fields", 351 - record: &atproto.CaptainRecord{ 352 - Type: atproto.CaptainCollection, 353 - Owner: "did:plc:charlie789", 354 - Public: true, 355 - AllowAllCrew: true, 356 - DeployedAt: "2025-10-16T12:00:00Z", 357 - Region: "", 358 - Provider: "", 356 + record: &atproto.HoldCaptain{ 357 + LexiconTypeID: atproto.CaptainCollection, 358 + Owner: "did:plc:charlie789", 359 + Public: true, 360 + AllowAllCrew: true, 361 + DeployedAt: "2025-10-16T12:00:00Z", 362 + Region: ptrString(""), 363 + Provider: ptrString(""), 359 364 }, 360 365 }, 361 366 } ··· 375 380 } 376 381 377 382 // Unmarshal from CBOR 378 - var decoded atproto.CaptainRecord 383 + var decoded atproto.HoldCaptain 379 384 err = decoded.UnmarshalCBOR(bytes.NewReader(cborBytes)) 380 385 if err != nil { 381 386 t.Fatalf("UnmarshalCBOR failed: %v", err) 382 387 } 383 388 384 389 // Verify all fields match 385 - if decoded.Type != tt.record.Type { 386 - t.Errorf("Type mismatch: expected %s, got %s", tt.record.Type, decoded.Type) 390 + if decoded.LexiconTypeID != tt.record.LexiconTypeID { 391 + t.Errorf("LexiconTypeID mismatch: expected %s, got %s", tt.record.LexiconTypeID, decoded.LexiconTypeID) 387 392 } 388 393 if decoded.Owner != tt.record.Owner { 389 394 t.Errorf("Owner mismatch: expected %s, got %s", tt.record.Owner, decoded.Owner) ··· 397 402 if decoded.DeployedAt != tt.record.DeployedAt { 398 403 t.Errorf("DeployedAt mismatch: expected %s, got %s", tt.record.DeployedAt, decoded.DeployedAt) 399 404 } 400 - if decoded.Region != tt.record.Region { 401 - t.Errorf("Region mismatch: expected %s, got %s", tt.record.Region, decoded.Region) 405 + // Compare Region pointers (may be nil) 406 + if (decoded.Region == nil) != (tt.record.Region == nil) { 407 + t.Errorf("Region nil mismatch: expected %v, got %v", tt.record.Region, decoded.Region) 408 + } else if decoded.Region != nil && *decoded.Region != *tt.record.Region { 409 + t.Errorf("Region mismatch: expected %q, got %q", *tt.record.Region, *decoded.Region) 402 410 } 403 - if decoded.Provider != tt.record.Provider { 404 - t.Errorf("Provider mismatch: expected %s, got %s", tt.record.Provider, decoded.Provider) 411 + // Compare Provider pointers (may be nil) 412 + if (decoded.Provider == nil) != (tt.record.Provider == nil) { 413 + t.Errorf("Provider nil mismatch: expected %v, got %v", tt.record.Provider, decoded.Provider) 414 + } else if decoded.Provider != nil && *decoded.Provider != *tt.record.Provider { 415 + t.Errorf("Provider mismatch: expected %q, got %q", *tt.record.Provider, *decoded.Provider) 405 416 } 406 417 }) 407 418 }
+10 -10
pkg/hold/pds/crew.go
··· 15 15 16 16 // AddCrewMember adds a new crew member to the hold and commits to carstore 17 17 func (p *HoldPDS) AddCrewMember(ctx context.Context, memberDID, role string, permissions []string) (cid.Cid, error) { 18 - crewRecord := &atproto.CrewRecord{ 19 - Type: atproto.CrewCollection, 20 - Member: memberDID, 21 - Role: role, 22 - Permissions: permissions, 23 - AddedAt: time.Now().Format(time.RFC3339), 18 + crewRecord := &atproto.HoldCrew{ 19 + LexiconTypeID: atproto.CrewCollection, 20 + Member: memberDID, 21 + Role: role, 22 + Permissions: permissions, 23 + AddedAt: time.Now().Format(time.RFC3339), 24 24 } 25 25 26 26 // Use repomgr for crew operations - auto-generated rkey is fine ··· 33 33 } 34 34 35 35 // GetCrewMember retrieves a crew member by their record key 36 - func (p *HoldPDS) GetCrewMember(ctx context.Context, rkey string) (cid.Cid, *atproto.CrewRecord, error) { 36 + func (p *HoldPDS) GetCrewMember(ctx context.Context, rkey string) (cid.Cid, *atproto.HoldCrew, error) { 37 37 // Use repomgr.GetRecord - our types are registered in init() 38 38 recordCID, val, err := p.repomgr.GetRecord(ctx, p.uid, atproto.CrewCollection, rkey, cid.Undef) 39 39 if err != nil { ··· 41 41 } 42 42 43 43 // Type assert to our concrete type 44 - crewRecord, ok := val.(*atproto.CrewRecord) 44 + crewRecord, ok := val.(*atproto.HoldCrew) 45 45 if !ok { 46 46 return cid.Undef, nil, fmt.Errorf("unexpected type for crew record: %T", val) 47 47 } ··· 53 53 type CrewMemberWithKey struct { 54 54 Rkey string 55 55 Cid cid.Cid 56 - Record *atproto.CrewRecord 56 + Record *atproto.HoldCrew 57 57 } 58 58 59 59 // ListCrewMembers returns all crew members with their rkeys ··· 108 108 } 109 109 110 110 // Unmarshal the CBOR bytes into our concrete type 111 - var crewRecord atproto.CrewRecord 111 + var crewRecord atproto.HoldCrew 112 112 if err := crewRecord.UnmarshalCBOR(bytes.NewReader(*recBytes)); err != nil { 113 113 return fmt.Errorf("failed to decode crew record: %w", err) 114 114 }
+30 -30
pkg/hold/pds/crew_test.go
··· 53 53 t.Errorf("Expected permission[%d]=%s, got %s", i, perm, crew.Record.Permissions[i]) 54 54 } 55 55 } 56 - if crew.Record.Type != atproto.CrewCollection { 57 - t.Errorf("Expected type %s, got %s", atproto.CrewCollection, crew.Record.Type) 56 + if crew.Record.LexiconTypeID != atproto.CrewCollection { 57 + t.Errorf("Expected type %s, got %s", atproto.CrewCollection, crew.Record.LexiconTypeID) 58 58 } 59 59 if crew.Record.AddedAt == "" { 60 60 t.Error("Expected addedAt to be set") ··· 348 348 func TestCrewRecord_CBORRoundtrip(t *testing.T) { 349 349 tests := []struct { 350 350 name string 351 - record *atproto.CrewRecord 351 + record *atproto.HoldCrew 352 352 }{ 353 353 { 354 354 name: "Basic crew member", 355 - record: &atproto.CrewRecord{ 356 - Type: atproto.CrewCollection, 357 - Member: "did:plc:alice123", 358 - Role: "writer", 359 - Permissions: []string{"blob:read", "blob:write"}, 360 - AddedAt: "2025-10-16T12:00:00Z", 355 + record: &atproto.HoldCrew{ 356 + LexiconTypeID: atproto.CrewCollection, 357 + Member: "did:plc:alice123", 358 + Role: "writer", 359 + Permissions: []string{"blob:read", "blob:write"}, 360 + AddedAt: "2025-10-16T12:00:00Z", 361 361 }, 362 362 }, 363 363 { 364 364 name: "Admin crew member", 365 - record: &atproto.CrewRecord{ 366 - Type: atproto.CrewCollection, 367 - Member: "did:plc:bob456", 368 - Role: "admin", 369 - Permissions: []string{"blob:read", "blob:write", "crew:admin"}, 370 - AddedAt: "2025-10-16T13:00:00Z", 365 + record: &atproto.HoldCrew{ 366 + LexiconTypeID: atproto.CrewCollection, 367 + Member: "did:plc:bob456", 368 + Role: "admin", 369 + Permissions: []string{"blob:read", "blob:write", "crew:admin"}, 370 + AddedAt: "2025-10-16T13:00:00Z", 371 371 }, 372 372 }, 373 373 { 374 374 name: "Reader crew member", 375 - record: &atproto.CrewRecord{ 376 - Type: atproto.CrewCollection, 377 - Member: "did:plc:charlie789", 378 - Role: "reader", 379 - Permissions: []string{"blob:read"}, 380 - AddedAt: "2025-10-16T14:00:00Z", 375 + record: &atproto.HoldCrew{ 376 + LexiconTypeID: atproto.CrewCollection, 377 + Member: "did:plc:charlie789", 378 + Role: "reader", 379 + Permissions: []string{"blob:read"}, 380 + AddedAt: "2025-10-16T14:00:00Z", 381 381 }, 382 382 }, 383 383 { 384 384 name: "Crew member with empty permissions", 385 - record: &atproto.CrewRecord{ 386 - Type: atproto.CrewCollection, 387 - Member: "did:plc:dave012", 388 - Role: "none", 389 - Permissions: []string{}, 390 - AddedAt: "2025-10-16T15:00:00Z", 385 + record: &atproto.HoldCrew{ 386 + LexiconTypeID: atproto.CrewCollection, 387 + Member: "did:plc:dave012", 388 + Role: "none", 389 + Permissions: []string{}, 390 + AddedAt: "2025-10-16T15:00:00Z", 391 391 }, 392 392 }, 393 393 } ··· 407 407 } 408 408 409 409 // Unmarshal from CBOR 410 - var decoded atproto.CrewRecord 410 + var decoded atproto.HoldCrew 411 411 err = decoded.UnmarshalCBOR(bytes.NewReader(cborBytes)) 412 412 if err != nil { 413 413 t.Fatalf("UnmarshalCBOR failed: %v", err) 414 414 } 415 415 416 416 // Verify all fields match 417 - if decoded.Type != tt.record.Type { 418 - t.Errorf("Type mismatch: expected %s, got %s", tt.record.Type, decoded.Type) 417 + if decoded.LexiconTypeID != tt.record.LexiconTypeID { 418 + t.Errorf("LexiconTypeID mismatch: expected %s, got %s", tt.record.LexiconTypeID, decoded.LexiconTypeID) 419 419 } 420 420 if decoded.Member != tt.record.Member { 421 421 t.Errorf("Member mismatch: expected %s, got %s", tt.record.Member, decoded.Member)
+5 -5
pkg/hold/pds/layer.go
··· 9 9 10 10 // CreateLayerRecord creates a new layer record in the hold's PDS 11 11 // Returns the rkey and CID of the created record 12 - func (p *HoldPDS) CreateLayerRecord(ctx context.Context, record *atproto.LayerRecord) (string, string, error) { 12 + func (p *HoldPDS) CreateLayerRecord(ctx context.Context, record *atproto.HoldLayer) (string, string, error) { 13 13 // Validate record 14 - if record.Type != atproto.LayerCollection { 15 - return "", "", fmt.Errorf("invalid record type: %s", record.Type) 14 + if record.LexiconTypeID != atproto.LayerCollection { 15 + return "", "", fmt.Errorf("invalid record type: %s", record.LexiconTypeID) 16 16 } 17 17 18 18 if record.Digest == "" { ··· 40 40 41 41 // GetLayerRecord retrieves a specific layer record by rkey 42 42 // Note: This is a simplified implementation. For production, you may need to pass the CID 43 - func (p *HoldPDS) GetLayerRecord(ctx context.Context, rkey string) (*atproto.LayerRecord, error) { 43 + func (p *HoldPDS) GetLayerRecord(ctx context.Context, rkey string) (*atproto.HoldLayer, error) { 44 44 // For now, we don't implement this as it's not needed for the manifest post feature 45 45 // Full implementation would require querying the carstore with a specific CID 46 46 return nil, fmt.Errorf("GetLayerRecord not yet implemented - use via XRPC listRecords instead") ··· 50 50 // Returns records, next cursor (empty if no more), and error 51 51 // Note: This is a simplified implementation. For production, consider adding filters 52 52 // (by repository, user, digest, etc.) and proper pagination 53 - func (p *HoldPDS) ListLayerRecords(ctx context.Context, limit int, cursor string) ([]*atproto.LayerRecord, string, error) { 53 + func (p *HoldPDS) ListLayerRecords(ctx context.Context, limit int, cursor string) ([]*atproto.HoldLayer, string, error) { 54 54 // For now, return empty list - full implementation would query the carstore 55 55 // This would require iterating over records in the collection and filtering 56 56 // In practice, layer records are mainly for analytics and Bluesky posts,
+19 -19
pkg/hold/pds/layer_test.go
··· 12 12 13 13 tests := []struct { 14 14 name string 15 - record *atproto.LayerRecord 15 + record *atproto.HoldLayer 16 16 wantErr bool 17 17 errSubstr string 18 18 }{ ··· 42 42 }, 43 43 { 44 44 name: "invalid record type", 45 - record: &atproto.LayerRecord{ 46 - Type: "wrong.type", 45 + record: &atproto.HoldLayer{ 46 + LexiconTypeID: "wrong.type", 47 47 Digest: "sha256:abc123", 48 48 Size: 1024, 49 49 MediaType: "application/vnd.oci.image.layer.v1.tar", 50 50 Repository: "test", 51 - UserDID: "did:plc:test", 51 + UserDid: "did:plc:test", 52 52 UserHandle: "test.example.com", 53 53 }, 54 54 wantErr: true, ··· 56 56 }, 57 57 { 58 58 name: "missing digest", 59 - record: &atproto.LayerRecord{ 60 - Type: atproto.LayerCollection, 59 + record: &atproto.HoldLayer{ 60 + LexiconTypeID: atproto.LayerCollection, 61 61 Digest: "", 62 62 Size: 1024, 63 63 MediaType: "application/vnd.oci.image.layer.v1.tar", 64 64 Repository: "test", 65 - UserDID: "did:plc:test", 65 + UserDid: "did:plc:test", 66 66 UserHandle: "test.example.com", 67 67 }, 68 68 wantErr: true, ··· 70 70 }, 71 71 { 72 72 name: "zero size", 73 - record: &atproto.LayerRecord{ 74 - Type: atproto.LayerCollection, 73 + record: &atproto.HoldLayer{ 74 + LexiconTypeID: atproto.LayerCollection, 75 75 Digest: "sha256:abc123", 76 76 Size: 0, 77 77 MediaType: "application/vnd.oci.image.layer.v1.tar", 78 78 Repository: "test", 79 - UserDID: "did:plc:test", 79 + UserDid: "did:plc:test", 80 80 UserHandle: "test.example.com", 81 81 }, 82 82 wantErr: true, ··· 84 84 }, 85 85 { 86 86 name: "negative size", 87 - record: &atproto.LayerRecord{ 88 - Type: atproto.LayerCollection, 87 + record: &atproto.HoldLayer{ 88 + LexiconTypeID: atproto.LayerCollection, 89 89 Digest: "sha256:abc123", 90 90 Size: -1, 91 91 MediaType: "application/vnd.oci.image.layer.v1.tar", 92 92 Repository: "test", 93 - UserDID: "did:plc:test", 93 + UserDid: "did:plc:test", 94 94 UserHandle: "test.example.com", 95 95 }, 96 96 wantErr: true, ··· 191 191 } 192 192 193 193 // Verify all fields are set correctly 194 - if record.Type != atproto.LayerCollection { 195 - t.Errorf("Type = %q, want %q", record.Type, atproto.LayerCollection) 194 + if record.LexiconTypeID != atproto.LayerCollection { 195 + t.Errorf("LexiconTypeID = %q, want %q", record.LexiconTypeID, atproto.LayerCollection) 196 196 } 197 197 198 198 if record.Digest != digest { ··· 211 211 t.Errorf("Repository = %q, want %q", record.Repository, repository) 212 212 } 213 213 214 - if record.UserDID != userDID { 215 - t.Errorf("UserDID = %q, want %q", record.UserDID, userDID) 214 + if record.UserDid != userDID { 215 + t.Errorf("UserDid = %q, want %q", record.UserDid, userDID) 216 216 } 217 217 218 218 if record.UserHandle != userHandle { ··· 282 282 } 283 283 284 284 // Verify the record can be created 285 - if record.Type != atproto.LayerCollection { 286 - t.Errorf("Type = %q, want %q", record.Type, atproto.LayerCollection) 285 + if record.LexiconTypeID != atproto.LayerCollection { 286 + t.Errorf("Type = %q, want %q", record.LexiconTypeID, atproto.LayerCollection) 287 287 } 288 288 289 289 if record.Digest != tt.digest {
+3 -7
pkg/hold/pds/server.go
··· 19 19 "github.com/ipfs/go-cid" 20 20 ) 21 21 22 - // init registers our custom ATProto types with indigo's lexutil type registry 23 - // This allows repomgr.GetRecord to automatically unmarshal our types 22 + // init registers the TangledProfileRecord type with indigo's lexutil type registry. 23 + // Note: HoldCaptain, HoldCrew, and HoldLayer are registered in pkg/atproto/register.go (generated). 24 + // TangledProfileRecord is external (sh.tangled.actor.profile) so we register it here. 24 25 func init() { 25 - // Register captain, crew, tangled profile, and layer record types 26 - // These must match the $type field in the records 27 - lexutil.RegisterType(atproto.CaptainCollection, &atproto.CaptainRecord{}) 28 - lexutil.RegisterType(atproto.CrewCollection, &atproto.CrewRecord{}) 29 - lexutil.RegisterType(atproto.LayerCollection, &atproto.LayerRecord{}) 30 26 lexutil.RegisterType(atproto.TangledProfileCollection, &atproto.TangledProfileRecord{}) 31 27 } 32 28
+6 -6
pkg/hold/pds/server_test.go
··· 150 150 if captain.AllowAllCrew != allowAllCrew { 151 151 t.Errorf("Expected allowAllCrew=%v, got %v", allowAllCrew, captain.AllowAllCrew) 152 152 } 153 - if captain.Type != atproto.CaptainCollection { 154 - t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.Type) 153 + if captain.LexiconTypeID != atproto.CaptainCollection { 154 + t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.LexiconTypeID) 155 155 } 156 156 if captain.DeployedAt == "" { 157 157 t.Error("Expected deployedAt to be set") ··· 317 317 if captain == nil { 318 318 t.Fatal("Expected non-nil captain record") 319 319 } 320 - if captain.Type != atproto.CaptainCollection { 321 - t.Errorf("Expected captain type %s, got %s", atproto.CaptainCollection, captain.Type) 320 + if captain.LexiconTypeID != atproto.CaptainCollection { 321 + t.Errorf("Expected captain type %s, got %s", atproto.CaptainCollection, captain.LexiconTypeID) 322 322 } 323 323 324 324 // Do the same for crew record ··· 331 331 } 332 332 333 333 crew := crewMembers[0].Record 334 - if crew.Type != atproto.CrewCollection { 335 - t.Errorf("Expected crew type %s, got %s", atproto.CrewCollection, crew.Type) 334 + if crew.LexiconTypeID != atproto.CrewCollection { 335 + t.Errorf("Expected crew type %s, got %s", atproto.CrewCollection, crew.LexiconTypeID) 336 336 } 337 337 } 338 338