-{
-  "lexicon": 1,
-  "id": "io.atcr.repo.page",
-  "defs": {
-    "main": {
-      "type": "record",
-      "description": "Repository page metadata including description and avatar. Users can edit this directly in their PDS to customize their repository page.",
-      "key": "any",
-      "record": {
-        "type": "object",
-        "required": ["repository", "createdAt", "updatedAt"],
-        "properties": {
-          "repository": {
-            "type": "string",
-            "description": "The name of the repository (e.g., 'myapp'). Must match the rkey.",
-            "maxLength": 256
-          },
-          "description": {
-            "type": "string",
-            "description": "Markdown README/description content for the repository page.",
-            "maxLength": 100000
-          },
-          "avatar": {
-            "type": "blob",
-            "description": "Repository avatar/icon image.",
-            "accept": ["image/png", "image/jpeg", "image/webp"],
-            "maxSize": 3000000
-          },
-          "createdAt": {
-            "type": "string",
-            "format": "datetime",
-            "description": "Record creation timestamp"
-          },
-          "updatedAt": {
-            "type": "string",
-            "format": "datetime",
-            "description": "Record last updated timestamp"
-          }
-        }
-      }
-    }
-  }
-}
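
The deleted io.atcr.repo.page record above was consumed in Go through the hand-written atproto.RepoPageRecord type, which this change also removes. A minimal sketch of how such a record shape maps onto a Go struct, with assumed JSON tags (the deleted package's exact tags are not shown in this diff):

    // Sketch only: mirrors the removed io.atcr.repo.page record shape.
    // BlobRef stands in for the PDS blob reference type; tag names beyond
    // those visible elsewhere in this diff are assumptions.
    type BlobRef struct {
        Ref struct {
            Link string `json:"$link"`
        } `json:"ref"`
        MimeType string `json:"mimeType,omitempty"`
        Size     int64  `json:"size,omitempty"`
    }

    type RepoPageRecord struct {
        Type        string    `json:"$type"`
        Repository  string    `json:"repository"`
        Description string    `json:"description,omitempty"`
        Avatar      *BlobRef  `json:"avatar,omitempty"`
        CreatedAt   time.Time `json:"createdAt"`
        UpdatedAt   time.Time `json:"updatedAt"`
    }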

lexicons/io/atcr/tag.json (+1, -2)

...
       },
       "manifestDigest": {
         "type": "string",
-        "description": "DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead.",
-        "maxLength": 128
+        "description": "DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead."
       },
       "createdAt": {
         "type": "string",

pkg/appview/config.go (+4)

...

     // CheckInterval is the hold health check refresh interval (from env: ATCR_HEALTH_CHECK_INTERVAL, default: 15m)
     CheckInterval time.Duration `yaml:"check_interval"`
+
+    // ReadmeCacheTTL is the README cache TTL (from env: ATCR_README_CACHE_TTL, default: 1h)
+    ReadmeCacheTTL time.Duration `yaml:"readme_cache_ttl"`
 }

 // JetstreamConfig defines ATProto Jetstream settings
...
     // Health and cache configuration
     cfg.Health.CacheTTL = getDurationOrDefault("ATCR_HEALTH_CACHE_TTL", 15*time.Minute)
     cfg.Health.CheckInterval = getDurationOrDefault("ATCR_HEALTH_CHECK_INTERVAL", 15*time.Minute)
+    cfg.Health.ReadmeCacheTTL = getDurationOrDefault("ATCR_README_CACHE_TTL", 1*time.Hour)

     // Jetstream configuration
     cfg.Jetstream.URL = getEnvOrDefault("JETSTREAM_URL", "wss://jetstream2.us-west.bsky.network/subscribe")
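
The getDurationOrDefault and getEnvOrDefault helpers are not shown in this diff. A plausible shape for the duration variant, assuming it reads the named environment variable and falls back to the default when unset or unparsable (and that os and time are imported), is:

    // Sketch, not the project's actual implementation.
    func getDurationOrDefault(key string, def time.Duration) time.Duration {
        v := os.Getenv(key)
        if v == "" {
            return def
        }
        d, err := time.ParseDuration(v)
        if err != nil {
            return def
        }
        return d
    }

Under that behavior, setting ATCR_README_CACHE_TTL=30m would override the 1h default added above.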

-description: Add repo_pages table and remove readme_cache
-query: |
-  -- Create repo_pages table for storing repository page metadata
-  -- This replaces readme_cache with PDS-synced data
-  CREATE TABLE IF NOT EXISTS repo_pages (
-    did TEXT NOT NULL,
-    repository TEXT NOT NULL,
-    description TEXT,
-    avatar_cid TEXT,
-    created_at TIMESTAMP NOT NULL,
-    updated_at TIMESTAMP NOT NULL,
-    PRIMARY KEY(did, repository),
-    FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
-  );
-  CREATE INDEX IF NOT EXISTS idx_repo_pages_did ON repo_pages(did);
-
-  -- Drop readme_cache table (no longer needed)
-  DROP TABLE IF EXISTS readme_cache;

pkg/appview/db/models.go (+2, -3)

...
 // TagWithPlatforms extends Tag with platform information
 type TagWithPlatforms struct {
     Tag
-    Platforms       []PlatformInfo
-    IsMultiArch     bool
-    HasAttestations bool // true if manifest list contains attestation references
+    Platforms   []PlatformInfo
+    IsMultiArch bool
 }

 // ManifestWithMetadata extends Manifest with tags and platform information

pkg/appview/db/queries.go (+33, -119)

...
     "time"
 )

-// BlobCDNURL returns the CDN URL for an ATProto blob
-// This is a local copy to avoid importing atproto (prevents circular dependencies)
-func BlobCDNURL(did, cid string) string {
-    return fmt.Sprintf("https://imgs.blue/%s/%s", did, cid)
-}
-
 // escapeLikePattern escapes SQL LIKE wildcards (%, _) and backslash for safe searching.
 // It also sanitizes the input to prevent injection attacks via special characters.
 func escapeLikePattern(s string) string {
...
         COALESCE((SELECT COUNT(*) FROM stars WHERE owner_did = u.did AND repository = t.repository), 0),
         COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = u.did AND repository = t.repository), 0),
         t.created_at,
-        m.hold_endpoint,
-        COALESCE(rp.avatar_cid, '')
+        m.hold_endpoint
     FROM tags t
     JOIN users u ON t.did = u.did
     JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest
     LEFT JOIN repository_stats rs ON t.did = rs.did AND t.repository = rs.repository
-    LEFT JOIN repo_pages rp ON t.did = rp.did AND t.repository = rp.repository
     `

     args := []any{currentUserDID}
...
     for rows.Next() {
         var p Push
         var isStarredInt int
-        var avatarCID string
-        if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint, &avatarCID); err != nil {
+        if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint); err != nil {
             return nil, 0, err
         }
         p.IsStarred = isStarredInt > 0
-        // Prefer repo page avatar over annotation icon
-        if avatarCID != "" {
-            p.IconURL = BlobCDNURL(p.DID, avatarCID)
-        }
         pushes = append(pushes, p)
     }
...
         COALESCE((SELECT COUNT(*) FROM stars WHERE owner_did = u.did AND repository = t.repository), 0),
         COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = u.did AND repository = t.repository), 0),
         t.created_at,
-        m.hold_endpoint,
-        COALESCE(rp.avatar_cid, '')
+        m.hold_endpoint
     FROM tags t
     JOIN users u ON t.did = u.did
     JOIN manifests m ON t.did = m.did AND t.repository = m.repository AND t.digest = m.digest
     LEFT JOIN repository_stats rs ON t.did = rs.did AND t.repository = rs.repository
-    LEFT JOIN repo_pages rp ON t.did = rp.did AND t.repository = rp.repository
     WHERE u.handle LIKE ? ESCAPE '\'
        OR u.did = ?
        OR t.repository LIKE ? ESCAPE '\'
...
     for rows.Next() {
         var p Push
         var isStarredInt int
-        var avatarCID string
-        if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint, &avatarCID); err != nil {
+        if err := rows.Scan(&p.DID, &p.Handle, &p.Repository, &p.Tag, &p.Digest, &p.Title, &p.Description, &p.IconURL, &p.PullCount, &p.StarCount, &isStarredInt, &p.CreatedAt, &p.HoldEndpoint); err != nil {
             return nil, 0, err
         }
         p.IsStarred = isStarredInt > 0
-        // Prefer repo page avatar over annotation icon
-        if avatarCID != "" {
-            p.IconURL = BlobCDNURL(p.DID, avatarCID)
-        }
         pushes = append(pushes, p)
     }
...
         r.Licenses = annotations["org.opencontainers.image.licenses"]
         r.IconURL = annotations["io.atcr.icon"]
         r.ReadmeURL = annotations["io.atcr.readme"]
-
-        // Check for repo page avatar (overrides annotation icon)
-        repoPage, err := GetRepoPage(db, did, r.Name)
-        if err == nil && repoPage != nil && repoPage.AvatarCID != "" {
-            r.IconURL = BlobCDNURL(did, repoPage.AvatarCID)
-        }

         repos = append(repos, r)
     }
...
 // GetTagsWithPlatforms returns all tags for a repository with platform information
 // Only multi-arch tags (manifest lists) have platform info in manifest_references
 // Single-arch tags will have empty Platforms slice (platform is obvious for single-arch)
-// Attestation references (unknown/unknown platforms) are filtered out but tracked via HasAttestations
 func GetTagsWithPlatforms(db *sql.DB, did, repository string) ([]TagWithPlatforms, error) {
     rows, err := db.Query(`
         SELECT
...
             COALESCE(mr.platform_os, '') as platform_os,
             COALESCE(mr.platform_architecture, '') as platform_architecture,
             COALESCE(mr.platform_variant, '') as platform_variant,
-            COALESCE(mr.platform_os_version, '') as platform_os_version,
-            COALESCE(mr.is_attestation, 0) as is_attestation
+            COALESCE(mr.platform_os_version, '') as platform_os_version
         FROM tags t
         JOIN manifests m ON t.digest = m.digest AND t.did = m.did AND t.repository = m.repository
         LEFT JOIN manifest_references mr ON m.id = mr.manifest_id
...
     for rows.Next() {
         var t Tag
         var mediaType, platformOS, platformArch, platformVariant, platformOSVersion string
-        var isAttestation bool

         if err := rows.Scan(&t.ID, &t.DID, &t.Repository, &t.Tag, &t.Digest, &t.CreatedAt,
-            &mediaType, &platformOS, &platformArch, &platformVariant, &platformOSVersion, &isAttestation); err != nil {
+            &mediaType, &platformOS, &platformArch, &platformVariant, &platformOSVersion); err != nil {
             return nil, err
         }
...
                 Platforms: []PlatformInfo{},
             }
             tagOrder = append(tagOrder, tagKey)
-        }
-
-        // Track if manifest list has attestations
-        if isAttestation {
-            tagMap[tagKey].HasAttestations = true
-            // Skip attestation references in platform display
-            continue
         }

         // Add platform info if present (only for multi-arch manifest lists)
...
     return time.Time{}, fmt.Errorf("unable to parse timestamp: %s", s)
 }

+// MetricsDB wraps a sql.DB and implements the metrics interface for middleware
+type MetricsDB struct {
+    db *sql.DB
+}
+
+// NewMetricsDB creates a new metrics database wrapper
+func NewMetricsDB(db *sql.DB) *MetricsDB {
+    return &MetricsDB{db: db}
+}
+
+// IncrementPullCount increments the pull count for a repository
+func (m *MetricsDB) IncrementPullCount(did, repository string) error {
+    return IncrementPullCount(m.db, did, repository)
+}
+
+// IncrementPushCount increments the push count for a repository
+func (m *MetricsDB) IncrementPushCount(did, repository string) error {
+    return IncrementPushCount(m.db, did, repository)
+}
+
+// GetLatestHoldDIDForRepo returns the hold DID from the most recent manifest for a repository
+func (m *MetricsDB) GetLatestHoldDIDForRepo(did, repository string) (string, error) {
+    return GetLatestHoldDIDForRepo(m.db, did, repository)
+}
+
 // GetFeaturedRepositories fetches top repositories sorted by stars and pulls
 func GetFeaturedRepositories(db *sql.DB, limit int, currentUserDID string) ([]FeaturedRepository, error) {
     query := `
...
         COALESCE((SELECT value FROM repository_annotations WHERE did = m.did AND repository = m.repository AND key = 'io.atcr.icon'), ''),
         rs.pull_count,
         rs.star_count,
-        COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = m.did AND repository = m.repository), 0),
-        COALESCE(rp.avatar_cid, '')
+        COALESCE((SELECT COUNT(*) FROM stars WHERE starrer_did = ? AND owner_did = m.did AND repository = m.repository), 0)
     FROM latest_manifests lm
     JOIN manifests m ON lm.latest_id = m.id
     JOIN users u ON m.did = u.did
     JOIN repo_stats rs ON m.did = rs.did AND m.repository = rs.repository
-    LEFT JOIN repo_pages rp ON m.did = rp.did AND m.repository = rp.repository
     ORDER BY rs.score DESC, rs.star_count DESC, rs.pull_count DESC, m.created_at DESC
     LIMIT ?
     `
...
     for rows.Next() {
         var f FeaturedRepository
         var isStarredInt int
-        var avatarCID string

         if err := rows.Scan(&f.OwnerDID, &f.OwnerHandle, &f.Repository,
-            &f.Title, &f.Description, &f.IconURL, &f.PullCount, &f.StarCount, &isStarredInt, &avatarCID); err != nil {
+            &f.Title, &f.Description, &f.IconURL, &f.PullCount, &f.StarCount, &isStarredInt); err != nil {
             return nil, err
         }
         f.IsStarred = isStarredInt > 0
-        // Prefer repo page avatar over annotation icon
-        if avatarCID != "" {
-            f.IconURL = BlobCDNURL(f.OwnerDID, avatarCID)
-        }

         featured = append(featured, f)
     }

     return featured, nil
 }
-
-// RepoPage represents a repository page record cached from PDS
-type RepoPage struct {
-    DID         string
-    Repository  string
-    Description string
-    AvatarCID   string
-    CreatedAt   time.Time
-    UpdatedAt   time.Time
-}
-
-// UpsertRepoPage inserts or updates a repo page record
-func UpsertRepoPage(db *sql.DB, did, repository, description, avatarCID string, createdAt, updatedAt time.Time) error {
-    _, err := db.Exec(`
-        INSERT INTO repo_pages (did, repository, description, avatar_cid, created_at, updated_at)
-        VALUES (?, ?, ?, ?, ?, ?)
-        ON CONFLICT(did, repository) DO UPDATE SET
-            description = excluded.description,
-            avatar_cid = excluded.avatar_cid,
-            updated_at = excluded.updated_at
-    `, did, repository, description, avatarCID, createdAt, updatedAt)
-    return err
-}
-
-// GetRepoPage retrieves a repo page record
-func GetRepoPage(db *sql.DB, did, repository string) (*RepoPage, error) {
-    var rp RepoPage
-    err := db.QueryRow(`
-        SELECT did, repository, description, avatar_cid, created_at, updated_at
-        FROM repo_pages
-        WHERE did = ? AND repository = ?
-    `, did, repository).Scan(&rp.DID, &rp.Repository, &rp.Description, &rp.AvatarCID, &rp.CreatedAt, &rp.UpdatedAt)
-    if err != nil {
-        return nil, err
-    }
-    return &rp, nil
-}
-
-// DeleteRepoPage deletes a repo page record
-func DeleteRepoPage(db *sql.DB, did, repository string) error {
-    _, err := db.Exec(`
-        DELETE FROM repo_pages WHERE did = ? AND repository = ?
-    `, did, repository)
-    return err
-}
-
-// GetRepoPagesByDID returns all repo pages for a DID
-func GetRepoPagesByDID(db *sql.DB, did string) ([]RepoPage, error) {
-    rows, err := db.Query(`
-        SELECT did, repository, description, avatar_cid, created_at, updated_at
-        FROM repo_pages
-        WHERE did = ?
-    `, did)
-    if err != nil {
-        return nil, err
-    }
-    defer rows.Close()
-
-    var pages []RepoPage
-    for rows.Next() {
-        var rp RepoPage
-        if err := rows.Scan(&rp.DID, &rp.Repository, &rp.Description, &rp.AvatarCID, &rp.CreatedAt, &rp.UpdatedAt); err != nil {
-            return nil, err
-        }
-        pages = append(pages, rp)
-    }
-    return pages, rows.Err()
-}
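
The new MetricsDB type is an adapter so HTTP middleware can depend on a small interface rather than a *sql.DB. The interface it satisfies lives on the middleware side and is not shown in this diff; a plausible shape, with an illustrative name, would be:

    // Hypothetical consumer-side declaration; db.NewMetricsDB(database)
    // satisfies it via the three forwarding methods added above.
    type repoMetrics interface {
        IncrementPullCount(did, repository string) error
        IncrementPushCount(did, repository string) error
        GetLatestHoldDIDForRepo(did, repository string) (string, error)
    }

The wrapper keeps the middleware decoupled from the concrete database handle and the package-level query functions.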

pkg/appview/db/schema.sql (+5, -10)

...
 );
 CREATE INDEX IF NOT EXISTS idx_crew_denials_retry ON hold_crew_denials(next_retry_at);

-CREATE TABLE IF NOT EXISTS repo_pages (
-    did TEXT NOT NULL,
-    repository TEXT NOT NULL,
-    description TEXT,
-    avatar_cid TEXT,
-    created_at TIMESTAMP NOT NULL,
-    updated_at TIMESTAMP NOT NULL,
-    PRIMARY KEY(did, repository),
-    FOREIGN KEY(did) REFERENCES users(did) ON DELETE CASCADE
+CREATE TABLE IF NOT EXISTS readme_cache (
+    url TEXT PRIMARY KEY,
+    html TEXT NOT NULL,
+    fetched_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP
 );
-CREATE INDEX IF NOT EXISTS idx_repo_pages_did ON repo_pages(did);
+CREATE INDEX IF NOT EXISTS idx_readme_cache_fetched ON readme_cache(fetched_at);
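
The restored readme_cache table pairs with the Health.ReadmeCacheTTL setting added in config.go. A sketch of how a lookup might honor that TTL, using a hypothetical helper name (the project's real cache accessors are not part of this diff; assumes database/sql and time are imported):

    // getCachedReadme returns cached README HTML for a URL if it is newer
    // than ttl; the boolean reports a usable cache hit.
    func getCachedReadme(db *sql.DB, url string, ttl time.Duration) (string, bool, error) {
        var html string
        var fetchedAt time.Time
        err := db.QueryRow(
            `SELECT html, fetched_at FROM readme_cache WHERE url = ?`, url,
        ).Scan(&html, &fetchedAt)
        if err == sql.ErrNoRows {
            return "", false, nil
        }
        if err != nil {
            return "", false, err
        }
        if time.Since(fetchedAt) > ttl {
            return "", false, nil // stale: caller should re-fetch and upsert
        }
        return html, true, nil
    }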

pkg/appview/handlers/errors.go (-32)

-package handlers
-
-import (
-    "html/template"
-    "net/http"
-)
-
-// NotFoundHandler handles 404 errors
-type NotFoundHandler struct {
-    Templates   *template.Template
-    RegistryURL string
-}
-
-func (h *NotFoundHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-    RenderNotFound(w, r, h.Templates, h.RegistryURL)
-}
-
-// RenderNotFound renders the 404 page template.
-// Use this from other handlers when a resource is not found.
-func RenderNotFound(w http.ResponseWriter, r *http.Request, templates *template.Template, registryURL string) {
-    w.WriteHeader(http.StatusNotFound)
-
-    data := struct {
-        PageData
-    }{
-        PageData: NewPageData(r, registryURL),
-    }
-
-    if err := templates.ExecuteTemplate(w, "404", data); err != nil {
-        http.Error(w, "Page not found", http.StatusNotFound)
-    }
-}

pkg/appview/handlers/images.go (-114)

...
 import (
     "database/sql"
     "encoding/json"
-    "errors"
     "fmt"
-    "io"
     "net/http"
     "strings"
-    "time"

     "atcr.io/pkg/appview/db"
     "atcr.io/pkg/appview/middleware"
...

     w.WriteHeader(http.StatusOK)
 }
-
-// UploadAvatarHandler handles uploading/updating a repository avatar
-type UploadAvatarHandler struct {
-    DB        *sql.DB
-    Refresher *oauth.Refresher
-}
-
-// validImageTypes are the allowed MIME types for avatars (matches lexicon)
-var validImageTypes = map[string]bool{
-    "image/png":  true,
-    "image/jpeg": true,
-    "image/webp": true,
-}
-
-func (h *UploadAvatarHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-    user := middleware.GetUser(r)
-    if user == nil {
-        http.Error(w, "Unauthorized", http.StatusUnauthorized)
-        return
-    }
-
-    repo := chi.URLParam(r, "repository")
-
-    // Parse multipart form (max 3MB to match lexicon maxSize)
-    if err := r.ParseMultipartForm(3 << 20); err != nil {
-        http.Error(w, "File too large (max 3MB)", http.StatusBadRequest)
-        return
-    }
-
-    file, header, err := r.FormFile("avatar")
-    if err != nil {
-        http.Error(w, "No file provided", http.StatusBadRequest)
-        return
-    }
-    defer file.Close()
-
-    // Validate MIME type
-    contentType := header.Header.Get("Content-Type")
-    if !validImageTypes[contentType] {
-        http.Error(w, "Invalid file type. Must be PNG, JPEG, or WebP", http.StatusBadRequest)
-        return
-    }
-
-    // Read file data
-    data, err := io.ReadAll(io.LimitReader(file, 3<<20+1)) // Read up to 3MB + 1 byte
-    if err != nil {
-        http.Error(w, "Failed to read file", http.StatusInternalServerError)
-        return
-    }
-    if len(data) > 3<<20 {
-        http.Error(w, "File too large (max 3MB)", http.StatusBadRequest)
-        return
-    }
-
-    // Create ATProto client with session provider (uses DoWithSession for DPoP nonce safety)
-    pdsClient := atproto.NewClientWithSessionProvider(user.PDSEndpoint, user.DID, h.Refresher)
-
-    // Upload blob to PDS
-    blobRef, err := pdsClient.UploadBlob(r.Context(), data, contentType)
-    if err != nil {
-        if handleOAuthError(r.Context(), h.Refresher, user.DID, err) {
-            http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized)
-            return
-        }
-        http.Error(w, fmt.Sprintf("Failed to upload image: %v", err), http.StatusInternalServerError)
-        return
-    }
-
-    // Fetch existing repo page record to preserve description
-    var existingDescription string
-    var existingCreatedAt time.Time
-    record, err := pdsClient.GetRecord(r.Context(), atproto.RepoPageCollection, repo)
-    if err == nil {
-        // Parse existing record to preserve description
-        var existingRecord atproto.RepoPageRecord
-        if jsonErr := json.Unmarshal(record.Value, &existingRecord); jsonErr == nil {
-            existingDescription = existingRecord.Description
-            existingCreatedAt = existingRecord.CreatedAt
-        }
-    } else if !errors.Is(err, atproto.ErrRecordNotFound) {
-        // Some other error - check if OAuth error
-        if handleOAuthError(r.Context(), h.Refresher, user.DID, err) {
-            http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized)
-            return
-        }
-        // Log but continue - we'll create a new record
-    }
-
-    // Create updated repo page record
-    repoPage := atproto.NewRepoPageRecord(repo, existingDescription, blobRef)
-    // Preserve original createdAt if record existed
-    if !existingCreatedAt.IsZero() {
-        repoPage.CreatedAt = existingCreatedAt
-    }
-
-    // Save record to PDS
-    _, err = pdsClient.PutRecord(r.Context(), atproto.RepoPageCollection, repo, repoPage)
-    if err != nil {
-        if handleOAuthError(r.Context(), h.Refresher, user.DID, err) {
-            http.Error(w, "Authentication failed, please log in again", http.StatusUnauthorized)
-            return
-        }
-        http.Error(w, fmt.Sprintf("Failed to update repository page: %v", err), http.StatusInternalServerError)
-        return
-    }
-
-    // Return new avatar URL
-    avatarURL := atproto.BlobCDNURL(user.DID, blobRef.Ref.Link)
-    w.Header().Set("Content-Type", "application/json")
-    json.NewEncoder(w).Encode(map[string]string{"avatarURL": avatarURL})
-}

...
     // Resolve identifier (handle or DID) to canonical DID and current handle
     did, resolvedHandle, pdsEndpoint, err := atproto.ResolveIdentity(r.Context(), identifier)
     if err != nil {
-        RenderNotFound(w, r, h.Templates, h.RegistryURL)
+        http.Error(w, "User not found", http.StatusNotFound)
         return
     }


pkg/appview/jetstream/backfill.go (+20, -261)

...
     "database/sql"
     "encoding/json"
     "fmt"
-    "io"
     "log/slog"
-    "net/http"
     "strings"
     "time"

     "atcr.io/pkg/appview/db"
-    "atcr.io/pkg/appview/readme"
     "atcr.io/pkg/atproto"
-    "atcr.io/pkg/auth/oauth"
 )

 // BackfillWorker uses com.atproto.sync.listReposByCollection to backfill historical data
 type BackfillWorker struct {
     db     *sql.DB
     client *atproto.Client
-    processor      *Processor       // Shared processor for DB operations
-    defaultHoldDID string           // Default hold DID from AppView config (e.g., "did:web:hold01.atcr.io")
-    testMode       bool             // If true, suppress warnings for external holds
-    refresher      *oauth.Refresher // OAuth refresher for PDS writes (optional, can be nil)
+    processor      *Processor // Shared processor for DB operations
+    defaultHoldDID string     // Default hold DID from AppView config (e.g., "did:web:hold01.atcr.io")
+    testMode       bool       // If true, suppress warnings for external holds
 }

 // BackfillState tracks backfill progress
...
 // NewBackfillWorker creates a backfill worker using sync API
 // defaultHoldDID should be in format "did:web:hold01.atcr.io"
 // To find a hold's DID, visit: https://hold-url/.well-known/did.json
-// refresher is optional - if provided, backfill will try to update PDS records when fetching README content
-func NewBackfillWorker(database *sql.DB, relayEndpoint, defaultHoldDID string, testMode bool, refresher *oauth.Refresher) (*BackfillWorker, error) {
+func NewBackfillWorker(database *sql.DB, relayEndpoint, defaultHoldDID string, testMode bool) (*BackfillWorker, error) {
     // Create client for relay - used only for listReposByCollection
     client := atproto.NewClient(relayEndpoint, "", "")
...
         processor:      NewProcessor(database, false), // No cache for batch processing
         defaultHoldDID: defaultHoldDID,
         testMode:       testMode,
-        refresher:      refresher,
     }, nil
 }
...
         atproto.TagCollection,           // io.atcr.tag
         atproto.StarCollection,          // io.atcr.sailor.star
         atproto.SailorProfileCollection, // io.atcr.sailor.profile
-        atproto.RepoPageCollection,      // io.atcr.repo.page
     }

     for _, collection := range collections {
...
         // Track what we found for deletion reconciliation
         switch collection {
         case atproto.ManifestCollection:
-            var manifestRecord atproto.ManifestRecord
+            var manifestRecord atproto.Manifest
             if err := json.Unmarshal(record.Value, &manifestRecord); err == nil {
                 foundManifestDigests = append(foundManifestDigests, manifestRecord.Digest)
             }
         case atproto.TagCollection:
-            var tagRecord atproto.TagRecord
+            var tagRecord atproto.Tag
             if err := json.Unmarshal(record.Value, &tagRecord); err == nil {
                 foundTags = append(foundTags, struct{ Repository, Tag string }{
                     Repository: tagRecord.Repository,
...
                 })
             }
         case atproto.StarCollection:
-            var starRecord atproto.StarRecord
+            var starRecord atproto.SailorStar
             if err := json.Unmarshal(record.Value, &starRecord); err == nil {
-                key := fmt.Sprintf("%s/%s", starRecord.Subject.DID, starRecord.Subject.Repository)
-                foundStars[key] = starRecord.CreatedAt
+                key := fmt.Sprintf("%s/%s", starRecord.Subject.Did, starRecord.Subject.Repository)
+                // Parse CreatedAt string to time.Time
+                createdAt, parseErr := time.Parse(time.RFC3339, starRecord.CreatedAt)
+                if parseErr != nil {
+                    createdAt = time.Now()
+                }
+                foundStars[key] = createdAt
             }
         }
...
         }
     }

-    // After processing repo pages, fetch descriptions from external sources if empty
-    if collection == atproto.RepoPageCollection {
-        if err := b.reconcileRepoPageDescriptions(ctx, did, pdsEndpoint); err != nil {
-            slog.Warn("Backfill failed to reconcile repo page descriptions", "did", did, "error", err)
-        }
-    }
-
     return recordCount, nil
 }
...
         return b.processor.ProcessStar(context.Background(), did, record.Value)
     case atproto.SailorProfileCollection:
         return b.processor.ProcessSailorProfile(ctx, did, record.Value, b.queryCaptainRecordWrapper)
-    case atproto.RepoPageCollection:
-        // rkey is extracted from the record URI, but for repo pages we use Repository field
-        return b.processor.ProcessRepoPage(ctx, did, record.URI, record.Value, false)
     default:
         return fmt.Errorf("unsupported collection: %s", collection)
     }
...

 // reconcileAnnotations ensures annotations come from the newest manifest in each repository
 // This fixes the out-of-order backfill issue where older manifests can overwrite newer annotations
+// NOTE: Currently disabled because the generated Manifest_Annotations type doesn't support
+// arbitrary key-value pairs. Would need to update lexicon schema with "unknown" type.
 func (b *BackfillWorker) reconcileAnnotations(ctx context.Context, did string, pdsClient *atproto.Client) error {
-    // Get all repositories for this DID
-    repositories, err := db.GetRepositoriesForDID(b.db, did)
-    if err != nil {
-        return fmt.Errorf("failed to get repositories: %w", err)
-    }
-
-    for _, repo := range repositories {
-        // Find newest manifest for this repository
-        newestManifest, err := db.GetNewestManifestForRepo(b.db, did, repo)
-        if err != nil {
-            slog.Warn("Backfill failed to get newest manifest for repo", "did", did, "repository", repo, "error", err)
-            continue // Skip on error
-        }
-
-        // Fetch the full manifest record from PDS using the digest as rkey
-        rkey := strings.TrimPrefix(newestManifest.Digest, "sha256:")
-        record, err := pdsClient.GetRecord(ctx, atproto.ManifestCollection, rkey)
-        if err != nil {
-            slog.Warn("Backfill failed to fetch manifest record for repo", "did", did, "repository", repo, "error", err)
-            continue // Skip on error
-        }
-
-        // Parse manifest record
-        var manifestRecord atproto.ManifestRecord
-        if err := json.Unmarshal(record.Value, &manifestRecord); err != nil {
-            slog.Warn("Backfill failed to parse manifest record for repo", "did", did, "repository", repo, "error", err)
-            continue
-        }
-
-        // Update annotations from newest manifest only
-        if len(manifestRecord.Annotations) > 0 {
-            // Filter out empty annotations
-            hasData := false
-            for _, value := range manifestRecord.Annotations {
-                if value != "" {
-                    hasData = true
-                    break
-                }
-            }
-
-            if hasData {
-                err = db.UpsertRepositoryAnnotations(b.db, did, repo, manifestRecord.Annotations)
-                if err != nil {
-                    slog.Warn("Backfill failed to reconcile annotations for repo", "did", did, "repository", repo, "error", err)
-                } else {
-                    slog.Info("Backfill reconciled annotations for repo from newest manifest", "did", did, "repository", repo, "digest", newestManifest.Digest)
-                }
-            }
-        }
-    }
-
-    return nil
-}
-
-// reconcileRepoPageDescriptions fetches README content from external sources for repo pages with empty descriptions
-// If the user has an OAuth session, it updates the PDS record (source of truth)
-// Otherwise, it just stores the fetched content in the database
-func (b *BackfillWorker) reconcileRepoPageDescriptions(ctx context.Context, did, pdsEndpoint string) error {
-    // Get all repo pages for this DID
-    repoPages, err := db.GetRepoPagesByDID(b.db, did)
-    if err != nil {
-        return fmt.Errorf("failed to get repo pages: %w", err)
-    }
-
-    for _, page := range repoPages {
-        // Skip pages that already have a description
-        if page.Description != "" {
-            continue
-        }
-
-        // Get annotations from the repository's manifest
-        annotations, err := db.GetRepositoryAnnotations(b.db, did, page.Repository)
-        if err != nil {
-            slog.Debug("Failed to get annotations for repo page", "did", did, "repository", page.Repository, "error", err)
-            continue
-        }
-
-        // Try to fetch README content from external sources
-        description := b.fetchReadmeContent(ctx, annotations)
-        if description == "" {
-            // No README content available, skip
-            continue
-        }
-
-        slog.Info("Fetched README for repo page", "did", did, "repository", page.Repository, "descriptionLength", len(description))
-
-        // Try to update PDS if we have OAuth session
-        pdsUpdated := false
-        if b.refresher != nil {
-            if err := b.updateRepoPageInPDS(ctx, did, pdsEndpoint, page.Repository, description, page.AvatarCID); err != nil {
-                slog.Debug("Could not update repo page in PDS, falling back to DB-only", "did", did, "repository", page.Repository, "error", err)
-            } else {
-                pdsUpdated = true
-                slog.Info("Updated repo page in PDS with fetched description", "did", did, "repository", page.Repository)
-            }
-        }
-
-        // Always update database with the fetched content
-        if err := db.UpsertRepoPage(b.db, did, page.Repository, description, page.AvatarCID, page.CreatedAt, time.Now()); err != nil {
-            slog.Warn("Failed to update repo page in database", "did", did, "repository", page.Repository, "error", err)
-        } else if !pdsUpdated {
-            slog.Info("Updated repo page in database (PDS not updated)", "did", did, "repository", page.Repository)
-        }
-    }
-
-    return nil
-}
-
-// fetchReadmeContent attempts to fetch README content from external sources based on annotations
-// Priority: io.atcr.readme annotation > derived from org.opencontainers.image.source
-func (b *BackfillWorker) fetchReadmeContent(ctx context.Context, annotations map[string]string) string {
-    // Create a context with timeout for README fetching
-    fetchCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
-    defer cancel()
-
-    // Priority 1: Direct README URL from io.atcr.readme annotation
-    if readmeURL := annotations["io.atcr.readme"]; readmeURL != "" {
-        content, err := b.fetchRawReadme(fetchCtx, readmeURL)
-        if err != nil {
-            slog.Debug("Failed to fetch README from io.atcr.readme annotation", "url", readmeURL, "error", err)
-        } else if content != "" {
-            return content
-        }
-    }
-
-    // Priority 2: Derive README URL from org.opencontainers.image.source
-    if sourceURL := annotations["org.opencontainers.image.source"]; sourceURL != "" {
-        // Try main branch first, then master
-        for _, branch := range []string{"main", "master"} {
-            readmeURL := readme.DeriveReadmeURL(sourceURL, branch)
-            if readmeURL == "" {
-                continue
-            }
-
-            content, err := b.fetchRawReadme(fetchCtx, readmeURL)
-            if err != nil {
-                // Only log non-404 errors (404 is expected when trying main vs master)
-                if !readme.Is404(err) {
-                    slog.Debug("Failed to fetch README from source URL", "url", readmeURL, "branch", branch, "error", err)
-                }
-                continue
-            }
-
-            if content != "" {
-                return content
-            }
-        }
-    }
-
-    return ""
-}
-
-// fetchRawReadme fetches raw markdown content from a URL
-func (b *BackfillWorker) fetchRawReadme(ctx context.Context, readmeURL string) (string, error) {
-    req, err := http.NewRequestWithContext(ctx, "GET", readmeURL, nil)
-    if err != nil {
-        return "", fmt.Errorf("failed to create request: %w", err)
-    }
-
-    req.Header.Set("User-Agent", "ATCR-Backfill-README-Fetcher/1.0")
-
-    client := &http.Client{
-        Timeout: 10 * time.Second,
-        CheckRedirect: func(req *http.Request, via []*http.Request) error {
-            if len(via) >= 5 {
-                return fmt.Errorf("too many redirects")
-            }
-            return nil
-        },
-    }
-
-    resp, err := client.Do(req)
-    if err != nil {
-        return "", fmt.Errorf("failed to fetch URL: %w", err)
-    }
-    defer resp.Body.Close()
-
-    if resp.StatusCode != http.StatusOK {
-        return "", fmt.Errorf("status %d", resp.StatusCode)
-    }
-
-    // Limit content size to 100KB
-    limitedReader := io.LimitReader(resp.Body, 100*1024)
-    content, err := io.ReadAll(limitedReader)
-    if err != nil {
-        return "", fmt.Errorf("failed to read response body: %w", err)
-    }
-
-    return string(content), nil
-}
-
-// updateRepoPageInPDS updates the repo page record in the user's PDS using OAuth
-func (b *BackfillWorker) updateRepoPageInPDS(ctx context.Context, did, pdsEndpoint, repository, description, avatarCID string) error {
-    if b.refresher == nil {
-        return fmt.Errorf("no OAuth refresher available")
-    }
-
-    // Create ATProto client with session provider
-    pdsClient := atproto.NewClientWithSessionProvider(pdsEndpoint, did, b.refresher)
-
-    // Get existing repo page record to preserve other fields
-    existingRecord, err := pdsClient.GetRecord(ctx, atproto.RepoPageCollection, repository)
-    var createdAt time.Time
-    var avatarRef *atproto.ATProtoBlobRef
-
-    if err == nil && existingRecord != nil {
-        // Parse existing record
-        var existingPage atproto.RepoPageRecord
-        if err := json.Unmarshal(existingRecord.Value, &existingPage); err == nil {
-            createdAt = existingPage.CreatedAt
-            avatarRef = existingPage.Avatar
-        }
-    }
-
-    if createdAt.IsZero() {
-        createdAt = time.Now()
-    }
-
-    // Create updated repo page record
-    repoPage := &atproto.RepoPageRecord{
-        Type:        atproto.RepoPageCollection,
-        Repository:  repository,
-        Description: description,
-        Avatar:      avatarRef,
-        CreatedAt:   createdAt,
-        UpdatedAt:   time.Now(),
-    }
-
-    // Write to PDS - this will use DoWithSession internally
-    _, err = pdsClient.PutRecord(ctx, atproto.RepoPageCollection, repository, repoPage)
-    if err != nil {
-        return fmt.Errorf("failed to write to PDS: %w", err)
-    }
-
+    // TODO: Re-enable once lexicon supports annotations as map[string]string
+    // For now, skip annotation reconciliation as the generated type is an empty struct
+    _ = did
+    _ = pdsClient
     return nil
 }
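
The RFC3339 parse-with-fallback pattern introduced for star records above is repeated in processor.go for manifests, tags, and stars. A small shared helper could capture it; this is a sketch of a hypothetical helper that does not exist in the change itself:

    // parseTimeOrNow parses an RFC3339 record timestamp, falling back to
    // the current time when the string is missing or malformed.
    func parseTimeOrNow(s string) time.Time {
        t, err := time.Parse(time.RFC3339, s)
        if err != nil {
            return time.Now()
        }
        return t
    }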

pkg/appview/jetstream/processor.go (+51, -65)

...
 // Returns the manifest ID for further processing (layers/references)
 func (p *Processor) ProcessManifest(ctx context.Context, did string, recordData []byte) (int64, error) {
     // Unmarshal manifest record
-    var manifestRecord atproto.ManifestRecord
+    var manifestRecord atproto.Manifest
     if err := json.Unmarshal(recordData, &manifestRecord); err != nil {
         return 0, fmt.Errorf("failed to unmarshal manifest: %w", err)
     }
...
     // Extract hold DID from manifest (with fallback for legacy manifests)
     // New manifests use holdDid field (DID format)
     // Old manifests use holdEndpoint field (URL format) - convert to DID
-    holdDID := manifestRecord.HoldDID
-    if holdDID == "" && manifestRecord.HoldEndpoint != "" {
+    var holdDID string
+    if manifestRecord.HoldDid != nil && *manifestRecord.HoldDid != "" {
+        holdDID = *manifestRecord.HoldDid
+    } else if manifestRecord.HoldEndpoint != nil && *manifestRecord.HoldEndpoint != "" {
         // Legacy manifest - convert URL to DID
-        holdDID = atproto.ResolveHoldDIDFromURL(manifestRecord.HoldEndpoint)
+        holdDID = atproto.ResolveHoldDIDFromURL(*manifestRecord.HoldEndpoint)
+    }
+
+    // Parse CreatedAt string to time.Time
+    createdAt, err := time.Parse(time.RFC3339, manifestRecord.CreatedAt)
+    if err != nil {
+        // Fall back to current time if parsing fails
+        createdAt = time.Now()
     }

     // Prepare manifest for insertion (WITHOUT annotation fields)
...
         Repository:    manifestRecord.Repository,
         Digest:        manifestRecord.Digest,
         MediaType:     manifestRecord.MediaType,
-        SchemaVersion: manifestRecord.SchemaVersion,
+        SchemaVersion: int(manifestRecord.SchemaVersion),
         HoldEndpoint:  holdDID,
-        CreatedAt:     manifestRecord.CreatedAt,
+        CreatedAt:     createdAt,
         // Annotations removed - stored separately in repository_annotations table
     }
...
         }
     }

-    // Update repository annotations ONLY if manifest has at least one non-empty annotation
-    if manifestRecord.Annotations != nil {
-        hasData := false
-        for _, value := range manifestRecord.Annotations {
-            if value != "" {
-                hasData = true
-                break
-            }
-        }
-
-        if hasData {
-            // Replace all annotations for this repository
-            err = db.UpsertRepositoryAnnotations(p.db, did, manifestRecord.Repository, manifestRecord.Annotations)
-            if err != nil {
-                return 0, fmt.Errorf("failed to upsert annotations: %w", err)
-            }
-        }
-    }
+    // Note: Repository annotations are currently disabled because the generated
+    // Manifest_Annotations type doesn't support arbitrary key-value pairs.
+    // The lexicon would need to use "unknown" type for annotations to support this.
+    // TODO: Re-enable once lexicon supports annotations as map[string]string
+    _ = manifestRecord.Annotations

     // Insert manifest references or layers
     if isManifestList {
...

         if ref.Platform != nil {
             platformArch = ref.Platform.Architecture
-            platformOS = ref.Platform.OS
-            platformVariant = ref.Platform.Variant
-            platformOSVersion = ref.Platform.OSVersion
+            platformOS = ref.Platform.Os
+            if ref.Platform.Variant != nil {
+                platformVariant = *ref.Platform.Variant
+            }
+            if ref.Platform.OsVersion != nil {
+                platformOSVersion = *ref.Platform.OsVersion
+            }
         }

-        // Detect attestation manifests from annotations
+        // Note: Attestation detection via annotations is currently disabled
+        // because the generated Manifest_ManifestReference_Annotations type
+        // doesn't support arbitrary key-value pairs.
         isAttestation := false
-        if ref.Annotations != nil {
-            if refType, ok := ref.Annotations["vnd.docker.reference.type"]; ok {
-                isAttestation = refType == "attestation-manifest"
-            }
-        }

         if err := db.InsertManifestReference(p.db, &db.ManifestReference{
             ManifestID: manifestID,
...
 // ProcessTag processes a tag record and stores it in the database
 func (p *Processor) ProcessTag(ctx context.Context, did string, recordData []byte) error {
     // Unmarshal tag record
-    var tagRecord atproto.TagRecord
+    var tagRecord atproto.Tag
     if err := json.Unmarshal(recordData, &tagRecord); err != nil {
         return fmt.Errorf("failed to unmarshal tag: %w", err)
     }
...
         return fmt.Errorf("failed to get manifest digest from tag record: %w", err)
     }

+    // Parse CreatedAt string to time.Time
+    tagCreatedAt, err := time.Parse(time.RFC3339, tagRecord.CreatedAt)
+    if err != nil {
+        // Fall back to current time if parsing fails
+        tagCreatedAt = time.Now()
+    }
+
     // Insert or update tag
     return db.UpsertTag(p.db, &db.Tag{
         DID:        did,
         Repository: tagRecord.Repository,
         Tag:        tagRecord.Tag,
         Digest:     manifestDigest,
-        CreatedAt:  tagRecord.UpdatedAt,
+        CreatedAt:  tagCreatedAt,
     })
 }

 // ProcessStar processes a star record and stores it in the database
 func (p *Processor) ProcessStar(ctx context.Context, did string, recordData []byte) error {
     // Unmarshal star record
-    var starRecord atproto.StarRecord
+    var starRecord atproto.SailorStar
     if err := json.Unmarshal(recordData, &starRecord); err != nil {
         return fmt.Errorf("failed to unmarshal star: %w", err)
     }
...
     // The DID here is the starrer (user who starred)
     // The subject contains the owner DID and repository
     // Star count will be calculated on demand from the stars table
-    return db.UpsertStar(p.db, did, starRecord.Subject.DID, starRecord.Subject.Repository, starRecord.CreatedAt)
+    // Parse the CreatedAt string to time.Time
+    createdAt, err := time.Parse(time.RFC3339, starRecord.CreatedAt)
+    if err != nil {
+        // Fall back to current time if parsing fails
+        createdAt = time.Now()
+    }
+    return db.UpsertStar(p.db, did, starRecord.Subject.Did, starRecord.Subject.Repository, createdAt)
 }

 // ProcessSailorProfile processes a sailor profile record
 // This is primarily used by backfill to cache captain records for holds
 func (p *Processor) ProcessSailorProfile(ctx context.Context, did string, recordData []byte, queryCaptainFn func(context.Context, string) error) error {
     // Unmarshal sailor profile record
-    var profileRecord atproto.SailorProfileRecord
+    var profileRecord atproto.SailorProfile
     if err := json.Unmarshal(recordData, &profileRecord); err != nil {
         return fmt.Errorf("failed to unmarshal sailor profile: %w", err)
     }

     // Skip if no default hold set
-    if profileRecord.DefaultHold == "" {
+    if profileRecord.DefaultHold == nil || *profileRecord.DefaultHold == "" {
         return nil
     }

     // Convert hold URL/DID to canonical DID
-    holdDID := atproto.ResolveHoldDIDFromURL(profileRecord.DefaultHold)
+    holdDID := atproto.ResolveHoldDIDFromURL(*profileRecord.DefaultHold)
     if holdDID == "" {
-        slog.Warn("Invalid hold reference in profile", "component", "processor", "did", did, "default_hold", profileRecord.DefaultHold)
+        slog.Warn("Invalid hold reference in profile", "component", "processor", "did", did, "default_hold", *profileRecord.DefaultHold)
         return nil
     }
...
     }

     return nil
-}
-
-// ProcessRepoPage processes a repository page record
-// This is called when Jetstream receives a repo page create/update event
-func (p *Processor) ProcessRepoPage(ctx context.Context, did string, rkey string, recordData []byte, isDelete bool) error {
-    if isDelete {
-        // Delete the repo page from our cache
-        return db.DeleteRepoPage(p.db, did, rkey)
-    }
-
-    // Unmarshal repo page record
-    var pageRecord atproto.RepoPageRecord
-    if err := json.Unmarshal(recordData, &pageRecord); err != nil {
-        return fmt.Errorf("failed to unmarshal repo page: %w", err)
-    }
-
-    // Extract avatar CID if present
-    avatarCID := ""
-    if pageRecord.Avatar != nil && pageRecord.Avatar.Ref.Link != "" {
-        avatarCID = pageRecord.Avatar.Ref.Link
-    }
-
-    // Upsert to database
-    return db.UpsertRepoPage(p.db, did, pageRecord.Repository, pageRecord.Description, avatarCID, pageRecord.CreatedAt, pageRecord.UpdatedAt)
 }

 // ProcessIdentity handles identity change events (handle updates)
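
The generated lexicon types expose optional fields as pointers (HoldDid, HoldEndpoint, Variant, OsVersion, DefaultHold), which is why the new code nil-checks before dereferencing. A tiny convenience in the same spirit, hypothetical and not part of this change, could trim that boilerplate:

    // strOrEmpty returns "" for a nil optional string field.
    func strOrEmpty(p *string) string {
        if p == nil {
            return ""
        }
        return *p
    }

    // Example against the generated Manifest type:
    //   holdDID := strOrEmpty(manifestRecord.HoldDid)
    //   if holdDID == "" {
    //       holdDID = atproto.ResolveHoldDIDFromURL(strOrEmpty(manifestRecord.HoldEndpoint))
    //   }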

pkg/appview/jetstream/processor_test.go (+36, -54)

...
     _ "github.com/mattn/go-sqlite3"
 )

 // setupTestDB creates an in-memory SQLite database for testing
 func setupTestDB(t *testing.T) *sql.DB {
     database, err := sql.Open("sqlite3", ":memory:")
...
     ctx := context.Background()

     // Create test manifest record
-    manifestRecord := &atproto.ManifestRecord{
         Repository:    "test-app",
         Digest:        "sha256:abc123",
         MediaType:     "application/vnd.oci.image.manifest.v1+json",
         SchemaVersion: 2,
-        HoldEndpoint:  "did:web:hold01.atcr.io",
-        CreatedAt:     time.Now(),
-        Config: &atproto.BlobReference{
             Digest: "sha256:config123",
             Size:   1234,
         },
-        Layers: []atproto.BlobReference{
             {Digest: "sha256:layer1", Size: 5000, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"},
             {Digest: "sha256:layer2", Size: 3000, MediaType: "application/vnd.oci.image.layer.v1.tar+gzip"},
         },
-        Annotations: map[string]string{
-            "org.opencontainers.image.title":       "Test App",
-            "org.opencontainers.image.description": "A test application",
-            "org.opencontainers.image.source":      "https://github.com/test/app",
-            "org.opencontainers.image.licenses":    "MIT",
-            "io.atcr.icon":                         "https://example.com/icon.png",
-        },
     }

     // Marshal to bytes for ProcessManifest
...
         t.Errorf("Expected 1 manifest, got %d", count)
     }

-    // Verify annotations were stored in repository_annotations table
-    var title, source string
-    err = database.QueryRow("SELECT value FROM repository_annotations WHERE did = ? AND repository = ? AND key = ?",
-        "did:plc:test123", "test-app", "org.opencontainers.image.title").Scan(&title)
-    if err != nil {
-        t.Fatalf("Failed to query title annotation: %v", err)
-    }
-    if title != "Test App" {
-        t.Errorf("title = %q, want %q", title, "Test App")
-    }
-
-    err = database.QueryRow("SELECT value FROM repository_annotations WHERE did = ? AND repository = ? AND key = ?",
-        "did:plc:test123", "test-app", "org.opencontainers.image.source").Scan(&source)
-    if err != nil {
-        t.Fatalf("Failed to query source annotation: %v", err)
-    }
-    if source != "https://github.com/test/app" {
-        t.Errorf("source = %q, want %q", source, "https://github.com/test/app")
-    }
-
     // Verify layers were inserted
     var layerCount int
...
     ctx := context.Background()

     // Create test manifest list record
-    manifestRecord := &atproto.ManifestRecord{
         Repository:    "test-app",
         Digest:        "sha256:list123",
         MediaType:     "application/vnd.oci.image.index.v1+json",
         SchemaVersion: 2,
-        HoldEndpoint:  "did:web:hold01.atcr.io",
-        CreatedAt:     time.Now(),
-        Manifests: []atproto.ManifestReference{
             {
                 Digest:    "sha256:amd64manifest",
                 MediaType: "application/vnd.oci.image.manifest.v1+json",
                 Size:      1000,
-                Platform: &atproto.Platform{
                     Architecture: "amd64",
-                    OS:           "linux",
                 },
             },
             {
                 Digest:    "sha256:arm64manifest",
                 MediaType: "application/vnd.oci.image.manifest.v1+json",
                 Size:      1100,
-                Platform: &atproto.Platform{
                     Architecture: "arm64",
-                    OS:           "linux",
-                    Variant:      "v8",
                 },
             },
         },
...
     ctx := context.Background()

     // Create test tag record (using ManifestDigest field for simplicity)
-    tagRecord := &atproto.TagRecord{
         Repository:     "test-app",
         Tag:            "latest",
-        ManifestDigest: "sha256:abc123",
-        UpdatedAt:      time.Now(),
     }

     // Marshal to bytes for ProcessTag
...
     }

     // Test upserting same tag with new digest
-    tagRecord.ManifestDigest = "sha256:newdigest"
     recordBytes, err = json.Marshal(tagRecord)
     if err != nil {
         t.Fatalf("Failed to marshal tag: %v", err)
...
     ctx := context.Background()

     // Create test star record
-    starRecord := &atproto.StarRecord{
-        Subject: atproto.StarSubject{
-            DID:        "did:plc:owner123",
             Repository: "test-app",
         },
-        CreatedAt: time.Now(),
     }

     // Marshal to bytes for ProcessStar
...
     p := NewProcessor(database, false)
     ctx := context.Background()

-    manifestRecord := &atproto.ManifestRecord{
         Repository:    "test-app",
         Digest:        "sha256:abc123",
         MediaType:     "application/vnd.oci.image.manifest.v1+json",
         SchemaVersion: 2,
-        HoldEndpoint:  "did:web:hold01.atcr.io",
-        CreatedAt:     time.Now(),
     }

     // Marshal to bytes for ProcessManifest
...
     ctx := context.Background()

     // Manifest with nil annotations
-    manifestRecord := &atproto.ManifestRecord{
         Repository:    "test-app",
         Digest:        "sha256:abc123",
         MediaType:     "application/vnd.oci.image.manifest.v1+json",
         SchemaVersion: 2,
-        HoldEndpoint:  "did:web:hold01.atcr.io",
-        CreatedAt:     time.Now(),
         Annotations:   nil,
     }
···61 jetstreamURL: jetstreamURL,
62 startCursor: startCursor,
63 wantedCollections: []string{
64- "io.atcr.*", // Subscribe to all ATCR collections
65 },
66 processor: NewProcessor(database, true), // Use cache for live streaming
67 }
···310 case atproto.StarCollection:
311 slog.Info("Jetstream processing star event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
312 return w.processStar(commit)
313- case atproto.RepoPageCollection:
314- slog.Info("Jetstream processing repo page event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
315- return w.processRepoPage(commit)
316 default:
317 // Ignore other collections
318 return nil
···435436 // Use shared processor for DB operations
437 return w.processor.ProcessStar(context.Background(), commit.DID, recordBytes)
438-}
439-440-// processRepoPage processes a repo page commit event
441-func (w *Worker) processRepoPage(commit *CommitEvent) error {
442- // Resolve and upsert user with handle/PDS endpoint
443- if err := w.processor.EnsureUser(context.Background(), commit.DID); err != nil {
444- return fmt.Errorf("failed to ensure user: %w", err)
445- }
446-447- isDelete := commit.Operation == "delete"
448-449- if isDelete {
450- // Delete - rkey is the repository name
451- slog.Info("Jetstream deleting repo page", "did", commit.DID, "repository", commit.RKey)
452- if err := w.processor.ProcessRepoPage(context.Background(), commit.DID, commit.RKey, nil, true); err != nil {
453- slog.Error("Jetstream ERROR deleting repo page", "error", err)
454- return err
455- }
456- slog.Info("Jetstream successfully deleted repo page", "did", commit.DID, "repository", commit.RKey)
457- return nil
458- }
459-460- // Parse repo page record
461- if commit.Record == nil {
462- return nil
463- }
464-465- // Marshal map to bytes for processing
466- recordBytes, err := json.Marshal(commit.Record)
467- if err != nil {
468- return fmt.Errorf("failed to marshal record: %w", err)
469- }
470-471- // Use shared processor for DB operations
472- return w.processor.ProcessRepoPage(context.Background(), commit.DID, commit.RKey, recordBytes, false)
473}
474475// processIdentity processes an identity event (handle change)
···61 jetstreamURL: jetstreamURL,
62 startCursor: startCursor,
63 wantedCollections: []string{
64+ atproto.ManifestCollection, // io.atcr.manifest
65+ atproto.TagCollection, // io.atcr.tag
66+ atproto.StarCollection, // io.atcr.sailor.star
67 },
68 processor: NewProcessor(database, true), // Use cache for live streaming
69 }
···312 case atproto.StarCollection:
313 slog.Info("Jetstream processing star event", "did", commit.DID, "operation", commit.Operation, "rkey", commit.RKey)
314 return w.processStar(commit)
315 default:
316 // Ignore other collections
317 return nil
···434435 // Use shared processor for DB operations
436 return w.processor.ProcessStar(context.Background(), commit.DID, recordBytes)
437}
438439// processIdentity processes an identity event (handle change)
+6-59
pkg/appview/middleware/auth.go
···11 "net/url"
1213 "atcr.io/pkg/appview/db"
14- "atcr.io/pkg/auth"
15- "atcr.io/pkg/auth/oauth"
16)
1718type contextKey string
1920const userKey contextKey = "user"
2122-// WebAuthDeps contains dependencies for web auth middleware
23-type WebAuthDeps struct {
24- SessionStore *db.SessionStore
25- Database *sql.DB
26- Refresher *oauth.Refresher
27- DefaultHoldDID string
28-}
29-30// RequireAuth is middleware that requires authentication
31func RequireAuth(store *db.SessionStore, database *sql.DB) func(http.Handler) http.Handler {
32- return RequireAuthWithDeps(WebAuthDeps{
33- SessionStore: store,
34- Database: database,
35- })
36-}
37-38-// RequireAuthWithDeps is middleware that requires authentication and creates UserContext
39-func RequireAuthWithDeps(deps WebAuthDeps) func(http.Handler) http.Handler {
40 return func(next http.Handler) http.Handler {
41 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
42 sessionID, ok := getSessionID(r)
···50 return
51 }
5253- sess, ok := deps.SessionStore.Get(sessionID)
54 if !ok {
55 // Build return URL with query parameters preserved
56 returnTo := r.URL.Path
···62 }
6364 // Look up full user from database to get avatar
65- user, err := db.GetUserByDID(deps.Database, sess.DID)
66 if err != nil || user == nil {
67 // Fallback to session data if DB lookup fails
68 user = &db.User{
···72 }
73 }
7475- ctx := r.Context()
76- ctx = context.WithValue(ctx, userKey, user)
77-78- // Create UserContext for authenticated users (enables EnsureUserSetup)
79- if deps.Refresher != nil {
80- userCtx := auth.NewUserContext(sess.DID, auth.AuthMethodOAuth, r.Method, &auth.Dependencies{
81- Refresher: deps.Refresher,
82- DefaultHoldDID: deps.DefaultHoldDID,
83- })
84- userCtx.SetPDS(sess.Handle, sess.PDSEndpoint)
85- userCtx.EnsureUserSetup()
86- ctx = auth.WithUserContext(ctx, userCtx)
87- }
88-89 next.ServeHTTP(w, r.WithContext(ctx))
90 })
91 }
···9394// OptionalAuth is middleware that optionally includes user if authenticated
95func OptionalAuth(store *db.SessionStore, database *sql.DB) func(http.Handler) http.Handler {
96- return OptionalAuthWithDeps(WebAuthDeps{
97- SessionStore: store,
98- Database: database,
99- })
100-}
101-102-// OptionalAuthWithDeps is middleware that optionally includes user and UserContext if authenticated
103-func OptionalAuthWithDeps(deps WebAuthDeps) func(http.Handler) http.Handler {
104 return func(next http.Handler) http.Handler {
105 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
106 sessionID, ok := getSessionID(r)
107 if ok {
108- if sess, ok := deps.SessionStore.Get(sessionID); ok {
109 // Look up full user from database to get avatar
110- user, err := db.GetUserByDID(deps.Database, sess.DID)
111 if err != nil || user == nil {
112 // Fallback to session data if DB lookup fails
113 user = &db.User{
···116 PDSEndpoint: sess.PDSEndpoint,
117 }
118 }
119-120- ctx := r.Context()
121- ctx = context.WithValue(ctx, userKey, user)
122-123- // Create UserContext for authenticated users (enables EnsureUserSetup)
124- if deps.Refresher != nil {
125- userCtx := auth.NewUserContext(sess.DID, auth.AuthMethodOAuth, r.Method, &auth.Dependencies{
126- Refresher: deps.Refresher,
127- DefaultHoldDID: deps.DefaultHoldDID,
128- })
129- userCtx.SetPDS(sess.Handle, sess.PDSEndpoint)
130- userCtx.EnsureUserSetup()
131- ctx = auth.WithUserContext(ctx, userCtx)
132- }
133-134 r = r.WithContext(ctx)
135 }
136 }
···11 "net/url"
1213 "atcr.io/pkg/appview/db"
14)
1516type contextKey string
1718const userKey contextKey = "user"
1920// RequireAuth is middleware that requires authentication
21func RequireAuth(store *db.SessionStore, database *sql.DB) func(http.Handler) http.Handler {
22 return func(next http.Handler) http.Handler {
23 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
24 sessionID, ok := getSessionID(r)
···32 return
33 }
3435+ sess, ok := store.Get(sessionID)
36 if !ok {
37 // Build return URL with query parameters preserved
38 returnTo := r.URL.Path
···44 }
4546 // Look up full user from database to get avatar
47+ user, err := db.GetUserByDID(database, sess.DID)
48 if err != nil || user == nil {
49 // Fallback to session data if DB lookup fails
50 user = &db.User{
···54 }
55 }
5657+ ctx := context.WithValue(r.Context(), userKey, user)
58 next.ServeHTTP(w, r.WithContext(ctx))
59 })
60 }
···6263// OptionalAuth is middleware that optionally includes user if authenticated
64func OptionalAuth(store *db.SessionStore, database *sql.DB) func(http.Handler) http.Handler {
65 return func(next http.Handler) http.Handler {
66 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
67 sessionID, ok := getSessionID(r)
68 if ok {
69+ if sess, ok := store.Get(sessionID); ok {
70 // Look up full user from database to get avatar
71+ user, err := db.GetUserByDID(database, sess.DID)
72 if err != nil || user == nil {
73 // Fallback to session data if DB lookup fails
74 user = &db.User{
···77 PDSEndpoint: sess.PDSEndpoint,
78 }
79 }
80+ ctx := context.WithValue(r.Context(), userKey, user)
81 r = r.WithContext(ctx)
82 }
83 }
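A usage sketch of the simplified middleware after this change; the route paths and inline handlers below are hypothetical, not part of the diff.

package main

import (
	"database/sql"
	"net/http"

	"atcr.io/pkg/appview/db"
	"atcr.io/pkg/appview/middleware"
)

// buildRouter shows how the two middlewares wrap handlers after the refactor.
func buildRouter(store *db.SessionStore, database *sql.DB) http.Handler {
	requireAuth := middleware.RequireAuth(store, database)
	optionalAuth := middleware.OptionalAuth(store, database)

	mux := http.NewServeMux()
	// Signed-in users only; missing or expired sessions are redirected to login.
	mux.Handle("/settings", requireAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("settings"))
	})))
	// Public page; the user is attached to the context when a session exists.
	mux.Handle("/", optionalAuth(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("home"))
	})))
	return mux
}

func main() {
	// A real main would build the session store and database, then serve buildRouter(...).
}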
+340-111
pkg/appview/middleware/registry.go
···23import (
4 "context"
5- "database/sql"
6 "fmt"
7 "log/slog"
8 "net/http"
9 "strings"
1011 "github.com/distribution/distribution/v3"
12 registrymw "github.com/distribution/distribution/v3/registry/middleware/registry"
13 "github.com/distribution/distribution/v3/registry/storage/driver"
14 "github.com/distribution/reference"
···26// authMethodKey is the context key for storing auth method from JWT
27const authMethodKey contextKey = "auth.method"
2829-// pullerDIDKey is the context key for storing the authenticated user's DID from JWT
30-const pullerDIDKey contextKey = "puller.did"
3132// Global variables for initialization only
33// These are set by main.go during startup and copied into NamespaceResolver instances.
34// After initialization, request handling uses the NamespaceResolver's instance fields.
35var (
36- globalRefresher *oauth.Refresher
37- globalDatabase *sql.DB
38- globalAuthorizer auth.HoldAuthorizer
39)
4041// SetGlobalRefresher sets the OAuth refresher instance during initialization
···4647// SetGlobalDatabase sets the database instance during initialization
48// Must be called before the registry starts serving requests
49-func SetGlobalDatabase(database *sql.DB) {
50 globalDatabase = database
51}
52···56 globalAuthorizer = authorizer
57}
5800000059func init() {
60 // Register the name resolution middleware
61 registrymw.Register("atproto-resolver", initATProtoResolver)
···64// NamespaceResolver wraps a namespace and resolves names
65type NamespaceResolver struct {
66 distribution.Namespace
67- defaultHoldDID string // Default hold DID (e.g., "did:web:hold01.atcr.io")
68- baseURL string // Base URL for error messages (e.g., "https://atcr.io")
69- testMode bool // If true, fallback to default hold when user's hold is unreachable
70- refresher *oauth.Refresher // OAuth session manager (copied from global on init)
71- sqlDB *sql.DB // Database for hold DID lookup and metrics (copied from global on init)
72- authorizer auth.HoldAuthorizer // Hold authorization (copied from global on init)
73}
7475// initATProtoResolver initializes the name resolution middleware
···96 // Copy shared services from globals into the instance
97 // This avoids accessing globals during request handling
98 return &NamespaceResolver{
99- Namespace: ns,
100- defaultHoldDID: defaultHoldDID,
101- baseURL: baseURL,
102- testMode: testMode,
103- refresher: globalRefresher,
104- sqlDB: globalDatabase,
105- authorizer: globalAuthorizer,
106 }, nil
107}
108109// Repository resolves the repository name and delegates to underlying namespace
···139 }
140 ctx = context.WithValue(ctx, holdDIDKey, holdDID)
141142- // Note: Profile and crew membership are now ensured in UserContextMiddleware
143- // via EnsureUserSetup() - no need to call here
144145 // Create a new reference with identity/image format
146 // Use the identity (or DID) as the namespace to ensure canonical format
···157 return nil, err
158 }
159000000000000000000000000000000000160 // IMPORTANT: Use only the image name (not identity/image) for ATProto storage
161 // ATProto records are scoped to the user's DID, so we don't need the identity prefix
162 // Example: "evan.jarrett.net/debian" -> store as "debian"
163 repositoryName := imageName
164165- // Get UserContext from request context (set by UserContextMiddleware)
166- userCtx := auth.FromContext(ctx)
167- if userCtx == nil {
168- return nil, fmt.Errorf("UserContext not set in request context - ensure UserContextMiddleware is configured")
169 }
170171- // Set target repository info on UserContext
172- // ATProtoClient is cached lazily via userCtx.GetATProtoClient()
173- userCtx.SetTarget(did, handle, pdsEndpoint, repositoryName, holdDID)
174-175 // Create routing repository - routes manifests to ATProto, blobs to hold service
176 // The registry is stateless - no local storage is used
177 //
178 // NOTE: We create a fresh RoutingRepository on every request (no caching) because:
179 // 1. Each layer upload is a separate HTTP request (possibly different process)
180 // 2. OAuth sessions can be refreshed/invalidated between requests
181 // 3. The refresher already caches sessions efficiently (in-memory + DB)
182- // 4. ATProtoClient is now cached in UserContext via GetATProtoClient()
183- return storage.NewRoutingRepository(repo, userCtx, nr.sqlDB), nil
184}
185186// Repositories delegates to underlying namespace
···201// findHoldDID determines which hold DID to use for blob storage
202// Priority order:
203// 1. User's sailor profile defaultHold (if set)
204-// 2. AppView's default hold DID
205 // Returns a hold DID (e.g., "did:web:hold01.atcr.io"), or empty string if none configured
206func (nr *NamespaceResolver) findHoldDID(ctx context.Context, did, pdsEndpoint string) string {
207 // Create ATProto client (without auth - reading public records)
···214 slog.Warn("Failed to read profile", "did", did, "error", err)
215 }
216217- if profile != nil && profile.DefaultHold != "" {
218- // In test mode, verify the hold is reachable (fall back to default if not)
219- // In production, trust the user's profile and return their hold
220 if nr.testMode {
221- if nr.isHoldReachable(ctx, profile.DefaultHold) {
222- return profile.DefaultHold
223 }
224- slog.Debug("User's defaultHold unreachable, falling back to default", "component", "registry/middleware/testmode", "default_hold", profile.DefaultHold)
225 return nr.defaultHoldDID
226 }
227- return profile.DefaultHold
228 }
229230- // No profile defaultHold - use AppView default
231 return nr.defaultHoldDID
232}
233···250 return false
251}
252253-// ExtractAuthMethod is an HTTP middleware that extracts the auth method and puller DID from the JWT Authorization header
254-// and stores them in the request context for later use by the registry middleware.
255-// Also stores the HTTP method for routing decisions (GET/HEAD = pull, PUT/POST = push).
256func ExtractAuthMethod(next http.Handler) http.Handler {
257 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
258- ctx := r.Context()
259-260- // Store HTTP method in context for routing decisions
261- // This is used by routing_repository.go to distinguish pull (GET/HEAD) from push (PUT/POST)
262- ctx = context.WithValue(ctx, "http.request.method", r.Method)
263-264 // Extract Authorization header
265 authHeader := r.Header.Get("Authorization")
266 if authHeader != "" {
···273 authMethod := token.ExtractAuthMethod(tokenString)
274 if authMethod != "" {
275 // Store in context for registry middleware
276- ctx = context.WithValue(ctx, authMethodKey, authMethod)
277- }
278-279- // Extract puller DID (Subject) from JWT
280- // This is the authenticated user's DID, used for service token requests
281- pullerDID := token.ExtractSubject(tokenString)
282- if pullerDID != "" {
283- ctx = context.WithValue(ctx, pullerDIDKey, pullerDID)
284 }
285-286- slog.Debug("Extracted auth info from JWT",
287- "component", "registry/middleware",
288- "authMethod", authMethod,
289- "pullerDID", pullerDID,
290- "httpMethod", r.Method)
291 }
292 }
293294- r = r.WithContext(ctx)
295 next.ServeHTTP(w, r)
296 })
297}
298-299-// UserContextMiddleware creates a UserContext from the extracted JWT claims
300-// and stores it in the request context for use throughout request processing.
301-// This middleware should be chained AFTER ExtractAuthMethod.
302-func UserContextMiddleware(deps *auth.Dependencies) func(http.Handler) http.Handler {
303- return func(next http.Handler) http.Handler {
304- return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
305- ctx := r.Context()
306-307- // Get values set by ExtractAuthMethod
308- authMethod, _ := ctx.Value(authMethodKey).(string)
309- pullerDID, _ := ctx.Value(pullerDIDKey).(string)
310-311- // Build UserContext with all dependencies
312- userCtx := auth.NewUserContext(pullerDID, authMethod, r.Method, deps)
313-314- // Eagerly resolve user's PDS for authenticated users
315- // This is a fast path that avoids lazy loading in most cases
316- if userCtx.IsAuthenticated {
317- if err := userCtx.ResolvePDS(ctx); err != nil {
318- slog.Warn("Failed to resolve puller's PDS",
319- "component", "registry/middleware",
320- "did", pullerDID,
321- "error", err)
322- // Continue without PDS - will fail on service token request
323- }
324-325- // Ensure user has profile and crew membership (runs in background, cached)
326- userCtx.EnsureUserSetup()
327- }
328-329- // Store UserContext in request context
330- ctx = auth.WithUserContext(ctx, userCtx)
331- r = r.WithContext(ctx)
332-333- slog.Debug("Created UserContext",
334- "component", "registry/middleware",
335- "isAuthenticated", userCtx.IsAuthenticated,
336- "authMethod", userCtx.AuthMethod,
337- "action", userCtx.Action.String(),
338- "pullerDID", pullerDID)
339-340- next.ServeHTTP(w, r)
341- })
342- }
343-}
···23import (
4 "context"
5 "fmt"
6 "log/slog"
7 "net/http"
8 "strings"
9+ "sync"
10+ "time"
1112 "github.com/distribution/distribution/v3"
13+ "github.com/distribution/distribution/v3/registry/api/errcode"
14 registrymw "github.com/distribution/distribution/v3/registry/middleware/registry"
15 "github.com/distribution/distribution/v3/registry/storage/driver"
16 "github.com/distribution/reference"
···28// authMethodKey is the context key for storing auth method from JWT
29const authMethodKey contextKey = "auth.method"
3031+// validationCacheEntry stores a validated service token with expiration
32+type validationCacheEntry struct {
33+ serviceToken string
34+ validUntil time.Time
35+ err error // Cached error for fast-fail
36+ mu sync.Mutex // Per-entry lock to serialize cache population
37+ inFlight bool // True if another goroutine is fetching the token
38+ done chan struct{} // Closed when fetch completes
39+}
40+41+// validationCache provides request-level caching for service tokens
42+// This prevents concurrent layer uploads from racing on OAuth/DPoP requests
43+type validationCache struct {
44+ mu sync.RWMutex
45+ entries map[string]*validationCacheEntry // key: "did:holdDID"
46+}
47+48+// newValidationCache creates a new validation cache
49+func newValidationCache() *validationCache {
50+ return &validationCache{
51+ entries: make(map[string]*validationCacheEntry),
52+ }
53+}
54+55+// getOrFetch retrieves a service token from cache or fetches it
56+// Multiple concurrent requests for the same DID:holdDID will share the fetch operation
57+func (vc *validationCache) getOrFetch(ctx context.Context, cacheKey string, fetchFunc func() (string, error)) (string, error) {
58+ // Fast path: check cache with read lock
59+ vc.mu.RLock()
60+ entry, exists := vc.entries[cacheKey]
61+ vc.mu.RUnlock()
62+63+ if exists {
64+ // Entry exists, check if it's still valid
65+ entry.mu.Lock()
66+67+ // If another goroutine is fetching, wait for it
68+ if entry.inFlight {
69+ done := entry.done
70+ entry.mu.Unlock()
71+72+ select {
73+ case <-done:
74+ // Fetch completed, check result
75+ entry.mu.Lock()
76+ defer entry.mu.Unlock()
77+78+ if entry.err != nil {
79+ return "", entry.err
80+ }
81+ if time.Now().Before(entry.validUntil) {
82+ return entry.serviceToken, nil
83+ }
84+ // Fall through to refetch
85+ case <-ctx.Done():
86+ return "", ctx.Err()
87+ }
88+ } else {
89+ // Check if cached token is still valid
90+ if entry.err != nil && time.Now().Before(entry.validUntil) {
91+ // Return cached error (fast-fail)
92+ entry.mu.Unlock()
93+ return "", entry.err
94+ }
95+ if entry.err == nil && time.Now().Before(entry.validUntil) {
96+ // Return cached token
97+ token := entry.serviceToken
98+ entry.mu.Unlock()
99+ return token, nil
100+ }
101+ entry.mu.Unlock()
102+ }
103+ }
104+105+ // Slow path: need to fetch token
106+ vc.mu.Lock()
107+ entry, exists = vc.entries[cacheKey]
108+ if !exists {
109+ // Create new entry
110+ entry = &validationCacheEntry{
111+ inFlight: true,
112+ done: make(chan struct{}),
113+ }
114+ vc.entries[cacheKey] = entry
115+ }
116+ vc.mu.Unlock()
117+118+ // Lock the entry to perform fetch
119+ entry.mu.Lock()
120+121+ // Double-check: another goroutine may have fetched while we waited
122+ if !entry.inFlight {
123+ if entry.err != nil && time.Now().Before(entry.validUntil) {
124+ err := entry.err
125+ entry.mu.Unlock()
126+ return "", err
127+ }
128+ if entry.err == nil && time.Now().Before(entry.validUntil) {
129+ token := entry.serviceToken
130+ entry.mu.Unlock()
131+ return token, nil
132+ }
133+ }
134+135+ // Mark as in-flight and create fresh done channel for this fetch
136+ // IMPORTANT: Always create a new channel - a closed channel is not nil
137+ entry.done = make(chan struct{})
138+ entry.inFlight = true
139+ done := entry.done
140+ entry.mu.Unlock()
141+142+ // Perform the fetch (outside the lock to allow other operations)
143+ serviceToken, err := fetchFunc()
144+145+ // Update the entry with result
146+ entry.mu.Lock()
147+ entry.inFlight = false
148+149+ if err != nil {
150+ // Cache errors for 5 seconds (fast-fail for subsequent requests)
151+ entry.err = err
152+ entry.validUntil = time.Now().Add(5 * time.Second)
153+ entry.serviceToken = ""
154+ } else {
155+ // Cache token for 45 seconds (covers typical Docker push operation)
156+ entry.err = nil
157+ entry.serviceToken = serviceToken
158+ entry.validUntil = time.Now().Add(45 * time.Second)
159+ }
160+161+ // Signal completion to waiting goroutines
162+ close(done)
163+ entry.mu.Unlock()
164+165+ return serviceToken, err
166+}
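A small illustrative test of the cache's reuse window, written in the same package because the type is unexported; the key and token values are made up. It only exercises the sequential case: a second lookup inside the 45-second window returns the cached token without invoking the fetch function again.

package middleware

import (
	"context"
	"testing"
)

func TestValidationCacheReusesToken(t *testing.T) {
	vc := newValidationCache()
	fetches := 0
	fetch := func() (string, error) {
		fetches++
		return "service-token-123", nil
	}

	key := "did:plc:test123:did:web:hold01.atcr.io"
	for i := 0; i < 2; i++ {
		tok, err := vc.getOrFetch(context.Background(), key, fetch)
		if err != nil || tok != "service-token-123" {
			t.Fatalf("lookup %d: got %q, %v", i, tok, err)
		}
	}
	if fetches != 1 {
		t.Fatalf("expected one upstream fetch, got %d", fetches)
	}
}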
167168// Global variables for initialization only
169// These are set by main.go during startup and copied into NamespaceResolver instances.
170// After initialization, request handling uses the NamespaceResolver's instance fields.
171var (
172+ globalRefresher *oauth.Refresher
173+ globalDatabase storage.DatabaseMetrics
174+ globalAuthorizer auth.HoldAuthorizer
175+ globalReadmeCache storage.ReadmeCache
176)
177178// SetGlobalRefresher sets the OAuth refresher instance during initialization
···183184// SetGlobalDatabase sets the database instance during initialization
185// Must be called before the registry starts serving requests
186+func SetGlobalDatabase(database storage.DatabaseMetrics) {
187 globalDatabase = database
188}
189···193 globalAuthorizer = authorizer
194}
195196+// SetGlobalReadmeCache sets the readme cache instance during initialization
197+// Must be called before the registry starts serving requests
198+func SetGlobalReadmeCache(readmeCache storage.ReadmeCache) {
199+ globalReadmeCache = readmeCache
200+}
201+202func init() {
203 // Register the name resolution middleware
204 registrymw.Register("atproto-resolver", initATProtoResolver)
···207// NamespaceResolver wraps a namespace and resolves names
208type NamespaceResolver struct {
209 distribution.Namespace
210+ defaultHoldDID string // Default hold DID (e.g., "did:web:hold01.atcr.io")
211+ baseURL string // Base URL for error messages (e.g., "https://atcr.io")
212+ testMode bool // If true, fall back to the default hold when the user's hold is unreachable
213+ refresher *oauth.Refresher // OAuth session manager (copied from global on init)
214+ database storage.DatabaseMetrics // Metrics database (copied from global on init)
215+ authorizer auth.HoldAuthorizer // Hold authorization (copied from global on init)
216+ readmeCache storage.ReadmeCache // README cache (copied from global on init)
217+ validationCache *validationCache // Request-level service token cache
218}
219220// initATProtoResolver initializes the name resolution middleware
···241 // Copy shared services from globals into the instance
242 // This avoids accessing globals during request handling
243 return &NamespaceResolver{
244+ Namespace: ns,
245+ defaultHoldDID: defaultHoldDID,
246+ baseURL: baseURL,
247+ testMode: testMode,
248+ refresher: globalRefresher,
249+ database: globalDatabase,
250+ authorizer: globalAuthorizer,
251+ readmeCache: globalReadmeCache,
252+ validationCache: newValidationCache(),
253 }, nil
254+}
255+256+// authErrorMessage creates a user-friendly auth error with login URL
257+func (nr *NamespaceResolver) authErrorMessage(message string) error {
258+ loginURL := fmt.Sprintf("%s/auth/oauth/login", nr.baseURL)
259+ fullMessage := fmt.Sprintf("%s - please re-authenticate at %s", message, loginURL)
260+ return errcode.ErrorCodeUnauthorized.WithMessage(fullMessage)
261}
262263// Repository resolves the repository name and delegates to underlying namespace
···293 }
294 ctx = context.WithValue(ctx, holdDIDKey, holdDID)
295296+ // Auto-reconcile crew membership on first push/pull
297+ // This ensures users can push immediately after docker login without web sign-in
298+ // EnsureCrewMembership is best-effort and logs errors without failing the request
299+ // Run in background to avoid blocking registry operations if hold is offline
300+ if holdDID != "" && nr.refresher != nil {
301+ slog.Debug("Auto-reconciling crew membership", "component", "registry/middleware", "did", did, "hold_did", holdDID)
302+ client := atproto.NewClient(pdsEndpoint, did, "")
303+ go func(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, holdDID string) {
304+ storage.EnsureCrewMembership(ctx, client, refresher, holdDID)
305+ }(ctx, client, nr.refresher, holdDID)
306+ }
307+308+ // Get service token for hold authentication (only if authenticated)
309+ // Use validation cache to prevent concurrent requests from racing on OAuth/DPoP
310+ // Route based on auth method from JWT token
311+ var serviceToken string
312+ authMethod, _ := ctx.Value(authMethodKey).(string)
313+314+ // Only fetch service token if user is authenticated
315+ // Unauthenticated requests (like /v2/ ping) should not trigger token fetching
316+ if authMethod != "" {
317+ // Create cache key: "did:holdDID"
318+ cacheKey := fmt.Sprintf("%s:%s", did, holdDID)
319+320+ // Fetch service token through validation cache
321+ // This ensures only ONE request per DID:holdDID pair fetches the token
322+ // Concurrent requests will wait for the first request to complete
323+ var fetchErr error
324+ serviceToken, fetchErr = nr.validationCache.getOrFetch(ctx, cacheKey, func() (string, error) {
325+ if authMethod == token.AuthMethodAppPassword {
326+ // App-password flow: use Bearer token authentication
327+ slog.Debug("Using app-password flow for service token",
328+ "component", "registry/middleware",
329+ "did", did,
330+ "cacheKey", cacheKey)
331+332+ token, err := token.GetOrFetchServiceTokenWithAppPassword(ctx, did, holdDID, pdsEndpoint)
333+ if err != nil {
334+ slog.Error("Failed to get service token with app-password",
335+ "component", "registry/middleware",
336+ "did", did,
337+ "holdDID", holdDID,
338+ "pdsEndpoint", pdsEndpoint,
339+ "error", err)
340+ return "", err
341+ }
342+ return token, nil
343+ } else if nr.refresher != nil {
344+ // OAuth flow: use DPoP authentication
345+ slog.Debug("Using OAuth flow for service token",
346+ "component", "registry/middleware",
347+ "did", did,
348+ "cacheKey", cacheKey)
349+350+ token, err := token.GetOrFetchServiceToken(ctx, nr.refresher, did, holdDID, pdsEndpoint)
351+ if err != nil {
352+ slog.Error("Failed to get service token with OAuth",
353+ "component", "registry/middleware",
354+ "did", did,
355+ "holdDID", holdDID,
356+ "pdsEndpoint", pdsEndpoint,
357+ "error", err)
358+ return "", err
359+ }
360+ return token, nil
361+ }
362+ return "", fmt.Errorf("no authentication method available")
363+ })
364+365+ // Handle errors from cached fetch
366+ if fetchErr != nil {
367+ errMsg := fetchErr.Error()
368+369+ // Check for app-password specific errors
370+ if authMethod == token.AuthMethodAppPassword {
371+ if strings.Contains(errMsg, "expired or invalid") || strings.Contains(errMsg, "no app-password") {
372+ return nil, nr.authErrorMessage("App-password authentication failed. Please re-authenticate with: docker login")
373+ }
374+ }
375+376+ // Check for OAuth specific errors
377+ if strings.Contains(errMsg, "OAuth session") || strings.Contains(errMsg, "OAuth validation") {
378+ return nil, nr.authErrorMessage("OAuth session expired or invalidated by PDS. Your session has been cleared")
379+ }
380+381+ // Generic service token error
382+ return nil, nr.authErrorMessage(fmt.Sprintf("Failed to obtain storage credentials: %v", fetchErr))
383+ }
384+ } else {
385+ slog.Debug("Skipping service token fetch for unauthenticated request",
386+ "component", "registry/middleware",
387+ "did", did)
388+ }
389390 // Create a new reference with identity/image format
391 // Use the identity (or DID) as the namespace to ensure canonical format
···402 return nil, err
403 }
404405+ // Get access token for PDS operations
406+ // Use auth method from JWT to determine client type:
407+ // - OAuth users: use session provider (DPoP-enabled)
408+ // - App-password users: use Basic Auth token cache
409+ var atprotoClient *atproto.Client
410+411+ if authMethod == token.AuthMethodOAuth && nr.refresher != nil {
412+ // OAuth flow: use session provider for locked OAuth sessions
413+ // This prevents DPoP nonce race conditions during concurrent layer uploads
414+ slog.Debug("Creating ATProto client with OAuth session provider",
415+ "component", "registry/middleware",
416+ "did", did,
417+ "authMethod", authMethod)
418+ atprotoClient = atproto.NewClientWithSessionProvider(pdsEndpoint, did, nr.refresher)
419+ } else {
420+ // App-password flow (or fallback): use Basic Auth token cache
421+ accessToken, ok := auth.GetGlobalTokenCache().Get(did)
422+ if !ok {
423+ slog.Debug("No cached access token found for app-password auth",
424+ "component", "registry/middleware",
425+ "did", did,
426+ "authMethod", authMethod)
427+ accessToken = "" // Will fail on manifest push, but let it try
428+ } else {
429+ slog.Debug("Creating ATProto client with app-password",
430+ "component", "registry/middleware",
431+ "did", did,
432+ "authMethod", authMethod,
433+ "token_length", len(accessToken))
434+ }
435+ atprotoClient = atproto.NewClient(pdsEndpoint, did, accessToken)
436+ }
437+438 // IMPORTANT: Use only the image name (not identity/image) for ATProto storage
439 // ATProto records are scoped to the user's DID, so we don't need the identity prefix
440 // Example: "evan.jarrett.net/debian" -> store as "debian"
441 repositoryName := imageName
442443+ // Default auth method to OAuth if not already set (backward compatibility with old tokens)
444+ if authMethod == "" {
445+ authMethod = token.AuthMethodOAuth
446 }
447448 // Create routing repository - routes manifests to ATProto, blobs to hold service
449 // The registry is stateless - no local storage is used
450+ // Bundle all context into a single RegistryContext struct
451 //
452 // NOTE: We create a fresh RoutingRepository on every request (no caching) because:
453 // 1. Each layer upload is a separate HTTP request (possibly different process)
454 // 2. OAuth sessions can be refreshed/invalidated between requests
455 // 3. The refresher already caches sessions efficiently (in-memory + DB)
456+ // 4. Caching the repository with a stale ATProtoClient causes refresh token errors
457+ registryCtx := &storage.RegistryContext{
458+ DID: did,
459+ Handle: handle,
460+ HoldDID: holdDID,
461+ PDSEndpoint: pdsEndpoint,
462+ Repository: repositoryName,
463+ ServiceToken: serviceToken, // Cached service token from middleware validation
464+ ATProtoClient: atprotoClient,
465+ AuthMethod: authMethod, // Auth method from JWT token
466+ Database: nr.database,
467+ Authorizer: nr.authorizer,
468+ Refresher: nr.refresher,
469+ ReadmeCache: nr.readmeCache,
470+ }
471+472+ return storage.NewRoutingRepository(repo, registryCtx), nil
473}
474475// Repositories delegates to underlying namespace
···490// findHoldDID determines which hold DID to use for blob storage
491// Priority order:
492// 1. User's sailor profile defaultHold (if set)
493+// 2. User's own hold record (io.atcr.hold)
494+// 3. AppView's default hold DID
495// Returns a hold DID (e.g., "did:web:hold01.atcr.io"), or empty string if none configured
496func (nr *NamespaceResolver) findHoldDID(ctx context.Context, did, pdsEndpoint string) string {
497 // Create ATProto client (without auth - reading public records)
···504 slog.Warn("Failed to read profile", "did", did, "error", err)
505 }
506507+ if profile != nil && profile.DefaultHold != nil && *profile.DefaultHold != "" {
508+ defaultHold := *profile.DefaultHold
509+ // Profile exists with defaultHold set
510+ // In test mode, verify it's reachable before using it
511 if nr.testMode {
512+ if nr.isHoldReachable(ctx, defaultHold) {
513+ return defaultHold
514 }
515+ slog.Debug("User's defaultHold unreachable, falling back to default", "component", "registry/middleware/testmode", "default_hold", defaultHold)
516 return nr.defaultHoldDID
517 }
518+ return defaultHold
519 }
520521+ // Profile doesn't exist or defaultHold is null/empty
522+ // Legacy io.atcr.hold records are no longer supported - use AppView default
523 return nr.defaultHoldDID
524}
525···542 return false
543}
544545+// ExtractAuthMethod is an HTTP middleware that extracts the auth method from the JWT Authorization header
546+// and stores it in the request context for later use by the registry middleware
547func ExtractAuthMethod(next http.Handler) http.Handler {
548 return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
549 // Extract Authorization header
550 authHeader := r.Header.Get("Authorization")
551 if authHeader != "" {
···558 authMethod := token.ExtractAuthMethod(tokenString)
559 if authMethod != "" {
560 // Store in context for registry middleware
561+ ctx := context.WithValue(r.Context(), authMethodKey, authMethod)
562+ r = r.WithContext(ctx)
563+ slog.Debug("Extracted auth method from JWT",
564+ "component", "registry/middleware",
565+ "authMethod", authMethod)
566 }
567 }
568 }
569570 next.ServeHTTP(w, r)
571 })
572}
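Rough startup wiring implied by this file. Only SetGlobalDatabase, SetGlobalReadmeCache, and ExtractAuthMethod appear with their signatures above; the refresher setter's parameter, the authorizer setter, the variable names, the import paths, and the listen address are inferred and may differ from what main.go actually does.

package main

import (
	"net/http"

	"atcr.io/pkg/appview/middleware"
	"atcr.io/pkg/appview/storage"
	"atcr.io/pkg/auth/oauth"
)

func main() {
	// Placeholders standing in for the services main.go constructs during startup.
	var (
		refresher   *oauth.Refresher        // OAuth session manager
		metricsDB   storage.DatabaseMetrics // pull/push count metrics
		readmeCache storage.ReadmeCache     // README cache
	)

	// Inject shared services before the registry serves requests; initATProtoResolver
	// copies them into NamespaceResolver instance fields.
	middleware.SetGlobalRefresher(refresher)
	middleware.SetGlobalDatabase(metricsDB)
	middleware.SetGlobalReadmeCache(readmeCache)
	// (The hold authorizer has a matching setter, omitted here.)

	// ExtractAuthMethod stores the JWT's auth method in the request context so
	// Repository() can pick the OAuth (DPoP) or app-password service-token flow.
	var registryHandler http.Handler // the distribution registry handler (assumed)
	_ = http.ListenAndServe(":5000", middleware.ExtractAuthMethod(registryHandler))
}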
+43-2
pkg/appview/middleware/registry_test.go
···67 // If we get here without panic, test passes
68}
690000070// TestInitATProtoResolver tests the initialization function
71func TestInitATProtoResolver(t *testing.T) {
72 ctx := context.Background()
···129 }
130}
13100000000000132// TestFindHoldDID_DefaultFallback tests default hold DID fallback
133func TestFindHoldDID_DefaultFallback(t *testing.T) {
134 // Start a mock PDS server that returns 404 for profile and empty list for holds
···188 assert.Equal(t, "did:web:user.hold.io", holdDID, "should use sailor profile's defaultHold")
189}
190191-// TestFindHoldDID_Priority tests the priority order
192func TestFindHoldDID_Priority(t *testing.T) {
193- // Start a mock PDS server that returns both profile and hold records
194 mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
195 if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
196 // Return sailor profile with defaultHold (highest priority)
···67 // If we get here without panic, test passes
68}
6970+func TestSetGlobalReadmeCache(t *testing.T) {
71+ SetGlobalReadmeCache(nil)
72+ // If we get here without panic, test passes
73+}
74+75// TestInitATProtoResolver tests the initialization function
76func TestInitATProtoResolver(t *testing.T) {
77 ctx := context.Background()
···134 }
135}
136137+// TestAuthErrorMessage tests the error message formatting
138+func TestAuthErrorMessage(t *testing.T) {
139+ resolver := &NamespaceResolver{
140+ baseURL: "https://atcr.io",
141+ }
142+143+ err := resolver.authErrorMessage("OAuth session expired")
144+ assert.Contains(t, err.Error(), "OAuth session expired")
145+ assert.Contains(t, err.Error(), "https://atcr.io/auth/oauth/login")
146+}
147+148// TestFindHoldDID_DefaultFallback tests default hold DID fallback
149func TestFindHoldDID_DefaultFallback(t *testing.T) {
150 // Start a mock PDS server that returns 404 for profile and empty list for holds
···204 assert.Equal(t, "did:web:user.hold.io", holdDID, "should use sailor profile's defaultHold")
205}
206207+// TestFindHoldDID_NoProfile tests fallback to default hold when no profile exists
208+func TestFindHoldDID_NoProfile(t *testing.T) {
209+ // Start a mock PDS server that returns 404 for profile
210+ mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
211+ if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
212+ // Profile not found
213+ w.WriteHeader(http.StatusNotFound)
214+ return
215+ }
216+ w.WriteHeader(http.StatusNotFound)
217+ }))
218+ defer mockPDS.Close()
219+220+ resolver := &NamespaceResolver{
221+ defaultHoldDID: "did:web:default.atcr.io",
222+ }
223+224+ ctx := context.Background()
225+ holdDID := resolver.findHoldDID(ctx, "did:plc:test123", mockPDS.URL)
226+227+ // Should fall back to default hold DID when no profile exists
228+ // Note: Legacy io.atcr.hold records are no longer supported
229+ assert.Equal(t, "did:web:default.atcr.io", holdDID, "should fall back to default hold DID")
230+}
231+232+// TestFindHoldDID_Priority tests that profile takes priority over default
233func TestFindHoldDID_Priority(t *testing.T) {
234+ // Start a mock PDS server that returns profile
235 mockPDS := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
236 if r.URL.Path == "/xrpc/com.atproto.repo.getRecord" {
237 // Return sailor profile with defaultHold (highest priority)
+93
pkg/appview/storage/crew.go
···1+package storage
2+3+import (
4+ "context"
5+ "fmt"
6+ "io"
7+ "log/slog"
8+ "net/http"
9+ "time"
10+11+ "atcr.io/pkg/atproto"
12+ "atcr.io/pkg/auth/oauth"
13+ "atcr.io/pkg/auth/token"
14+)
15+16+// EnsureCrewMembership attempts to register the user as a crew member on their default hold.
17+// The hold's requestCrew endpoint handles all authorization logic (checking allowAllCrew, existing membership, etc.).
18+// This is best-effort and does not fail on errors.
19+func EnsureCrewMembership(ctx context.Context, client *atproto.Client, refresher *oauth.Refresher, defaultHoldDID string) {
20+ if defaultHoldDID == "" {
21+ return
22+ }
23+24+ // Normalize URL to DID if needed
25+ holdDID := atproto.ResolveHoldDIDFromURL(defaultHoldDID)
26+ if holdDID == "" {
27+ slog.Warn("failed to resolve hold DID", "defaultHold", defaultHoldDID)
28+ return
29+ }
30+31+ // Resolve hold DID to HTTP endpoint
32+ holdEndpoint := atproto.ResolveHoldURL(holdDID)
33+34+ // Get service token for the hold
35+ // Only works with OAuth (refresher required) - app passwords can't get service tokens
36+ if refresher == nil {
37+ slog.Debug("skipping crew registration - no OAuth refresher (app password flow)", "holdDID", holdDID)
38+ return
39+ }
40+41+ // Wrap the refresher to match OAuthSessionRefresher interface
42+ serviceToken, err := token.GetOrFetchServiceToken(ctx, refresher, client.DID(), holdDID, client.PDSEndpoint())
43+ if err != nil {
44+ slog.Warn("failed to get service token", "holdDID", holdDID, "error", err)
45+ return
46+ }
47+48+ // Call requestCrew endpoint - it handles all the logic:
49+ // - Checks allowAllCrew flag
50+ // - Checks if already a crew member (returns success if so)
51+ // - Creates crew record if authorized
52+ if err := requestCrewMembership(ctx, holdEndpoint, serviceToken); err != nil {
53+ slog.Warn("failed to request crew membership", "holdDID", holdDID, "error", err)
54+ return
55+ }
56+57+ slog.Info("successfully registered as crew member", "holdDID", holdDID, "userDID", client.DID())
58+}
59+60+// requestCrewMembership calls the hold's requestCrew endpoint
61+// The endpoint handles all authorization and duplicate checking internally
62+func requestCrewMembership(ctx context.Context, holdEndpoint, serviceToken string) error {
63+ // Add 5 second timeout to prevent hanging on offline holds
64+ ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
65+ defer cancel()
66+67+ url := fmt.Sprintf("%s%s", holdEndpoint, atproto.HoldRequestCrew)
68+69+ req, err := http.NewRequestWithContext(ctx, "POST", url, nil)
70+ if err != nil {
71+ return err
72+ }
73+74+ req.Header.Set("Authorization", "Bearer "+serviceToken)
75+ req.Header.Set("Content-Type", "application/json")
76+77+ resp, err := http.DefaultClient.Do(req)
78+ if err != nil {
79+ return err
80+ }
81+ defer resp.Body.Close()
82+83+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
84+ // Read response body to capture actual error message from hold
85+ body, readErr := io.ReadAll(resp.Body)
86+ if readErr != nil {
87+ return fmt.Errorf("requestCrew failed with status %d (failed to read error body: %w)", resp.StatusCode, readErr)
88+ }
89+ return fmt.Errorf("requestCrew failed with status %d: %s", resp.StatusCode, string(body))
90+ }
91+92+ return nil
93+}
+14
pkg/appview/storage/crew_test.go
···1+package storage
2+3+import (
4+ "context"
5+ "testing"
6+)
7+8+func TestEnsureCrewMembership_EmptyHoldDID(t *testing.T) {
9+ // Test that empty hold DID returns early without error (best-effort function)
10+ EnsureCrewMembership(context.Background(), nil, nil, "")
11+ // If we get here without panic, test passes
12+}
13+14+// TODO: Add comprehensive tests with HTTP client mocking
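One way the TODO above could be approached, sketched against a fake hold with httptest; only the Bearer header and the accepted status codes come from requestCrewMembership, the rest is illustrative.

package storage

import (
	"context"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestRequestCrewMembership_SendsServiceToken(t *testing.T) {
	var gotAuth string
	hold := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		gotAuth = r.Header.Get("Authorization")
		w.WriteHeader(http.StatusOK)
	}))
	defer hold.Close()

	if err := requestCrewMembership(context.Background(), hold.URL, "service-token-123"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if gotAuth != "Bearer service-token-123" {
		t.Errorf("Authorization = %q, want the bearer service token", gotAuth)
	}
}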
+86-314
pkg/appview/storage/manifest_store.go
···3import (
4 "bytes"
5 "context"
6- "database/sql"
7 "encoding/json"
8 "errors"
9 "fmt"
···11 "log/slog"
12 "net/http"
13 "strings"
14- "time"
1516- "atcr.io/pkg/appview/db"
17- "atcr.io/pkg/appview/readme"
18 "atcr.io/pkg/atproto"
19- "atcr.io/pkg/auth"
20 "github.com/distribution/distribution/v3"
21 "github.com/opencontainers/go-digest"
22)
···24// ManifestStore implements distribution.ManifestService
25// It stores manifests in ATProto as records
26type ManifestStore struct {
27- ctx *auth.UserContext // User context with identity, target, permissions
0028 blobStore distribution.BlobStore // Blob store for fetching config during push
29- sqlDB *sql.DB // Database for pull/push counts
30}
3132// NewManifestStore creates a new ATProto-backed manifest store
33-func NewManifestStore(userCtx *auth.UserContext, blobStore distribution.BlobStore, sqlDB *sql.DB) *ManifestStore {
34 return &ManifestStore{
35- ctx: userCtx,
36 blobStore: blobStore,
37- sqlDB: sqlDB,
38 }
39}
4041// Exists checks if a manifest exists by digest
42func (s *ManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
43 rkey := digestToRKey(dgst)
44- _, err := s.ctx.GetATProtoClient().GetRecord(ctx, atproto.ManifestCollection, rkey)
45 if err != nil {
46 // If not found, return false without error
47 if errors.Is(err, atproto.ErrRecordNotFound) {
···55// Get retrieves a manifest by digest
56func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
57 rkey := digestToRKey(dgst)
58- record, err := s.ctx.GetATProtoClient().GetRecord(ctx, atproto.ManifestCollection, rkey)
59 if err != nil {
60 return nil, distribution.ErrManifestUnknownRevision{
61- Name: s.ctx.TargetRepo,
62 Revision: dgst,
63 }
64 }
6566- var manifestRecord atproto.ManifestRecord
67 if err := json.Unmarshal(record.Value, &manifestRecord); err != nil {
68 return nil, fmt.Errorf("failed to unmarshal manifest record: %w", err)
69 }
70000000000000071 var ociManifest []byte
7273 // New records: Download blob from ATProto blob storage
74- if manifestRecord.ManifestBlob != nil && manifestRecord.ManifestBlob.Ref.Link != "" {
75- ociManifest, err = s.ctx.GetATProtoClient().GetBlob(ctx, manifestRecord.ManifestBlob.Ref.Link)
76 if err != nil {
77 return nil, fmt.Errorf("failed to download manifest blob: %w", err)
78 }
···8081 // Track pull count (increment asynchronously to avoid blocking the response)
82 // Only count GET requests (actual downloads), not HEAD requests (existence checks)
83- if s.sqlDB != nil {
84 // Check HTTP method from context (distribution library stores it as "http.request.method")
85 if method, ok := ctx.Value("http.request.method").(string); ok && method == "GET" {
86 go func() {
87- if err := db.IncrementPullCount(s.sqlDB, s.ctx.TargetOwnerDID, s.ctx.TargetRepo); err != nil {
88- slog.Warn("Failed to increment pull count", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err)
89 }
90 }()
91 }
···112 dgst := digest.FromBytes(payload)
113114 // Upload manifest as blob to PDS
115- blobRef, err := s.ctx.GetATProtoClient().UploadBlob(ctx, payload, mediaType)
116 if err != nil {
117 return "", fmt.Errorf("failed to upload manifest blob: %w", err)
118 }
119120 // Create manifest record with structured metadata
121- manifestRecord, err := atproto.NewManifestRecord(s.ctx.TargetRepo, dgst.String(), payload)
122 if err != nil {
123 return "", fmt.Errorf("failed to create manifest record: %w", err)
124 }
125126 // Set the blob reference, hold DID, and hold endpoint
127 manifestRecord.ManifestBlob = blobRef
128- manifestRecord.HoldDID = s.ctx.TargetHoldDID // Primary reference (DID)
129130 // Extract Dockerfile labels from config blob and add to annotations
131 // Only for image manifests (not manifest lists which don't have config blobs)
···152 if !exists {
153 platform := "unknown"
154 if ref.Platform != nil {
155- platform = fmt.Sprintf("%s/%s", ref.Platform.OS, ref.Platform.Architecture)
156 }
157 slog.Warn("Manifest list references non-existent child manifest",
158- "repository", s.ctx.TargetRepo,
159 "missingDigest", ref.Digest,
160 "platform", platform)
161 return "", distribution.ErrManifestBlobUnknown{Digest: refDigest}
···163 }
164 }
165166- if !isManifestList && s.blobStore != nil && manifestRecord.Config != nil && manifestRecord.Config.Digest != "" {
167- labels, err := s.extractConfigLabels(ctx, manifestRecord.Config.Digest)
168- if err != nil {
169- // Log error but don't fail the push - labels are optional
170- slog.Warn("Failed to extract config labels", "error", err)
171- } else if len(labels) > 0 {
172- // Initialize annotations map if needed
173- if manifestRecord.Annotations == nil {
174- manifestRecord.Annotations = make(map[string]string)
175- }
176-177- // Copy labels to annotations as fallback
178- // Only set label values for keys NOT already in manifest annotations
179- // This ensures explicit annotations take precedence over Dockerfile LABELs
180- // (which may be inherited from base images)
181- for key, value := range labels {
182- if _, exists := manifestRecord.Annotations[key]; !exists {
183- manifestRecord.Annotations[key] = value
184- }
185- }
186-187- slog.Debug("Merged labels from config blob", "labelsCount", len(labels), "annotationsCount", len(manifestRecord.Annotations))
188- }
189- }
190191 // Store manifest record in ATProto
192 rkey := digestToRKey(dgst)
193- _, err = s.ctx.GetATProtoClient().PutRecord(ctx, atproto.ManifestCollection, rkey, manifestRecord)
194 if err != nil {
195 return "", fmt.Errorf("failed to store manifest record in ATProto: %w", err)
196 }
197198 // Track push count (increment asynchronously to avoid blocking the response)
199- if s.sqlDB != nil {
200 go func() {
201- if err := db.IncrementPushCount(s.sqlDB, s.ctx.TargetOwnerDID, s.ctx.TargetRepo); err != nil {
202- slog.Warn("Failed to increment push count", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err)
203 }
204 }()
205 }
···209 for _, option := range options {
210 if tagOpt, ok := option.(distribution.WithTagOption); ok {
211 tag = tagOpt.Tag
212- tagRecord := atproto.NewTagRecord(s.ctx.GetATProtoClient().DID(), s.ctx.TargetRepo, tag, dgst.String())
213- tagRKey := atproto.RepositoryTagToRKey(s.ctx.TargetRepo, tag)
214- _, err = s.ctx.GetATProtoClient().PutRecord(ctx, atproto.TagCollection, tagRKey, tagRecord)
215 if err != nil {
216 return "", fmt.Errorf("failed to store tag in ATProto: %w", err)
217 }
···220221 // Notify hold about manifest upload (for layer tracking and Bluesky posts)
222 // Do this asynchronously to avoid blocking the push
223- // Get service token before goroutine (requires context)
224- serviceToken, _ := s.ctx.GetServiceToken(ctx)
225- if tag != "" && serviceToken != "" && s.ctx.TargetOwnerHandle != "" {
226- go func(serviceToken string) {
227 defer func() {
228 if r := recover(); r != nil {
229 slog.Error("Panic in notifyHoldAboutManifest", "panic", r)
230 }
231 }()
232- if err := s.notifyHoldAboutManifest(context.Background(), manifestRecord, tag, dgst.String(), serviceToken); err != nil {
233 slog.Warn("Failed to notify hold about manifest", "error", err)
234 }
235- }(serviceToken)
236 }
237238- // Create or update repo page asynchronously if manifest has relevant annotations
239- // This ensures repository metadata is synced to user's PDS
240 go func() {
241 defer func() {
242 if r := recover(); r != nil {
243- slog.Error("Panic in ensureRepoPage", "panic", r)
244 }
245 }()
246- s.ensureRepoPage(context.Background(), manifestRecord)
247 }()
248249 return dgst, nil
···252// Delete removes a manifest
253func (s *ManifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
254 rkey := digestToRKey(dgst)
255- return s.ctx.GetATProtoClient().DeleteRecord(ctx, atproto.ManifestCollection, rkey)
256}
257258// digestToRKey converts a digest to an ATProto record key
···262 return dgst.Encoded()
263}
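A tiny illustrative check of the rkey convention (the digest value here is arbitrary): the record key is the hex-encoded portion of the digest with the algorithm prefix stripped.

package storage

import (
	"strings"
	"testing"

	"github.com/opencontainers/go-digest"
)

func TestDigestToRKey_DropsAlgorithmPrefix(t *testing.T) {
	dgst := digest.FromString("example manifest bytes") // "sha256:<hex>"
	rkey := digestToRKey(dgst)
	if strings.HasPrefix(rkey, "sha256:") || rkey != dgst.Encoded() {
		t.Errorf("digestToRKey(%q) = %q, want %q", dgst, rkey, dgst.Encoded())
	}
}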
26400000000265// rawManifest is a simple implementation of distribution.Manifest
266type rawManifest struct {
267 mediaType string
···307308// notifyHoldAboutManifest notifies the hold service about a manifest upload
309// This enables the hold to create layer records and Bluesky posts
310-func (s *ManifestStore) notifyHoldAboutManifest(ctx context.Context, manifestRecord *atproto.ManifestRecord, tag, manifestDigest, serviceToken string) error {
311- // Skip if no service token provided
312- if serviceToken == "" {
313 return nil
314 }
315316 // Resolve hold DID to HTTP endpoint
317 // For did:web, this is straightforward (e.g., did:web:hold01.atcr.io → https://hold01.atcr.io)
318- holdEndpoint := atproto.ResolveHoldURL(s.ctx.TargetHoldDID)
319320- // Service token is passed in (already cached and validated)
0321322 // Build notification request
323 manifestData := map[string]any{
···356 }
357 if m.Platform != nil {
358 mData["platform"] = map[string]any{
359- "os": m.Platform.OS,
360 "architecture": m.Platform.Architecture,
361 }
362 }
···366 }
367368 notifyReq := map[string]any{
369- "repository": s.ctx.TargetRepo,
370 "tag": tag,
371- "userDid": s.ctx.TargetOwnerDID,
372- "userHandle": s.ctx.TargetOwnerHandle,
373 "manifest": manifestData,
374 }
375···407 // Parse response (optional logging)
408 var notifyResp map[string]any
409 if err := json.NewDecoder(resp.Body).Decode(&notifyResp); err == nil {
410- slog.Info("Hold notification successful", "repository", s.ctx.TargetRepo, "tag", tag, "response", notifyResp)
411 }
412413 return nil
414}
415416-// ensureRepoPage creates or updates a repo page record in the user's PDS if needed
417-// This syncs repository metadata from manifest annotations to the io.atcr.repo.page collection
418-// Only creates a new record if one doesn't exist (doesn't overwrite user's custom content)
419-func (s *ManifestStore) ensureRepoPage(ctx context.Context, manifestRecord *atproto.ManifestRecord) {
420- // Check if repo page already exists (don't overwrite user's custom content)
421- rkey := s.ctx.TargetRepo
422- _, err := s.ctx.GetATProtoClient().GetRecord(ctx, atproto.RepoPageCollection, rkey)
423- if err == nil {
424- // Record already exists - don't overwrite
425- slog.Debug("Repo page already exists, skipping creation", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo)
426- return
427- }
428-429- // Only continue if it's a "not found" error - other errors mean we should skip
430- if !errors.Is(err, atproto.ErrRecordNotFound) {
431- slog.Warn("Failed to check for existing repo page", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err)
432- return
433- }
434-435- // Get annotations (may be nil if image has no OCI labels)
436- annotations := manifestRecord.Annotations
437- if annotations == nil {
438- annotations = make(map[string]string)
439- }
440-441- // Try to fetch README content from external sources
442- // Priority: io.atcr.readme annotation > derived from org.opencontainers.image.source > org.opencontainers.image.description
443- description := s.fetchReadmeContent(ctx, annotations)
444-445- // If no README content could be fetched, fall back to description annotation
446- if description == "" {
447- description = annotations["org.opencontainers.image.description"]
448- }
449-450- // Try to fetch and upload icon from io.atcr.icon annotation
451- var avatarRef *atproto.ATProtoBlobRef
452- if iconURL := annotations["io.atcr.icon"]; iconURL != "" {
453- avatarRef = s.fetchAndUploadIcon(ctx, iconURL)
454- }
455-456- // Create new repo page record with description and optional avatar
457- repoPage := atproto.NewRepoPageRecord(s.ctx.TargetRepo, description, avatarRef)
458-459- slog.Info("Creating repo page from manifest annotations", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "descriptionLength", len(description), "hasAvatar", avatarRef != nil)
460-461- _, err = s.ctx.GetATProtoClient().PutRecord(ctx, atproto.RepoPageCollection, rkey, repoPage)
462- if err != nil {
463- slog.Warn("Failed to create repo page", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo, "error", err)
464 return
465 }
466467- slog.Info("Repo page created successfully", "did", s.ctx.TargetOwnerDID, "repository", s.ctx.TargetRepo)
468-}
469-470-// fetchReadmeContent attempts to fetch README content from external sources
471-// Priority: io.atcr.readme annotation > derived from org.opencontainers.image.source
472-// Returns the raw markdown content, or empty string if not available
473-func (s *ManifestStore) fetchReadmeContent(ctx context.Context, annotations map[string]string) string {
474-475- // Create a context with timeout for README fetching (don't block push too long)
476- fetchCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
477- defer cancel()
478-479- // Priority 1: Direct README URL from io.atcr.readme annotation
480- if readmeURL := annotations["io.atcr.readme"]; readmeURL != "" {
481- content, err := s.fetchRawReadme(fetchCtx, readmeURL)
482- if err != nil {
483- slog.Debug("Failed to fetch README from io.atcr.readme annotation", "url", readmeURL, "error", err)
484- } else if content != "" {
485- slog.Info("Fetched README from io.atcr.readme annotation", "url", readmeURL, "length", len(content))
486- return content
487- }
488- }
489-490- // Priority 2: Derive README URL from org.opencontainers.image.source
491- if sourceURL := annotations["org.opencontainers.image.source"]; sourceURL != "" {
492- // Try main branch first, then master
493- for _, branch := range []string{"main", "master"} {
494- readmeURL := readme.DeriveReadmeURL(sourceURL, branch)
495- if readmeURL == "" {
496- continue
497- }
498-499- content, err := s.fetchRawReadme(fetchCtx, readmeURL)
500- if err != nil {
501- // Only log non-404 errors (404 is expected when trying main vs master)
502- if !readme.Is404(err) {
503- slog.Debug("Failed to fetch README from source URL", "url", readmeURL, "branch", branch, "error", err)
504- }
505- continue
506- }
507-508- if content != "" {
509- slog.Info("Fetched README from source URL", "sourceURL", sourceURL, "branch", branch, "length", len(content))
510- return content
511- }
512- }
513- }
514-515- return ""
516-}
517-518-// fetchRawReadme fetches raw markdown content from a URL
519-// Returns the raw markdown (not rendered HTML) for storage in the repo page record
520-func (s *ManifestStore) fetchRawReadme(ctx context.Context, readmeURL string) (string, error) {
521- // Use a simple HTTP client to fetch raw content
522- // We want raw markdown, not rendered HTML (the Fetcher renders to HTML)
523- req, err := http.NewRequestWithContext(ctx, "GET", readmeURL, nil)
524- if err != nil {
525- return "", fmt.Errorf("failed to create request: %w", err)
526- }
527-528- req.Header.Set("User-Agent", "ATCR-README-Fetcher/1.0")
529-530- client := &http.Client{
531- Timeout: 10 * time.Second,
532- CheckRedirect: func(req *http.Request, via []*http.Request) error {
533- if len(via) >= 5 {
534- return fmt.Errorf("too many redirects")
535- }
536- return nil
537- },
538- }
539-540- resp, err := client.Do(req)
541- if err != nil {
542- return "", fmt.Errorf("failed to fetch URL: %w", err)
543- }
544- defer resp.Body.Close()
545-546- if resp.StatusCode != http.StatusOK {
547- return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
548- }
549-550- // Limit content size to 100KB (repo page description has 100KB limit in lexicon)
551- limitedReader := io.LimitReader(resp.Body, 100*1024)
552- content, err := io.ReadAll(limitedReader)
553- if err != nil {
554- return "", fmt.Errorf("failed to read response body: %w", err)
555- }
556-557- return string(content), nil
558-}
559-560-// fetchAndUploadIcon fetches an image from a URL and uploads it as a blob to the user's PDS
561-// Returns the blob reference for use in the repo page record, or nil on error
562-func (s *ManifestStore) fetchAndUploadIcon(ctx context.Context, iconURL string) *atproto.ATProtoBlobRef {
563- // Create a context with timeout for icon fetching
564- fetchCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
565- defer cancel()
566-567- // Fetch the icon
568- req, err := http.NewRequestWithContext(fetchCtx, "GET", iconURL, nil)
569- if err != nil {
570- slog.Debug("Failed to create icon request", "url", iconURL, "error", err)
571- return nil
572- }
573-574- req.Header.Set("User-Agent", "ATCR-Icon-Fetcher/1.0")
575-576- client := &http.Client{
577- Timeout: 10 * time.Second,
578- CheckRedirect: func(req *http.Request, via []*http.Request) error {
579- if len(via) >= 5 {
580- return fmt.Errorf("too many redirects")
581- }
582- return nil
583- },
584- }
585-586- resp, err := client.Do(req)
587- if err != nil {
588- slog.Debug("Failed to fetch icon", "url", iconURL, "error", err)
589- return nil
590- }
591- defer resp.Body.Close()
592-593- if resp.StatusCode != http.StatusOK {
594- slog.Debug("Icon fetch returned non-OK status", "url", iconURL, "status", resp.StatusCode)
595- return nil
596- }
597-598- // Validate content type - only allow images
599- contentType := resp.Header.Get("Content-Type")
600- mimeType := detectImageMimeType(contentType, iconURL)
601- if mimeType == "" {
602- slog.Debug("Icon has unsupported content type", "url", iconURL, "contentType", contentType)
603- return nil
604- }
605-606- // Limit icon size to 3MB (matching lexicon maxSize)
607- limitedReader := io.LimitReader(resp.Body, 3*1024*1024)
608- iconData, err := io.ReadAll(limitedReader)
609- if err != nil {
610- slog.Debug("Failed to read icon data", "url", iconURL, "error", err)
611- return nil
612- }
613-614- if len(iconData) == 0 {
615- slog.Debug("Icon data is empty", "url", iconURL)
616- return nil
617- }
618-619- // Upload the icon as a blob to the user's PDS
620- blobRef, err := s.ctx.GetATProtoClient().UploadBlob(ctx, iconData, mimeType)
621- if err != nil {
622- slog.Warn("Failed to upload icon blob", "url", iconURL, "error", err)
623- return nil
624- }
625-626- slog.Info("Uploaded icon blob", "url", iconURL, "size", len(iconData), "mimeType", mimeType, "cid", blobRef.Ref.Link)
627- return blobRef
628-}
629-630-// detectImageMimeType determines the MIME type for an image
631-// Uses Content-Type header first, then falls back to extension-based detection
632-// Only allows types accepted by the lexicon: image/png, image/jpeg, image/webp
633-func detectImageMimeType(contentType, url string) string {
634- // Check Content-Type header first
635- switch {
636- case strings.HasPrefix(contentType, "image/png"):
637- return "image/png"
638- case strings.HasPrefix(contentType, "image/jpeg"):
639- return "image/jpeg"
640- case strings.HasPrefix(contentType, "image/webp"):
641- return "image/webp"
642- }
643-644- // Fall back to URL extension detection
645- lowerURL := strings.ToLower(url)
646- switch {
647- case strings.HasSuffix(lowerURL, ".png"):
648- return "image/png"
649- case strings.HasSuffix(lowerURL, ".jpg"), strings.HasSuffix(lowerURL, ".jpeg"):
650- return "image/jpeg"
651- case strings.HasSuffix(lowerURL, ".webp"):
652- return "image/webp"
653- }
654-655- // Unknown or unsupported type - reject
656- return ""
657}
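
The helpers removed above encoded a README source priority: the io.atcr.readme annotation first, then URLs derived from org.opencontainers.image.source for the main branch and then master. For reference while reviewing the removal, a minimal sketch of that ordering; the helper itself is illustrative and not part of this change, and deriveURL stands in for readme.DeriveReadmeURL as used above.

```go
// candidateReadmeURLs lists README URLs in the priority order the removed
// fetchReadmeContent used: the io.atcr.readme annotation first, then URLs
// derived from org.opencontainers.image.source for the main and master
// branches. deriveURL returns "" when no URL can be derived.
func candidateReadmeURLs(annotations map[string]string, deriveURL func(sourceURL, branch string) string) []string {
	var urls []string
	if u := annotations["io.atcr.readme"]; u != "" {
		urls = append(urls, u)
	}
	if src := annotations["org.opencontainers.image.source"]; src != "" {
		for _, branch := range []string{"main", "master"} {
			if u := deriveURL(src, branch); u != "" {
				urls = append(urls, u)
			}
		}
	}
	return urls
}
```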
···3import (
4 "bytes"
5 "context"
06 "encoding/json"
7 "errors"
8 "fmt"
···10 "log/slog"
11 "net/http"
12 "strings"
13+ "sync"
140015 "atcr.io/pkg/atproto"
016 "github.com/distribution/distribution/v3"
17 "github.com/opencontainers/go-digest"
18)
···20// ManifestStore implements distribution.ManifestService
21// It stores manifests in ATProto as records
22type ManifestStore struct {
23+ ctx *RegistryContext // Context with user/hold info
24+ mu sync.RWMutex // Protects lastFetchedHoldDID
25+ lastFetchedHoldDID string // Hold DID from most recently fetched manifest (for pull)
26 blobStore distribution.BlobStore // Blob store for fetching config during push
027}
2829// NewManifestStore creates a new ATProto-backed manifest store
30+func NewManifestStore(ctx *RegistryContext, blobStore distribution.BlobStore) *ManifestStore {
31 return &ManifestStore{
32+ ctx: ctx,
33 blobStore: blobStore,
034 }
35}
3637// Exists checks if a manifest exists by digest
38func (s *ManifestStore) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
39 rkey := digestToRKey(dgst)
40+ _, err := s.ctx.ATProtoClient.GetRecord(ctx, atproto.ManifestCollection, rkey)
41 if err != nil {
42 // If not found, return false without error
43 if errors.Is(err, atproto.ErrRecordNotFound) {
···51// Get retrieves a manifest by digest
52func (s *ManifestStore) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) {
53 rkey := digestToRKey(dgst)
54+ record, err := s.ctx.ATProtoClient.GetRecord(ctx, atproto.ManifestCollection, rkey)
55 if err != nil {
56 return nil, distribution.ErrManifestUnknownRevision{
57+ Name: s.ctx.Repository,
58 Revision: dgst,
59 }
60 }
6162+ var manifestRecord atproto.Manifest
63 if err := json.Unmarshal(record.Value, &manifestRecord); err != nil {
64 return nil, fmt.Errorf("failed to unmarshal manifest record: %w", err)
65 }
6667+ // Store the hold DID for subsequent blob requests during pull
68+ // Prefer HoldDid (new format) with fallback to HoldEndpoint (legacy URL format)
69+ // The routing repository will cache this for concurrent blob fetches
70+ s.mu.Lock()
71+ if manifestRecord.HoldDid != nil && *manifestRecord.HoldDid != "" {
72+ // New format: DID reference (preferred)
73+ s.lastFetchedHoldDID = *manifestRecord.HoldDid
74+ } else if manifestRecord.HoldEndpoint != nil && *manifestRecord.HoldEndpoint != "" {
75+ // Legacy format: URL reference - convert to DID
76+ s.lastFetchedHoldDID = atproto.ResolveHoldDIDFromURL(*manifestRecord.HoldEndpoint)
77+ }
78+ s.mu.Unlock()
79+80 var ociManifest []byte
8182 // New records: Download blob from ATProto blob storage
83+ if manifestRecord.ManifestBlob != nil && manifestRecord.ManifestBlob.Ref.Defined() {
84+ ociManifest, err = s.ctx.ATProtoClient.GetBlob(ctx, manifestRecord.ManifestBlob.Ref.String())
85 if err != nil {
86 return nil, fmt.Errorf("failed to download manifest blob: %w", err)
87 }
···8990 // Track pull count (increment asynchronously to avoid blocking the response)
91 // Only count GET requests (actual downloads), not HEAD requests (existence checks)
92+ if s.ctx.Database != nil {
93 // Check HTTP method from context (distribution library stores it as "http.request.method")
94 if method, ok := ctx.Value("http.request.method").(string); ok && method == "GET" {
95 go func() {
96+ if err := s.ctx.Database.IncrementPullCount(s.ctx.DID, s.ctx.Repository); err != nil {
97+ slog.Warn("Failed to increment pull count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err)
98 }
99 }()
100 }
···121 dgst := digest.FromBytes(payload)
122123 // Upload manifest as blob to PDS
124+ blobRef, err := s.ctx.ATProtoClient.UploadBlob(ctx, payload, mediaType)
125 if err != nil {
126 return "", fmt.Errorf("failed to upload manifest blob: %w", err)
127 }
128129 // Create manifest record with structured metadata
130+ manifestRecord, err := atproto.NewManifestRecord(s.ctx.Repository, dgst.String(), payload)
131 if err != nil {
132 return "", fmt.Errorf("failed to create manifest record: %w", err)
133 }
134135 // Set the blob reference, hold DID, and hold endpoint
136 manifestRecord.ManifestBlob = blobRef
137+ if s.ctx.HoldDID != "" {
138+ manifestRecord.HoldDid = &s.ctx.HoldDID // Primary reference (DID)
139+ }
140141 // Extract Dockerfile labels from config blob and add to annotations
142 // Only for image manifests (not manifest lists which don't have config blobs)
···163 if !exists {
164 platform := "unknown"
165 if ref.Platform != nil {
166+ platform = fmt.Sprintf("%s/%s", ref.Platform.Os, ref.Platform.Architecture)
167 }
168 slog.Warn("Manifest list references non-existent child manifest",
169+ "repository", s.ctx.Repository,
170 "missingDigest", ref.Digest,
171 "platform", platform)
172 return "", distribution.ErrManifestBlobUnknown{Digest: refDigest}
···174 }
175 }
176177+ // Note: Label extraction from config blob is currently disabled because the generated
178+ // Manifest_Annotations type doesn't support arbitrary keys. The lexicon schema would
179+ // need to use "unknown" type for annotations to support dynamic key-value pairs.
180+ // TODO: Update lexicon schema if label extraction is needed.
181+ _ = isManifestList // silence unused variable warning for now
0000000000000000000182183 // Store manifest record in ATProto
184 rkey := digestToRKey(dgst)
185+ _, err = s.ctx.ATProtoClient.PutRecord(ctx, atproto.ManifestCollection, rkey, manifestRecord)
186 if err != nil {
187 return "", fmt.Errorf("failed to store manifest record in ATProto: %w", err)
188 }
189190 // Track push count (increment asynchronously to avoid blocking the response)
191+ if s.ctx.Database != nil {
192 go func() {
193+ if err := s.ctx.Database.IncrementPushCount(s.ctx.DID, s.ctx.Repository); err != nil {
194+ slog.Warn("Failed to increment push count", "did", s.ctx.DID, "repository", s.ctx.Repository, "error", err)
195 }
196 }()
197 }
···201 for _, option := range options {
202 if tagOpt, ok := option.(distribution.WithTagOption); ok {
203 tag = tagOpt.Tag
204+ tagRecord := atproto.NewTagRecord(s.ctx.ATProtoClient.DID(), s.ctx.Repository, tag, dgst.String())
205+ tagRKey := atproto.RepositoryTagToRKey(s.ctx.Repository, tag)
206+ _, err = s.ctx.ATProtoClient.PutRecord(ctx, atproto.TagCollection, tagRKey, tagRecord)
207 if err != nil {
208 return "", fmt.Errorf("failed to store tag in ATProto: %w", err)
209 }
···212213 // Notify hold about manifest upload (for layer tracking and Bluesky posts)
214 // Do this asynchronously to avoid blocking the push
215+ if tag != "" && s.ctx.ServiceToken != "" && s.ctx.Handle != "" {
216+ go func() {
00217 defer func() {
218 if r := recover(); r != nil {
219 slog.Error("Panic in notifyHoldAboutManifest", "panic", r)
220 }
221 }()
222+ if err := s.notifyHoldAboutManifest(context.Background(), manifestRecord, tag, dgst.String()); err != nil {
223 slog.Warn("Failed to notify hold about manifest", "error", err)
224 }
225+ }()
226 }
227228+ // Refresh README cache asynchronously if manifest has io.atcr.readme annotation
229+ // This ensures fresh README content is available on repository pages
230 go func() {
231 defer func() {
232 if r := recover(); r != nil {
233+ slog.Error("Panic in refreshReadmeCache", "panic", r)
234 }
235 }()
236+ s.refreshReadmeCache(context.Background(), manifestRecord)
237 }()
238239 return dgst, nil
···242// Delete removes a manifest
243func (s *ManifestStore) Delete(ctx context.Context, dgst digest.Digest) error {
244 rkey := digestToRKey(dgst)
245+ return s.ctx.ATProtoClient.DeleteRecord(ctx, atproto.ManifestCollection, rkey)
246}
247248// digestToRKey converts a digest to an ATProto record key
···252 return dgst.Encoded()
253}
254255+// GetLastFetchedHoldDID returns the hold DID from the most recently fetched manifest
256+// This is used by the routing repository to cache the hold for blob requests
257+func (s *ManifestStore) GetLastFetchedHoldDID() string {
258+ s.mu.RLock()
259+ defer s.mu.RUnlock()
260+ return s.lastFetchedHoldDID
261+}
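
Get above records the serving hold under a mutex so the routing layer can reuse it for blob requests in the same pull. A sketch of that consumer side, meant to sit next to the code shown; the helper name and the defaultHoldDID fallback are assumptions, not the PR's actual routing code.

```go
// resolveHoldForPull is a hypothetical helper showing how a caller can use
// GetLastFetchedHoldDID: fetch the manifest first, then route subsequent blob
// requests to whichever hold the manifest record pointed at, falling back to
// the requester's default hold when the record carried none.
func resolveHoldForPull(ctx context.Context, manifests *ManifestStore, dgst digest.Digest, defaultHoldDID string) (string, error) {
	if _, err := manifests.Get(ctx, dgst); err != nil {
		return "", err
	}
	if holdDID := manifests.GetLastFetchedHoldDID(); holdDID != "" {
		return holdDID, nil
	}
	return defaultHoldDID, nil
}
```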
262+263// rawManifest is a simple implementation of distribution.Manifest
264type rawManifest struct {
265 mediaType string
···305306// notifyHoldAboutManifest notifies the hold service about a manifest upload
307// This enables the hold to create layer records and Bluesky posts
308+func (s *ManifestStore) notifyHoldAboutManifest(ctx context.Context, manifestRecord *atproto.Manifest, tag, manifestDigest string) error {
309+ // Skip if no service token configured (e.g., anonymous pulls)
310+ if s.ctx.ServiceToken == "" {
311 return nil
312 }
313314 // Resolve hold DID to HTTP endpoint
315 // For did:web, this is straightforward (e.g., did:web:hold01.atcr.io → https://hold01.atcr.io)
316+ holdEndpoint := atproto.ResolveHoldURL(s.ctx.HoldDID)
317318+ // Use service token from middleware (already cached and validated)
319+ serviceToken := s.ctx.ServiceToken
320321 // Build notification request
322 manifestData := map[string]any{
···355 }
356 if m.Platform != nil {
357 mData["platform"] = map[string]any{
358+ "os": m.Platform.Os,
359 "architecture": m.Platform.Architecture,
360 }
361 }
···365 }
366367 notifyReq := map[string]any{
368+ "repository": s.ctx.Repository,
369 "tag": tag,
370+ "userDid": s.ctx.DID,
371+ "userHandle": s.ctx.Handle,
372 "manifest": manifestData,
373 }
374···406 // Parse response (optional logging)
407 var notifyResp map[string]any
408 if err := json.NewDecoder(resp.Body).Decode(&notifyResp); err == nil {
409+ slog.Info("Hold notification successful", "repository", s.ctx.Repository, "tag", tag, "response", notifyResp)
410 }
411412 return nil
413}
414415+// refreshReadmeCache refreshes the README cache for this manifest if it has io.atcr.readme annotation
416+// This should be called asynchronously after manifest push to keep README content fresh
417+// NOTE: Currently disabled because the generated Manifest_Annotations type doesn't support
418+// arbitrary key-value pairs. Would need to update lexicon schema with "unknown" type.
419+func (s *ManifestStore) refreshReadmeCache(ctx context.Context, manifestRecord *atproto.Manifest) {
420+ // Skip if no README cache configured
421+ if s.ctx.ReadmeCache == nil {
00000000000000000000000000000000000000000422 return
423 }
424425+ // TODO: Re-enable once lexicon supports annotations as map[string]string
426+ // The generated Manifest_Annotations is an empty struct that doesn't support map access.
427+ // For now, README cache refresh on push is disabled.
428+ _ = manifestRecord // silence unused variable warning
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000429}
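
The TODO above gates README refresh on the lexicon exposing annotations as free-form key/value pairs. A hedged sketch of what a re-enabled body could look like under that assumption; both the map-typed annotations parameter and the ReadmeCache.Refresh call are hypothetical, since neither appears in this diff.

```go
// Hypothetical re-enabled refresh, assuming the lexicon is regenerated so that
// manifest annotations are available as map[string]string and ReadmeCache
// exposes a Refresh method (both are assumptions for illustration only).
func (s *ManifestStore) refreshReadmeCacheFromAnnotations(ctx context.Context, annotations map[string]string) {
	if s.ctx.ReadmeCache == nil {
		return
	}
	readmeURL := annotations["io.atcr.readme"]
	if readmeURL == "" {
		return // nothing to refresh for this manifest
	}
	if err := s.ctx.ReadmeCache.Refresh(ctx, s.ctx.DID, s.ctx.Repository, readmeURL); err != nil {
		slog.Warn("Failed to refresh README cache", "repository", s.ctx.Repository, "error", err)
	}
}
```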
···54// GetProfile retrieves the user's profile from their PDS
55// Returns nil if profile doesn't exist
56// Automatically migrates old URL-based defaultHold values to DIDs
57-func GetProfile(ctx context.Context, client *atproto.Client) (*atproto.SailorProfileRecord, error) {
58 record, err := client.GetRecord(ctx, atproto.SailorProfileCollection, ProfileRKey)
59 if err != nil {
60 // Check if it's a 404 (profile doesn't exist)
···65 }
6667 // Parse the profile record
68- var profile atproto.SailorProfileRecord
69 if err := json.Unmarshal(record.Value, &profile); err != nil {
70 return nil, fmt.Errorf("failed to parse profile: %w", err)
71 }
7273 // Migrate old URL-based defaultHold to DID format
74 // This ensures backward compatibility with profiles created before DID migration
75- if profile.DefaultHold != "" && !atproto.IsDID(profile.DefaultHold) {
76 // Convert URL to DID transparently
77- migratedDID := atproto.ResolveHoldDIDFromURL(profile.DefaultHold)
78- profile.DefaultHold = migratedDID
7980 // Persist the migration to PDS in a background goroutine
81 // Use a lock to ensure only one goroutine migrates this DID
···94 defer cancel()
9596 // Update the profile on the PDS
97- profile.UpdatedAt = time.Now()
098 if err := UpdateProfile(ctx, client, &profile); err != nil {
99 slog.Warn("Failed to persist URL-to-DID migration", "component", "profile", "did", did, "error", err)
100 } else {
···109110// UpdateProfile updates the user's profile
111// Normalizes defaultHold to DID format before saving
112-func UpdateProfile(ctx context.Context, client *atproto.Client, profile *atproto.SailorProfileRecord) error {
113 // Normalize defaultHold to DID if it's a URL
114 // This ensures we always store DIDs, even if user provides a URL
115- if profile.DefaultHold != "" && !atproto.IsDID(profile.DefaultHold) {
116- profile.DefaultHold = atproto.ResolveHoldDIDFromURL(profile.DefaultHold)
117- slog.Debug("Normalized defaultHold to DID", "component", "profile", "default_hold", profile.DefaultHold)
0118 }
119120 _, err := client.PutRecord(ctx, atproto.SailorProfileCollection, ProfileRKey, profile)
···54// GetProfile retrieves the user's profile from their PDS
55// Returns nil if profile doesn't exist
56// Automatically migrates old URL-based defaultHold values to DIDs
57+func GetProfile(ctx context.Context, client *atproto.Client) (*atproto.SailorProfile, error) {
58 record, err := client.GetRecord(ctx, atproto.SailorProfileCollection, ProfileRKey)
59 if err != nil {
60 // Check if it's a 404 (profile doesn't exist)
···65 }
6667 // Parse the profile record
68+ var profile atproto.SailorProfile
69 if err := json.Unmarshal(record.Value, &profile); err != nil {
70 return nil, fmt.Errorf("failed to parse profile: %w", err)
71 }
7273 // Migrate old URL-based defaultHold to DID format
74 // This ensures backward compatibility with profiles created before DID migration
75+ if profile.DefaultHold != nil && *profile.DefaultHold != "" && !atproto.IsDID(*profile.DefaultHold) {
76 // Convert URL to DID transparently
77+ migratedDID := atproto.ResolveHoldDIDFromURL(*profile.DefaultHold)
78+ profile.DefaultHold = &migratedDID
7980 // Persist the migration to PDS in a background goroutine
81 // Use a lock to ensure only one goroutine migrates this DID
···94 defer cancel()
9596 // Update the profile on the PDS
97+ now := time.Now().Format(time.RFC3339)
98+ profile.UpdatedAt = &now
99 if err := UpdateProfile(ctx, client, &profile); err != nil {
100 slog.Warn("Failed to persist URL-to-DID migration", "component", "profile", "did", did, "error", err)
101 } else {
···110111// UpdateProfile updates the user's profile
112// Normalizes defaultHold to DID format before saving
113+func UpdateProfile(ctx context.Context, client *atproto.Client, profile *atproto.SailorProfile) error {
114 // Normalize defaultHold to DID if it's a URL
115 // This ensures we always store DIDs, even if user provides a URL
116+ if profile.DefaultHold != nil && *profile.DefaultHold != "" && !atproto.IsDID(*profile.DefaultHold) {
117+ normalized := atproto.ResolveHoldDIDFromURL(*profile.DefaultHold)
118+ profile.DefaultHold = &normalized
119+ slog.Debug("Normalized defaultHold to DID", "component", "profile", "default_hold", normalized)
120 }
121122 _, err := client.PutRecord(ctx, atproto.SailorProfileCollection, ProfileRKey, profile)
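
For reviewers of the pointer-field migration, a short usage sketch of the new API: DefaultHold is now *string on atproto.SailorProfile, and UpdateProfile normalizes a URL value to a DID before writing. The function below is illustrative, lives alongside GetProfile/UpdateProfile, and the hold value is a placeholder.

```go
// setDefaultHoldSketch reads the profile, points DefaultHold at a new hold
// (a URL is accepted and normalized to did:web form by UpdateProfile), and
// writes it back. Illustrative only.
func setDefaultHoldSketch(ctx context.Context, client *atproto.Client, holdURLOrDID string) error {
	profile, err := GetProfile(ctx, client)
	if err != nil {
		return err
	}
	if profile == nil {
		// No profile yet; a real caller would also populate CreatedAt and friends.
		profile = &atproto.SailorProfile{LexiconTypeID: atproto.SailorProfileCollection}
	}
	profile.DefaultHold = &holdURLOrDID
	return UpdateProfile(ctx, client, profile)
}
```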
···3940 for _, tt := range tests {
41 t.Run(tt.name, func(t *testing.T) {
42+ var createdProfile *atproto.SailorProfile
4344 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
45 // First request: GetRecord (should 404)
···95 t.Fatal("Profile was not created")
96 }
9798+ if createdProfile.LexiconTypeID != atproto.SailorProfileCollection {
99+ t.Errorf("LexiconTypeID = %v, want %v", createdProfile.LexiconTypeID, atproto.SailorProfileCollection)
100 }
101102+ gotDefaultHold := ""
103+ if createdProfile.DefaultHold != nil {
104+ gotDefaultHold = *createdProfile.DefaultHold
105+ }
106+ if gotDefaultHold != tt.wantNormalized {
107+ t.Errorf("DefaultHold = %v, want %v", gotDefaultHold, tt.wantNormalized)
108 }
109 })
110 }
···158 name string
159 serverResponse string
160 serverStatus int
161+ wantProfile *atproto.SailorProfile
162 wantNil bool
163 wantErr bool
164 expectMigration bool // Whether URL-to-DID migration should happen
···269 }
270271 // Check that defaultHold is migrated to DID in returned profile
272+ gotDefaultHold := ""
273+ if profile.DefaultHold != nil {
274+ gotDefaultHold = *profile.DefaultHold
275+ }
276+ if gotDefaultHold != tt.expectedHoldDID {
277+ t.Errorf("DefaultHold = %v, want %v", gotDefaultHold, tt.expectedHoldDID)
278 }
279280 if tt.expectMigration {
···374 }
375}
376377+// testSailorProfile creates a test profile with the given default hold
378+func testSailorProfile(defaultHold string) *atproto.SailorProfile {
379+ now := time.Now().Format(time.RFC3339)
380+ profile := &atproto.SailorProfile{
381+ LexiconTypeID: atproto.SailorProfileCollection,
382+ CreatedAt: now,
383+ UpdatedAt: &now,
384+ }
385+ if defaultHold != "" {
386+ profile.DefaultHold = &defaultHold
387+ }
388+ return profile
389+}
390+391// TestUpdateProfile tests updating a user's profile
392func TestUpdateProfile(t *testing.T) {
393 tests := []struct {
394 name string
395+ profile *atproto.SailorProfile
396 wantNormalized string // Expected defaultHold after normalization
397 wantErr bool
398 }{
399 {
400+ name: "update with DID",
401+ profile: testSailorProfile("did:web:hold02.atcr.io"),
00000402 wantNormalized: "did:web:hold02.atcr.io",
403 wantErr: false,
404 },
405 {
406+ name: "update with URL - should normalize",
407+ profile: testSailorProfile("https://hold02.atcr.io"),
00000408 wantNormalized: "did:web:hold02.atcr.io",
409 wantErr: false,
410 },
411 {
412+ name: "clear default hold",
413+ profile: testSailorProfile(""),
00000414 wantNormalized: "",
415 wantErr: false,
416 },
···461 }
462463 // Verify normalization also updated the profile object
464+ gotProfileHold := ""
465+ if tt.profile.DefaultHold != nil {
466+ gotProfileHold = *tt.profile.DefaultHold
467+ }
468+ if gotProfileHold != tt.wantNormalized {
469+ t.Errorf("profile.DefaultHold = %v, want %v (should be updated in-place)", gotProfileHold, tt.wantNormalized)
470 }
471 }
472 })
···550 t.Fatalf("GetProfile() error = %v", err)
551 }
552553+ if profile.DefaultHold != nil && *profile.DefaultHold != "" {
554+ t.Errorf("DefaultHold = %v, want empty or nil", profile.DefaultHold)
555 }
556}
557···564 defer server.Close()
565566 client := atproto.NewClient(server.URL, "did:plc:test123", "test-token")
567+ profile := testSailorProfile("did:web:hold01.atcr.io")
00000568569 err := UpdateProfile(context.Background(), client, profile)
570
+28-26
pkg/appview/storage/proxy_blob_store.go
···12 "time"
1314 "atcr.io/pkg/atproto"
15- "atcr.io/pkg/auth"
16 "github.com/distribution/distribution/v3"
17 "github.com/distribution/distribution/v3/registry/api/errcode"
18 "github.com/opencontainers/go-digest"
···3334// ProxyBlobStore proxies blob requests to an external storage service
35type ProxyBlobStore struct {
36- ctx *auth.UserContext // User context with identity, target, permissions
37- holdURL string // Resolved HTTP URL for XRPC requests
38 httpClient *http.Client
39}
4041// NewProxyBlobStore creates a new proxy blob store
42-func NewProxyBlobStore(userCtx *auth.UserContext) *ProxyBlobStore {
43 // Resolve DID to URL once at construction time
44- holdURL := atproto.ResolveHoldURL(userCtx.TargetHoldDID)
4546- slog.Debug("NewProxyBlobStore created", "component", "proxy_blob_store", "hold_did", userCtx.TargetHoldDID, "hold_url", holdURL, "user_did", userCtx.TargetOwnerDID, "repo", userCtx.TargetRepo)
4748 return &ProxyBlobStore{
49- ctx: userCtx,
50 holdURL: holdURL,
51 httpClient: &http.Client{
52 Timeout: 5 * time.Minute, // Timeout for presigned URL requests and uploads
···62}
6364// doAuthenticatedRequest performs an HTTP request with service token authentication
65-// Uses the service token from UserContext to authenticate requests to the hold service
66func (p *ProxyBlobStore) doAuthenticatedRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
67- // Get service token from UserContext (lazy-loaded and cached per holdDID)
68- serviceToken, err := p.ctx.GetServiceToken(ctx)
69- if err != nil {
70- slog.Error("Failed to get service token", "component", "proxy_blob_store", "did", p.ctx.DID, "error", err)
71- return nil, fmt.Errorf("failed to get service token: %w", err)
72- }
73- if serviceToken == "" {
74 // Should never happen - middleware validates OAuth before handlers run
75 slog.Error("No service token in context", "component", "proxy_blob_store", "did", p.ctx.DID)
76 return nil, fmt.Errorf("no service token available (middleware should have validated)")
77 }
7879 // Add Bearer token to Authorization header
80- req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", serviceToken))
8182 return p.httpClient.Do(req)
83}
8485// checkReadAccess validates that the user has read access to blobs in this hold
86func (p *ProxyBlobStore) checkReadAccess(ctx context.Context) error {
87- canRead, err := p.ctx.CanRead(ctx)
00088 if err != nil {
89 return fmt.Errorf("authorization check failed: %w", err)
90 }
91- if !canRead {
92 // Return 403 Forbidden instead of masquerading as missing blob
93 return errcode.ErrorCodeDenied.WithMessage("read access denied")
94 }
···9798// checkWriteAccess validates that the user has write access to blobs in this hold
99func (p *ProxyBlobStore) checkWriteAccess(ctx context.Context) error {
100- slog.Debug("Checking write access", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.TargetHoldDID)
101- canWrite, err := p.ctx.CanWrite(ctx)
0000102 if err != nil {
103 slog.Error("Authorization check error", "component", "proxy_blob_store", "error", err)
104 return fmt.Errorf("authorization check failed: %w", err)
105 }
106- if !canWrite {
107- slog.Warn("Write access denied", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.TargetHoldDID)
108- return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("write access denied to hold %s", p.ctx.TargetHoldDID))
109 }
110- slog.Debug("Write access allowed", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.TargetHoldDID)
111 return nil
112}
113···354// getPresignedURL returns the XRPC endpoint URL for blob operations
355func (p *ProxyBlobStore) getPresignedURL(ctx context.Context, operation string, dgst digest.Digest) (string, error) {
356 // Use XRPC endpoint: /xrpc/com.atproto.sync.getBlob?did={userDID}&cid={digest}
357- // The 'did' parameter is the TARGET OWNER's DID (whose blob we're fetching), not the hold service DID
358 // Per migration doc: hold accepts OCI digest directly as cid parameter (checks for sha256: prefix)
359 xrpcURL := fmt.Sprintf("%s%s?did=%s&cid=%s&method=%s",
360- p.holdURL, atproto.SyncGetBlob, p.ctx.TargetOwnerDID, dgst.String(), operation)
361362 req, err := http.NewRequestWithContext(ctx, "GET", xrpcURL, nil)
363 if err != nil {
···12 "time"
1314 "atcr.io/pkg/atproto"
015 "github.com/distribution/distribution/v3"
16 "github.com/distribution/distribution/v3/registry/api/errcode"
17 "github.com/opencontainers/go-digest"
···3233// ProxyBlobStore proxies blob requests to an external storage service
34type ProxyBlobStore struct {
35+ ctx *RegistryContext // All context and services
36+ holdURL string // Resolved HTTP URL for XRPC requests
37 httpClient *http.Client
38}
3940// NewProxyBlobStore creates a new proxy blob store
41+func NewProxyBlobStore(ctx *RegistryContext) *ProxyBlobStore {
42 // Resolve DID to URL once at construction time
43+ holdURL := atproto.ResolveHoldURL(ctx.HoldDID)
4445+ slog.Debug("NewProxyBlobStore created", "component", "proxy_blob_store", "hold_did", ctx.HoldDID, "hold_url", holdURL, "user_did", ctx.DID, "repo", ctx.Repository)
4647 return &ProxyBlobStore{
48+ ctx: ctx,
49 holdURL: holdURL,
50 httpClient: &http.Client{
51 Timeout: 5 * time.Minute, // Timeout for presigned URL requests and uploads
···61}
6263// doAuthenticatedRequest performs an HTTP request with service token authentication
64+// Uses the service token from middleware to authenticate requests to the hold service
65func (p *ProxyBlobStore) doAuthenticatedRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
66+ // Use service token that middleware already validated and cached
67+ // Middleware fails fast with HTTP 401 if OAuth session is invalid
68+ if p.ctx.ServiceToken == "" {
000069 // Should never happen - middleware validates OAuth before handlers run
70 slog.Error("No service token in context", "component", "proxy_blob_store", "did", p.ctx.DID)
71 return nil, fmt.Errorf("no service token available (middleware should have validated)")
72 }
7374 // Add Bearer token to Authorization header
75+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", p.ctx.ServiceToken))
7677 return p.httpClient.Do(req)
78}
7980// checkReadAccess validates that the user has read access to blobs in this hold
81func (p *ProxyBlobStore) checkReadAccess(ctx context.Context) error {
82+ if p.ctx.Authorizer == nil {
83+ return nil // No authorization check if authorizer not configured
84+ }
85+ allowed, err := p.ctx.Authorizer.CheckReadAccess(ctx, p.ctx.HoldDID, p.ctx.DID)
86 if err != nil {
87 return fmt.Errorf("authorization check failed: %w", err)
88 }
89+ if !allowed {
90 // Return 403 Forbidden instead of masquerading as missing blob
91 return errcode.ErrorCodeDenied.WithMessage("read access denied")
92 }
···9596// checkWriteAccess validates that the user has write access to blobs in this hold
97func (p *ProxyBlobStore) checkWriteAccess(ctx context.Context) error {
98+ if p.ctx.Authorizer == nil {
99+ return nil // No authorization check if authorizer not configured
100+ }
101+102+ slog.Debug("Checking write access", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID)
103+ allowed, err := p.ctx.Authorizer.CheckWriteAccess(ctx, p.ctx.HoldDID, p.ctx.DID)
104 if err != nil {
105 slog.Error("Authorization check error", "component", "proxy_blob_store", "error", err)
106 return fmt.Errorf("authorization check failed: %w", err)
107 }
108+ if !allowed {
109+ slog.Warn("Write access denied", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID)
110+ return errcode.ErrorCodeDenied.WithMessage(fmt.Sprintf("write access denied to hold %s", p.ctx.HoldDID))
111 }
112+ slog.Debug("Write access allowed", "component", "proxy_blob_store", "user_did", p.ctx.DID, "hold_did", p.ctx.HoldDID)
113 return nil
114}
115···356// getPresignedURL returns the XRPC endpoint URL for blob operations
357func (p *ProxyBlobStore) getPresignedURL(ctx context.Context, operation string, dgst digest.Digest) (string, error) {
358 // Use XRPC endpoint: /xrpc/com.atproto.sync.getBlob?did={userDID}&cid={digest}
359+ // The 'did' parameter is the USER's DID (whose blob we're fetching), not the hold service DID
360 // Per migration doc: hold accepts OCI digest directly as cid parameter (checks for sha256: prefix)
361 xrpcURL := fmt.Sprintf("%s%s?did=%s&cid=%s&method=%s",
362+ p.holdURL, atproto.SyncGetBlob, p.ctx.DID, dgst.String(), operation)
363364 req, err := http.NewRequestWithContext(ctx, "GET", xrpcURL, nil)
365 if err != nil {
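
The comment change above is worth calling out: the did query parameter is the blob owner's DID, not the hold service's. As a reference, an equivalent construction of the same URL using net/url escaping (the store itself builds it with fmt.Sprintf as shown; this sketch assumes "net/url" is added to the import block, and url.Values sorts the keys, which is harmless for a query string).

```go
// buildGetBlobURL assembles the same com.atproto.sync.getBlob query that
// getPresignedURL issues, using net/url for escaping. Sketch only.
func buildGetBlobURL(holdURL, ownerDID string, dgst digest.Digest, operation string) string {
	q := url.Values{}
	q.Set("did", ownerDID)      // the user's DID (whose blob is being fetched), not the hold's
	q.Set("cid", dgst.String()) // OCI digest passed directly as the cid (sha256:... form)
	q.Set("method", operation)
	return holdURL + atproto.SyncGetBlob + "?" + q.Encode()
}
```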
+420-78
pkg/appview/storage/proxy_blob_store_test.go
···1package storage
23import (
04 "encoding/base64"
05 "fmt"
006 "strings"
7 "testing"
8 "time"
910 "atcr.io/pkg/atproto"
11- "atcr.io/pkg/auth"
012)
1314-// TestGetServiceToken_CachingLogic tests the global service token caching mechanism
15-// These tests use the global auth cache functions directly
16func TestGetServiceToken_CachingLogic(t *testing.T) {
17- userDID := "did:plc:cache-test"
18 holdDID := "did:web:hold.example.com"
1920 // Test 1: Empty cache - invalidate any existing token
21- auth.InvalidateServiceToken(userDID, holdDID)
22- cachedToken, _ := auth.GetServiceToken(userDID, holdDID)
23 if cachedToken != "" {
24 t.Error("Expected empty cache at start")
25 }
2627 // Test 2: Insert token into cache
28 // Create a JWT-like token with exp claim for testing
029 testPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(50*time.Second).Unix())
30 testToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature"
3132- err := auth.SetServiceToken(userDID, holdDID, testToken)
33 if err != nil {
34 t.Fatalf("Failed to set service token: %v", err)
35 }
3637 // Test 3: Retrieve from cache
38- cachedToken, expiresAt := auth.GetServiceToken(userDID, holdDID)
39 if cachedToken == "" {
40 t.Fatal("Expected token to be in cache")
41 }
···51 // Test 4: Expired token - GetServiceToken automatically removes it
52 expiredPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(-1*time.Hour).Unix())
53 expiredToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(expiredPayload) + ".signature"
54- auth.SetServiceToken(userDID, holdDID, expiredToken)
5556 // GetServiceToken should return empty string for expired token
57- cachedToken, _ = auth.GetServiceToken(userDID, holdDID)
58 if cachedToken != "" {
59 t.Error("Expected expired token to be removed from cache")
60 }
···65 return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(data)), "=")
66}
6768-// mockUserContextForProxy creates a mock auth.UserContext for proxy blob store testing.
69-// It sets up both the user identity and target info, and configures test helpers
70-// to bypass network calls.
71-func mockUserContextForProxy(did, holdDID, pdsEndpoint, repository string) *auth.UserContext {
72- userCtx := auth.NewUserContext(did, "oauth", "PUT", nil)
73- userCtx.SetTarget(did, "test.handle", pdsEndpoint, repository, holdDID)
000000000000000000000000000000000007475- // Bypass PDS resolution (avoids network calls)
76- userCtx.SetPDSForTest("test.handle", pdsEndpoint)
00000007778- // Set up mock authorizer that allows access
79- userCtx.SetAuthorizerForTest(auth.NewMockHoldAuthorizer())
8081- // Set default hold DID for push resolution
82- userCtx.SetDefaultHoldDIDForTest(holdDID)
0008384- return userCtx
0000000000085}
8687-// mockUserContextForProxyWithToken creates a mock UserContext with a pre-populated service token.
88-func mockUserContextForProxyWithToken(did, holdDID, pdsEndpoint, repository, serviceToken string) *auth.UserContext {
89- userCtx := mockUserContextForProxy(did, holdDID, pdsEndpoint, repository)
90- userCtx.SetServiceTokenForTest(holdDID, serviceToken)
91- return userCtx
0000000000000000000000000000000000000000092}
9394-// TestResolveHoldURL tests DID to URL conversion (pure function)
95func TestResolveHoldURL(t *testing.T) {
96 tests := []struct {
97 name string
···99 expected string
100 }{
101 {
102- name: "did:web with http (localhost)",
103 holdDID: "did:web:localhost:8080",
104 expected: "http://localhost:8080",
105 },
···127128// TestServiceTokenCacheExpiry tests that expired cached tokens are not used
129func TestServiceTokenCacheExpiry(t *testing.T) {
130- userDID := "did:plc:expiry-test"
131 holdDID := "did:web:hold.example.com"
132133 // Insert expired token
134 expiredPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(-1*time.Hour).Unix())
135 expiredToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(expiredPayload) + ".signature"
136- auth.SetServiceToken(userDID, holdDID, expiredToken)
137138 // GetServiceToken should automatically remove expired tokens
139- cachedToken, expiresAt := auth.GetServiceToken(userDID, holdDID)
140141 // Should return empty string for expired token
142 if cachedToken != "" {
···171172// TestNewProxyBlobStore tests ProxyBlobStore creation
173func TestNewProxyBlobStore(t *testing.T) {
174- userCtx := mockUserContextForProxy(
175- "did:plc:test",
176- "did:web:hold.example.com",
177- "https://pds.example.com",
178- "test-repo",
179- )
180181- store := NewProxyBlobStore(userCtx)
182183 if store == nil {
184 t.Fatal("Expected non-nil ProxyBlobStore")
185 }
186187- if store.ctx != userCtx {
188 t.Error("Expected context to be set")
189 }
190···209210 testPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(50*time.Second).Unix())
211 testTokenStr := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature"
212- auth.SetServiceToken(userDID, holdDID, testTokenStr)
213214 for b.Loop() {
215- cachedToken, expiresAt := auth.GetServiceToken(userDID, holdDID)
216217 if cachedToken == "" || time.Now().After(expiresAt) {
218 b.Error("Cache miss in benchmark")
···220 }
221}
222223-// TestParseJWTExpiry tests JWT expiry parsing
224-func TestParseJWTExpiry(t *testing.T) {
225- // Create a JWT with known expiry
226- futureTime := time.Now().Add(1 * time.Hour).Unix()
227- testPayload := fmt.Sprintf(`{"exp":%d}`, futureTime)
228- testToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature"
00000000000000000229230- expiry, err := auth.ParseJWTExpiry(testToken)
0000000000000000231 if err != nil {
232- t.Fatalf("ParseJWTExpiry failed: %v", err)
233 }
234235- // Verify expiry is close to what we set (within 1 second tolerance)
236- expectedExpiry := time.Unix(futureTime, 0)
237- diff := expiry.Sub(expectedExpiry)
238- if diff < -time.Second || diff > time.Second {
239- t.Errorf("Expiry mismatch: expected %v, got %v", expectedExpiry, expiry)
00000000000000000000000000000000240 }
241}
242243-// TestParseJWTExpiry_InvalidToken tests error handling for invalid tokens
244-func TestParseJWTExpiry_InvalidToken(t *testing.T) {
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000245 tests := []struct {
246- name string
247- token string
0248 }{
249- {"empty token", ""},
250- {"single part", "header"},
251- {"two parts", "header.payload"},
252- {"invalid base64 payload", "header.!!!.signature"},
253- {"missing exp claim", "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(`{"sub":"test"}`) + ".sig"},
00000000000000000000000000254 }
255256 for _, tt := range tests {
257 t.Run(tt.name, func(t *testing.T) {
258- _, err := auth.ParseJWTExpiry(tt.token)
259- if err == nil {
260- t.Error("Expected error for invalid token")
000000000000000000000000000000000000261 }
262 })
263 }
264}
265-266-// Note: Tests for doAuthenticatedRequest, Get, Open, completeMultipartUpload, etc.
267-// require complex dependency mocking (OAuth refresher, PDS resolution, HoldAuthorizer).
268-// These should be tested at the integration level with proper infrastructure.
269-//
270-// The current unit tests cover:
271-// - Global service token cache (auth.GetServiceToken, auth.SetServiceToken, etc.)
272-// - URL resolution (atproto.ResolveHoldURL)
273-// - JWT parsing (auth.ParseJWTExpiry)
274-// - Store construction (NewProxyBlobStore)
···1package storage
23import (
4+ "context"
5 "encoding/base64"
6+ "encoding/json"
7 "fmt"
8+ "net/http"
9+ "net/http/httptest"
10 "strings"
11 "testing"
12 "time"
1314 "atcr.io/pkg/atproto"
15+ "atcr.io/pkg/auth/token"
16+ "github.com/opencontainers/go-digest"
17)
1819+// TestGetServiceToken_CachingLogic tests the token caching mechanism
020func TestGetServiceToken_CachingLogic(t *testing.T) {
21+ userDID := "did:plc:test"
22 holdDID := "did:web:hold.example.com"
2324 // Test 1: Empty cache - invalidate any existing token
25+ token.InvalidateServiceToken(userDID, holdDID)
26+ cachedToken, _ := token.GetServiceToken(userDID, holdDID)
27 if cachedToken != "" {
28 t.Error("Expected empty cache at start")
29 }
3031 // Test 2: Insert token into cache
32 // Create a JWT-like token with exp claim for testing
33+ // Format: header.payload.signature where payload has exp claim
34 testPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(50*time.Second).Unix())
35 testToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature"
3637+ err := token.SetServiceToken(userDID, holdDID, testToken)
38 if err != nil {
39 t.Fatalf("Failed to set service token: %v", err)
40 }
4142 // Test 3: Retrieve from cache
43+ cachedToken, expiresAt := token.GetServiceToken(userDID, holdDID)
44 if cachedToken == "" {
45 t.Fatal("Expected token to be in cache")
46 }
···56 // Test 4: Expired token - GetServiceToken automatically removes it
57 expiredPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(-1*time.Hour).Unix())
58 expiredToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(expiredPayload) + ".signature"
59+ token.SetServiceToken(userDID, holdDID, expiredToken)
6061 // GetServiceToken should return empty string for expired token
62+ cachedToken, _ = token.GetServiceToken(userDID, holdDID)
63 if cachedToken != "" {
64 t.Error("Expected expired token to be removed from cache")
65 }
···70 return strings.TrimRight(base64.URLEncoding.EncodeToString([]byte(data)), "=")
71}
7273+// TestServiceToken_EmptyInContext tests that operations fail when service token is missing
74+func TestServiceToken_EmptyInContext(t *testing.T) {
75+ ctx := &RegistryContext{
76+ DID: "did:plc:test",
77+ HoldDID: "did:web:hold.example.com",
78+ PDSEndpoint: "https://pds.example.com",
79+ Repository: "test-repo",
80+ ServiceToken: "", // No service token (middleware didn't set it)
81+ Refresher: nil,
82+ }
83+84+ store := NewProxyBlobStore(ctx)
85+86+ // Try a write operation that requires authentication
87+ testDigest := digest.FromString("test-content")
88+ _, err := store.Stat(context.Background(), testDigest)
89+90+ // Should fail because no service token is available
91+ if err == nil {
92+ t.Error("Expected error when service token is empty")
93+ }
94+95+ // Error should indicate authentication issue
96+ if !strings.Contains(err.Error(), "UNAUTHORIZED") && !strings.Contains(err.Error(), "authentication") {
97+ t.Logf("Got error (acceptable): %v", err)
98+ }
99+}
100+101+// TestDoAuthenticatedRequest_BearerTokenInjection tests that Bearer tokens are added to requests
102+func TestDoAuthenticatedRequest_BearerTokenInjection(t *testing.T) {
103+ // This test verifies the Bearer token injection logic
104+105+ testToken := "test-bearer-token-xyz"
106+107+ // Create a test server to verify the Authorization header
108+ var receivedAuthHeader string
109+ testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
110+ receivedAuthHeader = r.Header.Get("Authorization")
111+ w.WriteHeader(http.StatusOK)
112+ }))
113+ defer testServer.Close()
114115+ // Create ProxyBlobStore with service token in context (set by middleware)
116+ ctx := &RegistryContext{
117+ DID: "did:plc:bearer-test",
118+ HoldDID: "did:web:hold.example.com",
119+ PDSEndpoint: "https://pds.example.com",
120+ Repository: "test-repo",
121+ ServiceToken: testToken, // Service token from middleware
122+ Refresher: nil,
123+ }
124125+ store := NewProxyBlobStore(ctx)
0126127+ // Create request
128+ req, err := http.NewRequest(http.MethodGet, testServer.URL+"/test", nil)
129+ if err != nil {
130+ t.Fatalf("Failed to create request: %v", err)
131+ }
132133+ // Do authenticated request
134+ resp, err := store.doAuthenticatedRequest(context.Background(), req)
135+ if err != nil {
136+ t.Fatalf("doAuthenticatedRequest failed: %v", err)
137+ }
138+ defer resp.Body.Close()
139+140+ // Verify Bearer token was added
141+ expectedHeader := "Bearer " + testToken
142+ if receivedAuthHeader != expectedHeader {
143+ t.Errorf("Expected Authorization header %s, got %s", expectedHeader, receivedAuthHeader)
144+ }
145}
146147+// TestDoAuthenticatedRequest_ErrorWhenTokenUnavailable tests that authentication failures return proper errors
148+func TestDoAuthenticatedRequest_ErrorWhenTokenUnavailable(t *testing.T) {
149+ // Create test server (should not be called since auth fails first)
150+ called := false
151+ testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
152+ called = true
153+ w.WriteHeader(http.StatusOK)
154+ }))
155+ defer testServer.Close()
156+157+ // Create ProxyBlobStore without service token (middleware didn't set it)
158+ ctx := &RegistryContext{
159+ DID: "did:plc:fallback",
160+ HoldDID: "did:web:hold.example.com",
161+ PDSEndpoint: "https://pds.example.com",
162+ Repository: "test-repo",
163+ ServiceToken: "", // No service token
164+ Refresher: nil,
165+ }
166+167+ store := NewProxyBlobStore(ctx)
168+169+ // Create request
170+ req, err := http.NewRequest(http.MethodGet, testServer.URL+"/test", nil)
171+ if err != nil {
172+ t.Fatalf("Failed to create request: %v", err)
173+ }
174+175+ // Do authenticated request - should fail when no service token
176+ resp, err := store.doAuthenticatedRequest(context.Background(), req)
177+ if err == nil {
178+ t.Fatal("Expected doAuthenticatedRequest to fail when no service token is available")
179+ }
180+ if resp != nil {
181+ resp.Body.Close()
182+ }
183+184+ // Verify error indicates authentication/authorization issue
185+ errStr := err.Error()
186+ if !strings.Contains(errStr, "service token") && !strings.Contains(errStr, "UNAUTHORIZED") {
187+ t.Errorf("Expected service token or unauthorized error, got: %v", err)
188+ }
189+190+ if called {
191+ t.Error("Expected request to NOT be made when authentication fails")
192+ }
193}
194195+// TestResolveHoldURL tests DID to URL conversion
196func TestResolveHoldURL(t *testing.T) {
197 tests := []struct {
198 name string
···200 expected string
201 }{
202 {
203+ name: "did:web with http (TEST_MODE)",
204 holdDID: "did:web:localhost:8080",
205 expected: "http://localhost:8080",
206 },
···228229// TestServiceTokenCacheExpiry tests that expired cached tokens are not used
230func TestServiceTokenCacheExpiry(t *testing.T) {
231+ userDID := "did:plc:expiry"
232 holdDID := "did:web:hold.example.com"
233234 // Insert expired token
235 expiredPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(-1*time.Hour).Unix())
236 expiredToken := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(expiredPayload) + ".signature"
237+ token.SetServiceToken(userDID, holdDID, expiredToken)
238239 // GetServiceToken should automatically remove expired tokens
240+ cachedToken, expiresAt := token.GetServiceToken(userDID, holdDID)
241242 // Should return empty string for expired token
243 if cachedToken != "" {
···272273// TestNewProxyBlobStore tests ProxyBlobStore creation
274func TestNewProxyBlobStore(t *testing.T) {
275+ ctx := &RegistryContext{
276+ DID: "did:plc:test",
277+ HoldDID: "did:web:hold.example.com",
278+ PDSEndpoint: "https://pds.example.com",
279+ Repository: "test-repo",
280+ }
281282+ store := NewProxyBlobStore(ctx)
283284 if store == nil {
285 t.Fatal("Expected non-nil ProxyBlobStore")
286 }
287288+ if store.ctx != ctx {
289 t.Error("Expected context to be set")
290 }
291···310311 testPayload := fmt.Sprintf(`{"exp":%d}`, time.Now().Add(50*time.Second).Unix())
312 testTokenStr := "eyJhbGciOiJIUzI1NiJ9." + base64URLEncode(testPayload) + ".signature"
313+ token.SetServiceToken(userDID, holdDID, testTokenStr)
314315 for b.Loop() {
316+ cachedToken, expiresAt := token.GetServiceToken(userDID, holdDID)
317318 if cachedToken == "" || time.Now().After(expiresAt) {
319 b.Error("Cache miss in benchmark")
···321 }
322}
323324+// TestCompleteMultipartUpload_JSONFormat verifies the JSON request format sent to hold service
325+// This test would have caught the "partNumber" vs "part_number" bug
326+func TestCompleteMultipartUpload_JSONFormat(t *testing.T) {
327+ var capturedBody map[string]any
328+329+ // Mock hold service that captures the request body
330+ holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
331+ if !strings.Contains(r.URL.Path, atproto.HoldCompleteUpload) {
332+ t.Errorf("Wrong endpoint called: %s", r.URL.Path)
333+ }
334+335+ // Capture request body
336+ var body map[string]any
337+ if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
338+ t.Errorf("Failed to decode request body: %v", err)
339+ }
340+ capturedBody = body
341+342+ w.Header().Set("Content-Type", "application/json")
343+ w.WriteHeader(http.StatusOK)
344+ w.Write([]byte(`{}`))
345+ }))
346+ defer holdServer.Close()
347348+ // Create store with mocked hold URL
349+ ctx := &RegistryContext{
350+ DID: "did:plc:test",
351+ HoldDID: "did:web:hold.example.com",
352+ PDSEndpoint: "https://pds.example.com",
353+ Repository: "test-repo",
354+ ServiceToken: "test-service-token", // Service token from middleware
355+ }
356+ store := NewProxyBlobStore(ctx)
357+ store.holdURL = holdServer.URL
358+359+ // Call completeMultipartUpload
360+ parts := []CompletedPart{
361+ {PartNumber: 1, ETag: "etag-1"},
362+ {PartNumber: 2, ETag: "etag-2"},
363+ }
364+ err := store.completeMultipartUpload(context.Background(), "sha256:abc123", "upload-id-xyz", parts)
365 if err != nil {
366+ t.Fatalf("completeMultipartUpload failed: %v", err)
367 }
368369+ // Verify JSON format
370+ if capturedBody == nil {
371+ t.Fatal("No request body was captured")
372+ }
373+374+ // Check top-level fields
375+ if uploadID, ok := capturedBody["uploadId"].(string); !ok || uploadID != "upload-id-xyz" {
376+ t.Errorf("Expected uploadId='upload-id-xyz', got %v", capturedBody["uploadId"])
377+ }
378+ if digest, ok := capturedBody["digest"].(string); !ok || digest != "sha256:abc123" {
379+ t.Errorf("Expected digest='sha256:abc123', got %v", capturedBody["digest"])
380+ }
381+382+ // Check parts array
383+ partsArray, ok := capturedBody["parts"].([]any)
384+ if !ok {
385+ t.Fatalf("Expected parts to be array, got %T", capturedBody["parts"])
386+ }
387+ if len(partsArray) != 2 {
388+ t.Fatalf("Expected 2 parts, got %d", len(partsArray))
389+ }
390+391+ // Verify first part has "part_number" (not "partNumber")
392+ part0, ok := partsArray[0].(map[string]any)
393+ if !ok {
394+ t.Fatalf("Expected part to be object, got %T", partsArray[0])
395+ }
396+397+ // THIS IS THE KEY CHECK - would have caught the bug
398+ if _, hasPartNumber := part0["partNumber"]; hasPartNumber {
399+ t.Error("Found 'partNumber' (camelCase) - should be 'part_number' (snake_case)")
400+ }
401+ if partNum, ok := part0["part_number"].(float64); !ok || int(partNum) != 1 {
402+ t.Errorf("Expected part_number=1, got %v", part0["part_number"])
403+ }
404+ if etag, ok := part0["etag"].(string); !ok || etag != "etag-1" {
405+ t.Errorf("Expected etag='etag-1', got %v", part0["etag"])
406 }
407}
408409+// TestGet_UsesPresignedURLDirectly verifies that Get() doesn't add auth headers to presigned URLs
410+// This test would have caught the presigned URL authentication bug
411+func TestGet_UsesPresignedURLDirectly(t *testing.T) {
412+ blobData := []byte("test blob content")
413+ var s3ReceivedAuthHeader string
414+415+ // Mock S3 server that rejects requests with Authorization header
416+ s3Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
417+ s3ReceivedAuthHeader = r.Header.Get("Authorization")
418+419+ // Presigned URLs should NOT have Authorization header
420+ if s3ReceivedAuthHeader != "" {
421+ t.Errorf("S3 received Authorization header: %s (should be empty for presigned URLs)", s3ReceivedAuthHeader)
422+ w.WriteHeader(http.StatusForbidden)
423+ w.Write([]byte(`<?xml version="1.0"?><Error><Code>SignatureDoesNotMatch</Code></Error>`))
424+ return
425+ }
426+427+ // Return blob data
428+ w.WriteHeader(http.StatusOK)
429+ w.Write(blobData)
430+ }))
431+ defer s3Server.Close()
432+433+ // Mock hold service that returns presigned S3 URL
434+ holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
435+ // Return presigned URL pointing to S3 server
436+ w.Header().Set("Content-Type", "application/json")
437+ w.WriteHeader(http.StatusOK)
438+ resp := map[string]string{
439+ "url": s3Server.URL + "/blob?X-Amz-Signature=fake-signature",
440+ }
441+ json.NewEncoder(w).Encode(resp)
442+ }))
443+ defer holdServer.Close()
444+445+ // Create store with service token in context
446+ ctx := &RegistryContext{
447+ DID: "did:plc:test",
448+ HoldDID: "did:web:hold.example.com",
449+ PDSEndpoint: "https://pds.example.com",
450+ Repository: "test-repo",
451+ ServiceToken: "test-service-token", // Service token from middleware
452+ }
453+ store := NewProxyBlobStore(ctx)
454+ store.holdURL = holdServer.URL
455+456+ // Call Get()
457+ dgst := digest.FromBytes(blobData)
458+ retrieved, err := store.Get(context.Background(), dgst)
459+ if err != nil {
460+ t.Fatalf("Get() failed: %v", err)
461+ }
462+463+ // Verify correct data was retrieved
464+ if string(retrieved) != string(blobData) {
465+ t.Errorf("Expected data=%s, got %s", string(blobData), string(retrieved))
466+ }
467+468+ // Verify S3 received NO Authorization header
469+ if s3ReceivedAuthHeader != "" {
470+ t.Errorf("S3 should not receive Authorization header for presigned URLs, got: %s", s3ReceivedAuthHeader)
471+ }
472+}
473+474+// TestOpen_UsesPresignedURLDirectly verifies that Open() doesn't add auth headers to presigned URLs
475+// This test would have caught the presigned URL authentication bug
476+func TestOpen_UsesPresignedURLDirectly(t *testing.T) {
477+ blobData := []byte("test blob stream content")
478+ var s3ReceivedAuthHeader string
479+480+ // Mock S3 server
481+ s3Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
482+ s3ReceivedAuthHeader = r.Header.Get("Authorization")
483+484+ // Presigned URLs should NOT have Authorization header
485+ if s3ReceivedAuthHeader != "" {
486+ t.Errorf("S3 received Authorization header: %s (should be empty)", s3ReceivedAuthHeader)
487+ w.WriteHeader(http.StatusForbidden)
488+ return
489+ }
490+491+ w.WriteHeader(http.StatusOK)
492+ w.Write(blobData)
493+ }))
494+ defer s3Server.Close()
495+496+ // Mock hold service
497+ holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
498+ w.Header().Set("Content-Type", "application/json")
499+ w.WriteHeader(http.StatusOK)
500+ json.NewEncoder(w).Encode(map[string]string{
501+ "url": s3Server.URL + "/blob?X-Amz-Signature=fake",
502+ })
503+ }))
504+ defer holdServer.Close()
505+506+ // Create store with service token in context
507+ ctx := &RegistryContext{
508+ DID: "did:plc:test",
509+ HoldDID: "did:web:hold.example.com",
510+ PDSEndpoint: "https://pds.example.com",
511+ Repository: "test-repo",
512+ ServiceToken: "test-service-token", // Service token from middleware
513+ }
514+ store := NewProxyBlobStore(ctx)
515+ store.holdURL = holdServer.URL
516+517+ // Call Open()
518+ dgst := digest.FromBytes(blobData)
519+ reader, err := store.Open(context.Background(), dgst)
520+ if err != nil {
521+ t.Fatalf("Open() failed: %v", err)
522+ }
523+ defer reader.Close()
524+525+ // Verify S3 received NO Authorization header
526+ if s3ReceivedAuthHeader != "" {
527+ t.Errorf("S3 should not receive Authorization header for presigned URLs, got: %s", s3ReceivedAuthHeader)
528+ }
529+}
530+531+// TestMultipartEndpoints_CorrectURLs verifies all multipart XRPC endpoints use correct URLs
532+// This would have caught the old com.atproto.repo.uploadBlob vs new io.atcr.hold.* endpoints
533+func TestMultipartEndpoints_CorrectURLs(t *testing.T) {
534 tests := []struct {
535+ name string
536+ testFunc func(*ProxyBlobStore) error
537+ expectedPath string
538 }{
539+ {
540+ name: "startMultipartUpload",
541+ testFunc: func(store *ProxyBlobStore) error {
542+ _, err := store.startMultipartUpload(context.Background(), "sha256:test")
543+ return err
544+ },
545+ expectedPath: atproto.HoldInitiateUpload,
546+ },
547+ {
548+ name: "getPartUploadInfo",
549+ testFunc: func(store *ProxyBlobStore) error {
550+ _, err := store.getPartUploadInfo(context.Background(), "sha256:test", "upload-123", 1)
551+ return err
552+ },
553+ expectedPath: atproto.HoldGetPartUploadURL,
554+ },
555+ {
556+ name: "completeMultipartUpload",
557+ testFunc: func(store *ProxyBlobStore) error {
558+ parts := []CompletedPart{{PartNumber: 1, ETag: "etag1"}}
559+ return store.completeMultipartUpload(context.Background(), "sha256:test", "upload-123", parts)
560+ },
561+ expectedPath: atproto.HoldCompleteUpload,
562+ },
563+ {
564+ name: "abortMultipartUpload",
565+ testFunc: func(store *ProxyBlobStore) error {
566+ return store.abortMultipartUpload(context.Background(), "sha256:test", "upload-123")
567+ },
568+ expectedPath: atproto.HoldAbortUpload,
569+ },
570 }
571572 for _, tt := range tests {
573 t.Run(tt.name, func(t *testing.T) {
574+ var capturedPath string
575+576+ // Mock hold service that captures request path
577+ holdServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
578+ capturedPath = r.URL.Path
579+580+ // Return success response
581+ w.Header().Set("Content-Type", "application/json")
582+ w.WriteHeader(http.StatusOK)
583+ resp := map[string]string{
584+ "uploadId": "test-upload-id",
585+ "url": "https://s3.example.com/presigned",
586+ }
587+ json.NewEncoder(w).Encode(resp)
588+ }))
589+ defer holdServer.Close()
590+591+ // Create store with service token in context
592+ ctx := &RegistryContext{
593+ DID: "did:plc:test",
594+ HoldDID: "did:web:hold.example.com",
595+ PDSEndpoint: "https://pds.example.com",
596+ Repository: "test-repo",
597+ ServiceToken: "test-service-token", // Service token from middleware
598+ }
599+ store := NewProxyBlobStore(ctx)
600+ store.holdURL = holdServer.URL
601+602+ // Call the function
603+ _ = tt.testFunc(store) // Ignore error, we just care about the URL
604+605+ // Verify correct endpoint was called
606+ if capturedPath != tt.expectedPath {
607+ t.Errorf("Expected endpoint %s, got %s", tt.expectedPath, capturedPath)
608+ }
609+610+ // Verify it's NOT the old endpoint
611+ if strings.Contains(capturedPath, "com.atproto.repo.uploadBlob") {
612+ t.Error("Still using old com.atproto.repo.uploadBlob endpoint!")
613 }
614 })
615 }
616}
0000000000
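Aside: the two tests above pin down the presigned-URL contract: the URL is self-authorizing via its query string, so the client must not attach the service token. A minimal sketch of that rule (not part of the diff; fetchPresigned is a hypothetical helper):

import (
	"context"
	"fmt"
	"io"
	"net/http"
)

// fetchPresigned downloads a blob from a presigned S3 URL.
// The signature lives in the query string (X-Amz-Signature), so adding an
// Authorization header would cause S3 to reject the request (the mock
// servers above simulate this with a 403).
func fetchPresigned(ctx context.Context, url string) ([]byte, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	// Deliberately no req.Header.Set("Authorization", ...) here.
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("presigned fetch failed: %s", resp.Status)
	}
	return io.ReadAll(resp.Body)
}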
+74-39
pkg/appview/storage/routing_repository.go
···67import (
8 "context"
9- "database/sql"
10 "log/slog"
01112- "atcr.io/pkg/auth"
13 "github.com/distribution/distribution/v3"
14- "github.com/distribution/reference"
15)
1617-// RoutingRepository routes manifests to ATProto and blobs to external hold service.
18-// The registry (AppView) is stateless and NEVER stores blobs locally.
19-// A new instance is created per HTTP request - no caching or synchronization needed.
20type RoutingRepository struct {
21 distribution.Repository
22- userCtx *auth.UserContext
23- sqlDB *sql.DB
0024}
2526// NewRoutingRepository creates a new routing repository
27-func NewRoutingRepository(baseRepo distribution.Repository, userCtx *auth.UserContext, sqlDB *sql.DB) *RoutingRepository {
28 return &RoutingRepository{
29 Repository: baseRepo,
30- userCtx: userCtx,
31- sqlDB: sqlDB,
32 }
33}
3435// Manifests returns the ATProto-backed manifest service
36func (r *RoutingRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
37- // blobStore used to fetch labels from th
38- blobStore := r.Blobs(ctx)
39- return NewManifestStore(r.userCtx, blobStore, r.sqlDB), nil
00000000000000040}
4142// Blobs returns a proxy blob store that routes to external hold service
043func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
44- // Resolve hold DID: pull uses DB lookup, push uses profile discovery
45- holdDID, err := r.userCtx.ResolveHoldDID(ctx, r.sqlDB)
46- if err != nil {
47- slog.Warn("Failed to resolve hold DID", "component", "storage/blobs", "error", err)
48- holdDID = r.userCtx.TargetHoldDID
0000000000000000000000000000049 }
5051 if holdDID == "" {
52- panic("hold DID not set - ensure default_hold_did is configured in middleware")
053 }
5455- slog.Debug("Using hold DID for blobs", "component", "storage/blobs", "did", r.userCtx.TargetOwnerDID, "repo", r.userCtx.TargetRepo, "hold", holdDID, "action", r.userCtx.Action.String())
0005657- return NewProxyBlobStore(r.userCtx)
000058}
5960// Tags returns the tag service
61// Tags are stored in ATProto as io.atcr.tag records
62func (r *RoutingRepository) Tags(ctx context.Context) distribution.TagService {
63- return NewTagStore(r.userCtx.GetATProtoClient(), r.userCtx.TargetRepo)
64-}
65-66-// Named returns a reference to the repository name.
67-// If the base repository is set, it delegates to the base.
68-// Otherwise, it constructs a name from the user context.
69-func (r *RoutingRepository) Named() reference.Named {
70- if r.Repository != nil {
71- return r.Repository.Named()
72- }
73- // Construct from user context
74- name, err := reference.WithName(r.userCtx.TargetRepo)
75- if err != nil {
76- // Fallback: return a simple reference
77- name, _ = reference.WithName("unknown")
78- }
79- return name
80}
···67import (
8 "context"
09 "log/slog"
10+ "sync"
11012 "github.com/distribution/distribution/v3"
013)
1415+// RoutingRepository routes manifests to ATProto and blobs to external hold service
16+// The registry (AppView) is stateless and NEVER stores blobs locally
017type RoutingRepository struct {
18 distribution.Repository
19+ Ctx *RegistryContext // All context and services (exported for token updates)
20+ mu sync.Mutex // Protects manifestStore and blobStore
21+ manifestStore *ManifestStore // Cached manifest store instance
22+ blobStore *ProxyBlobStore // Cached blob store instance
23}
2425// NewRoutingRepository creates a new routing repository
26+func NewRoutingRepository(baseRepo distribution.Repository, ctx *RegistryContext) *RoutingRepository {
27 return &RoutingRepository{
28 Repository: baseRepo,
29+ Ctx: ctx,
030 }
31}
3233// Manifests returns the ATProto-backed manifest service
34func (r *RoutingRepository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) {
35+ r.mu.Lock()
36+ // Create or return cached manifest store
37+ if r.manifestStore == nil {
38+ // Ensure blob store is created first (needed for label extraction during push)
39+ // Release lock while calling Blobs to avoid deadlock
40+ r.mu.Unlock()
41+ blobStore := r.Blobs(ctx)
42+ r.mu.Lock()
43+44+ // Double-check after reacquiring lock (another goroutine might have set it)
45+ if r.manifestStore == nil {
46+ r.manifestStore = NewManifestStore(r.Ctx, blobStore)
47+ }
48+ }
49+ manifestStore := r.manifestStore
50+ r.mu.Unlock()
51+52+ return manifestStore, nil
53}
5455// Blobs returns a proxy blob store that routes to external hold service
56+// The registry (AppView) NEVER stores blobs locally - all blobs go through hold service
57func (r *RoutingRepository) Blobs(ctx context.Context) distribution.BlobStore {
58+ r.mu.Lock()
59+ // Return cached blob store if available
60+ if r.blobStore != nil {
61+ blobStore := r.blobStore
62+ r.mu.Unlock()
63+ slog.Debug("Returning cached blob store", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository)
64+ return blobStore
65+ }
66+67+ // Determine if this is a pull (GET) or push (PUT/POST/HEAD/etc) operation
68+ // Pull operations use the historical hold DID from the database (blobs are where they were pushed)
69+ // Push operations use the discovery-based hold DID from user's profile/default
70+ // This allows users to change their default hold and have new pushes go there
71+ isPull := false
72+ if method, ok := ctx.Value("http.request.method").(string); ok {
73+ isPull = method == "GET"
74+ }
75+76+ holdDID := r.Ctx.HoldDID // Default to discovery-based DID
77+ holdSource := "discovery"
78+79+ // Only query database for pull operations
80+ if isPull && r.Ctx.Database != nil {
81+ // Query database for the latest manifest's hold DID
82+ if dbHoldDID, err := r.Ctx.Database.GetLatestHoldDIDForRepo(r.Ctx.DID, r.Ctx.Repository); err == nil && dbHoldDID != "" {
83+ // Use hold DID from database (pull case - use historical reference)
84+ holdDID = dbHoldDID
85+ holdSource = "database"
86+ slog.Debug("Using hold from database manifest (pull)", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", dbHoldDID)
87+ } else if err != nil {
88+ // Log error but don't fail - fall back to discovery-based DID
89+ slog.Warn("Failed to query database for hold DID", "component", "storage/blobs", "error", err)
90+ }
91+ // If dbHoldDID is empty (no manifests yet), fall through to use discovery-based DID
92 }
9394 if holdDID == "" {
95+ // This should never happen if middleware is configured correctly
96+ panic("hold DID not set in RegistryContext - ensure default_hold_did is configured in middleware")
97 }
9899+ slog.Debug("Using hold DID for blobs", "component", "storage/blobs", "did", r.Ctx.DID, "repo", r.Ctx.Repository, "hold", holdDID, "source", holdSource)
100+101+ // Update context with the correct hold DID (may be from database or discovered)
102+ r.Ctx.HoldDID = holdDID
103104+ // Create and cache proxy blob store
105+ r.blobStore = NewProxyBlobStore(r.Ctx)
106+ blobStore := r.blobStore
107+ r.mu.Unlock()
108+ return blobStore
109}
110111// Tags returns the tag service
112// Tags are stored in ATProto as io.atcr.tag records
113func (r *RoutingRepository) Tags(ctx context.Context) distribution.TagService {
114+ return NewTagStore(r.Ctx.ATProtoClient, r.Ctx.Repository)
0000000000000000115}
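Aside: Blobs() keys its pull-vs-push decision off the "http.request.method" context value. A minimal sketch of how a handler could populate that key (not part of the diff; the middleware name is hypothetical, and the string key simply mirrors what Blobs() reads):

import (
	"context"
	"net/http"
)

// withRequestMethod stores the HTTP method under the context key that
// RoutingRepository.Blobs() inspects. Note that staticcheck flags plain
// string context keys (SA1029); a typed key would not match the lookup above.
func withRequestMethod(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		ctx := context.WithValue(r.Context(), "http.request.method", r.Method)
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}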
+301-179
pkg/appview/storage/routing_repository_test.go
···23import (
4 "context"
05 "testing"
607 "github.com/stretchr/testify/assert"
8 "github.com/stretchr/testify/require"
910 "atcr.io/pkg/atproto"
11- "atcr.io/pkg/auth"
12)
1314-// mockUserContext creates a mock auth.UserContext for testing.
15-// It sets up both the user identity and target info, and configures
16-// test helpers to bypass network calls.
17-func mockUserContext(did, authMethod, httpMethod, targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID string) *auth.UserContext {
18- userCtx := auth.NewUserContext(did, authMethod, httpMethod, nil)
19- userCtx.SetTarget(targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID)
20-21- // Bypass PDS resolution (avoids network calls)
22- userCtx.SetPDSForTest(targetOwnerHandle, targetOwnerPDS)
23-24- // Set up mock authorizer that allows access
25- userCtx.SetAuthorizerForTest(auth.NewMockHoldAuthorizer())
2627- // Set default hold DID for push resolution
28- userCtx.SetDefaultHoldDIDForTest(targetHoldDID)
02930- return userCtx
031}
3233-// mockUserContextWithToken creates a mock UserContext with a pre-populated service token.
34-func mockUserContextWithToken(did, authMethod, httpMethod, targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID, serviceToken string) *auth.UserContext {
35- userCtx := mockUserContext(did, authMethod, httpMethod, targetOwnerDID, targetOwnerHandle, targetOwnerPDS, targetRepo, targetHoldDID)
36- userCtx.SetServiceTokenForTest(targetHoldDID, serviceToken)
37- return userCtx
38}
3940func TestNewRoutingRepository(t *testing.T) {
41- userCtx := mockUserContext(
42- "did:plc:test123", // authenticated user
43- "oauth", // auth method
44- "GET", // HTTP method
45- "did:plc:test123", // target owner
46- "test.handle", // target owner handle
47- "https://pds.example.com", // target owner PDS
48- "debian", // repository
49- "did:web:hold01.atcr.io", // hold DID
50- )
5152- repo := NewRoutingRepository(nil, userCtx, nil)
5354- if repo.userCtx.TargetOwnerDID != "did:plc:test123" {
55- t.Errorf("Expected TargetOwnerDID %q, got %q", "did:plc:test123", repo.userCtx.TargetOwnerDID)
56 }
5758- if repo.userCtx.TargetRepo != "debian" {
59- t.Errorf("Expected TargetRepo %q, got %q", "debian", repo.userCtx.TargetRepo)
000060 }
6162- if repo.userCtx.TargetHoldDID != "did:web:hold01.atcr.io" {
63- t.Errorf("Expected TargetHoldDID %q, got %q", "did:web:hold01.atcr.io", repo.userCtx.TargetHoldDID)
64 }
65}
6667// TestRoutingRepository_Manifests tests the Manifests() method
68func TestRoutingRepository_Manifests(t *testing.T) {
69- userCtx := mockUserContext(
70- "did:plc:test123",
71- "oauth",
72- "GET",
73- "did:plc:test123",
74- "test.handle",
75- "https://pds.example.com",
76- "myapp",
77- "did:web:hold01.atcr.io",
78- )
7980- repo := NewRoutingRepository(nil, userCtx, nil)
81 manifestService, err := repo.Manifests(context.Background())
8283 require.NoError(t, err)
84 assert.NotNil(t, manifestService)
0000000085}
8687-// TestRoutingRepository_Blobs tests the Blobs() method
88-func TestRoutingRepository_Blobs(t *testing.T) {
89- userCtx := mockUserContext(
90- "did:plc:test123",
91- "oauth",
92- "GET",
93- "did:plc:test123",
94- "test.handle",
95- "https://pds.example.com",
96- "myapp",
97- "did:web:hold01.atcr.io",
98- )
99100- repo := NewRoutingRepository(nil, userCtx, nil)
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101 blobStore := repo.Blobs(context.Background())
102103 assert.NotNil(t, blobStore)
00000000000000000000000000000000000000000000000000000000000000000104}
105106// TestRoutingRepository_Blobs_PanicOnEmptyHoldDID tests panic when hold DID is empty
107func TestRoutingRepository_Blobs_PanicOnEmptyHoldDID(t *testing.T) {
108- // Create context without default hold and empty target hold
109- userCtx := auth.NewUserContext("did:plc:emptyholdtest999", "oauth", "GET", nil)
110- userCtx.SetTarget("did:plc:emptyholdtest999", "test.handle", "https://pds.example.com", "empty-hold-app", "")
111- userCtx.SetPDSForTest("test.handle", "https://pds.example.com")
112- userCtx.SetAuthorizerForTest(auth.NewMockHoldAuthorizer())
113- // Intentionally NOT setting default hold DID
0114115- repo := NewRoutingRepository(nil, userCtx, nil)
116117 // Should panic with empty hold DID
118 assert.Panics(t, func() {
···122123// TestRoutingRepository_Tags tests the Tags() method
124func TestRoutingRepository_Tags(t *testing.T) {
125- userCtx := mockUserContext(
126- "did:plc:test123",
127- "oauth",
128- "GET",
129- "did:plc:test123",
130- "test.handle",
131- "https://pds.example.com",
132- "myapp",
133- "did:web:hold01.atcr.io",
134- )
135136- repo := NewRoutingRepository(nil, userCtx, nil)
137 tagService := repo.Tags(context.Background())
138139 assert.NotNil(t, tagService)
140141- // Call again and verify we get a fresh instance (no caching)
142 tagService2 := repo.Tags(context.Background())
143 assert.NotNil(t, tagService2)
0144}
145146-// TestRoutingRepository_UserContext tests that UserContext fields are properly set
147-func TestRoutingRepository_UserContext(t *testing.T) {
148- testCases := []struct {
149- name string
150- httpMethod string
151- expectedAction auth.RequestAction
152- }{
153- {"GET request is pull", "GET", auth.ActionPull},
154- {"HEAD request is pull", "HEAD", auth.ActionPull},
155- {"PUT request is push", "PUT", auth.ActionPush},
156- {"POST request is push", "POST", auth.ActionPush},
157- {"DELETE request is push", "DELETE", auth.ActionPush},
158 }
159160- for _, tc := range testCases {
161- t.Run(tc.name, func(t *testing.T) {
162- userCtx := mockUserContext(
163- "did:plc:test123",
164- "oauth",
165- tc.httpMethod,
166- "did:plc:test123",
167- "test.handle",
168- "https://pds.example.com",
169- "myapp",
170- "did:web:hold01.atcr.io",
171- )
172173- repo := NewRoutingRepository(nil, userCtx, nil)
0174175- assert.Equal(t, tc.expectedAction, repo.userCtx.Action, "action should match HTTP method")
176- })
177- }
178-}
179180-// TestRoutingRepository_DifferentHoldDIDs tests routing with different hold DIDs
181-func TestRoutingRepository_DifferentHoldDIDs(t *testing.T) {
182- testCases := []struct {
183- name string
184- holdDID string
185- }{
186- {"did:web hold", "did:web:hold01.atcr.io"},
187- {"did:web with port", "did:web:localhost:8080"},
188- {"did:plc hold", "did:plc:xyz123"},
189 }
190191- for _, tc := range testCases {
192- t.Run(tc.name, func(t *testing.T) {
193- userCtx := mockUserContext(
194- "did:plc:test123",
195- "oauth",
196- "PUT",
197- "did:plc:test123",
198- "test.handle",
199- "https://pds.example.com",
200- "myapp",
201- tc.holdDID,
202- )
203-204- repo := NewRoutingRepository(nil, userCtx, nil)
205- blobStore := repo.Blobs(context.Background())
206207- assert.NotNil(t, blobStore, "should create blob store for %s", tc.holdDID)
208- })
0209 }
210-}
211212-// TestRoutingRepository_Named tests the Named() method
213-func TestRoutingRepository_Named(t *testing.T) {
214- userCtx := mockUserContext(
215- "did:plc:test123",
216- "oauth",
217- "GET",
218- "did:plc:test123",
219- "test.handle",
220- "https://pds.example.com",
221- "myapp",
222- "did:web:hold01.atcr.io",
223- )
224225- repo := NewRoutingRepository(nil, userCtx, nil)
0000000226227- // Named() returns a reference.Named from the base repository
228- // Since baseRepo is nil, this tests our implementation handles that case
229- named := repo.Named()
000230231- // With nil base, Named() should return a name constructed from context
232- assert.NotNil(t, named)
233- assert.Contains(t, named.Name(), "myapp")
234}
235236-// TestATProtoResolveHoldURL tests DID to URL resolution
237-func TestATProtoResolveHoldURL(t *testing.T) {
238- tests := []struct {
239- name string
240- holdDID string
241- expected string
242- }{
243- {
244- name: "did:web simple domain",
245- holdDID: "did:web:hold01.atcr.io",
246- expected: "https://hold01.atcr.io",
247- },
248- {
249- name: "did:web with port (localhost)",
250- holdDID: "did:web:localhost:8080",
251- expected: "http://localhost:8080",
252- },
253- }
254255- for _, tt := range tests {
256- t.Run(tt.name, func(t *testing.T) {
257- result := atproto.ResolveHoldURL(tt.holdDID)
258- assert.Equal(t, tt.expected, result)
259- })
0260 }
0000000000261}
···23import (
4 "context"
5+ "sync"
6 "testing"
78+ "github.com/distribution/distribution/v3"
9 "github.com/stretchr/testify/assert"
10 "github.com/stretchr/testify/require"
1112 "atcr.io/pkg/atproto"
013)
1415+// mockDatabase is a simple mock for testing
16+type mockDatabase struct {
17+ holdDID string
18+ err error
19+}
00000002021+func (m *mockDatabase) IncrementPullCount(did, repository string) error {
22+ return nil
23+}
2425+func (m *mockDatabase) IncrementPushCount(did, repository string) error {
26+ return nil
27}
2829+func (m *mockDatabase) GetLatestHoldDIDForRepo(did, repository string) (string, error) {
30+ if m.err != nil {
31+ return "", m.err
32+ }
33+ return m.holdDID, nil
34}
3536func TestNewRoutingRepository(t *testing.T) {
37+ ctx := &RegistryContext{
38+ DID: "did:plc:test123",
39+ Repository: "debian",
40+ HoldDID: "did:web:hold01.atcr.io",
41+ ATProtoClient: &atproto.Client{},
42+ }
00004344+ repo := NewRoutingRepository(nil, ctx)
4546+ if repo.Ctx.DID != "did:plc:test123" {
47+ t.Errorf("Expected DID %q, got %q", "did:plc:test123", repo.Ctx.DID)
48 }
4950+ if repo.Ctx.Repository != "debian" {
51+ t.Errorf("Expected repository %q, got %q", "debian", repo.Ctx.Repository)
52+ }
53+54+ if repo.manifestStore != nil {
55+ t.Error("Expected manifestStore to be nil initially")
56 }
5758+ if repo.blobStore != nil {
59+ t.Error("Expected blobStore to be nil initially")
60 }
61}
6263// TestRoutingRepository_Manifests tests the Manifests() method
64func TestRoutingRepository_Manifests(t *testing.T) {
65+ ctx := &RegistryContext{
66+ DID: "did:plc:test123",
67+ Repository: "myapp",
68+ HoldDID: "did:web:hold01.atcr.io",
69+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
70+ }
00007172+ repo := NewRoutingRepository(nil, ctx)
73 manifestService, err := repo.Manifests(context.Background())
7475 require.NoError(t, err)
76 assert.NotNil(t, manifestService)
77+78+ // Verify the manifest store is cached
79+ assert.NotNil(t, repo.manifestStore, "manifest store should be cached")
80+81+ // Call again and verify we get the same instance
82+ manifestService2, err := repo.Manifests(context.Background())
83+ require.NoError(t, err)
84+ assert.Same(t, manifestService, manifestService2, "should return cached manifest store")
85}
8687+// TestRoutingRepository_ManifestStoreCaching tests that manifest store is cached
88+func TestRoutingRepository_ManifestStoreCaching(t *testing.T) {
89+ ctx := &RegistryContext{
90+ DID: "did:plc:test123",
91+ Repository: "myapp",
92+ HoldDID: "did:web:hold01.atcr.io",
93+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
94+ }
00009596+ repo := NewRoutingRepository(nil, ctx)
97+98+ // First call creates the store
99+ store1, err := repo.Manifests(context.Background())
100+ require.NoError(t, err)
101+ assert.NotNil(t, store1)
102+103+ // Second call returns cached store
104+ store2, err := repo.Manifests(context.Background())
105+ require.NoError(t, err)
106+ assert.Same(t, store1, store2, "should return cached manifest store instance")
107+108+ // Verify internal cache
109+ assert.NotNil(t, repo.manifestStore)
110+}
111+112+// TestRoutingRepository_Blobs_PullUsesDatabase tests that GET (pull) uses database hold DID
113+func TestRoutingRepository_Blobs_PullUsesDatabase(t *testing.T) {
114+ dbHoldDID := "did:web:database.hold.io"
115+ discoveryHoldDID := "did:web:discovery.hold.io"
116+117+ ctx := &RegistryContext{
118+ DID: "did:plc:test123",
119+ Repository: "myapp",
120+ HoldDID: discoveryHoldDID, // Discovery-based hold (should be overridden for pull)
121+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
122+ Database: &mockDatabase{holdDID: dbHoldDID},
123+ }
124+125+ repo := NewRoutingRepository(nil, ctx)
126+127+ // Create context with GET method (pull operation)
128+ pullCtx := context.WithValue(context.Background(), "http.request.method", "GET")
129+ blobStore := repo.Blobs(pullCtx)
130+131+ assert.NotNil(t, blobStore)
132+ // Verify the hold DID was updated to use the database value for pull
133+ assert.Equal(t, dbHoldDID, repo.Ctx.HoldDID, "pull (GET) should use database hold DID")
134+}
135+136+// TestRoutingRepository_Blobs_PushUsesDiscovery tests that push operations use discovery hold DID
137+func TestRoutingRepository_Blobs_PushUsesDiscovery(t *testing.T) {
138+ dbHoldDID := "did:web:database.hold.io"
139+ discoveryHoldDID := "did:web:discovery.hold.io"
140+141+ testCases := []struct {
142+ name string
143+ method string
144+ }{
145+ {"PUT", "PUT"},
146+ {"POST", "POST"},
147+ {"HEAD", "HEAD"},
148+ {"PATCH", "PATCH"},
149+ {"DELETE", "DELETE"},
150+ }
151+152+ for _, tc := range testCases {
153+ t.Run(tc.name, func(t *testing.T) {
154+ ctx := &RegistryContext{
155+ DID: "did:plc:test123",
156+ Repository: "myapp-" + tc.method, // Unique repo to avoid caching
157+ HoldDID: discoveryHoldDID,
158+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
159+ Database: &mockDatabase{holdDID: dbHoldDID},
160+ }
161+162+ repo := NewRoutingRepository(nil, ctx)
163+164+ // Create context with push method
165+ pushCtx := context.WithValue(context.Background(), "http.request.method", tc.method)
166+ blobStore := repo.Blobs(pushCtx)
167+168+ assert.NotNil(t, blobStore)
169+ // Verify the hold DID remains the discovery-based one for push operations
170+ assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "%s should use discovery hold DID, not database", tc.method)
171+ })
172+ }
173+}
174+175+// TestRoutingRepository_Blobs_NoMethodUsesDiscovery tests that missing method defaults to discovery
176+func TestRoutingRepository_Blobs_NoMethodUsesDiscovery(t *testing.T) {
177+ dbHoldDID := "did:web:database.hold.io"
178+ discoveryHoldDID := "did:web:discovery.hold.io"
179+180+ ctx := &RegistryContext{
181+ DID: "did:plc:test123",
182+ Repository: "myapp-nomethod",
183+ HoldDID: discoveryHoldDID,
184+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
185+ Database: &mockDatabase{holdDID: dbHoldDID},
186+ }
187+188+ repo := NewRoutingRepository(nil, ctx)
189+190+ // Context without HTTP method (shouldn't happen in practice, but test defensive behavior)
191 blobStore := repo.Blobs(context.Background())
192193 assert.NotNil(t, blobStore)
194+ // Without method, should default to discovery (safer for push scenarios)
195+ assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "missing method should use discovery hold DID")
196+}
197+198+// TestRoutingRepository_Blobs_WithoutDatabase tests blob store with discovery-based hold
199+func TestRoutingRepository_Blobs_WithoutDatabase(t *testing.T) {
200+ discoveryHoldDID := "did:web:discovery.hold.io"
201+202+ ctx := &RegistryContext{
203+ DID: "did:plc:nocache456",
204+ Repository: "uncached-app",
205+ HoldDID: discoveryHoldDID,
206+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:nocache456", ""),
207+ Database: nil, // No database
208+ }
209+210+ repo := NewRoutingRepository(nil, ctx)
211+ blobStore := repo.Blobs(context.Background())
212+213+ assert.NotNil(t, blobStore)
214+ // Verify the hold DID remains the discovery-based one
215+ assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "should use discovery-based hold DID")
216+}
217+218+// TestRoutingRepository_Blobs_DatabaseEmptyFallback tests fallback when database returns empty hold DID
219+func TestRoutingRepository_Blobs_DatabaseEmptyFallback(t *testing.T) {
220+ discoveryHoldDID := "did:web:discovery.hold.io"
221+222+ ctx := &RegistryContext{
223+ DID: "did:plc:test123",
224+ Repository: "newapp",
225+ HoldDID: discoveryHoldDID,
226+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
227+ Database: &mockDatabase{holdDID: ""}, // Empty string (no manifests yet)
228+ }
229+230+ repo := NewRoutingRepository(nil, ctx)
231+ blobStore := repo.Blobs(context.Background())
232+233+ assert.NotNil(t, blobStore)
234+ // Verify the hold DID falls back to discovery-based
235+ assert.Equal(t, discoveryHoldDID, repo.Ctx.HoldDID, "should fall back to discovery-based hold DID when database returns empty")
236+}
237+238+// TestRoutingRepository_BlobStoreCaching tests that blob store is cached
239+func TestRoutingRepository_BlobStoreCaching(t *testing.T) {
240+ ctx := &RegistryContext{
241+ DID: "did:plc:test123",
242+ Repository: "myapp",
243+ HoldDID: "did:web:hold01.atcr.io",
244+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
245+ }
246+247+ repo := NewRoutingRepository(nil, ctx)
248+249+ // First call creates the store
250+ store1 := repo.Blobs(context.Background())
251+ assert.NotNil(t, store1)
252+253+ // Second call returns cached store
254+ store2 := repo.Blobs(context.Background())
255+ assert.Same(t, store1, store2, "should return cached blob store instance")
256+257+ // Verify internal cache
258+ assert.NotNil(t, repo.blobStore)
259}
260261// TestRoutingRepository_Blobs_PanicOnEmptyHoldDID tests panic when hold DID is empty
262func TestRoutingRepository_Blobs_PanicOnEmptyHoldDID(t *testing.T) {
263+ // Use a unique DID/repo to ensure no cache entry exists
264+ ctx := &RegistryContext{
265+ DID: "did:plc:emptyholdtest999",
266+ Repository: "empty-hold-app",
267+ HoldDID: "", // Empty hold DID should panic
268+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:emptyholdtest999", ""),
269+ }
270271+ repo := NewRoutingRepository(nil, ctx)
272273 // Should panic with empty hold DID
274 assert.Panics(t, func() {
···278279// TestRoutingRepository_Tags tests the Tags() method
280func TestRoutingRepository_Tags(t *testing.T) {
281+ ctx := &RegistryContext{
282+ DID: "did:plc:test123",
283+ Repository: "myapp",
284+ HoldDID: "did:web:hold01.atcr.io",
285+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
286+ }
0000287288+ repo := NewRoutingRepository(nil, ctx)
289 tagService := repo.Tags(context.Background())
290291 assert.NotNil(t, tagService)
292293+ // Call again and verify we get a new instance (Tags() doesn't cache)
294 tagService2 := repo.Tags(context.Background())
295 assert.NotNil(t, tagService2)
296+ // Tags service is not cached, so each call creates a new instance
297}
298299+// TestRoutingRepository_ConcurrentAccess tests concurrent access to cached stores
300+func TestRoutingRepository_ConcurrentAccess(t *testing.T) {
301+ ctx := &RegistryContext{
302+ DID: "did:plc:test123",
303+ Repository: "myapp",
304+ HoldDID: "did:web:hold01.atcr.io",
305+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
00000306 }
307308+ repo := NewRoutingRepository(nil, ctx)
00000000000309310+ var wg sync.WaitGroup
311+ numGoroutines := 10
312313+ // Track all manifest stores returned
314+ manifestStores := make([]distribution.ManifestService, numGoroutines)
315+ blobStores := make([]distribution.BlobStore, numGoroutines)
0316317+ // Concurrent access to Manifests()
318+ for i := 0; i < numGoroutines; i++ {
319+ wg.Add(1)
320+ go func(index int) {
321+ defer wg.Done()
322+ store, err := repo.Manifests(context.Background())
323+ require.NoError(t, err)
324+ manifestStores[index] = store
325+ }(i)
326 }
327328+ wg.Wait()
00000000000000329330+ // Verify all stores are non-nil; instance identity is covered by the caching tests above
331+ for i := 0; i < numGoroutines; i++ {
332+ assert.NotNil(t, manifestStores[i], "manifest store should not be nil")
333 }
0334335+ // After concurrent creation, subsequent calls should return the cached instance
336+ cachedStore, err := repo.Manifests(context.Background())
337+ require.NoError(t, err)
338+ assert.NotNil(t, cachedStore)
00000000339340+ // Concurrent access to Blobs()
341+ for i := 0; i < numGoroutines; i++ {
342+ wg.Add(1)
343+ go func(index int) {
344+ defer wg.Done()
345+ blobStores[index] = repo.Blobs(context.Background())
346+ }(i)
347+ }
348349+ wg.Wait()
350+351+ // Verify all stores are non-nil; instance identity is covered by the caching tests above
352+ for i := 0; i < numGoroutines; i++ {
353+ assert.NotNil(t, blobStores[i], "blob store should not be nil")
354+ }
355356+ // After concurrent creation, subsequent calls should return the cached instance
357+ cachedBlobStore := repo.Blobs(context.Background())
358+ assert.NotNil(t, cachedBlobStore)
359}
360361+// TestRoutingRepository_Blobs_PullPriority tests that database hold DID takes priority for pull (GET)
362+func TestRoutingRepository_Blobs_PullPriority(t *testing.T) {
363+ dbHoldDID := "did:web:database.hold.io"
364+ discoveryHoldDID := "did:web:discovery.hold.io"
00000000000000365366+ ctx := &RegistryContext{
367+ DID: "did:plc:test123",
368+ Repository: "myapp-priority",
369+ HoldDID: discoveryHoldDID, // Discovery-based hold
370+ ATProtoClient: atproto.NewClient("https://pds.example.com", "did:plc:test123", ""),
371+ Database: &mockDatabase{holdDID: dbHoldDID}, // Database has a different hold DID
372 }
373+374+ repo := NewRoutingRepository(nil, ctx)
375+376+ // For pull (GET), database should take priority
377+ pullCtx := context.WithValue(context.Background(), "http.request.method", "GET")
378+ blobStore := repo.Blobs(pullCtx)
379+380+ assert.NotNil(t, blobStore)
381+ // Database hold DID should take priority over discovery for pull operations
382+ assert.Equal(t, dbHoldDID, repo.Ctx.HoldDID, "database hold DID should take priority over discovery for pull (GET)")
383}
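Aside: the mockDatabase in the tests above implies that RegistryContext.Database is an interface shaped roughly as follows (a sketch with a hypothetical name; the real definition lives elsewhere in the package):

type registryDatabase interface {
	IncrementPullCount(did, repository string) error
	IncrementPushCount(did, repository string) error
	GetLatestHoldDIDForRepo(did, repository string) (string, error)
}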
+3-3
pkg/appview/storage/tag_store.go
···36 return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
37 }
3839- var tagRecord atproto.TagRecord
40 if err := json.Unmarshal(record.Value, &tagRecord); err != nil {
41 return distribution.Descriptor{}, fmt.Errorf("failed to unmarshal tag record: %w", err)
42 }
···9192 var tags []string
93 for _, record := range records {
94- var tagRecord atproto.TagRecord
95 if err := json.Unmarshal(record.Value, &tagRecord); err != nil {
96 // Skip invalid records
97 continue
···116117 var tags []string
118 for _, record := range records {
119- var tagRecord atproto.TagRecord
120 if err := json.Unmarshal(record.Value, &tagRecord); err != nil {
121 // Skip invalid records
122 continue
···36 return distribution.Descriptor{}, distribution.ErrTagUnknown{Tag: tag}
37 }
3839+ var tagRecord atproto.Tag
40 if err := json.Unmarshal(record.Value, &tagRecord); err != nil {
41 return distribution.Descriptor{}, fmt.Errorf("failed to unmarshal tag record: %w", err)
42 }
···9192 var tags []string
93 for _, record := range records {
94+ var tagRecord atproto.Tag
95 if err := json.Unmarshal(record.Value, &tagRecord); err != nil {
96 // Skip invalid records
97 continue
···116117 var tags []string
118 for _, record := range records {
119+ var tagRecord atproto.Tag
120 if err := json.Unmarshal(record.Value, &tagRecord); err != nil {
121 // Skip invalid records
122 continue
+6-6
pkg/appview/storage/tag_store_test.go
···229230 for _, tt := range tests {
231 t.Run(tt.name, func(t *testing.T) {
232- var sentTagRecord *atproto.TagRecord
233234 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
235 if r.Method != "POST" {
···254 // Parse and verify tag record
255 recordData := body["record"].(map[string]any)
256 recordBytes, _ := json.Marshal(recordData)
257- var tagRecord atproto.TagRecord
258 json.Unmarshal(recordBytes, &tagRecord)
259 sentTagRecord = &tagRecord
260···284285 if !tt.wantErr && sentTagRecord != nil {
286 // Verify the tag record
287- if sentTagRecord.Type != atproto.TagCollection {
288- t.Errorf("Type = %v, want %v", sentTagRecord.Type, atproto.TagCollection)
289 }
290 if sentTagRecord.Repository != "myapp" {
291 t.Errorf("Repository = %v, want myapp", sentTagRecord.Repository)
···295 }
296 // New records should have manifest field
297 expectedURI := atproto.BuildManifestURI("did:plc:test123", tt.digest.String())
298- if sentTagRecord.Manifest != expectedURI {
299 t.Errorf("Manifest = %v, want %v", sentTagRecord.Manifest, expectedURI)
300 }
301 // New records should NOT have manifestDigest field
302- if sentTagRecord.ManifestDigest != "" {
303 t.Errorf("ManifestDigest should be empty for new records, got %v", sentTagRecord.ManifestDigest)
304 }
305 }
···229230 for _, tt := range tests {
231 t.Run(tt.name, func(t *testing.T) {
232+ var sentTagRecord *atproto.Tag
233234 server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
235 if r.Method != "POST" {
···254 // Parse and verify tag record
255 recordData := body["record"].(map[string]any)
256 recordBytes, _ := json.Marshal(recordData)
257+ var tagRecord atproto.Tag
258 json.Unmarshal(recordBytes, &tagRecord)
259 sentTagRecord = &tagRecord
260···284285 if !tt.wantErr && sentTagRecord != nil {
286 // Verify the tag record
287+ if sentTagRecord.LexiconTypeID != atproto.TagCollection {
288+ t.Errorf("LexiconTypeID = %v, want %v", sentTagRecord.LexiconTypeID, atproto.TagCollection)
289 }
290 if sentTagRecord.Repository != "myapp" {
291 t.Errorf("Repository = %v, want myapp", sentTagRecord.Repository)
···295 }
296 // New records should have manifest field
297 expectedURI := atproto.BuildManifestURI("did:plc:test123", tt.digest.String())
298+ if sentTagRecord.Manifest == nil || *sentTagRecord.Manifest != expectedURI {
299 t.Errorf("Manifest = %v, want %v", sentTagRecord.Manifest, expectedURI)
300 }
301 // New records should NOT have manifestDigest field
302+ if sentTagRecord.ManifestDigest != nil && *sentTagRecord.ManifestDigest != "" {
303 t.Errorf("ManifestDigest should be empty for new records, got %v", sentTagRecord.ManifestDigest)
304 }
305 }
-22
pkg/appview/templates/pages/404.html
···1-{{ define "404" }}
2-<!DOCTYPE html>
3-<html lang="en">
4-<head>
5- <title>404 - Lost at Sea | ATCR</title>
6- {{ template "head" . }}
7-</head>
8-<body>
9- {{ template "nav-simple" . }}
10- <main class="error-page">
11- <div class="error-content">
12- <i data-lucide="anchor" class="error-icon"></i>
13- <div class="error-code">404</div>
14- <h1>Lost at Sea</h1>
15- <p>The page you're looking for has drifted into uncharted waters.</p>
16- <a href="/" class="btn btn-primary">Return to Port</a>
17- </div>
18- </main>
19- <script>lucide.createIcons();</script>
20-</body>
21-</html>
22-{{ end }}
···44</div>
45{{ end }}
4600000000047{{ if eq (len .Pushes) 0 }}
48<div class="empty-state">
49 <p>No pushes yet. Start using ATCR by pushing your first image!</p>
···44</div>
45{{ end }}
4647+{{ if .HasMore }}
48+<button class="load-more"
49+ hx-get="/api/recent-pushes?offset={{ .NextOffset }}"
50+ hx-target="#push-list"
51+ hx-swap="beforeend">
52+ Load More
53+</button>
54+{{ end }}
55+56{{ if eq (len .Pushes) 0 }}
57<div class="empty-state">
58 <p>No pushes yet. Start using ATCR by pushing your first image!</p>
···386 t.Errorf("Content-Type = %v, want %v", r.Header.Get("Content-Type"), mimeType)
387 }
388389+ // Send response - use a valid CIDv1 in base32 format
390 response := `{
391 "blob": {
392 "$type": "blob",
393+ "ref": {"$link": "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"},
394 "mimeType": "application/octet-stream",
395 "size": 17
396 }
···406 t.Fatalf("UploadBlob() error = %v", err)
407 }
408409+ if blobRef.MimeType != mimeType {
410+ t.Errorf("MimeType = %v, want %v", blobRef.MimeType, mimeType)
411 }
412413+ // LexBlob.Ref is a LexLink (cid.Cid alias), use .String() to get the CID string
414+ expectedCID := "bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku"
415+ if blobRef.Ref.String() != expectedCID {
416+ t.Errorf("Ref.String() = %v, want %v", blobRef.Ref.String(), expectedCID)
417 }
418419 if blobRef.Size != 17 {
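Aside: the expected CID above is a CIDv1 in base32 (the bafkrei prefix corresponds to the raw codec with a sha2-256 hash), which is the form LexLink.String() returns. A quick validity check, assuming the github.com/ipfs/go-cid package:

import (
	"fmt"

	"github.com/ipfs/go-cid"
)

func checkCID() error {
	c, err := cid.Decode("bafkreihdwdcefgh4dqkjv67uzcmw7ojee6xedzdetojuzjevtenxquvyku")
	if err != nil {
		return fmt.Errorf("not a valid CID: %w", err)
	}
	// Expect version 1 and the raw codec (0x55) for a blob reference.
	fmt.Println("version:", c.Version(), "codec:", c.Type())
	return nil
}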
+255-11
pkg/atproto/generate.go
···34package main
56-// CBOR Code Generator
7//
8-// This generates optimized CBOR marshaling code for ATProto records.
0009//
10// Usage:
11// go generate ./pkg/atproto/...
12//
13-// This creates pkg/atproto/cbor_gen.go which should be committed to git.
14-// Only re-run when you modify types in pkg/atproto/types.go
15-//
16-// The //go:generate directive is in lexicon.go
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001718import (
19 "fmt"
···25)
2627func main() {
28- // Generate map-style encoders for CrewRecord, CaptainRecord, LayerRecord, and TangledProfileRecord
29 if err := cbg.WriteMapEncodersToFile("cbor_gen.go", "atproto",
30- atproto.CrewRecord{},
31- atproto.CaptainRecord{},
32- atproto.LayerRecord{},
000000000000000033 atproto.TangledProfileRecord{},
34 ); err != nil {
35- fmt.Printf("Failed to generate CBOR encoders: %v\n", err)
36 os.Exit(1)
37 }
38}
0000000000000000000000000000000000000000000000000000
···34package main
56+// Lexicon and CBOR Code Generator
7//
8+// This generates:
9+// 1. Go types from lexicon JSON files (via lex/lexgen library)
10+// 2. CBOR marshaling code for ATProto records (via cbor-gen)
11+// 3. Type registration for lexutil (register.go)
12//
13// Usage:
14// go generate ./pkg/atproto/...
15//
16+// Key insight: We use RegisterLexiconTypeID: false to avoid generating init()
17+// blocks that require CBORMarshaler. This breaks the circular dependency between
18+// lexgen and cbor-gen. See: https://github.com/bluesky-social/indigo/issues/931
19+20+import (
21+ "bytes"
22+ "encoding/json"
23+ "fmt"
24+ "os"
25+ "os/exec"
26+ "path/filepath"
27+ "strings"
28+29+ "github.com/bluesky-social/indigo/atproto/lexicon"
30+ "github.com/bluesky-social/indigo/lex/lexgen"
31+ "golang.org/x/tools/imports"
32+)
33+34+func main() {
35+ // Find repo root
36+ repoRoot, err := findRepoRoot()
37+ if err != nil {
38+ fmt.Printf("failed to find repo root: %v\n", err)
39+ os.Exit(1)
40+ }
41+42+ pkgDir := filepath.Join(repoRoot, "pkg/atproto")
43+ lexDir := filepath.Join(repoRoot, "lexicons")
44+45+ // Step 0: Clean up old register.go to avoid conflicts
46+ // (It will be regenerated at the end)
47+ os.Remove(filepath.Join(pkgDir, "register.go"))
48+49+ // Step 1: Load all lexicon schemas into catalog (for cross-references)
50+ fmt.Println("Loading lexicons...")
51+ cat := lexicon.NewBaseCatalog()
52+ if err := cat.LoadDirectory(lexDir); err != nil {
53+ fmt.Printf("failed to load lexicons: %v\n", err)
54+ os.Exit(1)
55+ }
56+57+ // Step 2: Generate Go code for each lexicon file
58+ fmt.Println("Running lexgen...")
59+ config := &lexgen.GenConfig{
60+ RegisterLexiconTypeID: false, // KEY: no init() blocks generated
61+ UnknownType: "map-string-any",
62+ WarningText: "Code generated by generate.go; DO NOT EDIT.",
63+ }
64+65+ // Track generated types for register.go
66+ var registeredTypes []typeInfo
67+68+ // Walk lexicon directory and generate code for each file
69+ err = filepath.Walk(lexDir, func(path string, info os.FileInfo, err error) error {
70+ if err != nil {
71+ return err
72+ }
73+ if info.IsDir() || !strings.HasSuffix(path, ".json") {
74+ return nil
75+ }
76+77+ // Load and parse the schema file
78+ data, err := os.ReadFile(path)
79+ if err != nil {
80+ return fmt.Errorf("failed to read %s: %w", path, err)
81+ }
82+83+ var sf lexicon.SchemaFile
84+ if err := json.Unmarshal(data, &sf); err != nil {
85+ return fmt.Errorf("failed to parse %s: %w", path, err)
86+ }
87+88+ if err := sf.FinishParse(); err != nil {
89+ return fmt.Errorf("failed to finish parse %s: %w", path, err)
90+ }
91+92+ // Flatten the schema
93+ flat, err := lexgen.FlattenSchemaFile(&sf)
94+ if err != nil {
95+ return fmt.Errorf("failed to flatten schema %s: %w", path, err)
96+ }
97+98+ // Generate code
99+ var buf bytes.Buffer
100+ gen := &lexgen.CodeGenerator{
101+ Config: config,
102+ Lex: flat,
103+ Cat: &cat,
104+ Out: &buf,
105+ }
106+107+ if err := gen.WriteLexicon(); err != nil {
108+ return fmt.Errorf("failed to generate code for %s: %w", path, err)
109+ }
110+111+ // Fix package name: lexgen generates "ioatcr" but we want "atproto"
112+ code := bytes.Replace(buf.Bytes(), []byte("package ioatcr"), []byte("package atproto"), 1)
113+114+ // Format with goimports
115+ fileName := gen.FileName()
116+ formatted, err := imports.Process(fileName, code, nil)
117+ if err != nil {
118+ // Write unformatted for debugging
119+ outPath := filepath.Join(pkgDir, fileName)
120+ os.WriteFile(outPath+".broken", code, 0644)
121+ return fmt.Errorf("failed to format %s: %w (wrote to %s.broken)", fileName, err, outPath)
122+ }
123+124+ // Write output file
125+ outPath := filepath.Join(pkgDir, fileName)
126+ if err := os.WriteFile(outPath, formatted, 0644); err != nil {
127+ return fmt.Errorf("failed to write %s: %w", outPath, err)
128+ }
129+130+ fmt.Printf(" Generated %s\n", fileName)
131+132+ // Track type for registration - compute type name from NSID
133+ typeName := nsidToTypeName(sf.ID)
134+ registeredTypes = append(registeredTypes, typeInfo{
135+ NSID: sf.ID,
136+ TypeName: typeName,
137+ })
138+139+ return nil
140+ })
141+ if err != nil {
142+ fmt.Printf("lexgen failed: %v\n", err)
143+ os.Exit(1)
144+ }
145+146+ // Step 3: Run cbor-gen via exec.Command
147+ // This must be a separate process so it can compile the freshly generated types
148+ fmt.Println("Running cbor-gen...")
149+ if err := runCborGen(repoRoot, pkgDir); err != nil {
150+ fmt.Printf("cbor-gen failed: %v\n", err)
151+ os.Exit(1)
152+ }
153+154+ // Step 4: Generate register.go
155+ fmt.Println("Generating register.go...")
156+ if err := generateRegisterFile(pkgDir, registeredTypes); err != nil {
157+ fmt.Printf("failed to generate register.go: %v\n", err)
158+ os.Exit(1)
159+ }
160+161+ fmt.Println("Code generation complete!")
162+}
163+164+type typeInfo struct {
165+ NSID string
166+ TypeName string
167+}
168+169+// nsidToTypeName converts an NSID to a Go type name
170+// io.atcr.manifest → Manifest
171+// io.atcr.hold.captain → HoldCaptain
172+// io.atcr.sailor.profile → SailorProfile
173+func nsidToTypeName(nsid string) string {
174+ parts := strings.Split(nsid, ".")
175+ if len(parts) < 3 {
176+ return ""
177+ }
178+ // Skip the first two parts (authority, e.g., "io.atcr")
179+ // and capitalize each remaining part
180+ var result string
181+ for _, part := range parts[2:] {
182+ if len(part) > 0 {
183+ result += strings.ToUpper(part[:1]) + part[1:]
184+ }
185+ }
186+ return result
187+}
188+189+func runCborGen(repoRoot, pkgDir string) error {
190+ // Create a temporary Go file that runs cbor-gen
191+ cborGenCode := `//go:build ignore
192+193+package main
194195import (
196 "fmt"
···202)
203204func main() {
0205 if err := cbg.WriteMapEncodersToFile("cbor_gen.go", "atproto",
206+ // Manifest types
207+ atproto.Manifest{},
208+ atproto.Manifest_BlobReference{},
209+ atproto.Manifest_ManifestReference{},
210+ atproto.Manifest_Platform{},
211+ atproto.Manifest_Annotations{},
212+ atproto.Manifest_BlobReference_Annotations{},
213+ atproto.Manifest_ManifestReference_Annotations{},
214+ // Tag
215+ atproto.Tag{},
216+ // Sailor types
217+ atproto.SailorProfile{},
218+ atproto.SailorStar{},
219+ atproto.SailorStar_Subject{},
220+ // Hold types
221+ atproto.HoldCaptain{},
222+ atproto.HoldCrew{},
223+ atproto.HoldLayer{},
224+ // External types
225 atproto.TangledProfileRecord{},
226 ); err != nil {
227+ fmt.Printf("cbor-gen failed: %v\n", err)
228 os.Exit(1)
229 }
230}
231+`
232+233+ // Write temp file
234+ tmpFile := filepath.Join(pkgDir, "cborgen_tmp.go")
235+ if err := os.WriteFile(tmpFile, []byte(cborGenCode), 0644); err != nil {
236+ return fmt.Errorf("failed to write temp cbor-gen file: %w", err)
237+ }
238+ defer os.Remove(tmpFile)
239+240+ // Run it
241+ cmd := exec.Command("go", "run", tmpFile)
242+ cmd.Dir = pkgDir
243+ cmd.Stdout = os.Stdout
244+ cmd.Stderr = os.Stderr
245+ return cmd.Run()
246+}
247+248+func generateRegisterFile(pkgDir string, types []typeInfo) error {
249+ var buf bytes.Buffer
250+251+ buf.WriteString("// Code generated by generate.go; DO NOT EDIT.\n\n")
252+ buf.WriteString("package atproto\n\n")
253+ buf.WriteString("import lexutil \"github.com/bluesky-social/indigo/lex/util\"\n\n")
254+ buf.WriteString("func init() {\n")
255+256+ for _, t := range types {
257+ fmt.Fprintf(&buf, "\tlexutil.RegisterType(%q, &%s{})\n", t.NSID, t.TypeName)
258+ }
259+260+ buf.WriteString("}\n")
261+262+ outPath := filepath.Join(pkgDir, "register.go")
263+ return os.WriteFile(outPath, buf.Bytes(), 0644)
264+}
265+266+func findRepoRoot() (string, error) {
267+ dir, err := os.Getwd()
268+ if err != nil {
269+ return "", err
270+ }
271+272+ for {
273+ if _, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil {
274+ return dir, nil
275+ }
276+ parent := filepath.Dir(dir)
277+ if parent == dir {
278+ return "", fmt.Errorf("go.mod not found")
279+ }
280+ dir = parent
281+ }
282+}
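Aside: with the generator above, the emitted register.go comes out roughly like this (illustrative output only; the exact list depends on which lexicon files are present):

// Code generated by generate.go; DO NOT EDIT.

package atproto

import lexutil "github.com/bluesky-social/indigo/lex/util"

func init() {
	lexutil.RegisterType("io.atcr.manifest", &Manifest{})
	lexutil.RegisterType("io.atcr.tag", &Tag{})
	lexutil.RegisterType("io.atcr.hold.captain", &HoldCaptain{})
	lexutil.RegisterType("io.atcr.sailor.profile", &SailorProfile{})
}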
+24
pkg/atproto/holdcaptain.go
···000000000000000000000000
···1+// Code generated by generate.go; DO NOT EDIT.
2+3+// Lexicon schema: io.atcr.hold.captain
4+5+package atproto
6+7+// Represents the hold's ownership and metadata. Stored as a singleton record at rkey 'self' in the hold's embedded PDS.
8+type HoldCaptain struct {
9+ LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.hold.captain"`
10+ // allowAllCrew: Allow any authenticated user to register as crew
11+ AllowAllCrew bool `json:"allowAllCrew" cborgen:"allowAllCrew"`
12+ // deployedAt: RFC3339 timestamp of when the hold was deployed
13+ DeployedAt string `json:"deployedAt" cborgen:"deployedAt"`
14+ // enableBlueskyPosts: Enable Bluesky posts when manifests are pushed
15+ EnableBlueskyPosts bool `json:"enableBlueskyPosts" cborgen:"enableBlueskyPosts"`
16+ // owner: DID of the hold owner
17+ Owner string `json:"owner" cborgen:"owner"`
18+ // provider: Deployment provider (e.g., fly.io, aws, etc.)
19+ Provider *string `json:"provider,omitempty" cborgen:"provider,omitempty"`
20+ // public: Whether this hold allows public blob reads (pulls) without authentication
21+ Public bool `json:"public" cborgen:"public"`
22+ // region: S3 region where blobs are stored
23+ Region *string `json:"region,omitempty" cborgen:"region,omitempty"`
24+}
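Aside: a usage sketch for the generated type (not part of the generated file; field values are examples), building a captain record and serializing it for a putRecord call:

import (
	"encoding/json"
	"time"

	"atcr.io/pkg/atproto"
)

func exampleCaptainRecord() ([]byte, error) {
	rec := atproto.HoldCaptain{
		LexiconTypeID:      "io.atcr.hold.captain",
		Owner:              "did:plc:example",
		AllowAllCrew:       false,
		EnableBlueskyPosts: true,
		Public:             true,
		DeployedAt:         time.Now().UTC().Format(time.RFC3339),
	}
	return json.Marshal(&rec)
}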
+18
pkg/atproto/holdcrew.go
···000000000000000000
···1+// Code generated by generate.go; DO NOT EDIT.
2+3+// Lexicon schema: io.atcr.hold.crew
4+5+package atproto
6+7+// Crew member in a hold's embedded PDS. Grants access permissions to push blobs to the hold. Stored in the hold's embedded PDS (one record per member).
8+type HoldCrew struct {
9+ LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.hold.crew"`
10+ // addedAt: RFC3339 timestamp of when the member was added
11+ AddedAt string `json:"addedAt" cborgen:"addedAt"`
12+ // member: DID of the crew member
13+ Member string `json:"member" cborgen:"member"`
14+ // permissions: Specific permissions granted to this member
15+ Permissions []string `json:"permissions" cborgen:"permissions"`
16+ // role: Member's role in the hold
17+ Role string `json:"role" cborgen:"role"`
18+}
+24
pkg/atproto/holdlayer.go
···000000000000000000000000
···1+// Code generated by generate.go; DO NOT EDIT.
2+3+// Lexicon schema: io.atcr.hold.layer
4+5+package atproto
6+7+// Represents metadata about a container layer stored in the hold. Stored in the hold's embedded PDS for tracking and analytics.
8+type HoldLayer struct {
9+ LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.hold.layer"`
10+ // createdAt: RFC3339 timestamp of when the layer was uploaded
11+ CreatedAt string `json:"createdAt" cborgen:"createdAt"`
12+ // digest: Layer digest (e.g., sha256:abc123...)
13+ Digest string `json:"digest" cborgen:"digest"`
14+ // mediaType: Media type (e.g., application/vnd.oci.image.layer.v1.tar+gzip)
15+ MediaType string `json:"mediaType" cborgen:"mediaType"`
16+ // repository: Repository this layer belongs to
17+ Repository string `json:"repository" cborgen:"repository"`
18+ // size: Size in bytes
19+ Size int64 `json:"size" cborgen:"size"`
20+ // userDid: DID of user who uploaded this layer
21+ UserDid string `json:"userDid" cborgen:"userDid"`
22+ // userHandle: Handle of user (for display purposes)
23+ UserHandle string `json:"userHandle" cborgen:"userHandle"`
24+}
+17-40
pkg/atproto/lexicon.go
···18 // TagCollection is the collection name for image tags
19 TagCollection = "io.atcr.tag"
2000021 // HoldCrewCollection is the collection name for hold crew (membership) - LEGACY BYOS model
22 // Stored in owner's PDS for BYOS holds
23 HoldCrewCollection = "io.atcr.hold.crew"
···38 // TangledProfileCollection is the collection name for tangled profiles
39 // Stored in hold's embedded PDS (singleton record at rkey "self")
40 TangledProfileCollection = "sh.tangled.actor.profile"
0004142 // BskyPostCollection is the collection name for Bluesky posts
43 BskyPostCollection = "app.bsky.feed.post"
···4748 // StarCollection is the collection name for repository stars
49 StarCollection = "io.atcr.sailor.star"
50-51- // RepoPageCollection is the collection name for repository page metadata
52- // Stored in user's PDS with rkey = repository name
53- RepoPageCollection = "io.atcr.repo.page"
54)
5556// ManifestRecord represents a container image manifest stored in ATProto
···310 CreatedAt time.Time `json:"createdAt"`
311}
31200000000000313// SailorProfileRecord represents a user's profile with registry preferences
314// Stored in the user's PDS to configure default hold and other settings
315type SailorProfileRecord struct {
···335 return &SailorProfileRecord{
336 Type: SailorProfileCollection,
337 DefaultHold: defaultHold,
338- CreatedAt: now,
339- UpdatedAt: now,
340- }
341-}
342-343-// RepoPageRecord represents repository page metadata (description + avatar)
344-// Stored in the user's PDS with rkey = repository name
345-// Users can edit this directly in their PDS to customize their repository page
346-type RepoPageRecord struct {
347- // Type should be "io.atcr.repo.page"
348- Type string `json:"$type"`
349-350- // Repository is the name of the repository (e.g., "myapp")
351- Repository string `json:"repository"`
352-353- // Description is the markdown README/description content
354- Description string `json:"description,omitempty"`
355-356- // Avatar is the repository avatar/icon blob reference
357- Avatar *ATProtoBlobRef `json:"avatar,omitempty"`
358-359- // CreatedAt timestamp
360- CreatedAt time.Time `json:"createdAt"`
361-362- // UpdatedAt timestamp
363- UpdatedAt time.Time `json:"updatedAt"`
364-}
365-366-// NewRepoPageRecord creates a new repo page record
367-func NewRepoPageRecord(repository, description string, avatar *ATProtoBlobRef) *RepoPageRecord {
368- now := time.Now()
369- return &RepoPageRecord{
370- Type: RepoPageCollection,
371- Repository: repository,
372- Description: description,
373- Avatar: avatar,
374 CreatedAt: now,
375 UpdatedAt: now,
376 }
···18 // TagCollection is the collection name for image tags
19 TagCollection = "io.atcr.tag"
2021+ // HoldCollection is the collection name for storage holds (BYOS)
22+ HoldCollection = "io.atcr.hold"
23+24 // HoldCrewCollection is the collection name for hold crew (membership) - LEGACY BYOS model
25 // Stored in owner's PDS for BYOS holds
26 HoldCrewCollection = "io.atcr.hold.crew"
···41 // TangledProfileCollection is the collection name for tangled profiles
42 // Stored in hold's embedded PDS (singleton record at rkey "self")
43 TangledProfileCollection = "sh.tangled.actor.profile"
4748 // BskyPostCollection is the collection name for Bluesky posts
49 BskyPostCollection = "app.bsky.feed.post"
···5354 // StarCollection is the collection name for repository stars
55 StarCollection = "io.atcr.sailor.star"
000056)
5758// ManifestRecord represents a container image manifest stored in ATProto
···312 CreatedAt time.Time `json:"createdAt"`
313}
314315+// NewHoldRecord creates a new hold record
316+func NewHoldRecord(endpoint, owner string, public bool) *HoldRecord {
317+ return &HoldRecord{
318+ Type: HoldCollection,
319+ Endpoint: endpoint,
320+ Owner: owner,
321+ Public: public,
322+ CreatedAt: time.Now(),
323+ }
324+}
325+326// SailorProfileRecord represents a user's profile with registry preferences
327// Stored in the user's PDS to configure default hold and other settings
328type SailorProfileRecord struct {
···348 return &SailorProfileRecord{
349 Type: SailorProfileCollection,
350 DefaultHold: defaultHold,
000000000000000000000000000000000000351 CreatedAt: now,
352 UpdatedAt: now,
353 }
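Aside: a short usage sketch for the new constructor (values are examples; the record would presumably be written to the owner's PDS under the io.atcr.hold collection):

rec := atproto.NewHoldRecord("https://hold01.atcr.io", "did:plc:example", true)
_ = rec // endpoint, owner DID, public flag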
+18
pkg/atproto/lexicon_embedded.go
···000000000000000000
···1+package atproto
2+3+// This file contains ATProto record types that are NOT generated from our lexicons.
4+// These are either external schemas or special types that require manual definition.
5+6+// TangledProfileRecord represents a Tangled profile for the hold
7+// Collection: sh.tangled.actor.profile (external schema - not controlled by ATCR)
8+// Stored in hold's embedded PDS (singleton record at rkey "self")
9+// Uses CBOR encoding for efficient storage in hold's carstore
10+type TangledProfileRecord struct {
11+ Type string `json:"$type" cborgen:"$type"`
12+ Links []string `json:"links" cborgen:"links"`
13+ Stats []string `json:"stats" cborgen:"stats"`
14+ Bluesky bool `json:"bluesky" cborgen:"bluesky"`
15+ Location string `json:"location" cborgen:"location"`
16+ Description string `json:"description" cborgen:"description"`
17+ PinnedRepositories []string `json:"pinnedRepositories" cborgen:"pinnedRepositories"`
18+}
···1+package atproto
2+3+//go:generate go run generate.go
4+5+import (
6+ "encoding/base64"
7+ "encoding/json"
8+ "fmt"
9+ "strings"
10+ "time"
11+)
12+13+// Collection names for ATProto records
14+const (
15+ // ManifestCollection is the collection name for container manifests
16+ ManifestCollection = "io.atcr.manifest"
17+18+ // TagCollection is the collection name for image tags
19+ TagCollection = "io.atcr.tag"
20+21+ // HoldCollection is the collection name for storage holds (BYOS) - LEGACY
22+ HoldCollection = "io.atcr.hold"
23+24+ // HoldCrewCollection is the collection name for hold crew (membership) - LEGACY BYOS model
25+ // Stored in owner's PDS for BYOS holds
26+ HoldCrewCollection = "io.atcr.hold.crew"
27+28+ // CaptainCollection is the collection name for captain records (hold ownership) - EMBEDDED PDS model
29+ // Stored in hold's embedded PDS (singleton record at rkey "self")
30+ CaptainCollection = "io.atcr.hold.captain"
31+32+ // CrewCollection is the collection name for crew records (access control) - EMBEDDED PDS model
33+ // Stored in hold's embedded PDS (one record per member)
34+ // Note: Uses same collection name as HoldCrewCollection but stored in different PDS (hold's PDS vs owner's PDS)
35+ CrewCollection = "io.atcr.hold.crew"
36+37+ // LayerCollection is the collection name for container layer metadata
38+ // Stored in hold's embedded PDS to track which layers are stored
39+ LayerCollection = "io.atcr.hold.layer"
40+41+ // TangledProfileCollection is the collection name for tangled profiles
42+ // Stored in hold's embedded PDS (singleton record at rkey "self")
43+ TangledProfileCollection = "sh.tangled.actor.profile"
44+45+ // BskyPostCollection is the collection name for Bluesky posts
46+ BskyPostCollection = "app.bsky.feed.post"
47+48+ // SailorProfileCollection is the collection name for user profiles
49+ SailorProfileCollection = "io.atcr.sailor.profile"
50+51+ // StarCollection is the collection name for repository stars
52+ StarCollection = "io.atcr.sailor.star"
53+)
54+55+// NewManifestRecord creates a new manifest record from OCI manifest JSON
56+func NewManifestRecord(repository, digest string, ociManifest []byte) (*Manifest, error) {
57+ // Parse the OCI manifest
58+ var ociData struct {
59+ SchemaVersion int `json:"schemaVersion"`
60+ MediaType string `json:"mediaType"`
61+ Config json.RawMessage `json:"config,omitempty"`
62+ Layers []json.RawMessage `json:"layers,omitempty"`
63+ Manifests []json.RawMessage `json:"manifests,omitempty"`
64+ Subject json.RawMessage `json:"subject,omitempty"`
65+ Annotations map[string]string `json:"annotations,omitempty"`
66+ }
67+68+ if err := json.Unmarshal(ociManifest, &ociData); err != nil {
69+ return nil, err
70+ }
71+72+ // Detect manifest type based on media type
73+ isManifestList := strings.Contains(ociData.MediaType, "manifest.list") ||
74+ strings.Contains(ociData.MediaType, "image.index")
75+76+ // Validate: must have either (config+layers) OR (manifests), never both
77+ hasImageFields := len(ociData.Config) > 0 || len(ociData.Layers) > 0
78+ hasIndexFields := len(ociData.Manifests) > 0
79+80+ if hasImageFields && hasIndexFields {
81+ return nil, fmt.Errorf("manifest cannot have both image fields (config/layers) and index fields (manifests)")
82+ }
83+ if !hasImageFields && !hasIndexFields {
84+ return nil, fmt.Errorf("manifest must have either image fields (config/layers) or index fields (manifests)")
85+ }
86+87+ record := &Manifest{
88+ LexiconTypeID: ManifestCollection,
89+ Repository: repository,
90+ Digest: digest,
91+ MediaType: ociData.MediaType,
92+ SchemaVersion: int64(ociData.SchemaVersion),
93+ // ManifestBlob will be set by the caller after uploading to blob storage
94+ CreatedAt: time.Now().Format(time.RFC3339),
95+ }
96+97+ // Handle annotations - Manifest_Annotations is an empty struct in generated code
98+ // We don't copy ociData.Annotations since the generated type doesn't support arbitrary keys
99+100+ if isManifestList {
101+ // Parse manifest list/index
102+ record.Manifests = make([]Manifest_ManifestReference, len(ociData.Manifests))
103+ for i, m := range ociData.Manifests {
104+ var ref struct {
105+ MediaType string `json:"mediaType"`
106+ Digest string `json:"digest"`
107+ Size int64 `json:"size"`
108+ Platform *Manifest_Platform `json:"platform,omitempty"`
109+ Annotations map[string]string `json:"annotations,omitempty"`
110+ }
111+ if err := json.Unmarshal(m, &ref); err != nil {
112+ return nil, fmt.Errorf("failed to parse manifest reference %d: %w", i, err)
113+ }
114+ record.Manifests[i] = Manifest_ManifestReference{
115+ MediaType: ref.MediaType,
116+ Digest: ref.Digest,
117+ Size: ref.Size,
118+ Platform: ref.Platform,
119+ }
120+ }
121+ } else {
122+ // Parse image manifest
123+ if len(ociData.Config) > 0 {
124+ var config Manifest_BlobReference
125+ if err := json.Unmarshal(ociData.Config, &config); err != nil {
126+ return nil, fmt.Errorf("failed to parse config: %w", err)
127+ }
128+ record.Config = &config
129+ }
130+131+ // Parse layers
132+ record.Layers = make([]Manifest_BlobReference, len(ociData.Layers))
133+ for i, layer := range ociData.Layers {
134+ if err := json.Unmarshal(layer, &record.Layers[i]); err != nil {
135+ return nil, fmt.Errorf("failed to parse layer %d: %w", i, err)
136+ }
137+ }
138+ }
139+140+ // Parse subject if present (works for both types)
141+ if len(ociData.Subject) > 0 {
142+ var subject Manifest_BlobReference
143+ if err := json.Unmarshal(ociData.Subject, &subject); err != nil {
144+ return nil, err
145+ }
146+ record.Subject = &subject
147+ }
148+149+ return record, nil
150+}
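A minimal usage sketch for the constructor above; the OCI manifest JSON, repository name, and digests are placeholders:

// sketchNewManifestRecord exercises the image-manifest path of
// NewManifestRecord with a single-layer manifest; values are illustrative.
func sketchNewManifestRecord() *Manifest {
	ociJSON := []byte(`{
		"schemaVersion": 2,
		"mediaType": "application/vnd.oci.image.manifest.v1+json",
		"config": {"mediaType": "application/vnd.oci.image.config.v1+json", "digest": "sha256:cfg", "size": 7023},
		"layers": [{"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip", "digest": "sha256:layer1", "size": 32654}]
	}`)
	record, err := NewManifestRecord("myapp", "sha256:abc123", ociJSON)
	if err != nil {
		panic(err)
	}
	// record.Config and record.Layers are populated; record.Manifests stays
	// empty because the media type is an image manifest, not an index.
	return record
}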
151+152+// NewTagRecord creates a new tag record with manifest AT-URI
153+// did: The DID of the user (e.g., "did:plc:xyz123")
154+// repository: The repository name (e.g., "myapp")
155+// tag: The tag name (e.g., "latest", "v1.0.0")
156+// manifestDigest: The manifest digest (e.g., "sha256:abc123...")
157+func NewTagRecord(did, repository, tag, manifestDigest string) *Tag {
158+ // Build AT-URI for the manifest
159+ // Format: at://did:plc:xyz/io.atcr.manifest/<digest-without-sha256-prefix>
160+ manifestURI := BuildManifestURI(did, manifestDigest)
161+162+ return &Tag{
163+ LexiconTypeID: TagCollection,
164+ Repository: repository,
165+ Tag: tag,
166+ Manifest: &manifestURI,
167+ // Note: ManifestDigest is not set for new records (only for backward compat with old records)
168+ CreatedAt: time.Now().Format(time.RFC3339),
169+ }
170+}
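For reference, a sketch of the record NewTagRecord produces; the DID and digest are placeholders:

// sketchNewTagRecord shows the AT-URI written into the manifest field and the
// legacy field being left unset.
func sketchNewTagRecord() {
	tag := NewTagRecord("did:plc:xyz123", "myapp", "latest", "sha256:abc123")
	if *tag.Manifest != "at://did:plc:xyz123/io.atcr.manifest/abc123" {
		panic("unexpected manifest AT-URI")
	}
	if tag.ManifestDigest != nil {
		panic("legacy manifestDigest should stay unset for new records")
	}
}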
171+172+// NewSailorProfileRecord creates a new sailor profile record
173+func NewSailorProfileRecord(defaultHold string) *SailorProfile {
174+ now := time.Now().Format(time.RFC3339)
175+ var holdPtr *string
176+ if defaultHold != "" {
177+ holdPtr = &defaultHold
178+ }
179+ return &SailorProfile{
180+ LexiconTypeID: SailorProfileCollection,
181+ DefaultHold: holdPtr,
182+ CreatedAt: now,
183+ UpdatedAt: &now,
184+ }
185+}
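A brief sketch of the optional DefaultHold handling; the hold value below is a placeholder:

// sketchSailorProfileDefaults shows that an empty default hold is stored as a
// nil pointer, while a non-empty value is carried through.
func sketchSailorProfileDefaults() {
	optedOut := NewSailorProfileRecord("")
	withHold := NewSailorProfileRecord("did:web:hold01.atcr.io")
	if optedOut.DefaultHold != nil || *withHold.DefaultHold != "did:web:hold01.atcr.io" {
		panic("unexpected DefaultHold handling")
	}
}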
186+187+// NewStarRecord creates a new star record
188+func NewStarRecord(ownerDID, repository string) *SailorStar {
189+ return &SailorStar{
190+ LexiconTypeID: StarCollection,
191+ Subject: SailorStar_Subject{
192+ Did: ownerDID,
193+ Repository: repository,
194+ },
195+ CreatedAt: time.Now().Format(time.RFC3339),
196+ }
197+}
198+199+// NewLayerRecord creates a new layer record
200+func NewLayerRecord(digest string, size int64, mediaType, repository, userDID, userHandle string) *HoldLayer {
201+ return &HoldLayer{
202+ LexiconTypeID: LayerCollection,
203+ Digest: digest,
204+ Size: size,
205+ MediaType: mediaType,
206+ Repository: repository,
207+ UserDid: userDID,
208+ UserHandle: userHandle,
209+ CreatedAt: time.Now().Format(time.RFC3339),
210+ }
211+}
212+213+// StarRecordKey generates a record key for a star
214+// Uses a deterministic encoding to ensure uniqueness and prevent duplicate stars
215+func StarRecordKey(ownerDID, repository string) string {
216+ // Use base64 encoding of "ownerDID/repository" as the record key
217+ // This is deterministic and prevents duplicate stars
218+ combined := ownerDID + "/" + repository
219+ return base64.RawURLEncoding.EncodeToString([]byte(combined))
220+}
221+222+// ParseStarRecordKey decodes a star record key back to ownerDID and repository
223+func ParseStarRecordKey(rkey string) (ownerDID, repository string, err error) {
224+ decoded, err := base64.RawURLEncoding.DecodeString(rkey)
225+ if err != nil {
226+ return "", "", fmt.Errorf("failed to decode star rkey: %w", err)
227+ }
228+229+ parts := strings.SplitN(string(decoded), "/", 2)
230+ if len(parts) != 2 {
231+ return "", "", fmt.Errorf("invalid star rkey format: %s", string(decoded))
232+ }
233+234+ return parts[0], parts[1], nil
235+}
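A round-trip sketch for the two star key helpers above, using placeholder values:

// sketchStarKeyRoundTrip checks that StarRecordKey and ParseStarRecordKey
// agree for a sample owner DID and repository.
func sketchStarKeyRoundTrip() {
	rkey := StarRecordKey("did:plc:alice123", "myapp")
	owner, repo, err := ParseStarRecordKey(rkey)
	if err != nil || owner != "did:plc:alice123" || repo != "myapp" {
		panic("star rkey round-trip mismatch")
	}
}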
236+237+// ResolveHoldDIDFromURL converts a hold endpoint URL to a did:web DID
238+// This ensures that different representations of the same hold are deduplicated:
239+// - http://172.28.0.3:8080 → did:web:172.28.0.3:8080
240+// - http://hold01.atcr.io → did:web:hold01.atcr.io
241+// - https://hold01.atcr.io → did:web:hold01.atcr.io
242+// - did:web:hold01.atcr.io → did:web:hold01.atcr.io (passthrough)
243+func ResolveHoldDIDFromURL(holdURL string) string {
244+ // Handle empty URLs
245+ if holdURL == "" {
246+ return ""
247+ }
248+249+ // If already a DID, return as-is
250+ if IsDID(holdURL) {
251+ return holdURL
252+ }
253+254+ // Parse URL to get hostname
255+ holdURL = strings.TrimPrefix(holdURL, "http://")
256+ holdURL = strings.TrimPrefix(holdURL, "https://")
257+ holdURL = strings.TrimSuffix(holdURL, "/")
258+259+ // Extract hostname (remove path if present)
260+ parts := strings.Split(holdURL, "/")
261+ hostname := parts[0]
262+263+ // Convert to did:web
264+ // did:web uses hostname directly (port included if non-standard)
265+ return "did:web:" + hostname
266+}
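A sketch walking the normalizations listed in the comment above:

// sketchResolveHoldDID exercises the URL-to-did:web conversion for the three
// documented input shapes.
func sketchResolveHoldDID() {
	cases := map[string]string{
		"http://172.28.0.3:8080":  "did:web:172.28.0.3:8080",
		"https://hold01.atcr.io/": "did:web:hold01.atcr.io",
		"did:web:hold01.atcr.io":  "did:web:hold01.atcr.io",
	}
	for in, want := range cases {
		if got := ResolveHoldDIDFromURL(in); got != want {
			panic("unexpected hold DID: " + got)
		}
	}
}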
267+268+// IsDID checks if a string is a DID (starts with "did:")
269+func IsDID(s string) bool {
270+ return len(s) > 4 && s[:4] == "did:"
271+}
272+273+// RepositoryTagToRKey converts a repository and tag to an ATProto record key
274+// ATProto record keys must match: ^[a-zA-Z0-9._~-]{1,512}$
275+func RepositoryTagToRKey(repository, tag string) string {
276+ // Combine repository and tag to create a unique key
277+ // Replace invalid characters: slashes become tildes (~)
278+ // We use tilde instead of dash to avoid ambiguity with repository names that contain hyphens
279+ key := fmt.Sprintf("%s_%s", repository, tag)
280+281+ // Replace / with ~ (slash not allowed in rkeys, tilde is allowed and unlikely in repo names)
282+ key = strings.ReplaceAll(key, "/", "~")
283+284+ return key
285+}
286+287+// RKeyToRepositoryTag converts an ATProto record key back to repository and tag
288+// This is the inverse of RepositoryTagToRKey
289+// Note: If the tag contains underscores, this will split on the LAST underscore
290+func RKeyToRepositoryTag(rkey string) (repository, tag string) {
291+ // Find the last underscore to split repository and tag
292+ lastUnderscore := strings.LastIndex(rkey, "_")
293+ if lastUnderscore == -1 {
294+ // No underscore found - treat entire string as tag with empty repository
295+ return "", rkey
296+ }
297+298+ repository = rkey[:lastUnderscore]
299+ tag = rkey[lastUnderscore+1:]
300+301+ // Convert tildes back to slashes in repository (tilde was used to encode slashes)
302+ repository = strings.ReplaceAll(repository, "~", "/")
303+304+ return repository, tag
305+}
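A round-trip sketch for the rkey encoding above, using a namespaced repository name as a placeholder:

// sketchRKeyRoundTrip shows the slash-to-tilde encoding surviving a
// round trip through RepositoryTagToRKey and RKeyToRepositoryTag.
func sketchRKeyRoundTrip() {
	rkey := RepositoryTagToRKey("library/nginx", "1.25") // "library~nginx_1.25"
	repo, tag := RKeyToRepositoryTag(rkey)
	if repo != "library/nginx" || tag != "1.25" {
		panic("rkey round-trip mismatch")
	}
}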
306+307+// BuildManifestURI creates an AT-URI for a manifest record
308+// did: The DID of the user (e.g., "did:plc:xyz123")
309+// manifestDigest: The manifest digest (e.g., "sha256:abc123...")
310+// Returns: AT-URI in format "at://did:plc:xyz/io.atcr.manifest/<digest-without-sha256-prefix>"
311+func BuildManifestURI(did, manifestDigest string) string {
312+ // Remove the "sha256:" prefix from the digest to get the rkey
313+ rkey := strings.TrimPrefix(manifestDigest, "sha256:")
314+ return fmt.Sprintf("at://%s/%s/%s", did, ManifestCollection, rkey)
315+}
316+317+// ParseManifestURI extracts the digest from a manifest AT-URI
318+// manifestURI: AT-URI in format "at://did:plc:xyz/io.atcr.manifest/<digest-without-sha256-prefix>"
319+// Returns: Full digest with "sha256:" prefix (e.g., "sha256:abc123...")
320+func ParseManifestURI(manifestURI string) (string, error) {
321+ // Expected format: at://did:plc:xyz/io.atcr.manifest/<rkey>
322+ if !strings.HasPrefix(manifestURI, "at://") {
323+ return "", fmt.Errorf("invalid AT-URI format: must start with 'at://'")
324+ }
325+326+ // Remove "at://" prefix
327+ remainder := strings.TrimPrefix(manifestURI, "at://")
328+329+ // Split by "/"
330+ parts := strings.Split(remainder, "/")
331+ if len(parts) != 3 {
332+ return "", fmt.Errorf("invalid AT-URI format: expected 3 parts (did/collection/rkey), got %d", len(parts))
333+ }
334+335+ // Validate collection
336+ if parts[1] != ManifestCollection {
337+ return "", fmt.Errorf("invalid AT-URI: expected collection %s, got %s", ManifestCollection, parts[1])
338+ }
339+340+ // The rkey is the digest without the "sha256:" prefix
341+ // Add it back to get the full digest
342+ rkey := parts[2]
343+ return "sha256:" + rkey, nil
344+}
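A round-trip sketch for the AT-URI helpers above; the DID and digest are placeholders:

// sketchManifestURIRoundTrip shows BuildManifestURI and ParseManifestURI
// inverting each other, restoring the "sha256:" prefix on the way back.
func sketchManifestURIRoundTrip() {
	uri := BuildManifestURI("did:plc:xyz123", "sha256:abc123")
	// uri == "at://did:plc:xyz123/io.atcr.manifest/abc123"
	digest, err := ParseManifestURI(uri)
	if err != nil || digest != "sha256:abc123" {
		panic("manifest URI round-trip mismatch")
	}
}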
345+346+// GetManifestDigest extracts the digest from a Tag, preferring the manifest field
347+// Returns the digest with "sha256:" prefix (e.g., "sha256:abc123...")
348+func (t *Tag) GetManifestDigest() (string, error) {
349+ // Prefer the new manifest field
350+ if t.Manifest != nil && *t.Manifest != "" {
351+ return ParseManifestURI(*t.Manifest)
352+ }
353+354+ // Fall back to the legacy manifestDigest field
355+ if t.ManifestDigest != nil && *t.ManifestDigest != "" {
356+ return *t.ManifestDigest, nil
357+ }
358+359+ return "", fmt.Errorf("tag record has neither manifest nor manifestDigest field")
360+}
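A short sketch of the legacy fallback in GetManifestDigest; the digest is a placeholder:

// sketchGetManifestDigest shows an old-style record, with only the deprecated
// manifestDigest field set, still resolving to a digest.
func sketchGetManifestDigest() {
	legacy := "sha256:def456"
	old := &Tag{ManifestDigest: &legacy}
	digest, err := old.GetManifestDigest()
	if err != nil || digest != "sha256:def456" {
		panic("expected fallback to the deprecated manifestDigest field")
	}
}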
+109-215
pkg/atproto/lexicon_test.go
···104 digest string
105 ociManifest string
106 wantErr bool
107- checkFunc func(*testing.T, *ManifestRecord)
108 }{
109 {
110 name: "valid OCI manifest",
···112 digest: "sha256:abc123",
113 ociManifest: validOCIManifest,
114 wantErr: false,
115- checkFunc: func(t *testing.T, record *ManifestRecord) {
116- if record.Type != ManifestCollection {
117- t.Errorf("Type = %v, want %v", record.Type, ManifestCollection)
118 }
119 if record.Repository != "myapp" {
120 t.Errorf("Repository = %v, want myapp", record.Repository)
···143 if record.Layers[1].Digest != "sha256:layer2" {
144 t.Errorf("Layers[1].Digest = %v, want sha256:layer2", record.Layers[1].Digest)
145 }
146- if record.Annotations["org.opencontainers.image.created"] != "2025-01-01T00:00:00Z" {
147- t.Errorf("Annotations missing expected key")
148- }
149- if record.CreatedAt.IsZero() {
150- t.Error("CreatedAt should not be zero")
151 }
152 if record.Subject != nil {
153 t.Error("Subject should be nil")
···160 digest: "sha256:abc123",
161 ociManifest: manifestWithSubject,
162 wantErr: false,
163- checkFunc: func(t *testing.T, record *ManifestRecord) {
164 if record.Subject == nil {
165 t.Fatal("Subject should not be nil")
166 }
···192 digest: "sha256:multiarch",
193 ociManifest: manifestList,
194 wantErr: false,
195- checkFunc: func(t *testing.T, record *ManifestRecord) {
196 if record.MediaType != "application/vnd.oci.image.index.v1+json" {
197 t.Errorf("MediaType = %v, want application/vnd.oci.image.index.v1+json", record.MediaType)
198 }
···219 if record.Manifests[0].Platform.Architecture != "amd64" {
220 t.Errorf("Platform.Architecture = %v, want amd64", record.Manifests[0].Platform.Architecture)
221 }
222- if record.Manifests[0].Platform.OS != "linux" {
223- t.Errorf("Platform.OS = %v, want linux", record.Manifests[0].Platform.OS)
224 }
225226 // Check second manifest (arm64)
···230 if record.Manifests[1].Platform.Architecture != "arm64" {
231 t.Errorf("Platform.Architecture = %v, want arm64", record.Manifests[1].Platform.Architecture)
232 }
233- if record.Manifests[1].Platform.Variant != "v8" {
234 t.Errorf("Platform.Variant = %v, want v8", record.Manifests[1].Platform.Variant)
235 }
236 },
···268269func TestNewTagRecord(t *testing.T) {
270 did := "did:plc:test123"
271- before := time.Now()
0272 record := NewTagRecord(did, "myapp", "latest", "sha256:abc123")
273- after := time.Now()
274275- if record.Type != TagCollection {
276- t.Errorf("Type = %v, want %v", record.Type, TagCollection)
277 }
278279 if record.Repository != "myapp" {
···286287 // New records should have manifest field (AT-URI)
288 expectedURI := "at://did:plc:test123/io.atcr.manifest/abc123"
289- if record.Manifest != expectedURI {
290 t.Errorf("Manifest = %v, want %v", record.Manifest, expectedURI)
291 }
292293 // New records should NOT have manifestDigest field
294- if record.ManifestDigest != "" {
295- t.Errorf("ManifestDigest should be empty for new records, got %v", record.ManifestDigest)
296 }
297298- if record.UpdatedAt.Before(before) || record.UpdatedAt.After(after) {
299- t.Errorf("UpdatedAt = %v, want between %v and %v", record.UpdatedAt, before, after)
0000300 }
301}
302···391}
392393func TestTagRecord_GetManifestDigest(t *testing.T) {
000394 tests := []struct {
395 name string
396- record TagRecord
397 want string
398 wantErr bool
399 }{
400 {
401 name: "new record with manifest field",
402- record: TagRecord{
403- Manifest: "at://did:plc:test123/io.atcr.manifest/abc123",
404 },
405 want: "sha256:abc123",
406 wantErr: false,
407 },
408 {
409 name: "old record with manifestDigest field",
410- record: TagRecord{
411- ManifestDigest: "sha256:def456",
412 },
413 want: "sha256:def456",
414 wantErr: false,
415 },
416 {
417 name: "prefers manifest over manifestDigest",
418- record: TagRecord{
419- Manifest: "at://did:plc:test123/io.atcr.manifest/abc123",
420- ManifestDigest: "sha256:def456",
421 },
422 want: "sha256:abc123",
423 wantErr: false,
424 },
425 {
426 name: "no fields set",
427- record: TagRecord{},
428 want: "",
429 wantErr: true,
430 },
431 {
432 name: "invalid manifest URI",
433- record: TagRecord{
434- Manifest: "invalid-uri",
435 },
436 want: "",
437 wantErr: true,
···451 })
452 }
453}
00454455func TestNewSailorProfileRecord(t *testing.T) {
456 tests := []struct {
···473474 for _, tt := range tests {
475 t.Run(tt.name, func(t *testing.T) {
476- before := time.Now()
0477 record := NewSailorProfileRecord(tt.defaultHold)
478- after := time.Now()
479480- if record.Type != SailorProfileCollection {
481- t.Errorf("Type = %v, want %v", record.Type, SailorProfileCollection)
482 }
483484- if record.DefaultHold != tt.defaultHold {
485- t.Errorf("DefaultHold = %v, want %v", record.DefaultHold, tt.defaultHold)
000000486 }
487488- if record.CreatedAt.Before(before) || record.CreatedAt.After(after) {
489- t.Errorf("CreatedAt = %v, want between %v and %v", record.CreatedAt, before, after)
0490 }
491-492- if record.UpdatedAt.Before(before) || record.UpdatedAt.After(after) {
493- t.Errorf("UpdatedAt = %v, want between %v and %v", record.UpdatedAt, before, after)
494 }
495496- // CreatedAt and UpdatedAt should be equal for new records
497- if !record.CreatedAt.Equal(record.UpdatedAt) {
498- t.Errorf("CreatedAt (%v) != UpdatedAt (%v)", record.CreatedAt, record.UpdatedAt)
0000000499 }
500 })
501 }
502}
503504func TestNewStarRecord(t *testing.T) {
505- before := time.Now()
0506 record := NewStarRecord("did:plc:alice123", "myapp")
507- after := time.Now()
508509- if record.Type != StarCollection {
510- t.Errorf("Type = %v, want %v", record.Type, StarCollection)
511 }
512513- if record.Subject.DID != "did:plc:alice123" {
514- t.Errorf("Subject.DID = %v, want did:plc:alice123", record.Subject.DID)
515 }
516517 if record.Subject.Repository != "myapp" {
518 t.Errorf("Subject.Repository = %v, want myapp", record.Subject.Repository)
519 }
520521- if record.CreatedAt.Before(before) || record.CreatedAt.After(after) {
522- t.Errorf("CreatedAt = %v, want between %v and %v", record.CreatedAt, before, after)
0000523 }
524}
525···807 }
808809 // Add hold DID
810- record.HoldDID = "did:web:hold01.atcr.io"
0811812 // Serialize to JSON
813 jsonData, err := json.Marshal(record)
···816 }
817818 // Deserialize from JSON
819- var decoded ManifestRecord
820 if err := json.Unmarshal(jsonData, &decoded); err != nil {
821 t.Fatalf("json.Unmarshal() error = %v", err)
822 }
823824 // Verify fields
825- if decoded.Type != record.Type {
826- t.Errorf("Type = %v, want %v", decoded.Type, record.Type)
827 }
828 if decoded.Repository != record.Repository {
829 t.Errorf("Repository = %v, want %v", decoded.Repository, record.Repository)
···831 if decoded.Digest != record.Digest {
832 t.Errorf("Digest = %v, want %v", decoded.Digest, record.Digest)
833 }
834- if decoded.HoldDID != record.HoldDID {
835- t.Errorf("HoldDID = %v, want %v", decoded.HoldDID, record.HoldDID)
836 }
837 if decoded.Config.Digest != record.Config.Digest {
838 t.Errorf("Config.Digest = %v, want %v", decoded.Config.Digest, record.Config.Digest)
···843}
844845func TestBlobReference_JSONSerialization(t *testing.T) {
846- blob := BlobReference{
847 MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
848 Digest: "sha256:abc123",
849 Size: 12345,
850- URLs: []string{"https://s3.example.com/blob"},
851- Annotations: map[string]string{
852- "key": "value",
853- },
854 }
855856 // Serialize
···860 }
861862 // Deserialize
863- var decoded BlobReference
864 if err := json.Unmarshal(jsonData, &decoded); err != nil {
865 t.Fatalf("json.Unmarshal() error = %v", err)
866 }
···878}
879880func TestStarSubject_JSONSerialization(t *testing.T) {
881- subject := StarSubject{
882- DID: "did:plc:alice123",
883 Repository: "myapp",
884 }
885···890 }
891892 // Deserialize
893- var decoded StarSubject
894 if err := json.Unmarshal(jsonData, &decoded); err != nil {
895 t.Fatalf("json.Unmarshal() error = %v", err)
896 }
897898 // Verify
899- if decoded.DID != subject.DID {
900- t.Errorf("DID = %v, want %v", decoded.DID, subject.DID)
901 }
902 if decoded.Repository != subject.Repository {
903 t.Errorf("Repository = %v, want %v", decoded.Repository, subject.Repository)
···1144 t.Fatal("NewLayerRecord() returned nil")
1145 }
11461147- if record.Type != LayerCollection {
1148- t.Errorf("Type = %q, want %q", record.Type, LayerCollection)
1149 }
11501151 if record.Digest != tt.digest {
···1164 t.Errorf("Repository = %q, want %q", record.Repository, tt.repository)
1165 }
11661167- if record.UserDID != tt.userDID {
1168- t.Errorf("UserDID = %q, want %q", record.UserDID, tt.userDID)
1169 }
11701171 if record.UserHandle != tt.userHandle {
···1187}
11881189func TestNewLayerRecordJSON(t *testing.T) {
1190- // Test that LayerRecord can be marshaled/unmarshaled to/from JSON
1191 record := NewLayerRecord(
1192 "sha256:abc123",
1193 1024,
···1204 }
12051206 // Unmarshal back
1207- var decoded LayerRecord
1208 if err := json.Unmarshal(jsonData, &decoded); err != nil {
1209 t.Fatalf("json.Unmarshal() error = %v", err)
1210 }
12111212 // Verify fields match
1213- if decoded.Type != record.Type {
1214- t.Errorf("Type = %q, want %q", decoded.Type, record.Type)
1215 }
1216 if decoded.Digest != record.Digest {
1217 t.Errorf("Digest = %q, want %q", decoded.Digest, record.Digest)
···1225 if decoded.Repository != record.Repository {
1226 t.Errorf("Repository = %q, want %q", decoded.Repository, record.Repository)
1227 }
1228- if decoded.UserDID != record.UserDID {
1229- t.Errorf("UserDID = %q, want %q", decoded.UserDID, record.UserDID)
1230 }
1231 if decoded.UserHandle != record.UserHandle {
1232 t.Errorf("UserHandle = %q, want %q", decoded.UserHandle, record.UserHandle)
···1235 t.Errorf("CreatedAt = %q, want %q", decoded.CreatedAt, record.CreatedAt)
1236 }
1237}
1238-1239-func TestNewRepoPageRecord(t *testing.T) {
1240- tests := []struct {
1241- name string
1242- repository string
1243- description string
1244- avatar *ATProtoBlobRef
1245- }{
1246- {
1247- name: "with description only",
1248- repository: "myapp",
1249- description: "# My App\n\nA cool container image.",
1250- avatar: nil,
1251- },
1252- {
1253- name: "with avatar only",
1254- repository: "another-app",
1255- description: "",
1256- avatar: &ATProtoBlobRef{
1257- Type: "blob",
1258- Ref: Link{Link: "bafyreiabc123"},
1259- MimeType: "image/png",
1260- Size: 1024,
1261- },
1262- },
1263- {
1264- name: "with both description and avatar",
1265- repository: "full-app",
1266- description: "This is a full description.",
1267- avatar: &ATProtoBlobRef{
1268- Type: "blob",
1269- Ref: Link{Link: "bafyreiabc456"},
1270- MimeType: "image/jpeg",
1271- Size: 2048,
1272- },
1273- },
1274- {
1275- name: "empty values",
1276- repository: "",
1277- description: "",
1278- avatar: nil,
1279- },
1280- }
1281-1282- for _, tt := range tests {
1283- t.Run(tt.name, func(t *testing.T) {
1284- before := time.Now()
1285- record := NewRepoPageRecord(tt.repository, tt.description, tt.avatar)
1286- after := time.Now()
1287-1288- if record.Type != RepoPageCollection {
1289- t.Errorf("Type = %v, want %v", record.Type, RepoPageCollection)
1290- }
1291-1292- if record.Repository != tt.repository {
1293- t.Errorf("Repository = %v, want %v", record.Repository, tt.repository)
1294- }
1295-1296- if record.Description != tt.description {
1297- t.Errorf("Description = %v, want %v", record.Description, tt.description)
1298- }
1299-1300- if tt.avatar == nil && record.Avatar != nil {
1301- t.Error("Avatar should be nil")
1302- }
1303-1304- if tt.avatar != nil {
1305- if record.Avatar == nil {
1306- t.Fatal("Avatar should not be nil")
1307- }
1308- if record.Avatar.Ref.Link != tt.avatar.Ref.Link {
1309- t.Errorf("Avatar.Ref.Link = %v, want %v", record.Avatar.Ref.Link, tt.avatar.Ref.Link)
1310- }
1311- }
1312-1313- if record.CreatedAt.Before(before) || record.CreatedAt.After(after) {
1314- t.Errorf("CreatedAt = %v, want between %v and %v", record.CreatedAt, before, after)
1315- }
1316-1317- if record.UpdatedAt.Before(before) || record.UpdatedAt.After(after) {
1318- t.Errorf("UpdatedAt = %v, want between %v and %v", record.UpdatedAt, before, after)
1319- }
1320-1321- // CreatedAt and UpdatedAt should be equal for new records
1322- if !record.CreatedAt.Equal(record.UpdatedAt) {
1323- t.Errorf("CreatedAt (%v) != UpdatedAt (%v)", record.CreatedAt, record.UpdatedAt)
1324- }
1325- })
1326- }
1327-}
1328-1329-func TestRepoPageRecord_JSONSerialization(t *testing.T) {
1330- record := NewRepoPageRecord(
1331- "myapp",
1332- "# My App\n\nA description with **markdown**.",
1333- &ATProtoBlobRef{
1334- Type: "blob",
1335- Ref: Link{Link: "bafyreiabc123"},
1336- MimeType: "image/png",
1337- Size: 1024,
1338- },
1339- )
1340-1341- // Serialize to JSON
1342- jsonData, err := json.Marshal(record)
1343- if err != nil {
1344- t.Fatalf("json.Marshal() error = %v", err)
1345- }
1346-1347- // Deserialize from JSON
1348- var decoded RepoPageRecord
1349- if err := json.Unmarshal(jsonData, &decoded); err != nil {
1350- t.Fatalf("json.Unmarshal() error = %v", err)
1351- }
1352-1353- // Verify fields
1354- if decoded.Type != record.Type {
1355- t.Errorf("Type = %v, want %v", decoded.Type, record.Type)
1356- }
1357- if decoded.Repository != record.Repository {
1358- t.Errorf("Repository = %v, want %v", decoded.Repository, record.Repository)
1359- }
1360- if decoded.Description != record.Description {
1361- t.Errorf("Description = %v, want %v", decoded.Description, record.Description)
1362- }
1363- if decoded.Avatar == nil {
1364- t.Fatal("Avatar should not be nil")
1365- }
1366- if decoded.Avatar.Ref.Link != record.Avatar.Ref.Link {
1367- t.Errorf("Avatar.Ref.Link = %v, want %v", decoded.Avatar.Ref.Link, record.Avatar.Ref.Link)
1368- }
1369-}
···104 digest string
105 ociManifest string
106 wantErr bool
107+ checkFunc func(*testing.T, *Manifest)
108 }{
109 {
110 name: "valid OCI manifest",
···112 digest: "sha256:abc123",
113 ociManifest: validOCIManifest,
114 wantErr: false,
115+ checkFunc: func(t *testing.T, record *Manifest) {
116+ if record.LexiconTypeID != ManifestCollection {
117+ t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, ManifestCollection)
118 }
119 if record.Repository != "myapp" {
120 t.Errorf("Repository = %v, want myapp", record.Repository)
···143 if record.Layers[1].Digest != "sha256:layer2" {
144 t.Errorf("Layers[1].Digest = %v, want sha256:layer2", record.Layers[1].Digest)
145 }
146+ // Note: Annotations are not copied to generated type (empty struct)
147+ if record.CreatedAt == "" {
148+ t.Error("CreatedAt should not be empty")
00149 }
150 if record.Subject != nil {
151 t.Error("Subject should be nil")
···158 digest: "sha256:abc123",
159 ociManifest: manifestWithSubject,
160 wantErr: false,
161+ checkFunc: func(t *testing.T, record *Manifest) {
162 if record.Subject == nil {
163 t.Fatal("Subject should not be nil")
164 }
···190 digest: "sha256:multiarch",
191 ociManifest: manifestList,
192 wantErr: false,
193+ checkFunc: func(t *testing.T, record *Manifest) {
194 if record.MediaType != "application/vnd.oci.image.index.v1+json" {
195 t.Errorf("MediaType = %v, want application/vnd.oci.image.index.v1+json", record.MediaType)
196 }
···217 if record.Manifests[0].Platform.Architecture != "amd64" {
218 t.Errorf("Platform.Architecture = %v, want amd64", record.Manifests[0].Platform.Architecture)
219 }
220+ if record.Manifests[0].Platform.Os != "linux" {
221+ t.Errorf("Platform.Os = %v, want linux", record.Manifests[0].Platform.Os)
222 }
223224 // Check second manifest (arm64)
···228 if record.Manifests[1].Platform.Architecture != "arm64" {
229 t.Errorf("Platform.Architecture = %v, want arm64", record.Manifests[1].Platform.Architecture)
230 }
231+ if record.Manifests[1].Platform.Variant == nil || *record.Manifests[1].Platform.Variant != "v8" {
232 t.Errorf("Platform.Variant = %v, want v8", record.Manifests[1].Platform.Variant)
233 }
234 },
···266267func TestNewTagRecord(t *testing.T) {
268 did := "did:plc:test123"
269+ // Truncate to second precision since Go's time.RFC3339 layout omits sub-second precision
270+ before := time.Now().Truncate(time.Second)
271 record := NewTagRecord(did, "myapp", "latest", "sha256:abc123")
272+ after := time.Now().Truncate(time.Second).Add(time.Second)
273274+ if record.LexiconTypeID != TagCollection {
275+ t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, TagCollection)
276 }
277278 if record.Repository != "myapp" {
···285286 // New records should have manifest field (AT-URI)
287 expectedURI := "at://did:plc:test123/io.atcr.manifest/abc123"
288+ if record.Manifest == nil || *record.Manifest != expectedURI {
289 t.Errorf("Manifest = %v, want %v", record.Manifest, expectedURI)
290 }
291292 // New records should NOT have manifestDigest field
293+ if record.ManifestDigest != nil && *record.ManifestDigest != "" {
294+ t.Errorf("ManifestDigest should be nil for new records, got %v", record.ManifestDigest)
295 }
296297+ createdAt, err := time.Parse(time.RFC3339, record.CreatedAt)
298+ if err != nil {
299+ t.Errorf("CreatedAt is not valid RFC3339: %v", err)
300+ }
301+ if createdAt.Before(before) || createdAt.After(after) {
302+ t.Errorf("CreatedAt = %v, want between %v and %v", createdAt, before, after)
303 }
304}
305···394}
395396func TestTagRecord_GetManifestDigest(t *testing.T) {
397+ manifestURI := "at://did:plc:test123/io.atcr.manifest/abc123"
398+ digestValue := "sha256:def456"
399+400 tests := []struct {
401 name string
402+ record Tag
403 want string
404 wantErr bool
405 }{
406 {
407 name: "new record with manifest field",
408+ record: Tag{
409+ Manifest: &manifestURI,
410 },
411 want: "sha256:abc123",
412 wantErr: false,
413 },
414 {
415 name: "old record with manifestDigest field",
416+ record: Tag{
417+ ManifestDigest: &digestValue,
418 },
419 want: "sha256:def456",
420 wantErr: false,
421 },
422 {
423 name: "prefers manifest over manifestDigest",
424+ record: Tag{
425+ Manifest: &manifestURI,
426+ ManifestDigest: &digestValue,
427 },
428 want: "sha256:abc123",
429 wantErr: false,
430 },
431 {
432 name: "no fields set",
433+ record: Tag{},
434 want: "",
435 wantErr: true,
436 },
437 {
438 name: "invalid manifest URI",
439+ record: Tag{
440+ Manifest: func() *string { s := "invalid-uri"; return &s }(),
441 },
442 want: "",
443 wantErr: true,
···457 })
458 }
459}
460+461+// TestNewHoldRecord is removed - HoldRecord is no longer supported (legacy BYOS)
462463func TestNewSailorProfileRecord(t *testing.T) {
464 tests := []struct {
···481482 for _, tt := range tests {
483 t.Run(tt.name, func(t *testing.T) {
484+ // Truncate to second precision since Go's time.RFC3339 layout omits sub-second precision
485+ before := time.Now().Truncate(time.Second)
486 record := NewSailorProfileRecord(tt.defaultHold)
487+ after := time.Now().Truncate(time.Second).Add(time.Second)
488489+ if record.LexiconTypeID != SailorProfileCollection {
490+ t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, SailorProfileCollection)
491 }
492493+ if tt.defaultHold == "" {
494+ if record.DefaultHold != nil {
495+ t.Errorf("DefaultHold = %v, want nil", record.DefaultHold)
496+ }
497+ } else {
498+ if record.DefaultHold == nil || *record.DefaultHold != tt.defaultHold {
499+ t.Errorf("DefaultHold = %v, want %v", record.DefaultHold, tt.defaultHold)
500+ }
501 }
502503+ createdAt, err := time.Parse(time.RFC3339, record.CreatedAt)
504+ if err != nil {
505+ t.Errorf("CreatedAt is not valid RFC3339: %v", err)
506 }
507+ if createdAt.Before(before) || createdAt.After(after) {
508+ t.Errorf("CreatedAt = %v, want between %v and %v", createdAt, before, after)
0509 }
510511+ if record.UpdatedAt == nil {
512+ t.Error("UpdatedAt should not be nil")
513+ } else {
514+ updatedAt, err := time.Parse(time.RFC3339, *record.UpdatedAt)
515+ if err != nil {
516+ t.Errorf("UpdatedAt is not valid RFC3339: %v", err)
517+ }
518+ if updatedAt.Before(before) || updatedAt.After(after) {
519+ t.Errorf("UpdatedAt = %v, want between %v and %v", updatedAt, before, after)
520+ }
521 }
522 })
523 }
524}
525526func TestNewStarRecord(t *testing.T) {
527+ // Truncate to second precision since Go's time.RFC3339 layout omits sub-second precision
528+ before := time.Now().Truncate(time.Second)
529 record := NewStarRecord("did:plc:alice123", "myapp")
530+ after := time.Now().Truncate(time.Second).Add(time.Second)
531532+ if record.LexiconTypeID != StarCollection {
533+ t.Errorf("LexiconTypeID = %v, want %v", record.LexiconTypeID, StarCollection)
534 }
535536+ if record.Subject.Did != "did:plc:alice123" {
537+ t.Errorf("Subject.Did = %v, want did:plc:alice123", record.Subject.Did)
538 }
539540 if record.Subject.Repository != "myapp" {
541 t.Errorf("Subject.Repository = %v, want myapp", record.Subject.Repository)
542 }
543544+ createdAt, err := time.Parse(time.RFC3339, record.CreatedAt)
545+ if err != nil {
546+ t.Errorf("CreatedAt is not valid RFC3339: %v", err)
547+ }
548+ if createdAt.Before(before) || createdAt.After(after) {
549+ t.Errorf("CreatedAt = %v, want between %v and %v", createdAt, before, after)
550 }
551}
552···834 }
835836 // Add hold DID
837+ holdDID := "did:web:hold01.atcr.io"
838+ record.HoldDid = &holdDID
839840 // Serialize to JSON
841 jsonData, err := json.Marshal(record)
···844 }
845846 // Deserialize from JSON
847+ var decoded Manifest
848 if err := json.Unmarshal(jsonData, &decoded); err != nil {
849 t.Fatalf("json.Unmarshal() error = %v", err)
850 }
851852 // Verify fields
853+ if decoded.LexiconTypeID != record.LexiconTypeID {
854+ t.Errorf("LexiconTypeID = %v, want %v", decoded.LexiconTypeID, record.LexiconTypeID)
855 }
856 if decoded.Repository != record.Repository {
857 t.Errorf("Repository = %v, want %v", decoded.Repository, record.Repository)
···859 if decoded.Digest != record.Digest {
860 t.Errorf("Digest = %v, want %v", decoded.Digest, record.Digest)
861 }
862+ if decoded.HoldDid == nil || *decoded.HoldDid != *record.HoldDid {
863+ t.Errorf("HoldDid = %v, want %v", decoded.HoldDid, record.HoldDid)
864 }
865 if decoded.Config.Digest != record.Config.Digest {
866 t.Errorf("Config.Digest = %v, want %v", decoded.Config.Digest, record.Config.Digest)
···871}
872873func TestBlobReference_JSONSerialization(t *testing.T) {
874+ blob := Manifest_BlobReference{
875 MediaType: "application/vnd.oci.image.layer.v1.tar+gzip",
876 Digest: "sha256:abc123",
877 Size: 12345,
878+ Urls: []string{"https://s3.example.com/blob"},
879+ // Note: Annotations is now an empty struct, not a map
00880 }
881882 // Serialize
···886 }
887888 // Deserialize
889+ var decoded Manifest_BlobReference
890 if err := json.Unmarshal(jsonData, &decoded); err != nil {
891 t.Fatalf("json.Unmarshal() error = %v", err)
892 }
···904}
905906func TestStarSubject_JSONSerialization(t *testing.T) {
907+ subject := SailorStar_Subject{
908+ Did: "did:plc:alice123",
909 Repository: "myapp",
910 }
911···916 }
917918 // Deserialize
919+ var decoded SailorStar_Subject
920 if err := json.Unmarshal(jsonData, &decoded); err != nil {
921 t.Fatalf("json.Unmarshal() error = %v", err)
922 }
923924 // Verify
925+ if decoded.Did != subject.Did {
926+ t.Errorf("Did = %v, want %v", decoded.Did, subject.Did)
927 }
928 if decoded.Repository != subject.Repository {
929 t.Errorf("Repository = %v, want %v", decoded.Repository, subject.Repository)
···1170 t.Fatal("NewLayerRecord() returned nil")
1171 }
11721173+ if record.LexiconTypeID != LayerCollection {
1174+ t.Errorf("LexiconTypeID = %q, want %q", record.LexiconTypeID, LayerCollection)
1175 }
11761177 if record.Digest != tt.digest {
···1190 t.Errorf("Repository = %q, want %q", record.Repository, tt.repository)
1191 }
11921193+ if record.UserDid != tt.userDID {
1194+ t.Errorf("UserDid = %q, want %q", record.UserDid, tt.userDID)
1195 }
11961197 if record.UserHandle != tt.userHandle {
···1213}
12141215func TestNewLayerRecordJSON(t *testing.T) {
1216+ // Test that HoldLayer can be marshaled/unmarshaled to/from JSON
1217 record := NewLayerRecord(
1218 "sha256:abc123",
1219 1024,
···1230 }
12311232 // Unmarshal back
1233+ var decoded HoldLayer
1234 if err := json.Unmarshal(jsonData, &decoded); err != nil {
1235 t.Fatalf("json.Unmarshal() error = %v", err)
1236 }
12371238 // Verify fields match
1239+ if decoded.LexiconTypeID != record.LexiconTypeID {
1240+ t.Errorf("LexiconTypeID = %q, want %q", decoded.LexiconTypeID, record.LexiconTypeID)
1241 }
1242 if decoded.Digest != record.Digest {
1243 t.Errorf("Digest = %q, want %q", decoded.Digest, record.Digest)
···1251 if decoded.Repository != record.Repository {
1252 t.Errorf("Repository = %q, want %q", decoded.Repository, record.Repository)
1253 }
1254+ if decoded.UserDid != record.UserDid {
1255+ t.Errorf("UserDid = %q, want %q", decoded.UserDid, record.UserDid)
1256 }
1257 if decoded.UserHandle != record.UserHandle {
1258 t.Errorf("UserHandle = %q, want %q", decoded.UserHandle, record.UserHandle)
···1261 t.Errorf("CreatedAt = %q, want %q", decoded.CreatedAt, record.CreatedAt)
1262 }
1263}
···1+// Code generated by generate.go; DO NOT EDIT.
2+3+// Lexicon schema: io.atcr.sailor.profile
4+5+package atproto
6+7+// User profile for ATCR registry. Stores preferences like default hold for blob storage.
8+type SailorProfile struct {
9+ LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.sailor.profile"`
10+ // createdAt: Profile creation timestamp
11+ CreatedAt string `json:"createdAt" cborgen:"createdAt"`
12+ // defaultHold: Default hold endpoint for blob storage. If null, user has opted out of defaults.
13+ DefaultHold *string `json:"defaultHold,omitempty" cborgen:"defaultHold,omitempty"`
14+ // updatedAt: Profile last updated timestamp
15+ UpdatedAt *string `json:"updatedAt,omitempty" cborgen:"updatedAt,omitempty"`
16+}
+25
pkg/atproto/sailorstar.go
···0000000000000000000000000
···1+// Code generated by generate.go; DO NOT EDIT.
2+3+// Lexicon schema: io.atcr.sailor.star
4+5+package atproto
6+7+// A star (like) on a container image repository. Stored in the starrer's PDS, similar to Bluesky likes.
8+type SailorStar struct {
9+ LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.sailor.star"`
10+ // createdAt: Star creation timestamp
11+ CreatedAt string `json:"createdAt" cborgen:"createdAt"`
12+ // subject: The repository being starred
13+ Subject SailorStar_Subject `json:"subject" cborgen:"subject"`
14+}
15+16+// SailorStar_Subject is a "subject" in the io.atcr.sailor.star schema.
17+//
18+// Reference to a repository owned by a user
19+type SailorStar_Subject struct {
20+ LexiconTypeID string `json:"$type,omitempty" cborgen:"$type,const=io.atcr.sailor.star#subject,omitempty"`
21+ // did: DID of the repository owner
22+ Did string `json:"did" cborgen:"did"`
23+ // repository: Repository name (e.g., 'myapp')
24+ Repository string `json:"repository" cborgen:"repository"`
25+}
+20
pkg/atproto/tag.go
···00000000000000000000
···1+// Code generated by generate.go; DO NOT EDIT.
2+3+// Lexicon schema: io.atcr.tag
4+5+package atproto
6+7+// A named tag pointing to a specific manifest digest
8+type Tag struct {
9+ LexiconTypeID string `json:"$type" cborgen:"$type,const=io.atcr.tag"`
10+ // createdAt: Tag creation timestamp
11+ CreatedAt string `json:"createdAt" cborgen:"createdAt"`
12+ // manifest: AT-URI of the manifest this tag points to (e.g., 'at://did:plc:xyz/io.atcr.manifest/abc123'). Preferred over manifestDigest for new records.
13+ Manifest *string `json:"manifest,omitempty" cborgen:"manifest,omitempty"`
14+ // manifestDigest: DEPRECATED: Digest of the manifest (e.g., 'sha256:...'). Kept for backward compatibility with old records. New records should use 'manifest' field instead.
15+ ManifestDigest *string `json:"manifestDigest,omitempty" cborgen:"manifestDigest,omitempty"`
16+ // repository: Repository name (e.g., 'myapp'). Scoped to user's DID.
17+ Repository string `json:"repository" cborgen:"repository"`
18+ // tag: Tag name (e.g., 'latest', 'v1.0.0', '12-slim')
19+ Tag string `json:"tag" cborgen:"tag"`
20+}
-142
pkg/auth/cache.go
···1-// Package token provides service token caching and management for AppView.
2-// Service tokens are JWTs issued by a user's PDS to authorize AppView to
3-// act on their behalf when communicating with hold services. Tokens are
4-// cached with automatic expiry parsing and 10-second safety margins.
5-package auth
6-7-import (
8- "log/slog"
9- "sync"
10- "time"
11-)
12-13-// serviceTokenEntry represents a cached service token
14-type serviceTokenEntry struct {
15- token string
16- expiresAt time.Time
17- err error
18- once sync.Once
19-}
20-21-// Global cache for service tokens (DID:HoldDID -> token)
22-// Service tokens are JWTs issued by a user's PDS to authorize AppView to act on their behalf
23-// when communicating with hold services. These tokens are scoped to specific holds and have
24-// limited lifetime (typically 60s, can request up to 5min).
25-var (
26- globalServiceTokens = make(map[string]*serviceTokenEntry)
27- globalServiceTokensMu sync.RWMutex
28-)
29-30-// GetServiceToken retrieves a cached service token for the given DID and hold DID
31-// Returns empty string if no valid cached token exists
32-func GetServiceToken(did, holdDID string) (token string, expiresAt time.Time) {
33- cacheKey := did + ":" + holdDID
34-35- globalServiceTokensMu.RLock()
36- entry, exists := globalServiceTokens[cacheKey]
37- globalServiceTokensMu.RUnlock()
38-39- if !exists {
40- return "", time.Time{}
41- }
42-43- // Check if token is still valid
44- if time.Now().After(entry.expiresAt) {
45- // Token expired, remove from cache
46- globalServiceTokensMu.Lock()
47- delete(globalServiceTokens, cacheKey)
48- globalServiceTokensMu.Unlock()
49- return "", time.Time{}
50- }
51-52- return entry.token, entry.expiresAt
53-}
54-55-// SetServiceToken stores a service token in the cache
56-// Automatically parses the JWT to extract the expiry time
57-// Applies a 10-second safety margin (cache expires 10s before actual JWT expiry)
58-func SetServiceToken(did, holdDID, token string) error {
59- cacheKey := did + ":" + holdDID
60-61- // Parse JWT to extract expiry (don't verify signature - we trust the PDS)
62- expiry, err := ParseJWTExpiry(token)
63- if err != nil {
64- // If parsing fails, use default 50s TTL (conservative fallback)
65- slog.Warn("Failed to parse JWT expiry, using default 50s", "error", err, "cacheKey", cacheKey)
66- expiry = time.Now().Add(50 * time.Second)
67- } else {
68- // Apply 10s safety margin to avoid using nearly-expired tokens
69- expiry = expiry.Add(-10 * time.Second)
70- }
71-72- globalServiceTokensMu.Lock()
73- globalServiceTokens[cacheKey] = &serviceTokenEntry{
74- token: token,
75- expiresAt: expiry,
76- }
77- globalServiceTokensMu.Unlock()
78-79- slog.Debug("Cached service token",
80- "cacheKey", cacheKey,
81- "expiresIn", time.Until(expiry).Round(time.Second))
82-83- return nil
84-}
85-86-// InvalidateServiceToken removes a service token from the cache
87-// Used when we detect that a token is invalid or the user's session has expired
88-func InvalidateServiceToken(did, holdDID string) {
89- cacheKey := did + ":" + holdDID
90-91- globalServiceTokensMu.Lock()
92- delete(globalServiceTokens, cacheKey)
93- globalServiceTokensMu.Unlock()
94-95- slog.Debug("Invalidated service token", "cacheKey", cacheKey)
96-}
97-98-// GetCacheStats returns statistics about the service token cache for debugging
99-func GetCacheStats() map[string]any {
100- globalServiceTokensMu.RLock()
101- defer globalServiceTokensMu.RUnlock()
102-103- validCount := 0
104- expiredCount := 0
105- now := time.Now()
106-107- for _, entry := range globalServiceTokens {
108- if now.Before(entry.expiresAt) {
109- validCount++
110- } else {
111- expiredCount++
112- }
113- }
114-115- return map[string]any{
116- "total_entries": len(globalServiceTokens),
117- "valid_tokens": validCount,
118- "expired_tokens": expiredCount,
119- }
120-}
121-122-// CleanExpiredTokens removes expired tokens from the cache
123-// Can be called periodically to prevent unbounded growth (though expired tokens
124-// are also removed lazily on access)
125-func CleanExpiredTokens() {
126- globalServiceTokensMu.Lock()
127- defer globalServiceTokensMu.Unlock()
128-129- now := time.Now()
130- removed := 0
131-132- for key, entry := range globalServiceTokens {
133- if now.After(entry.expiresAt) {
134- delete(globalServiceTokens, key)
135- removed++
136- }
137- }
138-139- if removed > 0 {
140- slog.Debug("Cleaned expired service tokens", "count", removed)
141- }
142-}
···2122 // GetCaptainRecord retrieves the captain record for a hold
23 // Used to check public flag and allowAllCrew settings
24- GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error)
2526 // IsCrewMember checks if userDID is a crew member of holdDID
27 IsCrewMember(ctx context.Context, holdDID, userDID string) (bool, error)
···32// Read access rules:
33// - Public hold: allow anyone (even anonymous)
34// - Private hold: require authentication (any authenticated user)
35-func CheckReadAccessWithCaptain(captain *atproto.CaptainRecord, userDID string) bool {
36 if captain.Public {
37 // Public hold - allow anyone (even anonymous)
38 return true
···55// Write access rules:
56// - Must be authenticated
57// - Must be hold owner OR crew member
58-func CheckWriteAccessWithCaptain(captain *atproto.CaptainRecord, userDID string, isCrew bool) bool {
59 slog.Debug("Checking write access", "userDID", userDID, "owner", captain.Owner, "isCrew", isCrew)
6061 if userDID == "" {
···2122 // GetCaptainRecord retrieves the captain record for a hold
23 // Used to check public flag and allowAllCrew settings
24+ GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error)
2526 // IsCrewMember checks if userDID is a crew member of holdDID
27 IsCrewMember(ctx context.Context, holdDID, userDID string) (bool, error)
···32// Read access rules:
33// - Public hold: allow anyone (even anonymous)
34// - Private hold: require authentication (any authenticated user)
35+func CheckReadAccessWithCaptain(captain *atproto.HoldCaptain, userDID string) bool {
36 if captain.Public {
37 // Public hold - allow anyone (even anonymous)
38 return true
···55// Write access rules:
56// - Must be authenticated
57// - Must be hold owner OR crew member
58+func CheckWriteAccessWithCaptain(captain *atproto.HoldCaptain, userDID string, isCrew bool) bool {
59 slog.Debug("Checking write access", "userDID", userDID, "owner", captain.Owner, "isCrew", isCrew)
6061 if userDID == "" {
···35}
3637// GetCaptainRecord retrieves the captain record from the hold's PDS
38-func (a *LocalHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) {
39 // Verify that the requested holdDID matches this hold
40 if holdDID != a.pds.DID() {
41 return nil, fmt.Errorf("holdDID mismatch: requested %s, this hold is %s", holdDID, a.pds.DID())
···47 return nil, fmt.Errorf("failed to get captain record: %w", err)
48 }
4950- // The PDS returns *atproto.CaptainRecord directly now (after we update pds to use atproto types)
51 return pdsCaptain, nil
52}
53
···35}
3637// GetCaptainRecord retrieves the captain record from the hold's PDS
38+func (a *LocalHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error) {
39 // Verify that the requested holdDID matches this hold
40 if holdDID != a.pds.DID() {
41 return nil, fmt.Errorf("holdDID mismatch: requested %s, this hold is %s", holdDID, a.pds.DID())
···47 return nil, fmt.Errorf("failed to get captain record: %w", err)
48 }
4950+ // The PDS returns *atproto.HoldCaptain directly
51 return pdsCaptain, nil
52}
53
+34-20
pkg/auth/hold_remote.go
···101// 1. Check database cache
102// 2. If cache miss or expired, query hold's XRPC endpoint
103// 3. Update cache
104-func (a *RemoteHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) {
105 // Try cache first
106 if a.db != nil {
107 cached, err := a.getCachedCaptainRecord(holdDID)
108 if err == nil && cached != nil {
109 // Cache hit - check if still valid
110 if time.Since(cached.UpdatedAt) < a.cacheTTL {
111- return cached.CaptainRecord, nil
112 }
113 // Cache expired - continue to fetch fresh data
114 }
···133134// captainRecordWithMeta includes UpdatedAt for cache management
135type captainRecordWithMeta struct {
136- *atproto.CaptainRecord
137 UpdatedAt time.Time
138}
139···145 WHERE hold_did = ?
146 `
147148- var record atproto.CaptainRecord
149 var deployedAt, region, provider sql.NullString
150 var updatedAt time.Time
151···172 record.DeployedAt = deployedAt.String
173 }
174 if region.Valid {
175- record.Region = region.String
176 }
177 if provider.Valid {
178- record.Provider = provider.String
179 }
180181 return &captainRecordWithMeta{
182- CaptainRecord: &record,
183- UpdatedAt: updatedAt,
184 }, nil
185}
186187// setCachedCaptainRecord stores a captain record in database cache
188-func (a *RemoteHoldAuthorizer) setCachedCaptainRecord(holdDID string, record *atproto.CaptainRecord) error {
189 query := `
190 INSERT INTO hold_captain_records (
191 hold_did, owner_did, public, allow_all_crew,
···207 record.Public,
208 record.AllowAllCrew,
209 nullString(record.DeployedAt),
210- nullString(record.Region),
211- nullString(record.Provider),
212 time.Now(),
213 )
214···216}
217218// fetchCaptainRecordFromXRPC queries the hold's XRPC endpoint for captain record
219-func (a *RemoteHoldAuthorizer) fetchCaptainRecordFromXRPC(ctx context.Context, holdDID string) (*atproto.CaptainRecord, error) {
220 // Resolve DID to URL
221 holdURL := atproto.ResolveHoldURL(holdDID)
222···261 }
262263 // Convert to our type
264- record := &atproto.CaptainRecord{
265- Type: atproto.CaptainCollection,
266- Owner: xrpcResp.Value.Owner,
267- Public: xrpcResp.Value.Public,
268- AllowAllCrew: xrpcResp.Value.AllowAllCrew,
269- DeployedAt: xrpcResp.Value.DeployedAt,
270- Region: xrpcResp.Value.Region,
271- Provider: xrpcResp.Value.Provider,
000000272 }
273274 return record, nil
···406 return sql.NullString{Valid: false}
407 }
408 return sql.NullString{String: s, Valid: true}
00000000409}
410411// getCachedApproval checks if user has a cached crew approval
···101// 1. Check database cache
102// 2. If cache miss or expired, query hold's XRPC endpoint
103// 3. Update cache
104+func (a *RemoteHoldAuthorizer) GetCaptainRecord(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error) {
105 // Try cache first
106 if a.db != nil {
107 cached, err := a.getCachedCaptainRecord(holdDID)
108 if err == nil && cached != nil {
109 // Cache hit - check if still valid
110 if time.Since(cached.UpdatedAt) < a.cacheTTL {
111+ return cached.HoldCaptain, nil
112 }
113 // Cache expired - continue to fetch fresh data
114 }
···133134// captainRecordWithMeta includes UpdatedAt for cache management
135type captainRecordWithMeta struct {
136+ *atproto.HoldCaptain
137 UpdatedAt time.Time
138}
139···145 WHERE hold_did = ?
146 `
147148+ var record atproto.HoldCaptain
149 var deployedAt, region, provider sql.NullString
150 var updatedAt time.Time
151···172 record.DeployedAt = deployedAt.String
173 }
174 if region.Valid {
175+ record.Region = &region.String
176 }
177 if provider.Valid {
178+ record.Provider = &provider.String
179 }
180181 return &captainRecordWithMeta{
182+ HoldCaptain: &record,
183+ UpdatedAt: updatedAt,
184 }, nil
185}
186187// setCachedCaptainRecord stores a captain record in database cache
188+func (a *RemoteHoldAuthorizer) setCachedCaptainRecord(holdDID string, record *atproto.HoldCaptain) error {
189 query := `
190 INSERT INTO hold_captain_records (
191 hold_did, owner_did, public, allow_all_crew,
···207 record.Public,
208 record.AllowAllCrew,
209 nullString(record.DeployedAt),
210+ nullStringPtr(record.Region),
211+ nullStringPtr(record.Provider),
212 time.Now(),
213 )
214···216}
217218// fetchCaptainRecordFromXRPC queries the hold's XRPC endpoint for captain record
219+func (a *RemoteHoldAuthorizer) fetchCaptainRecordFromXRPC(ctx context.Context, holdDID string) (*atproto.HoldCaptain, error) {
220 // Resolve DID to URL
221 holdURL := atproto.ResolveHoldURL(holdDID)
222···261 }
262263 // Convert to our type
264+ record := &atproto.HoldCaptain{
265+ LexiconTypeID: atproto.CaptainCollection,
266+ Owner: xrpcResp.Value.Owner,
267+ Public: xrpcResp.Value.Public,
268+ AllowAllCrew: xrpcResp.Value.AllowAllCrew,
269+ DeployedAt: xrpcResp.Value.DeployedAt,
270+ }
271+272+ // Handle optional pointer fields
273+ if xrpcResp.Value.Region != "" {
274+ record.Region = &xrpcResp.Value.Region
275+ }
276+ if xrpcResp.Value.Provider != "" {
277+ record.Provider = &xrpcResp.Value.Provider
278 }
279280 return record, nil
···412 return sql.NullString{Valid: false}
413 }
414 return sql.NullString{String: s, Valid: true}
415+}
416+417+// nullStringPtr converts a *string to sql.NullString
418+func nullStringPtr(s *string) sql.NullString {
419+ if s == nil || *s == "" {
420+ return sql.NullString{Valid: false}
421+ }
422+ return sql.NullString{String: *s, Valid: true}
423}
424425// getCachedApproval checks if user has a cached crew approval
···1+package token
2+3+import (
4+ "testing"
5+ "time"
6+)
7+8+func TestGetServiceToken_NotCached(t *testing.T) {
9+ // Clear cache first
10+ globalServiceTokensMu.Lock()
11+ globalServiceTokens = make(map[string]*serviceTokenEntry)
12+ globalServiceTokensMu.Unlock()
13+14+ did := "did:plc:test123"
15+ holdDID := "did:web:hold.example.com"
16+17+ token, expiresAt := GetServiceToken(did, holdDID)
18+ if token != "" {
19+ t.Errorf("Expected empty token for uncached entry, got %q", token)
20+ }
21+ if !expiresAt.IsZero() {
22+ t.Error("Expected zero time for uncached entry")
23+ }
24+}
25+26+func TestSetServiceToken_ManualExpiry(t *testing.T) {
27+ // Clear cache first
28+ globalServiceTokensMu.Lock()
29+ globalServiceTokens = make(map[string]*serviceTokenEntry)
30+ globalServiceTokensMu.Unlock()
31+32+ did := "did:plc:test123"
33+ holdDID := "did:web:hold.example.com"
34+ token := "invalid_jwt_token" // Will fall back to 50s default
35+36+ // This should succeed with default 50s TTL since JWT parsing will fail
37+ err := SetServiceToken(did, holdDID, token)
38+ if err != nil {
39+ t.Fatalf("SetServiceToken() error = %v", err)
40+ }
41+42+ // Verify token was cached
43+ cachedToken, expiresAt := GetServiceToken(did, holdDID)
44+ if cachedToken != token {
45+ t.Errorf("Expected token %q, got %q", token, cachedToken)
46+ }
47+ if expiresAt.IsZero() {
48+ t.Error("Expected non-zero expiry time")
49+ }
51+ // Expiry should be approximately 50s from now (JWT parsing fails here, so the default TTL applies without the 10s margin)
52+ expectedExpiry := time.Now().Add(50 * time.Second)
53+ diff := expiresAt.Sub(expectedExpiry)
54+ if diff < -5*time.Second || diff > 5*time.Second {
55+ t.Errorf("Expiry time off by %v (expected ~50s from now)", diff)
56+ }
57+}
58+59+func TestGetServiceToken_Expired(t *testing.T) {
60+ // Manually insert an expired token
61+ did := "did:plc:test123"
62+ holdDID := "did:web:hold.example.com"
63+ cacheKey := did + ":" + holdDID
64+65+ globalServiceTokensMu.Lock()
66+ globalServiceTokens[cacheKey] = &serviceTokenEntry{
67+ token: "expired_token",
68+ expiresAt: time.Now().Add(-1 * time.Hour), // 1 hour ago
69+ }
70+ globalServiceTokensMu.Unlock()
71+72+ // Try to get - should return empty since expired
73+ token, expiresAt := GetServiceToken(did, holdDID)
74+ if token != "" {
75+ t.Errorf("Expected empty token for expired entry, got %q", token)
76+ }
77+ if !expiresAt.IsZero() {
78+ t.Error("Expected zero time for expired entry")
79+ }
80+81+ // Verify token was removed from cache
82+ globalServiceTokensMu.RLock()
83+ _, exists := globalServiceTokens[cacheKey]
84+ globalServiceTokensMu.RUnlock()
85+86+ if exists {
87+ t.Error("Expected expired token to be removed from cache")
88+ }
89+}
90+91+func TestInvalidateServiceToken(t *testing.T) {
92+ // Set a token
93+ did := "did:plc:test123"
94+ holdDID := "did:web:hold.example.com"
95+ token := "test_token"
96+97+ err := SetServiceToken(did, holdDID, token)
98+ if err != nil {
99+ t.Fatalf("SetServiceToken() error = %v", err)
100+ }
101+102+ // Verify it's cached
103+ cachedToken, _ := GetServiceToken(did, holdDID)
104+ if cachedToken != token {
105+ t.Fatal("Token should be cached")
106+ }
107+108+ // Invalidate
109+ InvalidateServiceToken(did, holdDID)
110+111+ // Verify it's gone
112+ cachedToken, _ = GetServiceToken(did, holdDID)
113+ if cachedToken != "" {
114+ t.Error("Expected token to be invalidated")
115+ }
116+}
117+118+func TestCleanExpiredTokens(t *testing.T) {
119+ // Clear cache first
120+ globalServiceTokensMu.Lock()
121+ globalServiceTokens = make(map[string]*serviceTokenEntry)
122+ globalServiceTokensMu.Unlock()
123+124+ // Add expired and valid tokens
125+ globalServiceTokensMu.Lock()
126+ globalServiceTokens["expired:hold1"] = &serviceTokenEntry{
127+ token: "expired1",
128+ expiresAt: time.Now().Add(-1 * time.Hour),
129+ }
130+ globalServiceTokens["valid:hold2"] = &serviceTokenEntry{
131+ token: "valid1",
132+ expiresAt: time.Now().Add(1 * time.Hour),
133+ }
134+ globalServiceTokensMu.Unlock()
135+136+ // Clean expired
137+ CleanExpiredTokens()
138+139+ // Verify only valid token remains
140+ globalServiceTokensMu.RLock()
141+ _, expiredExists := globalServiceTokens["expired:hold1"]
142+ _, validExists := globalServiceTokens["valid:hold2"]
143+ globalServiceTokensMu.RUnlock()
144+145+ if expiredExists {
146+ t.Error("Expected expired token to be removed")
147+ }
148+ if !validExists {
149+ t.Error("Expected valid token to remain")
150+ }
151+}
152+153+func TestGetCacheStats(t *testing.T) {
154+ // Clear cache first
155+ globalServiceTokensMu.Lock()
156+ globalServiceTokens = make(map[string]*serviceTokenEntry)
157+ globalServiceTokensMu.Unlock()
158+159+ // Add some tokens
160+ globalServiceTokensMu.Lock()
161+ globalServiceTokens["did1:hold1"] = &serviceTokenEntry{
162+ token: "token1",
163+ expiresAt: time.Now().Add(1 * time.Hour),
164+ }
165+ globalServiceTokens["did2:hold2"] = &serviceTokenEntry{
166+ token: "token2",
167+ expiresAt: time.Now().Add(1 * time.Hour),
168+ }
169+ globalServiceTokensMu.Unlock()
170+171+ stats := GetCacheStats()
172+ if stats == nil {
173+ t.Fatal("Expected non-nil stats")
174+ }
175+176+ // GetCacheStats returns map[string]any with "total_entries" key
177+ totalEntries, ok := stats["total_entries"].(int)
178+ if !ok {
179+ t.Fatalf("Expected total_entries in stats map, got: %v", stats)
180+ }
181+182+ if totalEntries != 2 {
183+ t.Errorf("Expected 2 entries, got %d", totalEntries)
184+ }
185+186+ // Also check valid_tokens
187+ validTokens, ok := stats["valid_tokens"].(int)
188+ if !ok {
189+ t.Fatal("Expected valid_tokens in stats map")
190+ }
191+192+ if validTokens != 2 {
193+ t.Errorf("Expected 2 valid tokens, got %d", validTokens)
194+ }
195+}
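Together these tests pin down the cache's surface: GetServiceToken, SetServiceToken, InvalidateServiceToken, CleanExpiredTokens, and GetCacheStats. A rough caller-side sketch of the intended flow; mintServiceToken is a hypothetical placeholder for whatever actually requests a fresh token from the user's PDS:

// Sketch only: consult the cache before minting a new service token.
func serviceTokenFor(did, holdDID string) (string, error) {
	if tok, _ := GetServiceToken(did, holdDID); tok != "" {
		return tok, nil // unexpired cache hit
	}
	fresh, err := mintServiceToken(did, holdDID) // hypothetical helper
	if err != nil {
		return "", err
	}
	// Cache it; SetServiceToken falls back to a ~50s TTL if the JWT
	// expiry cannot be parsed.
	if err := SetServiceToken(did, holdDID, fresh); err != nil {
		return "", err
	}
	return fresh, nil
}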
-19
pkg/auth/token/claims.go
···5657 return claims.AuthMethod
58}
59-60-// ExtractSubject parses a JWT token string and extracts the Subject claim (the user's DID)
61-// Returns the subject or empty string if not found or token is invalid
62-// This does NOT validate the token - it only parses it to extract the claim
63-func ExtractSubject(tokenString string) string {
64- // Parse token without validation (we only need the claims, validation is done by distribution library)
65- parser := jwt.NewParser(jwt.WithoutClaimsValidation())
66- token, _, err := parser.ParseUnverified(tokenString, &Claims{})
67- if err != nil {
68- return "" // Invalid token format
69- }
70-71- claims, ok := token.Claims.(*Claims)
72- if !ok {
73- return "" // Wrong claims type
74- }
75-76- return claims.Subject
77-}
···18// CreateCaptainRecord creates the captain record for the hold (first-time only).
19// This will FAIL if the captain record already exists. Use UpdateCaptainRecord to modify.
20func (p *HoldPDS) CreateCaptainRecord(ctx context.Context, ownerDID string, public bool, allowAllCrew bool, enableBlueskyPosts bool) (cid.Cid, error) {
21- captainRecord := &atproto.CaptainRecord{
22- Type: atproto.CaptainCollection,
23 Owner: ownerDID,
24 Public: public,
25 AllowAllCrew: allowAllCrew,
···40}
4142// GetCaptainRecord retrieves the captain record
43-func (p *HoldPDS) GetCaptainRecord(ctx context.Context) (cid.Cid, *atproto.CaptainRecord, error) {
44 // Use repomgr.GetRecord - our types are registered in init()
45 // so it will automatically unmarshal to the concrete type
46 recordCID, val, err := p.repomgr.GetRecord(ctx, p.uid, atproto.CaptainCollection, CaptainRkey, cid.Undef)
···49 }
5051 // Type assert to our concrete type
52- captainRecord, ok := val.(*atproto.CaptainRecord)
53 if !ok {
54 return cid.Undef, nil, fmt.Errorf("unexpected type for captain record: %T", val)
55 }
···18// CreateCaptainRecord creates the captain record for the hold (first-time only).
19// This will FAIL if the captain record already exists. Use UpdateCaptainRecord to modify.
20func (p *HoldPDS) CreateCaptainRecord(ctx context.Context, ownerDID string, public bool, allowAllCrew bool, enableBlueskyPosts bool) (cid.Cid, error) {
21+ captainRecord := &atproto.HoldCaptain{
22+ LexiconTypeID: atproto.CaptainCollection,
23 Owner: ownerDID,
24 Public: public,
25 AllowAllCrew: allowAllCrew,
···40}
4142// GetCaptainRecord retrieves the captain record
43+func (p *HoldPDS) GetCaptainRecord(ctx context.Context) (cid.Cid, *atproto.HoldCaptain, error) {
44 // Use repomgr.GetRecord - our types are registered in init()
45 // so it will automatically unmarshal to the concrete type
46 recordCID, val, err := p.repomgr.GetRecord(ctx, p.uid, atproto.CaptainCollection, CaptainRkey, cid.Undef)
···49 }
5051 // Type assert to our concrete type
52+ captainRecord, ok := val.(*atproto.HoldCaptain)
53 if !ok {
54 return cid.Undef, nil, fmt.Errorf("unexpected type for captain record: %T", val)
55 }
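For reference, the generated HoldCaptain type used above presumably looks roughly like the sketch below; the field list comes from this diff, while the exact struct tags ($type const, omitempty) emitted by code generation are omitted:

// Approximate shape of the generated record type in pkg/atproto (sketch;
// the real definition is produced by code generation).
type HoldCaptain struct {
	LexiconTypeID      string  // "$type", constant equal to atproto.CaptainCollection
	Owner              string
	Public             bool
	AllowAllCrew       bool
	EnableBlueskyPosts bool
	DeployedAt         string
	Region             *string // optional
	Provider           *string // optional
}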
+43-32
pkg/hold/pds/captain_test.go
···12 "atcr.io/pkg/atproto"
13)
140000015// setupTestPDS creates a test PDS instance in a temporary directory
16// It initializes the repo but does NOT create captain/crew records
17// Tests should call Bootstrap or create records as needed
···146 if captain.EnableBlueskyPosts != tt.enableBlueskyPosts {
147 t.Errorf("Expected enableBlueskyPosts=%v, got %v", tt.enableBlueskyPosts, captain.EnableBlueskyPosts)
148 }
149- if captain.Type != atproto.CaptainCollection {
150- t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.Type)
151 }
152 if captain.DeployedAt == "" {
153 t.Error("Expected deployedAt to be set")
···322func TestCaptainRecord_CBORRoundtrip(t *testing.T) {
323 tests := []struct {
324 name string
325- record *atproto.CaptainRecord
326 }{
327 {
328 name: "Basic captain",
329- record: &atproto.CaptainRecord{
330- Type: atproto.CaptainCollection,
331- Owner: "did:plc:alice123",
332- Public: true,
333- AllowAllCrew: false,
334- DeployedAt: "2025-10-16T12:00:00Z",
335 },
336 },
337 {
338 name: "Captain with optional fields",
339- record: &atproto.CaptainRecord{
340- Type: atproto.CaptainCollection,
341- Owner: "did:plc:bob456",
342- Public: false,
343- AllowAllCrew: true,
344- DeployedAt: "2025-10-16T12:00:00Z",
345- Region: "us-west-2",
346- Provider: "fly.io",
347 },
348 },
349 {
350 name: "Captain with empty optional fields",
351- record: &atproto.CaptainRecord{
352- Type: atproto.CaptainCollection,
353- Owner: "did:plc:charlie789",
354- Public: true,
355- AllowAllCrew: true,
356- DeployedAt: "2025-10-16T12:00:00Z",
357- Region: "",
358- Provider: "",
359 },
360 },
361 }
···375 }
376377 // Unmarshal from CBOR
378- var decoded atproto.CaptainRecord
379 err = decoded.UnmarshalCBOR(bytes.NewReader(cborBytes))
380 if err != nil {
381 t.Fatalf("UnmarshalCBOR failed: %v", err)
382 }
383384 // Verify all fields match
385- if decoded.Type != tt.record.Type {
386- t.Errorf("Type mismatch: expected %s, got %s", tt.record.Type, decoded.Type)
387 }
388 if decoded.Owner != tt.record.Owner {
389 t.Errorf("Owner mismatch: expected %s, got %s", tt.record.Owner, decoded.Owner)
···397 if decoded.DeployedAt != tt.record.DeployedAt {
398 t.Errorf("DeployedAt mismatch: expected %s, got %s", tt.record.DeployedAt, decoded.DeployedAt)
399 }
400- if decoded.Region != tt.record.Region {
401- t.Errorf("Region mismatch: expected %s, got %s", tt.record.Region, decoded.Region)
000402 }
403- if decoded.Provider != tt.record.Provider {
404- t.Errorf("Provider mismatch: expected %s, got %s", tt.record.Provider, decoded.Provider)
000405 }
406 })
407 }
···12 "atcr.io/pkg/atproto"
13)
1415+// ptrString returns a pointer to the given string
16+func ptrString(s string) *string {
17+ return &s
18+}
19+20// setupTestPDS creates a test PDS instance in a temporary directory
21// It initializes the repo but does NOT create captain/crew records
22// Tests should call Bootstrap or create records as needed
···151 if captain.EnableBlueskyPosts != tt.enableBlueskyPosts {
152 t.Errorf("Expected enableBlueskyPosts=%v, got %v", tt.enableBlueskyPosts, captain.EnableBlueskyPosts)
153 }
154+ if captain.LexiconTypeID != atproto.CaptainCollection {
155+ t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.LexiconTypeID)
156 }
157 if captain.DeployedAt == "" {
158 t.Error("Expected deployedAt to be set")
···327func TestCaptainRecord_CBORRoundtrip(t *testing.T) {
328 tests := []struct {
329 name string
330+ record *atproto.HoldCaptain
331 }{
332 {
333 name: "Basic captain",
334+ record: &atproto.HoldCaptain{
335+ LexiconTypeID: atproto.CaptainCollection,
336+ Owner: "did:plc:alice123",
337+ Public: true,
338+ AllowAllCrew: false,
339+ DeployedAt: "2025-10-16T12:00:00Z",
340 },
341 },
342 {
343 name: "Captain with optional fields",
344+ record: &atproto.HoldCaptain{
345+ LexiconTypeID: atproto.CaptainCollection,
346+ Owner: "did:plc:bob456",
347+ Public: false,
348+ AllowAllCrew: true,
349+ DeployedAt: "2025-10-16T12:00:00Z",
350+ Region: ptrString("us-west-2"),
351+ Provider: ptrString("fly.io"),
352 },
353 },
354 {
355 name: "Captain with empty optional fields",
356+ record: &atproto.HoldCaptain{
357+ LexiconTypeID: atproto.CaptainCollection,
358+ Owner: "did:plc:charlie789",
359+ Public: true,
360+ AllowAllCrew: true,
361+ DeployedAt: "2025-10-16T12:00:00Z",
362+ Region: ptrString(""),
363+ Provider: ptrString(""),
364 },
365 },
366 }
···380 }
381382 // Unmarshal from CBOR
383+ var decoded atproto.HoldCaptain
384 err = decoded.UnmarshalCBOR(bytes.NewReader(cborBytes))
385 if err != nil {
386 t.Fatalf("UnmarshalCBOR failed: %v", err)
387 }
388389 // Verify all fields match
390+ if decoded.LexiconTypeID != tt.record.LexiconTypeID {
391+ t.Errorf("LexiconTypeID mismatch: expected %s, got %s", tt.record.LexiconTypeID, decoded.LexiconTypeID)
392 }
393 if decoded.Owner != tt.record.Owner {
394 t.Errorf("Owner mismatch: expected %s, got %s", tt.record.Owner, decoded.Owner)
···402 if decoded.DeployedAt != tt.record.DeployedAt {
403 t.Errorf("DeployedAt mismatch: expected %s, got %s", tt.record.DeployedAt, decoded.DeployedAt)
404 }
405+ // Compare Region pointers (may be nil)
406+ if (decoded.Region == nil) != (tt.record.Region == nil) {
407+ t.Errorf("Region nil mismatch: expected %v, got %v", tt.record.Region, decoded.Region)
408+ } else if decoded.Region != nil && *decoded.Region != *tt.record.Region {
409+ t.Errorf("Region mismatch: expected %q, got %q", *tt.record.Region, *decoded.Region)
410 }
411+ // Compare Provider pointers (may be nil)
412+ if (decoded.Provider == nil) != (tt.record.Provider == nil) {
413+ t.Errorf("Provider nil mismatch: expected %v, got %v", tt.record.Provider, decoded.Provider)
414+ } else if decoded.Provider != nil && *decoded.Provider != *tt.record.Provider {
415+ t.Errorf("Provider mismatch: expected %q, got %q", *tt.record.Provider, *decoded.Provider)
416 }
417 })
418 }
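The nil-aware comparisons for Region and Provider are correct but repetitive; a small generic helper could express the same check once (a possible refactor, not part of this change):

// Possible test helper: report whether two optional values are equal,
// treating nil as distinct from a pointer to the zero value.
func eqPtr[T comparable](a, b *T) bool {
	if (a == nil) != (b == nil) {
		return false
	}
	return a == nil || *a == *b
}

// e.g. if !eqPtr(decoded.Region, tt.record.Region) { t.Errorf("Region mismatch") }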
+10-10
pkg/hold/pds/crew.go
···1516// AddCrewMember adds a new crew member to the hold and commits to carstore
17func (p *HoldPDS) AddCrewMember(ctx context.Context, memberDID, role string, permissions []string) (cid.Cid, error) {
18- crewRecord := &atproto.CrewRecord{
19- Type: atproto.CrewCollection,
20- Member: memberDID,
21- Role: role,
22- Permissions: permissions,
23- AddedAt: time.Now().Format(time.RFC3339),
24 }
2526 // Use repomgr for crew operations - auto-generated rkey is fine
···33}
3435// GetCrewMember retrieves a crew member by their record key
36-func (p *HoldPDS) GetCrewMember(ctx context.Context, rkey string) (cid.Cid, *atproto.CrewRecord, error) {
37 // Use repomgr.GetRecord - our types are registered in init()
38 recordCID, val, err := p.repomgr.GetRecord(ctx, p.uid, atproto.CrewCollection, rkey, cid.Undef)
39 if err != nil {
···41 }
4243 // Type assert to our concrete type
44- crewRecord, ok := val.(*atproto.CrewRecord)
45 if !ok {
46 return cid.Undef, nil, fmt.Errorf("unexpected type for crew record: %T", val)
47 }
···53type CrewMemberWithKey struct {
54 Rkey string
55 Cid cid.Cid
56- Record *atproto.CrewRecord
57}
5859// ListCrewMembers returns all crew members with their rkeys
···108 }
109110 // Unmarshal the CBOR bytes into our concrete type
111- var crewRecord atproto.CrewRecord
112 if err := crewRecord.UnmarshalCBOR(bytes.NewReader(*recBytes)); err != nil {
113 return fmt.Errorf("failed to decode crew record: %w", err)
114 }
···1516// AddCrewMember adds a new crew member to the hold and commits to carstore
17func (p *HoldPDS) AddCrewMember(ctx context.Context, memberDID, role string, permissions []string) (cid.Cid, error) {
18+ crewRecord := &atproto.HoldCrew{
19+ LexiconTypeID: atproto.CrewCollection,
20+ Member: memberDID,
21+ Role: role,
22+ Permissions: permissions,
23+ AddedAt: time.Now().Format(time.RFC3339),
24 }
2526 // Use repomgr for crew operations - auto-generated rkey is fine
···33}
3435// GetCrewMember retrieves a crew member by their record key
36+func (p *HoldPDS) GetCrewMember(ctx context.Context, rkey string) (cid.Cid, *atproto.HoldCrew, error) {
37 // Use repomgr.GetRecord - our types are registered in init()
38 recordCID, val, err := p.repomgr.GetRecord(ctx, p.uid, atproto.CrewCollection, rkey, cid.Undef)
39 if err != nil {
···41 }
4243 // Type assert to our concrete type
44+ crewRecord, ok := val.(*atproto.HoldCrew)
45 if !ok {
46 return cid.Undef, nil, fmt.Errorf("unexpected type for crew record: %T", val)
47 }
···53type CrewMemberWithKey struct {
54 Rkey string
55 Cid cid.Cid
56+ Record *atproto.HoldCrew
57}
5859// ListCrewMembers returns all crew members with their rkeys
···108 }
109110 // Unmarshal the CBOR bytes into our concrete type
111+ var crewRecord atproto.HoldCrew
112 if err := crewRecord.UnmarshalCBOR(bytes.NewReader(*recBytes)); err != nil {
113 return fmt.Errorf("failed to decode crew record: %w", err)
114 }
···910// CreateLayerRecord creates a new layer record in the hold's PDS
11// Returns the rkey and CID of the created record
12-func (p *HoldPDS) CreateLayerRecord(ctx context.Context, record *atproto.LayerRecord) (string, string, error) {
13 // Validate record
14- if record.Type != atproto.LayerCollection {
15- return "", "", fmt.Errorf("invalid record type: %s", record.Type)
16 }
1718 if record.Digest == "" {
···4041// GetLayerRecord retrieves a specific layer record by rkey
42// Note: This is a simplified implementation. For production, you may need to pass the CID
43-func (p *HoldPDS) GetLayerRecord(ctx context.Context, rkey string) (*atproto.LayerRecord, error) {
44 // For now, we don't implement this as it's not needed for the manifest post feature
45 // Full implementation would require querying the carstore with a specific CID
46 return nil, fmt.Errorf("GetLayerRecord not yet implemented - use via XRPC listRecords instead")
···50// Returns records, next cursor (empty if no more), and error
51// Note: This is a simplified implementation. For production, consider adding filters
52// (by repository, user, digest, etc.) and proper pagination
53-func (p *HoldPDS) ListLayerRecords(ctx context.Context, limit int, cursor string) ([]*atproto.LayerRecord, string, error) {
54 // For now, return empty list - full implementation would query the carstore
55 // This would require iterating over records in the collection and filtering
56 // In practice, layer records are mainly for analytics and Bluesky posts,
···910// CreateLayerRecord creates a new layer record in the hold's PDS
11// Returns the rkey and CID of the created record
12+func (p *HoldPDS) CreateLayerRecord(ctx context.Context, record *atproto.HoldLayer) (string, string, error) {
13 // Validate record
14+ if record.LexiconTypeID != atproto.LayerCollection {
15+ return "", "", fmt.Errorf("invalid record type: %s", record.LexiconTypeID)
16 }
1718 if record.Digest == "" {
···4041// GetLayerRecord retrieves a specific layer record by rkey
42// Note: This is a simplified implementation. For production, you may need to pass the CID
43+func (p *HoldPDS) GetLayerRecord(ctx context.Context, rkey string) (*atproto.HoldLayer, error) {
44 // For now, we don't implement this as it's not needed for the manifest post feature
45 // Full implementation would require querying the carstore with a specific CID
46 return nil, fmt.Errorf("GetLayerRecord not yet implemented - use via XRPC listRecords instead")
···50// Returns records, next cursor (empty if no more), and error
51// Note: This is a simplified implementation. For production, consider adding filters
52// (by repository, user, digest, etc.) and proper pagination
53+func (p *HoldPDS) ListLayerRecords(ctx context.Context, limit int, cursor string) ([]*atproto.HoldLayer, string, error) {
54 // For now, return empty list - full implementation would query the carstore
55 // This would require iterating over records in the collection and filtering
56 // In practice, layer records are mainly for analytics and Bluesky posts,
···19 "github.com/ipfs/go-cid"
20)
2122-// init registers our custom ATProto types with indigo's lexutil type registry
23-// This allows repomgr.GetRecord to automatically unmarshal our types
024func init() {
25- // Register captain, crew, tangled profile, and layer record types
26- // These must match the $type field in the records
27- lexutil.RegisterType(atproto.CaptainCollection, &atproto.CaptainRecord{})
28- lexutil.RegisterType(atproto.CrewCollection, &atproto.CrewRecord{})
29- lexutil.RegisterType(atproto.LayerCollection, &atproto.LayerRecord{})
30 lexutil.RegisterType(atproto.TangledProfileCollection, &atproto.TangledProfileRecord{})
31}
32
···19 "github.com/ipfs/go-cid"
20)
2122+// init registers the TangledProfileRecord type with indigo's lexutil type registry.
23+// Note: HoldCaptain, HoldCrew, and HoldLayer are registered in pkg/atproto/register.go (generated).
24+// TangledProfileRecord is external (sh.tangled.actor.profile), so it is registered here.
25func init() {
0000026 lexutil.RegisterType(atproto.TangledProfileCollection, &atproto.TangledProfileRecord{})
27}
28
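For completeness, the generated pkg/atproto/register.go presumably carries the equivalent of the registrations removed from this file, along the lines of:

// Assumed sketch of the generated registration (package atproto); mirrors
// the lexutil.RegisterType calls that previously lived in typeregistry.go.
func init() {
	lexutil.RegisterType(CaptainCollection, &HoldCaptain{})
	lexutil.RegisterType(CrewCollection, &HoldCrew{})
	lexutil.RegisterType(LayerCollection, &HoldLayer{})
}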
+6-6
pkg/hold/pds/server_test.go
···150 if captain.AllowAllCrew != allowAllCrew {
151 t.Errorf("Expected allowAllCrew=%v, got %v", allowAllCrew, captain.AllowAllCrew)
152 }
153- if captain.Type != atproto.CaptainCollection {
154- t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.Type)
155 }
156 if captain.DeployedAt == "" {
157 t.Error("Expected deployedAt to be set")
···317 if captain == nil {
318 t.Fatal("Expected non-nil captain record")
319 }
320- if captain.Type != atproto.CaptainCollection {
321- t.Errorf("Expected captain type %s, got %s", atproto.CaptainCollection, captain.Type)
322 }
323324 // Do the same for crew record
···331 }
332333 crew := crewMembers[0].Record
334- if crew.Type != atproto.CrewCollection {
335- t.Errorf("Expected crew type %s, got %s", atproto.CrewCollection, crew.Type)
336 }
337}
338
···150 if captain.AllowAllCrew != allowAllCrew {
151 t.Errorf("Expected allowAllCrew=%v, got %v", allowAllCrew, captain.AllowAllCrew)
152 }
153+ if captain.LexiconTypeID != atproto.CaptainCollection {
154+ t.Errorf("Expected type %s, got %s", atproto.CaptainCollection, captain.LexiconTypeID)
155 }
156 if captain.DeployedAt == "" {
157 t.Error("Expected deployedAt to be set")
···317 if captain == nil {
318 t.Fatal("Expected non-nil captain record")
319 }
320+ if captain.LexiconTypeID != atproto.CaptainCollection {
321+ t.Errorf("Expected captain type %s, got %s", atproto.CaptainCollection, captain.LexiconTypeID)
322 }
323324 // Do the same for crew record
···331 }
332333 crew := crewMembers[0].Record
334+ if crew.LexiconTypeID != atproto.CrewCollection {
335+ t.Errorf("Expected crew type %s, got %s", atproto.CrewCollection, crew.LexiconTypeID)
336 }
337}
338