fork of indigo with slightly nicer lexgen

Finally get all tests passing

+87 -3
api/atproto/cbor_gen.go
··· 194 195 cw := cbg.NewCborWriter(w) 196 197 - if _, err := cw.Write([]byte{170}); err != nil { 198 return err 199 } 200 ··· 221 if err := v.MarshalCBOR(cw); err != nil { 222 return err 223 } 224 } 225 226 // t.Seq (int64) (int64) ··· 332 } 333 } 334 335 // t.Blocks (util.LexBytes) (slice) 336 if len("blocks") > cbg.MaxLength { 337 return xerrors.Errorf("Value in field \"blocks\" was too long") ··· 474 t.Ops[i] = &v 475 } 476 477 // t.Seq (int64) (int64) 478 case "seq": 479 { ··· 572 t.Blobs[i] = v 573 } 574 575 // t.Blocks (util.LexBytes) (slice) 576 case "blocks": 577 ··· 1606 return err 1607 } 1608 1609 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("com.atproto.label.defs"))); err != nil { 1610 return err 1611 } 1612 - if _, err := cw.WriteString(string("com.atproto.label.defs")); err != nil { 1613 return err 1614 } 1615
··· 194 195 cw := cbg.NewCborWriter(w) 196 197 + if _, err := cw.Write([]byte{172}); err != nil { 198 return err 199 } 200 ··· 221 if err := v.MarshalCBOR(cw); err != nil { 222 return err 223 } 224 + } 225 + 226 + // t.Rev (string) (string) 227 + if len("rev") > cbg.MaxLength { 228 + return xerrors.Errorf("Value in field \"rev\" was too long") 229 + } 230 + 231 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("rev"))); err != nil { 232 + return err 233 + } 234 + if _, err := cw.WriteString(string("rev")); err != nil { 235 + return err 236 + } 237 + 238 + if len(t.Rev) > cbg.MaxLength { 239 + return xerrors.Errorf("Value in field t.Rev was too long") 240 + } 241 + 242 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Rev))); err != nil { 243 + return err 244 + } 245 + if _, err := cw.WriteString(string(t.Rev)); err != nil { 246 + return err 247 } 248 249 // t.Seq (int64) (int64) ··· 355 } 356 } 357 358 + // t.Since (string) (string) 359 + if len("since") > cbg.MaxLength { 360 + return xerrors.Errorf("Value in field \"since\" was too long") 361 + } 362 + 363 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("since"))); err != nil { 364 + return err 365 + } 366 + if _, err := cw.WriteString(string("since")); err != nil { 367 + return err 368 + } 369 + 370 + if t.Since == nil { 371 + if _, err := cw.Write(cbg.CborNull); err != nil { 372 + return err 373 + } 374 + } else { 375 + if len(*t.Since) > cbg.MaxLength { 376 + return xerrors.Errorf("Value in field t.Since was too long") 377 + } 378 + 379 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(*t.Since))); err != nil { 380 + return err 381 + } 382 + if _, err := cw.WriteString(string(*t.Since)); err != nil { 383 + return err 384 + } 385 + } 386 + 387 // t.Blocks (util.LexBytes) (slice) 388 if len("blocks") > cbg.MaxLength { 389 return xerrors.Errorf("Value in field \"blocks\" was too long") ··· 526 t.Ops[i] = &v 527 } 528 529 + // t.Rev (string) (string) 530 + case "rev": 531 + 532 + { 533 + sval, err := cbg.ReadString(cr) 534 + if err != nil { 535 + return err 536 + } 537 + 538 + t.Rev = string(sval) 539 + } 540 // t.Seq (int64) (int64) 541 case "seq": 542 { ··· 635 t.Blobs[i] = v 636 } 637 638 + // t.Since (string) (string) 639 + case "since": 640 + 641 + { 642 + b, err := cr.ReadByte() 643 + if err != nil { 644 + return err 645 + } 646 + if b != cbg.CborNull[0] { 647 + if err := cr.UnreadByte(); err != nil { 648 + return err 649 + } 650 + 651 + sval, err := cbg.ReadString(cr) 652 + if err != nil { 653 + return err 654 + } 655 + 656 + t.Since = (*string)(&sval) 657 + } 658 + } 659 // t.Blocks (util.LexBytes) (slice) 660 case "blocks": 661 ··· 1690 return err 1691 } 1692 1693 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("com.atproto.label.defs#selfLabels"))); err != nil { 1694 return err 1695 } 1696 + if _, err := cw.WriteString(string("com.atproto.label.defs#selfLabels")); err != nil { 1697 return err 1698 } 1699
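
The commit event's CBOR map header grows from 10 entries (0xaa / 170) to 12 (0xac / 172) to carry the new required "rev" string and the nullable "since" string. A minimal sketch of reading those fields after decoding a frame follows; only SyncSubscribeRepos_Commit, Rev, Since, and the generated UnmarshalCBOR come from this diff, the rest is illustrative.

package example

import (
	"bytes"
	"fmt"

	comatproto "github.com/bluesky-social/indigo/api/atproto"
)

// printCommitRev decodes a single #commit frame body and prints its revision cursor.
func printCommitRev(frame []byte) error {
	var evt comatproto.SyncSubscribeRepos_Commit
	if err := evt.UnmarshalCBOR(bytes.NewReader(frame)); err != nil {
		return fmt.Errorf("decoding commit event: %w", err)
	}

	// Rev is the repo revision this commit lands on; Since is the revision the
	// included block diff is relative to, and is nil for a full (non-delta) commit.
	fmt.Println("rev:", evt.Rev)
	if evt.Since != nil {
		fmt.Println("since:", *evt.Since)
	}
	return nil
}
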
+1 -1
api/atproto/repostrongRef.go
··· 13 } // RepoStrongRef is a "main" in the com.atproto.repo.strongRef schema. 14 // RECORDTYPE: RepoStrongRef 15 type RepoStrongRef struct { 16 - LexiconTypeID string `json:"$type,const=com.atproto.repo.strongRef#main,omitempty" cborgen:"$type,const=com.atproto.repo.strongRef#main,omitempty"` 17 Cid string `json:"cid" cborgen:"cid"` 18 Uri string `json:"uri" cborgen:"uri"` 19 }
··· 13 } // RepoStrongRef is a "main" in the com.atproto.repo.strongRef schema. 14 // RECORDTYPE: RepoStrongRef 15 type RepoStrongRef struct { 16 + LexiconTypeID string `json:"$type,const=com.atproto.repo.strongRef,omitempty" cborgen:"$type,const=com.atproto.repo.strongRef,omitempty"` 17 Cid string `json:"cid" cborgen:"cid"` 18 Uri string `json:"uri" cborgen:"uri"` 19 }
+6 -6
api/bsky/cbor_gen.go
··· 2867 return err 2868 } 2869 2870 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("app.bsky.richtext.facet"))); err != nil { 2871 return err 2872 } 2873 - if _, err := cw.WriteString(string("app.bsky.richtext.facet")); err != nil { 2874 return err 2875 } 2876 return nil ··· 2992 return err 2993 } 2994 2995 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("app.bsky.richtext.facet"))); err != nil { 2996 return err 2997 } 2998 - if _, err := cw.WriteString(string("app.bsky.richtext.facet")); err != nil { 2999 return err 3000 } 3001 return nil ··· 3280 return err 3281 } 3282 3283 - if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("app.bsky.feed.defs"))); err != nil { 3284 return err 3285 } 3286 - if _, err := cw.WriteString(string("app.bsky.feed.defs")); err != nil { 3287 return err 3288 } 3289
··· 2867 return err 2868 } 2869 2870 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("app.bsky.richtext.facet#link"))); err != nil { 2871 return err 2872 } 2873 + if _, err := cw.WriteString(string("app.bsky.richtext.facet#link")); err != nil { 2874 return err 2875 } 2876 return nil ··· 2992 return err 2993 } 2994 2995 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("app.bsky.richtext.facet#mention"))); err != nil { 2996 return err 2997 } 2998 + if _, err := cw.WriteString(string("app.bsky.richtext.facet#mention")); err != nil { 2999 return err 3000 } 3001 return nil ··· 3280 return err 3281 } 3282 3283 + if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("app.bsky.feed.defs#notFoundPost"))); err != nil { 3284 return err 3285 } 3286 + if _, err := cw.WriteString(string("app.bsky.feed.defs#notFoundPost")); err != nil { 3287 return err 3288 } 3289
+1 -1
api/bsky/embedexternal.go
··· 13 } // EmbedExternal is a "main" in the app.bsky.embed.external schema. 14 // RECORDTYPE: EmbedExternal 15 type EmbedExternal struct { 16 - LexiconTypeID string `json:"$type,const=app.bsky.embed.external#main" cborgen:"$type,const=app.bsky.embed.external#main"` 17 External *EmbedExternal_External `json:"external" cborgen:"external"` 18 } 19
··· 13 } // EmbedExternal is a "main" in the app.bsky.embed.external schema. 14 // RECORDTYPE: EmbedExternal 15 type EmbedExternal struct { 16 + LexiconTypeID string `json:"$type,const=app.bsky.embed.external" cborgen:"$type,const=app.bsky.embed.external"` 17 External *EmbedExternal_External `json:"external" cborgen:"external"` 18 } 19
+1 -1
api/bsky/embedimages.go
··· 13 } // EmbedImages is a "main" in the app.bsky.embed.images schema. 14 // RECORDTYPE: EmbedImages 15 type EmbedImages struct { 16 - LexiconTypeID string `json:"$type,const=app.bsky.embed.images#main" cborgen:"$type,const=app.bsky.embed.images#main"` 17 Images []*EmbedImages_Image `json:"images" cborgen:"images"` 18 } 19
··· 13 } // EmbedImages is a "main" in the app.bsky.embed.images schema. 14 // RECORDTYPE: EmbedImages 15 type EmbedImages struct { 16 + LexiconTypeID string `json:"$type,const=app.bsky.embed.images" cborgen:"$type,const=app.bsky.embed.images"` 17 Images []*EmbedImages_Image `json:"images" cborgen:"images"` 18 } 19
+1 -1
api/bsky/embedrecord.go
··· 17 } // EmbedRecord is a "main" in the app.bsky.embed.record schema. 18 // RECORDTYPE: EmbedRecord 19 type EmbedRecord struct { 20 - LexiconTypeID string `json:"$type,const=app.bsky.embed.record#main" cborgen:"$type,const=app.bsky.embed.record#main"` 21 Record *comatprototypes.RepoStrongRef `json:"record" cborgen:"record"` 22 } 23
··· 17 } // EmbedRecord is a "main" in the app.bsky.embed.record schema. 18 // RECORDTYPE: EmbedRecord 19 type EmbedRecord struct { 20 + LexiconTypeID string `json:"$type,const=app.bsky.embed.record" cborgen:"$type,const=app.bsky.embed.record"` 21 Record *comatprototypes.RepoStrongRef `json:"record" cborgen:"record"` 22 } 23
+1 -1
api/bsky/embedrecordWithMedia.go
··· 19 } // EmbedRecordWithMedia is a "main" in the app.bsky.embed.recordWithMedia schema. 20 // RECORDTYPE: EmbedRecordWithMedia 21 type EmbedRecordWithMedia struct { 22 - LexiconTypeID string `json:"$type,const=app.bsky.embed.recordWithMedia#main" cborgen:"$type,const=app.bsky.embed.recordWithMedia#main"` 23 Media *EmbedRecordWithMedia_Media `json:"media" cborgen:"media"` 24 Record *EmbedRecord `json:"record" cborgen:"record"` 25 }
··· 19 } // EmbedRecordWithMedia is a "main" in the app.bsky.embed.recordWithMedia schema. 20 // RECORDTYPE: EmbedRecordWithMedia 21 type EmbedRecordWithMedia struct { 22 + LexiconTypeID string `json:"$type,const=app.bsky.embed.recordWithMedia" cborgen:"$type,const=app.bsky.embed.recordWithMedia"` 23 Media *EmbedRecordWithMedia_Media `json:"media" cborgen:"media"` 24 Record *EmbedRecord `json:"record" cborgen:"record"` 25 }
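
All four embed "main" records now carry the bare NSID in their $type const, matching RepoStrongRef above. A purely illustrative sketch of wiring them together (only the struct fields shown in this diff are assumed; the helper itself is hypothetical):

package example

import (
	comatproto "github.com/bluesky-social/indigo/api/atproto"
	appbsky "github.com/bluesky-social/indigo/api/bsky"
)

// quoteEmbed builds a record embed pointing at an existing post. The generated
// CBOR marshaler writes the const $type, now "app.bsky.embed.record" with no
// "#main" fragment, so LexiconTypeID is left unset here.
func quoteEmbed(postURI, postCID string) *appbsky.EmbedRecord {
	return &appbsky.EmbedRecord{
		Record: &comatproto.RepoStrongRef{
			Uri: postURI,
			Cid: postCID,
		},
	}
}
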
+1 -11
bgs/bgs.go
··· 275 276 e.GET("/xrpc/com.atproto.sync.subscribeRepos", bgs.EventsHandler) 277 e.GET("/xrpc/com.atproto.sync.getCheckout", bgs.HandleComAtprotoSyncGetCheckout) 278 - e.GET("/xrpc/com.atproto.sync.getCommitPath", bgs.HandleComAtprotoSyncGetCommitPath) 279 e.GET("/xrpc/com.atproto.sync.getHead", bgs.HandleComAtprotoSyncGetHead) 280 e.GET("/xrpc/com.atproto.sync.getRecord", bgs.HandleComAtprotoSyncGetRecord) 281 e.GET("/xrpc/com.atproto.sync.getRepo", bgs.HandleComAtprotoSyncGetRepo) ··· 755 return bgs.Index.Crawler.AddToCatchupQueue(ctx, host, ai, evt) 756 } 757 758 - if err := bgs.repoman.HandleExternalUserEvent(ctx, host.ID, u.ID, u.Did, (*cid.Cid)(evt.Prev), evt.Blocks, evt.Ops); err != nil { 759 log.Warnw("failed handling event", "err", err, "host", host.Host, "seq", evt.Seq, "repo", u.Did, "prev", stringLink(evt.Prev), "commit", evt.Commit.String()) 760 761 if errors.Is(err, carstore.ErrRepoBaseMismatch) { ··· 767 span.SetAttributes(attribute.Bool("catchup_queue", true)) 768 769 return bgs.Index.Crawler.AddToCatchupQueue(ctx, host, ai, evt) 770 - } 771 - 772 - if errors.Is(err, carstore.ErrRepoFork) { 773 - log.Errorw("detected repo fork", "from", stringLink(evt.Prev), "host", host.Host, "repo", u.Did) 774 - 775 - span.SetAttributes(attribute.Bool("catchup_queue", true)) 776 - span.SetAttributes(attribute.Bool("fork", true)) 777 - 778 - return fmt.Errorf("cannot process repo fork") 779 } 780 781 return fmt.Errorf("handle user event failed: %w", err)
··· 275 276 e.GET("/xrpc/com.atproto.sync.subscribeRepos", bgs.EventsHandler) 277 e.GET("/xrpc/com.atproto.sync.getCheckout", bgs.HandleComAtprotoSyncGetCheckout) 278 e.GET("/xrpc/com.atproto.sync.getHead", bgs.HandleComAtprotoSyncGetHead) 279 e.GET("/xrpc/com.atproto.sync.getRecord", bgs.HandleComAtprotoSyncGetRecord) 280 e.GET("/xrpc/com.atproto.sync.getRepo", bgs.HandleComAtprotoSyncGetRepo) ··· 754 return bgs.Index.Crawler.AddToCatchupQueue(ctx, host, ai, evt) 755 } 756 757 + if err := bgs.repoman.HandleExternalUserEvent(ctx, host.ID, u.ID, u.Did, evt.Since, evt.Rev, evt.Blocks, evt.Ops); err != nil { 758 log.Warnw("failed handling event", "err", err, "host", host.Host, "seq", evt.Seq, "repo", u.Did, "prev", stringLink(evt.Prev), "commit", evt.Commit.String()) 759 760 if errors.Is(err, carstore.ErrRepoBaseMismatch) { ··· 766 span.SetAttributes(attribute.Bool("catchup_queue", true)) 767 768 return bgs.Index.Crawler.AddToCatchupQueue(ctx, host, ai, evt) 769 } 770 771 return fmt.Errorf("handle user event failed: %w", err)
+12 -27
bgs/handlers.go
··· 12 comatprototypes "github.com/bluesky-social/indigo/api/atproto" 13 "github.com/bluesky-social/indigo/util" 14 "github.com/bluesky-social/indigo/xrpc" 15 - "github.com/ipfs/go-cid" 16 "github.com/labstack/echo/v4" 17 ) 18 19 - func (s *BGS) handleComAtprotoSyncGetCheckout(ctx context.Context, commit string, did string) (io.Reader, error) { 20 /* 21 u, err := s.Index.LookupUserByDid(ctx, did) 22 if err != nil { ··· 63 return nil, fmt.Errorf("nyi") 64 } 65 66 - func (s *BGS) handleComAtprotoSyncGetRepo(ctx context.Context, did string, earliest string, latest string) (io.Reader, error) { 67 u, err := s.Index.LookupUserByDid(ctx, did) 68 if err != nil { 69 return nil, err 70 } 71 72 - var earlyCid, lateCid cid.Cid 73 - if earliest != "" { 74 - c, err := cid.Decode(earliest) 75 - if err != nil { 76 - return nil, err 77 - } 78 - 79 - earlyCid = c 80 - } 81 - 82 - if latest != "" { 83 - c, err := cid.Decode(latest) 84 - if err != nil { 85 - return nil, err 86 - } 87 - 88 - lateCid = c 89 - } 90 - 91 // TODO: stream the response 92 buf := new(bytes.Buffer) 93 - if err := s.repoman.ReadRepo(ctx, u.Uid, earlyCid, lateCid, buf); err != nil { 94 - return nil, err 95 } 96 97 return buf, nil ··· 101 return nil, fmt.Errorf("NYI") 102 } 103 104 - func (s *BGS) handleComAtprotoSyncRequestCrawl(ctx context.Context, host string) error { 105 if host == "" { 106 return fmt.Errorf("must pass valid hostname") 107 } ··· 151 return s.slurper.SubscribeToPds(ctx, norm, true) 152 } 153 154 - func (s *BGS) handleComAtprotoSyncNotifyOfUpdate(ctx context.Context, hostname string) error { 155 // TODO: 156 return nil 157 } ··· 169 return bytes.NewReader(b), nil 170 } 171 172 - func (s *BGS) handleComAtprotoSyncListBlobs(ctx context.Context, did string, earliest string, latest string) (*comatprototypes.SyncListBlobs_Output, error) { 173 return nil, fmt.Errorf("NYI") 174 } 175 176 func (s *BGS) handleComAtprotoSyncListRepos(ctx context.Context, cursor string, limit int) (*comatprototypes.SyncListRepos_Output, error) { 177 return nil, fmt.Errorf("NYI") 178 }
··· 12 comatprototypes "github.com/bluesky-social/indigo/api/atproto" 13 "github.com/bluesky-social/indigo/util" 14 "github.com/bluesky-social/indigo/xrpc" 15 "github.com/labstack/echo/v4" 16 ) 17 18 + func (s *BGS) handleComAtprotoSyncGetCheckout(ctx context.Context, did string) (io.Reader, error) { 19 /* 20 u, err := s.Index.LookupUserByDid(ctx, did) 21 if err != nil { ··· 62 return nil, fmt.Errorf("nyi") 63 } 64 65 + func (s *BGS) handleComAtprotoSyncGetRepo(ctx context.Context, did string, since string) (io.Reader, error) { 66 u, err := s.Index.LookupUserByDid(ctx, did) 67 if err != nil { 68 return nil, err 69 } 70 71 // TODO: stream the response 72 buf := new(bytes.Buffer) 73 + if err := s.repoman.ReadRepo(ctx, u.Uid, since, buf); err != nil { 74 + return nil, fmt.Errorf("failed to read repo: %w", err) 75 } 76 77 return buf, nil ··· 81 return nil, fmt.Errorf("NYI") 82 } 83 84 + func (s *BGS) handleComAtprotoSyncRequestCrawl(ctx context.Context, body *comatprototypes.SyncRequestCrawl_Input) error { 85 + host := body.Hostname 86 if host == "" { 87 return fmt.Errorf("must pass valid hostname") 88 } ··· 132 return s.slurper.SubscribeToPds(ctx, norm, true) 133 } 134 135 + func (s *BGS) handleComAtprotoSyncNotifyOfUpdate(ctx context.Context, body *comatprototypes.SyncNotifyOfUpdate_Input) error { 136 // TODO: 137 return nil 138 } ··· 150 return bytes.NewReader(b), nil 151 } 152 153 + func (s *BGS) handleComAtprotoSyncListBlobs(ctx context.Context, cursor string, did string, limit int, since string) (*comatprototypes.SyncListBlobs_Output, error) { 154 return nil, fmt.Errorf("NYI") 155 } 156 157 func (s *BGS) handleComAtprotoSyncListRepos(ctx context.Context, cursor string, limit int) (*comatprototypes.SyncListRepos_Output, error) { 158 return nil, fmt.Errorf("NYI") 159 } 160 + 161 + func (s *BGS) handleComAtprotoSyncGetLatestCommit(ctx context.Context, did string) (*comatprototypes.SyncGetLatestCommit_Output, error) { 162 + return nil, fmt.Errorf("NYI") 163 + }
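
requestCrawl and notifyOfUpdate are now POST procedures that bind a typed JSON body instead of reading query parameters. A rough sketch of the client side, assuming the regenerated XRPC wrapper takes the new input struct (only the Hostname field appears in this diff; the wrapper signature is an assumption):

package main

import (
	"context"
	"log"

	comatproto "github.com/bluesky-social/indigo/api/atproto"
	"github.com/bluesky-social/indigo/xrpc"
)

func main() {
	ctx := context.Background()
	bgs := &xrpc.Client{Host: "https://bgs.example.com"} // hypothetical BGS host

	// com.atproto.sync.requestCrawl now carries {"hostname": ...} in the body.
	if err := comatproto.SyncRequestCrawl(ctx, bgs, &comatproto.SyncRequestCrawl_Input{
		Hostname: "pds.example.com", // hypothetical PDS to crawl
	}); err != nil {
		log.Fatal(err)
	}
}
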
+45 -45
bgs/stubs.go
··· 1 package bgs 2 3 import ( 4 - "fmt" 5 "io" 6 "strconv" 7 ··· 18 e.GET("/xrpc/com.atproto.sync.getBlob", s.HandleComAtprotoSyncGetBlob) 19 e.GET("/xrpc/com.atproto.sync.getBlocks", s.HandleComAtprotoSyncGetBlocks) 20 e.GET("/xrpc/com.atproto.sync.getCheckout", s.HandleComAtprotoSyncGetCheckout) 21 - e.GET("/xrpc/com.atproto.sync.getCommitPath", s.HandleComAtprotoSyncGetCommitPath) 22 e.GET("/xrpc/com.atproto.sync.getHead", s.HandleComAtprotoSyncGetHead) 23 e.GET("/xrpc/com.atproto.sync.getRecord", s.HandleComAtprotoSyncGetRecord) 24 e.GET("/xrpc/com.atproto.sync.getRepo", s.HandleComAtprotoSyncGetRepo) 25 e.GET("/xrpc/com.atproto.sync.listBlobs", s.HandleComAtprotoSyncListBlobs) 26 e.GET("/xrpc/com.atproto.sync.listRepos", s.HandleComAtprotoSyncListRepos) 27 - e.GET("/xrpc/com.atproto.sync.notifyOfUpdate", s.HandleComAtprotoSyncNotifyOfUpdate) 28 - e.GET("/xrpc/com.atproto.sync.requestCrawl", s.HandleComAtprotoSyncRequestCrawl) 29 return nil 30 } 31 ··· 63 func (s *BGS) HandleComAtprotoSyncGetCheckout(c echo.Context) error { 64 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetCheckout") 65 defer span.End() 66 - commit := c.QueryParam("commit") 67 did := c.QueryParam("did") 68 var out io.Reader 69 var handleErr error 70 - // func (s *BGS) handleComAtprotoSyncGetCheckout(ctx context.Context,commit string,did string) (io.Reader, error) 71 - out, handleErr = s.handleComAtprotoSyncGetCheckout(ctx, commit, did) 72 if handleErr != nil { 73 return handleErr 74 } 75 return c.Stream(200, "application/vnd.ipld.car", out) 76 } 77 78 - func (s *BGS) HandleComAtprotoSyncGetCommitPath(c echo.Context) error { 79 - ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetCommitPath") 80 defer span.End() 81 did := c.QueryParam("did") 82 - earliest := c.QueryParam("earliest") 83 - latest := c.QueryParam("latest") 84 - var out *comatprototypes.SyncGetCommitPath_Output 85 var handleErr error 86 - // func (s *BGS) handleComAtprotoSyncGetCommitPath(ctx context.Context,did string,earliest string,latest string) (*comatprototypes.SyncGetCommitPath_Output, error) 87 - out, handleErr = s.handleComAtprotoSyncGetCommitPath(ctx, did, earliest, latest) 88 if handleErr != nil { 89 return handleErr 90 } 91 return c.JSON(200, out) 92 } 93 94 - func (s *BGS) HandleComAtprotoSyncGetHead(c echo.Context) error { 95 - ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetHead") 96 defer span.End() 97 did := c.QueryParam("did") 98 - var out *comatprototypes.SyncGetHead_Output 99 var handleErr error 100 - // func (s *BGS) handleComAtprotoSyncGetHead(ctx context.Context,did string) (*comatprototypes.SyncGetHead_Output, error) 101 - out, handleErr = s.handleComAtprotoSyncGetHead(ctx, did) 102 if handleErr != nil { 103 return handleErr 104 } ··· 126 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetRepo") 127 defer span.End() 128 did := c.QueryParam("did") 129 - earliest := c.QueryParam("earliest") 130 - latest := c.QueryParam("latest") 131 var out io.Reader 132 var handleErr error 133 - // func (s *BGS) handleComAtprotoSyncGetRepo(ctx context.Context,did string,earliest string,latest string) (io.Reader, error) 134 - out, handleErr = s.handleComAtprotoSyncGetRepo(ctx, did, earliest, latest) 135 if handleErr != nil { 136 return handleErr 137 } ··· 141 func (s *BGS) HandleComAtprotoSyncListBlobs(c echo.Context) error { 142 ctx, span := otel.Tracer("server").Start(c.Request().Context(), 
"HandleComAtprotoSyncListBlobs") 143 defer span.End() 144 did := c.QueryParam("did") 145 - earliest := c.QueryParam("earliest") 146 - latest := c.QueryParam("latest") 147 var out *comatprototypes.SyncListBlobs_Output 148 var handleErr error 149 - // func (s *BGS) handleComAtprotoSyncListBlobs(ctx context.Context,did string,earliest string,latest string) (*comatprototypes.SyncListBlobs_Output, error) 150 - out, handleErr = s.handleComAtprotoSyncListBlobs(ctx, did, earliest, latest) 151 if handleErr != nil { 152 return handleErr 153 } ··· 182 func (s *BGS) HandleComAtprotoSyncNotifyOfUpdate(c echo.Context) error { 183 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncNotifyOfUpdate") 184 defer span.End() 185 - hostname := c.QueryParam("hostname") 186 var handleErr error 187 - // func (s *BGS) handleComAtprotoSyncNotifyOfUpdate(ctx context.Context,hostname string) error 188 - handleErr = s.handleComAtprotoSyncNotifyOfUpdate(ctx, hostname) 189 if handleErr != nil { 190 return handleErr 191 } ··· 196 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncRequestCrawl") 197 defer span.End() 198 199 - var hostname string 200 - switch c.Request().Method { 201 - case "GET": 202 - hostname = c.QueryParam("hostname") 203 - case "POST": 204 - var m map[string]string 205 - if err := c.Bind(&m); err != nil { 206 - return err 207 - } 208 - 209 - hostname = m["hostname"] 210 - default: 211 - return fmt.Errorf("invalid method for handler") 212 } 213 var handleErr error 214 - // func (s *BGS) handleComAtprotoSyncRequestCrawl(ctx context.Context,hostname string) error 215 - handleErr = s.handleComAtprotoSyncRequestCrawl(ctx, hostname) 216 if handleErr != nil { 217 return handleErr 218 }
··· 1 package bgs 2 3 import ( 4 "io" 5 "strconv" 6 ··· 17 e.GET("/xrpc/com.atproto.sync.getBlob", s.HandleComAtprotoSyncGetBlob) 18 e.GET("/xrpc/com.atproto.sync.getBlocks", s.HandleComAtprotoSyncGetBlocks) 19 e.GET("/xrpc/com.atproto.sync.getCheckout", s.HandleComAtprotoSyncGetCheckout) 20 e.GET("/xrpc/com.atproto.sync.getHead", s.HandleComAtprotoSyncGetHead) 21 + e.GET("/xrpc/com.atproto.sync.getLatestCommit", s.HandleComAtprotoSyncGetLatestCommit) 22 e.GET("/xrpc/com.atproto.sync.getRecord", s.HandleComAtprotoSyncGetRecord) 23 e.GET("/xrpc/com.atproto.sync.getRepo", s.HandleComAtprotoSyncGetRepo) 24 e.GET("/xrpc/com.atproto.sync.listBlobs", s.HandleComAtprotoSyncListBlobs) 25 e.GET("/xrpc/com.atproto.sync.listRepos", s.HandleComAtprotoSyncListRepos) 26 + e.POST("/xrpc/com.atproto.sync.notifyOfUpdate", s.HandleComAtprotoSyncNotifyOfUpdate) 27 + e.POST("/xrpc/com.atproto.sync.requestCrawl", s.HandleComAtprotoSyncRequestCrawl) 28 return nil 29 } 30 ··· 62 func (s *BGS) HandleComAtprotoSyncGetCheckout(c echo.Context) error { 63 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetCheckout") 64 defer span.End() 65 did := c.QueryParam("did") 66 var out io.Reader 67 var handleErr error 68 + // func (s *BGS) handleComAtprotoSyncGetCheckout(ctx context.Context,did string) (io.Reader, error) 69 + out, handleErr = s.handleComAtprotoSyncGetCheckout(ctx, did) 70 if handleErr != nil { 71 return handleErr 72 } 73 return c.Stream(200, "application/vnd.ipld.car", out) 74 } 75 76 + func (s *BGS) HandleComAtprotoSyncGetHead(c echo.Context) error { 77 + ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetHead") 78 defer span.End() 79 did := c.QueryParam("did") 80 + var out *comatprototypes.SyncGetHead_Output 81 var handleErr error 82 + // func (s *BGS) handleComAtprotoSyncGetHead(ctx context.Context,did string) (*comatprototypes.SyncGetHead_Output, error) 83 + out, handleErr = s.handleComAtprotoSyncGetHead(ctx, did) 84 if handleErr != nil { 85 return handleErr 86 } 87 return c.JSON(200, out) 88 } 89 90 + func (s *BGS) HandleComAtprotoSyncGetLatestCommit(c echo.Context) error { 91 + ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetLatestCommit") 92 defer span.End() 93 did := c.QueryParam("did") 94 + var out *comatprototypes.SyncGetLatestCommit_Output 95 var handleErr error 96 + // func (s *BGS) handleComAtprotoSyncGetLatestCommit(ctx context.Context,did string) (*comatprototypes.SyncGetLatestCommit_Output, error) 97 + out, handleErr = s.handleComAtprotoSyncGetLatestCommit(ctx, did) 98 if handleErr != nil { 99 return handleErr 100 } ··· 122 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetRepo") 123 defer span.End() 124 did := c.QueryParam("did") 125 + since := c.QueryParam("since") 126 var out io.Reader 127 var handleErr error 128 + // func (s *BGS) handleComAtprotoSyncGetRepo(ctx context.Context,did string,since string) (io.Reader, error) 129 + out, handleErr = s.handleComAtprotoSyncGetRepo(ctx, did, since) 130 if handleErr != nil { 131 return handleErr 132 } ··· 136 func (s *BGS) HandleComAtprotoSyncListBlobs(c echo.Context) error { 137 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncListBlobs") 138 defer span.End() 139 + cursor := c.QueryParam("cursor") 140 did := c.QueryParam("did") 141 + 142 + var limit int 143 + if p := c.QueryParam("limit"); p != "" { 144 + var err error 145 + limit, err = strconv.Atoi(p) 146 + if 
err != nil { 147 + return err 148 + } 149 + } else { 150 + limit = 500 151 + } 152 + since := c.QueryParam("since") 153 var out *comatprototypes.SyncListBlobs_Output 154 var handleErr error 155 + // func (s *BGS) handleComAtprotoSyncListBlobs(ctx context.Context,cursor string,did string,limit int,since string) (*comatprototypes.SyncListBlobs_Output, error) 156 + out, handleErr = s.handleComAtprotoSyncListBlobs(ctx, cursor, did, limit, since) 157 if handleErr != nil { 158 return handleErr 159 } ··· 188 func (s *BGS) HandleComAtprotoSyncNotifyOfUpdate(c echo.Context) error { 189 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncNotifyOfUpdate") 190 defer span.End() 191 + 192 + var body comatprototypes.SyncNotifyOfUpdate_Input 193 + if err := c.Bind(&body); err != nil { 194 + return err 195 + } 196 var handleErr error 197 + // func (s *BGS) handleComAtprotoSyncNotifyOfUpdate(ctx context.Context,body *comatprototypes.SyncNotifyOfUpdate_Input) error 198 + handleErr = s.handleComAtprotoSyncNotifyOfUpdate(ctx, &body) 199 if handleErr != nil { 200 return handleErr 201 } ··· 206 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncRequestCrawl") 207 defer span.End() 208 209 + var body comatprototypes.SyncRequestCrawl_Input 210 + if err := c.Bind(&body); err != nil { 211 + return err 212 } 213 var handleErr error 214 + // func (s *BGS) handleComAtprotoSyncRequestCrawl(ctx context.Context,body *comatprototypes.SyncRequestCrawl_Input) error 215 + handleErr = s.handleComAtprotoSyncRequestCrawl(ctx, &body) 216 if handleErr != nil { 217 return handleErr 218 }
+47 -52
carstore/bs.go
··· 74 Path string 75 Usr models.Uid `gorm:"index:idx_car_shards_usr;index:idx_car_shards_usr_seq,priority:1"` 76 Rebase bool 77 } 78 79 type blockRef struct { ··· 243 rmcids map[cid.Cid]bool 244 base blockstore.Blockstore 245 user models.Uid 246 seq int 247 readonly bool 248 cs *CarStore ··· 292 293 var ErrRepoBaseMismatch = fmt.Errorf("attempted a delta session on top of the wrong previous head") 294 295 - var ErrRepoFork = fmt.Errorf("repo fork detected") 296 - 297 - func (cs *CarStore) NewDeltaSession(ctx context.Context, user models.Uid, prev *cid.Cid) (*DeltaSession, error) { 298 ctx, span := otel.Tracer("carstore").Start(ctx, "NewSession") 299 defer span.End() 300 ··· 305 return nil, err 306 } 307 308 - if prev != nil { 309 - if lastShard.Root.CID != *prev { 310 - fork, err := cs.checkFork(ctx, user, *prev) 311 - if err != nil { 312 - return nil, fmt.Errorf("failed to check carstore base mismatch for fork condition: %w", err) 313 - } 314 - 315 - if fork { 316 - return nil, fmt.Errorf("fork at %s: %w", prev.String(), ErrRepoFork) 317 - } 318 - 319 - return nil, fmt.Errorf("mismatch: %s != %s: %w", lastShard.Root.CID, prev.String(), ErrRepoBaseMismatch) 320 - } 321 } 322 323 return &DeltaSession{ ··· 329 prefetch: true, 330 cache: make(map[cid.Cid]blockformat.Block), 331 }, 332 - user: user, 333 - cs: cs, 334 - seq: lastShard.Seq + 1, 335 }, nil 336 } 337 ··· 349 }, nil 350 } 351 352 - func (cs *CarStore) ReadUserCar(ctx context.Context, user models.Uid, earlyCid, lateCid cid.Cid, incremental bool, w io.Writer) error { 353 ctx, span := otel.Tracer("carstore").Start(ctx, "ReadUserCar") 354 defer span.End() 355 356 - var lateSeq, earlySeq int 357 - 358 - if earlyCid.Defined() { 359 var untilShard CarShard 360 - if err := cs.meta.First(&untilShard, "root = ? AND usr = ?", models.DbCID{earlyCid}, user).Error; err != nil { 361 return fmt.Errorf("finding early shard: %w", err) 362 } 363 earlySeq = untilShard.Seq 364 } 365 366 - if lateCid.Defined() { 367 - var fromShard CarShard 368 - if err := cs.meta.First(&fromShard, "root = ? AND usr = ?", models.DbCID{lateCid}, user).Error; err != nil { 369 - return fmt.Errorf("finding late shard: %w", err) 370 } 371 - lateSeq = fromShard.Seq 372 - } 373 - 374 - q := cs.meta.Order("seq desc").Where("usr = ? 
AND seq > ?", user, earlySeq) 375 - if lateCid.Defined() { 376 - q = q.Where("seq <= ?", lateSeq) 377 - } 378 var shards []CarShard 379 - if err := q.Find(&shards).Error; err != nil { 380 return err 381 } 382 383 - if !incremental && earlyCid.Defined() { 384 // have to do it the ugly way 385 return fmt.Errorf("nyi") 386 } ··· 462 } 463 464 var _ blockstore.Blockstore = (*DeltaSession)(nil) 465 466 func (ds *DeltaSession) Put(ctx context.Context, b blockformat.Block) error { 467 if ds.readonly { ··· 563 564 // CloseWithRoot writes all new blocks in a car file to the writer with the 565 // given cid as the 'root' 566 - func (ds *DeltaSession) CloseWithRoot(ctx context.Context, root cid.Cid) ([]byte, error) { 567 - return ds.closeWithRoot(ctx, root, false) 568 } 569 570 func WriteCarHeader(w io.Writer, root cid.Cid) (int64, error) { ··· 585 return hnw, nil 586 } 587 588 - func (ds *DeltaSession) closeWithRoot(ctx context.Context, root cid.Cid, rebase bool) ([]byte, error) { 589 ctx, span := otel.Tracer("carstore").Start(ctx, "CloseWithRoot") 590 defer span.End() 591 ··· 640 Seq: ds.seq, 641 Path: path, 642 Usr: ds.user, 643 } 644 645 if err := ds.putShard(ctx, &shard, brefs); err != nil { ··· 733 return nil 734 } 735 736 - func (ds *DeltaSession) CloseAsRebase(ctx context.Context, root cid.Cid) error { 737 - _, err := ds.closeWithRoot(ctx, root, true) 738 if err != nil { 739 return err 740 } ··· 855 return dropset, nil 856 } 857 858 - func (cs *CarStore) ImportSlice(ctx context.Context, uid models.Uid, prev *cid.Cid, carslice []byte) (cid.Cid, *DeltaSession, error) { 859 ctx, span := otel.Tracer("carstore").Start(ctx, "ImportSlice") 860 defer span.End() 861 ··· 868 return cid.Undef, nil, fmt.Errorf("invalid car file, header must have a single root (has %d)", len(carr.Header.Roots)) 869 } 870 871 - ds, err := cs.NewDeltaSession(ctx, uid, prev) 872 if err != nil { 873 - return cid.Undef, nil, err 874 } 875 876 var cids []cid.Cid ··· 890 } 891 } 892 893 - base := cid.Undef 894 - if prev != nil { 895 - base = *prev 896 - } 897 - 898 - rmcids, err := BlockDiff(ctx, ds, base, cids) 899 if err != nil { 900 - return cid.Undef, nil, err 901 } 902 903 ds.rmcids = rmcids ··· 915 } 916 917 return lastShard.Root.CID, nil 918 } 919 920 type UserStat struct {
··· 74 Path string 75 Usr models.Uid `gorm:"index:idx_car_shards_usr;index:idx_car_shards_usr_seq,priority:1"` 76 Rebase bool 77 + Rev string 78 } 79 80 type blockRef struct { ··· 244 rmcids map[cid.Cid]bool 245 base blockstore.Blockstore 246 user models.Uid 247 + baseCid cid.Cid 248 seq int 249 readonly bool 250 cs *CarStore ··· 294 295 var ErrRepoBaseMismatch = fmt.Errorf("attempted a delta session on top of the wrong previous head") 296 297 + func (cs *CarStore) NewDeltaSession(ctx context.Context, user models.Uid, since *string) (*DeltaSession, error) { 298 ctx, span := otel.Tracer("carstore").Start(ctx, "NewSession") 299 defer span.End() 300 ··· 305 return nil, err 306 } 307 308 + if since != nil && *since != lastShard.Rev { 309 + return nil, fmt.Errorf("revision mismatch: %s != %s: %w", *since, lastShard.Rev, ErrRepoBaseMismatch) 310 } 311 312 return &DeltaSession{ ··· 318 prefetch: true, 319 cache: make(map[cid.Cid]blockformat.Block), 320 }, 321 + user: user, 322 + baseCid: lastShard.Root.CID, 323 + cs: cs, 324 + seq: lastShard.Seq + 1, 325 }, nil 326 } 327 ··· 339 }, nil 340 } 341 342 + func (cs *CarStore) ReadUserCar(ctx context.Context, user models.Uid, sinceRev string, incremental bool, w io.Writer) error { 343 ctx, span := otel.Tracer("carstore").Start(ctx, "ReadUserCar") 344 defer span.End() 345 346 + var earlySeq int 347 + if sinceRev != "" { 348 var untilShard CarShard 349 + if err := cs.meta.Where("rev >= ? AND usr = ?", sinceRev, user).Order("rev").First(&untilShard).Error; err != nil { 350 return fmt.Errorf("finding early shard: %w", err) 351 } 352 earlySeq = untilShard.Seq 353 } 354 355 + q := cs.meta.Order("seq desc").Where("usr = ? AND seq >= ?", user, earlySeq) 356 + /* 357 + if lateCid.Defined() { 358 + q = q.Where("seq <= ?", lateSeq) 359 } 360 + */ 361 var shards []CarShard 362 + if err := q.Debug().Find(&shards).Error; err != nil { 363 return err 364 } 365 366 + if !incremental && earlySeq > 0 { 367 // have to do it the ugly way 368 return fmt.Errorf("nyi") 369 } ··· 445 } 446 447 var _ blockstore.Blockstore = (*DeltaSession)(nil) 448 + 449 + func (ds *DeltaSession) BaseCid() cid.Cid { 450 + return ds.baseCid 451 + } 452 453 func (ds *DeltaSession) Put(ctx context.Context, b blockformat.Block) error { 454 if ds.readonly { ··· 550 551 // CloseWithRoot writes all new blocks in a car file to the writer with the 552 // given cid as the 'root' 553 + func (ds *DeltaSession) CloseWithRoot(ctx context.Context, root cid.Cid, rev string) ([]byte, error) { 554 + return ds.closeWithRoot(ctx, root, rev, false) 555 } 556 557 func WriteCarHeader(w io.Writer, root cid.Cid) (int64, error) { ··· 572 return hnw, nil 573 } 574 575 + func (ds *DeltaSession) closeWithRoot(ctx context.Context, root cid.Cid, rev string, rebase bool) ([]byte, error) { 576 ctx, span := otel.Tracer("carstore").Start(ctx, "CloseWithRoot") 577 defer span.End() 578 ··· 627 Seq: ds.seq, 628 Path: path, 629 Usr: ds.user, 630 + Rev: rev, 631 } 632 633 if err := ds.putShard(ctx, &shard, brefs); err != nil { ··· 721 return nil 722 } 723 724 + func (ds *DeltaSession) CloseAsRebase(ctx context.Context, root cid.Cid, rev string) error { 725 + _, err := ds.closeWithRoot(ctx, root, rev, true) 726 if err != nil { 727 return err 728 } ··· 843 return dropset, nil 844 } 845 846 + func (cs *CarStore) ImportSlice(ctx context.Context, uid models.Uid, since *string, carslice []byte) (cid.Cid, *DeltaSession, error) { 847 ctx, span := otel.Tracer("carstore").Start(ctx, "ImportSlice") 848 defer span.End() 849 ··· 856 return 
cid.Undef, nil, fmt.Errorf("invalid car file, header must have a single root (has %d)", len(carr.Header.Roots)) 857 } 858 859 + ds, err := cs.NewDeltaSession(ctx, uid, since) 860 if err != nil { 861 + return cid.Undef, nil, fmt.Errorf("new delta session failed: %w", err) 862 } 863 864 var cids []cid.Cid ··· 878 } 879 } 880 881 + rmcids, err := BlockDiff(ctx, ds, ds.baseCid, cids) 882 if err != nil { 883 + return cid.Undef, nil, fmt.Errorf("block diff failed (base=%s): %w", ds.baseCid, err) 884 } 885 886 ds.rmcids = rmcids ··· 898 } 899 900 return lastShard.Root.CID, nil 901 + } 902 + 903 + func (cs *CarStore) GetUserRepoRev(ctx context.Context, user models.Uid) (string, error) { 904 + lastShard, err := cs.getLastShard(ctx, user) 905 + if err != nil { 906 + return "", err 907 + } 908 + if lastShard.ID == 0 { 909 + return "", nil 910 + } 911 + 912 + return lastShard.Rev, nil 913 } 914 915 type UserStat struct {
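
The carstore now anchors delta sessions on the repo rev string rather than the previous commit CID. A sketch of the write path built from the signatures above (GetUserRepoRev, ImportSlice, CloseWithRoot); the surrounding wiring and caller-supplied values are illustrative:

package example

import (
	"context"

	"github.com/bluesky-social/indigo/carstore"
	"github.com/bluesky-social/indigo/models"
)

// applyDelta stages a firehose CAR slice on top of whatever rev the user's repo
// is currently at, then persists the new shard under the commit's rev.
func applyDelta(ctx context.Context, cs *carstore.CarStore, uid models.Uid, carslice []byte, newRev string) error {
	curRev, err := cs.GetUserRepoRev(ctx, uid)
	if err != nil {
		return err
	}

	var since *string
	if curRev != "" {
		since = &curRev
	}

	// ImportSlice opens a delta session (failing with ErrRepoBaseMismatch when
	// since does not match the last shard's rev) and stages the new blocks.
	root, ds, err := cs.ImportSlice(ctx, uid, since, carslice)
	if err != nil {
		return err
	}

	// The shard is written with the commit's rev, so readers can later call
	// cs.ReadUserCar(ctx, uid, sinceRev, true, w) to stream an incremental CAR.
	_, err = ds.CloseWithRoot(ctx, root, newRev)
	return err
}
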
+25 -21
carstore/repo_test.go
··· 83 t.Fatal(err) 84 } 85 86 - ncid, err := setupRepo(ctx, ds) 87 if err != nil { 88 t.Fatal(err) 89 } 90 91 - if _, err := ds.CloseWithRoot(ctx, ncid); err != nil { 92 t.Fatal(err) 93 } 94 95 head := ncid 96 for i := 0; i < 10; i++ { 97 - ds, err := cs.NewDeltaSession(ctx, 1, &head) 98 if err != nil { 99 t.Fatal(err) 100 } ··· 111 } 112 113 kmgr := &util.FakeKeyManager{} 114 - nroot, err := rr.Commit(ctx, kmgr.SignForUser) 115 if err != nil { 116 t.Fatal(err) 117 } 118 119 - if _, err := ds.CloseWithRoot(ctx, nroot); err != nil { 120 t.Fatal(err) 121 } 122 ··· 124 } 125 126 buf := new(bytes.Buffer) 127 - if err := cs.ReadUserCar(ctx, 1, cid.Undef, cid.Undef, true, buf); err != nil { 128 t.Fatal(err) 129 } 130 ··· 132 133 } 134 135 - func setupRepo(ctx context.Context, bs blockstore.Blockstore) (cid.Cid, error) { 136 nr := repo.NewRepo(ctx, "did:foo", bs) 137 138 if _, _, err := nr.CreateRecord(ctx, "app.bsky.feed.post", &appbsky.FeedPost{ 139 Text: fmt.Sprintf("hey look its a tweet %s", time.Now()), 140 }); err != nil { 141 - return cid.Undef, err 142 } 143 144 kmgr := &util.FakeKeyManager{} 145 - ncid, err := nr.Commit(ctx, kmgr.SignForUser) 146 if err != nil { 147 - return cid.Undef, fmt.Errorf("commit failed: %w", err) 148 } 149 150 - return ncid, nil 151 } 152 153 func BenchmarkRepoWritesCarstore(b *testing.B) { ··· 159 } 160 defer cleanup() 161 162 - ds, err := cs.NewDeltaSession(ctx, 1, &cid.Undef) 163 if err != nil { 164 b.Fatal(err) 165 } 166 167 - ncid, err := setupRepo(ctx, ds) 168 if err != nil { 169 b.Fatal(err) 170 } 171 172 - if _, err := ds.CloseWithRoot(ctx, ncid); err != nil { 173 b.Fatal(err) 174 } 175 176 head := ncid 177 b.ResetTimer() 178 for i := 0; i < b.N; i++ { 179 - ds, err := cs.NewDeltaSession(ctx, 1, &head) 180 if err != nil { 181 b.Fatal(err) 182 } ··· 193 } 194 195 kmgr := &util.FakeKeyManager{} 196 - nroot, err := rr.Commit(ctx, kmgr.SignForUser) 197 if err != nil { 198 b.Fatal(err) 199 } 200 201 - if _, err := ds.CloseWithRoot(ctx, nroot); err != nil { 202 b.Fatal(err) 203 } 204 ··· 215 } 216 defer cleanup() 217 218 - ncid, err := setupRepo(ctx, bs) 219 if err != nil { 220 b.Fatal(err) 221 } ··· 236 } 237 238 kmgr := &util.FakeKeyManager{} 239 - nroot, err := rr.Commit(ctx, kmgr.SignForUser) 240 if err != nil { 241 b.Fatal(err) 242 } ··· 253 b.Fatal(err) 254 } 255 256 - ncid, err := setupRepo(ctx, bs) 257 if err != nil { 258 b.Fatal(err) 259 } ··· 274 } 275 276 kmgr := &util.FakeKeyManager{} 277 - nroot, err := rr.Commit(ctx, kmgr.SignForUser) 278 if err != nil { 279 b.Fatal(err) 280 }
··· 83 t.Fatal(err) 84 } 85 86 + ncid, rev, err := setupRepo(ctx, ds) 87 if err != nil { 88 t.Fatal(err) 89 } 90 91 + if _, err := ds.CloseWithRoot(ctx, ncid, rev); err != nil { 92 t.Fatal(err) 93 } 94 95 head := ncid 96 for i := 0; i < 10; i++ { 97 + ds, err := cs.NewDeltaSession(ctx, 1, &rev) 98 if err != nil { 99 t.Fatal(err) 100 } ··· 111 } 112 113 kmgr := &util.FakeKeyManager{} 114 + nroot, nrev, err := rr.Commit(ctx, kmgr.SignForUser) 115 if err != nil { 116 t.Fatal(err) 117 } 118 119 + rev = nrev 120 + 121 + if _, err := ds.CloseWithRoot(ctx, nroot, rev); err != nil { 122 t.Fatal(err) 123 } 124 ··· 126 } 127 128 buf := new(bytes.Buffer) 129 + if err := cs.ReadUserCar(ctx, 1, "", true, buf); err != nil { 130 t.Fatal(err) 131 } 132 ··· 134 135 } 136 137 + func setupRepo(ctx context.Context, bs blockstore.Blockstore) (cid.Cid, string, error) { 138 nr := repo.NewRepo(ctx, "did:foo", bs) 139 140 if _, _, err := nr.CreateRecord(ctx, "app.bsky.feed.post", &appbsky.FeedPost{ 141 Text: fmt.Sprintf("hey look its a tweet %s", time.Now()), 142 }); err != nil { 143 + return cid.Undef, "", err 144 } 145 146 kmgr := &util.FakeKeyManager{} 147 + ncid, rev, err := nr.Commit(ctx, kmgr.SignForUser) 148 if err != nil { 149 + return cid.Undef, "", fmt.Errorf("commit failed: %w", err) 150 } 151 152 + return ncid, rev, nil 153 } 154 155 func BenchmarkRepoWritesCarstore(b *testing.B) { ··· 161 } 162 defer cleanup() 163 164 + ds, err := cs.NewDeltaSession(ctx, 1, nil) 165 if err != nil { 166 b.Fatal(err) 167 } 168 169 + ncid, rev, err := setupRepo(ctx, ds) 170 if err != nil { 171 b.Fatal(err) 172 } 173 174 + if _, err := ds.CloseWithRoot(ctx, ncid, rev); err != nil { 175 b.Fatal(err) 176 } 177 178 head := ncid 179 b.ResetTimer() 180 for i := 0; i < b.N; i++ { 181 + ds, err := cs.NewDeltaSession(ctx, 1, &rev) 182 if err != nil { 183 b.Fatal(err) 184 } ··· 195 } 196 197 kmgr := &util.FakeKeyManager{} 198 + nroot, nrev, err := rr.Commit(ctx, kmgr.SignForUser) 199 if err != nil { 200 b.Fatal(err) 201 } 202 203 + rev = nrev 204 + 205 + if _, err := ds.CloseWithRoot(ctx, nroot, rev); err != nil { 206 b.Fatal(err) 207 } 208 ··· 219 } 220 defer cleanup() 221 222 + ncid, _, err := setupRepo(ctx, bs) 223 if err != nil { 224 b.Fatal(err) 225 } ··· 240 } 241 242 kmgr := &util.FakeKeyManager{} 243 + nroot, _, err := rr.Commit(ctx, kmgr.SignForUser) 244 if err != nil { 245 b.Fatal(err) 246 } ··· 257 b.Fatal(err) 258 } 259 260 + ncid, _, err := setupRepo(ctx, bs) 261 if err != nil { 262 b.Fatal(err) 263 } ··· 278 } 279 280 kmgr := &util.FakeKeyManager{} 281 + nroot, _, err := rr.Commit(ctx, kmgr.SignForUser) 282 if err != nil { 283 b.Fatal(err) 284 }
+3 -3
cmd/gosky/main.go
··· 347 348 ctx := context.TODO() 349 350 - repobytes, err := comatproto.SyncGetRepo(ctx, xrpcc, cctx.Args().First(), "", "") 351 if err != nil { 352 return err 353 } ··· 647 arg = xrpcc.Auth.Did 648 } 649 650 - rrb, err := comatproto.SyncGetRepo(ctx, xrpcc, arg, "", "") 651 if err != nil { 652 return err 653 } ··· 1168 return err 1169 } 1170 1171 - rrb, err := comatproto.SyncGetRepo(ctx, xrpcc, rfi, "", "") 1172 if err != nil { 1173 return err 1174 }
··· 347 348 ctx := context.TODO() 349 350 + repobytes, err := comatproto.SyncGetRepo(ctx, xrpcc, cctx.Args().First(), "") 351 if err != nil { 352 return err 353 } ··· 647 arg = xrpcc.Auth.Did 648 } 649 650 + rrb, err := comatproto.SyncGetRepo(ctx, xrpcc, arg, "") 651 if err != nil { 652 return err 653 } ··· 1168 return err 1169 } 1170 1171 + rrb, err := comatproto.SyncGetRepo(ctx, xrpcc, rfi, "") 1172 if err != nil { 1173 return err 1174 }
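
gosky's repo fetches drop the old earliest/latest CID pair for a single "since" rev. A hedged example of both modes; the DID and rev values are placeholders, while the (did, since) argument order and []byte return match the calls above:

package main

import (
	"context"
	"log"
	"os"

	comatproto "github.com/bluesky-social/indigo/api/atproto"
	"github.com/bluesky-social/indigo/xrpc"
)

func main() {
	ctx := context.Background()
	c := &xrpc.Client{Host: "https://bsky.social"}
	did := "did:plc:example" // hypothetical DID

	// Empty since: a full repo snapshot as a CAR file.
	full, err := comatproto.SyncGetRepo(ctx, c, did, "")
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("repo.car", full, 0o644); err != nil {
		log.Fatal(err)
	}

	// Non-empty since: only blocks written after that repo rev.
	delta, err := comatproto.SyncGetRepo(ctx, c, did, "3jt7yyxlt5s2v") // hypothetical rev
	if err != nil {
		log.Fatal(err)
	}
	if err := os.WriteFile("repo-delta.car", delta, 0o644); err != nil {
		log.Fatal(err)
	}
}
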
-1
cmd/supercollider/main.go
··· 618 Time: time.Now().Format(util.ISO8601), 619 Ops: outops, 620 TooBig: toobig, 621 - Rebase: evt.Rebase, 622 }, 623 PrivUid: evt.User, 624 }); err != nil {
··· 618 Time: time.Now().Format(util.ISO8601), 619 Ops: outops, 620 TooBig: toobig, 621 }, 622 PrivUid: evt.User, 623 }); err != nil {
+7 -6
events/dbpersist.go
··· 67 68 type RepoEventRecord struct { 69 Seq uint `gorm:"primarykey"` 70 Commit *models.DbCID 71 Prev *models.DbCID 72 NewHandle *string // NewHandle is only set if this is a handle change event ··· 276 Blobs: blobs, 277 Time: t, 278 Rebase: evt.Rebase, 279 } 280 281 opsb, err := json.Marshal(evt.Ops) ··· 493 Blobs: blobCIDs, 494 Rebase: rer.Rebase, 495 Ops: ops, 496 } 497 498 cs, err := p.readCarSlice(ctx, rer) ··· 511 512 func (p *DbPersistence) readCarSlice(ctx context.Context, rer *RepoEventRecord) ([]byte, error) { 513 514 - var early cid.Cid 515 - if rer.Prev != nil && !rer.Rebase { 516 - early = rer.Prev.CID 517 - } 518 - 519 buf := new(bytes.Buffer) 520 - if err := p.cs.ReadUserCar(ctx, rer.Repo, early, rer.Commit.CID, true, buf); err != nil { 521 return nil, err 522 } 523
··· 67 68 type RepoEventRecord struct { 69 Seq uint `gorm:"primarykey"` 70 + Rev string 71 + Since *string 72 Commit *models.DbCID 73 Prev *models.DbCID 74 NewHandle *string // NewHandle is only set if this is a handle change event ··· 278 Blobs: blobs, 279 Time: t, 280 Rebase: evt.Rebase, 281 + Rev: evt.Rev, 282 + Since: evt.Since, 283 } 284 285 opsb, err := json.Marshal(evt.Ops) ··· 497 Blobs: blobCIDs, 498 Rebase: rer.Rebase, 499 Ops: ops, 500 + Rev: rer.Rev, 501 + Since: rer.Since, 502 } 503 504 cs, err := p.readCarSlice(ctx, rer) ··· 517 518 func (p *DbPersistence) readCarSlice(ctx context.Context, rer *RepoEventRecord) ([]byte, error) { 519 520 buf := new(bytes.Buffer) 521 + if err := p.cs.ReadUserCar(ctx, rer.Repo, rer.Rev, true, buf); err != nil { 522 return nil, err 523 } 524
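
RepoEventRecord gains Rev and Since columns, and the CAR slice is re-read by rev instead of by prev/commit CID. Existing databases need those columns added; if the events package does not already auto-migrate on startup, a one-off migration along these lines would do it (an assumption, not part of this diff):

package example

import (
	"github.com/bluesky-social/indigo/events"
	"gorm.io/gorm"
)

// migrateEventRecords adds the new rev/since columns to repo_event_records.
func migrateEventRecords(db *gorm.DB) error {
	return db.AutoMigrate(&events.RepoEventRecord{})
}
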
+299 -334
indexer/indexer.go
··· 130 131 toobig := false 132 slice := evt.RepoSlice 133 - if len(slice) > MaxEventSliceLength || len(outops) > MaxOpsSliceLength { 134 slice = nil 135 outops = nil 136 toobig = true 137 } 138 139 - if evt.Rebase { 140 - if err := ix.events.HandleRebase(ctx, evt.User); err != nil { 141 - log.Errorf("failed to handle rebase in events manager: %s", err) 142 - } 143 - } 144 - 145 log.Debugw("Sending event", "did", did) 146 if err := ix.events.AddEvent(ctx, &events.XRPCStreamEvent{ 147 RepoCommit: &comatproto.SyncSubscribeRepos_Commit{ 148 Repo: did, 149 Prev: (*lexutil.LexLink)(evt.OldRoot), 150 Blocks: slice, 151 Commit: lexutil.LexLink(evt.NewRoot), 152 Time: time.Now().Format(util.ISO8601), 153 Ops: outops, 154 TooBig: toobig, 155 - Rebase: evt.Rebase, 156 }, 157 PrivUid: evt.User, 158 }); err != nil { ··· 165 func (ix *Indexer) handleRepoOp(ctx context.Context, evt *repomgr.RepoEvent, op *repomgr.RepoOp) error { 166 switch op.Kind { 167 case repomgr.EvtKindCreateRecord: 168 - if err := ix.crawlRecordReferences(ctx, op); err != nil { 169 - return err 170 - } 171 - 172 if ix.doAggregations { 173 _, err := ix.handleRecordCreate(ctx, evt, op, true) 174 if err != nil { 175 return fmt.Errorf("handle recordCreate: %w", err) 176 } 177 } 178 case repomgr.EvtKindDeleteRecord: 179 if ix.doAggregations { 180 if err := ix.handleRecordDelete(ctx, evt, op, true); err != nil { ··· 194 return nil 195 } 196 197 func (ix *Indexer) handleRecordDelete(ctx context.Context, evt *repomgr.RepoEvent, op *repomgr.RepoOp, local bool) error { 198 log.Infow("record delete event", "collection", op.Collection) 199 ··· 227 228 log.Warn("TODO: remove notifications on delete") 229 /* 230 - if err := ix.notifman.RemoveRepost(ctx, fp.Author, rr.ID, evt.User); err != nil { 231 - return nil, err 232 - } 233 */ 234 235 case "app.bsky.feed.vote": ··· 335 return out, nil 336 } 337 338 - func (ix *Indexer) crawlAtUriRef(ctx context.Context, uri string) error { 339 - puri, err := util.ParseAtUri(uri) 340 - if err != nil { 341 - return err 342 - } else { 343 - _, err := ix.GetUserOrMissing(ctx, puri.Did) 344 - if err != nil { 345 - return err 346 - } 347 - } 348 - return nil 349 - } 350 - func (ix *Indexer) crawlRecordReferences(ctx context.Context, op *repomgr.RepoOp) error { 351 - ctx, span := otel.Tracer("indexer").Start(ctx, "crawlRecordReferences") 352 - defer span.End() 353 - 354 - switch rec := op.Record.(type) { 355 - case *bsky.FeedPost: 356 - for _, e := range rec.Entities { 357 - if e.Type == "mention" { 358 - _, err := ix.GetUserOrMissing(ctx, e.Value) 359 - if err != nil { 360 - log.Infow("failed to parse user mention", "ref", e.Value, "err", err) 361 - } 362 - } 363 - } 364 - 365 - if rec.Reply != nil { 366 - if rec.Reply.Parent != nil { 367 - if err := ix.crawlAtUriRef(ctx, rec.Reply.Parent.Uri); err != nil { 368 - log.Infow("failed to crawl reply parent", "cid", op.RecCid, "replyuri", rec.Reply.Parent.Uri, "err", err) 369 - } 370 - } 371 - 372 - if rec.Reply.Root != nil { 373 - if err := ix.crawlAtUriRef(ctx, rec.Reply.Root.Uri); err != nil { 374 - log.Infow("failed to crawl reply root", "cid", op.RecCid, "rooturi", rec.Reply.Root.Uri, "err", err) 375 - } 376 - } 377 - } 378 - 379 - return nil 380 - case *bsky.FeedRepost: 381 - if rec.Subject != nil { 382 - if err := ix.crawlAtUriRef(ctx, rec.Subject.Uri); err != nil { 383 - log.Infow("failed to crawl repost subject", "cid", op.RecCid, "subjecturi", rec.Subject.Uri, "err", err) 384 - } 385 - } 386 - return nil 387 - case *bsky.FeedLike: 388 - if rec.Subject != nil 
{ 389 - if err := ix.crawlAtUriRef(ctx, rec.Subject.Uri); err != nil { 390 - log.Infow("failed to crawl vote subject", "cid", op.RecCid, "subjecturi", rec.Subject.Uri, "err", err) 391 - } 392 - } 393 - return nil 394 - case *bsky.GraphFollow: 395 - _, err := ix.GetUserOrMissing(ctx, rec.Subject) 396 - if err != nil { 397 - log.Infow("failed to crawl follow subject", "cid", op.RecCid, "subjectdid", rec.Subject, "err", err) 398 - } 399 - return nil 400 - case *bsky.GraphBlock: 401 - _, err := ix.GetUserOrMissing(ctx, rec.Subject) 402 - if err != nil { 403 - log.Infow("failed to crawl follow subject", "cid", op.RecCid, "subjectdid", rec.Subject, "err", err) 404 - } 405 - return nil 406 - case *bsky.ActorProfile: 407 - return nil 408 - default: 409 - log.Warnf("unrecognized record type: %T", op.Record) 410 - return nil 411 - } 412 - } 413 - 414 func (ix *Indexer) handleRecordCreateFeedLike(ctx context.Context, rec *bsky.FeedLike, evt *repomgr.RepoEvent, op *repomgr.RepoOp) error { 415 post, err := ix.GetPostOrMissing(ctx, rec.Subject.Uri) 416 if err != nil { ··· 658 return nil 659 } 660 661 - func (ix *Indexer) GetUserOrMissing(ctx context.Context, did string) (*models.ActorInfo, error) { 662 - ctx, span := otel.Tracer("indexer").Start(ctx, "getUserOrMissing") 663 - defer span.End() 664 - 665 - ai, err := ix.LookupUserByDid(ctx, did) 666 - if err == nil { 667 - return ai, nil 668 - } 669 - 670 - if !isNotFound(err) { 671 - return nil, err 672 - } 673 - 674 - // unknown user... create it and send it off to the crawler 675 - return ix.createMissingUserRecord(ctx, did) 676 - } 677 - 678 func (ix *Indexer) createMissingPostRecord(ctx context.Context, puri *util.ParsedUri) (*models.FeedPost, error) { 679 log.Warn("creating missing post record") 680 ai, err := ix.GetUserOrMissing(ctx, puri.Did) ··· 694 return &fp, nil 695 } 696 697 - func (ix *Indexer) createMissingUserRecord(ctx context.Context, did string) (*models.ActorInfo, error) { 698 - ctx, span := otel.Tracer("indexer").Start(ctx, "createMissingUserRecord") 699 - defer span.End() 700 - 701 - ai, err := ix.CreateExternalUser(ctx, did) 702 - if err != nil { 703 - return nil, err 704 - } 705 - 706 - if err := ix.addUserToCrawler(ctx, ai); err != nil { 707 - return nil, fmt.Errorf("failed to add unknown user to crawler: %w", err) 708 - } 709 - 710 - return ai, nil 711 - } 712 - 713 - func (ix *Indexer) addUserToCrawler(ctx context.Context, ai *models.ActorInfo) error { 714 - log.Infow("Sending user to crawler: ", "did", ai.Did) 715 - if ix.Crawler == nil { 716 - return nil 717 - } 718 - 719 - return ix.Crawler.Crawl(ctx, ai) 720 - } 721 - 722 - func (ix *Indexer) DidForUser(ctx context.Context, uid models.Uid) (string, error) { 723 - var ai models.ActorInfo 724 - if err := ix.db.First(&ai, "uid = ?", uid).Error; err != nil { 725 - return "", err 726 - } 727 - 728 - return ai.Did, nil 729 - } 730 - 731 - func (ix *Indexer) LookupUser(ctx context.Context, id models.Uid) (*models.ActorInfo, error) { 732 - var ai models.ActorInfo 733 - if err := ix.db.First(&ai, "uid = ?", id).Error; err != nil { 734 - return nil, err 735 - } 736 - 737 - return &ai, nil 738 - } 739 - 740 - func (ix *Indexer) LookupUserByDid(ctx context.Context, did string) (*models.ActorInfo, error) { 741 - var ai models.ActorInfo 742 - if err := ix.db.Find(&ai, "did = ?", did).Error; err != nil { 743 - return nil, err 744 - } 745 - 746 - if ai.ID == 0 { 747 - return nil, gorm.ErrRecordNotFound 748 - } 749 - 750 - return &ai, nil 751 - } 752 - 753 - func (ix *Indexer) 
LookupUserByHandle(ctx context.Context, handle string) (*models.ActorInfo, error) { 754 - var ai models.ActorInfo 755 - if err := ix.db.Find(&ai, "handle = ?", handle).Error; err != nil { 756 - return nil, err 757 - } 758 - 759 - if ai.ID == 0 { 760 - return nil, gorm.ErrRecordNotFound 761 - } 762 - 763 - return &ai, nil 764 - } 765 - 766 func (ix *Indexer) addNewPostNotification(ctx context.Context, post *bsky.FeedPost, fp *models.FeedPost, mentions []*models.ActorInfo) error { 767 if post.Reply != nil { 768 replyto, err := ix.GetPost(ctx, post.Reply.Parent.Uri) ··· 788 func (ix *Indexer) addNewVoteNotification(ctx context.Context, postauthor models.Uid, vr *models.VoteRecord) error { 789 return ix.notifman.AddUpVote(ctx, vr.Voter, vr.Post, vr.ID, postauthor) 790 } 791 - 792 - func (ix *Indexer) handleInitActor(ctx context.Context, evt *repomgr.RepoEvent, op *repomgr.RepoOp) error { 793 - ai := op.ActorInfo 794 - 795 - if err := ix.db.Clauses(clause.OnConflict{ 796 - Columns: []clause.Column{{Name: "uid"}}, 797 - UpdateAll: true, 798 - }).Create(&models.ActorInfo{ 799 - Uid: evt.User, 800 - Handle: ai.Handle, 801 - Did: ai.Did, 802 - DisplayName: ai.DisplayName, 803 - Type: ai.Type, 804 - PDS: evt.PDS, 805 - }).Error; err != nil { 806 - return fmt.Errorf("initializing new actor info: %w", err) 807 - } 808 - 809 - if err := ix.db.Create(&models.FollowRecord{ 810 - Follower: evt.User, 811 - Target: evt.User, 812 - }).Error; err != nil { 813 - return err 814 - } 815 - 816 - return nil 817 - } 818 - 819 - func isNotFound(err error) bool { 820 - if errors.Is(err, gorm.ErrRecordNotFound) { 821 - return true 822 - } 823 - 824 - return false 825 - } 826 - 827 - func (ix *Indexer) GetPost(ctx context.Context, uri string) (*models.FeedPost, error) { 828 - puri, err := util.ParseAtUri(uri) 829 - if err != nil { 830 - return nil, err 831 - } 832 - 833 - var post models.FeedPost 834 - if err := ix.db.First(&post, "rkey = ? AND author = (?)", puri.Rkey, ix.db.Model(models.ActorInfo{}).Where("did = ?", puri.Did).Select("id")).Error; err != nil { 835 - return nil, err 836 - } 837 - 838 - return &post, nil 839 - } 840 - 841 - // TODO: since this function is the only place we depend on the repomanager, i wonder if this should be wired some other way? 
842 - func (ix *Indexer) FetchAndIndexRepo(ctx context.Context, job *crawlWork) error { 843 - ctx, span := otel.Tracer("indexer").Start(ctx, "FetchAndIndexRepo") 844 - defer span.End() 845 - 846 - span.SetAttributes(attribute.Int("catchup", len(job.catchup))) 847 - 848 - ai := job.act 849 - 850 - var pds models.PDS 851 - if err := ix.db.First(&pds, "id = ?", ai.PDS).Error; err != nil { 852 - return fmt.Errorf("expected to find pds record (%d) in db for crawling one of their users: %w", ai.PDS, err) 853 - } 854 - 855 - curHead, err := ix.repomgr.GetRepoRoot(ctx, ai.Uid) 856 - if err != nil && !isNotFound(err) { 857 - return fmt.Errorf("failed to get repo root: %w", err) 858 - } 859 - 860 - var rebase *comatproto.SyncSubscribeRepos_Commit 861 - var rebaseIx int 862 - for i, j := range job.catchup { 863 - if j.evt.Rebase { 864 - rebase = j.evt 865 - rebaseIx = i 866 - break 867 - } 868 - } 869 - 870 - if rebase != nil { 871 - if err := ix.repomgr.HandleRebase(ctx, ai.PDS, ai.Uid, ai.Did, (*cid.Cid)(rebase.Prev), (cid.Cid)(rebase.Commit), rebase.Blocks); err != nil { 872 - return fmt.Errorf("handling rebase: %w", err) 873 - } 874 - // now process the rest of the catchup events 875 - // these are all events that got received *after* the rebase, but 876 - // before we could start processing it. 877 - // That means these should be the next operations that get cleanly 878 - // applied after the rebase 879 - for _, j := range job.catchup[rebaseIx+1:] { 880 - if err := ix.repomgr.HandleExternalUserEvent(ctx, pds.ID, ai.Uid, ai.Did, (*cid.Cid)(j.evt.Prev), j.evt.Blocks, j.evt.Ops); err != nil { 881 - return fmt.Errorf("post rebase catchup failed: %w", err) 882 - } 883 - } 884 - return nil 885 - } 886 - 887 - if !(job.initScrape || len(job.catchup) == 0) { 888 - first := job.catchup[0] 889 - if first.evt.Prev == nil || curHead == (cid.Cid)(*first.evt.Prev) { 890 - for _, j := range job.catchup { 891 - if err := ix.repomgr.HandleExternalUserEvent(ctx, pds.ID, ai.Uid, ai.Did, (*cid.Cid)(j.evt.Prev), j.evt.Blocks, j.evt.Ops); err != nil { 892 - // TODO: if we fail here, we should probably fall back to a repo re-sync 893 - return fmt.Errorf("post rebase catchup failed: %w", err) 894 - } 895 - } 896 - 897 - return nil 898 - } 899 - } 900 - 901 - var host string 902 - if pds.SSL { 903 - host = "https://" + pds.Host 904 - } else { 905 - host = "http://" + pds.Host 906 - } 907 - c := &xrpc.Client{ 908 - Host: host, 909 - } 910 - 911 - ix.ApplyPDSClientSettings(c) 912 - 913 - var from string 914 - if curHead.Defined() { 915 - from = curHead.String() 916 - } else { 917 - span.SetAttributes(attribute.Bool("full", true)) 918 - } 919 - 920 - limiter := ix.GetLimiter(pds.ID) 921 - if limiter == nil { 922 - limiter = rate.NewLimiter(rate.Limit(pds.CrawlRateLimit), 1) 923 - ix.SetLimiter(pds.ID, limiter) 924 - } 925 - 926 - // Wait to prevent DOSing the PDS when connecting to a new stream with lots of active repos 927 - limiter.Wait(ctx) 928 - 929 - log.Infow("SyncGetRepo", "did", ai.Did, "user", ai.Handle, "from", from) 930 - // TODO: max size on these? 
A malicious PDS could just send us a petabyte sized repo here and kill us 931 - repo, err := comatproto.SyncGetRepo(ctx, c, ai.Did, from, "") 932 - if err != nil { 933 - return fmt.Errorf("failed to fetch repo: %w", err) 934 - } 935 - 936 - // this process will send individual indexing events back to the indexer, doing a 'fast forward' of the users entire history 937 - // we probably want alternative ways of doing this for 'very large' or 'very old' repos, but this works for now 938 - if err := ix.repomgr.ImportNewRepo(ctx, ai.Uid, ai.Did, bytes.NewReader(repo), curHead); err != nil { 939 - span.RecordError(err) 940 - return fmt.Errorf("importing fetched repo (curHead: %s): %w", from, err) 941 - } 942 - 943 - // TODO: this is currently doing too much work, allowing us to ignore the catchup events we've gotten 944 - // need to do 'just enough' work... 945 - 946 - return nil 947 - }
··· 130 131 toobig := false 132 slice := evt.RepoSlice 133 + if len(slice) > MaxEventSliceLength || len(outops) > MaxOpsSliceLength || evt.TooBig { 134 slice = nil 135 outops = nil 136 toobig = true 137 } 138 139 log.Debugw("Sending event", "did", did) 140 if err := ix.events.AddEvent(ctx, &events.XRPCStreamEvent{ 141 RepoCommit: &comatproto.SyncSubscribeRepos_Commit{ 142 Repo: did, 143 Prev: (*lexutil.LexLink)(evt.OldRoot), 144 Blocks: slice, 145 + Rev: evt.Rev, 146 + Since: evt.Since, 147 Commit: lexutil.LexLink(evt.NewRoot), 148 Time: time.Now().Format(util.ISO8601), 149 Ops: outops, 150 TooBig: toobig, 151 }, 152 PrivUid: evt.User, 153 }); err != nil { ··· 160 func (ix *Indexer) handleRepoOp(ctx context.Context, evt *repomgr.RepoEvent, op *repomgr.RepoOp) error { 161 switch op.Kind { 162 case repomgr.EvtKindCreateRecord: 163 if ix.doAggregations { 164 _, err := ix.handleRecordCreate(ctx, evt, op, true) 165 if err != nil { 166 return fmt.Errorf("handle recordCreate: %w", err) 167 } 168 } 169 + if err := ix.crawlRecordReferences(ctx, op); err != nil { 170 + return err 171 + } 172 + 173 case repomgr.EvtKindDeleteRecord: 174 if ix.doAggregations { 175 if err := ix.handleRecordDelete(ctx, evt, op, true); err != nil { ··· 189 return nil 190 } 191 192 + func (ix *Indexer) crawlAtUriRef(ctx context.Context, uri string) error { 193 + puri, err := util.ParseAtUri(uri) 194 + if err != nil { 195 + return err 196 + } else { 197 + _, err := ix.GetUserOrMissing(ctx, puri.Did) 198 + if err != nil { 199 + return err 200 + } 201 + } 202 + return nil 203 + } 204 + func (ix *Indexer) crawlRecordReferences(ctx context.Context, op *repomgr.RepoOp) error { 205 + ctx, span := otel.Tracer("indexer").Start(ctx, "crawlRecordReferences") 206 + defer span.End() 207 + 208 + switch rec := op.Record.(type) { 209 + case *bsky.FeedPost: 210 + for _, e := range rec.Entities { 211 + if e.Type == "mention" { 212 + _, err := ix.GetUserOrMissing(ctx, e.Value) 213 + if err != nil { 214 + log.Infow("failed to parse user mention", "ref", e.Value, "err", err) 215 + } 216 + } 217 + } 218 + 219 + if rec.Reply != nil { 220 + if rec.Reply.Parent != nil { 221 + if err := ix.crawlAtUriRef(ctx, rec.Reply.Parent.Uri); err != nil { 222 + log.Infow("failed to crawl reply parent", "cid", op.RecCid, "replyuri", rec.Reply.Parent.Uri, "err", err) 223 + } 224 + } 225 + 226 + if rec.Reply.Root != nil { 227 + if err := ix.crawlAtUriRef(ctx, rec.Reply.Root.Uri); err != nil { 228 + log.Infow("failed to crawl reply root", "cid", op.RecCid, "rooturi", rec.Reply.Root.Uri, "err", err) 229 + } 230 + } 231 + } 232 + 233 + return nil 234 + case *bsky.FeedRepost: 235 + if rec.Subject != nil { 236 + if err := ix.crawlAtUriRef(ctx, rec.Subject.Uri); err != nil { 237 + log.Infow("failed to crawl repost subject", "cid", op.RecCid, "subjecturi", rec.Subject.Uri, "err", err) 238 + } 239 + } 240 + return nil 241 + case *bsky.FeedLike: 242 + if rec.Subject != nil { 243 + if err := ix.crawlAtUriRef(ctx, rec.Subject.Uri); err != nil { 244 + log.Infow("failed to crawl vote subject", "cid", op.RecCid, "subjecturi", rec.Subject.Uri, "err", err) 245 + } 246 + } 247 + return nil 248 + case *bsky.GraphFollow: 249 + _, err := ix.GetUserOrMissing(ctx, rec.Subject) 250 + if err != nil { 251 + log.Infow("failed to crawl follow subject", "cid", op.RecCid, "subjectdid", rec.Subject, "err", err) 252 + } 253 + return nil 254 + case *bsky.GraphBlock: 255 + _, err := ix.GetUserOrMissing(ctx, rec.Subject) 256 + if err != nil { 257 + log.Infow("failed to crawl follow subject", 
"cid", op.RecCid, "subjectdid", rec.Subject, "err", err) 258 + } 259 + return nil 260 + case *bsky.ActorProfile: 261 + return nil 262 + default: 263 + log.Warnf("unrecognized record type: %T", op.Record) 264 + return nil 265 + } 266 + } 267 + 268 + func (ix *Indexer) GetUserOrMissing(ctx context.Context, did string) (*models.ActorInfo, error) { 269 + ctx, span := otel.Tracer("indexer").Start(ctx, "getUserOrMissing") 270 + defer span.End() 271 + 272 + ai, err := ix.LookupUserByDid(ctx, did) 273 + if err == nil { 274 + return ai, nil 275 + } 276 + 277 + if !isNotFound(err) { 278 + return nil, err 279 + } 280 + 281 + // unknown user... create it and send it off to the crawler 282 + return ix.createMissingUserRecord(ctx, did) 283 + } 284 + 285 + func (ix *Indexer) createMissingUserRecord(ctx context.Context, did string) (*models.ActorInfo, error) { 286 + ctx, span := otel.Tracer("indexer").Start(ctx, "createMissingUserRecord") 287 + defer span.End() 288 + 289 + ai, err := ix.CreateExternalUser(ctx, did) 290 + if err != nil { 291 + return nil, err 292 + } 293 + 294 + if err := ix.addUserToCrawler(ctx, ai); err != nil { 295 + return nil, fmt.Errorf("failed to add unknown user to crawler: %w", err) 296 + } 297 + 298 + return ai, nil 299 + } 300 + 301 + func (ix *Indexer) addUserToCrawler(ctx context.Context, ai *models.ActorInfo) error { 302 + log.Infow("Sending user to crawler: ", "did", ai.Did) 303 + if ix.Crawler == nil { 304 + return nil 305 + } 306 + 307 + return ix.Crawler.Crawl(ctx, ai) 308 + } 309 + 310 + func (ix *Indexer) DidForUser(ctx context.Context, uid models.Uid) (string, error) { 311 + var ai models.ActorInfo 312 + if err := ix.db.First(&ai, "uid = ?", uid).Error; err != nil { 313 + return "", err 314 + } 315 + 316 + return ai.Did, nil 317 + } 318 + 319 + func (ix *Indexer) LookupUser(ctx context.Context, id models.Uid) (*models.ActorInfo, error) { 320 + var ai models.ActorInfo 321 + if err := ix.db.First(&ai, "uid = ?", id).Error; err != nil { 322 + return nil, err 323 + } 324 + 325 + return &ai, nil 326 + } 327 + 328 + func (ix *Indexer) LookupUserByDid(ctx context.Context, did string) (*models.ActorInfo, error) { 329 + var ai models.ActorInfo 330 + if err := ix.db.Find(&ai, "did = ?", did).Error; err != nil { 331 + return nil, err 332 + } 333 + 334 + if ai.ID == 0 { 335 + return nil, gorm.ErrRecordNotFound 336 + } 337 + 338 + return &ai, nil 339 + } 340 + 341 + func (ix *Indexer) LookupUserByHandle(ctx context.Context, handle string) (*models.ActorInfo, error) { 342 + var ai models.ActorInfo 343 + if err := ix.db.Find(&ai, "handle = ?", handle).Error; err != nil { 344 + return nil, err 345 + } 346 + 347 + if ai.ID == 0 { 348 + return nil, gorm.ErrRecordNotFound 349 + } 350 + 351 + return &ai, nil 352 + } 353 + 354 + func (ix *Indexer) handleInitActor(ctx context.Context, evt *repomgr.RepoEvent, op *repomgr.RepoOp) error { 355 + ai := op.ActorInfo 356 + 357 + if err := ix.db.Clauses(clause.OnConflict{ 358 + Columns: []clause.Column{{Name: "uid"}}, 359 + UpdateAll: true, 360 + }).Create(&models.ActorInfo{ 361 + Uid: evt.User, 362 + Handle: ai.Handle, 363 + Did: ai.Did, 364 + DisplayName: ai.DisplayName, 365 + Type: ai.Type, 366 + PDS: evt.PDS, 367 + }).Error; err != nil { 368 + return fmt.Errorf("initializing new actor info: %w", err) 369 + } 370 + 371 + if err := ix.db.Create(&models.FollowRecord{ 372 + Follower: evt.User, 373 + Target: evt.User, 374 + }).Error; err != nil { 375 + return err 376 + } 377 + 378 + return nil 379 + } 380 + 381 + func isNotFound(err error) bool { 
382 + if errors.Is(err, gorm.ErrRecordNotFound) { 383 + return true 384 + } 385 + 386 + return false 387 + } 388 + 389 + // TODO: since this function is the only place we depend on the repomanager, i wonder if this should be wired some other way? 390 + func (ix *Indexer) FetchAndIndexRepo(ctx context.Context, job *crawlWork) error { 391 + ctx, span := otel.Tracer("indexer").Start(ctx, "FetchAndIndexRepo") 392 + defer span.End() 393 + 394 + span.SetAttributes(attribute.Int("catchup", len(job.catchup))) 395 + 396 + ai := job.act 397 + 398 + var pds models.PDS 399 + if err := ix.db.First(&pds, "id = ?", ai.PDS).Error; err != nil { 400 + return fmt.Errorf("expected to find pds record (%d) in db for crawling one of their users: %w", ai.PDS, err) 401 + } 402 + 403 + rev, err := ix.repomgr.GetRepoRev(ctx, ai.Uid) 404 + if err != nil && !isNotFound(err) { 405 + return fmt.Errorf("failed to get repo root: %w", err) 406 + } 407 + 408 + if !(job.initScrape || len(job.catchup) == 0) { 409 + first := job.catchup[0] 410 + if first.evt.Since == nil || rev == *first.evt.Since { 411 + for _, j := range job.catchup { 412 + if err := ix.repomgr.HandleExternalUserEvent(ctx, pds.ID, ai.Uid, ai.Did, j.evt.Since, j.evt.Rev, j.evt.Blocks, j.evt.Ops); err != nil { 413 + // TODO: if we fail here, we should probably fall back to a repo re-sync 414 + return fmt.Errorf("post rebase catchup failed: %w", err) 415 + } 416 + } 417 + 418 + return nil 419 + } 420 + } 421 + 422 + var host string 423 + if pds.SSL { 424 + host = "https://" + pds.Host 425 + } else { 426 + host = "http://" + pds.Host 427 + } 428 + c := &xrpc.Client{ 429 + Host: host, 430 + } 431 + 432 + ix.ApplyPDSClientSettings(c) 433 + 434 + if rev == "" { 435 + span.SetAttributes(attribute.Bool("full", true)) 436 + } 437 + 438 + limiter := ix.GetLimiter(pds.ID) 439 + if limiter == nil { 440 + limiter = rate.NewLimiter(rate.Limit(pds.CrawlRateLimit), 1) 441 + ix.SetLimiter(pds.ID, limiter) 442 + } 443 + 444 + // Wait to prevent DOSing the PDS when connecting to a new stream with lots of active repos 445 + limiter.Wait(ctx) 446 + 447 + log.Infow("SyncGetRepo", "did", ai.Did, "user", ai.Handle, "since", rev) 448 + // TODO: max size on these? A malicious PDS could just send us a petabyte sized repo here and kill us 449 + repo, err := comatproto.SyncGetRepo(ctx, c, ai.Did, rev) 450 + if err != nil { 451 + return fmt.Errorf("failed to fetch repo: %w", err) 452 + } 453 + 454 + // this process will send individual indexing events back to the indexer, doing a 'fast forward' of the users entire history 455 + // we probably want alternative ways of doing this for 'very large' or 'very old' repos, but this works for now 456 + if err := ix.repomgr.ImportNewRepo(ctx, ai.Uid, ai.Did, bytes.NewReader(repo), &rev); err != nil { 457 + span.RecordError(err) 458 + return fmt.Errorf("importing fetched repo (curRev: %s): %w", rev, err) 459 + } 460 + 461 + // TODO: this is currently doing too much work, allowing us to ignore the catchup events we've gotten 462 + // need to do 'just enough' work... 463 + 464 + return nil 465 + } 466 + 467 + func (ix *Indexer) GetPost(ctx context.Context, uri string) (*models.FeedPost, error) { 468 + puri, err := util.ParseAtUri(uri) 469 + if err != nil { 470 + return nil, err 471 + } 472 + 473 + var post models.FeedPost 474 + if err := ix.db.First(&post, "rkey = ? 
AND author = (?)", puri.Rkey, ix.db.Model(models.ActorInfo{}).Where("did = ?", puri.Did).Select("id")).Error; err != nil { 475 + return nil, err 476 + } 477 + 478 + return &post, nil 479 + } 480 + 481 func (ix *Indexer) handleRecordDelete(ctx context.Context, evt *repomgr.RepoEvent, op *repomgr.RepoOp, local bool) error { 482 log.Infow("record delete event", "collection", op.Collection) 483 ··· 511 512 log.Warn("TODO: remove notifications on delete") 513 /* 514 + if err := ix.notifman.RemoveRepost(ctx, fp.Author, rr.ID, evt.User); err != nil { 515 + return nil, err 516 + } 517 */ 518 519 case "app.bsky.feed.vote": ··· 619 return out, nil 620 } 621 622 func (ix *Indexer) handleRecordCreateFeedLike(ctx context.Context, rec *bsky.FeedLike, evt *repomgr.RepoEvent, op *repomgr.RepoOp) error { 623 post, err := ix.GetPostOrMissing(ctx, rec.Subject.Uri) 624 if err != nil { ··· 866 return nil 867 } 868 869 func (ix *Indexer) createMissingPostRecord(ctx context.Context, puri *util.ParsedUri) (*models.FeedPost, error) { 870 log.Warn("creating missing post record") 871 ai, err := ix.GetUserOrMissing(ctx, puri.Did) ··· 885 return &fp, nil 886 } 887 888 func (ix *Indexer) addNewPostNotification(ctx context.Context, post *bsky.FeedPost, fp *models.FeedPost, mentions []*models.ActorInfo) error { 889 if post.Reply != nil { 890 replyto, err := ix.GetPost(ctx, post.Reply.Parent.Uri) ··· 910 func (ix *Indexer) addNewVoteNotification(ctx context.Context, postauthor models.Uid, vr *models.VoteRecord) error { 911 return ix.notifman.AddUpVote(ctx, vr.Voter, vr.Post, vr.ID, postauthor) 912 }
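Note on the indexer change above: the catch-up path in FetchAndIndexRepo now keys off repo revs rather than commit CIDs. If the first buffered firehose event chains onto the rev we already have stored (or carries no `since`), the buffered events are applied in order; otherwise the code falls back to a full `com.atproto.sync.getRepo` fetch with `since` set to the local rev. The sketch below restates that decision in isolation; `commitEvt` and `canFastForward` are illustrative names, not types from this codebase, and the rev values are made up.

```go
package main

import "fmt"

// commitEvt is a pared-down stand-in (for illustration only) for the firehose
// commit events the indexer buffers in job.catchup; the real code uses
// comatproto.SyncSubscribeRepos_Commit.
type commitEvt struct {
	Since *string // rev this commit builds on; nil for a brand-new repo
	Rev   string  // rev after applying this commit
}

// canFastForward mirrors the check FetchAndIndexRepo performs: if the first
// buffered event chains directly onto the rev we already have, the events can
// be applied in order; otherwise the caller should fall back to a full
// getRepo fetch.
func canFastForward(localRev string, catchup []commitEvt) bool {
	if len(catchup) == 0 {
		return false
	}
	first := catchup[0]
	return first.Since == nil || localRev == *first.Since
}

func main() {
	rev := "3jzfcijpj2z2a" // made-up TID for the example
	evts := []commitEvt{{Since: &rev, Rev: "3jzfcijpj2z2b"}}
	fmt.Println("fast-forward possible:", canFastForward(rev, evts))
}
```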
+1 -1
lex/gen.go
··· 1211 omit = ",omitempty" 1212 } 1213 cval := ts.id 1214 - if ts.defName != "" { 1215 cval += "#" + ts.defName 1216 } 1217 pf("\tLexiconTypeID string `json:\"$type,const=%s%s\" cborgen:\"$type,const=%s%s\"`\n", cval, omit, cval, omit)
··· 1211 omit = ",omitempty" 1212 } 1213 cval := ts.id 1214 + if ts.defName != "" && ts.defName != "main" { 1215 cval += "#" + ts.defName 1216 } 1217 pf("\tLexiconTypeID string `json:\"$type,const=%s%s\" cborgen:\"$type,const=%s%s\"`\n", cval, omit, cval, omit)
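The one-line generator change above means a lexicon's `main` def no longer gets a fragment appended to its `$type` constant; only named sub-defs do. A minimal sketch of the resulting rule, with `lexTypeID` as our own helper name and the example NSIDs chosen purely for illustration:

```go
package main

import "fmt"

// lexTypeID restates the rule the generator applies when emitting the $type
// const tag: a lexicon's "main" def uses the bare NSID, while named sub-defs
// get an "nsid#defName" fragment.
func lexTypeID(nsid, defName string) string {
	if defName != "" && defName != "main" {
		return nsid + "#" + defName
	}
	return nsid
}

func main() {
	fmt.Println(lexTypeID("app.bsky.feed.post", "main"))     // app.bsky.feed.post
	fmt.Println(lexTypeID("app.bsky.embed.images", "image")) // app.bsky.embed.images#image
}
```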
+20 -33
pds/handlers.go
··· 565 panic("not yet implemented") 566 } 567 568 - func (s *Server) handleComAtprotoSyncGetCheckout(ctx context.Context, commit string, did string) (io.Reader, error) { 569 panic("not yet implemented") 570 } 571 ··· 593 panic("not yet implemented") 594 } 595 596 - func (s *Server) handleComAtprotoSyncGetRepo(ctx context.Context, did string, earliest, latest string) (io.Reader, error) { 597 - var earlyCid cid.Cid 598 - if earliest != "" { 599 - cc, err := cid.Decode(earliest) 600 - if err != nil { 601 - return nil, err 602 - } 603 - 604 - earlyCid = cc 605 - } 606 - 607 - var lateCid cid.Cid 608 - if latest != "" { 609 - cc, err := cid.Decode(latest) 610 - if err != nil { 611 - return nil, err 612 - } 613 - 614 - lateCid = cc 615 - } 616 - 617 targetUser, err := s.lookupUser(ctx, did) 618 if err != nil { 619 return nil, err 620 } 621 622 buf := new(bytes.Buffer) 623 - if err := s.repoman.ReadRepo(ctx, targetUser.ID, earlyCid, lateCid, buf); err != nil { 624 return nil, err 625 } 626 ··· 681 panic("nyi") 682 } 683 684 - func (s *Server) handleComAtprotoSyncListBlobs(ctx context.Context, did string, earliest string, latest string) (*comatprototypes.SyncListBlobs_Output, error) { 685 panic("nyi") 686 } 687 ··· 798 panic("nyi") 799 } 800 801 - func (s *Server) handleComAtprotoRepoRebaseRepo(ctx context.Context, body *comatprototypes.RepoRebaseRepo_Input) error { 802 - u, err := s.getUser(ctx) 803 - if err != nil { 804 - return err 805 - } 806 - 807 - return s.repoman.DoRebase(ctx, u.ID) 808 - } 809 - 810 func (s *Server) handleAppBskyFeedDescribeFeedGenerator(ctx context.Context) (*appbskytypes.FeedDescribeFeedGenerator_Output, error) { 811 panic("nyi") 812 } ··· 843 func (s *Server) handleComAtprotoAdminSendEmail(ctx context.Context, body *comatprototypes.AdminSendEmail_Input) (*comatprototypes.AdminSendEmail_Output, error) { 844 panic("nyi") 845 }
··· 565 panic("not yet implemented") 566 } 567 568 + func (s *Server) handleComAtprotoSyncGetCheckout(ctx context.Context, did string) (io.Reader, error) { 569 panic("not yet implemented") 570 } 571 ··· 593 panic("not yet implemented") 594 } 595 596 + func (s *Server) handleComAtprotoSyncGetRepo(ctx context.Context, did string, since string) (io.Reader, error) { 597 targetUser, err := s.lookupUser(ctx, did) 598 if err != nil { 599 return nil, err 600 } 601 602 buf := new(bytes.Buffer) 603 + if err := s.repoman.ReadRepo(ctx, targetUser.ID, since, buf); err != nil { 604 return nil, err 605 } 606 ··· 661 panic("nyi") 662 } 663 664 + func (s *Server) handleComAtprotoSyncListBlobs(ctx context.Context, cursor string, did string, limit int, since string) (*comatprototypes.SyncListBlobs_Output, error) { 665 panic("nyi") 666 } 667 ··· 778 panic("nyi") 779 } 780 781 func (s *Server) handleAppBskyFeedDescribeFeedGenerator(ctx context.Context) (*appbskytypes.FeedDescribeFeedGenerator_Output, error) { 782 panic("nyi") 783 } ··· 814 func (s *Server) handleComAtprotoAdminSendEmail(ctx context.Context, body *comatprototypes.AdminSendEmail_Input) (*comatprototypes.AdminSendEmail_Output, error) { 815 panic("nyi") 816 } 817 + 818 + func (s *Server) handleAppBskyFeedGetActorLikes(ctx context.Context, actor string, cursor string, limit int) (*appbskytypes.FeedGetActorLikes_Output, error) { 819 + panic("nyi") 820 + } 821 + 822 + func (s *Server) handleAppBskyNotificationRegisterPush(ctx context.Context, body *appbskytypes.NotificationRegisterPush_Input) error { 823 + panic("nyi") 824 + } 825 + 826 + func (s *Server) handleComAtprotoSyncGetLatestCommit(ctx context.Context, did string) (*comatprototypes.SyncGetLatestCommit_Output, error) { 827 + panic("nyi") 828 + } 829 + 830 + func (s *Server) handleComAtprotoTempUpgradeRepoVersion(ctx context.Context, body *comatprototypes.TempUpgradeRepoVersion_Input) error { 831 + panic("nyi") 832 + }
+2 -2
pds/server.go
··· 142 u.ID = subj.Uid 143 } 144 145 - return s.repoman.HandleExternalUserEvent(ctx, host.ID, u.ID, u.Did, (*cid.Cid)(evt.Prev), evt.Blocks, evt.Ops) 146 default: 147 return fmt.Errorf("invalid fed event") 148 } ··· 338 } 339 340 e.HTTPErrorHandler = func(err error, ctx echo.Context) { 341 - fmt.Printf("HANDLER ERROR: (%s) %s\n", ctx.Path(), err) 342 343 // TODO: need to properly figure out where http error codes for error 344 // types get decided. This spot is reasonable, but maybe a bit weird.
··· 142 u.ID = subj.Uid 143 } 144 145 + return s.repoman.HandleExternalUserEvent(ctx, host.ID, u.ID, u.Did, evt.Since, evt.Rev, evt.Blocks, evt.Ops) 146 default: 147 return fmt.Errorf("invalid fed event") 148 } ··· 338 } 339 340 e.HTTPErrorHandler = func(err error, ctx echo.Context) { 341 + fmt.Printf("PDS HANDLER ERROR: (%s) %s\n", ctx.Path(), err) 342 343 // TODO: need to properly figure out where http error codes for error 344 // types get decided. This spot is reasonable, but maybe a bit weird.
+94 -60
pds/stubs.go
··· 20 e.GET("/xrpc/app.bsky.actor.searchActorsTypeahead", s.HandleAppBskyActorSearchActorsTypeahead) 21 e.GET("/xrpc/app.bsky.feed.describeFeedGenerator", s.HandleAppBskyFeedDescribeFeedGenerator) 22 e.GET("/xrpc/app.bsky.feed.getActorFeeds", s.HandleAppBskyFeedGetActorFeeds) 23 e.GET("/xrpc/app.bsky.feed.getAuthorFeed", s.HandleAppBskyFeedGetAuthorFeed) 24 e.GET("/xrpc/app.bsky.feed.getFeed", s.HandleAppBskyFeedGetFeed) 25 e.GET("/xrpc/app.bsky.feed.getFeedGenerator", s.HandleAppBskyFeedGetFeedGenerator) ··· 43 e.POST("/xrpc/app.bsky.graph.unmuteActorList", s.HandleAppBskyGraphUnmuteActorList) 44 e.GET("/xrpc/app.bsky.notification.getUnreadCount", s.HandleAppBskyNotificationGetUnreadCount) 45 e.GET("/xrpc/app.bsky.notification.listNotifications", s.HandleAppBskyNotificationListNotifications) 46 e.POST("/xrpc/app.bsky.notification.updateSeen", s.HandleAppBskyNotificationUpdateSeen) 47 e.POST("/xrpc/app.bsky.unspecced.applyLabels", s.HandleAppBskyUnspeccedApplyLabels) 48 e.GET("/xrpc/app.bsky.unspecced.getPopular", s.HandleAppBskyUnspeccedGetPopular) ··· 219 var handleErr error 220 // func (s *Server) handleAppBskyFeedGetActorFeeds(ctx context.Context,actor string,cursor string,limit int) (*appbskytypes.FeedGetActorFeeds_Output, error) 221 out, handleErr = s.handleAppBskyFeedGetActorFeeds(ctx, actor, cursor, limit) 222 if handleErr != nil { 223 return handleErr 224 } ··· 751 return c.JSON(200, out) 752 } 753 754 func (s *Server) HandleAppBskyNotificationUpdateSeen(c echo.Context) error { 755 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleAppBskyNotificationUpdateSeen") 756 defer span.End() ··· 883 e.GET("/xrpc/com.atproto.admin.getModerationReports", s.HandleComAtprotoAdminGetModerationReports) 884 e.GET("/xrpc/com.atproto.admin.getRecord", s.HandleComAtprotoAdminGetRecord) 885 e.GET("/xrpc/com.atproto.admin.getRepo", s.HandleComAtprotoAdminGetRepo) 886 - e.POST("/xrpc/com.atproto.admin.rebaseRepo", s.HandleComAtprotoAdminRebaseRepo) 887 e.POST("/xrpc/com.atproto.admin.resolveModerationReports", s.HandleComAtprotoAdminResolveModerationReports) 888 e.POST("/xrpc/com.atproto.admin.reverseModerationAction", s.HandleComAtprotoAdminReverseModerationAction) 889 e.GET("/xrpc/com.atproto.admin.searchRepos", s.HandleComAtprotoAdminSearchRepos) ··· 902 e.GET("/xrpc/com.atproto.repo.getRecord", s.HandleComAtprotoRepoGetRecord) 903 e.GET("/xrpc/com.atproto.repo.listRecords", s.HandleComAtprotoRepoListRecords) 904 e.POST("/xrpc/com.atproto.repo.putRecord", s.HandleComAtprotoRepoPutRecord) 905 - e.POST("/xrpc/com.atproto.repo.rebaseRepo", s.HandleComAtprotoRepoRebaseRepo) 906 e.POST("/xrpc/com.atproto.repo.uploadBlob", s.HandleComAtprotoRepoUploadBlob) 907 e.POST("/xrpc/com.atproto.server.createAccount", s.HandleComAtprotoServerCreateAccount) 908 e.POST("/xrpc/com.atproto.server.createAppPassword", s.HandleComAtprotoServerCreateAppPassword) ··· 923 e.GET("/xrpc/com.atproto.sync.getBlob", s.HandleComAtprotoSyncGetBlob) 924 e.GET("/xrpc/com.atproto.sync.getBlocks", s.HandleComAtprotoSyncGetBlocks) 925 e.GET("/xrpc/com.atproto.sync.getCheckout", s.HandleComAtprotoSyncGetCheckout) 926 - e.GET("/xrpc/com.atproto.sync.getCommitPath", s.HandleComAtprotoSyncGetCommitPath) 927 e.GET("/xrpc/com.atproto.sync.getHead", s.HandleComAtprotoSyncGetHead) 928 e.GET("/xrpc/com.atproto.sync.getRecord", s.HandleComAtprotoSyncGetRecord) 929 e.GET("/xrpc/com.atproto.sync.getRepo", s.HandleComAtprotoSyncGetRepo) 930 e.GET("/xrpc/com.atproto.sync.listBlobs", s.HandleComAtprotoSyncListBlobs) 931 
e.GET("/xrpc/com.atproto.sync.listRepos", s.HandleComAtprotoSyncListRepos) 932 e.POST("/xrpc/com.atproto.sync.notifyOfUpdate", s.HandleComAtprotoSyncNotifyOfUpdate) 933 e.POST("/xrpc/com.atproto.sync.requestCrawl", s.HandleComAtprotoSyncRequestCrawl) 934 return nil 935 } 936 ··· 1152 return c.JSON(200, out) 1153 } 1154 1155 - func (s *Server) HandleComAtprotoAdminRebaseRepo(c echo.Context) error { 1156 - ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoAdminRebaseRepo") 1157 - defer span.End() 1158 - 1159 - var body comatprototypes.AdminRebaseRepo_Input 1160 - if err := c.Bind(&body); err != nil { 1161 - return err 1162 - } 1163 - var handleErr error 1164 - // func (s *Server) handleComAtprotoAdminRebaseRepo(ctx context.Context,body *comatprototypes.AdminRebaseRepo_Input) error 1165 - handleErr = s.handleComAtprotoAdminRebaseRepo(ctx, &body) 1166 - if handleErr != nil { 1167 - return handleErr 1168 - } 1169 - return nil 1170 - } 1171 - 1172 func (s *Server) HandleComAtprotoAdminResolveModerationReports(c echo.Context) error { 1173 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoAdminResolveModerationReports") 1174 defer span.End() ··· 1519 return c.JSON(200, out) 1520 } 1521 1522 - func (s *Server) HandleComAtprotoRepoRebaseRepo(c echo.Context) error { 1523 - ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoRepoRebaseRepo") 1524 - defer span.End() 1525 - 1526 - var body comatprototypes.RepoRebaseRepo_Input 1527 - if err := c.Bind(&body); err != nil { 1528 - return err 1529 - } 1530 - var handleErr error 1531 - // func (s *Server) handleComAtprotoRepoRebaseRepo(ctx context.Context,body *comatprototypes.RepoRebaseRepo_Input) error 1532 - handleErr = s.handleComAtprotoRepoRebaseRepo(ctx, &body) 1533 - if handleErr != nil { 1534 - return handleErr 1535 - } 1536 - return nil 1537 - } 1538 - 1539 func (s *Server) HandleComAtprotoRepoUploadBlob(c echo.Context) error { 1540 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoRepoUploadBlob") 1541 defer span.End() ··· 1854 func (s *Server) HandleComAtprotoSyncGetCheckout(c echo.Context) error { 1855 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetCheckout") 1856 defer span.End() 1857 - commit := c.QueryParam("commit") 1858 did := c.QueryParam("did") 1859 var out io.Reader 1860 var handleErr error 1861 - // func (s *Server) handleComAtprotoSyncGetCheckout(ctx context.Context,commit string,did string) (io.Reader, error) 1862 - out, handleErr = s.handleComAtprotoSyncGetCheckout(ctx, commit, did) 1863 if handleErr != nil { 1864 return handleErr 1865 } 1866 return c.Stream(200, "application/vnd.ipld.car", out) 1867 } 1868 1869 - func (s *Server) HandleComAtprotoSyncGetCommitPath(c echo.Context) error { 1870 - ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetCommitPath") 1871 defer span.End() 1872 did := c.QueryParam("did") 1873 - earliest := c.QueryParam("earliest") 1874 - latest := c.QueryParam("latest") 1875 - var out *comatprototypes.SyncGetCommitPath_Output 1876 var handleErr error 1877 - // func (s *Server) handleComAtprotoSyncGetCommitPath(ctx context.Context,did string,earliest string,latest string) (*comatprototypes.SyncGetCommitPath_Output, error) 1878 - out, handleErr = s.handleComAtprotoSyncGetCommitPath(ctx, did, earliest, latest) 1879 if handleErr != nil { 1880 return handleErr 1881 } 1882 return c.JSON(200, out) 1883 } 1884 1885 - func (s 
*Server) HandleComAtprotoSyncGetHead(c echo.Context) error { 1886 - ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetHead") 1887 defer span.End() 1888 did := c.QueryParam("did") 1889 - var out *comatprototypes.SyncGetHead_Output 1890 var handleErr error 1891 - // func (s *Server) handleComAtprotoSyncGetHead(ctx context.Context,did string) (*comatprototypes.SyncGetHead_Output, error) 1892 - out, handleErr = s.handleComAtprotoSyncGetHead(ctx, did) 1893 if handleErr != nil { 1894 return handleErr 1895 } ··· 1917 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetRepo") 1918 defer span.End() 1919 did := c.QueryParam("did") 1920 - earliest := c.QueryParam("earliest") 1921 - latest := c.QueryParam("latest") 1922 var out io.Reader 1923 var handleErr error 1924 - // func (s *Server) handleComAtprotoSyncGetRepo(ctx context.Context,did string,earliest string,latest string) (io.Reader, error) 1925 - out, handleErr = s.handleComAtprotoSyncGetRepo(ctx, did, earliest, latest) 1926 if handleErr != nil { 1927 return handleErr 1928 } ··· 1932 func (s *Server) HandleComAtprotoSyncListBlobs(c echo.Context) error { 1933 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncListBlobs") 1934 defer span.End() 1935 did := c.QueryParam("did") 1936 - earliest := c.QueryParam("earliest") 1937 - latest := c.QueryParam("latest") 1938 var out *comatprototypes.SyncListBlobs_Output 1939 var handleErr error 1940 - // func (s *Server) handleComAtprotoSyncListBlobs(ctx context.Context,did string,earliest string,latest string) (*comatprototypes.SyncListBlobs_Output, error) 1941 - out, handleErr = s.handleComAtprotoSyncListBlobs(ctx, did, earliest, latest) 1942 if handleErr != nil { 1943 return handleErr 1944 } ··· 2003 } 2004 return nil 2005 }
··· 20 e.GET("/xrpc/app.bsky.actor.searchActorsTypeahead", s.HandleAppBskyActorSearchActorsTypeahead) 21 e.GET("/xrpc/app.bsky.feed.describeFeedGenerator", s.HandleAppBskyFeedDescribeFeedGenerator) 22 e.GET("/xrpc/app.bsky.feed.getActorFeeds", s.HandleAppBskyFeedGetActorFeeds) 23 + e.GET("/xrpc/app.bsky.feed.getActorLikes", s.HandleAppBskyFeedGetActorLikes) 24 e.GET("/xrpc/app.bsky.feed.getAuthorFeed", s.HandleAppBskyFeedGetAuthorFeed) 25 e.GET("/xrpc/app.bsky.feed.getFeed", s.HandleAppBskyFeedGetFeed) 26 e.GET("/xrpc/app.bsky.feed.getFeedGenerator", s.HandleAppBskyFeedGetFeedGenerator) ··· 44 e.POST("/xrpc/app.bsky.graph.unmuteActorList", s.HandleAppBskyGraphUnmuteActorList) 45 e.GET("/xrpc/app.bsky.notification.getUnreadCount", s.HandleAppBskyNotificationGetUnreadCount) 46 e.GET("/xrpc/app.bsky.notification.listNotifications", s.HandleAppBskyNotificationListNotifications) 47 + e.POST("/xrpc/app.bsky.notification.registerPush", s.HandleAppBskyNotificationRegisterPush) 48 e.POST("/xrpc/app.bsky.notification.updateSeen", s.HandleAppBskyNotificationUpdateSeen) 49 e.POST("/xrpc/app.bsky.unspecced.applyLabels", s.HandleAppBskyUnspeccedApplyLabels) 50 e.GET("/xrpc/app.bsky.unspecced.getPopular", s.HandleAppBskyUnspeccedGetPopular) ··· 221 var handleErr error 222 // func (s *Server) handleAppBskyFeedGetActorFeeds(ctx context.Context,actor string,cursor string,limit int) (*appbskytypes.FeedGetActorFeeds_Output, error) 223 out, handleErr = s.handleAppBskyFeedGetActorFeeds(ctx, actor, cursor, limit) 224 + if handleErr != nil { 225 + return handleErr 226 + } 227 + return c.JSON(200, out) 228 + } 229 + 230 + func (s *Server) HandleAppBskyFeedGetActorLikes(c echo.Context) error { 231 + ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleAppBskyFeedGetActorLikes") 232 + defer span.End() 233 + actor := c.QueryParam("actor") 234 + cursor := c.QueryParam("cursor") 235 + 236 + var limit int 237 + if p := c.QueryParam("limit"); p != "" { 238 + var err error 239 + limit, err = strconv.Atoi(p) 240 + if err != nil { 241 + return err 242 + } 243 + } else { 244 + limit = 50 245 + } 246 + var out *appbskytypes.FeedGetActorLikes_Output 247 + var handleErr error 248 + // func (s *Server) handleAppBskyFeedGetActorLikes(ctx context.Context,actor string,cursor string,limit int) (*appbskytypes.FeedGetActorLikes_Output, error) 249 + out, handleErr = s.handleAppBskyFeedGetActorLikes(ctx, actor, cursor, limit) 250 if handleErr != nil { 251 return handleErr 252 } ··· 779 return c.JSON(200, out) 780 } 781 782 + func (s *Server) HandleAppBskyNotificationRegisterPush(c echo.Context) error { 783 + ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleAppBskyNotificationRegisterPush") 784 + defer span.End() 785 + 786 + var body appbskytypes.NotificationRegisterPush_Input 787 + if err := c.Bind(&body); err != nil { 788 + return err 789 + } 790 + var handleErr error 791 + // func (s *Server) handleAppBskyNotificationRegisterPush(ctx context.Context,body *appbskytypes.NotificationRegisterPush_Input) error 792 + handleErr = s.handleAppBskyNotificationRegisterPush(ctx, &body) 793 + if handleErr != nil { 794 + return handleErr 795 + } 796 + return nil 797 + } 798 + 799 func (s *Server) HandleAppBskyNotificationUpdateSeen(c echo.Context) error { 800 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleAppBskyNotificationUpdateSeen") 801 defer span.End() ··· 928 e.GET("/xrpc/com.atproto.admin.getModerationReports", s.HandleComAtprotoAdminGetModerationReports) 929 
e.GET("/xrpc/com.atproto.admin.getRecord", s.HandleComAtprotoAdminGetRecord) 930 e.GET("/xrpc/com.atproto.admin.getRepo", s.HandleComAtprotoAdminGetRepo) 931 e.POST("/xrpc/com.atproto.admin.resolveModerationReports", s.HandleComAtprotoAdminResolveModerationReports) 932 e.POST("/xrpc/com.atproto.admin.reverseModerationAction", s.HandleComAtprotoAdminReverseModerationAction) 933 e.GET("/xrpc/com.atproto.admin.searchRepos", s.HandleComAtprotoAdminSearchRepos) ··· 946 e.GET("/xrpc/com.atproto.repo.getRecord", s.HandleComAtprotoRepoGetRecord) 947 e.GET("/xrpc/com.atproto.repo.listRecords", s.HandleComAtprotoRepoListRecords) 948 e.POST("/xrpc/com.atproto.repo.putRecord", s.HandleComAtprotoRepoPutRecord) 949 e.POST("/xrpc/com.atproto.repo.uploadBlob", s.HandleComAtprotoRepoUploadBlob) 950 e.POST("/xrpc/com.atproto.server.createAccount", s.HandleComAtprotoServerCreateAccount) 951 e.POST("/xrpc/com.atproto.server.createAppPassword", s.HandleComAtprotoServerCreateAppPassword) ··· 966 e.GET("/xrpc/com.atproto.sync.getBlob", s.HandleComAtprotoSyncGetBlob) 967 e.GET("/xrpc/com.atproto.sync.getBlocks", s.HandleComAtprotoSyncGetBlocks) 968 e.GET("/xrpc/com.atproto.sync.getCheckout", s.HandleComAtprotoSyncGetCheckout) 969 e.GET("/xrpc/com.atproto.sync.getHead", s.HandleComAtprotoSyncGetHead) 970 + e.GET("/xrpc/com.atproto.sync.getLatestCommit", s.HandleComAtprotoSyncGetLatestCommit) 971 e.GET("/xrpc/com.atproto.sync.getRecord", s.HandleComAtprotoSyncGetRecord) 972 e.GET("/xrpc/com.atproto.sync.getRepo", s.HandleComAtprotoSyncGetRepo) 973 e.GET("/xrpc/com.atproto.sync.listBlobs", s.HandleComAtprotoSyncListBlobs) 974 e.GET("/xrpc/com.atproto.sync.listRepos", s.HandleComAtprotoSyncListRepos) 975 e.POST("/xrpc/com.atproto.sync.notifyOfUpdate", s.HandleComAtprotoSyncNotifyOfUpdate) 976 e.POST("/xrpc/com.atproto.sync.requestCrawl", s.HandleComAtprotoSyncRequestCrawl) 977 + e.POST("/xrpc/com.atproto.temp.upgradeRepoVersion", s.HandleComAtprotoTempUpgradeRepoVersion) 978 return nil 979 } 980 ··· 1196 return c.JSON(200, out) 1197 } 1198 1199 func (s *Server) HandleComAtprotoAdminResolveModerationReports(c echo.Context) error { 1200 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoAdminResolveModerationReports") 1201 defer span.End() ··· 1546 return c.JSON(200, out) 1547 } 1548 1549 func (s *Server) HandleComAtprotoRepoUploadBlob(c echo.Context) error { 1550 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoRepoUploadBlob") 1551 defer span.End() ··· 1864 func (s *Server) HandleComAtprotoSyncGetCheckout(c echo.Context) error { 1865 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetCheckout") 1866 defer span.End() 1867 did := c.QueryParam("did") 1868 var out io.Reader 1869 var handleErr error 1870 + // func (s *Server) handleComAtprotoSyncGetCheckout(ctx context.Context,did string) (io.Reader, error) 1871 + out, handleErr = s.handleComAtprotoSyncGetCheckout(ctx, did) 1872 if handleErr != nil { 1873 return handleErr 1874 } 1875 return c.Stream(200, "application/vnd.ipld.car", out) 1876 } 1877 1878 + func (s *Server) HandleComAtprotoSyncGetHead(c echo.Context) error { 1879 + ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetHead") 1880 defer span.End() 1881 did := c.QueryParam("did") 1882 + var out *comatprototypes.SyncGetHead_Output 1883 var handleErr error 1884 + // func (s *Server) handleComAtprotoSyncGetHead(ctx context.Context,did string) (*comatprototypes.SyncGetHead_Output, 
error) 1885 + out, handleErr = s.handleComAtprotoSyncGetHead(ctx, did) 1886 if handleErr != nil { 1887 return handleErr 1888 } 1889 return c.JSON(200, out) 1890 } 1891 1892 + func (s *Server) HandleComAtprotoSyncGetLatestCommit(c echo.Context) error { 1893 + ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetLatestCommit") 1894 defer span.End() 1895 did := c.QueryParam("did") 1896 + var out *comatprototypes.SyncGetLatestCommit_Output 1897 var handleErr error 1898 + // func (s *Server) handleComAtprotoSyncGetLatestCommit(ctx context.Context,did string) (*comatprototypes.SyncGetLatestCommit_Output, error) 1899 + out, handleErr = s.handleComAtprotoSyncGetLatestCommit(ctx, did) 1900 if handleErr != nil { 1901 return handleErr 1902 } ··· 1924 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncGetRepo") 1925 defer span.End() 1926 did := c.QueryParam("did") 1927 + since := c.QueryParam("since") 1928 var out io.Reader 1929 var handleErr error 1930 + // func (s *Server) handleComAtprotoSyncGetRepo(ctx context.Context,did string,since string) (io.Reader, error) 1931 + out, handleErr = s.handleComAtprotoSyncGetRepo(ctx, did, since) 1932 if handleErr != nil { 1933 return handleErr 1934 } ··· 1938 func (s *Server) HandleComAtprotoSyncListBlobs(c echo.Context) error { 1939 ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoSyncListBlobs") 1940 defer span.End() 1941 + cursor := c.QueryParam("cursor") 1942 did := c.QueryParam("did") 1943 + 1944 + var limit int 1945 + if p := c.QueryParam("limit"); p != "" { 1946 + var err error 1947 + limit, err = strconv.Atoi(p) 1948 + if err != nil { 1949 + return err 1950 + } 1951 + } else { 1952 + limit = 500 1953 + } 1954 + since := c.QueryParam("since") 1955 var out *comatprototypes.SyncListBlobs_Output 1956 var handleErr error 1957 + // func (s *Server) handleComAtprotoSyncListBlobs(ctx context.Context,cursor string,did string,limit int,since string) (*comatprototypes.SyncListBlobs_Output, error) 1958 + out, handleErr = s.handleComAtprotoSyncListBlobs(ctx, cursor, did, limit, since) 1959 if handleErr != nil { 1960 return handleErr 1961 } ··· 2020 } 2021 return nil 2022 } 2023 + 2024 + func (s *Server) HandleComAtprotoTempUpgradeRepoVersion(c echo.Context) error { 2025 + ctx, span := otel.Tracer("server").Start(c.Request().Context(), "HandleComAtprotoTempUpgradeRepoVersion") 2026 + defer span.End() 2027 + 2028 + var body comatprototypes.TempUpgradeRepoVersion_Input 2029 + if err := c.Bind(&body); err != nil { 2030 + return err 2031 + } 2032 + var handleErr error 2033 + // func (s *Server) handleComAtprotoTempUpgradeRepoVersion(ctx context.Context,body *comatprototypes.TempUpgradeRepoVersion_Input) error 2034 + handleErr = s.handleComAtprotoTempUpgradeRepoVersion(ctx, &body) 2035 + if handleErr != nil { 2036 + return handleErr 2037 + } 2038 + return nil 2039 + }
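The regenerated stubs above handle optional `limit` query parameters the same way everywhere: an empty value means the endpoint default (50 for getActorLikes, 500 for listBlobs), and anything else must parse as an integer. A small standalone sketch of that pattern, with `intQueryParam` as a hypothetical helper rather than code from this repo:

```go
package main

import (
	"fmt"
	"strconv"
)

// intQueryParam mirrors the pattern in the generated stubs for optional
// integer query parameters: empty means "use the endpoint default", anything
// else must parse as an int.
func intQueryParam(raw string, def int) (int, error) {
	if raw == "" {
		return def, nil
	}
	return strconv.Atoi(raw)
}

func main() {
	limit, err := intQueryParam("", 500) // e.g. listBlobs with no ?limit
	if err != nil {
		panic(err)
	}
	fmt.Println(limit) // 500

	limit, err = intQueryParam("25", 50) // e.g. getActorLikes with ?limit=25
	if err != nil {
		panic(err)
	}
	fmt.Println(limit) // 25
}
```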
+3 -0
plc/fakedid.go
··· 4 "context" 5 "crypto/rand" 6 "encoding/hex" 7 8 "github.com/whyrusleeping/go-did" 9 "gorm.io/gorm" ··· 32 if err := fd.db.First(&rec, "did = ?", udid).Error; err != nil { 33 return nil, err 34 } 35 36 d, err := did.ParseDID(rec.Did) 37 if err != nil {
··· 4 "context" 5 "crypto/rand" 6 "encoding/hex" 7 + "fmt" 8 9 "github.com/whyrusleeping/go-did" 10 "gorm.io/gorm" ··· 33 if err := fd.db.First(&rec, "did = ?", udid).Error; err != nil { 34 return nil, err 35 } 36 + 37 + fmt.Println("GET DOCUMENT: ", udid, rec.Handle, rec.Service) 38 39 d, err := did.ParseDID(rec.Did) 40 if err != nil {
+4 -4
repo/cbor_gen.go
··· 68 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("rev"))); err != nil { 69 return err 70 } 71 - if _, err := io.WriteString(w, string("rev")); err != nil { 72 return err 73 } 74 ··· 79 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Rev))); err != nil { 80 return err 81 } 82 - if _, err := io.WriteString(w, string(t.Rev)); err != nil { 83 return err 84 } 85 } ··· 373 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("rev"))); err != nil { 374 return err 375 } 376 - if _, err := io.WriteString(w, string("rev")); err != nil { 377 return err 378 } 379 ··· 384 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Rev))); err != nil { 385 return err 386 } 387 - if _, err := io.WriteString(w, string(t.Rev)); err != nil { 388 return err 389 } 390 }
··· 68 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("rev"))); err != nil { 69 return err 70 } 71 + if _, err := cw.WriteString(string("rev")); err != nil { 72 return err 73 } 74 ··· 79 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Rev))); err != nil { 80 return err 81 } 82 + if _, err := cw.WriteString(string(t.Rev)); err != nil { 83 return err 84 } 85 } ··· 373 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len("rev"))); err != nil { 374 return err 375 } 376 + if _, err := cw.WriteString(string("rev")); err != nil { 377 return err 378 } 379 ··· 384 if err := cw.WriteMajorTypeHeader(cbg.MajTextString, uint64(len(t.Rev))); err != nil { 385 return err 386 } 387 + if _, err := cw.WriteString(string(t.Rev)); err != nil { 388 return err 389 } 390 }
+7 -7
repo/repo.go
··· 253 } 254 255 // creates and writes a new SignedCommit for this repo, with `prev` pointing to old value 256 - func (r *Repo) Commit(ctx context.Context, signer func(context.Context, string, []byte) ([]byte, error)) (cid.Cid, error) { 257 ctx, span := otel.Tracer("repo").Start(ctx, "Commit") 258 defer span.End() 259 260 t, err := r.getMst(ctx) 261 if err != nil { 262 - return cid.Undef, err 263 } 264 265 rcid, err := t.GetPointer(ctx) 266 if err != nil { 267 - return cid.Undef, err 268 } 269 270 ncom := UnsignedCommit{ ··· 276 277 sb, err := ncom.BytesForSigning() 278 if err != nil { 279 - return cid.Undef, fmt.Errorf("failed to serialize commit: %w", err) 280 } 281 sig, err := signer(ctx, ncom.Did, sb) 282 if err != nil { 283 - return cid.Undef, fmt.Errorf("failed to sign root: %w", err) 284 } 285 286 nsc := SignedCommit{ ··· 294 295 nsccid, err := r.cst.Put(ctx, &nsc) 296 if err != nil { 297 - return cid.Undef, err 298 } 299 300 r.sc = nsc 301 r.dirty = false 302 303 - return nsccid, nil 304 } 305 306 func (r *Repo) getMst(ctx context.Context) (*mst.MerkleSearchTree, error) {
··· 253 } 254 255 // creates and writes a new SignedCommit for this repo, with `prev` pointing to old value 256 + func (r *Repo) Commit(ctx context.Context, signer func(context.Context, string, []byte) ([]byte, error)) (cid.Cid, string, error) { 257 ctx, span := otel.Tracer("repo").Start(ctx, "Commit") 258 defer span.End() 259 260 t, err := r.getMst(ctx) 261 if err != nil { 262 + return cid.Undef, "", err 263 } 264 265 rcid, err := t.GetPointer(ctx) 266 if err != nil { 267 + return cid.Undef, "", err 268 } 269 270 ncom := UnsignedCommit{ ··· 276 277 sb, err := ncom.BytesForSigning() 278 if err != nil { 279 + return cid.Undef, "", fmt.Errorf("failed to serialize commit: %w", err) 280 } 281 sig, err := signer(ctx, ncom.Did, sb) 282 if err != nil { 283 + return cid.Undef, "", fmt.Errorf("failed to sign root: %w", err) 284 } 285 286 nsc := SignedCommit{ ··· 294 295 nsccid, err := r.cst.Put(ctx, &nsc) 296 if err != nil { 297 + return cid.Undef, "", err 298 } 299 300 r.sc = nsc 301 r.dirty = false 302 303 + return nsccid, nsc.Rev, nil 304 } 305 306 func (r *Repo) getMst(ctx context.Context) (*mst.MerkleSearchTree, error) {
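With the signature change above, Commit now hands back the commit's rev (a TID string) alongside the signed root CID, and callers are expected to pass both through when closing their carstore delta session, as the repomgr and test changes below do. A compile-only sketch of that caller pattern, using our own `committer` interface and `commitAndClose` helper rather than the real indigo types:

```go
package sketch

import (
	"context"

	"github.com/ipfs/go-cid"
)

// committer captures only the new shape of Commit: the signed root CID comes
// back together with the commit's rev. This interface is an illustration, not
// API from the indigo packages.
type committer interface {
	Commit(ctx context.Context, signer func(context.Context, string, []byte) ([]byte, error)) (cid.Cid, string, error)
}

// commitAndClose sketches the caller-side pattern: commit the repo, then hand
// both the new root and the new rev to the carstore session when closing it.
func commitAndClose(
	ctx context.Context,
	r committer,
	sign func(context.Context, string, []byte) ([]byte, error),
	closeWithRoot func(context.Context, cid.Cid, string) ([]byte, error),
) ([]byte, error) {
	root, rev, err := r.Commit(ctx, sign)
	if err != nil {
		return nil, err
	}
	return closeWithRoot(ctx, root, rev)
}
```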
+13 -63
repomgr/ingest_test.go
··· 64 defer fi.Close() 65 66 ctx := context.TODO() 67 - if err := repoman.ImportNewRepo(ctx, 2, "", fi, cid.Undef); err != nil { 68 t.Fatal(err) 69 } 70 } ··· 116 } 117 cs2 := testCarstore(t, dir2) 118 119 ctx := context.TODO() 120 - var prev *cid.Cid 121 for i := 0; i < 5; i++ { 122 - slice, head, tid := doPost(t, cs2, did, prev, i) 123 124 ops := []*atproto.SyncSubscribeRepos_RepoOp{ 125 { ··· 128 }, 129 } 130 131 - if err := repoman.HandleExternalUserEvent(ctx, 1, 1, did, prev, slice, ops); err != nil { 132 t.Fatal(err) 133 } 134 135 - prev = &head 136 } 137 138 - latest := *prev 139 - 140 // now do a few outside of the standard event stream flow 141 for i := 0; i < 5; i++ { 142 - _, head, _ := doPost(t, cs2, did, prev, i) 143 - prev = &head 144 } 145 146 buf := new(bytes.Buffer) 147 - if err := cs2.ReadUserCar(ctx, 1, latest, *prev, true, buf); err != nil { 148 t.Fatal(err) 149 } 150 151 - if err := repoman.ImportNewRepo(ctx, 1, did, buf, latest); err != nil { 152 t.Fatal(err) 153 } 154 } 155 156 - func doPost(t *testing.T, cs *carstore.CarStore, did string, prev *cid.Cid, postid int) ([]byte, cid.Cid, string) { 157 ctx := context.TODO() 158 ds, err := cs.NewDeltaSession(ctx, 1, prev) 159 if err != nil { ··· 169 t.Fatal(err) 170 } 171 172 - root, err := r.Commit(ctx, func(context.Context, string, []byte) ([]byte, error) { return nil, nil }) 173 - if err != nil { 174 - t.Fatal(err) 175 - } 176 - 177 - slice, err := ds.CloseWithRoot(ctx, root) 178 - if err != nil { 179 - t.Fatal(err) 180 - } 181 - 182 - return slice, root, tid 183 - } 184 - 185 - func TestRebase(t *testing.T) { 186 - dir, err := os.MkdirTemp("", "integtest") 187 if err != nil { 188 t.Fatal(err) 189 } 190 191 - maindb, err := gorm.Open(sqlite.Open(filepath.Join(dir, "test.sqlite"))) 192 if err != nil { 193 t.Fatal(err) 194 } 195 - maindb.AutoMigrate(models.ActorInfo{}) 196 197 - did := "did:plc:beepboop" 198 - maindb.Create(&models.ActorInfo{ 199 - Did: did, 200 - Uid: 1, 201 - }) 202 - 203 - cs := testCarstore(t, dir) 204 - 205 - repoman := NewRepoManager(cs, &util.FakeKeyManager{}) 206 - 207 - ctx := context.TODO() 208 - if err := repoman.InitNewActor(ctx, 1, "hello.world", "did:plc:foobar", "", "", ""); err != nil { 209 - t.Fatal(err) 210 - } 211 - 212 - for i := 0; i < 5; i++ { 213 - _, _, err := repoman.CreateRecord(ctx, 1, "app.bsky.feed.post", &bsky.FeedPost{ 214 - Text: fmt.Sprintf("hello friend %d", i), 215 - }) 216 - if err != nil { 217 - t.Fatal(err) 218 - } 219 - } 220 - 221 - if err := repoman.DoRebase(ctx, 1); err != nil { 222 - t.Fatal(err) 223 - } 224 - 225 - _, _, err = repoman.CreateRecord(ctx, 1, "app.bsky.feed.post", &bsky.FeedPost{ 226 - Text: "after the rebase", 227 - }) 228 - if err != nil { 229 - t.Fatal(err) 230 - } 231 } 232 233 func TestDuplicateRecord(t *testing.T) {
··· 64 defer fi.Close() 65 66 ctx := context.TODO() 67 + if err := repoman.ImportNewRepo(ctx, 2, "", fi, nil); err != nil { 68 t.Fatal(err) 69 } 70 } ··· 116 } 117 cs2 := testCarstore(t, dir2) 118 119 + var since *string 120 ctx := context.TODO() 121 for i := 0; i < 5; i++ { 122 + slice, _, nrev, tid := doPost(t, cs2, did, since, i) 123 124 ops := []*atproto.SyncSubscribeRepos_RepoOp{ 125 { ··· 128 }, 129 } 130 131 + if err := repoman.HandleExternalUserEvent(ctx, 1, 1, did, since, nrev, slice, ops); err != nil { 132 t.Fatal(err) 133 } 134 135 + since = &nrev 136 } 137 138 // now do a few outside of the standard event stream flow 139 for i := 0; i < 5; i++ { 140 + _, _, nrev, _ := doPost(t, cs2, did, since, i) 141 + since = &nrev 142 } 143 144 buf := new(bytes.Buffer) 145 + if err := cs2.ReadUserCar(ctx, 1, "", true, buf); err != nil { 146 t.Fatal(err) 147 } 148 149 + if err := repoman.ImportNewRepo(ctx, 1, did, buf, nil); err != nil { 150 t.Fatal(err) 151 } 152 } 153 154 + func doPost(t *testing.T, cs *carstore.CarStore, did string, prev *string, postid int) ([]byte, cid.Cid, string, string) { 155 ctx := context.TODO() 156 ds, err := cs.NewDeltaSession(ctx, 1, prev) 157 if err != nil { ··· 167 t.Fatal(err) 168 } 169 170 + root, nrev, err := r.Commit(ctx, func(context.Context, string, []byte) ([]byte, error) { return nil, nil }) 171 if err != nil { 172 t.Fatal(err) 173 } 174 175 + slice, err := ds.CloseWithRoot(ctx, root, nrev) 176 if err != nil { 177 t.Fatal(err) 178 } 179 180 + return slice, root, nrev, tid 181 } 182 183 func TestDuplicateRecord(t *testing.T) {
+95 -271
repomgr/repomgr.go
··· 16 "github.com/bluesky-social/indigo/models" 17 "github.com/bluesky-social/indigo/mst" 18 "github.com/bluesky-social/indigo/repo" 19 - "github.com/bluesky-social/indigo/util" 20 21 "github.com/ipfs/go-cid" 22 "github.com/ipfs/go-datastore" ··· 70 User models.Uid 71 OldRoot *cid.Cid 72 NewRoot cid.Cid 73 RepoSlice []byte 74 PDS uint 75 Ops []RepoOp 76 - Rebase bool 77 } 78 79 type RepoOp struct { ··· 146 unlock := rm.lockUser(ctx, user) 147 defer unlock() 148 149 - head, err := rm.cs.GetUserRepoHead(ctx, user) 150 if err != nil { 151 return "", cid.Undef, err 152 } 153 154 - ds, err := rm.cs.NewDeltaSession(ctx, user, &head) 155 if err != nil { 156 return "", cid.Undef, err 157 } 158 159 r, err := repo.OpenRepo(ctx, ds, head, true) 160 if err != nil { 161 return "", cid.Undef, err ··· 166 return "", cid.Undef, err 167 } 168 169 - nroot, err := r.Commit(ctx, rm.kmgr.SignForUser) 170 if err != nil { 171 return "", cid.Undef, err 172 } 173 174 - rslice, err := ds.CloseWithRoot(ctx, nroot) 175 if err != nil { 176 return "", cid.Undef, fmt.Errorf("close with root: %w", err) 177 } ··· 186 User: user, 187 OldRoot: oldroot, 188 NewRoot: nroot, 189 Ops: []RepoOp{{ 190 Kind: EvtKindCreateRecord, 191 Collection: collection, ··· 207 unlock := rm.lockUser(ctx, user) 208 defer unlock() 209 210 - head, err := rm.cs.GetUserRepoHead(ctx, user) 211 if err != nil { 212 return cid.Undef, err 213 } 214 215 - ds, err := rm.cs.NewDeltaSession(ctx, user, &head) 216 if err != nil { 217 return cid.Undef, err 218 } 219 220 r, err := repo.OpenRepo(ctx, ds, head, true) 221 if err != nil { 222 return cid.Undef, err ··· 228 return cid.Undef, err 229 } 230 231 - nroot, err := r.Commit(ctx, rm.kmgr.SignForUser) 232 if err != nil { 233 return cid.Undef, err 234 } 235 236 - rslice, err := ds.CloseWithRoot(ctx, nroot) 237 if err != nil { 238 return cid.Undef, fmt.Errorf("close with root: %w", err) 239 } ··· 248 User: user, 249 OldRoot: oldroot, 250 NewRoot: nroot, 251 Ops: []RepoOp{{ 252 Kind: EvtKindUpdateRecord, 253 Collection: collection, ··· 269 unlock := rm.lockUser(ctx, user) 270 defer unlock() 271 272 - head, err := rm.cs.GetUserRepoHead(ctx, user) 273 if err != nil { 274 return err 275 } 276 277 - ds, err := rm.cs.NewDeltaSession(ctx, user, &head) 278 if err != nil { 279 return err 280 } 281 282 r, err := repo.OpenRepo(ctx, ds, head, true) 283 if err != nil { 284 return err ··· 289 return err 290 } 291 292 - nroot, err := r.Commit(ctx, rm.kmgr.SignForUser) 293 if err != nil { 294 return err 295 } 296 297 - rslice, err := ds.CloseWithRoot(ctx, nroot) 298 if err != nil { 299 return fmt.Errorf("close with root: %w", err) 300 } ··· 309 User: user, 310 OldRoot: oldroot, 311 NewRoot: nroot, 312 Ops: []RepoOp{{ 313 Kind: EvtKindDeleteRecord, 314 Collection: collection, ··· 350 return fmt.Errorf("setting initial actor profile: %w", err) 351 } 352 353 - root, err := r.Commit(ctx, rm.kmgr.SignForUser) 354 if err != nil { 355 return fmt.Errorf("committing repo for actor init: %w", err) 356 } 357 358 - rslice, err := ds.CloseWithRoot(ctx, root) 359 if err != nil { 360 return fmt.Errorf("close with root: %w", err) 361 } ··· 364 rm.events(ctx, &RepoEvent{ 365 User: user, 366 NewRoot: root, 367 Ops: []RepoOp{{ 368 Kind: EvtKindCreateRecord, 369 Collection: "app.bsky.actor.profile", ··· 384 return rm.cs.GetUserRepoHead(ctx, user) 385 } 386 387 - func (rm *RepoManager) ReadRepo(ctx context.Context, user models.Uid, earlyCid, lateCid cid.Cid, w io.Writer) error { 388 - return rm.cs.ReadUserCar(ctx, user, earlyCid, lateCid, true, w) 
389 } 390 391 func (rm *RepoManager) GetRecord(ctx context.Context, user models.Uid, collection string, rkey string, maybeCid cid.Cid) (cid.Cid, cbg.CBORMarshaler, error) { ··· 445 return ap, nil 446 } 447 448 - var ErrUncleanRebase = fmt.Errorf("unclean rebase") 449 - 450 - func (rm *RepoManager) HandleRebase(ctx context.Context, pdsid uint, uid models.Uid, did string, prev *cid.Cid, commit cid.Cid, carslice []byte) error { 451 - ctx, span := otel.Tracer("repoman").Start(ctx, "HandleRebase") 452 - defer span.End() 453 - 454 - log.Infow("HandleRebase", "pds", pdsid, "uid", uid, "commit", commit) 455 - 456 - unlock := rm.lockUser(ctx, uid) 457 - defer unlock() 458 - 459 - ro, err := rm.cs.ReadOnlySession(uid) 460 - if err != nil { 461 - return err 462 - } 463 - 464 - head, err := rm.cs.GetUserRepoHead(ctx, uid) 465 - if err != nil { 466 - return err 467 - } 468 - 469 - // TODO: do we allow prev to be nil in any case here? 470 - if prev != nil { 471 - if *prev != head { 472 - log.Warnw("rebase 'prev' value did not match our latest head for repo", "did", did, "rprev", prev.String(), "lprev", head.String()) 473 - } 474 - } 475 - 476 - currepo, err := repo.OpenRepo(ctx, ro, head, true) 477 - if err != nil { 478 - return err 479 - } 480 - 481 - olddc := currepo.DataCid() 482 - 483 - root, ds, err := rm.cs.ImportSlice(ctx, uid, nil, carslice) 484 - if err != nil { 485 - return fmt.Errorf("importing external carslice: %w", err) 486 - } 487 - 488 - r, err := repo.OpenRepo(ctx, ds, root, true) 489 - if err != nil { 490 - return fmt.Errorf("opening external user repo (%d, root=%s): %w", uid, root, err) 491 - } 492 - 493 - if r.DataCid() != olddc { 494 - return ErrUncleanRebase 495 - } 496 - 497 - if err := rm.CheckRepoSig(ctx, r, did); err != nil { 498 - return err 499 - } 500 - 501 - // TODO: this is moderately expensive and currently results in the users 502 - // entire repo being held in memory 503 - if err := r.CopyDataTo(ctx, ds); err != nil { 504 - return err 505 - } 506 - 507 - if err := ds.CloseAsRebase(ctx, root); err != nil { 508 - return fmt.Errorf("finalizing rebase: %w", err) 509 - } 510 - 511 - if rm.events != nil { 512 - rm.events(ctx, &RepoEvent{ 513 - User: uid, 514 - OldRoot: prev, 515 - NewRoot: root, 516 - Ops: nil, 517 - RepoSlice: carslice, 518 - PDS: pdsid, 519 - Rebase: true, 520 - }) 521 - } 522 - 523 - return nil 524 - } 525 - 526 - func (rm *RepoManager) DoRebase(ctx context.Context, uid models.Uid) error { 527 - ctx, span := otel.Tracer("repoman").Start(ctx, "DoRebase") 528 - defer span.End() 529 - 530 - log.Infow("DoRebase", "uid", uid) 531 - 532 - unlock := rm.lockUser(ctx, uid) 533 - defer unlock() 534 - 535 - ds, err := rm.cs.NewDeltaSession(ctx, uid, nil) 536 - if err != nil { 537 - return err 538 - } 539 - 540 - head, err := rm.cs.GetUserRepoHead(ctx, uid) 541 - if err != nil { 542 - return err 543 - } 544 - 545 - r, err := repo.OpenRepo(ctx, ds, head, true) 546 - if err != nil { 547 - return err 548 - } 549 - 550 - r.Truncate() 551 - 552 - nroot, err := r.Commit(ctx, rm.kmgr.SignForUser) 553 - if err != nil { 554 - return err 555 - } 556 - 557 - if err := r.CopyDataTo(ctx, ds); err != nil { 558 - return err 559 - } 560 - 561 - if err := ds.CloseAsRebase(ctx, nroot); err != nil { 562 - return fmt.Errorf("finalizing rebase: %w", err) 563 - } 564 - 565 - // outbound car slice should just be the new signed root 566 - buf := new(bytes.Buffer) 567 - if _, err := carstore.WriteCarHeader(buf, nroot); err != nil { 568 - return err 569 - } 570 - 571 - robj, err := ds.Get(ctx, 
nroot) 572 - if err != nil { 573 - return err 574 - } 575 - _, err = carstore.LdWrite(buf, robj.Cid().Bytes(), robj.RawData()) 576 - if err != nil { 577 - return err 578 - } 579 - 580 - if rm.events != nil { 581 - rm.events(ctx, &RepoEvent{ 582 - User: uid, 583 - OldRoot: &head, 584 - NewRoot: nroot, 585 - Ops: nil, 586 - RepoSlice: buf.Bytes(), 587 - PDS: 0, 588 - Rebase: true, 589 - }) 590 - } 591 - 592 - return nil 593 - } 594 - 595 func (rm *RepoManager) CheckRepoSig(ctx context.Context, r *repo.Repo, expdid string) error { 596 ctx, span := otel.Tracer("repoman").Start(ctx, "CheckRepoSig") 597 defer span.End() ··· 615 return nil 616 } 617 618 - func (rm *RepoManager) HandleExternalUserEvent(ctx context.Context, pdsid uint, uid models.Uid, did string, prev *cid.Cid, carslice []byte, ops []*atproto.SyncSubscribeRepos_RepoOp) error { 619 ctx, span := otel.Tracer("repoman").Start(ctx, "HandleExternalUserEvent") 620 defer span.End() 621 622 - log.Infow("HandleExternalUserEvent", "pds", pdsid, "uid", uid, "prev", prev) 623 624 unlock := rm.lockUser(ctx, uid) 625 defer unlock() 626 627 - root, ds, err := rm.cs.ImportSlice(ctx, uid, prev, carslice) 628 if err != nil { 629 return fmt.Errorf("importing external carslice: %w", err) 630 } ··· 684 } 685 } 686 687 - rslice, err := ds.CloseWithRoot(ctx, root) 688 if err != nil { 689 return fmt.Errorf("close with root: %w", err) 690 } 691 692 if rm.events != nil { 693 rm.events(ctx, &RepoEvent{ 694 - User: uid, 695 - OldRoot: prev, 696 NewRoot: root, 697 Ops: evtops, 698 RepoSlice: rslice, 699 PDS: pdsid, ··· 714 unlock := rm.lockUser(ctx, user) 715 defer unlock() 716 717 - head, err := rm.cs.GetUserRepoHead(ctx, user) 718 if err != nil { 719 return err 720 } 721 722 - ds, err := rm.cs.NewDeltaSession(ctx, user, &head) 723 if err != nil { 724 return err 725 } 726 727 r, err := repo.OpenRepo(ctx, ds, head, true) 728 if err != nil { 729 return err ··· 786 } 787 } 788 789 - nroot, err := r.Commit(ctx, rm.kmgr.SignForUser) 790 if err != nil { 791 return err 792 } 793 794 - rslice, err := ds.CloseWithRoot(ctx, nroot) 795 if err != nil { 796 return fmt.Errorf("close with root: %w", err) 797 } ··· 807 OldRoot: oldroot, 808 NewRoot: nroot, 809 RepoSlice: rslice, 810 Ops: ops, 811 }) 812 } ··· 814 return nil 815 } 816 817 - func (rm *RepoManager) ImportNewRepo(ctx context.Context, user models.Uid, repoDid string, r io.Reader, oldest cid.Cid) error { 818 ctx, span := otel.Tracer("repoman").Start(ctx, "ImportNewRepo") 819 defer span.End() 820 821 unlock := rm.lockUser(ctx, user) 822 defer unlock() 823 824 - head, err := rm.cs.GetUserRepoHead(ctx, user) 825 if err != nil { 826 return err 827 } 828 829 - if head != oldest { 830 // TODO: we could probably just deal with this 831 return fmt.Errorf("ImportNewRepo called with incorrect base") 832 } 833 834 - err = rm.processNewRepo(ctx, user, r, head, func(ctx context.Context, old, nu cid.Cid, finish func(context.Context) ([]byte, error), bs blockstore.Blockstore) error { 835 - r, err := repo.OpenRepo(ctx, bs, nu, true) 836 if err != nil { 837 return fmt.Errorf("opening new repo: %w", err) 838 } ··· 848 return fmt.Errorf("new user signature check failed: %w", err) 849 } 850 851 - diffops, err := r.DiffSince(ctx, old) 852 if err != nil { 853 return fmt.Errorf("diff trees: %w", err) 854 } ··· 865 } 866 } 867 868 - slice, err := finish(ctx) 869 if err != nil { 870 return err 871 } 872 873 - var oldroot *cid.Cid 874 - if old.Defined() { 875 - oldroot = &old 876 - } 877 - 878 if rm.events != nil { 879 rm.events(ctx, 
&RepoEvent{ 880 - User: user, 881 - OldRoot: oldroot, 882 - NewRoot: nu, 883 RepoSlice: slice, 884 Ops: ops, 885 }) ··· 888 return nil 889 }) 890 if err != nil { 891 - return fmt.Errorf("process new repo (current head: %s): %w:", head, err) 892 } 893 894 return nil ··· 944 } 945 } 946 947 - func (rm *RepoManager) processNewRepo(ctx context.Context, user models.Uid, r io.Reader, until cid.Cid, cb func(ctx context.Context, old, nu cid.Cid, finish func(context.Context) ([]byte, error), bs blockstore.Blockstore) error) error { 948 ctx, span := otel.Tracer("repoman").Start(ctx, "processNewRepo") 949 defer span.End() 950 ··· 973 } 974 } 975 976 - head := &carr.Header.Roots[0] 977 - 978 - var commits []cid.Cid 979 - for head != nil && *head != until { 980 - commits = append(commits, *head) 981 - rep, err := repo.OpenRepo(ctx, membs, *head, true) 982 - if err != nil { 983 - return fmt.Errorf("opening repo for backwalk (%d commits, until: %s, head: %s, carRoot: %s): %w", len(commits), until, *head, carr.Header.Roots[0], err) 984 - } 985 - 986 - prev, err := rep.PrevCommit(ctx) 987 - if err != nil { 988 - return fmt.Errorf("prevCommit: %w", err) 989 - } 990 - 991 - head = prev 992 - } 993 - 994 - if until.Defined() && (head == nil || *head != until) { 995 - // TODO: this shouldnt be happening, but i've seen some log messages 996 - // suggest that it might. Leaving this here to discover any cases where 997 - // it does. 998 - log.Errorw("reached end of walkback without finding our 'until' commit", 999 - "until", until, 1000 - "root", carr.Header.Roots[0], 1001 - "commits", len(commits), 1002 - "head", head, 1003 - "user", user, 1004 - ) 1005 - } 1006 - 1007 - // now we need to generate repo slices for each commit 1008 - 1009 seen := make(map[cid.Cid]bool) 1010 1011 - if until.Defined() { 1012 - seen[until] = true 1013 } 1014 1015 - cbs := membs 1016 - if until.Defined() { 1017 - bs, err := rm.cs.ReadOnlySession(user) 1018 - if err != nil { 1019 - return err 1020 - } 1021 - 1022 - // TODO: we technically only need this for the 'next' commit to diff against our current head. 1023 - cbs = util.NewReadThroughBstore(bs, membs) 1024 } 1025 1026 - prev := until 1027 - for i := len(commits) - 1; i >= 0; i-- { 1028 - root := commits[i] 1029 - // TODO: if there are blocks that get convergently recreated throughout 1030 - // the repos lifecycle, this will end up erroneously not including 1031 - // them. 
We should compute the set of blocks needed to read any repo 1032 - // ops that happened in the commit and use that for our 'output' blocks 1033 - cids, err := walkTree(ctx, seen, root, membs, true) 1034 if err != nil { 1035 - return fmt.Errorf("walkTree: %w", err) 1036 } 1037 1038 - var prevptr *cid.Cid 1039 - if prev.Defined() { 1040 - prevptr = &prev 1041 } 1042 - ds, err := rm.cs.NewDeltaSession(ctx, user, prevptr) 1043 - if err != nil { 1044 - return fmt.Errorf("opening delta session (%d / %d): %w", i, len(commits)-1, err) 1045 - } 1046 1047 - for _, c := range cids { 1048 - blk, err := membs.Get(ctx, c) 1049 - if err != nil { 1050 - return fmt.Errorf("copying walked cids to carstore: %w", err) 1051 - } 1052 - 1053 - if err := ds.Put(ctx, blk); err != nil { 1054 - return err 1055 - } 1056 - } 1057 - 1058 - finish := func(ctx context.Context) ([]byte, error) { 1059 - return ds.CloseWithRoot(ctx, root) 1060 - } 1061 1062 - if err := cb(ctx, prev, root, finish, cbs); err != nil { 1063 - return fmt.Errorf("cb errored (%d/%d) root: %s, prev: %s: %w", i, len(commits)-1, root, prev, err) 1064 - } 1065 - 1066 - prev = root 1067 } 1068 1069 return nil
··· 16 "github.com/bluesky-social/indigo/models" 17 "github.com/bluesky-social/indigo/mst" 18 "github.com/bluesky-social/indigo/repo" 19 20 "github.com/ipfs/go-cid" 21 "github.com/ipfs/go-datastore" ··· 69 User models.Uid 70 OldRoot *cid.Cid 71 NewRoot cid.Cid 72 + Since *string 73 + Rev string 74 RepoSlice []byte 75 PDS uint 76 Ops []RepoOp 77 + TooBig bool 78 } 79 80 type RepoOp struct { ··· 147 unlock := rm.lockUser(ctx, user) 148 defer unlock() 149 150 + rev, err := rm.cs.GetUserRepoRev(ctx, user) 151 if err != nil { 152 return "", cid.Undef, err 153 } 154 155 + ds, err := rm.cs.NewDeltaSession(ctx, user, &rev) 156 if err != nil { 157 return "", cid.Undef, err 158 } 159 160 + head := ds.BaseCid() 161 + 162 r, err := repo.OpenRepo(ctx, ds, head, true) 163 if err != nil { 164 return "", cid.Undef, err ··· 169 return "", cid.Undef, err 170 } 171 172 + nroot, nrev, err := r.Commit(ctx, rm.kmgr.SignForUser) 173 if err != nil { 174 return "", cid.Undef, err 175 } 176 177 + fmt.Println("NEW REV: ", nrev) 178 + 179 + rslice, err := ds.CloseWithRoot(ctx, nroot, nrev) 180 if err != nil { 181 return "", cid.Undef, fmt.Errorf("close with root: %w", err) 182 } ··· 191 User: user, 192 OldRoot: oldroot, 193 NewRoot: nroot, 194 + Rev: nrev, 195 + Since: &rev, 196 Ops: []RepoOp{{ 197 Kind: EvtKindCreateRecord, 198 Collection: collection, ··· 214 unlock := rm.lockUser(ctx, user) 215 defer unlock() 216 217 + rev, err := rm.cs.GetUserRepoRev(ctx, user) 218 if err != nil { 219 return cid.Undef, err 220 } 221 222 + ds, err := rm.cs.NewDeltaSession(ctx, user, &rev) 223 if err != nil { 224 return cid.Undef, err 225 } 226 227 + head := ds.BaseCid() 228 r, err := repo.OpenRepo(ctx, ds, head, true) 229 if err != nil { 230 return cid.Undef, err ··· 236 return cid.Undef, err 237 } 238 239 + nroot, nrev, err := r.Commit(ctx, rm.kmgr.SignForUser) 240 if err != nil { 241 return cid.Undef, err 242 } 243 244 + rslice, err := ds.CloseWithRoot(ctx, nroot, nrev) 245 if err != nil { 246 return cid.Undef, fmt.Errorf("close with root: %w", err) 247 } ··· 256 User: user, 257 OldRoot: oldroot, 258 NewRoot: nroot, 259 + Rev: nrev, 260 + Since: &rev, 261 Ops: []RepoOp{{ 262 Kind: EvtKindUpdateRecord, 263 Collection: collection, ··· 279 unlock := rm.lockUser(ctx, user) 280 defer unlock() 281 282 + rev, err := rm.cs.GetUserRepoRev(ctx, user) 283 if err != nil { 284 return err 285 } 286 287 + ds, err := rm.cs.NewDeltaSession(ctx, user, &rev) 288 if err != nil { 289 return err 290 } 291 292 + head := ds.BaseCid() 293 r, err := repo.OpenRepo(ctx, ds, head, true) 294 if err != nil { 295 return err ··· 300 return err 301 } 302 303 + nroot, nrev, err := r.Commit(ctx, rm.kmgr.SignForUser) 304 if err != nil { 305 return err 306 } 307 308 + rslice, err := ds.CloseWithRoot(ctx, nroot, nrev) 309 if err != nil { 310 return fmt.Errorf("close with root: %w", err) 311 } ··· 320 User: user, 321 OldRoot: oldroot, 322 NewRoot: nroot, 323 + Rev: nrev, 324 + Since: &rev, 325 Ops: []RepoOp{{ 326 Kind: EvtKindDeleteRecord, 327 Collection: collection, ··· 363 return fmt.Errorf("setting initial actor profile: %w", err) 364 } 365 366 + root, nrev, err := r.Commit(ctx, rm.kmgr.SignForUser) 367 if err != nil { 368 return fmt.Errorf("committing repo for actor init: %w", err) 369 } 370 371 + rslice, err := ds.CloseWithRoot(ctx, root, nrev) 372 if err != nil { 373 return fmt.Errorf("close with root: %w", err) 374 } ··· 377 rm.events(ctx, &RepoEvent{ 378 User: user, 379 NewRoot: root, 380 + Rev: nrev, 381 Ops: []RepoOp{{ 382 Kind: EvtKindCreateRecord, 383 
Collection: "app.bsky.actor.profile", ··· 398 return rm.cs.GetUserRepoHead(ctx, user) 399 } 400 401 + func (rm *RepoManager) GetRepoRev(ctx context.Context, user models.Uid) (string, error) { 402 + unlock := rm.lockUser(ctx, user) 403 + defer unlock() 404 + 405 + return rm.cs.GetUserRepoRev(ctx, user) 406 + } 407 + 408 + func (rm *RepoManager) ReadRepo(ctx context.Context, user models.Uid, since string, w io.Writer) error { 409 + return rm.cs.ReadUserCar(ctx, user, since, true, w) 410 } 411 412 func (rm *RepoManager) GetRecord(ctx context.Context, user models.Uid, collection string, rkey string, maybeCid cid.Cid) (cid.Cid, cbg.CBORMarshaler, error) { ··· 466 return ap, nil 467 } 468 469 func (rm *RepoManager) CheckRepoSig(ctx context.Context, r *repo.Repo, expdid string) error { 470 ctx, span := otel.Tracer("repoman").Start(ctx, "CheckRepoSig") 471 defer span.End() ··· 489 return nil 490 } 491 492 + func (rm *RepoManager) HandleExternalUserEvent(ctx context.Context, pdsid uint, uid models.Uid, did string, since *string, nrev string, carslice []byte, ops []*atproto.SyncSubscribeRepos_RepoOp) error { 493 ctx, span := otel.Tracer("repoman").Start(ctx, "HandleExternalUserEvent") 494 defer span.End() 495 496 + log.Infow("HandleExternalUserEvent", "pds", pdsid, "uid", uid, "since", since, "nrev", nrev) 497 498 unlock := rm.lockUser(ctx, uid) 499 defer unlock() 500 501 + root, ds, err := rm.cs.ImportSlice(ctx, uid, since, carslice) 502 if err != nil { 503 return fmt.Errorf("importing external carslice: %w", err) 504 } ··· 558 } 559 } 560 561 + rslice, err := ds.CloseWithRoot(ctx, root, nrev) 562 if err != nil { 563 return fmt.Errorf("close with root: %w", err) 564 } 565 566 if rm.events != nil { 567 rm.events(ctx, &RepoEvent{ 568 + User: uid, 569 + //OldRoot: prev, 570 NewRoot: root, 571 + Rev: nrev, 572 + Since: since, 573 Ops: evtops, 574 RepoSlice: rslice, 575 PDS: pdsid, ··· 590 unlock := rm.lockUser(ctx, user) 591 defer unlock() 592 593 + rev, err := rm.cs.GetUserRepoRev(ctx, user) 594 if err != nil { 595 return err 596 } 597 598 + ds, err := rm.cs.NewDeltaSession(ctx, user, &rev) 599 if err != nil { 600 return err 601 } 602 603 + head := ds.BaseCid() 604 r, err := repo.OpenRepo(ctx, ds, head, true) 605 if err != nil { 606 return err ··· 663 } 664 } 665 666 + nroot, nrev, err := r.Commit(ctx, rm.kmgr.SignForUser) 667 if err != nil { 668 return err 669 } 670 671 + rslice, err := ds.CloseWithRoot(ctx, nroot, nrev) 672 if err != nil { 673 return fmt.Errorf("close with root: %w", err) 674 } ··· 684 OldRoot: oldroot, 685 NewRoot: nroot, 686 RepoSlice: rslice, 687 + Rev: nrev, 688 + Since: &rev, 689 Ops: ops, 690 }) 691 } ··· 693 return nil 694 } 695 696 + func (rm *RepoManager) ImportNewRepo(ctx context.Context, user models.Uid, repoDid string, r io.Reader, rev *string) error { 697 ctx, span := otel.Tracer("repoman").Start(ctx, "ImportNewRepo") 698 defer span.End() 699 700 unlock := rm.lockUser(ctx, user) 701 defer unlock() 702 703 + currev, err := rm.cs.GetUserRepoRev(ctx, user) 704 if err != nil { 705 return err 706 } 707 708 + curhead, err := rm.cs.GetUserRepoHead(ctx, user) 709 + if err != nil { 710 + return err 711 + } 712 + 713 + if rev != nil && *rev != currev { 714 // TODO: we could probably just deal with this 715 return fmt.Errorf("ImportNewRepo called with incorrect base") 716 } 717 718 + err = rm.processNewRepo(ctx, user, r, rev, func(ctx context.Context, root cid.Cid, finish func(context.Context, string) ([]byte, error), bs blockstore.Blockstore) error { 719 + r, err := 
repo.OpenRepo(ctx, bs, root, true) 720 if err != nil { 721 return fmt.Errorf("opening new repo: %w", err) 722 } ··· 732 return fmt.Errorf("new user signature check failed: %w", err) 733 } 734 735 + diffops, err := r.DiffSince(ctx, curhead) 736 if err != nil { 737 return fmt.Errorf("diff trees: %w", err) 738 } ··· 749 } 750 } 751 752 + slice, err := finish(ctx, scom.Rev) 753 if err != nil { 754 return err 755 } 756 757 if rm.events != nil { 758 rm.events(ctx, &RepoEvent{ 759 + User: user, 760 + //OldRoot: oldroot, 761 + NewRoot: root, 762 + Rev: scom.Rev, 763 + Since: &currev, 764 RepoSlice: slice, 765 Ops: ops, 766 }) ··· 769 return nil 770 }) 771 if err != nil { 772 + return fmt.Errorf("process new repo (current rev: %s): %w:", currev, err) 773 } 774 775 return nil ··· 825 } 826 } 827 828 + func (rm *RepoManager) processNewRepo(ctx context.Context, user models.Uid, r io.Reader, rev *string, cb func(ctx context.Context, root cid.Cid, finish func(context.Context, string) ([]byte, error), bs blockstore.Blockstore) error) error { 829 ctx, span := otel.Tracer("repoman").Start(ctx, "processNewRepo") 830 defer span.End() 831 ··· 854 } 855 } 856 857 seen := make(map[cid.Cid]bool) 858 859 + root := carr.Header.Roots[0] 860 + // TODO: if there are blocks that get convergently recreated throughout 861 + // the repos lifecycle, this will end up erroneously not including 862 + // them. We should compute the set of blocks needed to read any repo 863 + // ops that happened in the commit and use that for our 'output' blocks 864 + cids, err := walkTree(ctx, seen, root, membs, true) 865 + if err != nil { 866 + return fmt.Errorf("walkTree: %w", err) 867 } 868 869 + ds, err := rm.cs.NewDeltaSession(ctx, user, rev) 870 + if err != nil { 871 + return fmt.Errorf("opening delta session: %w", err) 872 } 873 874 + for _, c := range cids { 875 + blk, err := membs.Get(ctx, c) 876 if err != nil { 877 + return fmt.Errorf("copying walked cids to carstore: %w", err) 878 } 879 880 + if err := ds.Put(ctx, blk); err != nil { 881 + return err 882 } 883 + } 884 885 + finish := func(ctx context.Context, nrev string) ([]byte, error) { 886 + return ds.CloseWithRoot(ctx, root, nrev) 887 + } 888 889 + if err := cb(ctx, root, finish, membs); err != nil { 890 + return fmt.Errorf("cb errored root: %s, rev: %s: %w", root, *rev, err) 891 } 892 893 return nil
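The repomgr diff above is the core of the rev change: delta sessions are now opened against the stored repo rev string instead of a head CID, repo.Repo.Commit returns the new rev alongside the new root, and CloseWithRoot records that rev when the slice is written. Below is a minimal sketch of that write path, assuming it lives in the same package as RepoManager and that the carstore/repo signatures are exactly as this diff uses them (inferred from the calls above, not checked against the packages themselves):

    // Sketch only: the rev-based write path, using just the calls visible in
    // this diff. Record mutation is left to the caller.
    func commitWithRev(ctx context.Context, rm *RepoManager, user models.Uid,
        mutate func(context.Context, *repo.Repo) error) ([]byte, error) {

        // The session base is now the repo's rev string, not a head CID.
        rev, err := rm.cs.GetUserRepoRev(ctx, user)
        if err != nil {
            return nil, err
        }

        ds, err := rm.cs.NewDeltaSession(ctx, user, &rev)
        if err != nil {
            return nil, err
        }

        r, err := repo.OpenRepo(ctx, ds, ds.BaseCid(), true)
        if err != nil {
            return nil, err
        }

        if err := mutate(ctx, r); err != nil {
            return nil, err
        }

        // Commit now returns (new root, new rev, error).
        nroot, nrev, err := r.Commit(ctx, rm.kmgr.SignForUser)
        if err != nil {
            return nil, err
        }

        // The rev is persisted alongside the new root when the session closes;
        // callers put it on the RepoEvent as Rev, with the starting rev as Since.
        return ds.CloseWithRoot(ctx, nroot, nrev)
    }

The create, update, and delete paths above each inline this same sequence and then emit a RepoEvent carrying Rev: nrev and Since: &rev.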
+1 -1
search/server.go
··· 251 } 252 253 func (s *Server) processTooBigCommit(ctx context.Context, evt *comatproto.SyncSubscribeRepos_Commit) error { 254 - repodata, err := comatproto.SyncGetRepo(ctx, s.bgsxrpc, evt.Repo, "", evt.Commit.String()) 255 if err != nil { 256 return err 257 }
··· 251 } 252 253 func (s *Server) processTooBigCommit(ctx context.Context, evt *comatproto.SyncSubscribeRepos_Commit) error { 254 + repodata, err := comatproto.SyncGetRepo(ctx, s.bgsxrpc, evt.Repo, "") 255 if err != nil { 256 return err 257 }
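In the search indexer, the getRepo call drops the old trailing commit-CID argument: per the rest of this change the endpoint now takes a single since rev, and passing "" (as above) asks for the full repo. A hedged usage sketch with a hypothetical sinceRev, for the case where only newer blocks are wanted:

    // Illustrative only: fetch blocks written since a known rev. sinceRev is a
    // placeholder; processTooBigCommit above passes "" to pull the whole repo.
    repodata, err := comatproto.SyncGetRepo(ctx, s.bgsxrpc, evt.Repo, sinceRev)
    if err != nil {
        return fmt.Errorf("sync.getRepo for %s: %w", evt.Repo, err)
    }
    // repodata is the returned CAR bytes.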
+1 -1
testing/car_did_repro_test.go
··· 81 82 // verify MST tree reproduced 83 kmgr := &util.FakeKeyManager{} 84 - _, err = secondRepo.Commit(ctx, kmgr.SignForUser) 85 if err != nil { 86 t.Fatal(err) 87 }
··· 81 82 // verify MST tree reproduced 83 kmgr := &util.FakeKeyManager{} 84 + _, _, err = secondRepo.Commit(ctx, kmgr.SignForUser) 85 if err != nil { 86 t.Fatal(err) 87 }
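repo.Repo.Commit now also returns the revision string, so call sites that only care about the error pick up an extra blank identifier, as in this test. An illustrative variant that additionally sanity-checks the new return value (the non-empty assertion is not part of the original test):

    // Illustrative only: capture the rev as well and check it is set.
    _, rev, err := secondRepo.Commit(ctx, kmgr.SignForUser)
    if err != nil {
        t.Fatal(err)
    }
    if rev == "" {
        t.Fatal("expected Commit to return a non-empty rev")
    }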
+2 -122
testing/integ_test.go
··· 166 time.Sleep(time.Millisecond * 50) 167 168 // Now, the bgs will discover a gap, and have to catch up somehow 169 socialSim(t, users2, 1, 0) 170 171 time.Sleep(time.Second) ··· 228 p2.RequestScraping(t, b1) 229 time.Sleep(time.Millisecond * 50) 230 231 // Now, the bgs will discover a gap, and have to catch up somehow 232 socialSim(t, users2, 1, 0) 233 ··· 330 331 last := es2.Next() 332 assert.Equal(alice.did, last.RepoCommit.Repo) 333 - } 334 - 335 - func TestRebase(t *testing.T) { 336 - if testing.Short() { 337 - t.Skip("skipping BGS test in 'short' test mode") 338 - } 339 - assert := assert.New(t) 340 - didr := TestPLC(t) 341 - p1 := MustSetupPDS(t, ".tpds", didr) 342 - p1.Run(t) 343 - 344 - b1 := MustSetupBGS(t, didr) 345 - b1.Run(t) 346 - 347 - b1.tr.TrialHosts = []string{p1.RawHost()} 348 - 349 - p1.RequestScraping(t, b1) 350 - 351 - time.Sleep(time.Millisecond * 50) 352 - 353 - bob := p1.MustNewUser(t, "bob.tpds") 354 - 355 - bob.Post(t, "cats for cats") 356 - bob.Post(t, "i am the king of the world") 357 - bob.Post(t, "the name is bob") 358 - bob.Post(t, "why cant i eat pie") 359 - 360 - time.Sleep(time.Millisecond * 100) 361 - 362 - evts1 := b1.Events(t, 0) 363 - defer evts1.Cancel() 364 - 365 - preRebaseEvts := evts1.WaitFor(5) 366 - fmt.Println(preRebaseEvts) 367 - 368 - bob.DoRebase(t) 369 - 370 - rbevt := evts1.Next() 371 - assert.Equal(true, rbevt.RepoCommit.Rebase) 372 - 373 - sc := commitFromSlice(t, rbevt.RepoCommit.Blocks, (cid.Cid)(rbevt.RepoCommit.Commit)) 374 - assert.Nil(sc.Prev) 375 - 376 - lev := preRebaseEvts[4] 377 - oldsc := commitFromSlice(t, lev.RepoCommit.Blocks, (cid.Cid)(lev.RepoCommit.Commit)) 378 - 379 - assert.Equal(sc.Data, oldsc.Data) 380 - 381 - evts2 := b1.Events(t, 0) 382 - afterEvts := evts2.WaitFor(1) 383 - assert.Equal(true, afterEvts[0].RepoCommit.Rebase) 384 - } 385 - 386 - func TestRebaseMulti(t *testing.T) { 387 - if testing.Short() { 388 - t.Skip("skipping BGS test in 'short' test mode") 389 - } 390 - assert := assert.New(t) 391 - didr := TestPLC(t) 392 - p1 := MustSetupPDS(t, ".tpds", didr) 393 - p1.Run(t) 394 - 395 - b1 := MustSetupBGS(t, didr) 396 - b1.Run(t) 397 - 398 - b1.tr.TrialHosts = []string{p1.RawHost()} 399 - 400 - p1.RequestScraping(t, b1) 401 - 402 - esgenesis := b1.Events(t, 0) 403 - 404 - time.Sleep(time.Millisecond * 50) 405 - 406 - bob := p1.MustNewUser(t, "bob.tpds") 407 - 408 - for i := 0; i < 10; i++ { 409 - bob.Post(t, fmt.Sprintf("this is bobs post %d", i)) 410 - } 411 - 412 - // wait for 11 events, the first one is the actor creation 413 - firsten := esgenesis.WaitFor(11) 414 - _ = firsten 415 - 416 - fmt.Println("REBASE ONE") 417 - bob.DoRebase(t) 418 - 419 - var posts []*atproto.RepoStrongRef 420 - for i := 0; i < 10; i++ { 421 - ref := bob.Post(t, fmt.Sprintf("this is bobs post after rebase %d", i)) 422 - posts = append(posts, ref) 423 - } 424 - 425 - time.Sleep(time.Millisecond * 50) 426 - 427 - evts1 := b1.Events(t, 0) 428 - defer evts1.Cancel() 429 - 430 - all := evts1.WaitFor(11) 431 - 432 - assert.Equal(true, all[0].RepoCommit.Rebase) 433 - assert.Equal(int64(12), all[0].RepoCommit.Seq) 434 - assert.Equal(posts[0].Cid, all[1].RepoCommit.Ops[0].Cid.String()) 435 - 436 - // and another one! 
437 - fmt.Println("REBASE TWO") 438 - bob.DoRebase(t) 439 - 440 - var posts2 []*atproto.RepoStrongRef 441 - for i := 0; i < 15; i++ { 442 - ref := bob.Post(t, fmt.Sprintf("this is bobs post after second rebase %d", i)) 443 - posts2 = append(posts2, ref) 444 - } 445 - 446 - time.Sleep(time.Millisecond * 50) 447 - 448 - evts2 := b1.Events(t, 0) 449 - defer evts2.Cancel() 450 - 451 - all = evts2.WaitFor(16) 452 - 453 - assert.Equal(true, all[0].RepoCommit.Rebase) 454 - assert.Equal(posts2[0].Cid, all[1].RepoCommit.Ops[0].Cid.String()) 455 } 456 457 func jsonPrint(v any) {
··· 166 time.Sleep(time.Millisecond * 50) 167 168 // Now, the bgs will discover a gap, and have to catch up somehow 169 + fmt.Println("EXPECT BGS TO CATCHUP NOW") 170 socialSim(t, users2, 1, 0) 171 172 time.Sleep(time.Second) ··· 229 p2.RequestScraping(t, b1) 230 time.Sleep(time.Millisecond * 50) 231 232 + fmt.Println("AFTER THIS EXPECT THE BGS TO DO A GETREPO TO CATCH UP") 233 // Now, the bgs will discover a gap, and have to catch up somehow 234 socialSim(t, users2, 1, 0) 235 ··· 332 333 last := es2.Next() 334 assert.Equal(alice.did, last.RepoCommit.Repo) 335 } 336 337 func jsonPrint(v any) {
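The printlns added to this test mark where the BGS is expected to notice a gap in the firehose and catch up by fetching the repo. One plausible shape for that catch-up, using only signatures from this change (xrpcc, uid, did, lastRev, and repoman are hypothetical placeholders; the actual BGS wiring is not part of this diff):

    // Hypothetical catch-up sketch: fetch blocks since the last rev we indexed
    // and hand the CAR slice to the RepoManager on top of that same rev.
    repodata, err := comatproto.SyncGetRepo(ctx, xrpcc, did, lastRev)
    if err != nil {
        return fmt.Errorf("fetching repo for %s: %w", did, err)
    }
    if err := repoman.ImportNewRepo(ctx, uid, did, bytes.NewReader(repodata), &lastRev); err != nil {
        return fmt.Errorf("importing fetched repo for %s: %w", did, err)
    }

ImportNewRepo (see the repomgr diff) rejects a rev that does not match the carstore's current rev, which is exactly the base check this path relies on.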
testing/testdata/greenground.repo.car

(binary file; contents not shown)

+1 -1
testing/utils.go
··· 810 811 kmgr := &bsutil.FakeKeyManager{} 812 813 - nroot, err := r.Commit(ctx, kmgr.SignForUser) 814 if err != nil { 815 return cid.Undef, err 816 }
··· 810 811 kmgr := &bsutil.FakeKeyManager{} 812 813 + nroot, _, err := r.Commit(ctx, kmgr.SignForUser) 814 if err != nil { 815 return cid.Undef, err 816 }